text (stringlengths 6-947k) | repo_name (stringlengths 5-100) | path (stringlengths 4-231) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 6-947k) | score (float64 0-0.34) |
---|---|---|---|---|---|---|
from configure import Configuration, ConfigurationError
from ksgen import docstring, yaml_utils, utils
from ksgen.tree import OrderedTree
from ksgen.yaml_utils import LookupDirective
from docopt import docopt, DocoptExit
import logging
import os
import re
import yaml
VALUES_KEY = '!value'
DEFAULTS_TAG = 'defaults'
logger = logging.getLogger(__name__)
class KeyValueError(Exception):
def __init__(self, arg, reason, *args, **kwargs):
super(KeyValueError, self).__init__(*args, **kwargs)
self.arg = arg
self.reason = reason
def __str__(self):
return "Invalid key-value pair: %s, - %s" % (self.arg, self.reason)
class OptionError(Exception):
def __init__(self, paths, *args, **kwargs):
super(OptionError, self).__init__(*args, **kwargs)
self._paths = paths
def __str__(self):
return "Invalid values passed, files : %s not found" % self._paths
class ArgsConflictError(Exception):
pass
class Generator(object):
"""
Usage:
generate [options] <output-file>
generate [--extra-vars=KEY_PAIR]... [options] <output-file>
Options:
--rules-file=<file> Rules file that contains generation rules
Process the rules first, so the additional
args will override args in rules file
--extra-vars=<val>... Provide extra vars {options}
"""
def __init__(self, config_dir, args):
self.config_dir = config_dir
self.args = _normalize_args(args)
logger.debug("Generator: config_dir: %s, args: %s", config_dir, args)
self._doc_string = Generator.__doc__.format(
options=docstring.Generator(config_dir).generate())
self.settings = None
self.output_file = None
self.rules_file = None
self._rules = None
self.parsed = None
self.extra_vars = None
self.all_settings = None
self.defaults = []
def run(self):
if not self._parse():
return 1
loader = Loader(self.config_dir, self.settings)
self._merge_rules_file_exports(loader)
loader.load()
self._merge_extra_vars(loader)
self.all_settings = loader.settings()
self._replace_in_string_lookup()
logger.debug(yaml_utils.to_yaml("All Settings", self.all_settings))
logger.info("Writing to file: %s", self.output_file)
with open(self.output_file, 'w') as out:
out.write(
yaml.safe_dump(self.all_settings, default_flow_style=False))
return 0
def _prepare_defaults(self):
for arg in self.args:
if not arg.startswith('--'):
continue
arg = arg[2:].split('=')[0]
if '-' not in arg:
logging.debug("Preparing defaults for %s:" % arg)
self._load_defaults(self.config_dir + os.sep + arg,
self.parsed['--' + arg])
self._merge_defaults()
logger.info("Defaults \n%s", self.defaults)
def _load_defaults(self, path, value):
param = '-'.join(path[len(self.config_dir + os.sep):].split('/')[::2])
if not self.parsed['--' + param]:
logging.warning(
"\'--%s\' hasn't been provided, using \'%s\' as default" % (
param, value))
self.defaults.append(''.join(['--', param, '=', str(value)]))
else:
value = self.parsed['--' + param]
file_path = path + os.sep + str(value) + '.yml'
loaded_file = Configuration.from_file(file_path)
if loaded_file.get(DEFAULTS_TAG):
path += os.sep + str(value)
for sub_key, sub_value in loaded_file[DEFAULTS_TAG].iteritems():
self._load_defaults(path + os.sep + sub_key, sub_value)
def _merge_defaults(self):
for element in self.defaults:
(option, value) = element.split('=')
if not self.parsed[option]:
self.parsed[option] = value
def _merge_rules_file_exports(self, loader):
if not self._rules:
return
try:
logger.debug("Try loading exports in rules file %s",
self.rules_file)
loader.merge(self._rules.export)
logger.info('Loaded exports from rules file: %s', self.rules_file)
except KeyError:
logger.debug("No 'exports' in rules file %s", self.rules_file)
def _parse(self):
# create the settings tree and preserve the order in which arguments
# are passed. Convert all args into an ordered tree so that
# --foo fooz --too moo --foo-bar baaz --foo-arg val
# will be like the ordered tree below
# foo:
# <special-key>: fooz
# bar: ### <-- foo-bar is not foo/bar
# <special-key>: baaz
# arg: ### <-- arg comes after bar
# <special-key>: val
# too:
# <special-key>: moo
logger.debug("Parsing: %s", self.args)
logger.debug("DocString for Generate: %s", self._doc_string)
try:
self.parsed = docopt(self._doc_string,
options_first=True, argv=self.args)
except DocoptExit:
logger.error(self._doc_string)
return False
logger.info("Parsed \n%s", self.parsed)
self._prepare_defaults()
if not self._apply_rules():
logger.error("Error while validating rules: check args %s",
' \n'.join(self.args))
return False
logger.debug("New Args: %s", self.args)
logger.info("After applying rules Parsed: \n%s", self.parsed)
self.output_file = utils.extract_value(
self.parsed, '<output-file>', optional=False)
self.extra_vars = utils.extract_value(self.parsed, '--extra-vars')
# filter only options; [ --foo, fooz, --bar baz ] -> [--foo, --bar]
options = [x for x in self.args + self.defaults if x.startswith('--')]
settings = OrderedTree(delimiter='-')
for option in options: # iterate options to preserve order of args
option = option.split('=')[0]
value = self.parsed.get(option)
if not value:
continue
key = option[2:] + settings.delimiter + VALUES_KEY
settings[key] = value
logger.debug("%s: %s", key, value)
logger.debug(yaml_utils.to_yaml(
"Directory structure from args:", settings))
self.settings = settings
return True
def _apply_rules(self):
self.rules_file = utils.extract_value(self.parsed, '--rules-file')
if not self.rules_file:
return True # No rules to be applied
self.rules_file = os.path.abspath(self.rules_file)
logger.debug('Rule file: %s', self.rules_file)
self._rules = load_configuration(self.rules_file, os.path.curdir)
# create --key=value pairs from the rules.args
args_in_rules = self._rules.get('args', {})
extra_vars = utils.extract_value(args_in_rules, 'extra-vars')
args = ['--%s=%s' % (k, v) for k, v in args_in_rules.iteritems()]
if extra_vars:
extra_vars = utils.to_list(extra_vars)
args.extend(['--extra-vars=%s' % x for x in extra_vars])
logger.debug('Args in rules file: %s', args)
logger.debug('Args in self: %s', self.args)
logger.debug('Args in rules: %s', args_in_rules)
# get the key part without first two -- from --key=value
def _key(x):
return x.split('=')[0][2:]
# filter out all args present in rules file
conflicting_keys = [_key(x) for x in self.args
if _key(x) in args_in_rules]
if conflicting_keys:
raise ArgsConflictError(
"Command line args: '{0}' are in conflict with args defined "
"in the rules file.".format(
', '.join(conflicting_keys)))
# prepend the args from the rules file and re-parse the args
if args:
self.args = args + self.args
try:
self.parsed = docopt(self._doc_string,
options_first=True, argv=self.args)
except DocoptExit:
logger.error(self._doc_string)
return False
# remove rules-file from the parse tree
del self.parsed['--rules-file']
# validate args
return self._validate_args()
def _validate_args(self):
# validate args
try:
mandatory_args = self._rules.validation.must_have
except KeyError:
logger.debug('No validations in rules file')
return True
if not mandatory_args:
logger.debug('No validations in rules file')
return True
logger.debug('Must have args from rule: %s', mandatory_args)
logger.debug("New Parsed \n%s", self.parsed)
missing_args = [x for x in mandatory_args
if not self.parsed.get("--" + x)]
logger.debug('Missing args: %s', missing_args)
if missing_args:
logger.error("Error: missing mandatory args: %s",
', '.join(missing_args))
return False
return True
def _merge_extra_vars(self, loader):
if not self.extra_vars or len(self.extra_vars) == 0:
return
for var in self.extra_vars:
if var.startswith('@'):
loader.load_file(var[1:]) # remove @
elif '=' in var:
key, val = var.split('=', 1)
tree = OrderedTree(delimiter='.')
tree[key] = val
loader.merge(tree)
else:
raise KeyValueError(var, "No = found between key and value")
def _replace_in_string_lookup(self, sub_lookup_dict=None):
"""
Search and replace 'in-string' !lookup
Example:
key: 'pre_string{{ !lookup key.sub_key.sub_sub_key }}post_string'
"""
if sub_lookup_dict is None:
sub_lookup_dict = self.all_settings
my_iter = sub_lookup_dict.iteritems() if \
isinstance(sub_lookup_dict, dict) else enumerate(sub_lookup_dict)
for key, value in my_iter:
if isinstance(value, OrderedTree):
self._replace_in_string_lookup(sub_lookup_dict[key])
elif isinstance(value, list):
self._replace_in_string_lookup(value)
elif isinstance(value, str):
while True:
parser = re.compile('\{\{\s*!lookup\s[^\s]+\s*\}\}')
additional_lookups = parser.findall(value)
if not additional_lookups:
break
for additional_lookup in additional_lookups:
lookup_key = re.search('(\w+\.?)+ *?\}\}',
additional_lookup)
lookup_key = lookup_key.group(0).strip()[:-2].strip()
lookup_value = str(
self.all_settings[lookup_key.replace('.', '!')])
sub_lookup_dict[key] = re.sub(additional_lookup,
lookup_value, value)
value = sub_lookup_dict[key]
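# Illustrative resolution (settings values here are made up): if
# all_settings['net!ip'] == '10.0.0.1', then the string
# 'http://{{ !lookup net.ip }}:8080' becomes 'http://10.0.0.1:8080'.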
class Loader(object):
def __init__(self, config_dir, settings):
self._settings = settings
self._config_dir = config_dir
self._loaded = False
self._all_settings = None
self._file_list = None
self._invalid_paths = None
self._all_settings = OrderedTree('!')
def settings(self):
self.load()
LookupDirective.lookup_table = self._all_settings
return self._all_settings
def load_file(self, f):
self.load()
cfg = load_configuration(f, self._config_dir)
self._all_settings.merge(cfg)
def merge(self, tree):
self._all_settings.merge(tree)
def load(self):
if self._loaded:
return
self._file_list = []
self._invalid_paths = []
self._create_file_list(self._settings, self._file_list)
logger.info(
"\nList of files to load :\n - %s",
'\n - '.join([
x[len(self._config_dir) + 1:] for x in self._file_list
]))
if self._invalid_paths:
logger.info("invalid files :\n %s", '\n'.join(self._invalid_paths))
raise OptionError(self._invalid_paths)
all_cfg = Configuration.from_dict({})
for f in self._file_list:
cfg = load_configuration(f, self._config_dir)
try:
del cfg[DEFAULTS_TAG]
except KeyError:
pass
else:
logger.debug("Successfully removed default traces from %s" % f)
all_cfg.merge(cfg)
self._all_settings.merge(all_cfg)
self._loaded = True
def _create_file_list(self, settings, file_list, parent_path=""):
""" Appends list of files to be process to self._file_list
and list of invalid file paths to self._invalid_paths
"""
logger.debug('settings:\n %s \n parent: %s \n files: %s',
settings, parent_path, file_list)
for key, sub_tree in settings.items():
# ignore the special key value
if key == VALUES_KEY:
continue
logger.debug("key: %s, subtree: %s", key, sub_tree)
path = "%(parent_path)s%(key)s%(sep)s%(file)s" % {
'parent_path': parent_path,
'key': key,
'sep': os.sep,
'file': sub_tree[VALUES_KEY]
}
abs_file_path = os.path.abspath(
self._config_dir + os.sep + path + '.yml')
file_list.append(abs_file_path)
logger.debug('path: %s', abs_file_path)
if not os.path.exists(abs_file_path):
self._invalid_paths.append(abs_file_path)
# recurse if there are sub settings
if (isinstance(sub_tree, dict)
and len(sub_tree.keys()) > 1):
logger.debug('recursing into: sub-tree: %s', sub_tree)
self._create_file_list(sub_tree, file_list, path + os.sep)
def _normalize_args(args):
"""
Converts all --key val to --key=val
"""
args_normalized = []
merge_with_prev = False
for arg in args:
if merge_with_prev:
# add the val part to last --key to get --key=val
args_normalized[-1] += '=%s' % arg
merge_with_prev = False
continue
first_two_chars = arg[:2]
if first_two_chars == '--' and '=' not in arg:
merge_with_prev = True
args_normalized.append(arg)
return args_normalized
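# Illustrative behaviour (argument values are made up):
# _normalize_args(['--foo', 'fooz', '--bar=baz', 'out.yml'])
# returns ['--foo=fooz', '--bar=baz', 'out.yml'].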
def load_configuration(file_path, rel_dir=None):
""" Return the Configuration for the file_path. Also logs an error if
there is an error while parsing the file.
If the optional rel_dir is passed the error message would print
the file_path relative to rel_dir instead of current directory
"""
logger.debug('Loading file: %s', file_path)
try:
return Configuration.from_file(file_path).configure()
except (yaml.scanner.ScannerError,
yaml.parser.ParserError,
TypeError,
ConfigurationError) as e:
rel_dir = rel_dir or os.curdir
logger.error("Error loading: %s; reason: %s",
os.path.relpath(file_path, rel_dir), e)
raise
return None
| tkammer/infrared | tools/ksgen/ksgen/settings.py | Python | gpl-3.0 | 15,945 | 0.000815 |
# -*- coding: utf-8 -*-
from __future__ import print_function
from contextlib import contextmanager
import re
from cartouche._portability import u
from .errors import CartoucheError
from .nodes import (Node, Raises, Except, Note, Warning, Returns, Arg, Yields,
Attribute, Usage, ensure_terminal_blank)
OPTIONAL_BULLET_PATTERN = u(r'(?:[\*\+\-\•\‣\⁃]\s+)?')
ARGS_PATTERN = u(r'(\*{0,2}\w+)(\s+\(([\.\w]+)\))?\s*:\s*(.*)')
ATTRIBUTES_PATTERN = u(r'(\*{0,2}\w+)(\s+\(([\.\w]+)\))?\s*:\s*(.*)')
RAISES_PATTERN = u(r'([\w\.]+)\s*:\s*(.*)')
ARGS_REGEX = re.compile(ARGS_PATTERN)
ATTRIBUTES_REGEX = re.compile(ATTRIBUTES_PATTERN)
RAISES_REGEX = re.compile(RAISES_PATTERN)
class CartoucheSyntaxError(CartoucheError):
pass
def parse_cartouche_text(lines):
'''Parse text in cartouche format and return a reStructuredText equivalent
Args:
lines: A sequence of strings representing the lines of a single
docstring as read from the source by Sphinx. This string should be
in a format that can be parsed by cartouche.
Returns:
A list of lines containing the transformed docstring as
reStructuredText as produced by cartouche.
Raises:
RuntimeError: If the docstring cannot be parsed.
'''
indent_lines = unindent(lines)
indent_lines = pad_blank_lines(indent_lines)
indent_lines = first_paragraph_indent(indent_lines)
indent_paragraphs = gather_lines(indent_lines)
parse_tree = group_paragraphs(indent_paragraphs)
syntax_tree = extract_structure(parse_tree)
result = syntax_tree.render_rst()
ensure_terminal_blank(result)
return result
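# Illustrative call (the exact reStructuredText produced depends on the
# nodes module, which is not shown here):
#     rst_lines = parse_cartouche_text(['Args:', '    x: An integer.', ''])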
def unindent(lines):
'''Convert an iterable of indented lines into a sequence of tuples.
The first element of each tuple is the indent in number of characters, and
the second element is the unindented string.
Args:
lines: A sequence of strings representing the lines of text in a docstring.
Returns:
A list of tuples where each tuple corresponds to one line of the input
list. Each tuple has two entries - the first is an integer giving the
size of the indent in characters, the second is the unindented text.
'''
unindented_lines = []
for line in lines:
unindented_line = line.lstrip()
indent = len(line) - len(unindented_line)
unindented_lines.append((indent, unindented_line))
return unindented_lines
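# For example (illustrative input), unindent(['  Args:', '    x: An int.'])
# returns [(2, 'Args:'), (4, 'x: An int.')].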
def pad_blank_lines(indent_texts):
'''Give blank (empty) lines the same indent level as the preceding line.
Args:
indent_texts: An iterable of tuples each containing an integer in the
first element and a string in the second element.
Returns:
A list of tuples each containing an integer in the first element and a
string in the second element.
'''
current_indent = 0
result = []
for indent, text in indent_texts:
if len(text) > 0:
current_indent = indent
result.append((current_indent, text))
return result
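# For example (illustrative input),
# pad_blank_lines([(4, 'First line.'), (0, ''), (4, 'Second line.')])
# returns [(4, 'First line.'), (4, ''), (4, 'Second line.')].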
def extract_structure(parse_tree):
'''Create an Abstract Syntax Tree representing the semantics of a parse tree.
Args:
parse_tree: The root Node of the parse tree produced by group_paragraphs().
Returns:
A Node which is the root of an Abstract Syntax Tree representing the
docstring.
Raises:
CartoucheError: In the event that the parse tree cannot be understood.
'''
return convert_node(parse_tree)
def convert_node(node):
if node.indent == 0 and len(node.lines) == 0:
return convert_children(node)
if node.lines[0].startswith('Args:'):
return convert_args(node)
if node.lines[0].startswith('Returns:'):
return convert_returns(node)
if node.lines[0].startswith('Yields:'):
return convert_yields(node)
if node.lines[0].startswith('Raises:'):
return convert_raises(node)
if node.lines[0].startswith('Note:'):
return convert_note(node)
if node.lines[0].startswith('Warning:'):
return convert_warning(node)
if node.lines[0].startswith('Attributes:'):
return convert_attributes(node)
if node.lines[0].startswith('Usage:'):
return convert_usage(node)
result = convert_children(node)
result.lines = node.lines
result.indent = node.indent
return result
def convert_children(node):
converted_children = [convert_node(child) for child in node.children]
result = Node()
result.children = converted_children
return result
def append_child_to_args_group_node(child, group_node, indent):
arg = None
non_empty_lines = (line for line in child.lines if line)
for line in non_empty_lines:
m = ARGS_REGEX.match(line)
if m is None:
raise CartoucheSyntaxError('Cartouche: Invalid argument syntax "{line}" for Args block'.format(line=line))
param_name = m.group(1)
param_type = m.group(3)
param_text = m.group(4)
arg = Arg(indent, param_name)
group_node.children.append(arg)
arg.type = param_type
if param_text is not None:
arg.children.append(Node(indent, [param_text], arg))
if arg is not None:
last_child = arg.children[-1] if len(arg.children) != 0 else arg
for grandchild in child.children:
last_child.children.append(grandchild)
def append_child_to_attributes_group_node(child, group_node, indent):
attribute = None
non_empty_lines = (line for line in child.lines if line)
for line in non_empty_lines:
m = ATTRIBUTES_REGEX.match(line)
if m is None:
raise CartoucheSyntaxError('Cartouche: Invalid attribute syntax "{line}" for Attributes block'.format(line=line))
attribute_name = m.group(1)
attribute_type = m.group(3)
attribute_text = m.group(4)
attribute = Attribute(indent, attribute_name)
group_node.children.append(attribute)
attribute.type = attribute_type
if attribute_text is not None:
attribute.children.append(Node(indent, [attribute_text], attribute))
if attribute is not None:
last_child = attribute.children[-1] if len(attribute.children) != 0 else attribute
for grandchild in child.children:
last_child.children.append(grandchild)
def convert_args(node):
assert node.lines[0].startswith('Args:')
group_node = Node()
for child in node.children:
append_child_to_args_group_node(child, group_node, node.indent)
return group_node
def convert_returns(node):
assert node.lines[0].startswith('Returns:')
returns = Returns(node.indent)
returns.line = node.lines[0][8:].strip()
returns.children = node.children
return returns
def convert_yields(node):
assert node.lines[0].startswith('Yields:')
returns = Yields(node.indent)
returns.line = node.lines[0][8:].strip()
returns.children = node.children
return returns
def convert_note(node):
assert node.lines[0].startswith('Note:')
note = Note(node.indent)
note.line = node.lines[0][5:].strip()
note.children = node.children
return note
def convert_warning(node):
assert node.lines[0].startswith('Warning:')
warning = Warning(node.indent)
warning.line = node.lines[0][8:].strip()
warning.children = node.children
return warning
def convert_raises(node):
assert node.lines[0].startswith('Raises:')
group_node = Raises(node.indent)
for child in node.children:
append_child_to_raise_node(child, group_node)
return group_node
def convert_attributes(node):
assert node.lines[0].startswith('Attributes:')
group_node = Node()
for child in node.children:
append_child_to_attributes_group_node(child, group_node, node.indent)
return group_node
def convert_usage(node):
assert node.lines[0].startswith('Usage:')
usage = Usage(node.indent)
usage.children = node.children
return usage
def parse_exception(line):
'''Parse the first line of a Cartouche exception description.
Args:
line (str): A single line Cartouche exception description.
Returns:
A 2-tuple containing the first line of the description and the exception type.
'''
m = RAISES_REGEX.match(line)
if m is None:
raise CartoucheSyntaxError('Cartouche: Invalid argument syntax "{line}" for Raises block'.format(line=line))
return m.group(2), m.group(1)
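# For example (illustrative line),
# parse_exception('ValueError: if the value is out of range')
# returns ('if the value is out of range', 'ValueError').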
def append_child_to_raise_node(child, group_node):
exception = None
non_empty_lines = (line for line in child.lines if line)
for line in non_empty_lines:
exception_text, exception_type = parse_exception(line)
exception = Except(child.indent, exception_type)
group_node.children.append(exception) # TODO: Could use parent here.
if exception_text is not None:
exception.children.append( Node(child.indent,
[exception_text], exception))
if exception is not None:
last_child = exception.children[-1] if len(exception.children) != 0 else exception
for grandchild in child.children:
last_child.children.append(grandchild)
def group_paragraphs(indent_paragraphs):
'''
Group paragraphs so that more indented paragraphs become children of less
indented paragraphs.
'''
# The tree consists of tuples of the form (indent, [children]) where the
# children may be strings or other tuples
root = Node(0, [], None)
current_node = root
previous_indent = -1
for indent, lines in indent_paragraphs:
if indent > previous_indent:
current_node = create_child_node(current_node, indent, lines)
elif indent == previous_indent:
current_node = create_sibling_node(current_node, indent, lines)
elif indent < previous_indent:
current_node = create_uncle_node(current_node, indent, lines)
previous_indent = indent
return root
def create_sibling_node(current_node, indent, lines):
sibling = Node(indent, lines, current_node.parent)
current_node.parent.add_child(sibling)
current_node = sibling
return current_node
def create_child_node(current_node, indent, lines):
child = Node(indent, lines, current_node)
current_node.add_child(child)
current_node = child
return current_node
def create_uncle_node(current_node, indent, lines):
ancestor = current_node
while ancestor.indent >= indent:
if ancestor.parent is None:
break
ancestor = ancestor.parent
uncle = Node(indent, lines, ancestor)
ancestor.add_child(uncle)
current_node = uncle
return current_node
def gather_lines(indent_lines):
'''Split the list of (int, str) tuples into a list of (int, [str]) tuples
to group the lines into paragraphs of consistent indent.
'''
return remove_empty_paragraphs(split_separated_lines(gather_lines_by_indent(indent_lines)))
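# For example (illustrative input),
# gather_lines([(4, 'Args:'), (8, 'x: one'), (8, 'y: two')])
# returns [(4, ['Args:']), (8, ['x: one', 'y: two'])].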
def gather_lines_by_indent(indent_lines):
result = []
previous_indent = -1
for indent, line in indent_lines:
if indent != previous_indent:
paragraph = (indent, [])
result.append(paragraph)
else:
paragraph = result[-1]
paragraph[1].append(line)
previous_indent = indent
return result
def split_separated_lines(indent_paragraphs):
result = []
for indent, paragraph in indent_paragraphs:
result.append((indent, []))
if len(paragraph) > 0:
result[-1][1].append(paragraph[0])
if len(paragraph) > 2:
for line in paragraph[1: -1]:
result[-1][1].append(line)
if len(line) == 0:
result.append((indent, []))
if len(paragraph) > 1:
result[-1][1].append(paragraph[-1])
return result
def remove_empty_paragraphs(indent_paragraphs):
return [(indent, paragraph) for indent, paragraph in indent_paragraphs if len(paragraph)]
def first_paragraph_indent(indent_texts):
'''Fix the indentation on the first paragraph.
This occurs because the first line of a multi-line docstring following the
opening quote usually has no indent.
Args:
indent_texts: The lines of the docstring as an iterable over 2-tuples
each containing an integer indent level as the first element and
the text as the second element.
Returns:
A list of 2-tuples, each containing an integer indent level as the
first element and the text as the second element.
'''
opening_indent = determine_opening_indent(indent_texts)
result = []
input = iter(indent_texts)
for indent, text in input:
if indent == 0:
result.append((opening_indent, text))
else:
result.append((indent, text))
break
for indent, text in input:
result.append((indent, text))
return result
def determine_opening_indent(indent_texts):
'''Determine the opening indent level for a docstring.
The opening indent level is the first non-zero indent level of a
non-empty line in the docstring.
Args:
indent_texts: The lines of the docstring as an iterable over 2-tuples
each containing an integer indent level as the first element and
the text as the second element.
Returns:
The opening indent level as an integer.
'''
num_lines = len(indent_texts)
if num_lines < 1:
return 0
assert num_lines >= 1
first_line_indent = indent_texts[0][0]
if num_lines == 1:
return first_line_indent
assert num_lines >= 2
second_line_indent = indent_texts[1][0]
second_line_text = indent_texts[1][1]
if len(second_line_text) == 0:
return first_line_indent
return second_line_indent
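# For example (illustrative input),
# determine_opening_indent([(0, 'Summary.'), (8, 'Args:'), (12, 'x: An int.')])
# returns 8, the indent of the second line.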
#noinspection PyUnusedLocal
def rewrite_autodoc(app, what, name, obj, options, lines):
'''Convert lines from Cartouche to Sphinx format.
The function to be called by the Sphinx autodoc extension when autodoc
has read and processed a docstring. This function modifies its
``lines`` argument *in place*, replacing Cartouche syntax input with
Sphinx reStructuredText output.
Args:
app: The Sphinx application object.
what: The type of object which the docstring belongs to. One of
'module', 'class', 'exception', 'function', 'method', 'attribute'
name: The fully qualified name of the object.
obj: The object itself.
options: The options given to the directive. An object with attributes
``inherited_members``, ``undoc_members``, ``show_inheritance`` and
``noindex`` that are ``True`` if the flag option of the same name
was given to the auto directive.
lines: The lines of the docstring. Will be modified *in place*.
Raises:
CartoucheSyntaxError: If the docstring is malformed.
'''
try:
lines[:] = parse_cartouche_text(lines)
except CartoucheSyntaxError as syntax_error:
args = syntax_error.args
arg0 = args[0] if args else ''
arg0 += " in docstring for {what} {name} :".format(what=what, name=name)
arg0 += "\n=== BEGIN DOCSTRING ===\n{lines}\n=== END DOCSTRING ===\n".format(lines='\n'.join(lines))
#noinspection PyPropertyAccess
syntax_error.args = (arg0,) + args[1:]
raise
def accept_bulleted_args():
'''Further use of the parser will accept bulleted lists for Args.'''
global ARGS_REGEX
ARGS_REGEX = re.compile(OPTIONAL_BULLET_PATTERN + ARGS_PATTERN)
def reject_bulleted_args():
'''Further use of the parser will reject bulleted lists for Args.'''
global ARGS_REGEX
ARGS_REGEX = re.compile(ARGS_PATTERN)
def accept_bulleted_raises():
'''Further use of the parser will accept bulleted lists for Raises.'''
global RAISES_REGEX
RAISES_REGEX = re.compile(OPTIONAL_BULLET_PATTERN + RAISES_PATTERN)
def reject_bulleted_raises():
'''Further use of the parser will reject bulleted lists for Raises.'''
global RAISES_REGEX
RAISES_REGEX = re.compile(RAISES_PATTERN)
@contextmanager
def bulleted_args():
'''A context manager within the scope of which bulleted Args will be accepted.'''
global ARGS_REGEX
previous_args_regex = ARGS_REGEX
accept_bulleted_args()
yield
ARGS_REGEX = previous_args_regex
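# Sketch of typical use (docstring_lines is assumed to be a list of lines):
#     with bulleted_args():
#         rst_lines = parse_cartouche_text(docstring_lines)
# On leaving the block the previous ARGS_REGEX is restored.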
@contextmanager
def bulleted_raises():
'''A context manager within the scope of which bulleted Raises will be accepted.'''
global RAISES_REGEX
previous_raises_regex = RAISES_REGEX
accept_bulleted_raises()
yield
RAISES_REGEX = previous_raises_regex
def builder_inited(app):
if app.config.cartouche_accept_bulleted_args:
accept_bulleted_args()
if app.config.cartouche_accept_bulleted_raises:
accept_bulleted_raises()
| rob-smallshire/cartouche | cartouche/parser.py | Python | bsd-3-clause | 17,022 | 0.001646 |
from utils import utils
from artifacts import scw_artifact
url_mapping = {}
current_vrt = utils.get_json(utils.VRT_FILENAME)
scw_artifact.write_artifact_file(
scw_artifact.generate_urls(current_vrt['content'], url_mapping)
)
| bugcrowd/vulnerability-rating-taxonomy | lib/generate_artifacts.py | Python | apache-2.0 | 228 | 0 |
#!/usr/bin/env python
""" Python Character Mapping Codec for ROT13.
See http://ucsub.colorado.edu/~kominek/rot13/ for details.
Written by Marc-Andre Lemburg ([email protected]).
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_map)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_map)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='rot-13',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamwriter=StreamWriter,
streamreader=StreamReader,
)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0041: 0x004e,
0x0042: 0x004f,
0x0043: 0x0050,
0x0044: 0x0051,
0x0045: 0x0052,
0x0046: 0x0053,
0x0047: 0x0054,
0x0048: 0x0055,
0x0049: 0x0056,
0x004a: 0x0057,
0x004b: 0x0058,
0x004c: 0x0059,
0x004d: 0x005a,
0x004e: 0x0041,
0x004f: 0x0042,
0x0050: 0x0043,
0x0051: 0x0044,
0x0052: 0x0045,
0x0053: 0x0046,
0x0054: 0x0047,
0x0055: 0x0048,
0x0056: 0x0049,
0x0057: 0x004a,
0x0058: 0x004b,
0x0059: 0x004c,
0x005a: 0x004d,
0x0061: 0x006e,
0x0062: 0x006f,
0x0063: 0x0070,
0x0064: 0x0071,
0x0065: 0x0072,
0x0066: 0x0073,
0x0067: 0x0074,
0x0068: 0x0075,
0x0069: 0x0076,
0x006a: 0x0077,
0x006b: 0x0078,
0x006c: 0x0079,
0x006d: 0x007a,
0x006e: 0x0061,
0x006f: 0x0062,
0x0070: 0x0063,
0x0071: 0x0064,
0x0072: 0x0065,
0x0073: 0x0066,
0x0074: 0x0067,
0x0075: 0x0068,
0x0076: 0x0069,
0x0077: 0x006a,
0x0078: 0x006b,
0x0079: 0x006c,
0x007a: 0x006d,
})
### Encoding Map
encoding_map = codecs.make_encoding_map(decoding_map)
### Filter API
def rot13(infile, outfile):
outfile.write(infile.read().encode('rot-13'))
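# Illustrative Python 2 usage of the codec registered by this module:
# u'Hello'.encode('rot-13') == 'Uryyb'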
if __name__ == '__main__':
import sys
rot13(sys.stdin, sys.stdout)
| ktan2020/legacy-automation | win/Lib/encodings/rot_13.py | Python | mit | 2,697 | 0.011123 |
# -*- coding: utf-8 -*-
# ###
# Copyright (c) 2013, Rice University
# This software is subject to the provisions of the GNU Affero General
# Public License version 3 (AGPLv3).
# See LICENCE.txt for details.
# ###
from __future__ import print_function
import os
import sys
import psycopg2
__all__ = ('DB_CONNECTION_STRING', 'TESTING_DATA_DIRECTORY',
'postgresql_fixture', 'db_connect',
)
here = os.path.abspath(os.path.dirname(__file__))
_DB_CONNECTION_STRING_ENV_VAR_NAME = 'DB_CONNECTION_STRING'
_DB_CONNECTION_STRING_CLI_OPT_NAME = '--db-conn-str'
try:
DB_CONNECTION_STRING = os.environ[_DB_CONNECTION_STRING_ENV_VAR_NAME]
except KeyError:
try:
arg_pos = sys.argv.index(_DB_CONNECTION_STRING_CLI_OPT_NAME)
except ValueError:
# Use default...
print("Using default database connection string.",
file=sys.stderr)
DB_CONNECTION_STRING = "dbname=cnxarchive-testing user=cnxarchive password=cnxarchive"
else:
DB_CONNECTION_STRING = sys.argv[arg_pos+1]
TESTING_DATA_DIRECTORY = os.path.join(here, 'data')
class PostgresqlFixture:
"""A testing fixture for a live (same as production) SQL database.
This will set up the database once for a test case. After each test
case has completed, the database will be cleaned (all tables dropped).
On a personal note, this seems archaic... Why can't I rollback to a
transaction?
"""
def __init__(self):
# Configure the database connection.
self.connection_string = DB_CONNECTION_STRING
# Drop all existing tables from the database.
self._drop_all()
def _drop_all(self):
"""Drop all tables in the database."""
with psycopg2.connect(self.connection_string) as db_connection:
with db_connection.cursor() as cursor:
cursor.execute("DROP SCHEMA public CASCADE")
cursor.execute("CREATE SCHEMA public")
def setUp(self):
# Initialize the database schema.
from cnxarchive.database import initdb
settings = {'db-connection-string': self.connection_string}
initdb(settings)
def tearDown(self):
# Drop all tables.
self._drop_all()
postgresql_fixture = PostgresqlFixture()
def db_connect(method):
"""Decorator for methods that need to use the database
Example:
@db_connect
def setUp(self, cursor):
cursor.execute(some_sql)
# some other code
"""
def wrapped(self, *args, **kwargs):
with psycopg2.connect(DB_CONNECTION_STRING) as db_connection:
with db_connection.cursor() as cursor:
return method(self, cursor, *args, **kwargs)
db_connection.commit()
return wrapped
| Connexions/cnx-upgrade | cnxupgrade/tests/__init__.py | Python | agpl-3.0 | 2,755 | 0.001452 |
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2017 Andrey Antukh <[email protected]>
# Copyright (C) 2014-2017 Jesús Espino <[email protected]>
# Copyright (C) 2014-2017 David Barragán <[email protected]>
# Copyright (C) 2014-2017 Alejandro Alonso <[email protected]>
# Copyright (C) 2014-2017 Anler Hernández <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.core.urlresolvers import reverse
from taiga.base.utils import json
from tests import factories as f
from tests.utils import disconnect_signals, reconnect_signals
import pytest
pytestmark = pytest.mark.django_db
def setup_module(module):
disconnect_signals()
def teardown_module(module):
reconnect_signals()
def test_auth_create(client):
url = reverse('auth-list')
user = f.UserFactory.create()
login_data = json.dumps({
"type": "normal",
"username": user.username,
"password": user.username,
})
result = client.post(url, login_data, content_type="application/json")
assert result.status_code == 200
def test_auth_action_register(client, settings):
settings.PUBLIC_REGISTER_ENABLED = True
url = reverse('auth-register')
register_data = json.dumps({
"type": "public",
"username": "test",
"password": "test",
"full_name": "test",
"email": "[email protected]",
})
result = client.post(url, register_data, content_type="application/json")
assert result.status_code == 201
| dayatz/taiga-back | tests/integration/resources_permissions/test_auth_resources.py | Python | agpl-3.0 | 2,107 | 0 |
from fabric.api import *
from fabric.contrib.files import exists
import fabric
counter = 0
@roles('master')
def start_spark():
run('/home/mdindex/scripts/startSystems.sh')
@roles('master')
def stop_spark():
run('/home/mdindex/scripts/stopSystems.sh')
@roles('master')
def start_zookeeper():
run('/home/mdindex/scripts/startZookeeper.sh')
@roles('master')
def stop_zookeeper():
run('/home/mdindex/scripts/stopZookeeper.sh')
def run_bg(cmd, before=None, sockname="dtach", use_sudo=False):
"""Run a command in the background using dtach
:param cmd: The command to run
:param output_file: The file to send all of the output to.
:param before: The command to run before the dtach. E.g. exporting
environment variable
:param sockname: The socket name to use for the temp file
:param use_sudo: Whether or not to use sudo
"""
if not exists("/usr/bin/dtach"):
print "Install dtach first !"
return
if before:
cmd = "{}; dtach -n `mktemp -u /tmp/{}.XXXX` {}".format(
before, sockname, cmd)
else:
cmd = "dtach -n `mktemp -u /tmp/{}.XXXX` {}".format(sockname, cmd)
if use_sudo:
return sudo(cmd)
else:
return run(cmd)
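# Illustrative call (the sockname is arbitrary):
# run_bg('/home/mdindex/scripts/startSystems.sh', sockname='spark')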
@runs_once
def build_jar():
local('cd /Users/anil/Dev/repos/mdindex/; gradle shadowJar')
@parallel
def update_jar():
if not exists('/data/mdindex/jars'):
run('mkdir -p /data/mdindex/jars')
put('../build/libs/amoeba-all.jar', '/data/mdindex/jars/')
@roles('master')
def update_master_jar():
if not exists('/data/mdindex/jars'):
run('mkdir -p /data/mdindex/jars')
put('../build/libs/amoeba-all.jar', '/data/mdindex/jars/')
@serial
def update_config():
global counter
put('server/server.properties', '/home/mdindex/amoeba.properties')
run('echo "MACHINE_ID = %d" >> /home/mdindex/amoeba.properties' % counter)
counter += 1
@parallel
def clean_cluster():
run('rm -R /data/mdindex/logs/hadoop/')
run('rm -R /home/mdindex/spark-1.6.0-bin-hadoop2.6/logs/')
run('rm -R /home/mdindex/spark-1.6.0-bin-hadoop2.6/work/')
@parallel
def parallel_shell():
run('mv /data/mdindex/tpchd100/download_data.sh ~/')
| mitdbg/mdindex | scripts/fabfile/utils.py | Python | mit | 2,214 | 0.006775 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'EmailUserHandler.to_ident'
db.add_column('form_builder_emailuserhandler', 'to_ident',
self.gf('django.db.models.fields.CharField')(default='', max_length=36),
keep_default=False)
# Changing field 'FormSubmission.created_at'
db.alter_column('form_builder_formsubmission', 'created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True))
def backwards(self, orm):
# Deleting field 'EmailUserHandler.to_ident'
db.delete_column('form_builder_emailuserhandler', 'to_ident')
# Changing field 'FormSubmission.created_at'
db.alter_column('form_builder_formsubmission', 'created_at', self.gf('django.db.models.fields.DateTimeField')())
models = {
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'form_builder.choicefield': {
'Meta': {'object_name': 'ChoiceField'},
'choices': ('django.db.models.fields.TextField', [], {}),
'help_text': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '25'})
},
'form_builder.emailsuccesshandler': {
'Meta': {'object_name': 'EmailSuccessHandler'},
'content': ('widgy.contrib.page_builder.db.fields.MarkdownField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'subject': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'to': ('django.db.models.fields.EmailField', [], {'max_length': '75'})
},
'form_builder.emailuserhandler': {
'Meta': {'object_name': 'EmailUserHandler'},
'content': ('widgy.contrib.page_builder.db.fields.MarkdownField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'subject': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'to': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'+'", 'null': 'True', 'to': "orm['widgy.Node']"}),
'to_ident': ('django.db.models.fields.CharField', [], {'max_length': '36'})
},
'form_builder.form': {
'Meta': {'object_name': 'Form'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "u'Untitled form 8'", 'max_length': '255'})
},
'form_builder.formbody': {
'Meta': {'object_name': 'FormBody'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'form_builder.forminput': {
'Meta': {'object_name': 'FormInput'},
'help_text': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'form_builder.formmeta': {
'Meta': {'object_name': 'FormMeta'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'form_builder.formsubmission': {
'Meta': {'object_name': 'FormSubmission'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'form_ident': ('django.db.models.fields.CharField', [], {'max_length': '36'}),
'form_node': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'form_submissions'", 'on_delete': 'models.PROTECT', 'to': "orm['widgy.Node']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'form_builder.formvalue': {
'Meta': {'object_name': 'FormValue'},
'field_ident': ('django.db.models.fields.CharField', [], {'max_length': '36'}),
'field_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'field_node': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['widgy.Node']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'submission': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'values'", 'to': "orm['form_builder.FormSubmission']"}),
'value': ('django.db.models.fields.TextField', [], {})
},
'form_builder.multiplechoicefield': {
'Meta': {'object_name': 'MultipleChoiceField'},
'choices': ('django.db.models.fields.TextField', [], {}),
'help_text': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '25'})
},
'form_builder.savedatahandler': {
'Meta': {'object_name': 'SaveDataHandler'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'form_builder.submitbutton': {
'Meta': {'object_name': 'SubmitButton'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'text': ('django.db.models.fields.CharField', [], {'default': "u'submit'", 'max_length': '255'})
},
'form_builder.successhandlers': {
'Meta': {'object_name': 'SuccessHandlers'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'form_builder.successmessagebucket': {
'Meta': {'object_name': 'SuccessMessageBucket'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'form_builder.textarea': {
'Meta': {'object_name': 'Textarea'},
'help_text': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'form_builder.uncaptcha': {
'Meta': {'object_name': 'Uncaptcha'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'widgy.node': {
'Meta': {'object_name': 'Node'},
'content_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'depth': ('django.db.models.fields.PositiveIntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_frozen': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'numchild': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
}
}
complete_apps = ['form_builder'] | j00bar/django-widgy | widgy/contrib/form_builder/south_migrations/0010_auto__add_field_emailuserhandler_to_ident__chg_field_formsubmission_cr.py | Python | apache-2.0 | 9,301 | 0.007526 |
# -*- coding: utf-8 -*-
# © 2013 - Guadaltech - Alberto Martín Cortada
# © 2015 - AvanzOSC - Ainara Galdona
# © 2014-2016 - Serv. Tecnol. Avanzados - Pedro M. Baeza
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp import models, fields, api, _
class L10nEsAeatMod303Report(models.Model):
_inherit = "l10n.es.aeat.report.tax.mapping"
_name = "l10n.es.aeat.mod303.report"
_description = "AEAT 303 Report"
def _get_export_conf(self):
try:
return self.env.ref(
'l10n_es_aeat_mod303.aeat_mod303_main_export_config').id
except ValueError:
return self.env['aeat.model.export.config']
def _default_counterpart_303(self):
return self.env['account.account'].search(
[('code', 'like', '4750%'), ('type', '!=', 'view')])[:1]
@api.multi
@api.depends('tax_lines', 'tax_lines.amount')
def _compute_total_devengado(self):
casillas_devengado = (3, 6, 9, 11, 13, 15, 18, 21, 24, 26)
for report in self:
tax_lines = report.tax_lines.filtered(
lambda x: x.field_number in casillas_devengado)
report.total_devengado = sum(tax_lines.mapped('amount'))
@api.multi
@api.depends('tax_lines', 'tax_lines.amount')
def _compute_total_deducir(self):
casillas_deducir = (29, 31, 33, 35, 37, 39, 41, 42, 43, 44)
for report in self:
tax_lines = report.tax_lines.filtered(
lambda x: x.field_number in casillas_deducir)
report.total_deducir = sum(tax_lines.mapped('amount'))
@api.multi
@api.depends('total_devengado', 'total_deducir')
def _compute_casilla_46(self):
for report in self:
report.casilla_46 = report.total_devengado - report.total_deducir
@api.multi
@api.depends('porcentaje_atribuible_estado', 'casilla_46')
def _compute_atribuible_estado(self):
for report in self:
report.atribuible_estado = (
report.casilla_46 * report.porcentaje_atribuible_estado / 100)
@api.multi
@api.depends('atribuible_estado', 'cuota_compensar',
'regularizacion_anual', 'casilla_77')
def _compute_casilla_69(self):
for report in self:
report.casilla_69 = (
report.atribuible_estado + report.casilla_77 +
report.cuota_compensar + report.regularizacion_anual)
@api.multi
@api.depends('casilla_69', 'previous_result')
def _compute_resultado_liquidacion(self):
for report in self:
report.resultado_liquidacion = (
report.casilla_69 - report.previous_result)
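# Illustrative chain with made-up amounts: total_devengado=1000 and
# total_deducir=400 give casilla_46=600; at 100% attributable to the State,
# atribuible_estado=600; with no carried-over amounts, casilla_69=600 and
# resultado_liquidacion = 600 - previous_result.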
currency_id = fields.Many2one(
comodel_name='res.currency', string='Currency',
related='company_id.currency_id', store=True, readonly=True)
number = fields.Char(default='303')
export_config = fields.Many2one(default=_get_export_conf)
company_partner_id = fields.Many2one('res.partner', string='Partner',
relation='company_id.partner_id',
store=True)
devolucion_mensual = fields.Boolean(
string="Devolución mensual", states={'done': [('readonly', True)]},
help="Inscrito en el Registro de Devolución Mensual")
total_devengado = fields.Float(
string="[27] IVA devengado", readonly=True,
compute="_compute_total_devengado", store=True)
total_deducir = fields.Float(
string="[45] IVA a deducir", readonly=True,
compute="_compute_total_deducir", store=True)
casilla_46 = fields.Float(
string="[46] Resultado régimen general", readonly=True, store=True,
help="(IVA devengado - IVA deducible)", compute="_compute_casilla_46")
porcentaje_atribuible_estado = fields.Float(
string="[65] % atribuible al Estado",
states={'done': [('readonly', True)]},
help="Los sujetos pasivos que tributen conjuntamente a la "
"Administración del Estado y a las Diputaciones Forales del País "
"Vasco o a la Comunidad Foral de Navarra, consignarán en esta "
"casilla el porcentaje del volumen de operaciones en territorio "
"común. Los demás sujetos pasivos consignarán en esta casilla el "
"100%", default=100)
atribuible_estado = fields.Float(
string="[66] Atribuible a la Administración", readonly=True,
compute="_compute_atribuible_estado", store=True)
cuota_compensar = fields.Float(
string="[67] Cuotas a compensar", default=0,
states={'done': [('readonly', True)]},
help="Cuota a compensar de periodos anteriores, en los que su "
"declaración fue a devolver y se escogió la opción de "
"compensación posterior")
regularizacion_anual = fields.Float(
string="[68] Regularización anual",
states={'done': [('readonly', True)]},
help="En la última autoliquidación del año (la del período 4T o mes "
"12) se hará constar, con el signo que corresponda, el resultado "
"de la regularización anual conforme disponen las Leyes por las "
"que se aprueban el Concierto Económico entre el Estado y la "
"Comunidad Autónoma del País Vasco y el Convenio Económico entre "
"el Estado y la Comunidad Foral de Navarra.""")
casilla_69 = fields.Float(
string="[69] Resultado", readonly=True, compute="_compute_casilla_69",
help="Atribuible a la Administración [66] - Cuotas a compensar [67] + "
"Regularización anual [68]""", store=True)
casilla_77 = fields.Float(
string="[77] Iva Diferido (Liquidado por aduana)",
help="Se hará constar el importe de las cuotas del Impuesto a la "
"importación incluidas en los documentos en los que conste la "
"liquidación practicada por la Administración recibidos en el "
"periodo de liquidación. Solamente podrá cumplimentarse esta "
"casilla cuando se cumplan los requisitos establecidos en el "
"artículo 74.1 del Reglamento del Impuesto sobre el Valor "
"Añadido. ")
previous_result = fields.Float(
string="[70] A deducir",
help="Resultado de la anterior o anteriores declaraciones del mismo "
"concepto, ejercicio y periodo",
states={'done': [('readonly', True)]})
resultado_liquidacion = fields.Float(
string="[71] Result. liquidación", readonly=True,
compute="_compute_resultado_liquidacion", store=True)
result_type = fields.Selection(
selection=[('I', 'A ingresar'),
('D', 'A devolver'),
('N', 'Sin actividad/Resultado cero')],
compute="_compute_result_type")
compensate = fields.Boolean(
string="Compensate", states={'done': [('readonly', True)]},
help="Si se marca, indicará que el importe a devolver se compensará "
"en posteriores declaraciones")
bank_account = fields.Many2one(
comodel_name="res.partner.bank", string="Bank account",
states={'done': [('readonly', True)]})
counterpart_account = fields.Many2one(default=_default_counterpart_303)
allow_posting = fields.Boolean(default=True)
def __init__(self, pool, cr):
self._aeat_number = '303'
super(L10nEsAeatMod303Report, self).__init__(pool, cr)
@api.one
def _compute_allow_posting(self):
self.allow_posting = True
@api.one
@api.depends('resultado_liquidacion')
def _compute_result_type(self):
if self.resultado_liquidacion == 0:
self.result_type = 'N'
elif self.resultado_liquidacion > 0:
self.result_type = 'I'
else:
self.result_type = 'D'
@api.onchange('period_type', 'fiscalyear_id')
def onchange_period_type(self):
super(L10nEsAeatMod303Report, self).onchange_period_type()
if self.period_type not in ('4T', '12'):
self.regularizacion_anual = 0
@api.onchange('type')
def onchange_type(self):
if self.type != 'C':
self.previous_result = 0
@api.onchange('result_type')
def onchange_result_type(self):
if self.result_type != 'B':
self.compensate = False
@api.multi
def button_confirm(self):
"""Check records"""
msg = ""
for mod303 in self:
if mod303.result_type == 'I' and not mod303.bank_account:
msg = _('Select an account for making the charge')
if mod303.result_type == 'B' and not mod303.bank_account:
msg = _('Select an account for receiving the money')
if msg:
# Don't raise error, because data is not used
# raise exceptions.Warning(msg)
pass
return super(L10nEsAeatMod303Report, self).button_confirm()
| Endika/l10n-spain | l10n_es_aeat_mod303/models/mod303.py | Python | agpl-3.0 | 9,035 | 0 |
from Crypto.PublicKey import RSA
from Crypto.Cipher import AES, PKCS1_OAEP
file_in = open("encrypted_data.bin", "rb")
private_key = RSA.import_key(open("private.pem").read())
enc_session_key, nonce, tag, ciphertext = [ file_in.read(x) for x in (private_key.size_in_bytes(), 16, 16, -1) ]
# Decrypt the session key with the private RSA key
cipher_rsa = PKCS1_OAEP.new(private_key)
session_key = cipher_rsa.decrypt(enc_session_key)
# Decrypt the data with the AES session key
cipher_aes = AES.new(session_key, AES.MODE_EAX, nonce)
data = cipher_aes.decrypt_and_verify(ciphertext, tag)
print(data.decode("utf-8"))
| siwells/teaching_set09103 | code/topic_11/decrypt.py | Python | gpl-3.0 | 616 | 0.00487 |
# coding=utf-8
#Converts a csv containing country codes and numerical values, to json
#Years should be given in the header, like this:
#
# land, 1980, 1981, 1982
# se, 12, 13, 11
# fi, 7, 10, 14
import csv
import json
import argparse
import os.path
import sys
import math
#Check if file exists
def is_valid_file(parser, arg):
if not os.path.exists(arg):
parser.error("The file %s does not exist!" % arg)
else:
return arg
#Check if values are numbers, for our purposes
def is_number(s):
try:
float(s)
return True
except ValueError:
return False
def doRounding(s,d):
if d > 0:
return round(float(s),d)
else:
return int(str(round(float(s),d))[:-2])
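#Illustrative: doRounding('3.14159', 2) returns 3.14, doRounding('3.7', 0) returns 4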
#Check if values are years
def isYear(s):
try:
float(s)
if 4 is len(s):
return True
else:
return False
except ValueError:
return False
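#Illustrative: isYear('1980') is True, while isYear('85') and isYear('land') are False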
#Define command line arguments
parser = argparse.ArgumentParser(description='Converts a csv containing country codes and numerical values, to json.')
#Input file
parser.add_argument("-i", "--input", dest="infile", required=True,
help="input file", metavar="FILE",
type=lambda x: is_valid_file(parser,x))
#Output file
parser.add_argument("-o", "--output", dest="outfile",
help="output file", metavar="FILE")
#Column
parser.add_argument("-c", "--column", dest="column",
help="column containing nation codes. The first column is “0”", type=int, default=0)
#Rounding
parser.add_argument("-d", "--decimals", dest="decimals",
help="Number of decimals to keep. Default is -1, meaning “keep all”", type=int, default=-1)
args = parser.parse_args()
inputFile = args.infile #"/home/leo/Världen/demo/patents/raw-pre.csv"
if args.outfile is None:
outputFile = os.path.splitext(inputFile)[0] + ".json"
print "No output file given, using %s" % outputFile
else:
outputFile = args.outfile
if os.path.isfile(outputFile):
print "File %s already exists. Overwrite? [y/N]" % outputFile
choice = raw_input().lower()
if not choice in ('y', 'yes'):
sys.exit()
indataColumn = args.column
print indataColumn
outdata = {}
headers = []
#Open file
try:
with open(inputFile, 'rb') as csvfile:
datacsv = csv.reader(csvfile,delimiter=',',quotechar='"')
firstRow = True
for row in datacsv:
if firstRow:
firstRow = False
for col in row:
headers.append(col)
else:
currentNation = row[indataColumn]
outdata[currentNation] = []
i = 0
for col in row:
currentHeader = headers[i]
if isYear(currentHeader):
if is_number(col):
if (args.decimals > -1):
outdata[currentNation].append(doRounding(col,args.decimals))
else:
outdata[currentNation].append(col)
else:
outdata[currentNation].append(None)
i += 1
except IOError:
print ("Could not open input file")
print "Writing %s..." % outputFile
with open(outputFile, 'w') as outfile:
json.dump(outdata, outfile)
print "done"
| jplusplus/thenmap-v0 | generators/utils/convert-csv-to-json.py | Python | gpl-2.0 | 2,991 | 0.030181 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.test import TestCase
from django.contrib.auth.models import User
from django.db.utils import IntegrityError
from django.core.exceptions import ValidationError
from loans.models import Business
class BusinessTestCase(TestCase):
'''
Creating two business owners
'''
def setUp(self):
self.john = User.objects.create(username="johndoe", first_name="John", last_name="Doe")
self.john.borrower.is_borrower = True
self.john.borrower.telephone_number = '+44 7762 25 4775'
self.john.save()
# self.jane = User.objects.create(username="janedoe", first_name="Jane", last_name="Doe")
# self.jane.borrower.is_borrower = True
# self.jane.borrower.telephone_number = '+40 745 497 778'
# self.jane.save()
'''
We must be able to create a business
'''
def test_model_create(self):
acme = Business.objects.create(
crn = '09264172',
owner = self.john,
name = "ACME Inc.",
sector = 'PS',
address_one = 'Building and Number',
address_two = 'Street',
city = 'London',
postcode = 'W8 5EH',
)
acme.full_clean()
'''
Two businesses cannot have the same company number
'''
def test_company_number_duplicate(self):
acme = Business.objects.create(
crn = '09264172',
owner = self.john,
name = "ACME Inc.",
sector = 'PS',
address_one = 'Building and Number',
address_two = 'Street',
city = 'London',
postcode = 'W8 5EH',
)
acme.full_clean()
with self.assertRaises(IntegrityError):
duplicate = Business.objects.create(
crn = '09264172',
owner = self.john,
name = "ACME Duplicate Inc.",
sector = 'PS',
address_one = 'Building and Number',
address_two = 'Street',
city = 'Manchester',
postcode = 'M14 5SZ',
)
'''
The company number must be added in a valid format
'''
def test_company_number_format(self):
# 8 character number should be accepted
acme = Business.objects.create(
crn = '09264172',
owner = self.john,
name = "ACME Inc.",
sector = 'PS',
address_one = 'Building and Number',
address_two = 'Street',
city = 'London',
postcode = 'W8 5EH',
)
acme.full_clean()
# > 8 characters should not be accepted
acme = Business.objects.create(
crn = '09264172123123',
owner = self.john,
name = "ACME Inc.",
sector = 'PS',
address_one = 'Building and Number',
address_two = 'Street',
city = 'London',
postcode = 'W8 5EH',
)
with self.assertRaises(ValidationError):
acme.full_clean()
# < 8 characters should not be accepted
acme = Business.objects.create(
crn = '0926',
owner = self.john,
name = "ACME Inc.",
sector = 'PS',
address_one = 'Building and Number',
address_two = 'Street',
city = 'London',
postcode = 'W8 5EH',
)
with self.assertRaises(ValidationError):
acme.full_clean()
'''
The address must be added in a valid format
'''
def test_address_format(self):
# First address field must have some characters
acme = Business.objects.create(
crn = '09260926',
owner = self.john,
name = "ACME Inc.",
sector = 'PS',
address_one = '',
address_two = 'Street',
city = 'London',
postcode = 'W8 5EH',
)
with self.assertRaises(ValidationError):
acme.full_clean()
# Second address field can be empty
acme = Business.objects.create(
crn = '09260927',
owner = self.john,
name = "ACME Inc.",
sector = 'PS',
address_one = 'Building and Number',
address_two = '',
city = 'London',
postcode = 'W8 5EH',
)
acme.full_clean()
# Postcode must be valid
acme = Business.objects.create(
crn = '09260928',
owner = self.john,
name = "ACME Inc.",
sector = 'PS',
address_one = 'Building and Number',
address_two = '',
city = 'London',
postcode = 'INVALID POSTCODE',
)
with self.assertRaises(ValidationError):
acme.full_clean()
acme = Business.objects.create(
crn = '09260929',
owner = self.john,
name = "ACME Inc.",
sector = 'PS',
address_one = 'Building and Number',
address_two = '',
city = 'London',
postcode = ' 2NP',
)
with self.assertRaises(ValidationError):
acme.full_clean()
acme = Business.objects.create(
crn = '09260930',
owner = self.john,
name = "ACME Inc.",
sector = 'PS',
address_one = 'Building and Number',
address_two = '',
city = 'London',
postcode = 'M145S',
)
with self.assertRaises(ValidationError):
acme.full_clean()
| Amandil/django-tech-test | loans/tests/tests_model_business.py | Python | bsd-3-clause | 5,699 | 0.031409 |
from __future__ import division, absolute_import
from __future__ import print_function, unicode_literals
import toolz
import numpy as np
import theano
import theano.tensor as T
from .. import utils
from .inits import ZeroInit
ENABLE_TEST_VALUE = theano.config.compute_test_value != "off"
VALID_TAGS = set("""
input
output
weight
bias
parameter
hyperparameter
monitor
state
tied
""".split())
class VariableWrapper(object):
def __init__(self,
name,
shape=None,
dtype=None,
broadcastable=None,
is_shared=None,
tags=None,
ndim=None,
variable=None,
inits=None,
relative_network=None,
default_inits=None,
default_inits_hyperparameters=("inits",)):
self.name = name
self.shape_ = shape
self.dtype_ = dtype
self.broadcastable_ = broadcastable
self.is_shared_ = is_shared
self.tags_ = tags
self.ndim_ = ndim
self.variable_ = variable
if default_inits is not None:
# replace inits with calculated inits from network
assert inits is None
assert relative_network is not None
assert isinstance(default_inits, (list, tuple))
inits = list(toolz.concat(relative_network.find_hyperparameters(
default_inits_hyperparameters,
default_inits)))
self.inits = inits
# relative_network is provided so that variables can auto-compute
# their shape
self.relative_network = relative_network
self.validate()
def to_state(self, name):
return dict(
shape=self.shape_,
dtype=self.dtype_,
broadcastable=self.broadcastable_,
is_shared=self.is_shared_,
)
def validate(self):
shape = self.shape_
dtype = self.dtype_
broadcastable = self.broadcastable_
is_shared = self.is_shared_
tags = self.tags_
ndim = self.ndim_
variable = self.variable_
if ndim is not None and shape is not None:
assert len(shape) == ndim
if ndim is not None and variable is not None:
assert ndim == variable.ndim
if broadcastable is not None and variable is not None:
assert broadcastable == variable.broadcastable
if is_shared is not None and variable is not None:
assert is_shared == utils.is_shared_variable(self.variable)
if dtype is not None and variable is not None:
assert dtype == variable.dtype
if tags is not None:
self.verify_tags(set(tags))
if is_shared:
assert self.inits is not None, dict(
name=self.name,
msg=("if inits is None, then this shared variable cannot be "
"shared or loaded"),
)
def verify_tags(self, tags):
for tag in tags:
assert tag in VALID_TAGS
if self.is_shared:
# exactly one of parameter and state should be set
assert ("parameter" in tags) != ("state" in tags)
# the only valid tags for shared are the following:
assert len(tags - {"weight", "bias", "parameter", "state"}) == 0
else:
assert len({"weight", "bias", "parameter", "state"} & tags) == 0
@property
def is_shared(self):
if self.is_shared_ is None:
# if is_shared is not supplied, a variable must be supplied
assert self.variable_ is not None
self.is_shared_ = utils.is_shared_variable(self.variable)
return self.is_shared_
@property
def tags(self):
if self.tags_ is None:
self.tags_ = []
if not isinstance(self.tags_, set):
self.tags_ = set(self.tags_)
self.verify_tags(self.tags_)
return self.tags_
@property
def ndim(self):
if self.ndim_ is None:
if self.variable_ is not None:
self.ndim_ = self.variable_.ndim
elif self.shape_ is not None:
self.ndim_ = len(self.shape_)
else:
raise ValueError("ndim not defined")
return self.ndim_
@property
def dtype(self):
if self.dtype_ is None:
if self.variable_ is not None:
self.dtype_ = self.variable_.dtype
else:
self.dtype_ = theano.config.floatX
return self.dtype_
@property
def broadcastable(self):
if self.broadcastable_ is None:
if self.variable_ is not None:
self.broadcastable_ = self.variable_.broadcastable
else:
self.broadcastable_ = (False, ) * self.ndim
return self.broadcastable_
@property
def variable(self):
if self.variable_ is None:
if self.is_shared:
# find appropriate initialization scheme
inits = self.inits
if inits is None:
inits = []
for initialization in inits:
if initialization.predicate(self):
break
else:
# default to zero initialization if none work
initialization = ZeroInit()
# create the shared variable
variable = initialization.create_shared(self)
else:
variable = T.TensorType(self.dtype,
self.broadcastable)(self.name)
self.variable_ = variable
# for ease of debugging, add test values
# ---
# this must be done after self.variable_ is set to avoid a
# recursive loop when calling self.shape
if (not self.is_shared) and ENABLE_TEST_VALUE:
test_value = np.random.rand(*self.shape).astype(self.dtype)
variable.tag.test_value = test_value
# re-validate that initialization worked properly
self.validate()
return self.variable_
@property
def shape(self):
if self.shape_ is None:
# cannot derive shape for shared variable
assert not self.is_shared
# TODO
# can we call variable.get_value().shape
# we don't know if one of the intended dimensions is None,
# but it seems like a fair assumption that a shared variable
# won't change shape (maybe we can add a flag of whether or not
# shape doesn't change that defaults to True)
# FIXME calculate shape
# assert False
self.shape_ = tuple(self.variable_.shape.eval())
return self.shape_
def symbolic_shape(self):
"""
returns the shape of the variable with None's replaced by symbolic
variables
"""
shape = self.shape
var_shape = self.variable.shape
out_shape = []
for i, s in enumerate(shape):
if s is not None:
out_shape.append(s)
else:
out_shape.append(var_shape[i])
return tuple(out_shape)
@property
def value(self):
assert self.is_shared
return self.variable.get_value()
@value.setter
def value(self, new_value):
assert new_value.dtype == self.dtype
assert new_value.shape == self.shape
self.variable.set_value(new_value)
def __repr__(self):
return "{cls}(name={name})".format(cls=self.__class__.__name__,
name=self.name)
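# A minimal usage sketch (hypothetical values, not part of the original
# module): a shared weight variable might be declared roughly as
#   w = VariableWrapper("w", shape=(128, 64), is_shared=True,
#                       tags={"parameter", "weight"}, inits=[])
#   w.variable  # lazily creates the theano shared variable, falling back to
#               # ZeroInit() since no initialization predicate matches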
| diogo149/treeano | treeano/core/variable.py | Python | apache-2.0 | 7,794 | 0 |
#!/usr/bin/python
import unittest
import os
import sys
# simple magic for using scripts within a source tree
basedir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if os.path.isdir(os.path.join(basedir, 'virttest')):
sys.path.append(basedir)
from virttest import utils_params
BASE_DICT = {
'image_boot': 'yes',
'image_boot_stg': 'no',
'image_chain': '',
'image_clone_command': 'cp --reflink=auto %s %s',
'image_format': 'qcow2',
'image_format_stg': 'qcow2',
'image_name': 'images/f18-64',
'image_name_stg': 'enospc',
'image_raw_device': 'no',
'image_remove_command': 'rm -rf %s',
'image_size': '10G',
'image_snapshot_stg': 'no',
'image_unbootable_pattern': 'Hard Disk.*not a bootable disk',
'image_verify_bootable': 'yes',
'images': 'image1 stg',
}
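# object_params('stg') is expected to override the generic keys with their
# '_stg' suffixed variants (e.g. image_name -> 'enospc', image_boot -> 'no'),
# which is what CORRECT_RESULT_MAPPING below encodes.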
CORRECT_RESULT_MAPPING = {"image1": {'image_boot_stg': 'no',
'image_snapshot_stg': 'no',
'image_chain': '',
'image_unbootable_pattern': 'Hard Disk.*not a bootable disk',
'image_name': 'images/f18-64',
'image_remove_command': 'rm -rf %s',
'image_name_stg': 'enospc',
'image_clone_command': 'cp --reflink=auto %s %s',
'image_size': '10G', 'images': 'image1 stg',
'image_raw_device': 'no',
'image_format': 'qcow2',
'image_boot': 'yes',
'image_verify_bootable': 'yes',
'image_format_stg': 'qcow2'},
"stg": {'image_snapshot': 'no',
'image_boot_stg': 'no',
'image_snapshot_stg': 'no',
'image_chain': '',
'image_unbootable_pattern': 'Hard Disk.*not a bootable disk',
'image_name': 'enospc',
'image_remove_command': 'rm -rf %s',
'image_name_stg': 'enospc',
'image_clone_command': 'cp --reflink=auto %s %s',
'image_size': '10G',
'images': 'image1 stg',
'image_raw_device': 'no',
'image_format': 'qcow2',
'image_boot': 'no',
'image_verify_bootable': 'yes',
'image_format_stg': 'qcow2'}}
class TestParams(unittest.TestCase):
def setUp(self):
self.params = utils_params.Params(BASE_DICT)
def testObjects(self):
self.assertEquals(self.params.objects("images"), ['image1', 'stg'])
def testObjectsParams(self):
for key in CORRECT_RESULT_MAPPING.keys():
self.assertEquals(self.params.object_params(key),
CORRECT_RESULT_MAPPING[key])
def testGetItemMissing(self):
try:
self.params['bogus']
raise ValueError("Did not get a ParamNotFound error when trying "
"to access a non-existing param")
# pylint: disable=E0712
except utils_params.ParamNotFound:
pass
def testGetItem(self):
self.assertEqual(self.params['image_size'], "10G")
if __name__ == "__main__":
unittest.main()
| spcui/avocado-vt | selftests/unit/test_utils_params.py | Python | gpl-2.0 | 3,700 | 0.001622 |
from __future__ import print_function
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem.PyMol import MolViewer
from rdkit.Chem.Subshape import SubshapeBuilder,SubshapeObjects,SubshapeAligner
from rdkit.six.moves import cPickle
import copy
m1 = Chem.MolFromMolFile('test_data/square1.mol')
m2 = Chem.MolFromMolFile('test_data/square2.mol')
b = SubshapeBuilder.SubshapeBuilder()
b.gridDims=(10.,10.,5)
b.gridSpacing=0.4
b.winRad=2.0
if 1:
print('m1:')
s1 = b.GenerateSubshapeShape(m1)
cPickle.dump(s1,file('test_data/square1.shp.pkl','wb+'))
print('m2:')
s2 = b.GenerateSubshapeShape(m2)
cPickle.dump(s2,file('test_data/square2.shp.pkl','wb+'))
ns1 = b.CombineSubshapes(s1,s2)
b.GenerateSubshapeSkeleton(ns1)
cPickle.dump(ns1,file('test_data/combined.shp.pkl','wb+'))
else:
s1 = cPickle.load(file('test_data/square1.shp.pkl','rb'))
s2 = cPickle.load(file('test_data/square2.shp.pkl','rb'))
#ns1 = cPickle.load(file('test_data/combined.shp.pkl','rb'))
ns1=cPickle.load(file('test_data/combined.shp.pkl','rb'))
v = MolViewer()
SubshapeObjects.DisplaySubshape(v,s1,'shape1')
SubshapeObjects.DisplaySubshape(v,ns1,'ns1')
#SubshapeObjects.DisplaySubshape(v,s2,'shape2')
a = SubshapeAligner.SubshapeAligner()
pruneStats={}
algs = a.GetSubshapeAlignments(None,ns1,m1,s1,b,pruneStats=pruneStats)
print(len(algs))
print(pruneStats)
import os,tempfile
from rdkit import Geometry
fName = tempfile.mktemp('.grd')
Geometry.WriteGridToFile(ns1.coarseGrid.grid,fName)
v.server.loadSurface(fName,'coarse','',2.5)
os.unlink(fName)
fName = tempfile.mktemp('.grd')
Geometry.WriteGridToFile(ns1.medGrid.grid,fName)
v.server.loadSurface(fName,'med','',2.5)
os.unlink(fName)
| soerendip42/rdkit | rdkit/Chem/Subshape/testCombined.py | Python | bsd-3-clause | 1,702 | 0.03349 |
# Copyright 2011 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import hashlib
import signal
import sys
import time
import uuid
import functools
import netaddr
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_service import loopingcall
import six
from six import moves
from neutron.agent.common import ovs_lib
from neutron.agent.common import polling
from neutron.agent.common import utils
from neutron.agent.l2.extensions import manager as ext_manager
from neutron.agent.linux import ip_lib
from neutron.agent import rpc as agent_rpc
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.api.rpc.handlers import dvr_rpc
from neutron.common import config
from neutron.common import constants as n_const
from neutron.common import exceptions
from neutron.common import ipv6_utils as ipv6
from neutron.common import topics
from neutron.common import utils as n_utils
from neutron import context
from neutron.i18n import _LE, _LI, _LW
from neutron.plugins.common import constants as p_const
from neutron.plugins.ml2.drivers.l2pop.rpc_manager import l2population_rpc
from neutron.plugins.ml2.drivers.openvswitch.agent.common \
import constants
from neutron.plugins.ml2.drivers.openvswitch.agent \
import ovs_dvr_neutron_agent
LOG = logging.getLogger(__name__)
cfg.CONF.import_group('AGENT', 'neutron.plugins.ml2.drivers.openvswitch.'
'agent.common.config')
cfg.CONF.import_group('OVS', 'neutron.plugins.ml2.drivers.openvswitch.agent.'
'common.config')
# A placeholder for dead vlans.
DEAD_VLAN_TAG = p_const.MAX_VLAN_TAG + 1
UINT64_BITMASK = (1 << 64) - 1
class _mac_mydialect(netaddr.mac_unix):
word_fmt = '%.2x'
class DeviceListRetrievalError(exceptions.NeutronException):
message = _("Unable to retrieve port details for devices: %(devices)s ")
class LocalVLANMapping(object):
def __init__(self, vlan, network_type, physical_network, segmentation_id,
vif_ports=None):
if vif_ports is None:
vif_ports = {}
self.vlan = vlan
self.network_type = network_type
self.physical_network = physical_network
self.segmentation_id = segmentation_id
self.vif_ports = vif_ports
# set of tunnel ports on which packets should be flooded
self.tun_ofports = set()
def __str__(self):
return ("lv-id = %s type = %s phys-net = %s phys-id = %s" %
(self.vlan, self.network_type, self.physical_network,
self.segmentation_id))
class OVSPluginApi(agent_rpc.PluginApi):
pass
def has_zero_prefixlen_address(ip_addresses):
return any(netaddr.IPNetwork(ip).prefixlen == 0 for ip in ip_addresses)
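# Illustrative behaviour of the helper above (addresses are made up):
#   has_zero_prefixlen_address(['192.168.0.5/24']) -> False
#   has_zero_prefixlen_address(['0.0.0.0/0'])      -> True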
class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin,
l2population_rpc.L2populationRpcCallBackTunnelMixin,
dvr_rpc.DVRAgentRpcCallbackMixin):
'''Implements OVS-based tunneling, VLANs and flat networks.
Two local bridges are created: an integration bridge (defaults to
'br-int') and a tunneling bridge (defaults to 'br-tun'). An
additional bridge is created for each physical network interface
used for VLANs and/or flat networks.
All VM VIFs are plugged into the integration bridge. VM VIFs on a
given virtual network share a common "local" VLAN (i.e. not
propagated externally). The VLAN id of this local VLAN is mapped
to the physical networking details realizing that virtual network.
For virtual networks realized as GRE tunnels, a Logical Switch
(LS) identifier is used to differentiate tenant traffic on
inter-HV tunnels. A mesh of tunnels is created to other
Hypervisors in the cloud. These tunnels originate and terminate on
the tunneling bridge of each hypervisor. Port patching is done to
connect local VLANs on the integration bridge to inter-hypervisor
tunnels on the tunnel bridge.
For each virtual network realized as a VLAN or flat network, a
veth or a pair of patch ports is used to connect the local VLAN on
the integration bridge with the physical network bridge, with flow
rules adding, modifying, or stripping VLAN tags as necessary.
'''
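    # An illustrative mapping (hypothetical values): with
    # bridge_mappings = {'physnet1': 'br-eth1'}, a VLAN network on 'physnet1'
    # with segmentation_id 100 is given some free local VLAN tag on br-int,
    # and the flows on the int-br-eth1/phys-br-eth1 patch (or veth) ports
    # translate between that local tag and VLAN 100 on the physical bridge.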
# history
# 1.0 Initial version
# 1.1 Support Security Group RPC
# 1.2 Support DVR (Distributed Virtual Router) RPC
# 1.3 Added param devices_to_update to security_groups_provider_updated
# 1.4 Added support for network_update
target = oslo_messaging.Target(version='1.4')
def __init__(self, bridge_classes, integ_br, tun_br, local_ip,
bridge_mappings, polling_interval, tunnel_types=None,
veth_mtu=None, l2_population=False,
enable_distributed_routing=False,
minimize_polling=False,
ovsdb_monitor_respawn_interval=(
constants.DEFAULT_OVSDBMON_RESPAWN),
arp_responder=False,
prevent_arp_spoofing=True,
use_veth_interconnection=False,
quitting_rpc_timeout=None,
conf=None):
'''Constructor.
:param bridge_classes: a dict for bridge classes.
:param integ_br: name of the integration bridge.
:param tun_br: name of the tunnel bridge.
:param local_ip: local IP address of this hypervisor.
:param bridge_mappings: mappings from physical network name to bridge.
:param polling_interval: interval (secs) to poll DB.
:param tunnel_types: A list of tunnel types to enable support for in
the agent. If set, will automatically set enable_tunneling to
True.
:param veth_mtu: MTU size for veth interfaces.
:param l2_population: Optional, whether L2 population is turned on
:param minimize_polling: Optional, whether to minimize polling by
monitoring ovsdb for interface changes.
:param ovsdb_monitor_respawn_interval: Optional, when using polling
minimization, the number of seconds to wait before respawning
the ovsdb monitor.
:param arp_responder: Optional, enable local ARP responder if it is
supported.
:param prevent_arp_spoofing: Optional, enable suppression of any ARP
responses from ports that don't match an IP address that belongs
to the ports. Spoofing rules will not be added to ports that
have port security disabled.
:param use_veth_interconnection: use veths instead of patch ports to
interconnect the integration bridge to physical bridges.
:param quitting_rpc_timeout: timeout in seconds for rpc calls after
SIGTERM is received
:param conf: an instance of ConfigOpts
'''
super(OVSNeutronAgent, self).__init__()
self.conf = conf or cfg.CONF
self.ovs = ovs_lib.BaseOVS()
self.fullsync = True
# init bridge classes with configured datapath type.
self.br_int_cls, self.br_phys_cls, self.br_tun_cls = (
functools.partial(bridge_classes[b],
datapath_type=self.conf.OVS.datapath_type)
for b in ('br_int', 'br_phys', 'br_tun'))
self.use_veth_interconnection = use_veth_interconnection
self.veth_mtu = veth_mtu
self.available_local_vlans = set(moves.range(p_const.MIN_VLAN_TAG,
p_const.MAX_VLAN_TAG))
self.tunnel_types = tunnel_types or []
self.l2_pop = l2_population
# TODO(ethuleau): Change ARP responder so it's not dependent on the
# ML2 l2 population mechanism driver.
self.enable_distributed_routing = enable_distributed_routing
self.arp_responder_enabled = arp_responder and self.l2_pop
self.prevent_arp_spoofing = prevent_arp_spoofing
host = self.conf.host
self.agent_id = 'ovs-agent-%s' % host
if tunnel_types:
self.enable_tunneling = True
else:
self.enable_tunneling = False
# Validate agent configurations
self._check_agent_configurations()
# Keep track of int_br's device count for use by _report_state()
self.int_br_device_count = 0
self.agent_uuid_stamp = uuid.uuid4().int & UINT64_BITMASK
self.int_br = self.br_int_cls(integ_br)
self.setup_integration_br()
# Stores port update notifications for processing in main rpc loop
self.updated_ports = set()
# Stores port delete notifications
self.deleted_ports = set()
self.network_ports = collections.defaultdict(set)
# keeps association between ports and ofports to detect ofport change
self.vifname_to_ofport_map = {}
self.setup_rpc()
self.init_extension_manager(self.connection)
self.bridge_mappings = bridge_mappings
self.setup_physical_bridges(self.bridge_mappings)
self.local_vlan_map = {}
self.tun_br_ofports = {p_const.TYPE_GENEVE: {},
p_const.TYPE_GRE: {},
p_const.TYPE_VXLAN: {}}
self.polling_interval = polling_interval
self.minimize_polling = minimize_polling
self.ovsdb_monitor_respawn_interval = ovsdb_monitor_respawn_interval
self.local_ip = local_ip
self.tunnel_count = 0
self.vxlan_udp_port = self.conf.AGENT.vxlan_udp_port
self.dont_fragment = self.conf.AGENT.dont_fragment
self.tunnel_csum = cfg.CONF.AGENT.tunnel_csum
self.tun_br = None
self.patch_int_ofport = constants.OFPORT_INVALID
self.patch_tun_ofport = constants.OFPORT_INVALID
if self.enable_tunneling:
# The patch_int_ofport and patch_tun_ofport are updated
# here inside the call to setup_tunnel_br()
self.setup_tunnel_br(tun_br)
self.dvr_agent = ovs_dvr_neutron_agent.OVSDVRNeutronAgent(
self.context,
self.dvr_plugin_rpc,
self.int_br,
self.tun_br,
self.bridge_mappings,
self.phys_brs,
self.int_ofports,
self.phys_ofports,
self.patch_int_ofport,
self.patch_tun_ofport,
host,
self.enable_tunneling,
self.enable_distributed_routing)
self.agent_state = {
'binary': 'neutron-openvswitch-agent',
'host': host,
'topic': n_const.L2_AGENT_TOPIC,
'configurations': {'bridge_mappings': bridge_mappings,
'tunnel_types': self.tunnel_types,
'tunneling_ip': local_ip,
'l2_population': self.l2_pop,
'arp_responder_enabled':
self.arp_responder_enabled,
'enable_distributed_routing':
self.enable_distributed_routing,
'log_agent_heartbeats':
self.conf.AGENT.log_agent_heartbeats,
'extensions': self.ext_manager.names(),
'datapath_type': self.conf.OVS.datapath_type,
'ovs_capabilities': self.ovs.capabilities,
'vhostuser_socket_dir':
self.conf.OVS.vhostuser_socket_dir},
'agent_type': self.conf.AGENT.agent_type,
'start_flag': True}
report_interval = self.conf.AGENT.report_interval
if report_interval:
heartbeat = loopingcall.FixedIntervalLoopingCall(
self._report_state)
heartbeat.start(interval=report_interval)
if self.enable_tunneling:
self.setup_tunnel_br_flows()
self.dvr_agent.setup_dvr_flows()
# Collect additional bridges to monitor
self.ancillary_brs = self.setup_ancillary_bridges(integ_br, tun_br)
# In order to keep existed device's local vlan unchanged,
# restore local vlan mapping at start
self._restore_local_vlan_map()
# Security group agent support
self.sg_agent = sg_rpc.SecurityGroupAgentRpc(self.context,
self.sg_plugin_rpc, self.local_vlan_map,
defer_refresh_firewall=True)
# Initialize iteration counter
self.iter_num = 0
self.run_daemon_loop = True
self.catch_sigterm = False
self.catch_sighup = False
# The initialization is complete; we can start receiving messages
self.connection.consume_in_threads()
self.quitting_rpc_timeout = quitting_rpc_timeout
def _report_state(self):
# How many devices are likely used by a VM
self.agent_state.get('configurations')['devices'] = (
self.int_br_device_count)
self.agent_state.get('configurations')['in_distributed_mode'] = (
self.dvr_agent.in_distributed_mode())
try:
agent_status = self.state_rpc.report_state(self.context,
self.agent_state,
True)
if agent_status == n_const.AGENT_REVIVED:
LOG.info(_LI('Agent has just been revived. '
'Doing a full sync.'))
self.fullsync = True
self.use_call = False
self.agent_state.pop('start_flag', None)
except Exception:
LOG.exception(_LE("Failed reporting state!"))
def _restore_local_vlan_map(self):
self._local_vlan_hints = {}
cur_ports = self.int_br.get_vif_ports()
port_names = [p.port_name for p in cur_ports]
port_info = self.int_br.get_ports_attributes(
"Port", columns=["name", "other_config", "tag"], ports=port_names)
by_name = {x['name']: x for x in port_info}
for port in cur_ports:
# if a port was deleted between get_vif_ports and
# get_ports_attributes, we
# will get a KeyError
try:
local_vlan_map = by_name[port.port_name]['other_config']
local_vlan = by_name[port.port_name]['tag']
except KeyError:
continue
if not local_vlan:
continue
net_uuid = local_vlan_map.get('net_uuid')
if (net_uuid and net_uuid not in self._local_vlan_hints
and local_vlan != DEAD_VLAN_TAG):
self.available_local_vlans.remove(local_vlan)
self._local_vlan_hints[local_vlan_map['net_uuid']] = \
local_vlan
def _dispose_local_vlan_hints(self):
self.available_local_vlans.update(self._local_vlan_hints.values())
self._local_vlan_hints = {}
def setup_rpc(self):
self.plugin_rpc = OVSPluginApi(topics.PLUGIN)
self.sg_plugin_rpc = sg_rpc.SecurityGroupServerRpcApi(topics.PLUGIN)
self.dvr_plugin_rpc = dvr_rpc.DVRServerRpcApi(topics.PLUGIN)
self.state_rpc = agent_rpc.PluginReportStateAPI(topics.REPORTS)
# RPC network init
self.context = context.get_admin_context_without_session()
# Define the listening consumers for the agent
consumers = [[topics.PORT, topics.UPDATE],
[topics.PORT, topics.DELETE],
[constants.TUNNEL, topics.UPDATE],
[constants.TUNNEL, topics.DELETE],
[topics.SECURITY_GROUP, topics.UPDATE],
[topics.DVR, topics.UPDATE],
[topics.NETWORK, topics.UPDATE]]
if self.l2_pop:
consumers.append([topics.L2POPULATION, topics.UPDATE])
self.connection = agent_rpc.create_consumers([self],
topics.AGENT,
consumers,
start_listening=False)
def init_extension_manager(self, connection):
ext_manager.register_opts(self.conf)
self.ext_manager = (
ext_manager.AgentExtensionsManager(self.conf))
self.ext_manager.initialize(
connection, constants.EXTENSION_DRIVER_TYPE)
def get_net_uuid(self, vif_id):
for network_id, vlan_mapping in six.iteritems(self.local_vlan_map):
if vif_id in vlan_mapping.vif_ports:
return network_id
def port_update(self, context, **kwargs):
port = kwargs.get('port')
# Put the port identifier in the updated_ports set.
# Even if full port details might be provided to this call,
# they are not used since there is no guarantee the notifications
# are processed in the same order as the relevant API requests
self.updated_ports.add(port['id'])
LOG.debug("port_update message processed for port %s", port['id'])
def port_delete(self, context, **kwargs):
port_id = kwargs.get('port_id')
self.deleted_ports.add(port_id)
self.updated_ports.discard(port_id)
LOG.debug("port_delete message processed for port %s", port_id)
def network_update(self, context, **kwargs):
network_id = kwargs['network']['id']
for port_id in self.network_ports[network_id]:
# notifications could arrive out of order, if the port is deleted
# we don't want to update it anymore
if port_id not in self.deleted_ports:
self.updated_ports.add(port_id)
LOG.debug("network_update message processed for network "
"%(network_id)s, with ports: %(ports)s",
{'network_id': network_id,
'ports': self.network_ports[network_id]})
def _clean_network_ports(self, port_id):
for port_set in self.network_ports.values():
if port_id in port_set:
port_set.remove(port_id)
break
def process_deleted_ports(self, port_info):
# don't try to process removed ports as deleted ports since
# they are already gone
if 'removed' in port_info:
self.deleted_ports -= port_info['removed']
deleted_ports = list(self.deleted_ports)
while self.deleted_ports:
port_id = self.deleted_ports.pop()
port = self.int_br.get_vif_port_by_id(port_id)
self._clean_network_ports(port_id)
self.ext_manager.delete_port(self.context,
{"vif_port": port,
"port_id": port_id})
# move to dead VLAN so deleted ports no
# longer have access to the network
if port:
# don't log errors since there is a chance someone will be
# removing the port from the bridge at the same time
self.port_dead(port, log_errors=False)
self.port_unbound(port_id)
# Flush firewall rules after ports are put on dead VLAN to be
# more secure
self.sg_agent.remove_devices_filter(deleted_ports)
def tunnel_update(self, context, **kwargs):
LOG.debug("tunnel_update received")
if not self.enable_tunneling:
return
tunnel_ip = kwargs.get('tunnel_ip')
tunnel_ip_hex = self.get_ip_in_hex(tunnel_ip)
if not tunnel_ip_hex:
return
tunnel_type = kwargs.get('tunnel_type')
if not tunnel_type:
LOG.error(_LE("No tunnel_type specified, cannot create tunnels"))
return
if tunnel_type not in self.tunnel_types:
LOG.error(_LE("tunnel_type %s not supported by agent"),
tunnel_type)
return
if tunnel_ip == self.local_ip:
return
tun_name = '%s-%s' % (tunnel_type, tunnel_ip_hex)
if not self.l2_pop:
self._setup_tunnel_port(self.tun_br, tun_name, tunnel_ip,
tunnel_type)
def tunnel_delete(self, context, **kwargs):
LOG.debug("tunnel_delete received")
if not self.enable_tunneling:
return
tunnel_ip = kwargs.get('tunnel_ip')
if not tunnel_ip:
LOG.error(_LE("No tunnel_ip specified, cannot delete tunnels"))
return
tunnel_type = kwargs.get('tunnel_type')
if not tunnel_type:
LOG.error(_LE("No tunnel_type specified, cannot delete tunnels"))
return
if tunnel_type not in self.tunnel_types:
LOG.error(_LE("tunnel_type %s not supported by agent"),
tunnel_type)
return
ofport = self.tun_br_ofports[tunnel_type].get(tunnel_ip)
self.cleanup_tunnel_port(self.tun_br, ofport, tunnel_type)
def _tunnel_port_lookup(self, network_type, remote_ip):
return self.tun_br_ofports[network_type].get(remote_ip)
def fdb_add(self, context, fdb_entries):
LOG.debug("fdb_add received")
for lvm, agent_ports in self.get_agent_ports(fdb_entries,
self.local_vlan_map):
agent_ports.pop(self.local_ip, None)
if len(agent_ports):
if not self.enable_distributed_routing:
with self.tun_br.deferred() as deferred_br:
self.fdb_add_tun(context, deferred_br, lvm,
agent_ports, self._tunnel_port_lookup)
else:
self.fdb_add_tun(context, self.tun_br, lvm,
agent_ports, self._tunnel_port_lookup)
def fdb_remove(self, context, fdb_entries):
LOG.debug("fdb_remove received")
for lvm, agent_ports in self.get_agent_ports(fdb_entries,
self.local_vlan_map):
agent_ports.pop(self.local_ip, None)
if len(agent_ports):
if not self.enable_distributed_routing:
with self.tun_br.deferred() as deferred_br:
self.fdb_remove_tun(context, deferred_br, lvm,
agent_ports,
self._tunnel_port_lookup)
else:
self.fdb_remove_tun(context, self.tun_br, lvm,
agent_ports, self._tunnel_port_lookup)
def add_fdb_flow(self, br, port_info, remote_ip, lvm, ofport):
if port_info == n_const.FLOODING_ENTRY:
lvm.tun_ofports.add(ofport)
br.install_flood_to_tun(lvm.vlan, lvm.segmentation_id,
lvm.tun_ofports)
else:
self.setup_entry_for_arp_reply(br, 'add', lvm.vlan,
port_info.mac_address,
port_info.ip_address)
br.install_unicast_to_tun(lvm.vlan,
lvm.segmentation_id,
ofport,
port_info.mac_address)
def del_fdb_flow(self, br, port_info, remote_ip, lvm, ofport):
if port_info == n_const.FLOODING_ENTRY:
if ofport not in lvm.tun_ofports:
LOG.debug("attempt to remove a non-existent port %s", ofport)
return
lvm.tun_ofports.remove(ofport)
if len(lvm.tun_ofports) > 0:
br.install_flood_to_tun(lvm.vlan, lvm.segmentation_id,
lvm.tun_ofports)
else:
# This local vlan doesn't require any more tunneling
br.delete_flood_to_tun(lvm.vlan)
else:
self.setup_entry_for_arp_reply(br, 'remove', lvm.vlan,
port_info.mac_address,
port_info.ip_address)
br.delete_unicast_to_tun(lvm.vlan, port_info.mac_address)
def _fdb_chg_ip(self, context, fdb_entries):
LOG.debug("update chg_ip received")
with self.tun_br.deferred() as deferred_br:
self.fdb_chg_ip_tun(context, deferred_br, fdb_entries,
self.local_ip, self.local_vlan_map)
def setup_entry_for_arp_reply(self, br, action, local_vid, mac_address,
ip_address):
        '''Set the ARP responder entry.
        When the l2 population mechanism driver is used and OVS supports
        editing ARP fields, a table (ARP_RESPONDER) that resolves ARP locally
        is added to the tunnel bridge.
'''
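        # Illustrative call (hypothetical values): action='add', local_vid=5,
        # mac_address='fa:16:3e:aa:bb:cc', ip_address='10.0.0.3' installs an
        # ARP responder flow on the tunnel bridge that answers ARP requests
        # for 10.0.0.3 on local VLAN 5 with that MAC.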
if not self.arp_responder_enabled:
return
ip = netaddr.IPAddress(ip_address)
if ip.version == 6:
return
ip = str(ip)
mac = str(netaddr.EUI(mac_address, dialect=_mac_mydialect))
if action == 'add':
br.install_arp_responder(local_vid, ip, mac)
elif action == 'remove':
br.delete_arp_responder(local_vid, ip)
else:
LOG.warning(_LW('Action %s not supported'), action)
def _local_vlan_for_flat(self, lvid, physical_network):
phys_br = self.phys_brs[physical_network]
phys_port = self.phys_ofports[physical_network]
int_br = self.int_br
int_port = self.int_ofports[physical_network]
phys_br.provision_local_vlan(port=phys_port, lvid=lvid,
segmentation_id=None,
distributed=False)
int_br.provision_local_vlan(port=int_port, lvid=lvid,
segmentation_id=None)
def _local_vlan_for_vlan(self, lvid, physical_network, segmentation_id):
distributed = self.enable_distributed_routing
phys_br = self.phys_brs[physical_network]
phys_port = self.phys_ofports[physical_network]
int_br = self.int_br
int_port = self.int_ofports[physical_network]
phys_br.provision_local_vlan(port=phys_port, lvid=lvid,
segmentation_id=segmentation_id,
distributed=distributed)
int_br.provision_local_vlan(port=int_port, lvid=lvid,
segmentation_id=segmentation_id)
def provision_local_vlan(self, net_uuid, network_type, physical_network,
segmentation_id):
'''Provisions a local VLAN.
:param net_uuid: the uuid of the network associated with this vlan.
:param network_type: the network type ('gre', 'vxlan', 'vlan', 'flat',
'local', 'geneve')
:param physical_network: the physical network for 'vlan' or 'flat'
:param segmentation_id: the VID for 'vlan' or tunnel ID for 'tunnel'
'''
# On a restart or crash of OVS, the network associated with this VLAN
# will already be assigned, so check for that here before assigning a
# new one.
lvm = self.local_vlan_map.get(net_uuid)
if lvm:
lvid = lvm.vlan
else:
lvid = self._local_vlan_hints.pop(net_uuid, None)
if lvid is None:
if not self.available_local_vlans:
LOG.error(_LE("No local VLAN available for net-id=%s"),
net_uuid)
return
lvid = self.available_local_vlans.pop()
self.local_vlan_map[net_uuid] = LocalVLANMapping(lvid,
network_type,
physical_network,
segmentation_id)
LOG.info(_LI("Assigning %(vlan_id)s as local vlan for "
"net-id=%(net_uuid)s"),
{'vlan_id': lvid, 'net_uuid': net_uuid})
if network_type in constants.TUNNEL_NETWORK_TYPES:
if self.enable_tunneling:
# outbound broadcast/multicast
ofports = list(self.tun_br_ofports[network_type].values())
if ofports:
self.tun_br.install_flood_to_tun(lvid,
segmentation_id,
ofports)
# inbound from tunnels: set lvid in the right table
# and resubmit to Table LEARN_FROM_TUN for mac learning
if self.enable_distributed_routing:
self.dvr_agent.process_tunneled_network(
network_type, lvid, segmentation_id)
else:
self.tun_br.provision_local_vlan(
network_type=network_type, lvid=lvid,
segmentation_id=segmentation_id)
else:
LOG.error(_LE("Cannot provision %(network_type)s network for "
"net-id=%(net_uuid)s - tunneling disabled"),
{'network_type': network_type,
'net_uuid': net_uuid})
elif network_type == p_const.TYPE_FLAT:
if physical_network in self.phys_brs:
self._local_vlan_for_flat(lvid, physical_network)
else:
LOG.error(_LE("Cannot provision flat network for "
"net-id=%(net_uuid)s - no bridge for "
"physical_network %(physical_network)s"),
{'net_uuid': net_uuid,
'physical_network': physical_network})
elif network_type == p_const.TYPE_VLAN:
if physical_network in self.phys_brs:
self._local_vlan_for_vlan(lvid, physical_network,
segmentation_id)
else:
LOG.error(_LE("Cannot provision VLAN network for "
"net-id=%(net_uuid)s - no bridge for "
"physical_network %(physical_network)s"),
{'net_uuid': net_uuid,
'physical_network': physical_network})
elif network_type == p_const.TYPE_LOCAL:
# no flows needed for local networks
pass
else:
LOG.error(_LE("Cannot provision unknown network type "
"%(network_type)s for net-id=%(net_uuid)s"),
{'network_type': network_type,
'net_uuid': net_uuid})
def reclaim_local_vlan(self, net_uuid):
'''Reclaim a local VLAN.
:param net_uuid: the network uuid associated with this vlan.
'''
lvm = self.local_vlan_map.pop(net_uuid, None)
if lvm is None:
LOG.debug("Network %s not used on agent.", net_uuid)
return
LOG.info(_LI("Reclaiming vlan = %(vlan_id)s from "
"net-id = %(net_uuid)s"),
{'vlan_id': lvm.vlan, 'net_uuid': net_uuid})
if lvm.network_type in constants.TUNNEL_NETWORK_TYPES:
if self.enable_tunneling:
self.tun_br.reclaim_local_vlan(
network_type=lvm.network_type,
segmentation_id=lvm.segmentation_id)
self.tun_br.delete_flood_to_tun(lvm.vlan)
self.tun_br.delete_unicast_to_tun(lvm.vlan, None)
self.tun_br.delete_arp_responder(lvm.vlan, None)
if self.l2_pop:
# Try to remove tunnel ports if not used by other networks
for ofport in lvm.tun_ofports:
self.cleanup_tunnel_port(self.tun_br, ofport,
lvm.network_type)
elif lvm.network_type == p_const.TYPE_FLAT:
if lvm.physical_network in self.phys_brs:
# outbound
br = self.phys_brs[lvm.physical_network]
br.reclaim_local_vlan(
port=self.phys_ofports[lvm.physical_network],
lvid=lvm.vlan)
# inbound
br = self.int_br
br.reclaim_local_vlan(
port=self.int_ofports[lvm.physical_network],
segmentation_id=None)
elif lvm.network_type == p_const.TYPE_VLAN:
if lvm.physical_network in self.phys_brs:
# outbound
br = self.phys_brs[lvm.physical_network]
br.reclaim_local_vlan(
port=self.phys_ofports[lvm.physical_network],
lvid=lvm.vlan)
# inbound
br = self.int_br
br.reclaim_local_vlan(
port=self.int_ofports[lvm.physical_network],
segmentation_id=lvm.segmentation_id)
elif lvm.network_type == p_const.TYPE_LOCAL:
# no flows needed for local networks
pass
else:
LOG.error(_LE("Cannot reclaim unknown network type "
"%(network_type)s for net-id=%(net_uuid)s"),
{'network_type': lvm.network_type,
'net_uuid': net_uuid})
self.available_local_vlans.add(lvm.vlan)
def port_bound(self, port, net_uuid,
network_type, physical_network,
segmentation_id, fixed_ips, device_owner,
ovs_restarted):
'''Bind port to net_uuid/lsw_id and install flow for inbound traffic
to vm.
:param port: a ovs_lib.VifPort object.
:param net_uuid: the net_uuid this port is to be associated with.
:param network_type: the network type ('gre', 'vlan', 'flat', 'local')
:param physical_network: the physical network for 'vlan' or 'flat'
:param segmentation_id: the VID for 'vlan' or tunnel ID for 'tunnel'
:param fixed_ips: the ip addresses assigned to this port
:param device_owner: the string indicative of owner of this port
:param ovs_restarted: indicates if this is called for an OVS restart.
'''
if net_uuid not in self.local_vlan_map or ovs_restarted:
self.provision_local_vlan(net_uuid, network_type,
physical_network, segmentation_id)
lvm = self.local_vlan_map[net_uuid]
lvm.vif_ports[port.vif_id] = port
self.dvr_agent.bind_port_to_dvr(port, lvm,
fixed_ips,
device_owner)
port_other_config = self.int_br.db_get_val("Port", port.port_name,
"other_config")
if port_other_config is None:
if port.vif_id in self.deleted_ports:
LOG.debug("Port %s deleted concurrently", port.vif_id)
elif port.vif_id in self.updated_ports:
LOG.error(_LE("Expected port %s not found"), port.vif_id)
else:
LOG.debug("Unable to get config for port %s", port.vif_id)
return False
vlan_mapping = {'net_uuid': net_uuid,
'network_type': network_type,
'physical_network': physical_network}
if segmentation_id is not None:
vlan_mapping['segmentation_id'] = segmentation_id
port_other_config.update(vlan_mapping)
self.int_br.set_db_attribute("Port", port.port_name, "other_config",
port_other_config)
return True
def _bind_devices(self, need_binding_ports):
devices_up = []
devices_down = []
port_names = [p['vif_port'].port_name for p in need_binding_ports]
port_info = self.int_br.get_ports_attributes(
"Port", columns=["name", "tag"], ports=port_names, if_exists=True)
tags_by_name = {x['name']: x['tag'] for x in port_info}
for port_detail in need_binding_ports:
lvm = self.local_vlan_map.get(port_detail['network_id'])
if not lvm:
# network for port was deleted. skip this port since it
# will need to be handled as a DEAD port in the next scan
continue
port = port_detail['vif_port']
device = port_detail['device']
# Do not bind a port if it's already bound
cur_tag = tags_by_name.get(port.port_name)
if cur_tag is None:
LOG.debug("Port %s was deleted concurrently, skipping it",
port.port_name)
continue
if cur_tag != lvm.vlan:
self.int_br.delete_flows(in_port=port.ofport)
if self.prevent_arp_spoofing:
self.setup_arp_spoofing_protection(self.int_br,
port, port_detail)
if cur_tag != lvm.vlan:
self.int_br.set_db_attribute(
"Port", port.port_name, "tag", lvm.vlan)
# update plugin about port status
# FIXME(salv-orlando): Failures while updating device status
# must be handled appropriately. Otherwise this might prevent
# neutron server from sending network-vif-* events to the nova
# API server, thus possibly preventing instance spawn.
if port_detail.get('admin_state_up'):
LOG.debug("Setting status for %s to UP", device)
devices_up.append(device)
else:
LOG.debug("Setting status for %s to DOWN", device)
devices_down.append(device)
failed_devices = []
if devices_up or devices_down:
devices_set = self.plugin_rpc.update_device_list(
self.context, devices_up, devices_down, self.agent_id,
self.conf.host)
failed_devices = (devices_set.get('failed_devices_up') +
devices_set.get('failed_devices_down'))
if failed_devices:
LOG.error(_LE("Configuration for devices %s failed!"),
failed_devices)
#TODO(rossella_s) handle better the resync in next patches,
# this is just to preserve the current behavior
raise DeviceListRetrievalError(devices=failed_devices)
LOG.info(_LI("Configuration for devices up %(up)s and devices "
"down %(down)s completed."),
{'up': devices_up, 'down': devices_down})
@staticmethod
def setup_arp_spoofing_protection(bridge, vif, port_details):
# clear any previous flows related to this port in our ARP table
bridge.delete_arp_spoofing_protection(port=vif.ofport)
if not port_details.get('port_security_enabled', True):
LOG.info(_LI("Skipping ARP spoofing rules for port '%s' because "
"it has port security disabled"), vif.port_name)
return
if port_details['device_owner'].startswith(
n_const.DEVICE_OWNER_NETWORK_PREFIX):
LOG.debug("Skipping ARP spoofing rules for network owned port "
"'%s'.", vif.port_name)
return
# collect all of the addresses and cidrs that belong to the port
addresses = {f['ip_address'] for f in port_details['fixed_ips']}
mac_addresses = {vif.vif_mac}
if port_details.get('allowed_address_pairs'):
addresses |= {p['ip_address']
for p in port_details['allowed_address_pairs']}
mac_addresses |= {p['mac_address']
for p in port_details['allowed_address_pairs']
if p.get('mac_address')}
ipv6_addresses = {ip for ip in addresses
if netaddr.IPNetwork(ip).version == 6}
# Allow neighbor advertisements for LLA address.
ipv6_addresses |= {str(ipv6.get_ipv6_addr_by_EUI64(
n_const.IPV6_LLA_PREFIX, mac))
for mac in mac_addresses}
if not has_zero_prefixlen_address(ipv6_addresses):
# Install protection only when prefix is not zero because a /0
# prefix allows any address anyway and the nd_target can only
# match on /1 or more.
bridge.install_icmpv6_na_spoofing_protection(port=vif.ofport,
ip_addresses=ipv6_addresses)
ipv4_addresses = {ip for ip in addresses
if netaddr.IPNetwork(ip).version == 4}
if not has_zero_prefixlen_address(ipv4_addresses):
# Install protection only when prefix is not zero because a /0
# prefix allows any address anyway and the ARP_SPA can only
# match on /1 or more.
bridge.install_arp_spoofing_protection(port=vif.ofport,
ip_addresses=ipv4_addresses)
def port_unbound(self, vif_id, net_uuid=None):
'''Unbind port.
Removes corresponding local vlan mapping object if this is its last
VIF.
:param vif_id: the id of the vif
:param net_uuid: the net_uuid this port is associated with.
'''
if net_uuid is None:
net_uuid = self.get_net_uuid(vif_id)
if not self.local_vlan_map.get(net_uuid):
LOG.info(_LI('port_unbound(): net_uuid %s not in local_vlan_map'),
net_uuid)
return
lvm = self.local_vlan_map[net_uuid]
if vif_id in lvm.vif_ports:
vif_port = lvm.vif_ports[vif_id]
self.dvr_agent.unbind_port_from_dvr(vif_port, lvm)
lvm.vif_ports.pop(vif_id, None)
if not lvm.vif_ports:
self.reclaim_local_vlan(net_uuid)
def port_dead(self, port, log_errors=True):
'''Once a port has no binding, put it on the "dead vlan".
:param port: a ovs_lib.VifPort object.
'''
# Don't kill a port if it's already dead
cur_tag = self.int_br.db_get_val("Port", port.port_name, "tag",
log_errors=log_errors)
if cur_tag != DEAD_VLAN_TAG:
self.int_br.set_db_attribute("Port", port.port_name, "tag",
DEAD_VLAN_TAG, log_errors=log_errors)
self.int_br.drop_port(in_port=port.ofport)
def setup_integration_br(self):
'''Setup the integration bridge.
'''
self.int_br.set_agent_uuid_stamp(self.agent_uuid_stamp)
# Ensure the integration bridge is created.
# ovs_lib.OVSBridge.create() will run
# ovs-vsctl -- --may-exist add-br BRIDGE_NAME
# which does nothing if bridge already exists.
self.int_br.create()
self.int_br.set_secure_mode()
self.int_br.setup_controllers(self.conf)
self.int_br.delete_port(self.conf.OVS.int_peer_patch_port)
if self.conf.AGENT.drop_flows_on_start:
self.int_br.delete_flows()
self.int_br.setup_default_table()
def setup_ancillary_bridges(self, integ_br, tun_br):
'''Setup ancillary bridges - for example br-ex.'''
ovs = ovs_lib.BaseOVS()
ovs_bridges = set(ovs.get_bridges())
# Remove all known bridges
ovs_bridges.remove(integ_br)
if self.enable_tunneling:
ovs_bridges.remove(tun_br)
br_names = [self.phys_brs[physical_network].br_name for
physical_network in self.phys_brs]
ovs_bridges.difference_update(br_names)
# Filter list of bridges to those that have external
# bridge-id's configured
br_names = []
for bridge in ovs_bridges:
bridge_id = ovs.get_bridge_external_bridge_id(bridge)
if bridge_id != bridge:
br_names.append(bridge)
ovs_bridges.difference_update(br_names)
ancillary_bridges = []
for bridge in ovs_bridges:
br = ovs_lib.OVSBridge(bridge)
LOG.info(_LI('Adding %s to list of bridges.'), bridge)
ancillary_bridges.append(br)
return ancillary_bridges
def setup_tunnel_br(self, tun_br_name=None):
'''(re)initialize the tunnel bridge.
Creates tunnel bridge, and links it to the integration bridge
using a patch port.
:param tun_br_name: the name of the tunnel bridge.
'''
if not self.tun_br:
self.tun_br = self.br_tun_cls(tun_br_name)
self.tun_br.set_agent_uuid_stamp(self.agent_uuid_stamp)
if not self.tun_br.bridge_exists(self.tun_br.br_name):
self.tun_br.create(secure_mode=True)
self.tun_br.setup_controllers(self.conf)
if (not self.int_br.port_exists(self.conf.OVS.int_peer_patch_port) or
self.patch_tun_ofport == ovs_lib.INVALID_OFPORT):
self.patch_tun_ofport = self.int_br.add_patch_port(
self.conf.OVS.int_peer_patch_port,
self.conf.OVS.tun_peer_patch_port)
if (not self.tun_br.port_exists(self.conf.OVS.tun_peer_patch_port) or
self.patch_int_ofport == ovs_lib.INVALID_OFPORT):
self.patch_int_ofport = self.tun_br.add_patch_port(
self.conf.OVS.tun_peer_patch_port,
self.conf.OVS.int_peer_patch_port)
if ovs_lib.INVALID_OFPORT in (self.patch_tun_ofport,
self.patch_int_ofport):
LOG.error(_LE("Failed to create OVS patch port. Cannot have "
"tunneling enabled on this agent, since this "
"version of OVS does not support tunnels or patch "
"ports. Agent terminated!"))
exit(1)
if self.conf.AGENT.drop_flows_on_start:
self.tun_br.delete_flows()
def setup_tunnel_br_flows(self):
'''Setup the tunnel bridge.
Add all flows to the tunnel bridge.
'''
self.tun_br.setup_default_table(self.patch_int_ofport,
self.arp_responder_enabled)
def get_peer_name(self, prefix, name):
"""Construct a peer name based on the prefix and name.
        The peer name cannot exceed the maximum length allowed for a Linux
        network device. Longer names are hashed to help ensure uniqueness.
"""
if len(prefix + name) <= n_const.DEVICE_NAME_MAX_LEN:
return prefix + name
# We can't just truncate because bridges may be distinguished
# by an ident at the end. A hash over the name should be unique.
# Leave part of the bridge name on for easier identification
hashlen = 6
namelen = n_const.DEVICE_NAME_MAX_LEN - len(prefix) - hashlen
if isinstance(name, six.text_type):
hashed_name = hashlib.sha1(name.encode('utf-8'))
else:
hashed_name = hashlib.sha1(name)
new_name = ('%(prefix)s%(truncated)s%(hash)s' %
{'prefix': prefix, 'truncated': name[0:namelen],
'hash': hashed_name.hexdigest()[0:hashlen]})
LOG.warning(_LW("Creating an interface named %(name)s exceeds the "
"%(limit)d character limitation. It was shortened to "
"%(new_name)s to fit."),
{'name': name, 'limit': n_const.DEVICE_NAME_MAX_LEN,
'new_name': new_name})
return new_name
def setup_physical_bridges(self, bridge_mappings):
'''Setup the physical network bridges.
Creates physical network bridges and links them to the
integration bridge using veths or patch ports.
:param bridge_mappings: map physical network names to bridge names.
'''
self.phys_brs = {}
self.int_ofports = {}
self.phys_ofports = {}
ip_wrapper = ip_lib.IPWrapper()
ovs = ovs_lib.BaseOVS()
ovs_bridges = ovs.get_bridges()
for physical_network, bridge in six.iteritems(bridge_mappings):
LOG.info(_LI("Mapping physical network %(physical_network)s to "
"bridge %(bridge)s"),
{'physical_network': physical_network,
'bridge': bridge})
# setup physical bridge
if bridge not in ovs_bridges:
LOG.error(_LE("Bridge %(bridge)s for physical network "
"%(physical_network)s does not exist. Agent "
"terminated!"),
{'physical_network': physical_network,
'bridge': bridge})
sys.exit(1)
br = self.br_phys_cls(bridge)
br.setup_controllers(self.conf)
br.setup_default_table()
self.phys_brs[physical_network] = br
# interconnect physical and integration bridges using veth/patchs
int_if_name = self.get_peer_name(constants.PEER_INTEGRATION_PREFIX,
bridge)
phys_if_name = self.get_peer_name(constants.PEER_PHYSICAL_PREFIX,
bridge)
            # The interface type of the port must be the same on the physical
            # and integration bridges, so check only one of them.
int_type = self.int_br.db_get_val("Interface", int_if_name, "type")
if self.use_veth_interconnection:
                # Drop ports if the interface type doesn't match the
                # configuration value.
if int_type == 'patch':
self.int_br.delete_port(int_if_name)
br.delete_port(phys_if_name)
device = ip_lib.IPDevice(int_if_name)
if device.exists():
device.link.delete()
# Give udev a chance to process its rules here, to avoid
# race conditions between commands launched by udev rules
# and the subsequent call to ip_wrapper.add_veth
utils.execute(['udevadm', 'settle', '--timeout=10'])
int_veth, phys_veth = ip_wrapper.add_veth(int_if_name,
phys_if_name)
int_ofport = self.int_br.add_port(int_veth)
phys_ofport = br.add_port(phys_veth)
else:
# Drop ports if the interface type doesn't match the
# configuration value
if int_type == 'veth':
self.int_br.delete_port(int_if_name)
br.delete_port(phys_if_name)
# Create patch ports without associating them in order to block
# untranslated traffic before association
int_ofport = self.int_br.add_patch_port(
int_if_name, constants.NONEXISTENT_PEER)
phys_ofport = br.add_patch_port(
phys_if_name, constants.NONEXISTENT_PEER)
self.int_ofports[physical_network] = int_ofport
self.phys_ofports[physical_network] = phys_ofport
# block all untranslated traffic between bridges
self.int_br.drop_port(in_port=int_ofport)
br.drop_port(in_port=phys_ofport)
if self.use_veth_interconnection:
# enable veth to pass traffic
int_veth.link.set_up()
phys_veth.link.set_up()
if self.veth_mtu:
# set up mtu size for veth interfaces
int_veth.link.set_mtu(self.veth_mtu)
phys_veth.link.set_mtu(self.veth_mtu)
else:
# associate patch ports to pass traffic
self.int_br.set_db_attribute('Interface', int_if_name,
'options:peer', phys_if_name)
br.set_db_attribute('Interface', phys_if_name,
'options:peer', int_if_name)
def update_stale_ofport_rules(self):
# right now the ARP spoofing rules are the only thing that utilizes
# ofport-based rules, so make arp_spoofing protection a conditional
# until something else uses ofport
if not self.prevent_arp_spoofing:
return []
previous = self.vifname_to_ofport_map
current = self.int_br.get_vif_port_to_ofport_map()
# if any ofport numbers have changed, re-process the devices as
# added ports so any rules based on ofport numbers are updated.
moved_ports = self._get_ofport_moves(current, previous)
# delete any stale rules based on removed ofports
ofports_deleted = set(previous.values()) - set(current.values())
for ofport in ofports_deleted:
self.int_br.delete_arp_spoofing_protection(port=ofport)
# store map for next iteration
self.vifname_to_ofport_map = current
return moved_ports
@staticmethod
def _get_ofport_moves(current, previous):
"""Returns a list of moved ports.
        Takes two port->ofport maps and returns a list of ports that moved to
        a different ofport. Deleted ports are not included.
"""
port_moves = []
for name, ofport in previous.items():
if name not in current:
continue
current_ofport = current[name]
if ofport != current_ofport:
port_moves.append(name)
return port_moves
def _get_port_info(self, registered_ports, cur_ports,
readd_registered_ports):
port_info = {'current': cur_ports}
# FIXME(salv-orlando): It's not really necessary to return early
# if nothing has changed.
if not readd_registered_ports and cur_ports == registered_ports:
return port_info
if readd_registered_ports:
port_info['added'] = cur_ports
else:
port_info['added'] = cur_ports - registered_ports
# Update port_info with ports not found on the integration bridge
port_info['removed'] = registered_ports - cur_ports
return port_info
def scan_ports(self, registered_ports, sync, updated_ports=None):
cur_ports = self.int_br.get_vif_port_set()
self.int_br_device_count = len(cur_ports)
port_info = self._get_port_info(registered_ports, cur_ports, sync)
if updated_ports is None:
updated_ports = set()
updated_ports.update(self.check_changed_vlans())
if updated_ports:
# Some updated ports might have been removed in the
# meanwhile, and therefore should not be processed.
# In this case the updated port won't be found among
# current ports.
updated_ports &= cur_ports
if updated_ports:
port_info['updated'] = updated_ports
return port_info
def scan_ancillary_ports(self, registered_ports, sync):
cur_ports = set()
for bridge in self.ancillary_brs:
cur_ports |= bridge.get_vif_port_set()
return self._get_port_info(registered_ports, cur_ports, sync)
def check_changed_vlans(self):
"""Return ports which have lost their vlan tag.
The returned value is a set of port ids of the ports concerned by a
vlan tag loss.
"""
port_tags = self.int_br.get_port_tag_dict()
changed_ports = set()
for lvm in self.local_vlan_map.values():
for port in lvm.vif_ports.values():
if (
port.port_name in port_tags
and port_tags[port.port_name] != lvm.vlan
):
LOG.info(
_LI("Port '%(port_name)s' has lost "
"its vlan tag '%(vlan_tag)d'!"),
{'port_name': port.port_name,
'vlan_tag': lvm.vlan}
)
changed_ports.add(port.vif_id)
return changed_ports
def treat_vif_port(self, vif_port, port_id, network_id, network_type,
physical_network, segmentation_id, admin_state_up,
fixed_ips, device_owner, ovs_restarted):
# When this function is called for a port, the port should have
# an OVS ofport configured, as only these ports were considered
# for being treated. If that does not happen, it is a potential
# error condition of which operators should be aware
port_needs_binding = True
if not vif_port.ofport:
LOG.warn(_LW("VIF port: %s has no ofport configured, "
"and might not be able to transmit"), vif_port.vif_id)
if vif_port:
if admin_state_up:
port_needs_binding = self.port_bound(
vif_port, network_id, network_type,
physical_network, segmentation_id,
fixed_ips, device_owner, ovs_restarted)
else:
self.port_dead(vif_port)
port_needs_binding = False
else:
LOG.debug("No VIF port for port %s defined on agent.", port_id)
return port_needs_binding
def _setup_tunnel_port(self, br, port_name, remote_ip, tunnel_type):
ofport = br.add_tunnel_port(port_name,
remote_ip,
self.local_ip,
tunnel_type,
self.vxlan_udp_port,
self.dont_fragment,
self.tunnel_csum)
if ofport == ovs_lib.INVALID_OFPORT:
LOG.error(_LE("Failed to set-up %(type)s tunnel port to %(ip)s"),
{'type': tunnel_type, 'ip': remote_ip})
return 0
self.tun_br_ofports[tunnel_type][remote_ip] = ofport
# Add flow in default table to resubmit to the right
# tunneling table (lvid will be set in the latter)
br.setup_tunnel_port(tunnel_type, ofport)
ofports = self.tun_br_ofports[tunnel_type].values()
if ofports and not self.l2_pop:
# Update flooding flows to include the new tunnel
for vlan_mapping in list(self.local_vlan_map.values()):
if vlan_mapping.network_type == tunnel_type:
br.install_flood_to_tun(vlan_mapping.vlan,
vlan_mapping.segmentation_id,
ofports)
return ofport
def setup_tunnel_port(self, br, remote_ip, network_type):
remote_ip_hex = self.get_ip_in_hex(remote_ip)
if not remote_ip_hex:
return 0
port_name = '%s-%s' % (network_type, remote_ip_hex)
ofport = self._setup_tunnel_port(br,
port_name,
remote_ip,
network_type)
return ofport
def cleanup_tunnel_port(self, br, tun_ofport, tunnel_type):
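        """Delete the tunnel port if no local VLAN is using it anymore."""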
# Check if this tunnel port is still used
for lvm in self.local_vlan_map.values():
if tun_ofport in lvm.tun_ofports:
break
# If not, remove it
else:
items = list(self.tun_br_ofports[tunnel_type].items())
for remote_ip, ofport in items:
if ofport == tun_ofport:
port_name = '%s-%s' % (tunnel_type,
self.get_ip_in_hex(remote_ip))
br.delete_port(port_name)
br.cleanup_tunnel_port(ofport)
self.tun_br_ofports[tunnel_type].pop(remote_ip, None)
def treat_devices_added_or_updated(self, devices, ovs_restarted):
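        """Retrieve device details from the plugin and wire the matching VIF
        ports. Return the skipped devices, the devices still needing binding
        and the devices with security disabled.
        """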
skipped_devices = []
need_binding_devices = []
security_disabled_devices = []
devices_details_list = (
self.plugin_rpc.get_devices_details_list_and_failed_devices(
self.context,
devices,
self.agent_id,
self.conf.host))
if devices_details_list.get('failed_devices'):
            #TODO(rossella_s) handle the resync better in next patches,
            # this is just to preserve the current behavior
raise DeviceListRetrievalError(devices=devices)
devices = devices_details_list.get('devices')
vif_by_id = self.int_br.get_vifs_by_ids(
[vif['device'] for vif in devices])
for details in devices:
device = details['device']
LOG.debug("Processing port: %s", device)
port = vif_by_id.get(device)
if not port:
# The port disappeared and cannot be processed
LOG.info(_LI("Port %s was not found on the integration bridge "
"and will therefore not be processed"), device)
skipped_devices.append(device)
continue
if 'port_id' in details:
LOG.info(_LI("Port %(device)s updated. Details: %(details)s"),
{'device': device, 'details': details})
details['vif_port'] = port
need_binding = self.treat_vif_port(port, details['port_id'],
details['network_id'],
details['network_type'],
details['physical_network'],
details['segmentation_id'],
details['admin_state_up'],
details['fixed_ips'],
details['device_owner'],
ovs_restarted)
if need_binding:
need_binding_devices.append(details)
port_security = details['port_security_enabled']
has_sgs = 'security_groups' in details
if not port_security or not has_sgs:
security_disabled_devices.append(device)
self._update_port_network(details['port_id'],
details['network_id'])
self.ext_manager.handle_port(self.context, details)
else:
LOG.warn(_LW("Device %s not defined on plugin"), device)
if (port and port.ofport != -1):
self.port_dead(port)
return skipped_devices, need_binding_devices, security_disabled_devices
def _update_port_network(self, port_id, network_id):
self._clean_network_ports(port_id)
self.network_ports[network_id].add(port_id)
def treat_ancillary_devices_added(self, devices):
devices_details_list = (
self.plugin_rpc.get_devices_details_list_and_failed_devices(
self.context,
devices,
self.agent_id,
self.conf.host))
if devices_details_list.get('failed_devices'):
            #TODO(rossella_s) handle the resync better in next patches,
            # this is just to preserve the current behavior
raise DeviceListRetrievalError(devices=devices)
devices_added = [
d['device'] for d in devices_details_list.get('devices')]
LOG.info(_LI("Ancillary Ports %s added"), devices_added)
# update plugin about port status
devices_set_up = (
self.plugin_rpc.update_device_list(self.context,
devices_added,
[],
self.agent_id,
self.conf.host))
if devices_set_up.get('failed_devices_up'):
            #TODO(rossella_s) handle the resync better in next patches,
            # this is just to preserve the current behavior
raise DeviceListRetrievalError()
def treat_devices_removed(self, devices):
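        """Remove security group filters, report the devices as down to the
        plugin and unbind them locally. Return True if a resync is needed.
        """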
resync = False
self.sg_agent.remove_devices_filter(devices)
LOG.info(_LI("Ports %s removed"), devices)
devices_down = self.plugin_rpc.update_device_list(self.context,
[],
devices,
self.agent_id,
self.conf.host)
failed_devices = devices_down.get('failed_devices_down')
if failed_devices:
LOG.debug("Port removal failed for %(devices)s ", failed_devices)
resync = True
for device in devices:
self.port_unbound(device)
return resync
def treat_ancillary_devices_removed(self, devices):
resync = False
LOG.info(_LI("Ancillary ports %s removed"), devices)
devices_down = self.plugin_rpc.update_device_list(self.context,
[],
devices,
self.agent_id,
self.conf.host)
failed_devices = devices_down.get('failed_devices_down')
if failed_devices:
LOG.debug("Port removal failed for %(devices)s ", failed_devices)
resync = True
for detail in devices_down.get('devices_down'):
if detail['exists']:
LOG.info(_LI("Port %s updated."), detail['device'])
# Nothing to do regarding local networking
else:
LOG.debug("Device %s not defined on plugin", detail['device'])
return resync
def process_network_ports(self, port_info, ovs_restarted):
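        """Wire/unwire the added, updated and removed ports described in
        port_info and update their status on the server. Return True if a
        resync with the plugin is needed.
        """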
resync_a = False
resync_b = False
# TODO(salv-orlando): consider a solution for ensuring notifications
# are processed exactly in the same order in which they were
# received. This is tricky because there are two notification
# sources: the neutron server, and the ovs db monitor process
        # If there is an exception while processing security groups, ports
        # will not be wired anyway, and a resync will be triggered
# VIF wiring needs to be performed always for 'new' devices.
# For updated ports, re-wiring is not needed in most cases, but needs
# to be performed anyway when the admin state of a device is changed.
# A device might be both in the 'added' and 'updated'
# list at the same time; avoid processing it twice.
devices_added_updated = (port_info.get('added', set()) |
port_info.get('updated', set()))
need_binding_devices = []
security_disabled_ports = []
if devices_added_updated:
start = time.time()
try:
(skipped_devices, need_binding_devices,
security_disabled_ports) = (
self.treat_devices_added_or_updated(
devices_added_updated, ovs_restarted))
LOG.debug("process_network_ports - iteration:%(iter_num)d - "
"treat_devices_added_or_updated completed. "
"Skipped %(num_skipped)d devices of "
"%(num_current)d devices currently available. "
"Time elapsed: %(elapsed).3f",
{'iter_num': self.iter_num,
'num_skipped': len(skipped_devices),
'num_current': len(port_info['current']),
'elapsed': time.time() - start})
# Update the list of current ports storing only those which
# have been actually processed.
port_info['current'] = (port_info['current'] -
set(skipped_devices))
except DeviceListRetrievalError:
# Need to resync as there was an error with server
# communication.
LOG.exception(_LE("process_network_ports - iteration:%d - "
"failure while retrieving port details "
"from server"), self.iter_num)
resync_a = True
# Ports are bound before calling the sg_agent setup. This function
        # provides the information needed by the sg_agent setup.
self._bind_devices(need_binding_devices)
# TODO(salv-orlando): Optimize avoiding applying filters
# unnecessarily, (eg: when there are no IP address changes)
added_ports = port_info.get('added', set())
if security_disabled_ports:
added_ports -= set(security_disabled_ports)
self.sg_agent.setup_port_filters(added_ports,
port_info.get('updated', set()))
if 'removed' in port_info and port_info['removed']:
start = time.time()
resync_b = self.treat_devices_removed(port_info['removed'])
LOG.debug("process_network_ports - iteration:%(iter_num)d - "
"treat_devices_removed completed in %(elapsed).3f",
{'iter_num': self.iter_num,
'elapsed': time.time() - start})
# If one of the above operations fails => resync with plugin
return (resync_a | resync_b)
def process_ancillary_network_ports(self, port_info):
resync_a = False
resync_b = False
if 'added' in port_info and port_info['added']:
start = time.time()
try:
self.treat_ancillary_devices_added(port_info['added'])
LOG.debug("process_ancillary_network_ports - iteration: "
"%(iter_num)d - treat_ancillary_devices_added "
"completed in %(elapsed).3f",
{'iter_num': self.iter_num,
'elapsed': time.time() - start})
except DeviceListRetrievalError:
# Need to resync as there was an error with server
# communication.
LOG.exception(_LE("process_ancillary_network_ports - "
"iteration:%d - failure while retrieving "
"port details from server"), self.iter_num)
resync_a = True
if 'removed' in port_info and port_info['removed']:
start = time.time()
resync_b = self.treat_ancillary_devices_removed(
port_info['removed'])
LOG.debug("process_ancillary_network_ports - iteration: "
"%(iter_num)d - treat_ancillary_devices_removed "
"completed in %(elapsed).3f",
{'iter_num': self.iter_num,
'elapsed': time.time() - start})
# If one of the above operations fails => resync with plugin
return (resync_a | resync_b)
def get_ip_in_hex(self, ip_address):
try:
return '%08x' % netaddr.IPAddress(ip_address, version=4)
except Exception:
LOG.warn(_LW("Invalid remote IP: %s"), ip_address)
return
def tunnel_sync(self):
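        """Advertise the local tunnel endpoint to the plugin and, unless
        l2_pop is enabled, create tunnel ports towards the returned peers.
        Return True if the sync has to be retried.
        """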
try:
for tunnel_type in self.tunnel_types:
details = self.plugin_rpc.tunnel_sync(self.context,
self.local_ip,
tunnel_type,
self.conf.host)
if not self.l2_pop:
tunnels = details['tunnels']
for tunnel in tunnels:
if self.local_ip != tunnel['ip_address']:
remote_ip = tunnel['ip_address']
remote_ip_hex = self.get_ip_in_hex(remote_ip)
if not remote_ip_hex:
continue
tun_name = '%s-%s' % (tunnel_type, remote_ip_hex)
self._setup_tunnel_port(self.tun_br,
tun_name,
tunnel['ip_address'],
tunnel_type)
except Exception as e:
LOG.debug("Unable to sync tunnel IP %(local_ip)s: %(e)s",
{'local_ip': self.local_ip, 'e': e})
return True
return False
def _agent_has_updates(self, polling_manager):
return (polling_manager.is_polling_required or
self.updated_ports or
self.deleted_ports or
self.sg_agent.firewall_refresh_needed())
def _port_info_has_changes(self, port_info):
return (port_info.get('added') or
port_info.get('removed') or
port_info.get('updated'))
def check_ovs_status(self):
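        """Use the canary flow to detect whether OVS is normal, restarted or
        dead, and return the corresponding status constant.
        """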
# Check for the canary flow
status = self.int_br.check_canary_table()
if status == constants.OVS_RESTARTED:
LOG.warn(_LW("OVS is restarted. OVSNeutronAgent will reset "
"bridges and recover ports."))
elif status == constants.OVS_DEAD:
LOG.warn(_LW("OVS is dead. OVSNeutronAgent will keep running "
"and checking OVS status periodically."))
return status
def loop_count_and_wait(self, start_time, port_stats):
# sleep till end of polling interval
elapsed = time.time() - start_time
LOG.debug("Agent rpc_loop - iteration:%(iter_num)d "
"completed. Processed ports statistics: "
"%(port_stats)s. Elapsed:%(elapsed).3f",
{'iter_num': self.iter_num,
'port_stats': port_stats,
'elapsed': elapsed})
if elapsed < self.polling_interval:
time.sleep(self.polling_interval - elapsed)
else:
LOG.debug("Loop iteration exceeded interval "
"(%(polling_interval)s vs. %(elapsed)s)!",
{'polling_interval': self.polling_interval,
'elapsed': elapsed})
self.iter_num = self.iter_num + 1
def get_port_stats(self, port_info, ancillary_port_info):
port_stats = {
'regular': {
'added': len(port_info.get('added', [])),
'updated': len(port_info.get('updated', [])),
'removed': len(port_info.get('removed', []))}}
if self.ancillary_brs:
port_stats['ancillary'] = {
'added': len(ancillary_port_info.get('added', [])),
'removed': len(ancillary_port_info.get('removed', []))}
return port_stats
def cleanup_stale_flows(self):
bridges = [self.int_br]
if self.enable_tunneling:
bridges.append(self.tun_br)
for bridge in bridges:
LOG.info(_LI("Cleaning stale %s flows"), bridge.br_name)
bridge.cleanup_flows()
def rpc_loop(self, polling_manager=None):
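        """Main agent loop: poll OVS and the plugin, process port changes and
        resync with the plugin whenever an error is detected, until the agent
        is asked to stop.
        """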
if not polling_manager:
polling_manager = polling.get_polling_manager(
minimize_polling=False)
sync = True
ports = set()
updated_ports_copy = set()
ancillary_ports = set()
tunnel_sync = True
ovs_restarted = False
consecutive_resyncs = 0
need_clean_stale_flow = True
while self._check_and_handle_signal():
if self.fullsync:
LOG.info(_LI("rpc_loop doing a full sync."))
sync = True
self.fullsync = False
port_info = {}
ancillary_port_info = {}
start = time.time()
LOG.debug("Agent rpc_loop - iteration:%d started",
self.iter_num)
if sync:
LOG.info(_LI("Agent out of sync with plugin!"))
polling_manager.force_polling()
consecutive_resyncs = consecutive_resyncs + 1
if consecutive_resyncs >= constants.MAX_DEVICE_RETRIES:
LOG.warn(_LW("Clearing cache of registered ports, retrials"
" to resync were > %s"),
constants.MAX_DEVICE_RETRIES)
ports.clear()
ancillary_ports.clear()
sync = False
consecutive_resyncs = 0
else:
consecutive_resyncs = 0
ovs_status = self.check_ovs_status()
if ovs_status == constants.OVS_RESTARTED:
self.setup_integration_br()
self.setup_physical_bridges(self.bridge_mappings)
if self.enable_tunneling:
self.setup_tunnel_br()
self.setup_tunnel_br_flows()
tunnel_sync = True
if self.enable_distributed_routing:
self.dvr_agent.reset_ovs_parameters(self.int_br,
self.tun_br,
self.patch_int_ofport,
self.patch_tun_ofport)
self.dvr_agent.reset_dvr_parameters()
self.dvr_agent.setup_dvr_flows()
elif ovs_status == constants.OVS_DEAD:
# Agent doesn't apply any operations when ovs is dead, to
# prevent unexpected failure or crash. Sleep and continue
# loop in which ovs status will be checked periodically.
port_stats = self.get_port_stats({}, {})
self.loop_count_and_wait(start, port_stats)
continue
# Notify the plugin of tunnel IP
if self.enable_tunneling and tunnel_sync:
LOG.info(_LI("Agent tunnel out of sync with plugin!"))
try:
tunnel_sync = self.tunnel_sync()
except Exception:
LOG.exception(_LE("Error while synchronizing tunnels"))
tunnel_sync = True
ovs_restarted |= (ovs_status == constants.OVS_RESTARTED)
if self._agent_has_updates(polling_manager) or ovs_restarted:
try:
LOG.debug("Agent rpc_loop - iteration:%(iter_num)d - "
"starting polling. Elapsed:%(elapsed).3f",
{'iter_num': self.iter_num,
'elapsed': time.time() - start})
# Save updated ports dict to perform rollback in
# case resync would be needed, and then clear
# self.updated_ports. As the greenthread should not yield
# between these two statements, this will be thread-safe
updated_ports_copy = self.updated_ports
self.updated_ports = set()
reg_ports = (set() if ovs_restarted else ports)
port_info = self.scan_ports(reg_ports, sync,
updated_ports_copy)
self.process_deleted_ports(port_info)
ofport_changed_ports = self.update_stale_ofport_rules()
if ofport_changed_ports:
port_info.setdefault('updated', set()).update(
ofport_changed_ports)
LOG.debug("Agent rpc_loop - iteration:%(iter_num)d - "
"port information retrieved. "
"Elapsed:%(elapsed).3f",
{'iter_num': self.iter_num,
'elapsed': time.time() - start})
# Treat ancillary devices if they exist
if self.ancillary_brs:
ancillary_port_info = self.scan_ancillary_ports(
ancillary_ports, sync)
LOG.debug("Agent rpc_loop - iteration:%(iter_num)d - "
"ancillary port info retrieved. "
"Elapsed:%(elapsed).3f",
{'iter_num': self.iter_num,
'elapsed': time.time() - start})
sync = False
# Secure and wire/unwire VIFs and update their status
# on Neutron server
if (self._port_info_has_changes(port_info) or
self.sg_agent.firewall_refresh_needed() or
ovs_restarted):
LOG.debug("Starting to process devices in:%s",
port_info)
# If treat devices fails - must resync with plugin
sync = self.process_network_ports(port_info,
ovs_restarted)
if not sync and need_clean_stale_flow:
self.cleanup_stale_flows()
need_clean_stale_flow = False
LOG.debug("Agent rpc_loop - iteration:%(iter_num)d - "
"ports processed. Elapsed:%(elapsed).3f",
{'iter_num': self.iter_num,
'elapsed': time.time() - start})
ports = port_info['current']
if self.ancillary_brs:
sync |= self.process_ancillary_network_ports(
ancillary_port_info)
LOG.debug("Agent rpc_loop - iteration: "
"%(iter_num)d - ancillary ports "
"processed. Elapsed:%(elapsed).3f",
{'iter_num': self.iter_num,
'elapsed': time.time() - start})
ancillary_ports = ancillary_port_info['current']
polling_manager.polling_completed()
# Keep this flag in the last line of "try" block,
                    # so we can be sure that no other Exception occurred.
if not sync:
ovs_restarted = False
self._dispose_local_vlan_hints()
except Exception:
LOG.exception(_LE("Error while processing VIF ports"))
                    # Put the ports back in self.updated_ports
self.updated_ports |= updated_ports_copy
sync = True
port_stats = self.get_port_stats(port_info, ancillary_port_info)
self.loop_count_and_wait(start, port_stats)
def daemon_loop(self):
# Start everything.
LOG.info(_LI("Agent initialized successfully, now running... "))
signal.signal(signal.SIGTERM, self._handle_sigterm)
if hasattr(signal, 'SIGHUP'):
signal.signal(signal.SIGHUP, self._handle_sighup)
with polling.get_polling_manager(
self.minimize_polling,
self.ovsdb_monitor_respawn_interval) as pm:
self.rpc_loop(polling_manager=pm)
def _handle_sigterm(self, signum, frame):
self.catch_sigterm = True
if self.quitting_rpc_timeout:
self.set_rpc_timeout(self.quitting_rpc_timeout)
def _handle_sighup(self, signum, frame):
self.catch_sighup = True
def _check_and_handle_signal(self):
if self.catch_sigterm:
LOG.info(_LI("Agent caught SIGTERM, quitting daemon loop."))
self.run_daemon_loop = False
self.catch_sigterm = False
if self.catch_sighup:
LOG.info(_LI("Agent caught SIGHUP, resetting."))
self.conf.reload_config_files()
config.setup_logging()
LOG.debug('Full set of CONF:')
self.conf.log_opt_values(LOG, logging.DEBUG)
self.catch_sighup = False
return self.run_daemon_loop
def set_rpc_timeout(self, timeout):
for rpc_api in (self.plugin_rpc, self.sg_plugin_rpc,
self.dvr_plugin_rpc, self.state_rpc):
rpc_api.client.timeout = timeout
def _check_agent_configurations(self):
if (self.enable_distributed_routing and self.enable_tunneling
and not self.l2_pop):
raise ValueError(_("DVR deployments for VXLAN/GRE/Geneve "
"underlays require L2-pop to be enabled, "
"in both the Agent and Server side."))
def create_agent_config_map(config):
"""Create a map of agent config parameters.
:param config: an instance of cfg.CONF
:returns: a map of agent configuration parameters
"""
try:
bridge_mappings = n_utils.parse_mappings(config.OVS.bridge_mappings)
except ValueError as e:
raise ValueError(_("Parsing bridge_mappings failed: %s.") % e)
kwargs = dict(
integ_br=config.OVS.integration_bridge,
tun_br=config.OVS.tunnel_bridge,
local_ip=config.OVS.local_ip,
bridge_mappings=bridge_mappings,
polling_interval=config.AGENT.polling_interval,
minimize_polling=config.AGENT.minimize_polling,
tunnel_types=config.AGENT.tunnel_types,
veth_mtu=config.AGENT.veth_mtu,
enable_distributed_routing=config.AGENT.enable_distributed_routing,
l2_population=config.AGENT.l2_population,
arp_responder=config.AGENT.arp_responder,
prevent_arp_spoofing=config.AGENT.prevent_arp_spoofing,
use_veth_interconnection=config.OVS.use_veth_interconnection,
quitting_rpc_timeout=config.AGENT.quitting_rpc_timeout
)
# Verify the tunnel_types specified are valid
for tun in kwargs['tunnel_types']:
if tun not in constants.TUNNEL_NETWORK_TYPES:
            msg = _('Invalid tunnel type specified: %s') % tun
raise ValueError(msg)
if not kwargs['local_ip']:
msg = _('Tunneling cannot be enabled without a valid local_ip.')
raise ValueError(msg)
return kwargs
def validate_local_ip(local_ip):
"""If tunneling is enabled, verify if the ip exists on the agent's host."""
if not cfg.CONF.AGENT.tunnel_types:
return
if not ip_lib.IPWrapper().get_device_by_ip(local_ip):
LOG.error(_LE("Tunneling can't be enabled with invalid local_ip '%s'."
" IP couldn't be found on this host's interfaces."),
local_ip)
raise SystemExit(1)
def prepare_xen_compute():
is_xen_compute_host = 'rootwrap-xen-dom0' in cfg.CONF.AGENT.root_helper
if is_xen_compute_host:
# Force ip_lib to always use the root helper to ensure that ip
# commands target xen dom0 rather than domU.
cfg.CONF.register_opts(ip_lib.OPTS)
cfg.CONF.set_default('ip_lib_force_root', True)
def main(bridge_classes):
try:
agent_config = create_agent_config_map(cfg.CONF)
except ValueError:
LOG.exception(_LE("Agent failed to create agent config map"))
raise SystemExit(1)
prepare_xen_compute()
validate_local_ip(agent_config['local_ip'])
try:
agent = OVSNeutronAgent(bridge_classes, **agent_config)
except (RuntimeError, ValueError) as e:
LOG.error(_LE("%s Agent terminated!"), e)
sys.exit(1)
agent.daemon_loop()
| chitr/neutron | neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py | Python | apache-2.0 | 89,243 | 0.000179 |
"""
Constants for testing purposes
"""
DUMMY_REDIRECT_URL = u'https://example.com/edx/redirect'
| tanmaykm/edx-platform | openedx/core/djangoapps/oauth_dispatch/tests/constants.py | Python | agpl-3.0 | 97 | 0 |
from MaKaC.webinterface.rh import conferenceDisplay
def index(req,**params):
return conferenceDisplay.RHAbstractBook(req).process(params)
def test(req,**params):
return conferenceDisplay.RHAbstractBook(req).process(params)
| belokop-an/agenda-tools | code/htdocs/confAbstractBook.py | Python | gpl-2.0 | 233 | 0.017167 |
############################ Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <[email protected]> #
# Copyright 2012 Zearin <[email protected]> #
# Copyright 2013 AKFish <[email protected]> #
# Copyright 2013 Cameron White <[email protected]> #
# Copyright 2013 Vincent Jacques <[email protected]> #
# Copyright 2014 Vincent Jacques <[email protected]> #
# Copyright 2016 Peter Buckley <[email protected]> #
# Copyright 2016 humbug <bah> #
# Copyright 2018 sfdye <[email protected]> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
import json
class GithubException(Exception):
"""
    Error handling in PyGithub is done with exceptions. This class is the base of all exceptions raised by PyGithub (except :class:`github.GithubException.BadAttributeException`).
Some other types of exceptions might be raised by underlying libraries, for example for network-related issues.
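
    A minimal, illustrative sketch of catching it (the token and repository
    name below are placeholders, not part of this module)::

        from github import Github, GithubException

        gh = Github("<access-token>")
        try:
            repo = gh.get_repo("owner/name")
        except GithubException as exc:
            print(exc.status, exc.data)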
"""
def __init__(self, status, data):
super().__init__()
self.__status = status
self.__data = data
self.args = [status, data]
@property
def status(self):
"""
The status returned by the Github API
"""
return self.__status
@property
def data(self):
"""
The (decoded) data returned by the Github API
"""
return self.__data
def __str__(self):
return "{status} {data}".format(status=self.status, data=json.dumps(self.data))
class BadCredentialsException(GithubException):
"""
Exception raised in case of bad credentials (when Github API replies with a 401 or 403 HTML status)
"""
class UnknownObjectException(GithubException):
"""
Exception raised when a non-existing object is requested (when Github API replies with a 404 HTML status)
"""
class BadUserAgentException(GithubException):
"""
Exception raised when request is sent with a bad user agent header (when Github API replies with a 403 bad user agent HTML status)
"""
class RateLimitExceededException(GithubException):
"""
Exception raised when the rate limit is exceeded (when Github API replies with a 403 rate limit exceeded HTML status)
"""
class BadAttributeException(Exception):
"""
Exception raised when Github returns an attribute with the wrong type.
"""
def __init__(self, actualValue, expectedType, transformationException):
self.__actualValue = actualValue
self.__expectedType = expectedType
self.__transformationException = transformationException
@property
def actual_value(self):
"""
The value returned by Github
"""
return self.__actualValue
@property
def expected_type(self):
"""
The type PyGithub expected
"""
return self.__expectedType
@property
def transformation_exception(self):
"""
The exception raised when PyGithub tried to parse the value
"""
return self.__transformationException
class TwoFactorException(GithubException):
"""
    Exception raised when Github requires a one-time password for two-factor authentication
"""
class IncompletableObject(GithubException):
"""
Exception raised when we can not request an object from Github because the data returned did not include a URL
"""
class MockException(Exception):
"""
Exception raised when there is a missing or invalid data in the mock values
"""
| ahmad88me/PyGithub | github/GithubException.py | Python | lgpl-3.0 | 5,277 | 0.007391 |
import tkinter as tk
import sys
import json
import sys
from tkinter import filedialog
from tkinter import Spinbox
from tkinter import Canvas
from tkinter import RIGHT
from tkinter import ttk
from PIL import Image, ImageTk
from core.readAlgoFile import readAlgo
from core.readAlgoFile import getAlgoList
from core.ConfigParser import parse
from core.modifyConfigFile import addAlgorithme
class GUI(tk.Frame):
def __init__(self, root):
self.photo = None
self.filename=None
self.configFile=("./test.json")
tk.Frame.__init__(self, root)
self.parametre = {}
# define canvas
self.w = Canvas(root, width=200, height=200, borderwidth=3, background="black")
self.w.pack(side=RIGHT)
self.algo={}
# algorithms choice dropdown
lst1 = getAlgoList()
self.dropdownvalue = tk.StringVar()
self.dropdownvalue.set("Please Select")
drop = tk.OptionMenu(root,self.dropdownvalue, *lst1)
drop.pack()
#TODO : Create different frames for
self.dropdownvalue.trace("w", self.callback)
# options for buttons
button_opt = {'fill': 'both' , 'padx': 5, 'pady': 5}
# define buttons
self.selectbtn = tk.Button(self, text='Select Image', command=self.askopenfilename).pack(**button_opt)
self.runalgobtn = tk.Button(self, text='Run algo', command=self.runAlgorithm).pack(side=RIGHT, **button_opt)
self.previewbtn = tk.Button(self, text='Preview', command=self.previewalgorithm).pack(**button_opt)
# define options for opening or saving a file
self.file_opt = options = {}
options['defaultextension'] = '.PNG'
options['filetypes'] = [('all files', '.*'), ('PNG file', '.png'), ('TIFF file', '.tiff')]
options['initialdir'] = '.'
options['initialfile'] = ''
options['parent'] = root
options['title'] = 'Please select an image'
# This is only available on the Macintosh, and only when Navigation Services are installed.
#options['message'] = 'message'
# if you use the multiple file version of the module functions this option is set automatically.
#options['multiple'] = 1
def askopenfilename(self):
"""Returns an opened file in read mode.
This time the dialog just returns a filename and the file is opened by your own code.
"""
# get filename
self.filename = filedialog.askopenfilename(**self.file_opt)
# Code below should put the image in the canvas
if self.filename:
# TODO : get only the filename from the path
            image = Image.open(self.filename)
            # keep a reference on the instance so Tk does not garbage-collect the image
            self.photo = ImageTk.PhotoImage(image)
            #self.w.create_image(0, 0, image=self.photo, anchor='nw')
def popUpAlgo(self,algo,nameAlgo):
"""
        :param algo: algorithm definition list returned by readAlgo(); the
            first entry's "params" dict drives the generated input fields
        :param nameAlgo: name of the selected algorithm
        :return:
"""
button_opt = {'fill': 'both', 'padx': 5, 'pady': 5}
popup=tk.Tk()
popup.wm_title("Select your parameters")
labelList=[]
entryList=[]
paramAlgo=algo[0]["params"]
keyAlgo=list(paramAlgo.keys())
nbrParam=len(paramAlgo)
for i in range(nbrParam):
labelList.append(tk.Label(popup,text=keyAlgo[i]))
entryList.append(tk.Entry(popup))
labelList[i].pack()
entryList[i].pack()
tk.Button(popup, text='Apply',command=lambda:self.appyAlgo(labelList,entryList,nameAlgo)).pack(**button_opt)
tk.Button(popup, text='Done', command=popup.destroy).pack(**button_opt)
popup.mainloop()
def previewalgorithm(self):
# Here, the algo should be run
# in future releases, it should take the canvas image as input
# and output it in the canvas
pass
def appyAlgo(self,labelList,entryList,nameAlgo):
"""
        Collect the parameter values typed in the popup and store them,
        together with the algorithm name, for a later run.
        :param labelList: Label widgets holding the parameter names
        :param entryList: Entry widgets holding the values typed by the user
        :param nameAlgo: name of the selected algorithm
        :return:
"""
for i in range(len(labelList)):
self.parametre[labelList[i].cget("text")]=entryList[i].get()
self.algo["name"]=nameAlgo
self.algo["parametre"]=self.parametre
def runAlgorithm(self):
"""
        Add the selected algorithm and its parameters to the config file
:return:
"""
addAlgorithme(self.configFile,"preprocessing",self.algo)
#parse(self.configFile)
def callback(self, *args):
"""
Callback for our dropdown
:param args:
:return:
"""
print(self.dropdownvalue.get())
self.popUpAlgo(readAlgo(self.dropdownvalue.get()),self.dropdownvalue.get())
if __name__=='__main__':
root = tk.Tk()
GUI(root).pack()
    root.mainloop()
| neuropoly/axonsegpy | axonsegpy/GUI.py | Python | mit | 4,301 | 0.016043 |
# -*- coding: utf-8 -*-
"""
Thread forms
"""
import os, hashlib
from django.conf import settings
from django import forms
from django.utils.translation import ugettext_lazy as _
from django.core.files.storage import FileSystemStorage
from project.manager_frontend.forms import CrispyFormMixin
from project.utils.imports import safe_import_module
BIOS_FS_STORAGE = FileSystemStorage(location=settings.RECALBOX_BIOS_PATH, base_url=settings.MEDIA_URL)
def hashfile(afile, hasher, blocksize=65536):
"""
Efficient way to generate checksum from a file, return hexdigest checksum
Use it like this:
import hashlib
hashfile(open(BIOS_FS_STORAGE.path(YOUR_FILEPATH), 'rb'), hashlib.md5())
    Adapted from http://stackoverflow.com/a/3431835/4884485
"""
buf = afile.read(blocksize)
while len(buf) > 0:
hasher.update(buf)
buf = afile.read(blocksize)
return hasher.hexdigest()
class BiosDeleteForm(CrispyFormMixin, forms.Form):
"""
Form to delete many bios
"""
form_key = 'delete'
form_fieldname_trigger = 'delete_submit'
#crispy_form_helper_path = 'project.manager_frontend.forms.crispies.bios_delete_helper'
#crispy_form_helper_kwargs = {}
def __init__(self, *args, **kwargs):
self.bios_choices = kwargs.pop('bios_choices')
super(BiosDeleteForm, self).__init__(*args, **kwargs)
super(forms.Form, self).__init__(*args, **kwargs)
self.fields['bios_files'] = forms.MultipleChoiceField(choices=self.bios_choices, widget=forms.CheckboxSelectMultiple, required=False)
def save(self):
bios_map = dict(self.bios_choices)
deleted_bios = []
# Delete all selected bios files
for md5hash in self.cleaned_data["bios_files"]:
filename = bios_map.get(md5hash)
if BIOS_FS_STORAGE.exists(filename):
BIOS_FS_STORAGE.delete(filename)
deleted_bios.append(filename)
return deleted_bios
class BiosUploadForm(CrispyFormMixin, forms.Form):
"""
Bios upload form
"""
#crispy_form_helper_path = 'project.manager_frontend.forms.crispies.bios_helper'
#crispy_form_helper_kwargs = {}
form_key = 'upload'
form_fieldname_trigger = 'upload_submit'
bios = forms.FileField(label=_('Bios file'), required=True)
def __init__(self, *args, **kwargs):
self.manifest = kwargs.pop('bios_manifest')
super(BiosUploadForm, self).__init__(*args, **kwargs)
super(forms.Form, self).__init__(*args, **kwargs)
def clean_bios(self):
"""
Validate bios file from Recalbox Manifest
The bios file must have the right file name and the right md5 checksum
"""
bios = self.cleaned_data['bios']
if bios:
#simple_manifest = {filename: md5hash for (md5hash,filename,system_name,exists) in self.manifest}
simple_manifest = {values[0]: md5hash for md5hash,values in self.manifest.items()}
name = os.path.basename(bios.name)
if name not in simple_manifest:
raise forms.ValidationError(_("Your file does not seem to be a supported Bios"))
else:
bios_checksum = hashfile(bios, hashlib.md5())
if bios_checksum != simple_manifest[name]:
raise forms.ValidationError(_("Your file does not have a correct MD5 checksum"))
return bios
def save(self):
bios = self.cleaned_data["bios"]
# Remove the previous file with identical name if any
if BIOS_FS_STORAGE.exists(bios.name):
BIOS_FS_STORAGE.delete(bios.name)
# Save the new uploaded file
BIOS_FS_STORAGE.save(bios.name, bios)
return BIOS_FS_STORAGE.path(bios.name)
| sveetch/recalbox-manager | project/manager_frontend/forms/bios.py | Python | mit | 3,935 | 0.009911 |
#!/usr/bin/env python
from ethereum.tools import tester
from ethereum.tools.tester import TransactionFailed
from pytest import raises
from utils import fix, longTo32Bytes
from constants import LONG, YES, NO
def test_escapeHatch(contractsFixture, cash, market):
controller = contractsFixture.contracts['Controller']
createOrder = contractsFixture.contracts['CreateOrder']
fillOrder = contractsFixture.contracts['FillOrder']
trade = contractsFixture.contracts['Trade']
tradingEscapeHatch = contractsFixture.contracts['TradingEscapeHatch']
yesShareToken = contractsFixture.applySignature('ShareToken', market.getShareToken(YES))
noShareToken = contractsFixture.applySignature('ShareToken', market.getShareToken(NO))
initialTester1ETH = contractsFixture.chain.head_state.get_balance(tester.a1)
initialTester2ETH = contractsFixture.chain.head_state.get_balance(tester.a2)
# create order with cash
orderID = createOrder.publicCreateOrder(contractsFixture.contracts['Constants'].ASK(), fix(1), 6000, market.address, YES, longTo32Bytes(0), longTo32Bytes(0), "42", sender=tester.k1, value=fix('1', '4000'))
assert orderID
# fill order with cash using on-chain matcher
assert trade.publicFillBestOrder(LONG, market.address, YES, fix(1), 6000, sender=tester.k2, value=fix('1', '6000')) == 0
# assert starting values
assert cash.balanceOf(market.address) == fix('10000')
assert noShareToken.balanceOf(tester.a1) == fix(1)
assert yesShareToken.balanceOf(tester.a2) == fix(1)
with raises(TransactionFailed):
tradingEscapeHatch.claimSharesInUpdate(market.address)
# emergency stop and then have everyone liquidate their position
controller.emergencyStop()
curTester1Balance = contractsFixture.chain.head_state.get_balance(tester.a1)
assert tradingEscapeHatch.getFrozenShareValueInMarket(market.address, sender = tester.k1) == initialTester1ETH - curTester1Balance
assert tradingEscapeHatch.claimSharesInUpdate(market.address, sender = tester.k1)
assert tradingEscapeHatch.claimSharesInUpdate(market.address, sender = tester.k2)
# assert final values (should be a zero sum game)
assert contractsFixture.chain.head_state.get_balance(tester.a1) == initialTester1ETH
assert contractsFixture.chain.head_state.get_balance(tester.a2) == initialTester2ETH
assert cash.balanceOf(market.address) == 0
assert noShareToken.balanceOf(tester.a1) == 0
assert yesShareToken.balanceOf(tester.a2) == 0
| AugurProject/augur-core | tests/trading/test_tradingEscapeHatch.py | Python | gpl-3.0 | 2,511 | 0.007168 |
__all__ = ["core"]
| alfiyansys/a-reconsidered-sign | algorithms/__init__.py | Python | gpl-3.0 | 18 | 0.055556 |
# ===============================================================================
# Copyright 2019 ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
class CTXObject(object):
def update(self, ctx):
self.__dict__.update(**ctx)
class EXPObject(CTXObject):
pass
class CMDObject(CTXObject):
pass
class MeasurementCTXObject(object):
def create(self, yd):
for k in (
"baseline",
"multicollect",
"peakcenter",
"equilibration",
"whiff",
"peakhop",
):
try:
c = CTXObject()
c.update(yd[k])
setattr(self, k, c)
except KeyError:
pass
# ============= EOF =============================================
| USGSDenverPychron/pychron | pychron/pyscripts/contexts.py | Python | apache-2.0 | 1,376 | 0 |
# -*- coding: utf-8 -*-
#################################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 Julius Network Solutions SARL <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#################################################################################
from openerp import SUPERUSER_ID
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT as DATETIME_FORMAT
import pytz, datetime
from ast import literal_eval
from onesignal import OneSignal
USER_AUTH_KEY = 'NWQ4ZDNiMjUtYTdhNS00YTBhLTg2MTctYjlmOTM0OTdhZjBi'
YOUR_APP_ID = '3fcdf8f2-9523-4ca7-8489-fefb29cbecc4'
REST_API_KEY = 'MDdjN2JjZDctNWE2ZS00ZGNjLTlkNmEtNTRkYmUwYzBjZjc3'
class im_chat_session(osv.osv):
_inherit = 'im_chat.session'
def add_user(self, cr, uid, uuid, user_id, context=None):
""" add the given user to the given session """
sids = self.search(cr, uid, [('uuid', '=', uuid)], context=context, limit=1)
for session in self.browse(cr, uid, sids, context=context):
if user_id not in [u.id for u in session.user_ids]:
self.write(cr, uid, [session.id], {'user_ids': [(4, user_id)]}, context=context)
# notify the all the channel users and anonymous channel
notifications = []
for channel_user_id in session.user_ids:
info = self.session_info(cr, channel_user_id.id, [session.id], context=context)
notifications.append([(cr.dbname, 'im_chat.session', channel_user_id.id), info])
# Anonymous are not notified when a new user is added : cannot exec session_info as uid = None
info = self.session_info(cr, SUPERUSER_ID, [session.id], context=context)
notifications.append([session.uuid, info])
self.pool['bus.bus'].sendmany(cr, uid, notifications)
# send a message to the conversation
user = self.pool['res.users'].browse(cr, uid, user_id, context=context)
chat_names = self.pool['res.users'].get_chat_name(cr, uid, [user.id], context=context)
notify_msg = _('%s joined the conversation.') % (chat_names[user.id] or user.name)
self.pool["im_chat.message"].post(cr, uid, uid, session.uuid, "meta", notify_msg, context=context)
def users_infos(self, cr, uid, ids, context=None):
""" get the user infos for all the user in the session """
for session in self.pool["im_chat.session"].browse(cr, SUPERUSER_ID, ids, context=context):
users_infos = self.pool["res.users"].read(cr, SUPERUSER_ID, [u.id for u in session.user_ids], ['id','name', 'im_status'], context=context)
return users_infos
class email_template(osv.osv):
_inherit = "email.template"
def _get_default_gateway(self, cr, uid, context=None):
gateway_id = self.pool.get('sms.smsclient').search(cr, uid, [], context=context, limit=1)
return gateway_id and gateway_id[0] or None
_defaults = {
'sms_template': True,
'gateway_id': _get_default_gateway,
}
class res_user(osv.osv):
_inherit = 'res.users'
def _get_schools(self, cr, uid, context=None):
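        """Return the ids of the schools (companies flagged as school) the
        current user is linked to through customer partners that share the
        same mobile number.
        """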
school_ids = []
res_user = self.pool.get('res.users')
res_partner = self.pool.get('res.partner')
user_id = res_user.browse(cr, SUPERUSER_ID, uid, context=context)
if user_id.partner_id and user_id.partner_id.mobile:
mobile = user_id.partner_id.mobile
partner_ids = res_partner.search(cr, SUPERUSER_ID, [('mobile', '=', mobile), ('customer', '=', True)],
context=context)
if (partner_ids):
for partner_id in partner_ids:
partner = res_partner.browse(cr, SUPERUSER_ID, partner_id, context=context)
if partner and partner.company_id and partner.company_id.school:
school_ids.append(partner.company_id.id)
return school_ids
def _get_all_children(self, cr, uid, context=None):
child_ids = []
res_user = self.pool.get('res.users')
res_partner = self.pool.get('res.partner')
user_id = res_user.browse(cr, SUPERUSER_ID, uid, context=context)
if user_id.partner_id and user_id.partner_id.mobile:
mobile = user_id.partner_id.mobile
partner_ids = res_partner.search(cr, SUPERUSER_ID, [('mobile', '=', mobile), ('customer', '=', True)],
context=context)
if (partner_ids):
for partner_id in partner_ids:
partner = res_partner.browse(cr, SUPERUSER_ID, partner_id, context=context)
for child_id in partner.children:
child_ids.append(child_id.id)
return child_ids
def get_relate_schools(self, cr, uid, ids, name, args, context=None):
res = {}
for user_id in self.browse(cr, SUPERUSER_ID, ids, context=context):
res[user_id.id] = self._get_schools(cr, user_id.id, context=context)
return res
def get_all_children(self, cr, uid, ids, name, args, context=None):
res = {}
for user_id in self.browse(cr, SUPERUSER_ID, ids, context=context):
res[user_id.id] = self._get_all_children(cr, user_id.id, context=context)
return res
def select_school(self, cr, uid, school=None, context=None):
if school:
company_id = self.pool.get('res.company').browse(cr, SUPERUSER_ID, school, context=context)
if company_id:
user_data = {
'company_id': company_id.id,
'company_ids': [(6, 0, [company_id.id])],
}
self.pool.get("res.users").write(cr, SUPERUSER_ID, uid, user_data, context=context)
else:
raise osv.except_osv(_('error!'), _("Invalid school selected"))
def get_chat_name(self, cr, uid, user_ids, context=None):
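        """Return a display name for each user id: the matching employee or
        parent (customer partner with the same mobile) name when one exists,
        otherwise the user's own name.
        """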
result = {}
for user_id in user_ids:
user = self.browse(cr, SUPERUSER_ID, user_id, context=context)
company_id = user.company_id.id
name = user.name
employee_ids = self.pool.get('hr.employee').search(cr, SUPERUSER_ID, [("user_id",'=', user_id)], context=context)
employee_id = employee_ids and employee_ids[0] or None
if not employee_id:
mobile = user.partner_id.mobile
if mobile:
# search parent in school
parent_ids = self.pool.get('res.partner').search(cr, SUPERUSER_ID, [("mobile", '=', mobile),
('customer','=', True),
('company_id','=', company_id)], context=context)
if not parent_ids or len(parent_ids) == 0:
# search parent not in school
parent_ids = self.pool.get('res.partner').search(cr, SUPERUSER_ID, [("mobile", '=', mobile),
('customer', '=', True)], context=context)
parent_id = parent_ids and parent_ids[0] or None
if parent_id:
parent = self.pool.get('res.partner').browse(cr, SUPERUSER_ID, parent_id, context=context)
name = parent.name
else:
employee = self.pool.get('hr.employee').browse(cr, SUPERUSER_ID, employee_id, context=context)
name = employee.name
result[user_id] = name
return result
def get_scheduled_subjects(self, cr, uid, class_id, semester, context=None):
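        """Return the subjects the current user teaches in the given class
        and semester; school admins get every subject.
        """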
subjects = []
school_admin = self.pool.get("ir.model.access").check_groups(cr, uid, "school_link.group_school_admin")
if school_admin:
# return all subject
subjects = self.pool.get('school.subject').search(cr, uid, [], context=context)
return subjects
schedule_ids = self.pool.get('school.schedule').search(cr, uid,
[('class_id','=',class_id), ('semester','=', semester)], context=context)
schedule_line_ids = self.pool.get('school.schedule.line').search(cr, uid, [('schedule_id','in', schedule_ids)], context=context)
schedule_lines = self.pool.get('school.schedule.line').browse(cr, uid, schedule_line_ids, context=context)
for line in schedule_lines:
if line.teacher_id and line.teacher_id.user_id and line.teacher_id.user_id.id == uid:
subjects.append(line.subject_id.id)
if len(subjects) > 0:
# Prevent Duplicate by search from database
subjects = self.pool.get('school.subject').search(cr, uid, [('id', 'in', subjects)], context=context)
return subjects
_columns = {
'school_ids': fields.function(get_relate_schools, relation='res.company', type='many2many',
string='Related Schools', readonly=True),
'children': fields.function(get_all_children, relation='hr.employee', type='many2many',
string='Children', readonly=True),
}
def signup(self, cr, uid, values, token=None, context=None):
if context is None:
context = {}
code_obj = self.pool.get('sms.authentication')
if context.has_key('verify_phonenumber'):
if token:
if not values.has_key("written_code"):
raise osv.except_osv(_('error!'), _("No verification code"))
written_code = values['written_code']
code_ids = code_obj.search(cr, uid, [('id', '=', token)], limit=1)
code = code_obj.browse(cr, uid, code_ids[0])
if not code or code.state == 'cancel' or datetime.datetime.strptime(code.validity, "%Y-%m-%d %H:%M:%S") <= datetime.datetime.now() :
raise osv.except_osv(_('error!'), _("Invalid verification code (Expired or Cancelled)."))
if not code_obj.verify_code(written_code):
raise osv.except_osv(_('error!'), _("Wrong verification code"))
return self._create_parent_user(cr, uid, values, context=context)
else:
verfication_code_data = {
'mobile': values['login'],
'country_id': 243,
}
code_id = code_obj.create(cr, uid, verfication_code_data, context=context)
return code_id
else:
return super(res_user, self).signup(cr, uid, values, token=False, context=context)
def _create_parent_user(self, cr, uid, values, context=None):
""" create a new user from the template user """
ir_config_parameter = self.pool.get('ir.config_parameter')
template_user_id = literal_eval(ir_config_parameter.get_param(cr, uid, 'auth_signup.template_user_id', 'False'))
assert template_user_id and self.exists(cr, uid, template_user_id, context=context), 'Signup: invalid template user'
assert values.get('login'), "Signup: no login given for new user"
assert values.get('partner_id') or values.get('name'), "Signup: no name or partner given for new user"
# create a copy of the template user (attached to a specific partner_id if given)
values['active'] = True
context = dict(context or {}, no_reset_password=True)
return self.copy(cr, uid, template_user_id, values, context=context)
class res_partner(osv.osv):
_inherit = 'res.partner'
def _get_default_company(self, cr, uid, context=None):
company_id = self.pool.get('res.users')._get_company(cr, uid, context=context)
if not company_id:
raise osv.except_osv(_('Error!'), _('There is no default company for the current user!'))
return company_id
def get_parent_user(self, cr, uid, ids, name, args, context=None):
res = {}
for partner_id in self.browse(cr, SUPERUSER_ID, ids, context=context):
res[partner_id.id] = {}
mobile = partner_id.mobile
if mobile:
same_mobile_ids = self.search(cr, SUPERUSER_ID, [('mobile','=',mobile)], context=context)
related_users = self.pool.get('res.users').search(cr, SUPERUSER_ID, [('partner_id','in', same_mobile_ids)], context=context)
res[partner_id.id]['parent_user_id'] = related_users and related_users[0] or None
user_partner_id = related_users and self.pool.get('res.users').browse(cr, SUPERUSER_ID, related_users[0], context=context).partner_id.id or None
res[partner_id.id]['parent_partner_id'] = user_partner_id
else:
res[partner_id.id]['parent_user_id'] = None
res[partner_id.id]['parent_partner_id'] = None
return res
_columns = {
'children': fields.many2many('hr.employee', 'parent_student_rel', 'student_id', 'partner_id', 'Childs', readonly=True),
'parent_user_id': fields.function(get_parent_user, multi="parent_user_partner", relation='res.users', type='many2one', string='Related User', readonly=True),
'parent_partner_id': fields.function(get_parent_user, multi="parent_user_partner", relation='res.partner', type='many2one', string='Related User', readonly=True),
'property_account_payable': fields.property(
type='many2one',
relation='account.account',
string="Account Payable",
domain="[('type', '=', 'payable')]",
help="This account will be used instead of the default one as the payable account for the current partner",
required=False),
'property_account_receivable': fields.property(
type='many2one',
relation='account.account',
string="Account Receivable",
domain="[('type', '=', 'receivable')]",
help="This account will be used instead of the default one as the receivable account for the current partner",
required=False),
}
_defaults = {
'company_id': _get_default_company,
}
def create_multi(self, cr, uid, datas, context=None):
if context is None:
context = {}
partner_ids = []
for vals in datas:
partner_id = self.create(cr, uid, vals, context=context)
partner_ids.append(partner_id)
return partner_ids
class res_company(osv.osv):
_inherit = 'res.company'
def get_study_child(self, cr, uid, ids, name, args, context=None):
res = {}
for school_id in self.browse(cr, uid, ids, context=context):
res[school_id.id] = self._get_children(cr, uid, school_id.id, context=context)
return res
def _get_children(self, cr, uid, school, context=None):
childs = []
user_id = self.pool.get('res.users').browse(cr, uid, uid, context=context)
if user_id.partner_id and user_id.partner_id.mobile:
mobile = user_id.partner_id.mobile
partner_ids = self.pool.get('res.partner').search(cr, SUPERUSER_ID, [('mobile', '=', mobile), ('customer', '=', True),('company_id', '=', school)], context=context)
if (partner_ids):
partner_ids = self.pool.get('res.partner').browse(cr, SUPERUSER_ID, partner_ids, context=context)
for partner_id in partner_ids:
if partner_id.children:
for x in partner_id.children:
childs.append(x.id)
return childs
def name_get(self, cr, uid, ids, context=None):
return super(res_company, self).name_get(cr, SUPERUSER_ID, ids, context=context)
def _name_get_fnc(self, cr, uid, ids, prop, unknow_none, context=None):
res = self.name_get(cr, uid, ids, context=context)
return dict(res)
_columns = {
'display_name': fields.function(_name_get_fnc, type="char", string='Display Name', store=True),
'school': fields.boolean('School'),
'children': fields.function(get_study_child, relation='hr.employee', type='many2many', string='Childs in study', readonly=True),
}
class hr_employee(osv.osv):
_name = "hr.employee"
_inherit = ['hr.employee']
def _get_default_company(self, cr, uid, context=None):
company_id = self.pool.get('res.users')._get_company(cr, uid, context=context)
if not company_id:
raise osv.except_osv(_('Error!'), _('There is no default company for the current user!'))
return company_id
def name_get(self, cr, uid, ids, context=None):
res = []
for emp in self.browse(cr, SUPERUSER_ID, ids, context=context):
name = emp.name
last = emp.last_name
if last:
name = last + " " + name
res.append((emp.id, name))
return res
def _name_get_fnc(self, cr, uid, ids, prop, unknow_none, context=None):
res = self.name_get(cr, uid, ids, context=context)
return dict(res)
def get_teach_subjects(self, cr, uid, ids, name, args, context=None):
res = {}
for emp in self.browse(cr, uid, ids, context=context):
subjects = []
schedule_line_ids = self.pool.get('school.schedule.line').search(cr, uid, [('teacher_id','=', emp.id),('class_id.active','=',True)], context=context)
if schedule_line_ids:
for schedule_line in self.pool.get('school.schedule.line').browse(cr, uid, schedule_line_ids, context=context):
if schedule_line.subject_id:
subjects.append(schedule_line.subject_id.id)
subjects = self.pool.get('school.subject').search(cr, uid, [('id','in', subjects)], context=context)
res[emp.id] = subjects
return res
def get_teach_classes(self, cr, uid, ids, name, args, context=None):
res = {}
for emp in self.browse(cr, uid, ids, context=context):
classes = []
schedule_line_ids = self.pool.get('school.schedule.line').search(cr, uid, [('teacher_id', '=', emp.id),
('class_id.active', '=', True)],
context=context)
if schedule_line_ids:
for schedule_line in self.pool.get('school.schedule.line').browse(cr, uid, schedule_line_ids, context=context):
classes.append(schedule_line.class_id.id)
classes = self.pool.get('school.class').search(cr, uid, [('id','in', classes)], context=context)
res[emp.id] = classes
return res
_columns = {
'last_name': fields.char('Last Name'),
'display_name': fields.function(_name_get_fnc, type="char", string='Display Name', store=True),
'home_town': fields.char('Home town'),
'home_address': fields.char('Home Address'),
'teacher': fields.boolean('Is a Teacher'),
'student': fields.boolean('Is a Student'),
'parent_ids': fields.many2many('res.partner', 'parent_student_rel', 'partner_id', 'student_id', 'Parents'),
'class_ids': fields.many2many('school.class', 'school_class_student_rel', 'student_id', 'class_id', 'Classes', domain="[('active','=',True)]"),
'subject_ids': fields.function(get_teach_subjects, relation='school.subject', type='many2many', string='Teaching Subjects', readonly=True),
'teaching_class_ids': fields.function(get_teach_classes, relation='school.class', type='many2many', string='Teaching Classes', readonly=True),
}
_defaults = {
'company_id': _get_default_company,
}
def create_multi(self, cr, uid, datas, context=None):
if context is None:
context = {}
emp_ids = []
for vals in datas:
emp_id = self.create(cr, uid, vals, context=context)
emp_ids.append(emp_id)
return emp_ids
class school_scholarity(osv.osv):
_name = 'school.scholarity'
def _get_default_company(self, cr, uid, context=None):
company_id = self.pool.get('res.users')._get_company(cr, uid, context=context)
if not company_id:
raise osv.except_osv(_('Error!'), _('There is no default company for the current user!'))
return company_id
def _search_active(self, cr, uid, obj, name, args, context):
res = []
for field, operator, value in args:
where = ""
assert value in (True, False), 'Operation not supported'
now = datetime.datetime.now().strftime(DATETIME_FORMAT)
if (operator == '=' and value == True) or (operator == '!=' and value == False):
where = " WHERE date_start <= '%s' AND date_end >= '%s'" % (now, now)
else:
where = " WHERE date_start > '%s' OR date_end < '%s'" % (now, now)
cr.execute('select id FROM school_scholarity %s' % (where,))
res_ids = [x[0] for x in cr.fetchall()]
res.append(('id', "in", res_ids))
return res
def _is_active(self, cr, uid, ids, field_name, arg, context=None):
res = {}
now = datetime.datetime.now()
for scholarity in self.browse(cr, uid, ids, context=context):
res[scholarity.id] = True
if scholarity.date_start:
ds = datetime.datetime.strptime(scholarity.date_start, '%Y-%m-%d %H:%M:%S')
if ds > now:
res[scholarity.id] = False
if scholarity.date_end:
de = datetime.datetime.strptime(scholarity.date_end, '%Y-%m-%d %H:%M:%S')
if de < now:
res[scholarity.id] = False
return res
_columns = {
'name': fields.char('Scholarity Year', required=True),
'company_id':fields.many2one('res.company', 'School', domain="[('school','=',True)]"),
'date_start': fields.datetime('Start Date', required=True),
'date_end': fields.datetime('End Date', required=True),
'active': fields.function(_is_active, store=False, string='Active', type='boolean', fnct_search=_search_active),
}
_defaults = {
'company_id': _get_default_company,
}
class school_class_group(osv.osv):
_name = 'school.class.group'
def _get_default_company(self, cr, uid, context=None):
company_id = self.pool.get('res.users')._get_company(cr, uid, context=context)
if not company_id:
raise osv.except_osv(_('Error!'), _('There is no default company for the current user!'))
return company_id
_columns = {
'name': fields.char('Name', required=True),
'company_id':fields.many2one('res.company', 'School', required=True, domain="[('school','=',True)]"),
'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying a list "),
}
_defaults = {
'company_id': _get_default_company,
}
def _check_duplicate(self, cr, uid, ids, context=None):
for group in self.browse(cr, uid, ids, context=None):
domain = [
('name', '=', group.name),
('company_id','=', group.company_id.id)
]
count = self.search_count(cr, uid, domain, context=context)
if count > 1:
return False
return True
_constraints = [
(_check_duplicate, 'Duplicate group name',['name']),
]
class school_subject(osv.osv):
_name = 'school.subject'
def _get_default_company(self, cr, uid, context=None):
company_id = self.pool.get('res.users')._get_company(cr, uid, context=context)
if not company_id:
raise osv.except_osv(_('Error!'), _('There is no default company for the current user!'))
return company_id
_columns = {
'name': fields.char('Name', required=True),
'company_id':fields.many2one('res.company', 'School', domain="[('school','=',True)]"),
'weight': fields.integer('Weight', help="Define weight to calculate average"),
'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying a list "),
}
_defaults = {
'company_id': _get_default_company,
}
def _check_duplicate(self, cr, uid, ids, context=None):
for subject in self.browse(cr, uid, ids, context=None):
domain = [
('name', '=', subject.name),
('company_id','=', subject.company_id and subject.company_id.id or False)
]
count = self.search_count(cr, uid, domain, context=context)
if count > 1:
return False
return True
_constraints = [
(_check_duplicate, 'Duplicate subject name',['name']),
]
class school_class(osv.osv):
_name = 'school.class'
def _get_default_company(self, cr, uid, context=None):
company_id = self.pool.get('res.users')._get_company(cr, uid, context=context)
if not company_id:
raise osv.except_osv(_('Error!'), _('There is no default company for the current user!'))
return company_id
def _get_all_teachers(self, cr, uid, ids, name, args, context=None):
res = {}
for class_id in self.browse(cr, uid, ids, context=context):
teacher_ids = []
teacher_ids.append(class_id.teacher_id.id) # add primary teacher
schedule_line_ids = self.pool.get('school.schedule.line').search(cr, uid,[('schedule_id.class_id', '=', class_id.id)], context=context)
schedule_lines = self.pool.get('school.schedule.line').browse(cr, uid, schedule_line_ids, context=context)
for schedule_line in schedule_lines:
teacher_ids.append(schedule_line.teacher_id.id) # add other teachers
all_teacher_ids = self.pool.get('hr.employee').search(cr, uid, [('id', 'in', teacher_ids)], context=context)
res[class_id.id] = all_teacher_ids
return res
def _get_all_parents(self, cr, uid, ids, name, args, context=None):
res = {}
for class_id in self.browse(cr, uid, ids, context=context):
parent_ids = []
for student in class_id.student_ids:
student_parent_ids = student.parent_ids
for parent_id in student_parent_ids:
parent_ids.append(parent_id.id)
all_parent_ids = self.pool.get('res.partner').search(cr, uid, [('id', 'in', parent_ids)], context=context)
res[class_id.id] = all_parent_ids
return res
def _check_duplicate(self, cr, uid, ids, context=None):
        for school_class in self.browse(cr, uid, ids, context=context):
domain = [
('name', '=', school_class.name),
('year_id', '=', school_class.year_id.id),
('company_id','=', school_class.company_id.id)
]
count = self.search_count(cr, uid, domain, context=context)
if count > 1:
return False
return True
_columns = {
'name': fields.char('Name', required=True),
'year_id':fields.many2one('school.scholarity', 'Year', required=True),
'company_id':fields.many2one('res.company', 'School', required=True, domain="[('school','=',True)]"),
'group_id':fields.many2one('school.class.group', 'Group', required=True, ondelete="cascade"),
'teacher_id':fields.many2one('hr.employee', 'Responsible', domain="[('teacher','=',True)]", ondelete="cascade"),
'student_ids':fields.many2many('hr.employee', 'school_class_student_rel', 'class_id', 'student_id', 'Students', domain="[('student','=',True)]"),
'active': fields.related('year_id', 'active', type='boolean', string='Active'),
'teacher_ids': fields.function(_get_all_teachers, relation='hr.employee', type='many2many', string='Teachers',readonly=True),
'parent_ids': fields.function(_get_all_parents, relation='res.partner', type='many2many', string='Parents', readonly=True),
}
_constraints = [
(_check_duplicate, 'Duplicate class name', ['name']),
]
_defaults = {
'company_id': _get_default_company,
}
class school_exam_move(osv.osv):
_name = 'school.exam.move'
def _get_default_teacher(self, cr, uid, context=None):
teacher_ids = self.pool.get('hr.employee').search(cr, uid, [('teacher', '=', True), ('user_id', "=", uid)],context=context)
return teacher_ids and teacher_ids[0] or None
_columns = {
'name': fields.char('Name'),
'class_id': fields.many2one('school.class', 'Class', required=True, ondelete="cascade"),
'teacher_id': fields.many2one('hr.employee', 'Teacher', required=True, domain=[('teacher', '=', True)], ondelete="cascade"),
'student_id': fields.many2one('hr.employee', 'Student', required=True, domain=[('student', '=', True)], ondelete="cascade"),
'mark': fields.float('Mark'),
'date_exam': fields.datetime('Date', required=True),
'subject_id': fields.many2one('school.subject', 'Subject', ondelete="cascade"),
'weight': fields.integer('Weight', help="Define weight to calculate average"),
'type': fields.selection([
('w1', 'Weight 1'),
('w2', 'Weight 2'),
('final', 'Final Exam'),
('average', 'Average'),
('conduct', 'Conduct'),
('overall', 'Overall'),
], 'Type', required=True, select=True),
'semester': fields.selection([
('first', 'First Semester'),
('second', 'Second Semester'),
], 'Semester', required=True, select=True),
'sequence': fields.integer('Sequence', help="Sort by order"),
'company_id': fields.related('class_id', 'company_id', type='many2one', relation='res.company', string='School'),
}
_defaults = {
'name': "/",
'teacher_id': _get_default_teacher,
}
def create_multi(self, cr, uid, moves, context=None):
if context is None:
context = {}
move_ids = []
for move in moves:
move_id = self.create(cr, uid, move, context=context)
move_ids.append(move_id)
return move_ids
def write_multi(self, cr, uid, moves, context=None):
if context is None:
context = {}
for move in moves:
            if 'id' in move:
self.write(cr, uid, [move['id']], move, context=context)
class school_schedule(osv.osv):
_name = 'school.schedule'
_columns = {
'name': fields.char('Name'),
'class_id': fields.many2one('school.class', 'Class', required=True, ondelete="cascade"),
'semester': fields.selection([
('first', 'First Semester'),
('second', 'Second Semester'),
], 'Semester', required=True, select=True),
        'lines': fields.one2many('school.schedule.line', 'schedule_id', 'Schedule Lines'),
'company_id': fields.related('class_id', 'company_id', relation='res.company', type='many2one', string='School'),
}
_defaults = {
'name': '/',
}
def _check_duplicate(self, cr, uid, ids, context=None):
        for schedule in self.browse(cr, uid, ids, context=context):
domain = [
('class_id', '=', schedule.class_id.id),
('semester', '=', schedule.semester),
('company_id','=', schedule.company_id.id)
]
count = self.search_count(cr, uid, domain, context=context)
if count>1:
return False
return True
_constraints = [
#(_check_duplicate, 'One schedule allowed only for a semester of class',['class_id','semester','company_id']),
]
class school_schedule_line(osv.osv):
_name = 'school.schedule.line'
_columns = {
'name': fields.char('Name'),
'schedule_id': fields.many2one('school.schedule', 'Schedule Ref', required=True, ondelete='cascade', select=True),
'week_day': fields.selection([
('mon', 'Monday'),
('tue', 'Tuesday'),
('wed', 'Wednesday'),
('thu', 'Thursday'),
('fri', 'Friday'),
('sat', 'Saturday'),
('sun', 'Sunday'),
], 'Week Day', required=True, select=True),
'class_id': fields.related('schedule_id', 'class_id', type='many2one', relation='school.class', store=True, string='Class', ondelete="cascade"),
'subject_id': fields.many2one('school.subject', 'Subject', required=True, ondelete="cascade"),
'teacher_id': fields.many2one('hr.employee', 'Teacher', required=True, domain="[('teacher','=',True)]", ondelete="cascade"),
'begin': fields.datetime('Begin', required=True),
'end': fields.datetime('End', required=True),
}
_defaults = {
'name': '/',
}
class im_chat_message(osv.Model):
_inherit = 'im_chat.message'
_columns = {
'delay_time': fields.datetime('Delay Time'),
'self_notify': fields.boolean("Is Self Notification"),
}
_defaults = {
'self_notify': False,
}
def get_messages(self, cr, uid, uuid, last_id=False, limit=20, context=None):
""" get messages (id desc) from given last_id in the given session """
Session = self.pool['im_chat.session']
if Session.is_in_session(cr, uid, uuid, uid, context=context):
domain = [("to_id.uuid", "=", uuid),("delay_time", '=', None)]
if last_id:
                domain.append(("id", "<", last_id))
messages = self.search_read(cr, uid, domain, ['id', 'create_date','to_id','from_id', 'type', 'message'], limit=limit, context=context)
for message in messages:
timeStamp = fields.datetime.context_timestamp(cr, uid, datetime.datetime.strptime(message['create_date'],
DATETIME_FORMAT),
context=context)
message['create_date'] = timeStamp.strftime(DATETIME_FORMAT)
return messages
return False
def post_delay(self, cr, uid, from_uid, uuid, message_type, message_content, delayTime, context=None):
""" post and broadcast a message, return the message id """
message_id = False
Session = self.pool['im_chat.session']
session_ids = Session.search(cr, uid, [('uuid','=',uuid)], context=context)
notifications = []
for session in Session.browse(cr, uid, session_ids, context=context):
vals = {
"from_id": from_uid,
"to_id": session.id,
"type": message_type,
"message": _('You have just sent a timer message.'),
"self_notify": True,
}
# Create self notification first
message_id = self.create(cr, uid, vals, context=context)
# broadcast it to channel (anonymous users) and users_ids
data = self.read(cr, uid, [message_id], ['from_id', 'to_id', 'create_date', 'type', 'message'], context=context)[0]
notifications.append([uuid, data])
notifications.append([(cr.dbname, 'im_chat.session', from_uid), data])
self.pool['bus.bus'].sendmany(cr, uid, notifications)
# build the timer message
if context and context.get('tz'):
tz_name = context['tz']
else:
user = self.pool['res.users'].browse(cr, SUPERUSER_ID, uid)
tz_name = user.tz
if tz_name:
local = pytz.timezone(tz_name)
delayTime = datetime.datetime.strptime(delayTime, DATETIME_FORMAT)
local_dt = local.localize(delayTime, is_dst=None)
utc_dt = local_dt.astimezone(pytz.UTC)
delayTime = utc_dt.strftime(DATETIME_FORMAT)
vals = {
"from_id": from_uid,
"to_id": session.id,
"type": message_type,
"message": message_content,
"delay_time": delayTime,
}
# save it & broastcast later
message_id = self.create(cr, uid, vals, context=context)
return message_id
def broadcast_delay(self, cr, uid, context=None):
one_signal = OneSignal(REST_API_KEY, YOUR_APP_ID)
now = datetime.datetime.now().strftime(DATETIME_FORMAT)
if context and context.get('tz'):
tz_name = context['tz']
else:
user = self.pool['res.users'].browse(cr, SUPERUSER_ID, uid)
tz_name = user.tz
if tz_name:
local = pytz.timezone(tz_name)
now = datetime.datetime.strptime(now, DATETIME_FORMAT)
local_dt = local.localize(now, is_dst=None)
utc_dt = local_dt.astimezone(pytz.UTC)
now = utc_dt.strftime(DATETIME_FORMAT)
delay_ids = self.search(cr, SUPERUSER_ID, [('delay_time','<=', now)], context=context)
        if delay_ids:
for message_id in delay_ids:
notifications = []
# broadcast it to channel (anonymous users) and users_ids
data = self.read(cr, SUPERUSER_ID, [message_id], ['from_id', 'to_id', 'create_date', 'type', 'message'],
context=context)[0]
uuid = data['to_id']
session_ids = self.pool['im_chat.session'].search(cr, SUPERUSER_ID, [('uuid','=', uuid)], context=context, limit=1)
session_id = session_ids and session_ids[0] or None
if session_id:
session = self.pool['im_chat.session'].browse(cr, SUPERUSER_ID, session_id, context=context)
notifications.append([uuid, data])
filterTag = []
for user in session.user_ids:
notifications.append([(cr.dbname, 'im_chat.session', user.id), data])
if len(filterTag) > 0:
filterTag.append({"operator": "OR"});
filterTag.append({"field": "tag", "key": "user_id", "relation": "=", "value": str(user.id)});
self.pool['bus.bus'].sendmany(cr, uid, notifications)
one_signal.create_notification(data['message'], filters=filterTag, included_segments=None)
# Clear delaytime
self.write(cr, SUPERUSER_ID, delay_ids, {'delay_time': None}, context=context) | ncliam/serverpos | openerp/custom_modules/school_link/school.py | Python | agpl-3.0 | 39,903 | 0.00599 |
#!/usr/bin/python -u
#
# imports the API description and fills up a database with
# name relevance to modules, functions or web pages
#
# Operation needed:
# =================
#
# install mysqld, the python wrappers for mysql and libxml2, start mysqld
# - mysql-server
# - mysql
# - php-mysql
# - MySQL-python
# Change the root passwd of mysql:
# mysqladmin -u root password new_password
# Create the new database libvir
# mysqladmin -p create libvir
# Create a database user 'veillard' and give him password access
# change veillard and abcde with the right user name and passwd
# mysql -p
# password:
# mysql> GRANT ALL PRIVILEGES ON libvir TO veillard@localhost
# IDENTIFIED BY 'abcde' WITH GRANT OPTION;
# mysql> GRANT ALL PRIVILEGES ON libvir.* TO veillard@localhost
# IDENTIFIED BY 'abcde' WITH GRANT OPTION;
#
# As the user check the access:
# mysql -p libvir
# Enter password:
# Welcome to the MySQL monitor....
# mysql> use libvir
# Database changed
# mysql> quit
# Bye
#
# Then run the script in the doc subdir; it will create the symbols and
# word tables and populate them with information extracted from
# the libvirt-api.xml API description, and make them accessible read-only
# to nobody@localhost, the user Apache is expected to run as
#
# On the Apache configuration, make sure you have php support enabled
#
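#
# Typical invocation (a sketch; it assumes the MySQL_PASS environment
# variable holds the password of the database user created above):
#
#   MySQL_PASS=abcde python index.py --API --docs
#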
import MySQLdb
import libxml2
import sys
import string
import os
#
# We are not interested in parsing errors here
#
def callback(ctx, str):
return
libxml2.registerErrorHandler(callback, None)
#
# The dictionary of tables required and the SQL command needed
# to create them
#
TABLES={
"symbols" : """CREATE TABLE symbols (
name varchar(255) BINARY NOT NULL,
module varchar(255) BINARY NOT NULL,
type varchar(25) NOT NULL,
descr varchar(255),
UNIQUE KEY name (name),
KEY module (module))""",
"words" : """CREATE TABLE words (
name varchar(50) BINARY NOT NULL,
symbol varchar(255) BINARY NOT NULL,
relevance int,
KEY name (name),
KEY symbol (symbol),
UNIQUE KEY ID (name, symbol))""",
"wordsHTML" : """CREATE TABLE wordsHTML (
name varchar(50) BINARY NOT NULL,
resource varchar(255) BINARY NOT NULL,
section varchar(255),
id varchar(50),
relevance int,
KEY name (name),
KEY resource (resource),
UNIQUE KEY ref (name, resource))""",
"wordsArchive" : """CREATE TABLE wordsArchive (
name varchar(50) BINARY NOT NULL,
ID int(11) NOT NULL,
relevance int,
KEY name (name),
UNIQUE KEY ref (name, ID))""",
"pages" : """CREATE TABLE pages (
resource varchar(255) BINARY NOT NULL,
title varchar(255) BINARY NOT NULL,
UNIQUE KEY name (resource))""",
"archives" : """CREATE TABLE archives (
ID int(11) NOT NULL auto_increment,
resource varchar(255) BINARY NOT NULL,
title varchar(255) BINARY NOT NULL,
UNIQUE KEY id (ID,resource(255)),
INDEX (ID),
INDEX (resource))""",
"Queries" : """CREATE TABLE Queries (
ID int(11) NOT NULL auto_increment,
Value varchar(50) NOT NULL,
Count int(11) NOT NULL,
UNIQUE KEY id (ID,Value(35)),
INDEX (ID))""",
"AllQueries" : """CREATE TABLE AllQueries (
ID int(11) NOT NULL auto_increment,
Value varchar(50) NOT NULL,
Count int(11) NOT NULL,
UNIQUE KEY id (ID,Value(35)),
INDEX (ID))""",
}
#
# The XML API description file to parse
#
API="libvirt-api.xml"
DB=None
#########################################################################
# #
# MySQL database interfaces #
# #
#########################################################################
def createTable(db, name):
global TABLES
if db is None:
return -1
if name is None:
return -1
c = db.cursor()
ret = c.execute("DROP TABLE IF EXISTS %s" % (name))
if ret == 1:
print "Removed table %s" % (name)
print "Creating table %s" % (name)
try:
ret = c.execute(TABLES[name])
except:
print "Failed to create table %s" % (name)
return -1
return ret
def checkTables(db, verbose = 1):
global TABLES
if db is None:
return -1
c = db.cursor()
nbtables = c.execute("show tables")
if verbose:
print "Found %d tables" % (nbtables)
tables = {}
i = 0
while i < nbtables:
l = c.fetchone()
name = l[0]
tables[name] = {}
i = i + 1
for table in TABLES.keys():
if not tables.has_key(table):
print "table %s missing" % (table)
createTable(db, table)
try:
ret = c.execute("SELECT count(*) from %s" % table)
row = c.fetchone()
if verbose:
print "Table %s contains %d records" % (table, row[0])
except:
print "Troubles with table %s : repairing" % (table)
ret = c.execute("repair table %s" % table)
print "repairing returned %d" % (ret)
ret = c.execute("SELECT count(*) from %s" % table)
row = c.fetchone()
print "Table %s contains %d records" % (table, row[0])
if verbose:
print "checkTables finished"
# make sure apache can access the tables read-only
try:
ret = c.execute("GRANT SELECT ON libvir.* TO nobody@localhost")
ret = c.execute("GRANT INSERT,SELECT,UPDATE ON libvir.Queries TO nobody@localhost")
except:
pass
return 0
def openMySQL(db="libvir", passwd=None, verbose = 1):
global DB
if passwd is None:
try:
passwd = os.environ["MySQL_PASS"]
except:
print "No password available, set environment MySQL_PASS"
sys.exit(1)
DB = MySQLdb.connect(passwd=passwd, db=db)
if DB is None:
return -1
ret = checkTables(DB, verbose)
return ret
def updateWord(name, symbol, relevance):
global DB
if DB is None:
openMySQL()
if DB is None:
return -1
if name is None:
return -1
if symbol is None:
return -1
c = DB.cursor()
try:
ret = c.execute(
"""INSERT INTO words (name, symbol, relevance) VALUES ('%s','%s', %d)""" %
(name, symbol, relevance))
except:
try:
ret = c.execute(
"""UPDATE words SET relevance = %d where name = '%s' and symbol = '%s'""" %
(relevance, name, symbol))
except:
print "Update word (%s, %s, %s) failed command" % (name, symbol, relevance)
print "UPDATE words SET relevance = %d where name = '%s' and symbol = '%s'" % (relevance, name, symbol)
print sys.exc_type, sys.exc_value
return -1
return ret
def updateSymbol(name, module, type, desc):
global DB
updateWord(name, name, 50)
if DB is None:
openMySQL()
if DB is None:
return -1
if name is None:
return -1
if module is None:
return -1
if type is None:
return -1
try:
desc = string.replace(desc, "'", " ")
l = string.split(desc, ".")
desc = l[0]
desc = desc[0:99]
except:
desc = ""
c = DB.cursor()
try:
ret = c.execute(
"""INSERT INTO symbols (name, module, type, descr) VALUES ('%s','%s', '%s', '%s')""" %
(name, module, type, desc))
except:
try:
ret = c.execute(
"""UPDATE symbols SET module='%s', type='%s', descr='%s' where name='%s'""" %
(module, type, desc, name))
except:
print "Update symbol (%s, %s, %s) failed command" % (name, module, type)
print """UPDATE symbols SET module='%s', type='%s', descr='%s' where name='%s'""" % (module, type, desc, name)
print sys.exc_type, sys.exc_value
return -1
return ret
def addFunction(name, module, desc = ""):
return updateSymbol(name, module, 'function', desc)
def addMacro(name, module, desc = ""):
return updateSymbol(name, module, 'macro', desc)
def addEnum(name, module, desc = ""):
return updateSymbol(name, module, 'enum', desc)
def addStruct(name, module, desc = ""):
return updateSymbol(name, module, 'struct', desc)
def addConst(name, module, desc = ""):
return updateSymbol(name, module, 'const', desc)
def addType(name, module, desc = ""):
return updateSymbol(name, module, 'type', desc)
def addFunctype(name, module, desc = ""):
return updateSymbol(name, module, 'functype', desc)
def addPage(resource, title):
global DB
if DB is None:
openMySQL()
if DB is None:
return -1
if resource is None:
return -1
c = DB.cursor()
try:
ret = c.execute(
"""INSERT INTO pages (resource, title) VALUES ('%s','%s')""" %
(resource, title))
except:
try:
ret = c.execute(
"""UPDATE pages SET title='%s' WHERE resource='%s'""" %
(title, resource))
except:
print "Update symbol (%s, %s, %s) failed command" % (name, module, type)
print """UPDATE pages SET title='%s' WHERE resource='%s'""" % (title, resource)
print sys.exc_type, sys.exc_value
return -1
return ret
def updateWordHTML(name, resource, desc, id, relevance):
global DB
if DB is None:
openMySQL()
if DB is None:
return -1
if name is None:
return -1
if resource is None:
return -1
if id is None:
id = ""
if desc is None:
desc = ""
else:
try:
desc = string.replace(desc, "'", " ")
desc = desc[0:99]
except:
desc = ""
c = DB.cursor()
try:
ret = c.execute(
"""INSERT INTO wordsHTML (name, resource, section, id, relevance) VALUES ('%s','%s', '%s', '%s', '%d')""" %
(name, resource, desc, id, relevance))
except:
try:
ret = c.execute(
"""UPDATE wordsHTML SET section='%s', id='%s', relevance='%d' where name='%s' and resource='%s'""" %
(desc, id, relevance, name, resource))
except:
print "Update symbol (%s, %s, %d) failed command" % (name, resource, relevance)
print """UPDATE wordsHTML SET section='%s', id='%s', relevance='%d' where name='%s' and resource='%s'""" % (desc, id, relevance, name, resource)
print sys.exc_type, sys.exc_value
return -1
return ret
def checkXMLMsgArchive(url):
global DB
if DB is None:
openMySQL()
if DB is None:
return -1
if url is None:
return -1
c = DB.cursor()
try:
ret = c.execute(
"""SELECT ID FROM archives WHERE resource='%s'""" % (url))
row = c.fetchone()
if row is None:
return -1
except:
return -1
return row[0]
def addXMLMsgArchive(url, title):
global DB
if DB is None:
openMySQL()
if DB is None:
return -1
if url is None:
return -1
if title is None:
title = ""
else:
title = string.replace(title, "'", " ")
title = title[0:99]
c = DB.cursor()
try:
cmd = """INSERT INTO archives (resource, title) VALUES ('%s','%s')""" % (url, title)
ret = c.execute(cmd)
cmd = """SELECT ID FROM archives WHERE resource='%s'""" % (url)
ret = c.execute(cmd)
row = c.fetchone()
if row is None:
print "addXMLMsgArchive failed to get the ID: %s" % (url)
return -1
except:
print "addXMLMsgArchive failed command: %s" % (cmd)
return -1
    return int(row[0])
def updateWordArchive(name, id, relevance):
global DB
if DB is None:
openMySQL()
if DB is None:
return -1
if name is None:
return -1
if id is None:
return -1
c = DB.cursor()
try:
ret = c.execute(
"""INSERT INTO wordsArchive (name, id, relevance) VALUES ('%s', '%d', '%d')""" %
(name, id, relevance))
except:
try:
ret = c.execute(
"""UPDATE wordsArchive SET relevance='%d' where name='%s' and ID='%d'""" %
(relevance, name, id))
except:
print "Update word archive (%s, %d, %d) failed command" % (name, id, relevance)
print """UPDATE wordsArchive SET relevance='%d' where name='%s' and ID='%d'""" % (relevance, name, id)
print sys.exc_type, sys.exc_value
return -1
return ret
#########################################################################
# #
# Word dictionary and analysis routines #
# #
#########################################################################
#
# top 100 English words, excluding those shorter than 3 characters, plus our own set
#
dropWords = {
'the':0, 'this':0, 'can':0, 'man':0, 'had':0, 'him':0, 'only':0,
'and':0, 'not':0, 'been':0, 'other':0, 'even':0, 'are':0, 'was':0,
'new':0, 'most':0, 'but':0, 'when':0, 'some':0, 'made':0, 'from':0,
'who':0, 'could':0, 'after':0, 'that':0, 'will':0, 'time':0, 'also':0,
'have':0, 'more':0, 'these':0, 'did':0, 'was':0, 'two':0, 'many':0,
'they':0, 'may':0, 'before':0, 'for':0, 'which':0, 'out':0, 'then':0,
'must':0, 'one':0, 'through':0, 'with':0, 'you':0, 'said':0,
'first':0, 'back':0, 'were':0, 'what':0, 'any':0, 'years':0, 'his':0,
'her':0, 'where':0, 'all':0, 'its':0, 'now':0, 'much':0, 'she':0,
'about':0, 'such':0, 'your':0, 'there':0, 'into':0, 'like':0, 'may':0,
'would':0, 'than':0, 'our':0, 'well':0, 'their':0, 'them':0, 'over':0,
'down':0,
'net':0, 'www':0, 'bad':0, 'Okay':0, 'bin':0, 'cur':0,
}
wordsDict = {}
wordsDictHTML = {}
wordsDictArchive = {}
def cleanupWordsString(str):
str = string.replace(str, ".", " ")
str = string.replace(str, "!", " ")
str = string.replace(str, "?", " ")
str = string.replace(str, ",", " ")
str = string.replace(str, "'", " ")
str = string.replace(str, '"', " ")
str = string.replace(str, ";", " ")
str = string.replace(str, "(", " ")
str = string.replace(str, ")", " ")
str = string.replace(str, "{", " ")
str = string.replace(str, "}", " ")
str = string.replace(str, "<", " ")
str = string.replace(str, ">", " ")
str = string.replace(str, "=", " ")
str = string.replace(str, "/", " ")
str = string.replace(str, "*", " ")
str = string.replace(str, ":", " ")
str = string.replace(str, "#", " ")
str = string.replace(str, "\\", " ")
str = string.replace(str, "\n", " ")
str = string.replace(str, "\r", " ")
str = string.replace(str, "\xc2", " ")
str = string.replace(str, "\xa0", " ")
return str
def cleanupDescrString(str):
str = string.replace(str, "'", " ")
str = string.replace(str, "\n", " ")
str = string.replace(str, "\r", " ")
str = string.replace(str, "\xc2", " ")
str = string.replace(str, "\xa0", " ")
l = string.split(str)
    str = string.join(l)
return str
def splitIdentifier(str):
ret = []
while str != "":
cur = string.lower(str[0])
str = str[1:]
if ((cur < 'a') or (cur > 'z')):
continue
while (str != "") and (str[0] >= 'A') and (str[0] <= 'Z'):
cur = cur + string.lower(str[0])
str = str[1:]
while (str != "") and (str[0] >= 'a') and (str[0] <= 'z'):
cur = cur + str[0]
str = str[1:]
while (str != "") and (str[0] >= '0') and (str[0] <= '9'):
str = str[1:]
ret.append(cur)
return ret
def addWord(word, module, symbol, relevance):
global wordsDict
if word is None or len(word) < 3:
return -1
if module is None or symbol is None:
return -1
if dropWords.has_key(word):
return 0
if ord(word[0]) > 0x80:
return 0
if wordsDict.has_key(word):
d = wordsDict[word]
if d is None:
return 0
if len(d) > 500:
wordsDict[word] = None
return 0
try:
relevance = relevance + d[(module, symbol)]
except:
pass
else:
wordsDict[word] = {}
wordsDict[word][(module, symbol)] = relevance
return relevance
def addString(str, module, symbol, relevance):
if str is None or len(str) < 3:
return -1
ret = 0
str = cleanupWordsString(str)
l = string.split(str)
for word in l:
if len(word) > 2:
ret = ret + addWord(word, module, symbol, 5)
return ret
def addWordHTML(word, resource, id, section, relevance):
global wordsDictHTML
if word is None or len(word) < 3:
return -1
if resource is None or section is None:
return -1
if dropWords.has_key(word):
return 0
if ord(word[0]) > 0x80:
return 0
section = cleanupDescrString(section)
if wordsDictHTML.has_key(word):
d = wordsDictHTML[word]
if d is None:
print "skipped %s" % (word)
return 0
try:
(r,i,s) = d[resource]
if i is not None:
id = i
if s is not None:
section = s
relevance = relevance + r
except:
pass
else:
wordsDictHTML[word] = {}
d = wordsDictHTML[word]
d[resource] = (relevance, id, section)
return relevance
def addStringHTML(str, resource, id, section, relevance):
if str is None or len(str) < 3:
return -1
ret = 0
str = cleanupWordsString(str)
l = string.split(str)
for word in l:
if len(word) > 2:
try:
r = addWordHTML(word, resource, id, section, relevance)
if r < 0:
print "addWordHTML failed: %s %s" % (word, resource)
ret = ret + r
except:
print "addWordHTML failed: %s %s %d" % (word, resource, relevance)
print sys.exc_type, sys.exc_value
return ret
def addWordArchive(word, id, relevance):
global wordsDictArchive
if word is None or len(word) < 3:
return -1
if id is None or id == -1:
return -1
if dropWords.has_key(word):
return 0
if ord(word[0]) > 0x80:
return 0
if wordsDictArchive.has_key(word):
d = wordsDictArchive[word]
if d is None:
print "skipped %s" % (word)
return 0
try:
r = d[id]
relevance = relevance + r
except:
pass
else:
wordsDictArchive[word] = {}
d = wordsDictArchive[word]
d[id] = relevance
return relevance
def addStringArchive(str, id, relevance):
if str is None or len(str) < 3:
return -1
ret = 0
str = cleanupWordsString(str)
l = string.split(str)
for word in l:
i = len(word)
if i > 2:
try:
r = addWordArchive(word, id, relevance)
if r < 0:
print "addWordArchive failed: %s %s" % (word, id)
else:
ret = ret + r
except:
print "addWordArchive failed: %s %s %d" % (word, id, relevance)
print sys.exc_type, sys.exc_value
return ret
#########################################################################
# #
# XML API description analysis #
# #
#########################################################################
def loadAPI(filename):
doc = libxml2.parseFile(filename)
print "loaded %s" % (filename)
return doc
def foundExport(file, symbol):
if file is None:
return 0
if symbol is None:
return 0
addFunction(symbol, file)
l = splitIdentifier(symbol)
for word in l:
addWord(word, file, symbol, 10)
return 1
def analyzeAPIFile(top):
count = 0
name = top.prop("name")
cur = top.children
while cur is not None:
if cur.type == 'text':
cur = cur.next
continue
if cur.name == "exports":
count = count + foundExport(name, cur.prop("symbol"))
else:
print "unexpected element %s in API doc <file name='%s'>" % (name)
cur = cur.next
return count
def analyzeAPIFiles(top):
count = 0
cur = top.children
while cur is not None:
if cur.type == 'text':
cur = cur.next
continue
if cur.name == "file":
count = count + analyzeAPIFile(cur)
else:
print "unexpected element %s in API doc <files>" % (cur.name)
cur = cur.next
return count
def analyzeAPIEnum(top):
file = top.prop("file")
if file is None:
return 0
symbol = top.prop("name")
if symbol is None:
return 0
addEnum(symbol, file)
l = splitIdentifier(symbol)
for word in l:
addWord(word, file, symbol, 10)
return 1
def analyzeAPIConst(top):
file = top.prop("file")
if file is None:
return 0
symbol = top.prop("name")
if symbol is None:
return 0
addConst(symbol, file)
l = splitIdentifier(symbol)
for word in l:
addWord(word, file, symbol, 10)
return 1
def analyzeAPIType(top):
file = top.prop("file")
if file is None:
return 0
symbol = top.prop("name")
if symbol is None:
return 0
addType(symbol, file)
l = splitIdentifier(symbol)
for word in l:
addWord(word, file, symbol, 10)
return 1
def analyzeAPIFunctype(top):
file = top.prop("file")
if file is None:
return 0
symbol = top.prop("name")
if symbol is None:
return 0
addFunctype(symbol, file)
l = splitIdentifier(symbol)
for word in l:
addWord(word, file, symbol, 10)
return 1
def analyzeAPIStruct(top):
file = top.prop("file")
if file is None:
return 0
symbol = top.prop("name")
if symbol is None:
return 0
addStruct(symbol, file)
l = splitIdentifier(symbol)
for word in l:
addWord(word, file, symbol, 10)
info = top.prop("info")
if info is not None:
info = string.replace(info, "'", " ")
info = string.strip(info)
l = string.split(info)
for word in l:
if len(word) > 2:
addWord(word, file, symbol, 5)
return 1
def analyzeAPIMacro(top):
file = top.prop("file")
if file is None:
return 0
symbol = top.prop("name")
if symbol is None:
return 0
symbol = string.replace(symbol, "'", " ")
symbol = string.strip(symbol)
info = None
cur = top.children
while cur is not None:
if cur.type == 'text':
cur = cur.next
continue
if cur.name == "info":
info = cur.content
break
cur = cur.next
l = splitIdentifier(symbol)
for word in l:
addWord(word, file, symbol, 10)
if info is None:
addMacro(symbol, file)
print "Macro %s description has no <info>" % (symbol)
return 0
info = string.replace(info, "'", " ")
info = string.strip(info)
addMacro(symbol, file, info)
l = string.split(info)
for word in l:
if len(word) > 2:
addWord(word, file, symbol, 5)
return 1
def analyzeAPIFunction(top):
file = top.prop("file")
if file is None:
return 0
symbol = top.prop("name")
if symbol is None:
return 0
symbol = string.replace(symbol, "'", " ")
symbol = string.strip(symbol)
info = None
cur = top.children
while cur is not None:
if cur.type == 'text':
cur = cur.next
continue
if cur.name == "info":
info = cur.content
elif cur.name == "return":
rinfo = cur.prop("info")
if rinfo is not None:
rinfo = string.replace(rinfo, "'", " ")
rinfo = string.strip(rinfo)
addString(rinfo, file, symbol, 7)
elif cur.name == "arg":
ainfo = cur.prop("info")
if ainfo is not None:
ainfo = string.replace(ainfo, "'", " ")
ainfo = string.strip(ainfo)
addString(ainfo, file, symbol, 5)
name = cur.prop("name")
if name is not None:
name = string.replace(name, "'", " ")
name = string.strip(name)
addWord(name, file, symbol, 7)
cur = cur.next
if info is None:
print "Function %s description has no <info>" % (symbol)
addFunction(symbol, file, "")
else:
info = string.replace(info, "'", " ")
info = string.strip(info)
addFunction(symbol, file, info)
addString(info, file, symbol, 5)
l = splitIdentifier(symbol)
for word in l:
addWord(word, file, symbol, 10)
return 1
def analyzeAPISymbols(top):
count = 0
cur = top.children
while cur is not None:
if cur.type == 'text':
cur = cur.next
continue
if cur.name == "macro":
count = count + analyzeAPIMacro(cur)
elif cur.name == "function":
count = count + analyzeAPIFunction(cur)
elif cur.name == "const":
count = count + analyzeAPIConst(cur)
elif cur.name == "typedef":
count = count + analyzeAPIType(cur)
elif cur.name == "struct":
count = count + analyzeAPIStruct(cur)
elif cur.name == "enum":
count = count + analyzeAPIEnum(cur)
elif cur.name == "functype":
count = count + analyzeAPIFunctype(cur)
else:
print "unexpected element %s in API doc <files>" % (cur.name)
cur = cur.next
return count
def analyzeAPI(doc):
count = 0
if doc is None:
return -1
root = doc.getRootElement()
if root.name != "api":
print "Unexpected root name"
return -1
cur = root.children
while cur is not None:
if cur.type == 'text':
cur = cur.next
continue
if cur.name == "files":
pass
# count = count + analyzeAPIFiles(cur)
elif cur.name == "symbols":
count = count + analyzeAPISymbols(cur)
else:
print "unexpected element %s in API doc" % (cur.name)
cur = cur.next
return count
#########################################################################
# #
# Web pages parsing and analysis #
# #
#########################################################################
import glob
def analyzeHTMLText(doc, resource, p, section, id):
words = 0
try:
content = p.content
words = words + addStringHTML(content, resource, id, section, 5)
except:
return -1
return words
def analyzeHTMLPara(doc, resource, p, section, id):
words = 0
try:
content = p.content
words = words + addStringHTML(content, resource, id, section, 5)
except:
return -1
return words
def analyzeHTMLPre(doc, resource, p, section, id):
words = 0
try:
content = p.content
words = words + addStringHTML(content, resource, id, section, 5)
except:
return -1
return words
def analyzeHTML(doc, resource, p, section, id):
words = 0
try:
content = p.content
words = words + addStringHTML(content, resource, id, section, 5)
except:
return -1
return words
def analyzeHTML(doc, resource):
para = 0
ctxt = doc.xpathNewContext()
try:
res = ctxt.xpathEval("//head/title")
title = res[0].content
except:
title = "Page %s" % (resource)
addPage(resource, title)
try:
items = ctxt.xpathEval("//h1 | //h2 | //h3 | //text()")
section = title
id = ""
for item in items:
if item.name == 'h1' or item.name == 'h2' or item.name == 'h3':
section = item.content
if item.prop("id"):
id = item.prop("id")
elif item.prop("name"):
id = item.prop("name")
elif item.type == 'text':
analyzeHTMLText(doc, resource, item, section, id)
para = para + 1
elif item.name == 'p':
analyzeHTMLPara(doc, resource, item, section, id)
para = para + 1
elif item.name == 'pre':
analyzeHTMLPre(doc, resource, item, section, id)
para = para + 1
else:
print "Page %s, unexpected %s element" % (resource, item.name)
except:
print "Page %s: problem analyzing" % (resource)
print sys.exc_type, sys.exc_value
return para
def analyzeHTMLPages():
ret = 0
HTMLfiles = glob.glob("*.html") + glob.glob("tutorial/*.html") + \
glob.glob("CIM/*.html") + glob.glob("ocaml/*.html") + \
glob.glob("ruby/*.html")
for html in HTMLfiles:
if html[0:3] == "API":
continue
if html == "xml.html":
continue
try:
doc = libxml2.parseFile(html)
except:
doc = libxml2.htmlParseFile(html, None)
try:
res = analyzeHTML(doc, html)
print "Parsed %s : %d paragraphs" % (html, res)
ret = ret + 1
except:
print "could not parse %s" % (html)
return ret
#########################################################################
# #
# Mail archives parsing and analysis #
# #
#########################################################################
import time
def getXMLDateArchive(t = None):
if t is None:
t = time.time()
T = time.gmtime(t)
month = time.strftime("%B", T)
year = T[0]
url = "http://www.redhat.com/archives/libvir-list/%d-%s/date.html" % (year, month)
return url
def scanXMLMsgArchive(url, title, force = 0):
if url is None or title is None:
return 0
ID = checkXMLMsgArchive(url)
if force == 0 and ID != -1:
return 0
if ID == -1:
ID = addXMLMsgArchive(url, title)
if ID == -1:
return 0
try:
print "Loading %s" % (url)
doc = libxml2.htmlParseFile(url, None)
except:
doc = None
if doc is None:
print "Failed to parse %s" % (url)
return 0
addStringArchive(title, ID, 20)
ctxt = doc.xpathNewContext()
texts = ctxt.xpathEval("//pre//text()")
for text in texts:
addStringArchive(text.content, ID, 5)
return 1
def scanXMLDateArchive(t = None, force = 0):
global wordsDictArchive
wordsDictArchive = {}
url = getXMLDateArchive(t)
print "loading %s" % (url)
try:
doc = libxml2.htmlParseFile(url, None)
except:
doc = None
if doc is None:
print "Failed to parse %s" % (url)
return -1
ctxt = doc.xpathNewContext()
anchors = ctxt.xpathEval("//a[@href]")
links = 0
newmsg = 0
for anchor in anchors:
href = anchor.prop("href")
if href is None or href[0:3] != "msg":
continue
try:
links = links + 1
msg = libxml2.buildURI(href, url)
title = anchor.content
if title is not None and title[0:4] == 'Re: ':
title = title[4:]
if title is not None and title[0:6] == '[xml] ':
title = title[6:]
newmsg = newmsg + scanXMLMsgArchive(msg, title, force)
except:
pass
return newmsg
#########################################################################
# #
# Main code: open the DB, the API XML and analyze it #
# #
#########################################################################
def analyzeArchives(t = None, force = 0):
global wordsDictArchive
ret = scanXMLDateArchive(t, force)
print "Indexed %d words in %d archive pages" % (len(wordsDictArchive), ret)
i = 0
skipped = 0
for word in wordsDictArchive.keys():
refs = wordsDictArchive[word]
if refs is None:
skipped = skipped + 1
continue
for id in refs.keys():
relevance = refs[id]
updateWordArchive(word, id, relevance)
i = i + 1
print "Found %d associations in HTML pages" % (i)
def analyzeHTMLTop():
global wordsDictHTML
ret = analyzeHTMLPages()
print "Indexed %d words in %d HTML pages" % (len(wordsDictHTML), ret)
i = 0
skipped = 0
for word in wordsDictHTML.keys():
refs = wordsDictHTML[word]
if refs is None:
skipped = skipped + 1
continue
for resource in refs.keys():
(relevance, id, section) = refs[resource]
updateWordHTML(word, resource, section, id, relevance)
i = i + 1
print "Found %d associations in HTML pages" % (i)
def analyzeAPITop():
global wordsDict
global API
try:
doc = loadAPI(API)
ret = analyzeAPI(doc)
print "Analyzed %d blocs" % (ret)
doc.freeDoc()
except:
print "Failed to parse and analyze %s" % (API)
print sys.exc_type, sys.exc_value
sys.exit(1)
print "Indexed %d words" % (len(wordsDict))
i = 0
skipped = 0
for word in wordsDict.keys():
refs = wordsDict[word]
if refs is None:
skipped = skipped + 1
continue
for (module, symbol) in refs.keys():
updateWord(word, symbol, refs[(module, symbol)])
i = i + 1
print "Found %d associations, skipped %d words" % (i, skipped)
def usage():
print "Usage index.py [--force] [--archive] [--archive-year year] [--archive-month month] [--API] [--docs]"
sys.exit(1)
def main():
try:
openMySQL()
except:
print "Failed to open the database"
print sys.exc_type, sys.exc_value
sys.exit(1)
args = sys.argv[1:]
force = 0
if args:
i = 0
while i < len(args):
if args[i] == '--force':
force = 1
elif args[i] == '--archive':
analyzeArchives(None, force)
elif args[i] == '--archive-year':
i = i + 1
year = args[i]
months = ["January" , "February", "March", "April", "May",
"June", "July", "August", "September", "October",
"November", "December"]
for month in months:
try:
str = "%s-%s" % (year, month)
T = time.strptime(str, "%Y-%B")
t = time.mktime(T) + 3600 * 24 * 10
analyzeArchives(t, force)
except:
print "Failed to index month archive:"
print sys.exc_type, sys.exc_value
elif args[i] == '--archive-month':
i = i + 1
month = args[i]
try:
T = time.strptime(month, "%Y-%B")
t = time.mktime(T) + 3600 * 24 * 10
analyzeArchives(t, force)
except:
print "Failed to index month archive:"
print sys.exc_type, sys.exc_value
elif args[i] == '--API':
analyzeAPITop()
elif args[i] == '--docs':
analyzeHTMLTop()
else:
usage()
i = i + 1
else:
usage()
if __name__ == "__main__":
main()
| taget/libvirt | docs/index.py | Python | lgpl-2.1 | 37,181 | 0.007504 |
'''
@author: David W.H. Swenson
'''
from __future__ import division
from __future__ import absolute_import
from builtins import zip
from past.utils import old_div
from builtins import object
import os
from nose.tools import (assert_equal, assert_not_equal, assert_almost_equal)
from nose.plugins.skip import SkipTest
import openpathsampling as paths
import openpathsampling.engines.toy as toy
from .test_helpers import (true_func, assert_equal_array_array,
assert_items_equal)
import numpy as np
# =========================================================================
# This single test module includes all the tests for the toy_dynamics
# subpackage.
# =========================================================================
def setUp():
# set up globals
global gaussian, linear, outer, harmonic
gaussian = toy.Gaussian(6.0, [2.5, 40.0], [0.8, 0.5])
outer = toy.OuterWalls([1.12, 2.0], [0.2, -0.25])
linear = toy.LinearSlope([1.5, 0.75], 0.5)
harmonic = toy.HarmonicOscillator([1.5, 2.0], [0.5, 3.0], [0.25, 0.75])
global init_pos, init_vel, sys_mass
init_pos = np.array([0.7, 0.65])
init_vel = np.array([0.6, 0.5])
sys_mass = np.array([1.5, 1.5])
# === TESTS FOR TOY POTENTIAL ENERGY SURFACES =============================
class testHarmonicOscillator(object):
def setUp(self):
self.positions = init_pos
self.velocities = init_vel
self.mass = sys_mass
def test_V(self):
# k = m * omega^2 = [1.5, 1.5] * [0.5, 3.0]^2
# = [0.375, 13.5]
# V = 0.5*( 1.5*0.375*((0.7)-0.25)^2 + 2.0*13.5*((0.65)-0.75)^2)
# = 0.191953125
assert_almost_equal(harmonic.V(self), 0.191953125)
def test_dVdx(self):
# [1.5, 2.0] * [1.5, 1.5] * [0.5, 3.0]^2 * [(0.7)-0.25, (0.65)-0.75]
# = [1.5*1.5*0.5^2*((0.7)-0.25), 2.0*1.5*3.0^2*((0.65)-0.75)]
# = [0.253125, -2.7]
for (experiment, theory) in zip(harmonic.dVdx(self),
[0.253125, -2.7]):
assert_almost_equal(experiment, theory)
class testGaussian(object):
def setUp(self):
self.positions = init_pos
self.velocities = init_vel
self.mass = sys_mass
def test_V(self):
# 6.0*exp(-2.5*((0.7)-0.8)^2-40.0*((0.65)-0.5)^2) = 2.37918851445
assert_almost_equal(gaussian.V(self), 2.37918851445)
def test_dVdx(self):
# exp(-2.5*((0.7)-0.8)^2-40*((0.65)-0.5)^2)*(-30*(0.7)+24)
assert_almost_equal(gaussian.dVdx(self)[0], 1.18959425722)
# -480*((0.65)-0.5)*exp(-2.5*((0.7)-0.8)^2-40*((0.65)-0.5)^2)
assert_almost_equal(gaussian.dVdx(self)[1], -28.5502621734)
class testOuterWalls(object):
def setUp(self):
self.positions = init_pos
self.velocities = init_vel
self.mass = sys_mass
def test_V(self):
# 1.12*(0.7-0.2)^6+2.0*(0.65-(-0.25))^6 = 1.080382
assert_almost_equal(outer.V(self), 1.080382)
def test_dVdx(self):
# 6*1.12*(0.7-0.2)^5 = 0.21
assert_almost_equal(outer.dVdx(self)[0], 0.21)
# 6*2.0*(0.65-(-0.25))^5 = 7.08588
assert_almost_equal(outer.dVdx(self)[1], 7.08588)
class testLinearSlope(object):
def setUp(self):
self.positions = init_pos
self.velocities = init_vel
self.mass = sys_mass
def test_V(self):
assert_almost_equal(linear.V(self), 2.0375)
def test_dVdx(self):
assert_equal(linear.dVdx(self), [1.5, 0.75])
class testCombinations(object):
def setUp(self):
self.positions = init_pos
self.velocities = init_vel
self.mass = sys_mass
self.simpletest = gaussian + gaussian
self.fullertest = gaussian + outer - linear
def test_V(self):
assert_almost_equal(self.simpletest.V(self), 2*2.37918851445)
assert_almost_equal(self.fullertest.V(self),
2.37918851445 + 1.080382 - 2.0375)
def test_dVdx(self):
assert_almost_equal(self.simpletest.dVdx(self)[0], 2*1.18959425722)
assert_almost_equal(self.simpletest.dVdx(self)[1], 2*-28.5502621734)
assert_almost_equal(self.fullertest.dVdx(self)[0],
1.18959425722 + 0.21 - 1.5)
assert_almost_equal(self.fullertest.dVdx(self)[1],
-28.5502621734 + 7.08588 - 0.75)
def test_kinetic_energy(self):
assert_almost_equal(self.simpletest.kinetic_energy(self), 0.4575)
# === TESTS FOR TOY ENGINE OBJECT =========================================
class test_convert_fcn(object):
def test_convert_to_3Ndim(v):
raise SkipTest
assert_equal_array_array(toy.convert_to_3Ndim([1.0, 2.0]),
np.array([[1.0, 2.0, 0.0]]))
assert_equal_array_array(toy.convert_to_3Ndim([1.0, 2.0, 3.0]),
np.array([[1.0, 2.0, 3.0]]))
assert_equal_array_array(toy.convert_to_3Ndim([1.0, 2.0, 3.0, 4.0]),
np.array([[1.0, 2.0, 3.0], [4.0, 0.0, 0.0]]))
class testToyEngine(object):
def setUp(self):
pes = linear
integ = toy.LeapfrogVerletIntegrator(dt=0.002)
topology=toy.Topology(
n_spatial = 2,
masses = sys_mass,
pes = pes
)
options={
'integ' : integ,
'n_frames_max' : 5}
sim = toy.Engine(options=options,
topology=topology
)
template = toy.Snapshot(
coordinates=init_pos.copy(),
velocities=init_pos.copy(),
engine=sim
)
sim.positions = init_pos.copy()
sim.velocities = init_vel.copy()
sim.n_steps_per_frame = 10
self.sim = sim
def teardown(self):
if os.path.isfile('toy_tmp.nc'):
os.remove('toy_tmp.nc')
def test_sanity(self):
assert_items_equal(self.sim._mass, sys_mass)
assert_items_equal(self.sim._minv, [old_div(1.0,m_i) for m_i in sys_mass])
assert_equal(self.sim.n_steps_per_frame, 10)
def test_snapshot_timestep(self):
assert_equal(self.sim.snapshot_timestep, 0.02)
def test_snapshot_get(self):
snapshot = self.sim.current_snapshot
assert_items_equal(snapshot.velocities[0],
self.sim.velocities)
assert_items_equal(snapshot.coordinates[0],
self.sim.positions)
def test_snapshot_set(self):
snap = toy.Snapshot(coordinates=np.array([[1,2,3]]),
velocities=np.array([[4,5,6]]))
self.sim.current_snapshot = snap
assert_items_equal(self.sim.positions, [1,2,3])
assert_items_equal(self.sim.velocities, [4,5,6])
def test_generate_next_frame(self):
# we test correctness by integrating forward, then backward
assert_items_equal(self.sim.positions, init_pos)
assert_items_equal(self.sim.velocities, init_vel)
snap = self.sim.generate_next_frame()
#assert_equal_array_array(snap.coordinates,
#np.array([init_pos.append(0.0)]))
self.sim.velocities = -self.sim.velocities
snap2 = self.sim.generate_next_frame()
np.testing.assert_allclose(snap2.coordinates[0], init_pos)
def test_generate(self):
self.sim.initialized = True
try:
traj = self.sim.generate(self.sim.current_snapshot, [true_func])
except paths.engines.EngineMaxLengthError as e:
traj = e.last_trajectory
assert_equal(len(traj), self.sim.n_frames_max)
else:
raise RuntimeError('Did not raise MaxLength Error')
def test_generate_n_frames(self):
self.sim.initialized = True
ens = paths.LengthEnsemble(4) # first snap plus n_frames
orig = self.sim.current_snapshot.copy()
traj1 = self.sim.generate(self.sim.current_snapshot, [ens.can_append])
self.sim.current_snapshot = orig
traj2 = [orig] + self.sim.generate_n_frames(3)
assert_equal(len(traj1), len(traj2))
for (s1, s2) in zip(traj1, traj2):
# snapshots are not the same object
assert_not_equal(s1, s2)
# however, they have the same values stored in them
assert_equal(len(s1.coordinates), 1)
assert_equal(len(s1.coordinates[0]), 2)
assert_items_equal(s1.coordinates[0], s2.coordinates[0])
assert_items_equal(s1.velocities[0], s2.velocities[0])
def test_start_with_snapshot(self):
snap = toy.Snapshot(coordinates=np.array([1,2]),
velocities=np.array([3,4]))
self.sim.start(snapshot=snap)
self.sim.stop([snap])
# === TESTS FOR TOY INTEGRATORS ===========================================
class testLeapfrogVerletIntegrator(object):
def setUp(self):
pes = linear
integ = toy.LeapfrogVerletIntegrator(dt=0.002)
topology=toy.Topology(
n_spatial = 2,
masses = sys_mass,
pes = pes
)
options={
'integ' : integ,
'n_frames_max' : 5}
sim = toy.Engine(options=options,
topology=topology
)
template = toy.Snapshot(
coordinates=init_pos.copy(),
velocities=init_pos.copy(),
engine=sim
)
sim.positions = init_pos.copy()
sim.velocities = init_vel.copy()
sim.n_steps_per_frame = 10
self.sim = sim
def test_momentum_update(self):
self.sim.integ._momentum_update(self.sim, 0.002)
# velocities = init_vel - pes.dVdx(init_pos)/m*dt
# = [0.6, 0.5] - [1.5, 0.75]/[1.5, 1.5] * 0.002
# = [0.598, 0.499]
assert_equal(self.sim.velocities[0], 0.598)
assert_equal(self.sim.velocities[1], 0.499)
def test_position_update(self):
self.sim.integ._position_update(self.sim, 0.002)
# positions = init_pos + velocities * dt
# = [0.7, 0.65] + [0.6, 0.5]*0.002 = [0.7012, 0.651]
assert_almost_equal(self.sim.positions[0], 0.7012)
assert_almost_equal(self.sim.positions[1], 0.651)
def test_step(self):
# no assertions since the tests of position/momentum updates should
# handle that... this is just to make sure we run
self.sim.integ.step(self.sim)
self.sim.integ.step(self.sim)
class testLangevinBAOABIntegrator(object):
'''Testing for correctness is hard, since this is a stochastic
calculation. However, we can at least run tests to make sure nothing
crashes.'''
def setUp(self):
pes = linear
integ = toy.LangevinBAOABIntegrator(dt=0.002, temperature=0.5,
gamma=1.0)
topology=toy.Topology(
n_spatial = 2,
masses = sys_mass,
pes = pes
)
options={
'integ' : integ,
'n_frames_max' : 5}
sim = toy.Engine(options=options,
topology=topology
)
template = toy.Snapshot(
coordinates=init_pos.copy(),
velocities=init_pos.copy(),
engine=sim
)
sim.positions = init_pos.copy()
sim.velocities = init_vel.copy()
sim.n_steps_per_frame = 10
self.sim = sim
def test_OU_update(self):
# we can't actually test for correct values, but we *can* test that
# some things *don't* happen
assert_equal(self.sim.velocities[0], init_vel[0])
assert_equal(self.sim.velocities[1], init_vel[1])
self.sim.integ._OU_update(self.sim, 0.002)
assert_not_equal(self.sim.velocities[0], init_vel[0])
assert_not_equal(self.sim.velocities[1], init_vel[1])
# tests that the same random number wasn't used for both:
assert_not_equal(self.sim.velocities[0] - init_vel[0],
self.sim.velocities[1] - init_vel[1])
def test_step(self):
self.sim.generate_next_frame()
| jhprinz/openpathsampling | openpathsampling/tests/test_toy_dynamics.py | Python | lgpl-2.1 | 12,227 | 0.005561 |
import tft_ir_api as IR
var_u = IR.RealVE("u", 0, (-100.0 - 0.0000001), (100.0 + 0.0000001))
var_v = IR.RealVE("v", 1, (20.0 - 0.000000001), (20000.0 + 0.000000001))
var_T = IR.RealVE("T", 2, (-30.0 - 0.000001), (50.0 + 0.000001))
t1 = IR.BE("+", 4, IR.FConst(331.4), IR.BE("*", 3, IR.FConst(0.6), var_T))
temp = IR.BE("+", 5, t1, var_u)
temp = IR.BE("*", 8, temp, temp)
r = IR.BE("/", 9, IR.BE("*", 7, IR.UE("-", 6, t1), var_v), temp)
IR.TuneExpr(r)
| soarlab/FPTuner | examples/primitives/doppler-1.py | Python | mit | 477 | 0.020964 |
from hazelcast.serialization.bits import *
from hazelcast.protocol.builtin import FixSizedTypesCodec
from hazelcast.protocol.client_message import OutboundMessage, REQUEST_HEADER_SIZE, create_initial_buffer
from hazelcast.protocol.builtin import StringCodec
from hazelcast.protocol.builtin import DataCodec
# hex: 0x011300
_REQUEST_MESSAGE_TYPE = 70400
# hex: 0x011301
_RESPONSE_MESSAGE_TYPE = 70401
_REQUEST_THREAD_ID_OFFSET = REQUEST_HEADER_SIZE
_REQUEST_REFERENCE_ID_OFFSET = _REQUEST_THREAD_ID_OFFSET + LONG_SIZE_IN_BYTES
_REQUEST_INITIAL_FRAME_SIZE = _REQUEST_REFERENCE_ID_OFFSET + LONG_SIZE_IN_BYTES
def encode_request(name, key, thread_id, reference_id):
buf = create_initial_buffer(_REQUEST_INITIAL_FRAME_SIZE, _REQUEST_MESSAGE_TYPE)
FixSizedTypesCodec.encode_long(buf, _REQUEST_THREAD_ID_OFFSET, thread_id)
FixSizedTypesCodec.encode_long(buf, _REQUEST_REFERENCE_ID_OFFSET, reference_id)
StringCodec.encode(buf, name)
DataCodec.encode(buf, key, True)
return OutboundMessage(buf, True)
| hazelcast/hazelcast-python-client | hazelcast/protocol/codec/map_unlock_codec.py | Python | apache-2.0 | 1,021 | 0.002938 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-29 05:42
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('blog', '0025_auto_20170626_0008'),
]
operations = [
migrations.AlterModelOptions(
name='category',
options={'ordering': ['-id'], 'verbose_name': '分类', 'verbose_name_plural': '分类'},
),
]
| r26zhao/django_blog | blog/migrations/0026_auto_20170629_1342.py | Python | mit | 467 | 0.002179 |
import os
import sys
from optparse import OptionParser, NO_DEFAULT
import imp
import django
from google.appengine._internal.django.core.management.base import BaseCommand, CommandError, handle_default_options
from google.appengine._internal.django.utils.importlib import import_module
# For backwards compatibility: get_version() used to be in this module.
get_version = django.get_version
# A cache of loaded commands, so that call_command
# doesn't have to reload every time it's called.
_commands = None
def find_commands(management_dir):
"""
Given a path to a management directory, returns a list of all the command
names that are available.
Returns an empty list if no commands are defined.
"""
command_dir = os.path.join(management_dir, 'commands')
try:
return [f[:-3] for f in os.listdir(command_dir)
if not f.startswith('_') and f.endswith('.py')]
except OSError:
return []
def find_management_module(app_name):
"""
Determines the path to the management module for the given app_name,
without actually importing the application or the management module.
Raises ImportError if the management module cannot be found for any reason.
"""
parts = app_name.split('.')
parts.append('management')
parts.reverse()
part = parts.pop()
path = None
# When using manage.py, the project module is added to the path,
# loaded, then removed from the path. This means that
# testproject.testapp.models can be loaded in future, even if
# testproject isn't in the path. When looking for the management
# module, we need look for the case where the project name is part
# of the app_name but the project directory itself isn't on the path.
try:
f, path, descr = imp.find_module(part,path)
except ImportError,e:
if os.path.basename(os.getcwd()) != part:
raise e
while parts:
part = parts.pop()
f, path, descr = imp.find_module(part, path and [path] or None)
return path
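# Usage sketch (the dotted app path "myproject.myapp" is hypothetical);
# get_commands() below applies exactly this pattern to each installed app:
#   path = find_management_module("myproject.myapp")
#   names = find_commands(path)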
def load_command_class(app_name, name):
"""
Given a command name and an application name, returns the Command
class instance. All errors raised by the import process
(ImportError, AttributeError) are allowed to propagate.
"""
module = import_module('%s.management.commands.%s' % (app_name, name))
return module.Command()
def get_commands():
"""
Returns a dictionary mapping command names to their callback applications.
This works by looking for a management.commands package in django.core, and
in each installed application -- if a commands package exists, all commands
in that package are registered.
Core commands are always included. If a settings module has been
specified, user-defined commands will also be included, the
startproject command will be disabled, and the startapp command
will be modified to use the directory in which the settings module appears.
The dictionary is in the format {command_name: app_name}. Key-value
pairs from this dictionary can then be used in calls to
load_command_class(app_name, command_name)
If a specific version of a command must be loaded (e.g., with the
startapp command), the instantiated module can be placed in the
dictionary in place of the application name.
The dictionary is cached on the first call and reused on subsequent
calls.
"""
global _commands
if _commands is None:
_commands = dict([(name, 'django.core') for name in find_commands(__path__[0])])
# Find the installed apps
try:
from google.appengine._internal.django.conf import settings
apps = settings.INSTALLED_APPS
except (AttributeError, EnvironmentError, ImportError):
apps = []
# Find the project directory
try:
from google.appengine._internal.django.conf import settings
module = import_module(settings.SETTINGS_MODULE)
project_directory = setup_environ(module, settings.SETTINGS_MODULE)
except (AttributeError, EnvironmentError, ImportError, KeyError):
project_directory = None
# Find and load the management module for each installed app.
for app_name in apps:
try:
path = find_management_module(app_name)
_commands.update(dict([(name, app_name)
for name in find_commands(path)]))
except ImportError:
pass # No management module - ignore this app
if project_directory:
# Remove the "startproject" command from self.commands, because
# that's a django-admin.py command, not a manage.py command.
del _commands['startproject']
# Override the startapp command so that it always uses the
# project_directory, not the current working directory
# (which is default).
from google.appengine._internal.django.core.management.commands.startapp import ProjectCommand
_commands['startapp'] = ProjectCommand(project_directory)
return _commands
def call_command(name, *args, **options):
"""
Calls the given command, with the given options and args/kwargs.
This is the primary API you should use for calling specific commands.
Some examples:
call_command('syncdb')
call_command('shell', plain=True)
call_command('sqlall', 'myapp')
"""
# Load the command object.
try:
app_name = get_commands()[name]
if isinstance(app_name, BaseCommand):
# If the command is already loaded, use it directly.
klass = app_name
else:
klass = load_command_class(app_name, name)
except KeyError:
raise CommandError("Unknown command: %r" % name)
# Grab out a list of defaults from the options. optparse does this for us
# when the script runs from the command line, but since call_command can
# be called programatically, we need to simulate the loading and handling
# of defaults (see #10080 for details).
defaults = dict([(o.dest, o.default)
for o in klass.option_list
if o.default is not NO_DEFAULT])
defaults.update(options)
return klass.execute(*args, **defaults)
class LaxOptionParser(OptionParser):
"""
An option parser that doesn't raise any errors on unknown options.
This is needed because the --settings and --pythonpath options affect
the commands (and thus the options) that are available to the user.
"""
def error(self, msg):
pass
def print_help(self):
"""Output nothing.
The lax options are included in the normal option parser, so under
normal usage, we don't need to print the lax options.
"""
pass
def print_lax_help(self):
"""Output the basic options available to every command.
This just redirects to the default print_help() behaviour.
"""
OptionParser.print_help(self)
def _process_args(self, largs, rargs, values):
"""
Overrides OptionParser._process_args to exclusively handle default
options and ignore args and other options.
        This overrides the behavior of the superclass, which stops parsing
at the first unrecognized option.
"""
while rargs:
arg = rargs[0]
try:
if arg[0:2] == "--" and len(arg) > 2:
# process a single long option (possibly with value(s))
# the superclass code pops the arg off rargs
self._process_long_opt(rargs, values)
elif arg[:1] == "-" and len(arg) > 1:
# process a cluster of short options (possibly with
# value(s) for the last one only)
# the superclass code pops the arg off rargs
self._process_short_opts(rargs, values)
else:
# it's either a non-default option or an arg
# either way, add it to the args list so we can keep
# dealing with options
del rargs[0]
raise Exception
except:
largs.append(arg)
class ManagementUtility(object):
"""
Encapsulates the logic of the django-admin.py and manage.py utilities.
A ManagementUtility has a number of commands, which can be manipulated
by editing the self.commands dictionary.
"""
def __init__(self, argv=None):
self.argv = argv or sys.argv[:]
self.prog_name = os.path.basename(self.argv[0])
def main_help_text(self):
"""
Returns the script's main help text, as a string.
"""
usage = ['',"Type '%s help <subcommand>' for help on a specific subcommand." % self.prog_name,'']
usage.append('Available subcommands:')
commands = get_commands().keys()
commands.sort()
for cmd in commands:
usage.append(' %s' % cmd)
return '\n'.join(usage)
def fetch_command(self, subcommand):
"""
Tries to fetch the given subcommand, printing a message with the
appropriate command called from the command line (usually
"django-admin.py" or "manage.py") if it can't be found.
"""
try:
app_name = get_commands()[subcommand]
except KeyError:
sys.stderr.write("Unknown command: %r\nType '%s help' for usage.\n" % (subcommand, self.prog_name))
sys.exit(1)
if isinstance(app_name, BaseCommand):
# If the command is already loaded, use it directly.
klass = app_name
else:
klass = load_command_class(app_name, subcommand)
return klass
def autocomplete(self):
"""
Output completion suggestions for BASH.
        The output of this function is passed to BASH's `COMPREPLY` variable and
        treated as completion suggestions. `COMPREPLY` expects a space
separated string as the result.
The `COMP_WORDS` and `COMP_CWORD` BASH environment variables are used
to get information about the cli input. Please refer to the BASH
        man-page for more information about these variables.
Subcommand options are saved as pairs. A pair consists of
the long option string (e.g. '--exclude') and a boolean
value indicating if the option requires arguments. When printing to
        stdout, an equal sign is appended to options which require arguments.
Note: If debugging this function, it is recommended to write the debug
output in a separate file. Otherwise the debug output will be treated
and formatted as potential completion suggestions.
"""
# Don't complete if user hasn't sourced bash_completion file.
if not os.environ.has_key('DJANGO_AUTO_COMPLETE'):
return
cwords = os.environ['COMP_WORDS'].split()[1:]
cword = int(os.environ['COMP_CWORD'])
try:
curr = cwords[cword-1]
except IndexError:
curr = ''
subcommands = get_commands().keys() + ['help']
options = [('--help', None)]
# subcommand
if cword == 1:
print ' '.join(sorted(filter(lambda x: x.startswith(curr), subcommands)))
# subcommand options
# special case: the 'help' subcommand has no options
elif cwords[0] in subcommands and cwords[0] != 'help':
subcommand_cls = self.fetch_command(cwords[0])
# special case: 'runfcgi' stores additional options as
# 'key=value' pairs
if cwords[0] == 'runfcgi':
from google.appengine._internal.django.core.servers.fastcgi import FASTCGI_OPTIONS
options += [(k, 1) for k in FASTCGI_OPTIONS]
# special case: add the names of installed apps to options
elif cwords[0] in ('dumpdata', 'reset', 'sql', 'sqlall',
'sqlclear', 'sqlcustom', 'sqlindexes',
'sqlreset', 'sqlsequencereset', 'test'):
try:
from google.appengine._internal.django.conf import settings
# Get the last part of the dotted path as the app name.
options += [(a.split('.')[-1], 0) for a in settings.INSTALLED_APPS]
except ImportError:
# Fail silently if DJANGO_SETTINGS_MODULE isn't set. The
# user will find out once they execute the command.
pass
options += [(s_opt.get_opt_string(), s_opt.nargs) for s_opt in
subcommand_cls.option_list]
# filter out previously specified options from available options
prev_opts = [x.split('=')[0] for x in cwords[1:cword-1]]
options = filter(lambda (x, v): x not in prev_opts, options)
# filter options by current input
options = sorted([(k, v) for k, v in options if k.startswith(curr)])
for option in options:
opt_label = option[0]
# append '=' to options which require args
if option[1]:
opt_label += '='
print opt_label
sys.exit(1)
def execute(self):
"""
Given the command-line arguments, this figures out which subcommand is
being run, creates a parser appropriate to that command, and runs it.
"""
# Preprocess options to extract --settings and --pythonpath.
# These options could affect the commands that are available, so they
# must be processed early.
parser = LaxOptionParser(usage="%prog subcommand [options] [args]",
version=get_version(),
option_list=BaseCommand.option_list)
self.autocomplete()
try:
options, args = parser.parse_args(self.argv)
handle_default_options(options)
except:
pass # Ignore any option errors at this point.
try:
subcommand = self.argv[1]
except IndexError:
subcommand = 'help' # Display help if no arguments were given.
if subcommand == 'help':
if len(args) > 2:
self.fetch_command(args[2]).print_help(self.prog_name, args[2])
else:
parser.print_lax_help()
sys.stderr.write(self.main_help_text() + '\n')
sys.exit(1)
# Special-cases: We want 'django-admin.py --version' and
# 'django-admin.py --help' to work, for backwards compatibility.
elif self.argv[1:] == ['--version']:
# LaxOptionParser already takes care of printing the version.
pass
elif self.argv[1:] == ['--help']:
parser.print_lax_help()
sys.stderr.write(self.main_help_text() + '\n')
else:
self.fetch_command(subcommand).run_from_argv(self.argv)
def setup_environ(settings_mod, original_settings_path=None):
"""
Configures the runtime environment. This can also be used by external
scripts wanting to set up a similar environment to manage.py.
Returns the project directory (assuming the passed settings module is
directly in the project directory).
The "original_settings_path" parameter is optional, but recommended, since
trying to work out the original path from the module can be problematic.
"""
# Add this project to sys.path so that it's importable in the conventional
# way. For example, if this file (manage.py) lives in a directory
# "myproject", this code would add "/path/to/myproject" to sys.path.
if '__init__.py' in settings_mod.__file__:
p = os.path.dirname(settings_mod.__file__)
else:
p = settings_mod.__file__
project_directory, settings_filename = os.path.split(p)
if project_directory == os.curdir or not project_directory:
project_directory = os.getcwd()
project_name = os.path.basename(project_directory)
# Strip filename suffix to get the module name.
settings_name = os.path.splitext(settings_filename)[0]
# Strip $py for Jython compiled files (like settings$py.class)
if settings_name.endswith("$py"):
settings_name = settings_name[:-3]
# Set DJANGO_SETTINGS_MODULE appropriately.
if original_settings_path:
os.environ['DJANGO_SETTINGS_MODULE'] = original_settings_path
else:
os.environ['DJANGO_SETTINGS_MODULE'] = '%s.%s' % (project_name, settings_name)
# Import the project module. We add the parent directory to PYTHONPATH to
# avoid some of the path errors new users can have.
sys.path.append(os.path.join(project_directory, os.pardir))
project_module = import_module(project_name)
sys.path.pop()
return project_directory
def execute_from_command_line(argv=None):
"""
A simple method that runs a ManagementUtility.
"""
utility = ManagementUtility(argv)
utility.execute()
def execute_manager(settings_mod, argv=None):
"""
Like execute_from_command_line(), but for use by manage.py, a
project-specific django-admin.py utility.
"""
setup_environ(settings_mod)
utility = ManagementUtility(argv)
utility.execute()
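# Illustrative sketch: the classic project-level manage.py entry point that
# drives execute_manager(). The project `settings` module is assumed to live
# next to manage.py; it is not defined in this file.
def _example_manage_py():
    try:
        import settings  # the project's settings.py (assumed to exist)
    except ImportError:
        sys.stderr.write("Error: Can't find 'settings.py' next to manage.py.\n")
        sys.exit(1)
    execute_manager(settings)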
| ychen820/microblog | y/google-cloud-sdk/platform/google_appengine/google/appengine/_internal/django/core/management/__init__.py | Python | bsd-3-clause | 17,576 | 0.001707 |
from webob import Request, Response
import re
def not_found(request, response):
response.status = 404
response.write("404 Not Found")
class Route(object):
default_methods = ['GET']
def __init__(self, path, handler, methods=None):
self.path = path
self.handler = handler
if methods is None:
self.methods = self.default_methods
else:
self.methods = methods
self.urlvars = {}
def match(self, request):
regex = re.compile(self.path)
match = regex.match(request.path)
if match and request.method in self.methods:
self.urlvars.update(match.groupdict())
request.urlvars = self.urlvars
return True
return False
class Router(object):
def __init__(self):
self.routes = []
def handle(self, url, handler, *args, **kwargs):
if not url.startswith("^"):
url = "^" + url
if not url.endswith("$"):
url += "$"
route = Route(path=url, handler=handler, *args, **kwargs)
self.routes.append(route)
def match(self, request):
for route in self.routes:
if route.match(request):
return route.handler
return None
def dispatch(self, request):
handler = self.match(request)
return handler
def __call__(self, environ, start_response):
request = Request(environ)
response = Response()
handler = self.dispatch(request)
if handler:
handler()(request, response)
else:
not_found(request, response)
        return response(environ, start_response)
| xstrengthofonex/code-live-tutorials | python_web_development/templating/router.py | Python | mit | 1,680 | 0.002976 |
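# Illustrative usage sketch for the Route/Router classes above. It assumes
# webob is installed and that those classes are importable; the handler
# class, URL pattern, and port are made up for demonstration. The router
# calls handler()(request, response), so handlers are registered as classes
# whose instances are callable.
class _ExampleHelloHandler(object):
    def __call__(self, request, response):
        name = request.urlvars.get('name', 'world')
        response.write("Hello, %s!" % name)
def _example_serve():
    from wsgiref.simple_server import make_server
    app = Router()
    app.handle(r'/hello/(?P<name>\w+)', _ExampleHelloHandler, methods=['GET'])
    make_server('localhost', 8080, app).serve_forever()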
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import theano
import theano.tensor as T
import numpy as np
from .. import activations, initializations
from ..utils.theano_utils import shared_scalar, shared_zeros, alloc_zeros_matrix
from ..layers.core import Layer, MaskedLayer
from six.moves import range
class Recurrent(MaskedLayer):
def get_output_mask(self, train=None):
if self.return_sequences:
return super(Recurrent, self).get_output_mask(train)
else:
return None
def get_padded_shuffled_mask(self, train, X, pad=0):
mask = self.get_input_mask(train)
if mask is None:
mask = T.ones_like(X.sum(axis=-1)) # is there a better way to do this without a sum?
# mask is (nb_samples, time)
mask = T.shape_padright(mask) # (nb_samples, time, 1)
mask = T.addbroadcast(mask, -1) # the new dimension (the '1') is made broadcastable
# see http://deeplearning.net/software/theano/library/tensor/basic.html#broadcasting-in-theano-vs-numpy
mask = mask.dimshuffle(1, 0, 2) # (time, nb_samples, 1)
if pad > 0:
# left-pad in time with 0
padding = alloc_zeros_matrix(pad, mask.shape[1], 1)
mask = T.concatenate([padding, mask], axis=0)
return mask.astype('int8')
@property
def output_shape(self):
input_shape = self.input_shape
if self.return_sequences:
return (input_shape[0], input_shape[1], self.output_dim)
else:
return (input_shape[0], self.output_dim)
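# Illustrative sketch of what get_padded_shuffled_mask() above computes,
# written with NumPy instead of Theano. It assumes `mask` is a 0/1 array of
# shape (nb_samples, time); the Theano version additionally marks the new
# trailing axis as broadcastable.
def _example_padded_shuffled_mask(mask, pad=1):
    import numpy as np
    mask = np.asarray(mask)[:, :, None]      # (nb_samples, time, 1)
    mask = mask.transpose(1, 0, 2)           # (time, nb_samples, 1)
    if pad > 0:                              # left-pad along time with zeros
        padding = np.zeros((pad,) + mask.shape[1:], dtype=mask.dtype)
        mask = np.concatenate([padding, mask], axis=0)
    return mask.astype('int8')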
class SimpleRNN(Recurrent):
'''
        Fully connected RNN where output is to be fed back to input.
Not a particularly useful model,
included for demonstration purposes
(demonstrates how to use theano.scan to build a basic RNN).
'''
def __init__(self, input_dim, output_dim,
init='glorot_uniform', inner_init='orthogonal', activation='sigmoid', weights=None,
truncate_gradient=-1, return_sequences=False):
super(SimpleRNN, self).__init__()
self.init = initializations.get(init)
self.inner_init = initializations.get(inner_init)
self.input_dim = input_dim
self.output_dim = output_dim
self.truncate_gradient = truncate_gradient
self.activation = activations.get(activation)
self.return_sequences = return_sequences
self.input = T.tensor3()
self.W = self.init((self.input_dim, self.output_dim))
self.U = self.inner_init((self.output_dim, self.output_dim))
self.b = shared_zeros((self.output_dim))
self.params = [self.W, self.U, self.b]
if weights is not None:
self.set_weights(weights)
def _step(self, x_t, mask_tm1, h_tm1, u):
'''
Variable names follow the conventions from:
http://deeplearning.net/software/theano/library/scan.html
'''
return self.activation(x_t + mask_tm1 * T.dot(h_tm1, u))
def get_output(self, train=False):
X = self.get_input(train) # shape: (nb_samples, time (padded with zeros), input_dim)
# new shape: (time, nb_samples, input_dim) -> because theano.scan iterates over main dimension
padded_mask = self.get_padded_shuffled_mask(train, X, pad=1)
X = X.dimshuffle((1, 0, 2))
x = T.dot(X, self.W) + self.b
# scan = theano symbolic loop.
# See: http://deeplearning.net/software/theano/library/scan.html
# Iterate over the first dimension of the x array (=time).
outputs, updates = theano.scan(
self._step, # this will be called with arguments (sequences[i], outputs[i-1], non_sequences[i])
sequences=[x, dict(input=padded_mask, taps=[-1])], # tensors to iterate over, inputs to _step
# initialization of the output. Input to _step with default tap=-1.
outputs_info=T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1),
non_sequences=self.U, # static inputs to _step
truncate_gradient=self.truncate_gradient)
if self.return_sequences:
return outputs.dimshuffle((1, 0, 2))
return outputs[-1]
def get_config(self):
return {"name": self.__class__.__name__,
"input_dim": self.input_dim,
"output_dim": self.output_dim,
"init": self.init.__name__,
"inner_init": self.inner_init.__name__,
"activation": self.activation.__name__,
"truncate_gradient": self.truncate_gradient,
"return_sequences": self.return_sequences}
class SimpleDeepRNN(Recurrent):
'''
Fully connected RNN where the output of multiple timesteps
(up to "depth" steps in the past) is fed back to the input:
output = activation( W.x_t + b + inner_activation(U_1.h_tm1) + inner_activation(U_2.h_tm2) + ... )
This demonstrates how to build RNNs with arbitrary lookback.
Also (probably) not a super useful model.
'''
def __init__(self, input_dim, output_dim, depth=3,
init='glorot_uniform', inner_init='orthogonal',
activation='sigmoid', inner_activation='hard_sigmoid',
weights=None, truncate_gradient=-1, return_sequences=False):
super(SimpleDeepRNN, self).__init__()
self.init = initializations.get(init)
self.inner_init = initializations.get(inner_init)
self.input_dim = input_dim
self.output_dim = output_dim
self.truncate_gradient = truncate_gradient
self.activation = activations.get(activation)
self.inner_activation = activations.get(inner_activation)
self.depth = depth
self.return_sequences = return_sequences
self.input = T.tensor3()
self.W = self.init((self.input_dim, self.output_dim))
self.Us = [self.inner_init((self.output_dim, self.output_dim)) for _ in range(self.depth)]
self.b = shared_zeros((self.output_dim))
self.params = [self.W] + self.Us + [self.b]
if weights is not None:
self.set_weights(weights)
def _step(self, x_t, *args):
o = x_t
for i in range(self.depth):
mask_tmi = args[i]
h_tmi = args[i + self.depth]
U_tmi = args[i + 2*self.depth]
o += mask_tmi*self.inner_activation(T.dot(h_tmi, U_tmi))
return self.activation(o)
def get_output(self, train=False):
X = self.get_input(train)
padded_mask = self.get_padded_shuffled_mask(train, X, pad=self.depth)
X = X.dimshuffle((1, 0, 2))
x = T.dot(X, self.W) + self.b
if self.depth == 1:
initial = T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1)
else:
initial = T.unbroadcast(T.unbroadcast(alloc_zeros_matrix(self.depth, X.shape[1], self.output_dim), 0), 2)
outputs, updates = theano.scan(
self._step,
sequences=[x, dict(
input=padded_mask,
taps=[(-i) for i in range(self.depth)]
)],
outputs_info=[dict(
initial=initial,
taps=[(-i-1) for i in range(self.depth)]
)],
non_sequences=self.Us,
truncate_gradient=self.truncate_gradient
)
if self.return_sequences:
return outputs.dimshuffle((1, 0, 2))
return outputs[-1]
def get_config(self):
return {"name": self.__class__.__name__,
"input_dim": self.input_dim,
"output_dim": self.output_dim,
"depth": self.depth,
"init": self.init.__name__,
"inner_init": self.inner_init.__name__,
"activation": self.activation.__name__,
"inner_activation": self.inner_activation.__name__,
"truncate_gradient": self.truncate_gradient,
"return_sequences": self.return_sequences}
class GRU(Recurrent):
'''
Gated Recurrent Unit - Cho et al. 2014
Acts as a spatiotemporal projection,
turning a sequence of vectors into a single vector.
Eats inputs with shape:
(nb_samples, max_sample_length (samples shorter than this are padded with zeros at the end), input_dim)
and returns outputs with shape:
if not return_sequences:
(nb_samples, output_dim)
if return_sequences:
(nb_samples, max_sample_length, output_dim)
References:
On the Properties of Neural Machine Translation: Encoder–Decoder Approaches
http://www.aclweb.org/anthology/W14-4012
Empirical Evaluation of Gated Recurrent Neural Networks on Sequence Modeling
http://arxiv.org/pdf/1412.3555v1.pdf
'''
def __init__(self, input_dim, output_dim=128,
init='glorot_uniform', inner_init='orthogonal',
activation='sigmoid', inner_activation='hard_sigmoid',
weights=None, truncate_gradient=-1, return_sequences=False):
super(GRU, self).__init__()
self.input_dim = input_dim
self.output_dim = output_dim
self.truncate_gradient = truncate_gradient
self.return_sequences = return_sequences
self.init = initializations.get(init)
self.inner_init = initializations.get(inner_init)
self.activation = activations.get(activation)
self.inner_activation = activations.get(inner_activation)
self.input = T.tensor3()
self.W_z = self.init((self.input_dim, self.output_dim))
self.U_z = self.inner_init((self.output_dim, self.output_dim))
self.b_z = shared_zeros((self.output_dim))
self.W_r = self.init((self.input_dim, self.output_dim))
self.U_r = self.inner_init((self.output_dim, self.output_dim))
self.b_r = shared_zeros((self.output_dim))
self.W_h = self.init((self.input_dim, self.output_dim))
self.U_h = self.inner_init((self.output_dim, self.output_dim))
self.b_h = shared_zeros((self.output_dim))
self.params = [
self.W_z, self.U_z, self.b_z,
self.W_r, self.U_r, self.b_r,
self.W_h, self.U_h, self.b_h,
]
if weights is not None:
self.set_weights(weights)
def _step(self,
xz_t, xr_t, xh_t, mask_tm1,
h_tm1,
u_z, u_r, u_h):
h_mask_tm1 = mask_tm1 * h_tm1
z = self.inner_activation(xz_t + T.dot(h_mask_tm1, u_z))
r = self.inner_activation(xr_t + T.dot(h_mask_tm1, u_r))
hh_t = self.activation(xh_t + T.dot(r * h_mask_tm1, u_h))
h_t = z * h_mask_tm1 + (1 - z) * hh_t
return h_t
def get_output(self, train=False):
X = self.get_input(train)
padded_mask = self.get_padded_shuffled_mask(train, X, pad=1)
X = X.dimshuffle((1, 0, 2))
x_z = T.dot(X, self.W_z) + self.b_z
x_r = T.dot(X, self.W_r) + self.b_r
x_h = T.dot(X, self.W_h) + self.b_h
outputs, updates = theano.scan(
self._step,
sequences=[x_z, x_r, x_h, padded_mask],
outputs_info=T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1),
non_sequences=[self.U_z, self.U_r, self.U_h],
truncate_gradient=self.truncate_gradient)
if self.return_sequences:
return outputs.dimshuffle((1, 0, 2))
return outputs[-1]
def get_config(self):
return {"name": self.__class__.__name__,
"input_dim": self.input_dim,
"output_dim": self.output_dim,
"init": self.init.__name__,
"inner_init": self.inner_init.__name__,
"activation": self.activation.__name__,
"inner_activation": self.inner_activation.__name__,
"truncate_gradient": self.truncate_gradient,
"return_sequences": self.return_sequences}
class LSTM(Recurrent):
'''
Acts as a spatiotemporal projection,
turning a sequence of vectors into a single vector.
Eats inputs with shape:
(nb_samples, max_sample_length (samples shorter than this are padded with zeros at the end), input_dim)
and returns outputs with shape:
if not return_sequences:
(nb_samples, output_dim)
if return_sequences:
(nb_samples, max_sample_length, output_dim)
For a step-by-step description of the algorithm, see:
http://deeplearning.net/tutorial/lstm.html
References:
Long short-term memory (original 97 paper)
http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf
Learning to forget: Continual prediction with LSTM
http://www.mitpressjournals.org/doi/pdf/10.1162/089976600300015015
Supervised sequence labelling with recurrent neural networks
http://www.cs.toronto.edu/~graves/preprint.pdf
'''
def __init__(self, input_dim, output_dim=128,
init='glorot_uniform', inner_init='orthogonal', forget_bias_init='one',
activation='tanh', inner_activation='hard_sigmoid',
weights=None, truncate_gradient=-1, return_sequences=False):
super(LSTM, self).__init__()
self.input_dim = input_dim
self.output_dim = output_dim
self.truncate_gradient = truncate_gradient
self.return_sequences = return_sequences
self.init = initializations.get(init)
self.inner_init = initializations.get(inner_init)
self.forget_bias_init = initializations.get(forget_bias_init)
self.activation = activations.get(activation)
self.inner_activation = activations.get(inner_activation)
self.input = T.tensor3()
self.W_i = self.init((self.input_dim, self.output_dim))
self.U_i = self.inner_init((self.output_dim, self.output_dim))
self.b_i = shared_zeros((self.output_dim))
self.W_f = self.init((self.input_dim, self.output_dim))
self.U_f = self.inner_init((self.output_dim, self.output_dim))
self.b_f = self.forget_bias_init((self.output_dim))
self.W_c = self.init((self.input_dim, self.output_dim))
self.U_c = self.inner_init((self.output_dim, self.output_dim))
self.b_c = shared_zeros((self.output_dim))
self.W_o = self.init((self.input_dim, self.output_dim))
self.U_o = self.inner_init((self.output_dim, self.output_dim))
self.b_o = shared_zeros((self.output_dim))
self.params = [
self.W_i, self.U_i, self.b_i,
self.W_c, self.U_c, self.b_c,
self.W_f, self.U_f, self.b_f,
self.W_o, self.U_o, self.b_o,
]
if weights is not None:
self.set_weights(weights)
def _step(self,
xi_t, xf_t, xo_t, xc_t, mask_tm1,
h_tm1, c_tm1,
u_i, u_f, u_o, u_c):
h_mask_tm1 = mask_tm1 * h_tm1
c_mask_tm1 = mask_tm1 * c_tm1
i_t = self.inner_activation(xi_t + T.dot(h_mask_tm1, u_i))
f_t = self.inner_activation(xf_t + T.dot(h_mask_tm1, u_f))
c_t = f_t * c_mask_tm1 + i_t * self.activation(xc_t + T.dot(h_mask_tm1, u_c))
o_t = self.inner_activation(xo_t + T.dot(h_mask_tm1, u_o))
h_t = o_t * self.activation(c_t)
return h_t, c_t
def get_output(self, train=False):
X = self.get_input(train)
padded_mask = self.get_padded_shuffled_mask(train, X, pad=1)
X = X.dimshuffle((1, 0, 2))
xi = T.dot(X, self.W_i) + self.b_i
xf = T.dot(X, self.W_f) + self.b_f
xc = T.dot(X, self.W_c) + self.b_c
xo = T.dot(X, self.W_o) + self.b_o
[outputs, memories], updates = theano.scan(
self._step,
sequences=[xi, xf, xo, xc, padded_mask],
outputs_info=[
T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1),
T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1)
],
non_sequences=[self.U_i, self.U_f, self.U_o, self.U_c],
truncate_gradient=self.truncate_gradient)
if self.return_sequences:
return outputs.dimshuffle((1, 0, 2))
return outputs[-1]
def get_config(self):
return {"name": self.__class__.__name__,
"input_dim": self.input_dim,
"output_dim": self.output_dim,
"init": self.init.__name__,
"inner_init": self.inner_init.__name__,
"forget_bias_init": self.forget_bias_init.__name__,
"activation": self.activation.__name__,
"inner_activation": self.inner_activation.__name__,
"truncate_gradient": self.truncate_gradient,
"return_sequences": self.return_sequences}
class JZS1(Recurrent):
'''
Evolved recurrent neural network architectures from the evaluation of thousands
of models, serving as alternatives to LSTMs and GRUs. See Jozefowicz et al. 2015.
This corresponds to the `MUT1` architecture described in the paper.
Takes inputs with shape:
(nb_samples, max_sample_length (samples shorter than this are padded with zeros at the end), input_dim)
and returns outputs with shape:
if not return_sequences:
(nb_samples, output_dim)
if return_sequences:
(nb_samples, max_sample_length, output_dim)
References:
An Empirical Exploration of Recurrent Network Architectures
http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf
'''
def __init__(self, input_dim, output_dim=128,
init='glorot_uniform', inner_init='orthogonal',
activation='tanh', inner_activation='sigmoid',
weights=None, truncate_gradient=-1, return_sequences=False):
super(JZS1, self).__init__()
self.input_dim = input_dim
self.output_dim = output_dim
self.truncate_gradient = truncate_gradient
self.return_sequences = return_sequences
self.init = initializations.get(init)
self.inner_init = initializations.get(inner_init)
self.activation = activations.get(activation)
self.inner_activation = activations.get(inner_activation)
self.input = T.tensor3()
self.W_z = self.init((self.input_dim, self.output_dim))
self.b_z = shared_zeros((self.output_dim))
self.W_r = self.init((self.input_dim, self.output_dim))
self.U_r = self.inner_init((self.output_dim, self.output_dim))
self.b_r = shared_zeros((self.output_dim))
self.U_h = self.inner_init((self.output_dim, self.output_dim))
self.b_h = shared_zeros((self.output_dim))
# P_h used to project X onto different dimension, using sparse random projections
if self.input_dim == self.output_dim:
self.Pmat = theano.shared(np.identity(self.output_dim, dtype=theano.config.floatX), name=None)
else:
P = np.random.binomial(1, 0.5, size=(self.input_dim, self.output_dim)).astype(theano.config.floatX) * 2 - 1
P = 1 / np.sqrt(self.input_dim) * P
self.Pmat = theano.shared(P, name=None)
self.params = [
self.W_z, self.b_z,
self.W_r, self.U_r, self.b_r,
self.U_h, self.b_h,
self.Pmat
]
if weights is not None:
self.set_weights(weights)
def _step(self,
xz_t, xr_t, xh_t, mask_tm1,
h_tm1,
u_r, u_h):
h_mask_tm1 = mask_tm1 * h_tm1
z = self.inner_activation(xz_t)
r = self.inner_activation(xr_t + T.dot(h_mask_tm1, u_r))
hh_t = self.activation(xh_t + T.dot(r * h_mask_tm1, u_h))
h_t = hh_t * z + h_mask_tm1 * (1 - z)
return h_t
def get_output(self, train=False):
X = self.get_input(train)
padded_mask = self.get_padded_shuffled_mask(train, X, pad=1)
X = X.dimshuffle((1, 0, 2))
x_z = T.dot(X, self.W_z) + self.b_z
x_r = T.dot(X, self.W_r) + self.b_r
x_h = T.tanh(T.dot(X, self.Pmat)) + self.b_h
outputs, updates = theano.scan(
self._step,
sequences=[x_z, x_r, x_h, padded_mask],
outputs_info=T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1),
non_sequences=[self.U_r, self.U_h],
truncate_gradient=self.truncate_gradient)
if self.return_sequences:
return outputs.dimshuffle((1, 0, 2))
return outputs[-1]
def get_config(self):
return {"name": self.__class__.__name__,
"input_dim": self.input_dim,
"output_dim": self.output_dim,
"init": self.init.__name__,
"inner_init": self.inner_init.__name__,
"activation": self.activation.__name__,
"inner_activation": self.inner_activation.__name__,
"truncate_gradient": self.truncate_gradient,
"return_sequences": self.return_sequences}
class JZS2(Recurrent):
'''
Evolved recurrent neural network architectures from the evaluation of thousands
of models, serving as alternatives to LSTMs and GRUs. See Jozefowicz et al. 2015.
This corresponds to the `MUT2` architecture described in the paper.
Takes inputs with shape:
(nb_samples, max_sample_length (samples shorter than this are padded with zeros at the end), input_dim)
and returns outputs with shape:
if not return_sequences:
(nb_samples, output_dim)
if return_sequences:
(nb_samples, max_sample_length, output_dim)
References:
An Empirical Exploration of Recurrent Network Architectures
http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf
'''
def __init__(self, input_dim, output_dim=128,
init='glorot_uniform', inner_init='orthogonal',
activation='tanh', inner_activation='sigmoid',
weights=None, truncate_gradient=-1, return_sequences=False):
super(JZS2, self).__init__()
self.input_dim = input_dim
self.output_dim = output_dim
self.truncate_gradient = truncate_gradient
self.return_sequences = return_sequences
self.init = initializations.get(init)
self.inner_init = initializations.get(inner_init)
self.activation = activations.get(activation)
self.inner_activation = activations.get(inner_activation)
self.input = T.tensor3()
self.W_z = self.init((self.input_dim, self.output_dim))
self.U_z = self.inner_init((self.output_dim, self.output_dim))
self.b_z = shared_zeros((self.output_dim))
self.U_r = self.inner_init((self.output_dim, self.output_dim))
self.b_r = shared_zeros((self.output_dim))
self.W_h = self.init((self.input_dim, self.output_dim))
self.U_h = self.inner_init((self.output_dim, self.output_dim))
self.b_h = shared_zeros((self.output_dim))
# P_h used to project X onto different dimension, using sparse random projections
if self.input_dim == self.output_dim:
self.Pmat = theano.shared(np.identity(self.output_dim, dtype=theano.config.floatX), name=None)
else:
P = np.random.binomial(1, 0.5, size=(self.input_dim, self.output_dim)).astype(theano.config.floatX) * 2 - 1
P = 1 / np.sqrt(self.input_dim) * P
self.Pmat = theano.shared(P, name=None)
self.params = [
self.W_z, self.U_z, self.b_z,
self.U_r, self.b_r,
self.W_h, self.U_h, self.b_h,
self.Pmat
]
if weights is not None:
self.set_weights(weights)
def _step(self,
xz_t, xr_t, xh_t, mask_tm1,
h_tm1,
u_z, u_r, u_h):
h_mask_tm1 = mask_tm1 * h_tm1
z = self.inner_activation(xz_t + T.dot(h_mask_tm1, u_z))
r = self.inner_activation(xr_t + T.dot(h_mask_tm1, u_r))
hh_t = self.activation(xh_t + T.dot(r * h_mask_tm1, u_h))
h_t = hh_t * z + h_mask_tm1 * (1 - z)
return h_t
def get_output(self, train=False):
X = self.get_input(train)
padded_mask = self.get_padded_shuffled_mask(train, X, pad=1)
X = X.dimshuffle((1, 0, 2))
x_z = T.dot(X, self.W_z) + self.b_z
x_r = T.dot(X, self.Pmat) + self.b_r
x_h = T.dot(X, self.W_h) + self.b_h
outputs, updates = theano.scan(
self._step,
sequences=[x_z, x_r, x_h, padded_mask],
outputs_info=T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1),
non_sequences=[self.U_z, self.U_r, self.U_h],
truncate_gradient=self.truncate_gradient)
if self.return_sequences:
return outputs.dimshuffle((1, 0, 2))
return outputs[-1]
def get_config(self):
return {"name": self.__class__.__name__,
"input_dim": self.input_dim,
"output_dim": self.output_dim,
"init": self.init.__name__,
"inner_init": self.inner_init.__name__,
"activation": self.activation.__name__,
"inner_activation": self.inner_activation.__name__,
"truncate_gradient": self.truncate_gradient,
"return_sequences": self.return_sequences}
class JZS3(Recurrent):
'''
Evolved recurrent neural network architectures from the evaluation of thousands
of models, serving as alternatives to LSTMs and GRUs. See Jozefowicz et al. 2015.
This corresponds to the `MUT3` architecture described in the paper.
Takes inputs with shape:
(nb_samples, max_sample_length (samples shorter than this are padded with zeros at the end), input_dim)
and returns outputs with shape:
if not return_sequences:
(nb_samples, output_dim)
if return_sequences:
(nb_samples, max_sample_length, output_dim)
References:
An Empirical Exploration of Recurrent Network Architectures
http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf
'''
def __init__(self, input_dim, output_dim=128,
init='glorot_uniform', inner_init='orthogonal',
activation='tanh', inner_activation='sigmoid',
weights=None, truncate_gradient=-1, return_sequences=False):
super(JZS3, self).__init__()
self.input_dim = input_dim
self.output_dim = output_dim
self.truncate_gradient = truncate_gradient
self.return_sequences = return_sequences
self.init = initializations.get(init)
self.inner_init = initializations.get(inner_init)
self.activation = activations.get(activation)
self.inner_activation = activations.get(inner_activation)
self.input = T.tensor3()
self.W_z = self.init((self.input_dim, self.output_dim))
self.U_z = self.inner_init((self.output_dim, self.output_dim))
self.b_z = shared_zeros((self.output_dim))
self.W_r = self.init((self.input_dim, self.output_dim))
self.U_r = self.inner_init((self.output_dim, self.output_dim))
self.b_r = shared_zeros((self.output_dim))
self.W_h = self.init((self.input_dim, self.output_dim))
self.U_h = self.inner_init((self.output_dim, self.output_dim))
self.b_h = shared_zeros((self.output_dim))
self.params = [
self.W_z, self.U_z, self.b_z,
self.W_r, self.U_r, self.b_r,
self.W_h, self.U_h, self.b_h,
]
if weights is not None:
self.set_weights(weights)
def _step(self,
xz_t, xr_t, xh_t, mask_tm1,
h_tm1,
u_z, u_r, u_h):
h_mask_tm1 = mask_tm1 * h_tm1
z = self.inner_activation(xz_t + T.dot(T.tanh(h_mask_tm1), u_z))
r = self.inner_activation(xr_t + T.dot(h_mask_tm1, u_r))
hh_t = self.activation(xh_t + T.dot(r * h_mask_tm1, u_h))
h_t = hh_t * z + h_mask_tm1 * (1 - z)
return h_t
def get_output(self, train=False):
X = self.get_input(train)
padded_mask = self.get_padded_shuffled_mask(train, X, pad=1)
X = X.dimshuffle((1, 0, 2))
x_z = T.dot(X, self.W_z) + self.b_z
x_r = T.dot(X, self.W_r) + self.b_r
x_h = T.dot(X, self.W_h) + self.b_h
outputs, updates = theano.scan(
self._step,
sequences=[x_z, x_r, x_h, padded_mask],
outputs_info=T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1),
non_sequences=[self.U_z, self.U_r, self.U_h],
truncate_gradient=self.truncate_gradient
)
if self.return_sequences:
return outputs.dimshuffle((1, 0, 2))
return outputs[-1]
def get_config(self):
return {"name": self.__class__.__name__,
"input_dim": self.input_dim,
"output_dim": self.output_dim,
"init": self.init.__name__,
"inner_init": self.inner_init.__name__,
"activation": self.activation.__name__,
"inner_activation": self.inner_activation.__name__,
"truncate_gradient": self.truncate_gradient,
"return_sequences": self.return_sequences}
| stephenbalaban/keras | keras/layers/recurrent.py | Python | mit | 29,818 | 0.001375 |
# Logic for listing and running tests.
#
# * @author Ben Gardner October 2009
# * @author Guy Maurel October 2015
# * @author Matthew Woehlke June 2018
#
import argparse
import os
import subprocess
import sys
from .ansicolor import printc
from .config import config, all_tests, FAIL_ATTRS, PASS_ATTRS, SKIP_ATTRS
from .failure import (Failure, MismatchFailure, UnexpectedlyPassingFailure,
UnstableFailure)
from .test import FormatTest
# -----------------------------------------------------------------------------
def _add_common_arguments(parser):
parser.add_argument('-c', '--show-commands', action='store_true',
help='show commands')
parser.add_argument('-v', '--verbose', action='store_true',
help='show detailed test information')
parser.add_argument('-d', '--diff', action='store_true',
help='show diff on failure')
parser.add_argument('-x', '--xdiff', action='store_true',
help='show diff on expected failure')
parser.add_argument('-g', '--debug', action='store_true',
help='generate debug files (.log, .unc)')
parser.add_argument('-e', '--executable', type=str, required=True,
metavar='PATH',
help='uncrustify executable to test')
parser.add_argument('--git', type=str, default=config.git_exe,
metavar='PATH',
help='git executable to use to generate diffs')
parser.add_argument('--result-dir', type=str, default=os.getcwd(),
metavar='DIR',
help='location to which results will be written')
# -----------------------------------------------------------------------------
def add_test_arguments(parser):
_add_common_arguments(parser)
parser.add_argument("name", type=str, metavar='NAME')
parser.add_argument("--lang", type=str, required=True)
parser.add_argument("--input", type=str, required=True)
parser.add_argument("--config", type=str, required=True)
parser.add_argument("--expected", type=str, required=True)
parser.add_argument("--rerun-config", type=str, metavar='INPUT')
parser.add_argument("--rerun-expected", type=str, metavar='CONFIG')
parser.add_argument("--xfail", action='store_true')
# -----------------------------------------------------------------------------
def add_source_tests_arguments(parser):
_add_common_arguments(parser)
parser.add_argument('-p', '--show-all', action='store_true',
help='show passed/skipped tests')
# -----------------------------------------------------------------------------
def add_format_tests_arguments(parser):
_add_common_arguments(parser)
parser.add_argument('-p', '--show-all', action='store_true',
help='show passed/skipped tests')
parser.add_argument('-r', '--select', metavar='CASE(S)', type=str,
help='select tests to be executed')
parser.add_argument('tests', metavar='TEST', type=str, nargs='*',
default=all_tests,
help='test(s) to run (default all)')
# Arguments for generating the CTest script; users should not use these
# directly
parser.add_argument("--write-ctest", type=str, help=argparse.SUPPRESS)
parser.add_argument("--cmake-config", type=str, help=argparse.SUPPRESS)
parser.add_argument("--python", type=str, help=argparse.SUPPRESS)
# -----------------------------------------------------------------------------
def parse_args(parser):
args = parser.parse_args()
if args.git is not None:
config.git_exe = args.git
config.uncrustify_exe = args.executable
if not os.path.exists(config.uncrustify_exe):
msg = 'Specified uncrustify executable {!r} does not exist'.format(
config.uncrustify_exe)
printc("FAILED: ", msg, **FAIL_ATTRS)
sys.exit(-1)
# Do a sanity check on the executable
try:
with open(os.devnull, 'w') as bitbucket:
subprocess.check_call([config.uncrustify_exe, '--help'],
stdout=bitbucket)
except Exception as exc:
msg = ('Specified uncrustify executable {!r} ' +
'does not appear to be usable: {!s}').format(
config.uncrustify_exe, exc)
printc("FAILED: ", msg, **FAIL_ATTRS)
sys.exit(-1)
return args
# -----------------------------------------------------------------------------
def run_tests(tests, args, selector=None):
pass_count = 0
fail_count = 0
mismatch_count = 0
unstable_count = 0
unexpectedly_passing_count = 0
for test in tests:
if selector is not None and not selector.test(test.test_name):
if args.show_all:
printc("SKIPPED: ", test.test_name, **SKIP_ATTRS)
continue
try:
test.run(args)
if args.show_all:
outcome = 'XFAILED' if test.test_xfail else 'PASSED'
printc('{}: '.format(outcome), test.test_name, **PASS_ATTRS)
pass_count += 1
except UnstableFailure:
unstable_count += 1
except MismatchFailure:
mismatch_count += 1
except UnexpectedlyPassingFailure:
unexpectedly_passing_count += 1
except Failure:
fail_count += 1
return {
'passing': pass_count,
'failing': fail_count,
'mismatch': mismatch_count,
'unstable': unstable_count,
'xpass': unexpectedly_passing_count
}
# -----------------------------------------------------------------------------
def report(counts):
total = sum(counts.values())
print('{passing} / {total} tests passed'.format(total=total, **counts))
if counts['failing'] > 0:
printc('{failing} tests failed to execute'.format(**counts),
**FAIL_ATTRS)
if counts['mismatch'] > 0:
printc(
'{mismatch} tests did not match the expected output'.format(
**counts),
**FAIL_ATTRS)
if counts['unstable'] > 0:
printc('{unstable} tests were unstable'.format(**counts),
**FAIL_ATTRS)
if counts['xpass'] > 0:
printc('{xpass} tests passed but were expected to fail'
.format(**counts), **FAIL_ATTRS)
# -----------------------------------------------------------------------------
def read_format_tests(filename, group):
tests = []
print("Processing " + filename)
with open(filename, 'rt') as f:
for line_number, line in enumerate(f, 1):
line = line.strip()
if not len(line):
continue
if line.startswith('#'):
continue
test = FormatTest()
test.build_from_declaration(line, group, line_number)
tests.append(test)
return tests
# -----------------------------------------------------------------------------
def fixup_ctest_path(path, config):
if config is None:
return path
dirname, basename = os.path.split(path)
if os.path.basename(dirname).lower() == config.lower():
dirname, junk = os.path.split(dirname)
return os.path.join(dirname, '${CTEST_CONFIGURATION_TYPE}', basename)
return path
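# Illustrative check of fixup_ctest_path() above (POSIX-style paths; the
# directory names are made up):
def _example_fixup_ctest_path():
    assert fixup_ctest_path('/build/Release/uncrustify', 'Release') == \
        '/build/${CTEST_CONFIGURATION_TYPE}/uncrustify'
    assert fixup_ctest_path('/build/uncrustify', None) == '/build/uncrustify'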
| nivekkagicom/uncrustify | tests/test_uncrustify/utilities.py | Python | gpl-2.0 | 7,537 | 0.000133 |
import itertools
from unittest import TestCase
from turgles.buffer import ChunkBuffer, ShapeBuffer, BufferManager
from turgles.memory import TURTLE_MODEL_DATA_SIZE, TURTLE_COLOR_DATA_SIZE
MODEL_ZEROS = [0] * TURTLE_MODEL_DATA_SIZE
MODEL_ONES = [1] * TURTLE_MODEL_DATA_SIZE
MODEL_TWOS = [2] * TURTLE_MODEL_DATA_SIZE
MODEL_THREES = [3] * TURTLE_MODEL_DATA_SIZE
COLOR_ZEROS = [0] * TURTLE_COLOR_DATA_SIZE
COLOR_ONES = [1] * TURTLE_COLOR_DATA_SIZE
COLOR_TWOS = [2] * TURTLE_COLOR_DATA_SIZE
COLOR_THREES = [3] * TURTLE_COLOR_DATA_SIZE
class ChunkBufferTestCase(TestCase):
def assert_turtle_data(self, buffer, index, data):
offset = index * TURTLE_MODEL_DATA_SIZE
slice = buffer.data[offset:offset + TURTLE_MODEL_DATA_SIZE]
self.assertEqual(list(slice), data)
def test_sized_correctly(self):
buffer = ChunkBuffer(4, TURTLE_MODEL_DATA_SIZE)
self.assertEqual(len(buffer.data), 4 * TURTLE_MODEL_DATA_SIZE)
self.assertEqual(buffer.count, 0)
self.assertEqual(buffer.size, 4)
def test_new(self):
buffer = ChunkBuffer(4, TURTLE_MODEL_DATA_SIZE)
data = buffer.new()
self.assertEqual(len(data), TURTLE_MODEL_DATA_SIZE)
self.assertEqual(list(data), MODEL_ZEROS)
self.assertEqual(buffer.count, 1)
def test_new_with_init(self):
buffer = ChunkBuffer(4, TURTLE_MODEL_DATA_SIZE)
init = list(reversed(range(TURTLE_MODEL_DATA_SIZE)))
data = buffer.new(init)
self.assertEqual(len(data), TURTLE_MODEL_DATA_SIZE)
self.assertEqual(list(data), init)
    def test_multiple_new(self):
buffer = ChunkBuffer(4, TURTLE_MODEL_DATA_SIZE)
buffer.new()
self.assertEqual(buffer.count, 1)
buffer.new()
self.assertEqual(buffer.count, 2)
def test_new_triggers_resize(self):
buffer = ChunkBuffer(2, TURTLE_MODEL_DATA_SIZE)
buffer.new()
buffer.new()
self.assertEqual(buffer.size, 2)
self.assertEqual(buffer.count, 2)
buffer.new()
self.assertEqual(buffer.size, 4)
self.assertEqual(buffer.count, 3)
def test_resize(self):
buffer = ChunkBuffer(2, TURTLE_MODEL_DATA_SIZE)
buffer.new(MODEL_ONES)
buffer.new(MODEL_TWOS)
buffer.resize(4)
self.assertEqual(buffer.size, 4)
self.assertEqual(len(buffer.data), 4 * TURTLE_MODEL_DATA_SIZE)
self.assert_turtle_data(buffer, 0, MODEL_ONES)
self.assert_turtle_data(buffer, 1, MODEL_TWOS)
self.assert_turtle_data(buffer, 2, MODEL_ZEROS)
self.assert_turtle_data(buffer, 3, MODEL_ZEROS)
def test_remove_end(self):
buffer = ChunkBuffer(4, TURTLE_MODEL_DATA_SIZE)
buffer.new(MODEL_ONES)
buffer.new(MODEL_TWOS)
buffer.new(MODEL_THREES)
moved = buffer.remove(2)
self.assertEqual(buffer.count, 2)
self.assertIsNone(moved)
self.assert_turtle_data(buffer, 0, MODEL_ONES)
self.assert_turtle_data(buffer, 1, MODEL_TWOS)
self.assert_turtle_data(buffer, 2, MODEL_ZEROS)
def test_remove_start(self):
buffer = ChunkBuffer(4, TURTLE_MODEL_DATA_SIZE)
buffer.new(MODEL_ONES)
buffer.new(MODEL_TWOS)
buffer.new(MODEL_THREES)
moved = buffer.remove(0)
self.assertEqual(buffer.count, 2)
self.assertEqual(moved, 2)
self.assert_turtle_data(buffer, 0, MODEL_THREES)
self.assert_turtle_data(buffer, 1, MODEL_TWOS)
self.assert_turtle_data(buffer, 2, MODEL_ZEROS)
def test_remove_middle(self):
buffer = ChunkBuffer(4, TURTLE_MODEL_DATA_SIZE)
buffer.new(MODEL_ONES)
buffer.new(MODEL_TWOS)
buffer.new(MODEL_THREES)
moved = buffer.remove(1)
self.assertEqual(buffer.count, 2)
self.assertEqual(moved, 2)
self.assert_turtle_data(buffer, 0, MODEL_ONES)
self.assert_turtle_data(buffer, 1, MODEL_THREES)
self.assert_turtle_data(buffer, 2, MODEL_ZEROS)
def test_remove_then_add(self):
buffer = ChunkBuffer(4, TURTLE_MODEL_DATA_SIZE)
buffer.new(MODEL_ONES)
buffer.new(MODEL_TWOS)
buffer.new(MODEL_THREES)
buffer.remove(2)
self.assertEqual(buffer.count, 2)
# check data was zeroed
self.assert_turtle_data(buffer, 2, MODEL_ZEROS)
buffer.new([4] * TURTLE_MODEL_DATA_SIZE)
self.assertEqual(buffer.count, 3)
# check reuses previously removed turtle's space
self.assert_turtle_data(buffer, 2, [4] * TURTLE_MODEL_DATA_SIZE)
def make_slices(self, size, array_size=20):
buffer = ChunkBuffer(array_size, TURTLE_MODEL_DATA_SIZE)
for i in range(array_size):
buffer.new([i+1] * TURTLE_MODEL_DATA_SIZE)
return buffer.slice(size)
def test_slice_size_multiple(self):
slices = self.make_slices(10, 20)
size, slice = next(slices)
self.assertEqual(size, 10)
self.assertEqual(
list(slice[0:TURTLE_MODEL_DATA_SIZE]),
[1] * TURTLE_MODEL_DATA_SIZE
)
size, slice = next(slices)
self.assertEqual(size, 10)
self.assertEqual(
list(slice[0:TURTLE_MODEL_DATA_SIZE]),
[11] * TURTLE_MODEL_DATA_SIZE
)
with self.assertRaises(StopIteration):
next(slices)
def test_slice_size_remainder(self):
slices = self.make_slices(15, 20)
size, slice = next(slices)
self.assertEqual(size, 15)
self.assertEqual(
list(slice[0:TURTLE_MODEL_DATA_SIZE]),
[1] * TURTLE_MODEL_DATA_SIZE
)
size, slice = next(slices)
self.assertEqual(size, 5)
self.assertEqual(
list(slice[0:TURTLE_MODEL_DATA_SIZE]),
[16] * TURTLE_MODEL_DATA_SIZE
)
with self.assertRaises(StopIteration):
next(slices)
def test_slice_size_only_one(self):
slices = self.make_slices(20, 10)
size, slice = next(slices)
self.assertEqual(size, 10)
self.assertEqual(
list(slice[0:TURTLE_MODEL_DATA_SIZE]),
[1] * TURTLE_MODEL_DATA_SIZE
)
with self.assertRaises(StopIteration):
next(slices)
class ShapeBufferTestCase(TestCase):
def assert_id_map(self, buffer, id, index):
self.assertIn(id, buffer.id_to_index)
self.assertIn(index, buffer.index_to_id)
self.assertEqual(buffer.id_to_index[id], index)
self.assertEqual(buffer.index_to_id[index], id)
def assert_turtle_data(self, buffer, id, index, model, color):
if id:
self.assert_id_map(buffer, id, index)
model_data = buffer.model.get(index)
color_data = buffer.color.get(index)
self.assertEqual(list(model_data), model)
self.assertEqual(list(color_data), color)
def test_new(self):
buffer = ShapeBuffer('shape', 4)
model, color = buffer.new(0)
self.assert_turtle_data(buffer, 0, 0, MODEL_ZEROS, COLOR_ZEROS)
self.assertEqual(buffer.count, 1)
def test_new_bad_id(self):
buffer = ShapeBuffer('shape', 4)
buffer.new(0)
with self.assertRaises(AssertionError):
buffer.new(0)
def test_new_with_init(self):
buffer = ShapeBuffer('shape', 4)
model, color = buffer.new(0, MODEL_ONES, COLOR_TWOS)
self.assert_turtle_data(buffer, 0, 0, MODEL_ONES, COLOR_TWOS)
    def test_multiple_new(self):
buffer = ShapeBuffer('shape', 4)
buffer.new(0)
self.assert_id_map(buffer, 0, 0)
self.assertEqual(buffer.count, 1)
buffer.new(1)
self.assert_id_map(buffer, 1, 1)
self.assertEqual(buffer.count, 2)
def test_remove_id_end(self):
buffer = ShapeBuffer('shape', 4)
buffer.new(0, MODEL_ONES, COLOR_ONES)
buffer.new(1, MODEL_TWOS, COLOR_TWOS)
buffer.new(2, MODEL_THREES, COLOR_THREES)
self.assert_turtle_data(buffer, 2, 2, MODEL_THREES, COLOR_THREES)
buffer.remove(2)
self.assertEqual(buffer.count, 2)
self.assert_turtle_data(buffer, 0, 0, MODEL_ONES, COLOR_ONES)
self.assert_turtle_data(buffer, 1, 1, MODEL_TWOS, COLOR_TWOS)
# check last one zeroed
self.assert_turtle_data(buffer, None, 2, MODEL_ZEROS, COLOR_ZEROS)
self.assertNotIn(2, buffer.id_to_index)
self.assertNotIn(2, buffer.index_to_id)
def test_remove_id_start(self):
buffer = ShapeBuffer('shape', 4)
buffer.new(0, MODEL_ONES, COLOR_ONES)
buffer.new(1, MODEL_TWOS, COLOR_TWOS)
buffer.new(2, MODEL_THREES, COLOR_THREES)
self.assert_turtle_data(buffer, 0, 0, MODEL_ONES, COLOR_ONES)
buffer.remove(0)
self.assertEqual(buffer.count, 2)
# check last one has been copied to 0
self.assert_turtle_data(buffer, 2, 0, MODEL_THREES, COLOR_THREES)
self.assert_turtle_data(buffer, 1, 1, MODEL_TWOS, COLOR_TWOS)
# check last one zeroed
self.assert_turtle_data(buffer, None, 2, MODEL_ZEROS, COLOR_ZEROS)
self.assertNotIn(0, buffer.id_to_index)
self.assertNotIn(2, buffer.index_to_id)
def test_remove_id_middle(self):
buffer = ShapeBuffer('shape', 4)
buffer.new(0, MODEL_ONES, COLOR_ONES)
buffer.new(1, MODEL_TWOS, COLOR_TWOS)
buffer.new(2, MODEL_THREES, COLOR_THREES)
self.assert_turtle_data(buffer, 1, 1, MODEL_TWOS, COLOR_TWOS)
buffer.remove(1)
self.assertEqual(buffer.count, 2)
# check last has been copied to 1
self.assert_turtle_data(buffer, 0, 0, MODEL_ONES, COLOR_ONES)
self.assert_turtle_data(buffer, 2, 1, MODEL_THREES, COLOR_THREES)
# check last one zeroed
self.assert_turtle_data(buffer, None, 2, MODEL_ZEROS, COLOR_ZEROS)
self.assertNotIn(1, buffer.id_to_index)
self.assertNotIn(2, buffer.index_to_id)
class BufferManagerTestCase(TestCase):
def test_get_buffer(self):
manager = BufferManager(4)
self.assertEqual(len(manager.buffers), 0)
buffer1 = manager.get_buffer('classic')
self.assertEqual(len(manager.buffers), 1)
self.assertIn('classic', manager.buffers)
self.assertEqual(buffer1.size, 4)
buffer2 = manager.get_buffer('classic')
self.assertEqual(len(manager.buffers), 1)
self.assertIs(buffer1, buffer2)
def test_create_turtle(self):
manager = BufferManager(4)
model, color = manager.create_turtle(
0, 'classic', MODEL_ONES, COLOR_ONES)
self.assertEqual(list(model), MODEL_ONES)
self.assertEqual(list(color), COLOR_ONES)
self.assertEqual(len(manager.buffers), 1)
self.assertIn('classic', manager.buffers)
self.assertEqual(manager.buffers['classic'].size, 4)
def test_set_shape(self):
manager = BufferManager(4)
model, color = manager.create_turtle(
0, 'classic', MODEL_ONES, COLOR_ONES)
model2, color2 = manager.set_shape(0, 'turtle')
self.assertEqual(len(manager.buffers), 2)
self.assertIn('turtle', manager.buffers)
self.assertEqual(list(model2), MODEL_ONES)
self.assertEqual(list(color2), COLOR_ONES)
def test_destroy_turtle(self):
manager = BufferManager(4)
model, color = manager.create_turtle(
0, 'classic', MODEL_ONES, COLOR_ONES)
manager.destroy_turtle(0)
self.assertEqual(list(model), MODEL_ZEROS)
self.assertEqual(list(color), COLOR_ZEROS)
self.assertEqual(manager.buffers['classic'].count, 0)
| AllTheWayDown/turgles | turgles/tests/test_buffers.py | Python | mit | 11,668 | 0.000086 |
# -*- encoding: utf-8 -*-
###########################################################################
# Module Writen to OpenERP, Open Source Management Solution
# Copyright (C) OpenERP Venezuela (<http://openerp.com.ve>).
# All Rights Reserved
###############Credits######################################################
# Coded by: Humberto Arocha [email protected]
# Angelica Barrios [email protected]
# Jordi Esteve <[email protected]>
# Planified by: Humberto Arocha
# Finance by: LUBCAN COL S.A.S http://www.lubcancol.com
# Audited by: Humberto Arocha [email protected]
#############################################################################
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
import wizard
| kailIII/emaresa | trunk.pe/account_financial_report/wizard/__init__.py | Python | agpl-3.0 | 1,490 | 0.000671 |
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import sys
import mock
import time
import random
import shutil
import contextlib
import tempfile
import binascii
import platform
import select
import datetime
from io import BytesIO
from subprocess import Popen, PIPE
from dateutil.tz import tzlocal
import unittest
from nose.tools import assert_equal
import botocore.loaders
import botocore.session
from botocore.awsrequest import AWSResponse
from botocore.compat import (
parse_qs, six, urlparse, HAS_CRT
)
from botocore import utils
from botocore import credentials
from botocore.stub import Stubber
_LOADER = botocore.loaders.Loader()
def skip_unless_has_memory_collection(cls):
"""Class decorator to skip tests that require memory collection.
Any test that uses memory collection (such as the resource leak tests)
can decorate their class with skip_unless_has_memory_collection to
indicate that if the platform does not support memory collection
the tests should be skipped.
"""
if platform.system() not in ['Darwin', 'Linux']:
return unittest.skip('Memory tests only supported on mac/linux.')(cls)
return cls
def skip_if_windows(reason):
"""Decorator to skip tests that should not be run on windows.
Example usage:
@skip_if_windows("Not valid")
def test_some_non_windows_stuff(self):
self.assertEqual(...)
"""
def decorator(func):
return unittest.skipIf(
platform.system() not in ['Darwin', 'Linux'], reason)(func)
return decorator
def requires_crt(reason=None):
if reason is None:
reason = "Test requires awscrt to be installed"
def decorator(func):
return unittest.skipIf(not HAS_CRT, reason)(func)
return decorator
def random_chars(num_chars):
"""Returns random hex characters.
Useful for creating resources with random names.
"""
return binascii.hexlify(os.urandom(int(num_chars / 2))).decode('ascii')
def create_session(**kwargs):
# Create a Session object. By default,
# the _LOADER object is used as the loader
# so that we reused the same models across tests.
session = botocore.session.Session(**kwargs)
session.register_component('data_loader', _LOADER)
session.set_config_variable('credentials_file', 'noexist/foo/botocore')
return session
@contextlib.contextmanager
def temporary_file(mode):
"""This is a cross platform temporary file creation.
    tempfile.NamedTemporaryFile on Windows creates a secure temp file
that can't be read by other processes and can't be opened a second time.
For tests, we generally *want* them to be read multiple times.
The test fixture writes the temp file contents, the test reads the
temp file.
"""
temporary_directory = tempfile.mkdtemp()
basename = 'tmpfile-%s-%s' % (int(time.time()), random.randint(1, 1000))
full_filename = os.path.join(temporary_directory, basename)
open(full_filename, 'w').close()
try:
with open(full_filename, mode) as f:
yield f
finally:
shutil.rmtree(temporary_directory)
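# Illustrative usage of temporary_file() above: the fixture writes through
# the yielded handle, and a test can reopen the same path by name because it
# is an ordinary file in a throwaway directory. The file contents here are
# made up.
def _example_temporary_file():
    with temporary_file('w') as f:
        f.write('[default]\naws_access_key_id = foo\n')
        f.flush()
        with open(f.name) as g:
            assert g.read().startswith('[default]')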
class BaseEnvVar(unittest.TestCase):
def setUp(self):
# Automatically patches out os.environ for you
# and gives you a self.environ attribute that simulates
# the environment. Also will automatically restore state
# for you in tearDown()
self.environ = {}
self.environ_patch = mock.patch('os.environ', self.environ)
self.environ_patch.start()
def tearDown(self):
self.environ_patch.stop()
class BaseSessionTest(BaseEnvVar):
"""Base class used to provide credentials.
    This class can be used as a base class by tests that want to use a real
    session class but want to be completely isolated from the
external environment (including environment variables).
This class will also set credential vars so you can make fake
requests to services.
"""
def setUp(self, **environ):
super(BaseSessionTest, self).setUp()
self.environ['AWS_ACCESS_KEY_ID'] = 'access_key'
self.environ['AWS_SECRET_ACCESS_KEY'] = 'secret_key'
self.environ['AWS_CONFIG_FILE'] = 'no-exist-foo'
self.environ.update(environ)
self.session = create_session()
self.session.config_filename = 'no-exist-foo'
@skip_unless_has_memory_collection
class BaseClientDriverTest(unittest.TestCase):
INJECT_DUMMY_CREDS = False
def setUp(self):
self.driver = ClientDriver()
env = None
if self.INJECT_DUMMY_CREDS:
env = {'AWS_ACCESS_KEY_ID': 'foo',
'AWS_SECRET_ACCESS_KEY': 'bar'}
self.driver.start(env=env)
def cmd(self, *args):
self.driver.cmd(*args)
def send_cmd(self, *args):
self.driver.send_cmd(*args)
def record_memory(self):
self.driver.record_memory()
@property
def memory_samples(self):
return self.driver.memory_samples
def tearDown(self):
self.driver.stop()
class ClientDriver(object):
CLIENT_SERVER = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'cmd-runner'
)
def __init__(self):
self._popen = None
self.memory_samples = []
def _get_memory_with_ps(self, pid):
# It would be better to eventually switch to psutil,
# which should allow us to test on windows, but for now
# we'll just use ps and run on POSIX platforms.
command_list = ['ps', '-p', str(pid), '-o', 'rss']
p = Popen(command_list, stdout=PIPE)
stdout = p.communicate()[0]
        if p.returncode != 0:
raise RuntimeError("Could not retrieve memory")
else:
# Get the RSS from output that looks like this:
# RSS
# 4496
return int(stdout.splitlines()[1].split()[0]) * 1024
def record_memory(self):
mem = self._get_memory_with_ps(self._popen.pid)
self.memory_samples.append(mem)
def start(self, env=None):
"""Start up the command runner process."""
self._popen = Popen([sys.executable, self.CLIENT_SERVER],
stdout=PIPE, stdin=PIPE, env=env)
def stop(self):
"""Shutdown the command runner process."""
self.cmd('exit')
self._popen.wait()
def send_cmd(self, *cmd):
"""Send a command and return immediately.
This is a lower level method than cmd().
This method will instruct the cmd-runner process
to execute a command, but this method will
immediately return. You will need to use
``is_cmd_finished()`` to check that the command
is finished.
This method is useful if you want to record attributes
about the process while an operation is occurring. For
example, if you want to instruct the cmd-runner process
to upload a 1GB file to S3 and you'd like to record
the memory during the upload process, you can use
send_cmd() instead of cmd().
"""
cmd_str = ' '.join(cmd) + '\n'
cmd_bytes = cmd_str.encode('utf-8')
self._popen.stdin.write(cmd_bytes)
self._popen.stdin.flush()
def is_cmd_finished(self):
rlist = [self._popen.stdout.fileno()]
result = select.select(rlist, [], [], 0.01)
if result[0]:
return True
return False
def cmd(self, *cmd):
"""Send a command and block until it finishes.
This method will send a command to the cmd-runner process
to run. It will block until the cmd-runner process is
finished executing the command and sends back a status
response.
"""
self.send_cmd(*cmd)
result = self._popen.stdout.readline().strip()
if result != b'OK':
raise RuntimeError(
"Error from command '%s': %s" % (cmd, result))
# This is added to this file because it's used in both
# the functional and unit tests for cred refresh.
class IntegerRefresher(credentials.RefreshableCredentials):
"""Refreshable credentials to help with testing.
This class makes testing refreshable credentials easier.
It has the following functionality:
* A counter, self.refresh_counter, to indicate how many
times refresh was called.
* A way to specify how many seconds to make credentials
valid.
* Configurable advisory/mandatory refresh.
* An easy way to check consistency. Each time creds are
refreshed, all the cred values are set to the next
incrementing integer. Frozen credentials should always
have this value.
"""
_advisory_refresh_timeout = 2
_mandatory_refresh_timeout = 1
_credentials_expire = 3
def __init__(self, creds_last_for=_credentials_expire,
advisory_refresh=_advisory_refresh_timeout,
mandatory_refresh=_mandatory_refresh_timeout,
refresh_function=None):
expires_in = (
self._current_datetime() +
datetime.timedelta(seconds=creds_last_for))
if refresh_function is None:
refresh_function = self._do_refresh
super(IntegerRefresher, self).__init__(
'0', '0', '0', expires_in,
refresh_function, 'INTREFRESH')
self.creds_last_for = creds_last_for
self.refresh_counter = 0
self._advisory_refresh_timeout = advisory_refresh
self._mandatory_refresh_timeout = mandatory_refresh
def _do_refresh(self):
self.refresh_counter += 1
current = int(self._access_key)
next_id = str(current + 1)
return {
'access_key': next_id,
'secret_key': next_id,
'token': next_id,
'expiry_time': self._seconds_later(self.creds_last_for),
}
def _seconds_later(self, num_seconds):
# We need to guarantee at *least* num_seconds.
# Because this doesn't handle subsecond precision
# we'll round up to the next second.
num_seconds += 1
t = self._current_datetime() + datetime.timedelta(seconds=num_seconds)
return self._to_timestamp(t)
def _to_timestamp(self, datetime_obj):
obj = utils.parse_to_aware_datetime(datetime_obj)
return obj.strftime('%Y-%m-%dT%H:%M:%SZ')
def _current_timestamp(self):
return self._to_timestamp(self._current_datetime())
def _current_datetime(self):
return datetime.datetime.now(tzlocal())
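# Illustrative check of IntegerRefresher's consistency property (a sketch, not
# an original test): each refresh sets access key, secret key and token to the
# same incrementing integer, so frozen credentials should always agree.
#
#     refresher = IntegerRefresher(creds_last_for=1)
#     frozen = refresher.get_frozen_credentials()
#     assert frozen.access_key == frozen.secret_key == frozen.token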
def _urlparse(url):
if isinstance(url, six.binary_type):
# Not really necessary, but it helps to reduce noise on Python 2.x
url = url.decode('utf8')
return urlparse(url)
def assert_url_equal(url1, url2):
parts1 = _urlparse(url1)
parts2 = _urlparse(url2)
# Because the query string ordering isn't relevant, we have to parse
# every single part manually and then handle the query string.
assert_equal(parts1.scheme, parts2.scheme)
assert_equal(parts1.netloc, parts2.netloc)
assert_equal(parts1.path, parts2.path)
assert_equal(parts1.params, parts2.params)
assert_equal(parts1.fragment, parts2.fragment)
assert_equal(parts1.username, parts2.username)
assert_equal(parts1.password, parts2.password)
assert_equal(parts1.hostname, parts2.hostname)
assert_equal(parts1.port, parts2.port)
assert_equal(parse_qs(parts1.query), parse_qs(parts2.query))
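# Example call (illustrative only): these URLs compare equal because
# query-string ordering is irrelevant.
#
#     assert_url_equal('https://example.com/path?a=1&b=2',
#                      'https://example.com/path?b=2&a=1')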
class HTTPStubberException(Exception):
pass
class RawResponse(BytesIO):
    # TODO: There are a few objects similar to this in various tests; let's
# try and consolidate to this one in a future commit.
def stream(self, **kwargs):
contents = self.read()
while contents:
yield contents
contents = self.read()
class BaseHTTPStubber(object):
def __init__(self, obj_with_event_emitter, strict=True):
self.reset()
self._strict = strict
self._obj_with_event_emitter = obj_with_event_emitter
def reset(self):
self.requests = []
self.responses = []
def add_response(self, url='https://example.com', status=200, headers=None,
body=b''):
if headers is None:
headers = {}
raw = RawResponse(body)
response = AWSResponse(url, status, headers, raw)
self.responses.append(response)
@property
def _events(self):
raise NotImplementedError('_events')
def start(self):
self._events.register('before-send', self)
def stop(self):
self._events.unregister('before-send', self)
def __enter__(self):
self.start()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.stop()
def __call__(self, request, **kwargs):
self.requests.append(request)
if self.responses:
response = self.responses.pop(0)
if isinstance(response, Exception):
raise response
else:
return response
elif self._strict:
raise HTTPStubberException('Insufficient responses')
else:
return None
class ClientHTTPStubber(BaseHTTPStubber):
@property
def _events(self):
return self._obj_with_event_emitter.meta.events
class SessionHTTPStubber(BaseHTTPStubber):
@property
def _events(self):
return self._obj_with_event_emitter.get_component('event_emitter')
class ConsistencyWaiterException(Exception):
pass
class ConsistencyWaiter(object):
"""
A waiter class for some check to reach a consistent state.
:type min_successes: int
:param min_successes: The minimum number of successful check calls to
treat the check as stable. Default of 1 success.
:type max_attempts: int
    :param max_attempts: The maximum number of times to attempt calling
the check. Default of 20 attempts.
:type delay: int
:param delay: The number of seconds to delay the next API call after a
failed check call. Default of 5 seconds.
"""
def __init__(self, min_successes=1, max_attempts=20, delay=5,
delay_initial_poll=False):
self.min_successes = min_successes
self.max_attempts = max_attempts
self.delay = delay
self.delay_initial_poll = delay_initial_poll
def wait(self, check, *args, **kwargs):
"""
Wait until the check succeeds the configured number of times
:type check: callable
:param check: A callable that returns True or False to indicate
if the check succeeded or failed.
:type args: list
:param args: Any ordered arguments to be passed to the check.
:type kwargs: dict
:param kwargs: Any keyword arguments to be passed to the check.
"""
attempts = 0
successes = 0
if self.delay_initial_poll:
time.sleep(self.delay)
while attempts < self.max_attempts:
attempts += 1
if check(*args, **kwargs):
successes += 1
if successes >= self.min_successes:
return
else:
time.sleep(self.delay)
fail_msg = self._fail_message(attempts, successes)
raise ConsistencyWaiterException(fail_msg)
def _fail_message(self, attempts, successes):
format_args = (attempts, successes)
return 'Failed after %s attempts, only had %s successes' % format_args
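# Illustrative ConsistencyWaiter usage (a sketch; 'bucket_exists' is a
# hypothetical check callable): wait() keeps calling the check, sleeping
# 'delay' seconds after each failure, until it has passed 'min_successes'
# times or 'max_attempts' calls have been made.
#
#     waiter = ConsistencyWaiter(min_successes=3, max_attempts=20, delay=5)
#     waiter.wait(bucket_exists, 'my-bucket')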
class StubbedSession(botocore.session.Session):
def __init__(self, *args, **kwargs):
super(StubbedSession, self).__init__(*args, **kwargs)
self._cached_clients = {}
self._client_stubs = {}
def create_client(self, service_name, *args, **kwargs):
if service_name not in self._cached_clients:
client = self._create_stubbed_client(service_name, *args, **kwargs)
self._cached_clients[service_name] = client
return self._cached_clients[service_name]
def _create_stubbed_client(self, service_name, *args, **kwargs):
client = super(StubbedSession, self).create_client(
service_name, *args, **kwargs)
stubber = Stubber(client)
self._client_stubs[service_name] = stubber
return client
def stub(self, service_name, *args, **kwargs):
if service_name not in self._client_stubs:
self.create_client(service_name, *args, **kwargs)
return self._client_stubs[service_name]
def activate_stubs(self):
for stub in self._client_stubs.values():
stub.activate()
def verify_stubs(self):
for stub in self._client_stubs.values():
stub.assert_no_pending_responses()
| pplu/botocore | tests/__init__.py | Python | apache-2.0 | 17,369 | 0.000058 |
# -*- coding: UTF-8 -*-
from __future__ import unicode_literals, absolute_import
import os
import platform
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
def get_sudo_username():
"""
Check 'SUDO_USER' var if in environment and return a tuple with True and
value of 'SUDO_USER' if the var in environment or a tuple with False and
'USER' value.
Return tuple.
"""
if 'SUDO_USER' in os.environ:
return True, os.environ['SUDO_USER']
return False, os.environ['USER']
def get_home_dir():
"""
Return path of user's home directory.
Return string.
"""
system = platform.system().lower()
if system == 'linux':
return '/home/'
elif system == 'darwin':
return '/Users/'
elif system == 'windows':
return 'C:/Documents...'
else:
raise Exception("Impossibile individuare il tipo di sistema")
def read_config(filename=None):
"""
Read a config filename into .ini format and return dict of shares.
Keyword arguments:
filename -- the path of config filename (default None)
Return dict.
"""
if not os.path.exists(filename):
        raise IOError('Unable to find the file %s' % filename)
shares = []
config = ConfigParser()
config.read(filename)
for share_items in [config.items(share_title) for share_title in
config.sections()]:
dict_share = {}
for key, value in share_items:
if key == 'hostname' and '@' in value:
hostname, credentials = (item[::-1] for item
in value[::-1].split('@', 1))
dict_share.update({key: hostname})
credentials = tuple(cred.lstrip('"').rstrip('"')
for cred in credentials.split(':', 1))
dict_share.update({'username': credentials[0]})
if len(credentials) > 1:
dict_share.update({'password': credentials[1]})
continue
dict_share.update({key: value})
shares.append(dict_share)
return shares
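# Illustrative config file accepted by read_config (a sketch inferred from the
# parsing above; the section name and keys are hypothetical):
#
#     [media-share]
#     hostname = "user":"secret"@fileserver.local
#     path = /srv/media
#
# read_config('/path/to/shares.ini') would then return:
#     [{'hostname': 'fileserver.local', 'username': 'user',
#       'password': 'secret', 'path': '/srv/media'}]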
| simodalla/pygmount | pygmount/utils/utils.py | Python | bsd-3-clause | 2,197 | 0 |
# Copyright The Plasma Project.
# See LICENSE.txt for details.
"""
Test helpers and classes
"""
import inspect
def dict_for_slots(obj):
"""
"""
slots = []
for cls in inspect.getmro(obj.__class__):
if hasattr(cls, '__slots__'):
slots += cls.__slots__
return dict(zip(slots, [getattr(obj, x) for x in slots]))
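# Illustrative use of dict_for_slots (a sketch, not part of the original
# helpers): slot values are collected across the whole inheritance chain.
#
#     class Point(object):
#         __slots__ = ('x', 'y')
#         def __init__(self):
#             self.x, self.y = 1, 2
#
#     dict_for_slots(Point())  # -> {'x': 1, 'y': 2}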
| hydralabs/plasma | plasma/test/util.py | Python | mit | 354 | 0 |
from arza.misc.platform import jit
from arza.types.space import isany
from arza.runtime import error
from arza.runtime.routine.code_routine import CodeRoutine
from arza.runtime.routine.native_routine import NativeRoutine
from arza.types import api, space
def complete_or_interrupt_native_routine(func):
def func_wrapper(process, routine):
result = func(process, routine)
assert isany(result)
if space.isinterrupt(result):
return
if not routine.is_closed():
routine.complete(process, result)
return func_wrapper
def complete_native_routine(func):
def func_wrapper(process, routine):
result = func(process, routine)
assert space.isany(result), (func, routine.args(), result)
assert not space.isvoid(result), (func, routine.args(), result)
if not routine.is_closed():
routine.complete(process, result)
return func_wrapper
def create_native_routine(stack, name, native, args, arity):
return NativeRoutine(stack, name, native, args, arity)
def create_callback_routine(stack, on_result, on_complete, function, args):
from arza.runtime.routine.callback_routine import CallbackRoutine
return CallbackRoutine(stack, on_result, on_complete, function, args)
def create_module_routine(name, stack, code, env):
return jit.promote(CodeRoutine(space.newvoid(), stack, None, name, code, env))
def create_function_routine(stack, func, args, outer_env):
code = func.bytecode
scope = code.scope
name = func.name
env = create_function_environment(func, scope, args, outer_env)
routine = jit.promote(CodeRoutine(func, stack, args, name, code, env))
return routine
def create_function_environment(func, scope, args, outer_env):
declared_args_count = scope.arg_count if not scope.is_variadic else scope.arg_count - 1
args_count = api.length_i(args)
if not scope.is_variadic:
if args_count != declared_args_count:
return error.throw_5(error.Errors.INVALID_ARG_COUNT_ERROR,
space.newint(args_count), space.newstring(u"!="), space.newint(declared_args_count),
func, args)
if args_count < declared_args_count:
return error.throw_5(error.Errors.INVALID_ARG_COUNT_ERROR,
space.newint(args_count), space.newstring(u"<"), space.newint(declared_args_count),
func.name, args)
env = space.newenv(func.name, scope, outer_env)
return env
| gloryofrobots/obin | arza/runtime/routine/routine.py | Python | gpl-2.0 | 2,553 | 0.001567 |
from django.contrib.auth.models import User
from django_countries import countries
def get_user(user_id):
user = User.objects.get(id=user_id)
return user
def get_user_profile(user_id):
user = User.objects.get(id=user_id)
return user.profile
def get_ambassadors(country_code=None):
ambassadors = []
all_ambassadors = User.objects.filter(groups__name='ambassadors').order_by('date_joined')
for ambassador in all_ambassadors:
if country_code:
if ambassador.profile.country == country_code:
ambassadors.append(ambassador.profile)
else:
ambassadors.append(ambassador.profile)
return ambassadors
def get_main_ambassadors(country_code=None):
ambassadors = []
all_ambassadors = User.objects.filter(groups__name='main').order_by('date_joined')
for ambassador in all_ambassadors:
if country_code:
if ambassador.profile.country == country_code:
ambassadors.append(ambassador.profile)
else:
ambassadors.append(ambassador.profile)
return ambassadors
def get_not_main_ambassadors(country_code=None):
ambassadors = []
all_ambassadors = User.objects.filter(groups__name='ambassadors').exclude(groups__name='main').order_by('date_joined')
for ambassador in all_ambassadors:
if country_code:
if ambassador.profile.country == country_code:
ambassadors.append(ambassador.profile)
else:
ambassadors.append(ambassador.profile)
return ambassadors
def get_ambassadors_for_countries():
ambassadors = get_ambassadors()
countries_ambassadors = []
# list countries minus two CUSTOM_COUNTRY_ENTRIES
for code, name in list(countries)[2:]:
readable_name = unicode(name)
main_ambassadors = get_main_ambassadors(code)
found_ambassadors = get_not_main_ambassadors(code)
countries_ambassadors.append((code, readable_name,found_ambassadors, main_ambassadors))
countries_ambassadors.sort()
return countries_ambassadors
def get_ambassadors_for_country(country):
ambassadors = User.objects.filter(groups__name='ambassadors', userprofile__country=country)
return ambassadors
def update_user_email(user_id, new_email):
user = User.objects.get(id=user_id)
user.email = new_email
user.save(update_fields=["email"])
return user
| ercchy/coding-events | web/processors/user.py | Python | mit | 2,185 | 0.026545 |
import tkinter
import string
from ctypes import windll
import os
'''
for d in string.ascii_uppercase:
if os.path.exists("%s:\\" % (d)):
print("Drive letter '%s' is in use." % (d))
'''
'''
def get_drives():
drives = []
bitmask = windll.kernel32.GetLogicalDrives()
for letter in string.ascii_uppercase:
if bitmask & 1:
drives.append(letter)
bitmask >>= 1
return drives
print(get_drives())
'''
'''
testLambda = (lambda boolInput: False if boolInput else True)
print(testLambda(True))
print(testLambda(False))
'''
top = tkinter.Tk()
# Testing out a listbox in preparation for migrating game selection to a listbox
# instead of a bunch of checkboxes, because checkboxes aren't very scalable.
def click_button(event):
# #this block works
w = event.widget
index = w.curselection()
for x in index:
value = w.get(x)
print(value)
Lb1 = tkinter.Listbox(top,selectmode='extended')
#Lb1.bind('<<ListboxSelect>>', click_button)
Lb1.insert(0, "Python")
Lb1.insert(0, "Perl")
Lb1.insert(0, "C")
Lb1.insert(0, "PHP")
Lb1.insert(0, "JSP")
Lb1.insert(0, "Ruby")
def btnclick():
values = [Lb1.get(x) for x in Lb1.curselection()]
print(', '.join(values))
onlybutton = tkinter.Button(text='test', command=btnclick)
Lb1.pack()
onlybutton.pack()
top.mainloop()
| deviantenigma/Savegame-backup-Utility | ignore_fortestingpurposesonly.py | Python | gpl-3.0 | 1,345 | 0.007435 |
import matplotlib.pyplot as plt
import numpy as np
from kaftools.filters import KlmsFilter
from kaftools.kernels import GaussianKernel
from kaftools.utils.shortcuts import plot_series, plot_squared_error
from kaftools.sparsifiers import NoveltyCriterion
if __name__ == "__main__":
    # Load data
data = np.load('./data/pretrained_data_lorentz.npz')
# sparsify lorentz : lr(1e-2), novelty(0.99919, 1.0)
# sparsify wind: lr(1e-2), novelty(0.9934, 1.0)
    # Configure KLMS
klms_params = {
'kernel': GaussianKernel(sigma=float(data['sigma_k_post'])),
'learning_rate': 1e-1,
'delay': int(data['delay']),
#'sparsifiers': [NoveltyCriterion(0.99919, 1.0)]
'coefs': data['a_post'],
'dict': data['s_post'].T,
'freeze_dict': True
}
# np.seterr(all='raise')
klms = KlmsFilter(data['y_prog'], data['y_prog'])
klms.fit(**klms_params)
print(len(klms.support_vectors))
plot_series(data['y_prog'], klms.estimate, markersize=1, linewidth=1, figsize=(15, 3))
plot_squared_error(klms.error_history)
import matplotlib.pyplot as plt
#plt.semilogy(np.array(klms.error_history)**2)
#plt.show() | Canas/kaftools | examples/klms_pretrained.py | Python | mit | 1,192 | 0.004195 |
from django.test import TestCase
from core.models import User, Group, Host
class Access_Test(TestCase):
fixtures = ['test_users.json','test_groups','test_hosts.json']
def test_superuser_can_all(self):
admin = User.objects.get(pk=1)
# Access to host
host = Host.objects.get(pk=1)
r = admin.has_access(host)
self.assertTrue(r, "Superuser can't access to hosts.")
# Access to user
user = User.objects.get(pk=2)
r = admin.has_access(user)
self.assertTrue(r, "Superuser can't access to users.")
# Access to group
group = Group.objects.get(pk=1)
r = admin.has_access(group)
self.assertTrue(r, "Superuser can't access to groups.")
def test_access_to_own_group(self):
user = User.objects.get(pk=2)
# Access to host
host = Host.objects.get(pk=1)
r = user.has_access(host)
self.assertTrue(r, "Simple user can't access to his hosts.")
# Access to user
r = user.has_access(user)
self.assertTrue(r, "Simple user can't access to himself.")
# Access to group
group = Group.objects.get(pk=1)
r = user.has_access(group)
self.assertTrue(r, "Simple user can't access to his groups.")
def test_access_to_other_group(self):
user = User.objects.get(pk=2)
# Access to host
host = Host.objects.get(pk=2)
r = user.has_access(host)
self.assertFalse(r, "Simple user can access to other group's hosts.")
# Access to user
user2 = User.objects.get(pk=3)
r = user.has_access(user2)
self.assertFalse(r, "Simple user can access to users.")
# Access to group
group = Group.objects.get(pk=2)
r = user.has_access(group)
self.assertFalse(r, "Simple user can access to others groups.")
| redhat-cip/numeter | web-app/numeter_webapp/core/tests/group_restriction.py | Python | agpl-3.0 | 1,907 | 0.003146 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'main_window.ui'
#
# Created: Wed Jan 7 16:57:08 2015
# by: PyQt5 UI code generator 5.2.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(800, 600)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.test_btn = QtWidgets.QPushButton(self.centralwidget)
self.test_btn.setGeometry(QtCore.QRect(170, 100, 98, 27))
self.test_btn.setObjectName("test_btn")
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 25))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.test_btn.setText(_translate("MainWindow", "Test btn"))
| shubhamshuklaer/compiler_main | main_window_ui.py | Python | gpl-3.0 | 1,476 | 0.001355 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
from airflow import models
from airflow.providers.google.cloud.operators.text_to_speech import CloudTextToSpeechSynthesizeOperator
from airflow.providers.google.cloud.operators.translate_speech import CloudTranslateSpeechOperator
from airflow.utils import dates
GCP_PROJECT_ID = os.environ.get("GCP_PROJECT_ID", "example-project")
BUCKET_NAME = os.environ.get("GCP_TRANSLATE_SPEECH_TEST_BUCKET", "INVALID BUCKET NAME")
# [START howto_operator_translate_speech_gcp_filename]
FILENAME = "gcp-speech-test-file"
# [END howto_operator_translate_speech_gcp_filename]
# [START howto_operator_text_to_speech_api_arguments]
INPUT = {"text": "Sample text for demo purposes"}
VOICE = {"language_code": "en-US", "ssml_gender": "FEMALE"}
AUDIO_CONFIG = {"audio_encoding": "LINEAR16"}
# [END howto_operator_text_to_speech_api_arguments]
# [START howto_operator_translate_speech_arguments]
CONFIG = {"encoding": "LINEAR16", "language_code": "en_US"}
AUDIO = {"uri": f"gs://{BUCKET_NAME}/{FILENAME}"}
TARGET_LANGUAGE = 'pl'
FORMAT = 'text'
MODEL = 'base'
SOURCE_LANGUAGE = None # type: None
# [END howto_operator_translate_speech_arguments]
with models.DAG(
"example_gcp_translate_speech",
schedule_interval='@once', # Override to match your needs
start_date=dates.days_ago(1),
tags=['example'],
) as dag:
text_to_speech_synthesize_task = CloudTextToSpeechSynthesizeOperator(
project_id=GCP_PROJECT_ID,
input_data=INPUT,
voice=VOICE,
audio_config=AUDIO_CONFIG,
target_bucket_name=BUCKET_NAME,
target_filename=FILENAME,
task_id="text_to_speech_synthesize_task",
)
# [START howto_operator_translate_speech]
translate_speech_task = CloudTranslateSpeechOperator(
project_id=GCP_PROJECT_ID,
audio=AUDIO,
config=CONFIG,
target_language=TARGET_LANGUAGE,
format_=FORMAT,
source_language=SOURCE_LANGUAGE,
model=MODEL,
task_id='translate_speech_task',
)
translate_speech_task2 = CloudTranslateSpeechOperator(
audio=AUDIO,
config=CONFIG,
target_language=TARGET_LANGUAGE,
format_=FORMAT,
source_language=SOURCE_LANGUAGE,
model=MODEL,
task_id='translate_speech_task2',
)
# [END howto_operator_translate_speech]
text_to_speech_synthesize_task >> translate_speech_task >> translate_speech_task2
| apache/incubator-airflow | airflow/providers/google/cloud/example_dags/example_translate_speech.py | Python | apache-2.0 | 3,199 | 0.00125 |
"""
Settings for the LMS that runs alongside the CMS on AWS
"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=W0401, W0614
from .dev import *
MODULESTORE = {
'default': {
'ENGINE': 'xmodule.modulestore.draft.DraftModuleStore',
'DOC_STORE_CONFIG': DOC_STORE_CONFIG,
'OPTIONS': modulestore_options
},
}
| pelikanchik/edx-platform | lms/envs/cms/preview_dev.py | Python | agpl-3.0 | 430 | 0 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from urllib.parse import urljoin, urlsplit
from django.db import migrations, models
def forwards(apps, schema_editor):
MenuItem = apps.get_model('external_services', 'MenuItem')
items = (MenuItem.objects.all()
.exclude(service=None)
.exclude(menu_url=None)
.exclude(menu_url=''))
errors = []
for item in items:
uri1 = urlsplit(item.menu_url)
uri2 = urlsplit(item.service.url)
if uri1.netloc and uri1.netloc != uri2.netloc:
errors.append(item)
if errors:
print()
msg = ['Database is in inconsistent state.']
for item in errors:
msg.append(" MenuItem(pk=%s): %s <> %s" % (item.pk, item.menu_url, item.service.url))
msg.append("For above menuitems, domain in MenuItem.menu_url doesn't match domain in MenuItem.service.url.")
msg.append("Database is in inconsistent state. Manual fixing is required.")
raise RuntimeError('\n'.join(msg))
for item in items:
uri = urlsplit(item.menu_url)
url = uri._replace(scheme='', netloc='').geturl()
item.menu_url = url
item.save(update_fields=['menu_url'])
def backwards(apps, schema_editor):
MenuItem = apps.get_model('external_services', 'MenuItem')
for item in MenuItem.objects.all():
item.menu_url = urljoin(item.service.url, item.menu_url)
item.save(update_fields=['menu_url'])
class Migration(migrations.Migration):
atomic = False
dependencies = [
('external_services', '0010_auto_20180918_1916'),
]
operations = [
migrations.RunPython(forwards, backwards),
]
| teemulehtinen/a-plus | external_services/migrations/0011_menuitem_menu_url.py | Python | gpl-3.0 | 1,720 | 0.003488 |
from concurrent.futures import ThreadPoolExecutor
import docker
import functools
# Common threaded executor for asynchronous jobs.
# Required for the AsyncDockerClient to operate.
_executor = ThreadPoolExecutor(1)
class AsyncDockerClient:
"""Provides an asynchronous interface to dockerpy.
All Client interface is available as methods returning a future
instead of the actual result. The resulting future can be yielded.
This class is thread safe. Note that all instances use the same
executor.
"""
def __init__(self, *args, **kwargs):
"""Initialises the docker async client.
The client uses a single, module level executor to submit
requests and obtain futures. The futures must be yielded
according to the tornado asynchronous interface.
The exported methods are the same as from the docker-py
synchronous client, with the exception of their async nature.
Note that the executor is a ThreadPoolExecutor with a single thread.
"""
self._sync_client = docker.Client(*args, **kwargs)
def __getattr__(self, attr):
"""Returns the docker client method, wrapped in an async execution
environment. The returned method must be used in conjunction with
the yield keyword."""
if hasattr(self._sync_client, attr):
return functools.partial(self._submit_to_executor, attr)
else:
raise AttributeError(
"'{}' object has no attribute '{}'".format(
type(self).__name__,
attr
)
)
# Private
def _submit_to_executor(self, method, *args, **kwargs):
"""Call a synchronous docker client method in a background thread,
using the module level executor.
Parameters
----------
method : string
A string containing a callable method
*args, *kwargs:
Arguments to the invoked method
Return
------
A future from the ThreadPoolExecutor.
"""
return _executor.submit(self._invoke, method, *args, **kwargs)
def _invoke(self, method, *args, **kwargs):
"""wrapper for calling docker methods to be passed to
ThreadPoolExecutor.
"""
m = getattr(self._sync_client, method)
return m(*args, **kwargs)
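# Illustrative AsyncDockerClient usage inside a Tornado coroutine (a sketch;
# assumes a docker daemon reachable with docker-py's default settings):
#
#     from tornado import gen
#
#     @gen.coroutine
#     def list_containers():
#         client = AsyncDockerClient()
#         containers = yield client.containers(all=True)
#         raise gen.Return(containers)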
| simphony/simphony-remote | remoteappmanager/docker/async_docker_client.py | Python | bsd-3-clause | 2,391 | 0 |
"""n_answers migration
Revision ID: 7927d63d556
Revises: 1eb5febf4842
Create Date: 2014-08-08 14:02:36.738460
Delete the "n_answers" field from the "info" attribute/column as it is not used.
"""
# revision identifiers, used by Alembic.
revision = '7927d63d556'
down_revision = '1eb5febf4842'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import table, column, select, bindparam
import json
def upgrade():
task = table('task',
column('id'),
column('info')
)
conn = op.get_bind()
query = select([task.c.id, task.c.info])
tasks = conn.execute(query)
update_values = []
for row in tasks:
info_data = row.info
info_dict = json.loads(info_data)
if info_dict.get('n_answers'):
del info_dict['n_answers']
update_values.append({'task_id': row.id, 'new_info': json.dumps(info_dict)})
task_update = task.update().\
where(task.c.id == bindparam('task_id')).\
values(info=bindparam('new_info'))
if len(update_values) > 0:
conn.execute(task_update, update_values)
def downgrade():
task = table('task',
column('id'),
column('info'),
column('n_answers')
)
conn = op.get_bind()
query = select([task.c.id, task.c.info, task.c.n_answers])
tasks = conn.execute(query)
update_values = []
for row in tasks:
info_data = row.info
info_dict = json.loads(info_data)
info_dict['n_answers'] = row.n_answers
update_values.append({'task_id': row.id, 'new_info': json.dumps(info_dict)})
task_update = task.update().\
where(task.c.id == bindparam('task_id')).\
values(info=bindparam('new_info'))
if len(update_values) > 0:
conn.execute(task_update, update_values)
| PyBossa/pybossa | alembic/versions/7927d63d556_n_answers_migration.py | Python | agpl-3.0 | 1,841 | 0.008691 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
from typing import Dict
from numpy import unique
from pandas import DataFrame
from lib.data_source import DataSource
class Covid19GermanyDataSource(DataSource):
def parse_dataframes(
self, dataframes: Dict[str, DataFrame], aux: Dict[str, DataFrame], **parse_opts
) -> DataFrame:
# Rename the appropriate columns
data = dataframes[0].rename(columns={"time_iso8601": "date"})
# Convert dates to ISO format
data["date"] = data["date"].apply(
lambda x: datetime.datetime.fromisoformat(x).date().isoformat()
)
# Get a list of all regions
regions = unique([col[3:5] for col in data.columns if col.startswith("DE-")])
# Transform the data from non-tabulated format to our record format
records = []
for idx, row in data.iterrows():
record = {"date": row["date"]}
for region_code in regions:
records.append(
{
"subregion1_code": region_code,
"total_confirmed": row["DE-%s_cases" % region_code],
"total_deceased": row["DE-%s_deaths" % region_code],
**record,
}
)
data = DataFrame.from_records(records)
# Ensure we only take one record from the table
data = data.groupby(["date", "subregion1_code"]).last().reset_index()
# Output the results
data["country_code"] = "DE"
return data
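# Illustrative shape of the transformation above (made-up numbers): a wide row
# such as
#     {'date': '2020-03-31', 'DE-BY_cases': 100, 'DE-BY_deaths': 2, ...}
# becomes one record per region code:
#     {'date': '2020-03-31', 'subregion1_code': 'BY',
#      'total_confirmed': 100, 'total_deceased': 2, 'country_code': 'DE'}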
| GoogleCloudPlatform/covid-19-open-data | src/pipelines/epidemiology/de_covid_19_germany_gae.py | Python | apache-2.0 | 2,120 | 0.000943 |
from django.db import migrations, models
from django.conf import settings
from opaque_keys.edx.django.models import CourseKeyField
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='CourseGoal',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('course_key', CourseKeyField(max_length=255, db_index=True)),
('goal_key', models.CharField(default='unsure', max_length=100, choices=[('certify', 'Earn a certificate.'), ('complete', 'Complete the course.'), ('explore', 'Explore the course.'), ('unsure', 'Not sure yet.')])),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE)),
],
),
migrations.AlterUniqueTogether(
name='coursegoal',
unique_together={('user', 'course_key')},
),
]
| eduNEXT/edx-platform | lms/djangoapps/course_goals/migrations/0001_initial.py | Python | agpl-3.0 | 1,064 | 0.00282 |
##########################################################################
#
# Copyright (c) 2011-2012, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import GafferUI
import GafferUITest
class ProgressBarTest( GafferUITest.TestCase ) :
def testConstructor( self ) :
b = GafferUI.ProgressBar()
self.assertEqual( b.getRange(), ( 0, 100 ) )
self.assertEqual( b.getProgress(), 0 )
self.assertEqual( b.getText(), "%p%" )
b = GafferUI.ProgressBar( 10, ( 5, 15 ), "doing something %p%" )
self.assertEqual( b.getRange(), ( 5, 15 ) )
self.assertEqual( b.getProgress(), 10 )
self.assertEqual( b.getText(), "doing something %p%" )
def testAccessors( self ) :
b = GafferUI.ProgressBar()
b.setRange( ( 0, 20 ) )
self.assertEqual( b.getRange(), ( 0, 20 ) )
b.setProgress( 10 )
self.assertEqual( b.getProgress(), 10 )
b.setText( "woteva" )
self.assertEqual( b.getText(), "woteva" )
if __name__ == "__main__":
unittest.main()
| DoubleNegativeVisualEffects/gaffer | python/GafferUITest/ProgressBarTest.py | Python | bsd-3-clause | 2,664 | 0.034535 |
import unittest
from aiourlstatus import app
class TestEmpty(unittest.TestCase):
def test_no_urls(self):
data = ''
urls, len_urls = app.find_sort_urls(data)
self.assertEqual(urls, [])
self.assertEqual(len_urls, 0)
class TestTXT(unittest.TestCase):
def test_parse_text(self):
with open('tests/retest.txt') as f:
data = f.read()
urls, len_urls = app.find_sort_urls(data)
url_list = [['http://en.wikipedia.org/wiki/Body_image', 'http://en.wikipedia.org/wiki/Identity_formation',
'http://en.wikipedia.org/wiki/Self-confidence', 'http://en.wikipedia.org/wiki/Self-esteem'],
['http://www.bbc.com/sport/0/'], ['http://www.haskell.org/'], ['http://lxer.com/'],
['http://www.find-happiness.com/definition-of-happiness.html'],
['http://www.wikihow.com/Elevate-Your-Self-Esteem']]
self.assertCountEqual(urls, url_list)
if __name__ == '__main__':
unittest.main()
| riverrun/aiourlstatus | tests/parse_test.py | Python | gpl-3.0 | 990 | 0.008081 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import api
from openerp.osv import fields, osv
from lxml import etree
from openerp.tools.translate import _
from openerp.exceptions import UserError
class followup(osv.osv):
_name = 'account_followup.followup'
_description = 'Account Follow-up'
_rec_name = 'name'
_columns = {
'followup_line': fields.one2many('account_followup.followup.line', 'followup_id', 'Follow-up', copy=True),
'company_id': fields.many2one('res.company', 'Company', required=True),
'name': fields.related('company_id', 'name', string = "Name", readonly=True, type="char"),
}
_defaults = {
'company_id': lambda s, cr, uid, c: s.pool.get('res.company')._company_default_get(cr, uid, 'account_followup.followup', context=c),
}
_sql_constraints = [('company_uniq', 'unique(company_id)', 'Only one follow-up per company is allowed')]
class followup_line(osv.osv):
def _get_default_template(self, cr, uid, ids, context=None):
try:
return self.pool.get('ir.model.data').get_object_reference(cr, uid, 'account_followup', 'email_template_account_followup_default')[1]
except ValueError:
return False
_name = 'account_followup.followup.line'
_description = 'Follow-up Criteria'
_columns = {
'name': fields.char('Follow-Up Action', required=True),
'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying a list of follow-up lines."),
'delay': fields.integer('Due Days', help="The number of days after the due date of the invoice to wait before sending the reminder. Could be negative if you want to send a polite alert beforehand.", required=True),
'followup_id': fields.many2one('account_followup.followup', 'Follow Ups', required=True, ondelete="cascade"),
'description': fields.text('Printed Message', translate=True),
'send_email':fields.boolean('Send an Email', help="When processing, it will send an email"),
'send_letter':fields.boolean('Send a Letter', help="When processing, it will print a letter"),
'manual_action':fields.boolean('Manual Action', help="When processing, it will set the manual action to be taken for that customer. "),
'manual_action_note':fields.text('Action To Do', placeholder="e.g. Give a phone call, check with others , ..."),
'manual_action_responsible_id':fields.many2one('res.users', 'Assign a Responsible', ondelete='set null'),
'email_template_id':fields.many2one('mail.template', 'Email Template', ondelete='set null'),
}
_order = 'delay'
_sql_constraints = [('days_uniq', 'unique(followup_id, delay)', 'Days of the follow-up levels must be different')]
_defaults = {
'send_email': True,
'send_letter': True,
'manual_action':False,
'description': """
Dear %(partner_name)s,
Unless there has been an error on our part, it seems that the following amount remains unpaid. Please take the appropriate measures to settle this payment within the next 8 days.
If your payment was made after this message was sent, please disregard it. Do not hesitate to contact our accounting department.
Best Regards,
""",
'email_template_id': _get_default_template,
}
def _check_description(self, cr, uid, ids, context=None):
for line in self.browse(cr, uid, ids, context=context):
if line.description:
try:
line.description % {'partner_name': '', 'date':'', 'user_signature': '', 'company_name': ''}
except:
return False
return True
_constraints = [
(_check_description, 'Your description is invalid, use the right legend or %% if you want to use the percent character.', ['description']),
]
class account_move_line(osv.osv):
def _get_result(self, cr, uid, ids, name, arg, context=None):
res = {}
for aml in self.browse(cr, uid, ids, context=context):
res[aml.id] = aml.debit - aml.credit
return res
_inherit = 'account.move.line'
_columns = {
'followup_line_id': fields.many2one('account_followup.followup.line', 'Follow-up Level',
ondelete='restrict'), #restrict deletion of the followup line
'followup_date': fields.date('Latest Follow-up', select=True),
'result':fields.function(_get_result, type='float', method=True,
string="Balance") #'balance' field is not the same
}
class res_partner(osv.osv):
def fields_view_get(self, cr, uid, view_id=None, view_type=None, context=None, toolbar=False, submenu=False):
res = super(res_partner, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context,
toolbar=toolbar, submenu=submenu)
context = context or {}
if view_type == 'form' and context.get('Followupfirst'):
doc = etree.XML(res['arch'], parser=None, base_url=None)
first_node = doc.xpath("//page[@name='followup_tab']")
root = first_node[0].getparent()
root.insert(0, first_node[0])
res['arch'] = etree.tostring(doc, encoding="utf-8")
return res
def _get_latest(self, cr, uid, ids, names, arg, context=None, company_id=None):
res={}
if company_id == None:
company = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id
else:
company = self.pool.get('res.company').browse(cr, uid, company_id, context=context)
for partner in self.browse(cr, uid, ids, context=context):
amls = partner.unreconciled_aml_ids
latest_date = False
latest_level = False
latest_days = False
latest_level_without_lit = False
latest_days_without_lit = False
for aml in amls:
if (aml.company_id == company) and (aml.followup_line_id != False) and (not latest_days or latest_days < aml.followup_line_id.delay):
latest_days = aml.followup_line_id.delay
latest_level = aml.followup_line_id.id
if (aml.company_id == company) and (not latest_date or latest_date < aml.followup_date):
latest_date = aml.followup_date
if (aml.company_id == company) and (aml.blocked == False) and (aml.followup_line_id != False and
(not latest_days_without_lit or latest_days_without_lit < aml.followup_line_id.delay)):
latest_days_without_lit = aml.followup_line_id.delay
latest_level_without_lit = aml.followup_line_id.id
res[partner.id] = {'latest_followup_date': latest_date,
'latest_followup_level_id': latest_level,
'latest_followup_level_id_without_lit': latest_level_without_lit}
return res
@api.cr_uid_ids_context
def do_partner_manual_action(self, cr, uid, partner_ids, context=None):
#partner_ids -> res.partner
for partner in self.browse(cr, uid, partner_ids, context=context):
#Check action: check if the action was not empty, if not add
action_text= ""
if partner.payment_next_action:
action_text = (partner.payment_next_action or '') + "\n" + (partner.latest_followup_level_id_without_lit.manual_action_note or '')
else:
action_text = partner.latest_followup_level_id_without_lit.manual_action_note or ''
#Check date: only change when it did not exist already
action_date = partner.payment_next_action_date or fields.date.context_today(self, cr, uid, context=context)
# Check responsible: if partner has not got a responsible already, take from follow-up
responsible_id = False
if partner.payment_responsible_id:
responsible_id = partner.payment_responsible_id.id
else:
p = partner.latest_followup_level_id_without_lit.manual_action_responsible_id
responsible_id = p and p.id or False
self.write(cr, uid, [partner.id], {'payment_next_action_date': action_date,
'payment_next_action': action_text,
'payment_responsible_id': responsible_id})
def do_partner_print(self, cr, uid, wizard_partner_ids, data, context=None):
#wizard_partner_ids are ids from special view, not from res.partner
if not wizard_partner_ids:
return {}
data['partner_ids'] = wizard_partner_ids
datas = {
'ids': wizard_partner_ids,
'model': 'account_followup.followup',
'form': data
}
return self.pool['report'].get_action(cr, uid, [], 'account_followup.report_followup', data=datas, context=context)
@api.cr_uid_ids_context
def do_partner_mail(self, cr, uid, partner_ids, context=None):
if context is None:
context = {}
ctx = context.copy()
ctx['followup'] = True
#partner_ids are res.partner ids
# If not defined by latest follow-up level, it will be the default template if it can find it
mtp = self.pool.get('mail.template')
unknown_mails = 0
for partner in self.browse(cr, uid, partner_ids, context=ctx):
if partner.email and partner.email.strip():
level = partner.latest_followup_level_id_without_lit
if level and level.send_email and level.email_template_id and level.email_template_id.id:
mtp.send_mail(cr, uid, level.email_template_id.id, partner.id, context=ctx)
else:
mail_template_id = self.pool.get('ir.model.data').get_object_reference(cr, uid,
'account_followup', 'email_template_account_followup_default')
mtp.send_mail(cr, uid, mail_template_id[1], partner.id, context=ctx)
else:
unknown_mails = unknown_mails + 1
action_text = _("Email not sent because of email address of partner not filled in")
if partner.payment_next_action_date:
payment_action_date = min(fields.date.context_today(self, cr, uid, context=ctx), partner.payment_next_action_date)
else:
payment_action_date = fields.date.context_today(self, cr, uid, context=ctx)
if partner.payment_next_action:
payment_next_action = partner.payment_next_action + " \n " + action_text
else:
payment_next_action = action_text
self.write(cr, uid, [partner.id], {'payment_next_action_date': payment_action_date,
'payment_next_action': payment_next_action}, context=ctx)
return unknown_mails
def get_followup_table_html(self, cr, uid, ids, context=None):
""" Build the html tables to be included in emails send to partners,
when reminding them their overdue invoices.
:param ids: [id] of the partner for whom we are building the tables
:rtype: string
"""
from report import account_followup_print
assert len(ids) == 1
if context is None:
context = {}
partner = self.browse(cr, uid, ids[0], context=context)
#copy the context to not change global context. Overwrite it because _() looks for the lang in local variable 'context'.
#Set the language to use = the partner language
context = dict(context, lang=partner.lang)
followup_table = ''
if partner.unreconciled_aml_ids:
company = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id
current_date = fields.date.context_today(self, cr, uid, context=context)
rml_parse = account_followup_print.report_rappel(cr, uid, "followup_rml_parser")
final_res = rml_parse._lines_get_with_partner(partner, company.id)
for currency_dict in final_res:
currency = currency_dict.get('line', [{'currency_id': company.currency_id}])[0]['currency_id']
followup_table += '''
<table border="2" width=100%%>
<tr>
<td>''' + _("Invoice Date") + '''</td>
<td>''' + _("Description") + '''</td>
<td>''' + _("Reference") + '''</td>
<td>''' + _("Due Date") + '''</td>
<td>''' + _("Amount") + " (%s)" % (currency.symbol) + '''</td>
<td>''' + _("Lit.") + '''</td>
</tr>
'''
total = 0
for aml in currency_dict['line']:
block = aml['blocked'] and 'X' or ' '
total += aml['balance']
strbegin = "<TD>"
strend = "</TD>"
date = aml['date_maturity'] or aml['date']
if date <= current_date and aml['balance'] > 0:
strbegin = "<TD><B>"
strend = "</B></TD>"
followup_table +="<TR>" + strbegin + str(aml['date']) + strend + strbegin + aml['name'] + strend + strbegin + (aml['ref'] or '') + strend + strbegin + str(date) + strend + strbegin + str(aml['balance']) + strend + strbegin + block + strend + "</TR>"
total = reduce(lambda x, y: x+y['balance'], currency_dict['line'], 0.00)
total = rml_parse.formatLang(total, dp='Account', currency_obj=currency)
followup_table += '''<tr> </tr>
</table>
<center>''' + _("Amount due") + ''' : %s </center>''' % (total)
return followup_table
def write(self, cr, uid, ids, vals, context=None):
if vals.get("payment_responsible_id", False):
for part in self.browse(cr, uid, ids, context=context):
if part.payment_responsible_id <> vals["payment_responsible_id"]:
#Find partner_id of user put as responsible
responsible_partner_id = self.pool.get("res.users").browse(cr, uid, vals['payment_responsible_id'], context=context).partner_id.id
self.pool.get("mail.thread").message_post(cr, uid, 0,
body = _("You became responsible to do the next action for the payment follow-up of") + " <b><a href='#id=" + str(part.id) + "&view_type=form&model=res.partner'> " + part.name + " </a></b>",
type = 'comment',
subtype = "mail.mt_comment", context = context,
model = 'res.partner', res_id = part.id,
partner_ids = [responsible_partner_id])
return super(res_partner, self).write(cr, uid, ids, vals, context=context)
def action_done(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'payment_next_action_date': False, 'payment_next_action':'', 'payment_responsible_id': False}, context=context)
def do_button_print(self, cr, uid, ids, context=None):
assert(len(ids) == 1)
company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
#search if the partner has accounting entries to print. If not, it may not be present in the
#psql view the report is based on, so we need to stop the user here.
if not self.pool.get('account.move.line').search(cr, uid, [
('partner_id', '=', ids[0]),
('account_id.type', '=', 'receivable'),
('reconcile_id', '=', False),
('state', '!=', 'draft'),
('company_id', '=', company_id),
('date_maturity', '<=', fields.date.context_today(self,cr,uid)),
], context=context):
raise UserError(_("The partner does not have any accounting entries to print in the overdue report for the current company."))
self.message_post(cr, uid, [ids[0]], body=_('Printed overdue payments report'), context=context)
#build the id of this partner in the psql view. Could be replaced by a search with [('company_id', '=', company_id),('partner_id', '=', ids[0])]
wizard_partner_ids = [ids[0] * 10000 + company_id]
followup_ids = self.pool.get('account_followup.followup').search(cr, uid, [('company_id', '=', company_id)], context=context)
if not followup_ids:
raise UserError(_("There is no followup plan defined for the current company."))
data = {
'date': fields.date.today(),
'followup_id': followup_ids[0],
}
#call the print overdue report on this partner
return self.do_partner_print(cr, uid, wizard_partner_ids, data, context=context)
def _get_amounts_and_date(self, cr, uid, ids, name, arg, context=None):
'''
Function that computes values for the followup functional fields. Note that 'payment_amount_due'
is similar to 'credit' field on res.partner except it filters on user's company.
'''
res = {}
company = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id
current_date = fields.date.context_today(self, cr, uid, context=context)
for partner in self.browse(cr, uid, ids, context=context):
worst_due_date = False
amount_due = amount_overdue = 0.0
for aml in partner.unreconciled_aml_ids:
if (aml.company_id == company):
date_maturity = aml.date_maturity or aml.date
if not worst_due_date or date_maturity < worst_due_date:
worst_due_date = date_maturity
amount_due += aml.result
if (date_maturity <= current_date):
amount_overdue += aml.result
res[partner.id] = {'payment_amount_due': amount_due,
'payment_amount_overdue': amount_overdue,
'payment_earliest_due_date': worst_due_date}
return res
def _get_followup_overdue_query(self, cr, uid, args, overdue_only=False, context=None):
'''
This function is used to build the query and arguments to use when making a search on functional fields
* payment_amount_due
* payment_amount_overdue
Basically, the query is exactly the same except that for overdue there is an extra clause in the WHERE.
:param args: arguments given to the search in the usual domain notation (list of tuples)
:param overdue_only: option to add the extra argument to filter on overdue accounting entries or not
:returns: a tuple with
* the query to execute as first element
* the arguments for the execution of this query
:rtype: (string, [])
'''
company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
having_where_clause = ' AND '.join(map(lambda x: '(SUM(bal2) %s %%s)' % (x[1]), args))
having_values = [x[2] for x in args]
query = self.pool.get('account.move.line')._query_get(cr, uid, context=context)
overdue_only_str = overdue_only and 'AND date_maturity <= NOW()' or ''
return ('''SELECT pid AS partner_id, SUM(bal2) FROM
(SELECT CASE WHEN bal IS NOT NULL THEN bal
ELSE 0.0 END AS bal2, p.id as pid FROM
(SELECT (debit-credit) AS bal, partner_id
FROM account_move_line l
WHERE account_id IN
(SELECT id FROM account_account
WHERE type=\'receivable\' AND active)
''' + overdue_only_str + '''
AND reconcile_id IS NULL
AND company_id = %s
AND ''' + query + ''') AS l
RIGHT JOIN res_partner p
ON p.id = partner_id ) AS pl
GROUP BY pid HAVING ''' + having_where_clause, [company_id] + having_values)
def _payment_overdue_search(self, cr, uid, obj, name, args, context=None):
if not args:
return []
query, query_args = self._get_followup_overdue_query(cr, uid, args, overdue_only=True, context=context)
cr.execute(query, query_args)
res = cr.fetchall()
if not res:
return [('id','=','0')]
return [('id','in', [x[0] for x in res])]
def _payment_earliest_date_search(self, cr, uid, obj, name, args, context=None):
if not args:
return []
company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
having_where_clause = ' AND '.join(map(lambda x: '(MIN(l.date_maturity) %s %%s)' % (x[1]), args))
having_values = [x[2] for x in args]
query = self.pool.get('account.move.line')._query_get(cr, uid, context=context)
cr.execute('SELECT partner_id FROM account_move_line l '\
'WHERE account_id IN '\
'(SELECT id FROM account_account '\
'WHERE type=\'receivable\' AND active) '\
'AND l.company_id = %s '
'AND reconcile_id IS NULL '\
'AND '+query+' '\
'AND partner_id IS NOT NULL '\
'GROUP BY partner_id HAVING '+ having_where_clause,
[company_id] + having_values)
res = cr.fetchall()
if not res:
return [('id','=','0')]
return [('id','in', [x[0] for x in res])]
def _payment_due_search(self, cr, uid, obj, name, args, context=None):
if not args:
return []
query, query_args = self._get_followup_overdue_query(cr, uid, args, overdue_only=False, context=context)
cr.execute(query, query_args)
res = cr.fetchall()
if not res:
return [('id','=','0')]
return [('id','in', [x[0] for x in res])]
def _get_partners(self, cr, uid, ids, context=None):
#this function search for the partners linked to all account.move.line 'ids' that have been changed
partners = set()
for aml in self.browse(cr, uid, ids, context=context):
if aml.partner_id:
partners.add(aml.partner_id.id)
return list(partners)
_inherit = "res.partner"
_columns = {
'payment_responsible_id':fields.many2one('res.users', ondelete='set null', string='Follow-up Responsible',
help="Optionally you can assign a user to this field, which will make him responsible for the action.",
track_visibility="onchange", copy=False),
'payment_note':fields.text('Customer Payment Promise', help="Payment Note", track_visibility="onchange", copy=False),
'payment_next_action':fields.text('Next Action', copy=False,
help="This is the next action to be taken. It will automatically be set when the partner gets a follow-up level that requires a manual action. ",
track_visibility="onchange"),
'payment_next_action_date': fields.date('Next Action Date', copy=False,
help="This is when the manual follow-up is needed. "
"The date will be set to the current date when the partner "
"gets a follow-up level that requires a manual action. "
"Can be practical to set manually e.g. to see if he keeps "
"his promises."),
'unreconciled_aml_ids':fields.one2many('account.move.line', 'partner_id', domain=['&', ('reconcile_id', '=', False), '&',
('account_id.active','=', True), '&', ('account_id.type', '=', 'receivable'), ('state', '!=', 'draft')]),
'latest_followup_date':fields.function(_get_latest, method=True, type='date', string="Latest Follow-up Date",
help="Latest date that the follow-up level of the partner was changed",
store=False, multi="latest"),
'latest_followup_level_id':fields.function(_get_latest, method=True,
type='many2one', relation='account_followup.followup.line', string="Latest Follow-up Level",
help="The maximum follow-up level",
store={
'res.partner': (lambda self, cr, uid, ids, c: ids,[],10),
'account.move.line': (_get_partners, ['followup_line_id'], 10),
},
multi="latest"),
'latest_followup_level_id_without_lit':fields.function(_get_latest, method=True,
type='many2one', relation='account_followup.followup.line', string="Latest Follow-up Level without litigation",
help="The maximum follow-up level without taking into account the account move lines with litigation",
store={
'res.partner': (lambda self, cr, uid, ids, c: ids,[],10),
'account.move.line': (_get_partners, ['followup_line_id'], 10),
},
multi="latest"),
'payment_amount_due':fields.function(_get_amounts_and_date,
type='float', string="Amount Due",
store = False, multi="followup",
fnct_search=_payment_due_search),
'payment_amount_overdue':fields.function(_get_amounts_and_date,
type='float', string="Amount Overdue",
store = False, multi="followup",
fnct_search = _payment_overdue_search),
'payment_earliest_due_date':fields.function(_get_amounts_and_date,
type='date',
string = "Worst Due Date",
multi="followup",
fnct_search=_payment_earliest_date_search),
}
class account_config_settings(osv.TransientModel):
_name = 'account.config.settings'
_inherit = 'account.config.settings'
def open_followup_level_form(self, cr, uid, ids, context=None):
res_ids = self.pool.get('account_followup.followup').search(cr, uid, [], context=context)
return {
'type': 'ir.actions.act_window',
'name': 'Payment Follow-ups',
'res_model': 'account_followup.followup',
'res_id': res_ids and res_ids[0] or False,
'view_mode': 'form,tree',
}
| addition-it-solutions/project-all | addons/account_followup/account_followup.py | Python | agpl-3.0 | 28,847 | 0.010122 |
import os
import traceback,sys
CONFIG_FOLDER = "Configs"
def load_config(name):
try:
module = __import__("%s.%s" % (CONFIG_FOLDER,name), fromlist=["Config"])
except ImportError:
# Display error message
traceback.print_exc(file=sys.stdout)
        raise ImportError("Failed to import module {0} from folder {1} using fromlist {2}".format(name, CONFIG_FOLDER, ["Config"]))
conf = module.Config()
conf.filename = os.path.join(CONFIG_FOLDER, "%s.py" % name)
conf.name = name
print "Loading config %s. Loading completed" % name
return conf
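# --- Illustrative usage sketch (editor's addition, not part of the original module). ---
# Assumes a file Configs/default.py defining a class named Config; the module
# name "default" below is hypothetical.
def _example_load_config():
    conf = load_config("default")  # imports Configs.default and instantiates its Config class
    print("Loaded %s from %s" % (conf.name, conf.filename))
    return conf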
| sondree/Master-thesis | Python MOEA/utility.py | Python | gpl-3.0 | 592 | 0.015203 |
# Copyright 2019 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Train a progressive GAN model.
See https://arxiv.org/abs/1710.10196 for details about the model.
See https://github.com/tkarras/progressive_growing_of_gans for the original
theano implementation.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
from absl import logging
from magenta.models.gansynth.lib import networks
import numpy as np
import tensorflow as tf
tfgan = tf.contrib.gan
def make_train_sub_dir(stage_id, **kwargs):
"""Returns the log directory for training stage `stage_id`."""
return os.path.join(kwargs['train_root_dir'], 'stage_{:05d}'.format(stage_id))
def make_resolution_schedule(**kwargs):
"""Returns an object of `ResolutionSchedule`."""
return networks.ResolutionSchedule(
scale_mode=kwargs['scale_mode'],
start_resolutions=(kwargs['start_height'], kwargs['start_width']),
scale_base=kwargs['scale_base'],
num_resolutions=kwargs['num_resolutions'])
def get_stage_ids(**kwargs):
"""Returns a list of stage ids.
Args:
**kwargs: A dictionary of
'train_root_dir': A string of root directory of training logs.
'num_resolutions': An integer of number of progressive resolutions.
"""
train_sub_dirs = [
sub_dir for sub_dir in tf.gfile.ListDirectory(kwargs['train_root_dir'])
if sub_dir.startswith('stage_')
]
# If fresh start, start with start_stage_id = 0
# If has been trained for n = len(train_sub_dirs) stages, start with the last
# stage, i.e. start_stage_id = n - 1.
start_stage_id = max(0, len(train_sub_dirs) - 1)
return range(start_stage_id, get_total_num_stages(**kwargs))
def get_total_num_stages(**kwargs):
"""Returns total number of training stages."""
return 2 * kwargs['num_resolutions'] - 1
def get_batch_size(stage_id, **kwargs):
"""Returns batch size for each stage.
It is expected that `len(batch_size_schedule) == num_resolutions`. Each stage
corresponds to a resolution and hence a batch size. However if
`len(batch_size_schedule) < num_resolutions`, pad `batch_size_schedule` in the
beginning with the first batch size.
Args:
stage_id: An integer of training stage index.
**kwargs: A dictionary of
'batch_size_schedule': A list of integer, each element is the batch size
for the current training image resolution.
'num_resolutions': An integer of number of progressive resolutions.
Returns:
An integer batch size for the `stage_id`.
"""
batch_size_schedule = kwargs['batch_size_schedule']
num_resolutions = kwargs['num_resolutions']
if len(batch_size_schedule) < num_resolutions:
batch_size_schedule = (
[batch_size_schedule[0]] * (num_resolutions - len(batch_size_schedule))
+ batch_size_schedule)
return int(batch_size_schedule[(stage_id + 1) // 2])
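# Worked example (editor's note): with batch_size_schedule=[16, 8] and
# num_resolutions=4, the schedule is left-padded to [16, 16, 16, 8]; the
# transition stage stage_id=5 then indexes (5 + 1) // 2 = 3 and trains with
# batch size 8.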
def get_stage_info(stage_id, **kwargs):
"""Returns information for a training stage.
Args:
stage_id: An integer of training stage index.
**kwargs: A dictionary of
'num_resolutions': An integer of number of progressive resolutions.
'stable_stage_num_images': An integer of number of training images in
the stable stage.
'transition_stage_num_images': An integer of number of training images
in the transition stage.
'total_num_images': An integer of total number of training images.
Returns:
A tuple of integers. The first entry is the number of blocks. The second
entry is the accumulated total number of training images when stage
`stage_id` is finished.
Raises:
ValueError: If `stage_id` is not in [0, total number of stages).
"""
total_num_stages = get_total_num_stages(**kwargs)
valid_stage_id = (0 <= stage_id < total_num_stages)
if not valid_stage_id:
raise ValueError(
'`stage_id` must be in [0, {0}), but instead was {1}'.format(
total_num_stages, stage_id))
# Even stage_id: stable training stage.
# Odd stage_id: transition training stage.
num_blocks = (stage_id + 1) // 2 + 1
num_images = ((stage_id // 2 + 1) * kwargs['stable_stage_num_images'] + (
(stage_id + 1) // 2) * kwargs['transition_stage_num_images'])
total_num_images = kwargs['total_num_images']
if stage_id >= total_num_stages - 1:
num_images = total_num_images
num_images = min(num_images, total_num_images)
return num_blocks, num_images
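# Worked example (editor's note): with num_resolutions=3 (hence 2 * 3 - 1 = 5
# stages), stable_stage_num_images=1000 and transition_stage_num_images=500,
# stage_id=3 gives num_blocks = (3 + 1) // 2 + 1 = 3 and
# num_images = (3 // 2 + 1) * 1000 + ((3 + 1) // 2) * 500 = 3000, subject to the
# total_num_images cap (and forced to total_num_images on the final stage).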
def make_latent_vectors(num, **kwargs):
"""Returns a batch of `num` random latent vectors."""
return tf.random_normal([num, kwargs['latent_vector_size']], dtype=tf.float32)
def make_interpolated_latent_vectors(num_rows, num_columns, **kwargs):
"""Returns a batch of linearly interpolated latent vectors.
Given two randomly generated latent vector za and zb, it can generate
a row of `num_columns` interpolated latent vectors, i.e.
[..., za + (zb - za) * i / (num_columns - 1), ...] where
i = 0, 1, ..., `num_columns` - 1.
This function produces `num_rows` such rows and returns a (flattened)
batch of latent vectors with batch size `num_rows * num_columns`.
Args:
num_rows: An integer. Number of rows of interpolated latent vectors.
num_columns: An integer. Number of interpolated latent vectors in each row.
**kwargs: A dictionary of
'latent_vector_size': An integer of latent vector size.
Returns:
A `Tensor` of shape `[num_rows * num_columns, latent_vector_size]`.
"""
ans = []
for _ in range(num_rows):
z = tf.random_normal([2, kwargs['latent_vector_size']])
r = tf.reshape(
tf.to_float(tf.range(num_columns)) / (num_columns - 1), [-1, 1])
dz = z[1] - z[0]
ans.append(z[0] + tf.stack([dz] * num_columns) * r)
return tf.concat(ans, axis=0)
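# Worked example (editor's note): with num_columns=3, r is [0, 0.5, 1], so each
# row expands its random pair (z_a, z_b) into [z_a, (z_a + z_b) / 2, z_b].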
def define_loss(gan_model, **kwargs):
"""Defines progressive GAN losses.
The generator and discriminator both use wasserstein loss. In addition,
a small penalty term is added to the discriminator loss to prevent it getting
too large.
Args:
gan_model: A `GANModel` namedtuple.
**kwargs: A dictionary of
'gradient_penalty_weight': A float of gradient norm target for
wasserstein loss.
'gradient_penalty_target': A float of gradient penalty weight for
wasserstein loss.
'real_score_penalty_weight': A float of Additional penalty to keep
the scores from drifting too far from zero.
Returns:
A `GANLoss` namedtuple.
"""
gan_loss = tfgan.gan_loss(
gan_model,
generator_loss_fn=tfgan.losses.wasserstein_generator_loss,
discriminator_loss_fn=tfgan.losses.wasserstein_discriminator_loss,
gradient_penalty_weight=kwargs['gradient_penalty_weight'],
gradient_penalty_target=kwargs['gradient_penalty_target'],
gradient_penalty_epsilon=0.0)
real_score_penalty = tf.reduce_mean(
tf.square(gan_model.discriminator_real_outputs))
tf.summary.scalar('real_score_penalty', real_score_penalty)
return gan_loss._replace(
discriminator_loss=(
gan_loss.discriminator_loss +
kwargs['real_score_penalty_weight'] * real_score_penalty))
def define_train_ops(gan_model, gan_loss, **kwargs):
"""Defines progressive GAN train ops.
Args:
gan_model: A `GANModel` namedtuple.
gan_loss: A `GANLoss` namedtuple.
**kwargs: A dictionary of
'adam_beta1': A float of Adam optimizer beta1.
'adam_beta2': A float of Adam optimizer beta2.
'generator_learning_rate': A float of generator learning rate.
'discriminator_learning_rate': A float of discriminator learning rate.
Returns:
A tuple of `GANTrainOps` namedtuple and a list variables tracking the state
of optimizers.
"""
with tf.variable_scope('progressive_gan_train_ops') as var_scope:
beta1, beta2 = kwargs['adam_beta1'], kwargs['adam_beta2']
gen_opt = tf.train.AdamOptimizer(kwargs['generator_learning_rate'], beta1,
beta2)
dis_opt = tf.train.AdamOptimizer(kwargs['discriminator_learning_rate'],
beta1, beta2)
gan_train_ops = tfgan.gan_train_ops(gan_model, gan_loss, gen_opt, dis_opt)
return gan_train_ops, tf.get_collection(
tf.GraphKeys.GLOBAL_VARIABLES, scope=var_scope.name)
def add_generator_smoothing_ops(generator_ema, gan_model, gan_train_ops):
"""Adds generator smoothing ops."""
with tf.control_dependencies([gan_train_ops.generator_train_op]):
new_generator_train_op = generator_ema.apply(gan_model.generator_variables)
gan_train_ops = gan_train_ops._replace(
generator_train_op=new_generator_train_op)
generator_vars_to_restore = generator_ema.variables_to_restore(
gan_model.generator_variables)
return gan_train_ops, generator_vars_to_restore
def make_var_scope_custom_getter_for_ema(ema):
"""Makes variable scope custom getter."""
def _custom_getter(getter, name, *args, **kwargs):
var = getter(name, *args, **kwargs)
ema_var = ema.average(var)
return ema_var if ema_var else var
return _custom_getter
def add_model_summaries(model, **kwargs):
"""Adds model summaries.
This function adds several useful summaries during training:
- fake_images: A grid of fake images based on random latent vectors.
- interp_images: A grid of fake images based on interpolated latent vectors.
- real_images_blend: A grid of real images.
- summaries for `gan_model` losses, variable distributions etc.
Args:
model: An model object having all information of progressive GAN model,
e.g. the return of build_model().
**kwargs: A dictionary of
'fake_grid_size': The fake image grid size for summaries.
'interp_grid_size': The latent space interpolated image grid size for
summaries.
'colors': Number of image channels.
'latent_vector_size': An integer of latent vector size.
"""
fake_grid_size = kwargs['fake_grid_size']
interp_grid_size = kwargs['interp_grid_size']
colors = kwargs['colors']
image_shape = list(model.resolution_schedule.final_resolutions)
fake_batch_size = fake_grid_size**2
fake_images_shape = [fake_batch_size] + image_shape + [colors]
interp_batch_size = interp_grid_size**2
interp_images_shape = [interp_batch_size] + image_shape + [colors]
# When making prediction, use the ema smoothed generator vars.
with tf.variable_scope(
model.gan_model.generator_scope,
reuse=True,
custom_getter=make_var_scope_custom_getter_for_ema(model.generator_ema)):
z_fake = make_latent_vectors(fake_batch_size, **kwargs)
fake_images = model.gan_model.generator_fn(z_fake)
fake_images.set_shape(fake_images_shape)
z_interp = make_interpolated_latent_vectors(interp_grid_size,
interp_grid_size, **kwargs)
interp_images = model.gan_model.generator_fn(z_interp)
interp_images.set_shape(interp_images_shape)
tf.summary.image(
'fake_images',
tfgan.eval.eval_utils.image_grid(
fake_images,
grid_shape=[fake_grid_size] * 2,
image_shape=image_shape,
num_channels=colors),
max_outputs=1)
tf.summary.image(
'interp_images',
tfgan.eval.eval_utils.image_grid(
interp_images,
grid_shape=[interp_grid_size] * 2,
image_shape=image_shape,
num_channels=colors),
max_outputs=1)
real_grid_size = int(np.sqrt(model.batch_size))
tf.summary.image(
'real_images_blend',
tfgan.eval.eval_utils.image_grid(
model.gan_model.real_data[:real_grid_size**2],
grid_shape=(real_grid_size, real_grid_size),
image_shape=image_shape,
num_channels=colors),
max_outputs=1)
tfgan.eval.add_gan_model_summaries(model.gan_model)
def make_scaffold(stage_id, optimizer_var_list, **kwargs):
"""Makes a custom scaffold.
The scaffold
- restores variables from the last training stage.
- initializes new variables in the new block.
Args:
stage_id: An integer of stage id.
optimizer_var_list: A list of optimizer variables.
**kwargs: A dictionary of
'train_root_dir': A string of root directory of training logs.
'num_resolutions': An integer of number of progressive resolutions.
'stable_stage_num_images': An integer of number of training images in
the stable stage.
'transition_stage_num_images': An integer of number of training images
in the transition stage.
'total_num_images': An integer of total number of training images.
Returns:
A `Scaffold` object.
"""
# Holds variables that from the previous stage and need to be restored.
restore_var_list = []
prev_ckpt = None
curr_ckpt = tf.train.latest_checkpoint(make_train_sub_dir(stage_id, **kwargs))
if stage_id > 0 and curr_ckpt is None:
prev_ckpt = tf.train.latest_checkpoint(
make_train_sub_dir(stage_id - 1, **kwargs))
num_blocks, _ = get_stage_info(stage_id, **kwargs)
prev_num_blocks, _ = get_stage_info(stage_id - 1, **kwargs)
# Holds variables created in the new block of the current stage. If the
# current stage is a stable stage (except the initial stage), this list
# will be empty.
new_block_var_list = []
for block_id in range(prev_num_blocks + 1, num_blocks + 1):
new_block_var_list.extend(
tf.get_collection(
tf.GraphKeys.GLOBAL_VARIABLES,
scope='.*/{}/'.format(networks.block_name(block_id))))
# Every variables that are 1) not for optimizers and 2) from the new block
# need to be restored.
restore_var_list = [
var for var in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
if var not in set(optimizer_var_list + new_block_var_list)
]
# Add saver op to graph. This saver is used to restore variables from the
# previous stage.
saver_for_restore = tf.train.Saver(
var_list=restore_var_list, allow_empty=True)
# Add the op to graph that initializes all global variables.
init_op = tf.global_variables_initializer()
def _init_fn(unused_scaffold, sess):
# First initialize every variables.
sess.run(init_op)
logging.info('\n'.join([var.name for var in restore_var_list]))
# Then overwrite variables saved in previous stage.
if prev_ckpt is not None:
saver_for_restore.restore(sess, prev_ckpt)
# Use a dummy init_op here as all initialization is done in init_fn.
return tf.train.Scaffold(init_op=tf.constant([]), init_fn=_init_fn)
def make_status_message(model):
"""Makes a string `Tensor` of training status."""
return tf.string_join(
[
'Starting train step: current_image_id: ',
tf.as_string(model.current_image_id), ', progress: ',
tf.as_string(model.progress), ', num_blocks: {}'.format(
model.num_blocks), ', batch_size: {}'.format(model.batch_size)
],
name='status_message')
class ProganDebugHook(tf.train.SessionRunHook):
"""Prints summary statistics of all tf variables."""
def __init__(self):
super(ProganDebugHook, self).__init__()
self._fetches = [v for v in tf.global_variables()]
def before_run(self, _):
return tf.train.SessionRunArgs(self._fetches)
def after_run(self, _, vals):
print('=============')
print('Weight stats:')
for v, r in zip(self._fetches, vals.results):
print('\t', v.name, np.min(r), np.mean(r), np.max(r), r.shape)
print('=============')
class TrainTimeHook(tf.train.SessionRunHook):
"""Updates the train_time variable.
Optionally stops training if we've passed a time limit.
"""
def __init__(self, train_time, time_limit=None):
super(TrainTimeHook, self).__init__()
self._train_time = train_time
self._time_limit = time_limit
self._increment_amount = tf.placeholder(tf.float32, None)
self._increment_op = tf.assign_add(train_time, self._increment_amount)
self._last_run_duration = None
def before_run(self, _):
self._last_run_start_time = time.time()
if self._last_run_duration is not None:
return tf.train.SessionRunArgs(
[self._train_time, self._increment_op],
feed_dict={self._increment_amount: self._last_run_duration})
else:
return tf.train.SessionRunArgs([self._train_time])
def after_run(self, run_context, vals):
self._last_run_duration = time.time() - self._last_run_start_time
train_time = vals.results[0]
if (self._time_limit is not None) and (train_time > self._time_limit):
run_context.request_stop()
def train(model, **kwargs):
"""Trains progressive GAN for stage `stage_id`.
Args:
model: An model object having all information of progressive GAN model,
e.g. the return of build_model().
**kwargs: A dictionary of
'train_root_dir': A string of root directory of training logs.
'master': Name of the TensorFlow master to use.
'task': The Task ID. This value is used when training with multiple
workers to identify each worker.
'save_summaries_num_images': Save summaries in this number of images.
'debug_hook': Whether to attach the debug hook to the training session.
Returns:
None.
"""
logging.info('stage_id=%d, num_blocks=%d, num_images=%d', model.stage_id,
model.num_blocks, model.num_images)
scaffold = make_scaffold(model.stage_id, model.optimizer_var_list, **kwargs)
logdir = make_train_sub_dir(model.stage_id, **kwargs)
print('starting training, logdir: {}'.format(logdir))
hooks = []
if model.stage_train_time_limit is None:
hooks.append(tf.train.StopAtStepHook(last_step=model.num_images))
hooks.append(tf.train.LoggingTensorHook(
[make_status_message(model)], every_n_iter=1))
hooks.append(TrainTimeHook(model.train_time, model.stage_train_time_limit))
if kwargs['debug_hook']:
hooks.append(ProganDebugHook())
tfgan.gan_train(
model.gan_train_ops,
logdir=logdir,
get_hooks_fn=tfgan.get_sequential_train_hooks(tfgan.GANTrainSteps(1, 1)),
hooks=hooks,
master=kwargs['master'],
is_chief=(kwargs['task'] == 0),
scaffold=scaffold,
save_checkpoint_secs=600,
save_summaries_steps=(kwargs['save_summaries_num_images']))
| jesseengel/magenta | magenta/models/gansynth/lib/train_util.py | Python | apache-2.0 | 18,904 | 0.006083 |
import numpy as np
from scipy.ndimage.filters import convolve
from scipy.ndimage import maximum_filter, gaussian_filter
from .. import MaskSeparationBase, SeparationException
from ..benchmark import HighLowPassFilter
from ... import AudioSignal
from ... import vamp_imported
import scipy.signal
if vamp_imported:
import vamp
# function for generating the vocal chord impulse response
def rosenmodel(t1, t2, fs):
"""
This model for generating singing vowel sounds from sine tones comes
from:
https://simonl02.users.greyc.fr/files/Documents/Teaching/SignalProcessingLabs/lab3.pdf
The above will be referred to throughout these docstrings as THE DOCUMENT.
Original author: Fatemeh Pishdadian
    The arguments to the functions throughout this text follow the
signatures laid out in THE DOCUMENT.
This is used in Melodia to generate the melody signal to produce a
mask.
Equation 2 in THE DOCUMENT.
"""
N1 = np.floor(t1 * fs)
N2 = np.floor(t2 * fs)
samp_vec1 = np.arange(N1+1)
samp_vec2 = np.arange(N1,N1+N2+1)
ir_func1 = 0.5 * (1 - np.cos((np.pi * samp_vec1)/N1))
ir_func2 = np.cos(np.pi * (samp_vec2 - N1)/(2 * N2))
vchord_filt = np.concatenate((ir_func1,ir_func2))
return vchord_filt
# function for computing the denominator coeffs of the vocal cavity filter transfer function
def oral_cavity_filt(pole_amps, pole_freqs,fs):
"""
This model for generating singing vowel sounds from sine tones comes
from:
https://simonl02.users.greyc.fr/files/Documents/Teaching/SignalProcessingLabs/lab3.pdf
The above will be referred to throughout these docstrings as THE DOCUMENT.
Original author: Fatemeh Pishdadian
    The arguments to the functions throughout this text follow the
signatures laid out in THE DOCUMENT.
This is used in Melodia to generate the melody signal to produce a
mask.
Solves "Q. Write a function to synthesize filter H(z)" in
THE DOCUMENT
"""
num_pole_pair = len(pole_amps)
poles = pole_amps * np.exp(1j * 2 * np.pi * pole_freqs / fs)
poles_conj = np.conj(poles)
denom_coeffs = 1
for i in range(num_pole_pair):
pole_temp = poles[i]
pole_conj_temp = poles_conj[i]
pole_pair_coeffs = np.convolve(np.array([1,-pole_temp]),np.array([1,-pole_conj_temp]))
denom_coeffs = np.convolve(denom_coeffs, pole_pair_coeffs)
return denom_coeffs
def _apply_vowel_filter(impulse_train, fs, t1=0.0075, t2=.013,
pole_amps=None, pole_freqs=None):
"""
This model for generating singing vowel sounds from sine tones comes
from:
https://simonl02.users.greyc.fr/files/Documents/Teaching/SignalProcessingLabs/lab3.pdf
The above will be referred to throughout these docstrings as THE DOCUMENT.
Original author: Fatemeh Pishdadian
    The arguments to the functions throughout this text follow the
signatures laid out in THE DOCUMENT.
This is used in Melodia to generate the melody signal to produce a
mask.
Args:
impulse_train (np.ndarray): Numpy array with data to be filtered
fs (int): Sample rate of audio.
t1 (float, optional): N1 in Equation 2 in THE DOCUMENT. Defaults to 0.0075.
t2 (float, optional): N2 in Equation 2 in THE DOCUMENT. Defaults to .013.
pole_amps (np.ndarray, optional): Pole amplitudes, see Figures 2-4 in THE DOCUMENT.
Defaults to None, which maps to E vowel.
pole_freqs (np.ndarray, optional): Pole frequencies, see Figures 2-4 in THE DOCUMENT.
Defaults to None, which maps to E vowel
Returns:
np.ndarray: Filtered impulse train that should sound sort of like the desired
vowel.
"""
if pole_amps is None:
pole_amps = np.array([0.99,0.98,0.9,0.9])
if pole_freqs is None:
pole_freqs = np.array([800,1200,2800,3600])
vchord_filt = rosenmodel(t1, t2, fs)
vchord_out = np.convolve(impulse_train, vchord_filt)
denom_coeffs = oral_cavity_filt(pole_amps, pole_freqs, fs)
oral_out = scipy.signal.lfilter(
np.array([1]), denom_coeffs, vchord_out)
lip_out = np.real(scipy.signal.lfilter(
np.array([1,-1]), np.array([1]), oral_out))
lip_out = lip_out[:impulse_train.shape[0]]
return np.real(lip_out)
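# --- Illustrative sketch (editor's addition, not part of the original module). ---
# Drives the vowel filter with a simple glottal impulse train; the sample rate,
# pitch and duration below are arbitrary example values.
def _example_vowel_synthesis(fs=16000, f0=110.0, duration=0.5):
    num_samples = int(fs * duration)
    impulse_train = np.zeros(num_samples)
    period = int(fs / f0)          # samples between glottal pulses
    impulse_train[::period] = 1.0
    # the default pole values correspond to the "E" vowel described in THE DOCUMENT
    return _apply_vowel_filter(impulse_train, fs)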
class Melodia(MaskSeparationBase):
"""
Implements melody extraction using Melodia [1].
This needs Melodia installed as a vamp plugin, as well as having vampy for
Python installed. Install Melodia via: https://www.upf.edu/web/mtg/melodia.
Note that Melodia can be used only for NON-COMMERCIAL use.
References:
[1] J. Salamon and E. Gómez, "Melody Extraction from Polyphonic Music Signals using
Pitch Contour Characteristics", IEEE Transactions on Audio, Speech and
Language Processing, 20(6):1759-1770, Aug. 2012.
Args:
input_audio_signal (AudioSignal object): The AudioSignal object that has the
audio data that Melodia will be run on.
high_pass_cutoff (optional, float): value (in Hz) for the high pass cutoff
filter.
minimum_frequency (optional, float): minimum frequency in Hertz (default 55.0)
maximum_frequency (optional, float): maximum frequency in Hertz (default 1760.0)
voicing_tolerance (optional, float): Greater values will result in more pitch contours
included in the final melody. Smaller values will result in less pitch
contours included in the final melody (default 0.2).
minimum_peak_salience (optional, float): a hack to avoid silence turning into junk
contours when analyzing monophonic recordings (e.g. solo voice with
no accompaniment). Generally you want to leave this untouched (default 0.0).
num_overtones (optional, int): Number of overtones to use when creating
melody mask.
apply_vowel_filter (optional, bool): Whether or not to apply a vowel filter
on the resynthesized melody signal when masking.
smooth_length (optional, int): Number of frames to smooth discontinuities in the
mask.
        add_lower_octave (optional, bool): Use octave below fundamental frequency as well
to take care of octave errors in pitch tracking, since we only care about
the mask. Defaults to False.
mask_type (optional, str): Type of mask to use.
mask_threshold (optional, float): Threshold for mask to convert to binary.
"""
def __init__(self, input_audio_signal, high_pass_cutoff=100, minimum_frequency=55.0,
maximum_frequency=1760.0, voicing_tolerance=0.2, minimum_peak_salience=0.0,
compression=0.5, num_overtones=40, apply_vowel_filter=False, smooth_length=5,
add_lower_octave=False, mask_type='soft', mask_threshold=0.5):
# lazy load vamp to check if it exists
from ... import vamp_imported
melodia_installed = False
if vamp_imported:
melodia_installed = 'mtg-melodia:melodia' in vamp.list_plugins()
if not vamp_imported or not melodia_installed:
self._raise_vamp_melodia_error()
super().__init__(
input_audio_signal=input_audio_signal,
mask_type=mask_type,
mask_threshold=mask_threshold
)
self.high_pass_cutoff = high_pass_cutoff
self.minimum_frequency = float(minimum_frequency)
self.maximum_frequency = float(maximum_frequency)
self.voicing_tolerance = float(voicing_tolerance)
self.minimum_peak_salience = float(minimum_peak_salience)
self.compression = compression
self.apply_vowel_filter = apply_vowel_filter
self.add_lower_octave = add_lower_octave
self.melody = None
self.melody_signal = None
self.timestamps = None
self.num_overtones = num_overtones
self.smooth_length = smooth_length
def _raise_vamp_melodia_error(self):
raise SeparationException(
'\n**~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~**'
'\n* Are Vamp and Melodia installed correctly? *'
'\n* Check https://bit.ly/2DXbrAk for installation instructions! *'
'\n**~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~**')
def extract_melody(self):
"""
Extracts melody from the audio using the melodia vamp plugin. Uses arguments kept
in self:
- `self.minimum_frequency` (default: 55 Hz)
- `self.maximum_frequency` (default: 1760 Hz)
- `self.voicing_tolerance` (default: 0.2)
- `self.minimum_peak_salience` (default: 0.0)
This function sets two class members used in other parts:
- `self.melody`: (numpy array) contains the melody in Hz for every timestep
(0 indicates no voice).
- `self.timestamps`: (numpy array) contains the timestamps for each melody note
"""
params = {
'minfqr': self.minimum_frequency,
'maxfqr': self.maximum_frequency,
'voicing': self.voicing_tolerance,
'minpeaksalience': self.minimum_peak_salience
}
data = vamp.collect(self.audio_signal.audio_data, self.sample_rate,
"mtg-melodia:melodia", parameters=params)
_, melody = data['vector']
        hop = 128. / 44100.  # hard-coded hop of the Melodia vamp plugin: 128 samples at 44.1 kHz, in seconds.
timestamps = 8 * hop + np.arange(len(melody)) * hop
melody[melody < 0] = 0
self.melody = melody
self.timestamps = timestamps
def create_melody_signal(self, num_overtones):
"""
Adapted from Melosynth by Justin Salamon: https://github.com/justinsalamon/melosynth.
To mask the mixture, we need to identify time-frequency bins that belong to the
melody. Melodia outputs only the fundamental frequency of the melodic line.
To construct the mask we take the fundamental frequency and add all the
overtones of it (up to num_overtones) to the mask. The melody is faded in and
out at onsets and offsets to make the separation sound more natural
(hard-coded by transition_length).
Args:
num_overtones (int): Number of overtones to expand out to build the mask.
"""
if self.timestamps[0] > 0:
estimated_hop = np.median(np.diff(self.timestamps))
previous_time = max(self.timestamps[0] - estimated_hop, 0)
self.timestamps = np.insert(self.timestamps, 0, previous_time)
self.melody = np.insert(self.melody, 0, 0)
sample_rate = self.audio_signal.sample_rate
melody_signal = []
        transition_length = .001  # duration for fade in/out and frequency interpolation
phase = np.zeros(num_overtones)
previous_frequency = 0
previous_time = 0
overtone_weights = np.ones(num_overtones)
for time, frequency in zip(self.timestamps, self.melody):
if self.add_lower_octave:
frequency = frequency / 2
# taking care of octave errors since we only care about masking
num_samples = int(np.round((time - previous_time) * sample_rate))
if num_samples > 0:
num_transition_samples = float(
min(np.round(transition_length * sample_rate), num_samples))
frequency_series = np.ones(num_samples) * previous_frequency
if previous_frequency > 0 and frequency > 0:
frequency_series += np.minimum(
np.arange(num_samples) / num_transition_samples, 1) * \
(frequency - previous_frequency)
elif frequency > 0:
frequency_series = np.ones(num_samples) * frequency
samples = np.zeros(num_samples)
for overtone in range(num_overtones):
overtone_num = overtone + 1
phasors = 2 * np.pi * overtone_num * frequency_series / float(sample_rate)
phases = phase[overtone] + np.cumsum(phasors)
samples += overtone_weights[overtone] * np.sign(np.sin(phases))
phase[overtone] = phases[-1]
if previous_frequency == 0 and frequency > 0:
samples *= np.minimum(np.arange(num_samples) / num_transition_samples, 1)
elif previous_frequency > 0 and frequency == 0:
samples *= np.maximum(1 - np.arange(num_samples) / num_transition_samples, 0)
elif previous_frequency == 0 and frequency == 0:
samples *= 0
melody_signal.extend(samples)
previous_frequency = frequency
previous_time = time
melody_signal = np.asarray(melody_signal)
if self.apply_vowel_filter:
melody_signal = _apply_vowel_filter(melody_signal, sample_rate)
melody_signal /= float(max(np.max(melody_signal), 1e-7))
melody_signal = [melody_signal for _ in range(self.audio_signal.num_channels)]
melody_signal = np.asarray(melody_signal)
melody_signal = melody_signal[:, 0:self.audio_signal.signal_length]
melody_signal = AudioSignal(
audio_data_array=melody_signal,
sample_rate=sample_rate,
stft_params=self.audio_signal.stft_params
)
self.melody_signal = melody_signal
return melody_signal
def create_harmonic_mask(self, melody_signal):
"""
Creates a harmonic mask from the melody signal. The mask is smoothed to reduce
the effects of discontinuities in the melody synthesizer.
"""
stft = np.abs(melody_signal.stft())
# Need to threshold the melody stft since the synthesized
# F0 sequence overtones are at different weights.
stft = stft ** self.compression
stft /= np.maximum(np.max(stft, axis=1, keepdims=True), 1e-7)
mask = np.empty(self.stft.shape)
# Smoothing the mask row-wise using a low-pass filter to
        # get rid of discontinuities in the mask.
kernel = np.full((1, self.smooth_length), 1 / self.smooth_length)
for ch in range(self.audio_signal.num_channels):
mask[..., ch] = convolve(stft[..., ch], kernel)
return mask
def run(self):
high_low = HighLowPassFilter(self.audio_signal, self.high_pass_cutoff)
high_pass_masks = high_low.run()
# separate the mixture foreground melody by masking
if self.melody_signal is None:
self.extract_melody()
self.create_melody_signal(self.num_overtones)
foreground_mask = self.create_harmonic_mask(self.melody_signal)
foreground_mask = self.MASKS['soft'](foreground_mask)
background_mask = foreground_mask.invert_mask()
_masks = np.stack(
[background_mask.mask, foreground_mask.mask], axis=-1)
self.result_masks = []
for i in range(_masks.shape[-1]):
mask_data = _masks[..., i]
if self.mask_type == self.MASKS['binary']:
mask_data = _masks[..., i] == np.max(_masks, axis=-1)
if i == 0:
mask_data = np.maximum(mask_data, high_pass_masks[i].mask)
elif i == 1:
mask_data = np.minimum(mask_data, high_pass_masks[i].mask)
mask = self.mask_type(mask_data)
self.result_masks.append(mask)
return self.result_masks
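# --- Illustrative usage sketch (editor's addition, not part of the original module). ---
# 'mixture.wav' is a hypothetical input file. This assumes the Melodia vamp plugin
# is installed and that the standard nussl MaskSeparationBase workflow
# (make_audio_signals applying the returned masks) is available.
if __name__ == '__main__':
    mixture = AudioSignal('mixture.wav')
    separator = Melodia(mixture, num_overtones=40)
    background_mask, foreground_mask = separator.run()
    background, melody = separator.make_audio_signals()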
| interactiveaudiolab/nussl | nussl/separation/primitive/melodia.py | Python | mit | 15,856 | 0.006244 |
import re
from enum import Enum
from context import ParsingContext
class UnrollerState(Enum):
START = 0
NAMES = 1
VALUES = 2
END = 3
class Unroller(object):
should_unroll_big = False
def __init__(self, unroll_big):
self.brackets_stack = []
self.names = []
self.values = []
self.recorded_lines = []
self.givens = []
self.state = UnrollerState.START
self.all_lines = []
self.was_parameterized = False
Unroller.should_unroll_big = unroll_big
def record(self, line):
self.all_lines.append(line)
if self.state == UnrollerState.NAMES:
if '<<' in line:
self.names.append(line.split(' << ')[0].strip())
vals = line.split(' << ')[-1].strip().lstrip('[').rstrip(']').split(',')
self.values.extend([[val.strip()] for val in vals])
self.all_lines.pop()
self.state = UnrollerState.END
return False
if self.state == UnrollerState.NAMES:
line = line.replace('||', '|')
self.names = [var.strip() for var in line.split('|')]
self.all_lines.pop()
self.state = UnrollerState.VALUES
return False
elif self.state == UnrollerState.VALUES:
if '|' in line:
line = line.replace('||', '|')
self.values.append([var.strip() for var in line.split('|')])
self.all_lines.pop()
return False
elif '=' in line:
self.givens.append(line)
return False
else:
self.state = UnrollerState.END
if 'where:' in line:
if not re.search('\S', self.recorded_lines[-1]):
self.recorded_lines.pop()
if not re.search('\S', self.all_lines[-1]):
self.all_lines.pop()
self.state = UnrollerState.NAMES
self.all_lines.pop()
return False
self.recorded_lines.append(line)
if self.state == UnrollerState.START or self.state == UnrollerState.END:
if '}' in line:
return True
return False
def unroll_tests(self):
if len(self.values) > 3 and not Unroller.should_unroll_big:
self.was_parameterized = True
return self.get_parameterized_template()
self.givens = ['val ' + given for given in self.givens]
new_tests = []
for set_index, _set in enumerate(self.values):
method_name = self.recorded_lines[0]
for name_index, name in enumerate(self.names):
method_name = method_name.replace("#{}".format(name), _set[name_index].strip().strip('"'))
method_name = self.replace_invalid_chars(method_name)
new_tests.append(method_name)
new_givens = []
for given in self.givens:
for name_index, name in enumerate(self.names):
given = re.sub(r'([^.]){}(\b)'.format(name), '\g<1>' + _set[name_index] + '\g<2>', given)
new_tests.append(given)
new_tests.extend(new_givens)
for line in self.recorded_lines[1:]:
for name_index, name in enumerate(self.names):
line = re.sub(r'([^.]){}(\b)'.format(name), '\g<1>' + _set[name_index] + '\g<2>', line)
new_tests.append(line)
new_tests.append('')
return new_tests
@staticmethod
def replace_invalid_chars(method_name):
method_name = method_name.replace(',', '[coma]')
method_name = method_name.replace('.', '[dot]')
method_name = method_name.replace('[', '(')
method_name = method_name.replace(']', ')')
method_name = method_name.replace(':', '')
method_name = method_name.replace('\\n', ' newline ')
method_name = method_name.replace('\\', ' [slash] ')
return method_name
@staticmethod
def parse(spock):
state = ParsingContext.INDETERMINATE
unroller = Unroller(Unroller.should_unroll_big)
new_lines = []
parameterized_lines = []
for line in spock:
if '@Unroll' in line:
state = ParsingContext.UNROLLING
continue
if state == ParsingContext.UNROLLING:
if unroller.record(line):
new_tests = unroller.unroll_tests()
state = ParsingContext.INDETERMINATE
if not unroller.was_parameterized:
new_lines.extend(new_tests)
else:
parameterized_lines.extend(new_tests)
unroller = Unroller(Unroller.should_unroll_big)
continue
new_lines.append(line)
new_lines.extend(parameterized_lines)
return new_lines
def get_parameterized_template(self):
class_name = re.search('`(.*)`', self.all_lines[0]).group(1)
coma_separator = ',\n'
parameters = coma_separator.join(["private val " + name + ": " for name in self.names])
data_whitespace = ' ' * 16
values = coma_separator.join([data_whitespace + '{ arrayOf(' + ', '.join(vals) + ') }' for vals in self.values])
pipe_whitespace = ' |\n' + ' ' * 48
names = pipe_whitespace.join([name + ' {' + str(index) + '}' for index, name in enumerate(self.names)])
parameterized_template = '''
@RunWith(Parameterized::class)
class `{}`({}) : Setup() {{
companion object {{
@JvmStatic
@Parameterized.Parameters(name = """{}
""")
fun data() = createData(
{}
)
}}
'''.format(class_name, parameters, names, values)
self.all_lines.insert(0, parameterized_template)
self.all_lines.append("}")
return self.all_lines
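# --- Illustrative usage sketch (editor's addition, not part of the original module). ---
# The Spock feature below is a hypothetical input; parse() rewrites it once per
# row of the where: table (or emits a parameterized template for tables with
# more than three rows when should_unroll_big is False).
if __name__ == '__main__':
    spock_lines = [
        "@Unroll",
        "def `#a plus #b is #c`() {",
        "    expect:",
        "    a + b == c",
        "",
        "    where:",
        "    a | b || c",
        "    1 | 2 || 3",
        "    2 | 2 || 4",
        "}",
    ]
    for unrolled_line in Unroller.parse(spock_lines):
        print(unrolled_line)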
| xklakoux/spock2kotlin | unroller.py | Python | mit | 6,026 | 0.002489 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2016 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emanuel Cino
#
# The licence is in the file __manifest__.py
#
##############################################################################
from odoo import api, models
class SubscribeLogs(models.TransientModel):
_name = 'auditlog.subscriber'
@api.model
def subscribe_rules(self):
xml_data = self.env['ir.model.data'].search([
('module', '=', 'logging_compassion'),
('model', '=', 'auditlog.rule')])
rules = self.env['auditlog.rule'].browse(xml_data.mapped('res_id'))
rules.subscribe()
| eicher31/compassion-modules | logging_compassion/wizards/subscribe_logs.py | Python | agpl-3.0 | 789 | 0 |
import numpy
import chainer
from chainer import functions
from chainer import testing
@testing.parameterize(*testing.product({
'shape': [(4, 3, 2), (1,), (1, 2, 3, 4, 5, 6)],
'Wdim': [0, 1, 3],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@testing.fix_random()
@chainer.testing.backend.inject_backend_tests(
None,
[
# NumPy
{},
# CuPy
{'use_cuda': True, 'cuda_device': 0},
{'use_cuda': True, 'cuda_device': 1},
# ChainerX
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
])
class TestPReLU(testing.FunctionTestCase):
def setUp(self):
self.check_backward_options = {}
if self.dtype == numpy.float16:
self.check_backward_options.update({'atol': 5e-4, 'rtol': 5e-3})
self.check_double_backward_options = {'atol': 5e-4, 'rtol': 5e-3}
if self.dtype == numpy.float16:
self.check_double_backward_options.update(
{'atol': 5e-3, 'rtol': 5e-2})
def generate_inputs(self):
x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
x[(-0.05 < x) & (x < 0.05)] = 0.5
W = numpy.random.uniform(
-1, 1, self.shape[1:1 + self.Wdim]).astype(self.dtype)
return x, W
def forward_expected(self, inputs):
x, W = inputs
y_expect = x.copy()
masked = numpy.ma.masked_greater_equal(y_expect, 0, copy=False)
shape = (1,) + W.shape + (1,) * (x.ndim - W.ndim - 1)
masked *= W.reshape(shape)
return y_expect,
def forward(self, inputs, device):
x, W = inputs
y = functions.prelu(x, W)
return y,
testing.run_module(__name__, __file__)
| pfnet/chainer | tests/chainer_tests/functions_tests/activation_tests/test_prelu.py | Python | mit | 1,845 | 0 |
from django.test import TestCase
from mock import Mock, patch
from paymentexpress.facade import Facade
from paymentexpress.gateway import AUTH, PURCHASE
from paymentexpress.models import OrderTransaction
from tests import (XmlTestingMixin, CARD_VISA, SAMPLE_SUCCESSFUL_RESPONSE,
SAMPLE_DECLINED_RESPONSE, SAMPLE_ERROR_RESPONSE)
from oscar.apps.payment.utils import Bankcard
from oscar.apps.payment.exceptions import (UnableToTakePayment,
InvalidGatewayRequestError)
class MockedResponseTestCase(TestCase):
def create_mock_response(self, body, status_code=200):
response = Mock()
response.content = body
response.text = body
response.status_code = status_code
return response
class FacadeTests(TestCase, XmlTestingMixin):
def setUp(self):
self.facade = Facade()
def test_zero_amount_raises_exception(self):
card = Bankcard(card_number=CARD_VISA,
expiry_date='1015',
name="Frankie", cvv="123",
start_date="1010")
with self.assertRaises(UnableToTakePayment):
self.facade.authorise('1000', 0, card)
def test_zero_amount_for_complete_raises_exception(self):
with self.assertRaises(UnableToTakePayment):
self.facade.complete('1000', 0, '1234')
def test_zero_amount_for_purchase_raises_exception(self):
with self.assertRaises(UnableToTakePayment):
self.facade.purchase('1000', 0)
def test_purchase_without_billing_id_or_card_raises_exception(self):
with self.assertRaises(ValueError):
self.facade.purchase('1000', 1.23)
def test_zero_amount_for_refund_raises_exception(self):
with self.assertRaises(UnableToTakePayment):
self.facade.refund('1000', 0, '1234')
def test_merchant_reference_format(self):
merchant_ref = self.facade._get_merchant_reference('1000', AUTH)
self.assertRegexpMatches(merchant_ref, r'^\d+_[A-Z]+_\d+_\d{4}$')
class FacadeSuccessfulResponseTests(MockedResponseTestCase):
dps_txn_ref = '000000030884cdc6'
dps_billing_id = '0000080023225598'
def setUp(self):
self.facade = Facade()
self.card = Bankcard(card_number=CARD_VISA,
expiry_date='1015',
name="Frankie", cvv="123",
start_date="1010")
def test_successful_call_returns_valid_dict(self):
with patch('requests.post') as post:
post.return_value = self.create_mock_response(
SAMPLE_SUCCESSFUL_RESPONSE)
auth_dict = self.facade.authorise('1000', 1, self.card)
complete_dict = self.facade.complete('1000', 1.23,
self.dps_txn_ref)
refund_dict = self.facade.refund('1000', 1.23, '000000030884cdc6')
validate_dict = self.facade.validate(self.card)
response_dicts = (auth_dict, complete_dict, refund_dict,
validate_dict)
for response_dict in response_dicts:
self.assertEquals(self.dps_txn_ref,
response_dict['txn_reference'])
self.assertEquals(self.dps_billing_id,
response_dict['partner_reference'])
def test_purchase_with_billing_id_returns_valid_dict(self):
with patch('requests.post') as post:
post.return_value = self.create_mock_response(
SAMPLE_SUCCESSFUL_RESPONSE)
txn_ref = self.facade.purchase('1000', 1.23, 'abc123')
self.assertEquals(self.dps_txn_ref, txn_ref['txn_reference'])
def test_purchase_with_bankcard_returns_valid_dict(self):
with patch('requests.post') as post:
post.return_value = self.create_mock_response(
SAMPLE_SUCCESSFUL_RESPONSE)
txn_ref = self.facade.purchase('1000', 1.23, None, self.card)
self.assertEquals(self.dps_txn_ref, txn_ref['txn_reference'])
def test_successful_call_is_recorded(self):
with patch('requests.post') as post:
post.return_value = self.create_mock_response(
SAMPLE_SUCCESSFUL_RESPONSE)
self.facade.authorise('10001', 10.25, self.card)
txn = OrderTransaction.objects.filter(order_number='10001')[0]
self.assertEquals(AUTH, txn.txn_type)
def test_empty_issue_date_is_allowed(self):
with patch('requests.post') as post:
post.return_value = self.create_mock_response(
SAMPLE_SUCCESSFUL_RESPONSE)
card = Bankcard(card_number=CARD_VISA,
expiry_date='1015',
name="Frankie", cvv="123")
txn_ref = self.facade.authorise('1000', 1.23, card)
self.assertEquals(self.dps_txn_ref, txn_ref['txn_reference'])
class FacadeDeclinedResponseTests(MockedResponseTestCase):
def setUp(self):
self.facade = Facade()
self.card = Bankcard(card_number=CARD_VISA,
expiry_date='1015',
name="Frankie", cvv="123",
start_date="1010")
def test_declined_call_raises_an_exception(self):
with patch('requests.post') as post:
post.return_value = self.create_mock_response(
SAMPLE_DECLINED_RESPONSE)
with self.assertRaises(UnableToTakePayment):
self.facade.authorise('1000', 1, self.card)
with self.assertRaises(UnableToTakePayment):
self.facade.complete('1000', 1.23, '000000030884cdc6')
with self.assertRaises(UnableToTakePayment):
self.facade.purchase('1000', 1.23, 'abc123')
with self.assertRaises(UnableToTakePayment):
self.facade.purchase('1000', 1.23, None, self.card)
with self.assertRaises(UnableToTakePayment):
self.facade.refund('1000', 1.23, '000000030884cdc6')
with self.assertRaises(UnableToTakePayment):
self.facade.validate(self.card)
def test_declined_call_is_recorded(self):
with patch('requests.post') as post:
post.return_value = self.create_mock_response(
SAMPLE_DECLINED_RESPONSE)
try:
self.facade.purchase('1001', 10.24, None, self.card)
except Exception:
pass
txn = OrderTransaction.objects.filter(order_number='1001')[0]
self.assertIsNotNone(txn)
self.assertEquals(PURCHASE, txn.txn_type)
class FacadeErrorResponseTests(MockedResponseTestCase):
def setUp(self):
self.facade = Facade()
self.card = Bankcard(card_number=CARD_VISA,
expiry_date='1015',
name="Frankie", cvv="123",
start_date="1010")
def test_error_response_raises_invalid_gateway_request_exception(self):
with patch('requests.post') as post:
post.return_value = self.create_mock_response(
SAMPLE_ERROR_RESPONSE)
with self.assertRaises(InvalidGatewayRequestError):
self.facade.purchase('1000', 10.24, None, self.card)
| django-oscar/django-oscar-paymentexpress | tests/facade_tests.py | Python | bsd-3-clause | 7,334 | 0.001364 |
# -*- coding: utf-8 -*-
# Copyright (c)2013 Rackspace US, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from functools import wraps
import re
from pyrax.client import BaseClient
import pyrax.exceptions as exc
from pyrax.manager import BaseManager
from pyrax.resource import BaseResource
import pyrax.utils as utils
class CloudCDNFlavor(BaseResource):
pass
class CloudCDNFlavorManager(BaseManager):
def list(self):
resp, resp_body = self.api.method_get("/%s" % self.uri_base)
return [CloudCDNFlavor(self, info)
for info in resp_body[self.plural_response_key]]
def get(self, flavor_id):
resp, resp_body = self.api.method_get(
"/%s/%s" % (self.uri_base, flavor_id))
return CloudCDNFlavor(self, resp_body)
class CloudCDNService(BaseResource):
def patch(self, changes):
self.manager.patch(self.id, changes)
def delete(self):
self.manager.delete(self)
def delete_assets(self, url=None, all=False):
self.manager.delete_assets(self.id, url, all)
class CloudCDNServiceManager(BaseManager):
def create(self, name, flavor_id, domains, origins,
restrictions=None, caching=None, log_delivery=False):
body = {"name": name,
"flavor_id": flavor_id,
"domains": domains,
"origins": origins,
"restrictions": restrictions or [],
"caching": caching or [],
"log_delivery": {"enabled": bool(log_delivery)}}
resp, resp_body = self.api.method_post("/%s" % self.uri_base,
body=body)
body["id"] = resp.headers.get("location").split("/")[-1]
return CloudCDNService(self, body)
def patch(self, service_id, changes):
resp, resp_body = self.api.method_patch(
"/%s/%s" % (self.uri_base, service_id), body=changes)
return None
def delete_assets(self, service_id, url=None, all=False):
uri = "/%s/%s/assets" % (self.uri_base, service_id)
queries = {}
if all:
queries["all"] = "true"
if url is not None:
queries["url"] = url
qs = utils.dict_to_qs(queries)
if qs:
uri = "%s?%s" % (uri, qs)
self.api.method_delete(uri)
return None
def list(self, limit=None, marker=None):
uri = "/%s" % self.uri_base
qs = utils.dict_to_qs(dict(limit=limit, marker=marker))
if qs:
uri = "%s?%s" % (uri, qs)
return self._list(uri)
class CloudCDNClient(BaseClient):
"""
This is the base client for creating and managing Cloud CDN.
"""
def __init__(self, *args, **kwargs):
super(CloudCDNClient, self).__init__(*args, **kwargs)
self.name = "Cloud CDN"
def _configure_manager(self):
"""
Creates the Manager instances to handle monitoring.
"""
self._flavor_manager = CloudCDNFlavorManager(self,
uri_base="flavors", resource_class=CloudCDNFlavor,
response_key=None, plural_response_key="flavors")
self._services_manager = CloudCDNServiceManager(self,
uri_base="services", resource_class=CloudCDNService,
response_key=None, plural_response_key="services")
def ping(self):
"""Ping the server
Returns None if successful, or raises some exception...TODO
"""
self.method_get("/ping")
def list_flavors(self):
"""List CDN flavors."""
return self._flavor_manager.list()
def get_flavor(self, flavor_id):
"""Get one CDN flavor."""
return self._flavor_manager.get(flavor_id)
def list_services(self, limit=None, marker=None):
"""List CDN services."""
return self._services_manager.list(limit=limit, marker=marker)
def get_service(self, service_id):
"""Get one CDN service."""
return self._services_manager.get(service_id)
def create_service(self, name, flavor_id, domains, origins,
restrictions=None, caching=None, log_delivery=False):
"""Create a new CDN service.
Arguments:
name: The name of the service.
flavor_id: The ID of the flavor to use for this service.
domains: A list of dictionaries, each of which has a required
key "domain" and optional key "protocol" (the default
protocol is http).
origins: A list of dictionaries, each of which has a required
key "origin" which is the URL or IP address to pull
origin content from. Optional keys include "port" to
use a port other than the default of 80, and "ssl"
to enable SSL, which is disabled by default.
            caching: An optional list of dictionaries specifying caching rules.
"""
return self._services_manager.create(name, flavor_id, domains,
origins, restrictions, caching,
log_delivery)
def patch_service(self, service_id, changes):
"""Update a CDN service with a patch
Arguments:
service_id: The ID of the service to update.
changes: A list of dictionaries containing the following keys:
op, path, and value. The "op" key can be any of the
following actions: add, replace, or remove. Path
is the path to update. A value must be specified for
add or replace ops, but can be omitted for remove.
"""
self._services_manager.patch(service_id, changes)
def delete_service(self, service):
"""Delete a CDN service."""
self._services_manager.delete(service)
def delete_assets(self, service_id, url=None, all=False):
"""Delete CDN assets
Arguments:
service_id: The ID of the service to delete from.
url: The URL at which to delete assets
all: When True, delete all assets associated with the service_id.
            You cannot specify both url and all.
"""
self._services_manager.delete_assets(service_id, url, all)
#################################################################
# The following methods are defined in the generic client class,
# but don't have meaning in cdn, as there is not a single
# resource that defines this module.
#################################################################
def list(self, limit=None, marker=None):
"""Not applicable in Cloud CDN."""
raise NotImplementedError
def get(self, item):
"""Not applicable in Cloud CDN."""
raise NotImplementedError
def create(self, *args, **kwargs):
"""Not applicable in Cloud CDN."""
raise NotImplementedError
def delete(self, item):
"""Not applicable in Cloud CDN."""
raise NotImplementedError
def find(self, **kwargs):
"""Not applicable in Cloud CDN."""
raise NotImplementedError
def findall(self, **kwargs):
"""Not applicable in Cloud CDN."""
raise NotImplementedError
#################################################################
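# --- Illustrative usage sketch (editor's addition, not part of the original module). ---
# Assumes an already-authenticated CloudCDNClient instance; the domain, origin and
# caching values below are hypothetical, and the caching-rule shape in particular
# is an assumption about the Rackspace CDN API rather than something this module defines.
def _example_create_service(cdn_client):
    flavor = cdn_client.list_flavors()[0]
    return cdn_client.create_service(
        name="example_site",
        flavor_id=flavor.id,
        domains=[{"domain": "www.example.com", "protocol": "http"}],
        origins=[{"origin": "origin.example.com", "port": 80, "ssl": False}],
        caching=[{"name": "default", "ttl": 3600}],
    )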
| rackerlabs/heat-pyrax | pyrax/cloudcdn.py | Python | apache-2.0 | 7,833 | 0.000766 |
import os
WAGTAIL_ROOT = os.path.dirname(__file__)
STATIC_ROOT = os.path.join(WAGTAIL_ROOT, 'test-static')
MEDIA_ROOT = os.path.join(WAGTAIL_ROOT, 'test-media')
MEDIA_URL = '/media/'
DATABASES = {
'default': {
'ENGINE': os.environ.get('DATABASE_ENGINE', 'django.db.backends.sqlite3'),
'NAME': os.environ.get('DATABASE_NAME', 'wagtail'),
'USER': os.environ.get('DATABASE_USER', None),
'PASSWORD': os.environ.get('DATABASE_PASS', None),
'HOST': os.environ.get('DATABASE_HOST', None),
'TEST': {
'NAME': os.environ.get('DATABASE_NAME', None),
}
}
}
SECRET_KEY = 'not needed'
ROOT_URLCONF = 'wagtail.tests.urls'
STATIC_URL = '/static/'
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
USE_TZ = True
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'wagtail.tests.context_processors.do_not_use_static_url',
'wagtail.contrib.settings.context_processors.settings',
],
},
},
{
'BACKEND': 'django.template.backends.jinja2.Jinja2',
'APP_DIRS': True,
'OPTIONS': {
'extensions': [
'wagtail.wagtailcore.jinja2tags.core',
'wagtail.wagtailadmin.jinja2tags.userbar',
'wagtail.wagtailimages.jinja2tags.images',
],
},
},
]
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'wagtail.wagtailcore.middleware.SiteMiddleware',
'wagtail.wagtailredirects.middleware.RedirectMiddleware',
)
INSTALLED_APPS = (
# Install wagtailredirects with its appconfig
    # There's nothing special about wagtailredirects, we just need to have one
# app which uses AppConfigs to test that hooks load properly
'wagtail.wagtailredirects.apps.WagtailRedirectsAppConfig',
'wagtail.tests.testapp',
'wagtail.tests.demosite',
'wagtail.tests.customuser',
'wagtail.tests.snippets',
'wagtail.tests.routablepage',
'wagtail.tests.search',
'wagtail.contrib.wagtailstyleguide',
'wagtail.contrib.wagtailsitemaps',
'wagtail.contrib.wagtailroutablepage',
'wagtail.contrib.wagtailfrontendcache',
'wagtail.contrib.wagtailapi',
'wagtail.contrib.wagtailsearchpromotions',
'wagtail.contrib.settings',
'wagtail.wagtailforms',
'wagtail.wagtailsearch',
'wagtail.wagtailembeds',
'wagtail.wagtailimages',
'wagtail.wagtailsites',
'wagtail.wagtailusers',
'wagtail.wagtailsnippets',
'wagtail.wagtaildocs',
'wagtail.wagtailadmin',
'wagtail.wagtailcore',
'taggit',
'rest_framework',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
# Using DatabaseCache to make sure that the cache is cleared between tests.
# This prevents false-positives in some wagtail core tests where we are
# changing the 'wagtail_root_paths' key which may cause future tests to fail.
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
'LOCATION': 'cache',
}
}
PASSWORD_HASHERS = (
'django.contrib.auth.hashers.MD5PasswordHasher', # don't use the intentionally slow default password hasher
)
WAGTAILSEARCH_BACKENDS = {
'default': {
'BACKEND': 'wagtail.wagtailsearch.backends.db',
}
}
AUTH_USER_MODEL = 'customuser.CustomUser'
if 'ELASTICSEARCH_URL' in os.environ:
WAGTAILSEARCH_BACKENDS['elasticsearch'] = {
'BACKEND': 'wagtail.wagtailsearch.backends.elasticsearch',
'URLS': [os.environ['ELASTICSEARCH_URL']],
'TIMEOUT': 10,
'max_retries': 1,
'AUTO_UPDATE': False,
}
WAGTAIL_SITE_NAME = "Test Site"
| inonit/wagtail | wagtail/tests/settings.py | Python | bsd-3-clause | 4,667 | 0.000429 |
# coding: utf-8
from fabkit import task
from fablib.mongodb import MongoDB
mongodb = MongoDB()
@task
def setup():
mongodb.setup()
return {'status': 1}
| syunkitada/fabkit-repo | fabscript/openstack/mongodb.py | Python | mit | 163 | 0 |
#!/usr/bin/env python3
# Advent of Code 2016 - Day 7, Part One
import sys
import re
from itertools import islice
def window(seq, n=2):
"Returns a sliding window (of width n) over data from the iterable"
" s -> (s0,s1,...s[n-1]), (s1,s2,...,sn), ... "
it = iter(seq)
result = tuple(islice(it, n))
if len(result) == n:
yield result
for elem in it:
result = result[1:] + (elem,)
yield result
def has_abba(string):
for s in window(string, 4):
if s[:2] == s[:1:-1] and s[0] != s[1]:
return True
return False
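# Illustrative sketch (added for clarity, not part of the original solution):
# a quick check of window() and has_abba() on made-up strings, kept inside a
# helper that is never called so the script's behaviour is unchanged.
def _demo_abba():
    assert list(window('abcd', 2)) == [('a', 'b'), ('b', 'c'), ('c', 'd')]
    assert has_abba('ioxxoj')      # contains the ABBA sequence 'oxxo'
    assert not has_abba('aaaa')    # the two inner characters must differ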
def main(argv):
if len(argv) < 2:
print("Usage: day07-pt1.py puzzle.txt")
return 1
valid = 0
with open(argv[1]) as f:
for line in f:
            nets = re.split(r'[\[\]]', line.strip())
if any(has_abba(s) for s in nets[::2]) \
and not any(has_abba(h) for h in nets[1::2]):
valid += 1
print(valid)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
| mcbor/adventofcode | 2016/day07/day07-pt1.py | Python | mit | 1,067 | 0.00656 |
"""
Compatible with py2 and py3, inspired by jinja2, future, etc
common types & functions:
name 2.x 3.x
------------ ----------- -----------
unichr unichr chr
unicode unicode str
range xrange range
string_types (str, unicode) (str, )
pickle cPickle pickle
input raw_input input
StringIO, BytesIO
from io import StringIO, BytesIO
"""
import sys
PY3 = sys.version_info[0] == 3
PY2 = sys.version_info[0] == 2
PY26 = sys.version_info[0:2] == (2, 6)
PYPY = hasattr(sys, 'pypy_translation_info')
_identity = lambda x:x
if not PY2:
unichr = chr
range = range
string_types = (str,)
iterkeys = lambda d: iter(d.keys())
itervalues = lambda d: iter(d.values())
iteritems = lambda d: iter(d.items())
import pickle
def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
implements_iterator = _identity
python_2_unicode_compatible = _identity
ifilter = filter
imap = map
izip = zip
def u(s, encoding='utf8'):
if isinstance(s, str):
return s
else:
return str(s)
def b(s, encoding='utf8'):
if isinstance(s, bytes):
return s
else:
if isinstance(s, str):
return s.encode(encoding)
else:
return bytes(s)
import builtins
exec_ = getattr(builtins, "exec")
    get_next = lambda x: x.__next__
input = input
open = open
else:
unichr = unichr
range = xrange
string_types = (str, unicode)
iterkeys = lambda d: d.iterkeys()
itervalues = lambda d: d.itervalues()
iteritems = lambda d: d.iteritems()
import cPickle as pickle
from io import BytesIO, StringIO
exec('def reraise(tp, value, tb=None):\n raise tp, value, tb')
from itertools import imap, izip, ifilter
def implements_iterator(cls):
cls.next = cls.__next__
del cls.__next__
return cls
def python_2_unicode_compatible(cls):
cls.__unicode__ = cls.__str__
cls.__str__ = lambda x: x.__unicode__().encode('utf-8')
return cls
def u(s, encoding='utf8'):
if isinstance(s, unicode):
return s
else:
return unicode(str(s), encoding)
def b(s, encoding='utf8'):
if isinstance(s, unicode):
return s.decode(encoding)
else:
return str(s)
def exec_(code, globs=None, locs=None):
"""Execute code in a namespace."""
if globs is None:
frame = sys._getframe(1)
globs = frame.f_globals
if locs is None:
locs = frame.f_locals
del frame
elif locs is None:
locs = globs
exec("""exec code in globs, locs""")
    get_next = lambda x: x.next
input = raw_input
from io import open
try:
next = next
except NameError:
def next(it):
return it.next()
def with_metaclass(meta, *bases):
# This requires a bit of explanation: the basic idea is to make a
    # dummy metaclass for one level of class instantiation that replaces
# itself with the actual metaclass. Because of internal type checks
# we also need to make sure that we downgrade the custom metaclass
# for one level to something closer to type (that's why __call__ and
# __init__ comes back from type etc.).
#
# This has the advantage over six.with_metaclass in that it does not
# introduce dummy classes into the final MRO.
class metaclass(meta):
__call__ = type.__call__
__init__ = type.__init__
def __new__(cls, name, this_bases, d):
if this_bases is None:
return type.__new__(cls, name, (), d)
return meta(name, bases, d)
return metaclass('temporary_class', None, {})
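# Illustrative sketch (added for clarity, not part of uliweb): how
# with_metaclass() is meant to be used so one class body works on py2 and py3.
# Meta, Base and Model below are hypothetical names, kept inside a helper that
# is never called on import.
def _demo_with_metaclass():
    class Meta(type):
        def __new__(mcs, name, bases, d):
            d.setdefault('created_by_meta', True)
            return super(Meta, mcs).__new__(mcs, name, bases, d)
    class Base(object):
        pass
    class Model(with_metaclass(Meta, Base)):
        pass
    assert Model.created_by_meta is True
    assert isinstance(Model, Meta)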
modules_mapping = {
'socketserver':'SocketServer',
'queue':'Queue',
'configparser':'ConfigParser',
'html.entities':'htmlentitydefs',
'html.parser':'HTMLParser',
'http.client':'httplib',
    'http.server':['BaseHTTPServer', 'CGIHTTPServer', 'SimpleHTTPServer'],
    # from BaseHTTPServer
    # from CGIHTTPServer
    # from SimpleHTTPServer
'http.cookies':'Cookie',
'http.cookiejar':'cookielib',
'urllib.parse':{'urlparse':['ParseResult', 'SplitResult',
'parse_qs', 'parse_qsl',
'urldefrag', 'urljoin',
'urlparse', 'urlsplit',
'urlunparse', 'urlunsplit'],
'urllib':['quote', 'quote_plus',
'unquote', 'unquote_plus',
'urlencode', 'splitquery']},
# from urlparse import (ParseResult, SplitResult, parse_qs, parse_qsl,
# urldefrag, urljoin, urlparse, urlsplit,
# urlunparse, urlunsplit)
# from urllib import (quote,
# quote_plus,
# unquote,
# unquote_plus,
# urlencode,
# splitquery)
'urllib.request':{'urllib':['pathname2url',
'url2pathname',
'getproxies',
'urlretrieve',
'urlcleanup',
'URLopener',
'FancyURLopener',
'proxy_bypass'],
'urllib2':['AbstractBasicAuthHandler',
'AbstractDigestAuthHandler',
'BaseHandler', 'CacheFTPHandler',
'FileHandler', 'FTPHandler',
'HTTPBasicAuthHandler',
'HTTPCookieProcessor',
'HTTPDefaultErrorHandler',
'HTTPDigestAuthHandler',
'HTTPErrorProcessor', 'HTTPHandler',
'HTTPPasswordMgr',
'HTTPPasswordMgrWithDefaultRealm',
'HTTPRedirectHandler', 'HTTPSHandler',
'URLError', 'build_opener',
'install_opener', 'OpenerDirector',
'ProxyBasicAuthHandler',
'ProxyDigestAuthHandler',
'ProxyHandler', 'Request',
'UnknownHandler', 'urlopen'],
'urlparse':['urldefrag','urljoin', 'urlparse',
'urlunparse', 'urlsplit', 'urlunsplit',
                              'parse_qs', 'parse_qsl']},
# from urllib import (pathname2url,
# url2pathname,
# getproxies,
# urlretrieve,
# urlcleanup,
# URLopener,
# FancyURLopener,
# proxy_bypass)
# from urllib2 import (
# AbstractBasicAuthHandler,
# AbstractDigestAuthHandler,
# BaseHandler,
# CacheFTPHandler,
# FileHandler,
# FTPHandler,
# HTTPBasicAuthHandler,
# HTTPCookieProcessor,
# HTTPDefaultErrorHandler,
# HTTPDigestAuthHandler,
# HTTPErrorProcessor,
# HTTPHandler,
# HTTPPasswordMgr,
# HTTPPasswordMgrWithDefaultRealm,
# HTTPRedirectHandler,
# HTTPSHandler,
# URLError,
# build_opener,
# install_opener,
# OpenerDirector,
# ProxyBasicAuthHandler,
# ProxyDigestAuthHandler,
# ProxyHandler,
# Request,
# UnknownHandler,
# urlopen,
# )
# from urlparse import (
# urldefrag
# urljoin,
# urlparse,
# urlunparse,
# urlsplit,
# urlunsplit,
# parse_qs,
    #              parse_qsl
# )
'urllib.error':{'urllib':['ContentTooShortError'],
'urllib2':['URLError', 'HTTPError']},
# from urllib import ContentTooShortError
# from urllib2 import URLError, HTTPError
'xmlrpc.client':'xmlrpclib',
'xmlrpc.server':'xmlrpclib',
}
for k, v in modules_mapping.items():
if isinstance(v, dict):
x = {}
for m, attrs in v.items():
for a in attrs:
x[a] = m
modules_mapping[k] = x
def import_(module, objects=None, py2=None):
"""
    :param module: py3 compatible module path
    :param objects: object name or list of object names to import
    :param py2: explicit py2 module path; needed when no objects are given
        and the py2 mapping for the module is ambiguous
    :return: object or module
Usage:
import_('urllib.parse', 'urlparse')
import_('urllib.parse', ['urlparse', 'urljoin'])
import_('urllib.parse', py2='urllib2')
"""
if not PY2:
mod = __import__(module, fromlist=['*'])
else:
path = modules_mapping.get(module)
if not path:
raise Exception("Can't find the module %s in mappings." % module)
if objects:
if not isinstance(objects, (list, tuple)):
objects = [objects]
r = []
for x in objects:
m = path.get(x)
if not m:
raise Exception("Can't find the object %s in %s." % (x, path))
mod = __import__(m, fromlist=['*'])
r.append(getattr(mod, x))
if len(r) > 1:
return tuple(r)
else:
return r[0]
else:
if isinstance(path, (dict, list)):
if not py2:
raise Exception("You should give py2 parameter to enable import from py2.")
path = py2
mod = __import__(path, fromlist=['*'])
return mod
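# Illustrative sketch (added for clarity, not part of uliweb): with the module
# form, the same call resolves to the right module on both Python versions;
# the URL is just sample data.
def _demo_import_():
    parse = import_('urllib.parse', py2='urlparse')
    assert parse.urljoin('http://example.com/a/', 'b') == 'http://example.com/a/b'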
| wwfifi/uliweb | uliweb/utils/_compat.py | Python | bsd-2-clause | 10,959 | 0.00365 |
#!/usr/bin/env python3
## Estimates errors using Monte Carlo sampling
# Hamish Silverwood, GRAPPA, UvA, 23 February 2015
import numpy as np
import gl_helper as gh
import pdb
import pickle
import sys
import numpy.random as rand
import matplotlib.pyplot as plt
#TEST this will eventually go outside
def ErSamp_gauss_linear_w_z():
fraction_err = 0.05
datafile = '/home/hsilverw/LoDaM/darcoda/Data_Sets/simplenu/simplenu_sigz_raw_sdz_p05_sdvz_5.dat'
data = np.loadtxt(datafile)
z_data = data[:, 0]
z_sampled = []
for z_val in z_data:
z_sampled.append(rand.normal(loc = z_val, scale= z_val*fraction_err))
return z_sampled
z_data_flat_distro = rand.random(2000000)
def ErSamp_flat_distro_test():
fraction_err = 0.001
z_data = z_data_flat_distro
z_sampled = []
for z_val in z_data:
z_sampled.append(abs(rand.normal(loc = z_val, scale = fraction_err)))
return z_sampled
def mc_nu_error(sampled_z_func, number_mcs, binmin, binmax, bincenter):
# sampled_z_func - returns a vector of z points
nu_vectors=[]
for jter in range(0, number_mcs):
jter_z_data = sampled_z_func()
jter_nu, dummy, dummy, dummy, dummy = gh.nu_sig_from_bins(binmin, binmax, jter_z_data, np.ones(len(jter_z_data)))
nu_vectors.append(jter_nu)
#Calculate standard deviations of nu
nu_vectors = np.array(nu_vectors)
nu_stdevs = []
nu_means = []
nu_medians = []
for pter in range(0, len(binmin)):
nu_stdevs.append(np.std(nu_vectors[:, pter]))
nu_means.append(np.mean(nu_vectors[:, pter]))
nu_medians.append(np.median(nu_vectors[:, pter]))
#pdb.set_trace()
#fig = plt.figure()
#ax = fig.add_subplot(111)
#no, bins, patches = ax.hist(nu_vectors[:,0], 100)
return np.array(nu_stdevs)
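# Illustrative sketch (added, not part of the original analysis): any callable
# that returns a fresh sample of z values can be passed as sampled_z_func; the
# uniform mock data and bin edges below are arbitrary.
def _demo_custom_sampler():
    def sampler():
        z_true = rand.random(1000)
        return rand.normal(loc=z_true, scale=0.1*z_true + 1e-6)
    binmin, binmax, bincenter = gh.bin_r_linear(0.1, 0.9, 8)
    return mc_nu_error(sampler, 10, binmin, binmax, bincenter)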
if __name__=="__main__":
binmin, binmax, bincenter = gh.bin_r_linear(0.2, 0.8, 12)
nu_stdevs = mc_nu_error(ErSamp_flat_distro_test, 100, binmin, binmax, bincenter)
pdb.set_trace()
| PascalSteger/gravimage | programs/gi_mc_errors.py | Python | gpl-2.0 | 2,025 | 0.011852 |
# -*- coding: utf-8 -*-
#
# Copyright 2013 - Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from pecan import rest
from pecan import abort
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan
from mistral import exceptions as ex
from mistral.api.controllers.v1 import task
from mistral.openstack.common import log as logging
from mistral.api.controllers import resource
from mistral.db import api as db_api
from mistral.engine import engine
LOG = logging.getLogger(__name__)
class Execution(resource.Resource):
"""Execution resource."""
id = wtypes.text
workbook_name = wtypes.text
task = wtypes.text
state = wtypes.text
    # Context is a JSON object, but since WSME doesn't support arbitrary
    # dictionaries we have to use the text type and convert to/from JSON manually.
context = wtypes.text
def to_dict(self):
d = super(Execution, self).to_dict()
if d.get('context'):
d['context'] = json.loads(d['context'])
return d
@classmethod
def from_dict(cls, d):
e = cls()
for key, val in d.items():
if hasattr(e, key):
if key == 'context' and val:
val = json.dumps(val)
setattr(e, key, val)
return e
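# Illustrative sketch (added for clarity, not part of Mistral): the intended
# context round-trip: from_dict() stores the context dict as a JSON string and
# to_dict() parses it back. The sample values are made up, and the second
# assert assumes the base Resource.to_dict() simply returns attribute values.
def _demo_context_roundtrip():
    execution = Execution.from_dict({'id': '1', 'workbook_name': 'wb',
                                     'task': 't1', 'state': 'RUNNING',
                                     'context': {'param': 42}})
    assert json.loads(execution.context) == {'param': 42}
    assert execution.to_dict()['context'] == {'param': 42}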
class Executions(resource.Resource):
"""A collection of Execution resources."""
executions = [Execution]
class ExecutionsController(rest.RestController):
tasks = task.TasksController()
@wsme_pecan.wsexpose(Execution, wtypes.text, wtypes.text)
def get(self, workbook_name, id):
LOG.debug("Fetch execution [workbook_name=%s, id=%s]" %
(workbook_name, id))
values = db_api.execution_get(workbook_name, id)
if not values:
abort(404)
else:
return Execution.from_dict(values)
@wsme_pecan.wsexpose(Execution, wtypes.text, wtypes.text, body=Execution)
def put(self, workbook_name, id, execution):
LOG.debug("Update execution [workbook_name=%s, id=%s, execution=%s]" %
(workbook_name, id, execution))
values = db_api.execution_update(workbook_name,
id,
execution.to_dict())
return Execution.from_dict(values)
@wsme_pecan.wsexpose(Execution, wtypes.text, body=Execution,
status_code=201)
def post(self, workbook_name, execution):
LOG.debug("Create execution [workbook_name=%s, execution=%s]" %
(workbook_name, execution))
try:
context = None
if execution.context:
context = json.loads(execution.context)
values = engine.start_workflow_execution(execution.workbook_name,
execution.task,
context)
except ex.MistralException as e:
            #TODO(nmakhotkin) we should use something such as a decorator here
abort(400, e.message)
return Execution.from_dict(values)
@wsme_pecan.wsexpose(None, wtypes.text, wtypes.text, status_code=204)
def delete(self, workbook_name, id):
LOG.debug("Delete execution [workbook_name=%s, id=%s]" %
(workbook_name, id))
db_api.execution_delete(workbook_name, id)
@wsme_pecan.wsexpose(Executions, wtypes.text)
def get_all(self, workbook_name):
LOG.debug("Fetch executions [workbook_name=%s]" % workbook_name)
executions = [Execution.from_dict(values)
for values in db_api.executions_get(workbook_name)]
return Executions(executions=executions)
| dzimine/mistral | mistral/api/controllers/v1/execution.py | Python | apache-2.0 | 4,285 | 0.000233 |
""" Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP855.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp855',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
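# Illustrative sketch (added for clarity; not produced by gencodec.py): a
# round trip through the codec defined above, using arbitrary Cyrillic sample
# text and kept in a helper that is never called on import.
def _demo_cp855_roundtrip():
    sample = u'\u041f\u0440\u0438\u0432\u0435\u0442'
    encoded, consumed = Codec().encode(sample)
    assert consumed == len(sample)
    decoded, _ = Codec().decode(encoded)
    assert decoded == sample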
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x0452, # CYRILLIC SMALL LETTER DJE
0x0081: 0x0402, # CYRILLIC CAPITAL LETTER DJE
0x0082: 0x0453, # CYRILLIC SMALL LETTER GJE
0x0083: 0x0403, # CYRILLIC CAPITAL LETTER GJE
0x0084: 0x0451, # CYRILLIC SMALL LETTER IO
0x0085: 0x0401, # CYRILLIC CAPITAL LETTER IO
0x0086: 0x0454, # CYRILLIC SMALL LETTER UKRAINIAN IE
0x0087: 0x0404, # CYRILLIC CAPITAL LETTER UKRAINIAN IE
0x0088: 0x0455, # CYRILLIC SMALL LETTER DZE
0x0089: 0x0405, # CYRILLIC CAPITAL LETTER DZE
0x008a: 0x0456, # CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
0x008b: 0x0406, # CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
0x008c: 0x0457, # CYRILLIC SMALL LETTER YI
0x008d: 0x0407, # CYRILLIC CAPITAL LETTER YI
0x008e: 0x0458, # CYRILLIC SMALL LETTER JE
0x008f: 0x0408, # CYRILLIC CAPITAL LETTER JE
0x0090: 0x0459, # CYRILLIC SMALL LETTER LJE
0x0091: 0x0409, # CYRILLIC CAPITAL LETTER LJE
0x0092: 0x045a, # CYRILLIC SMALL LETTER NJE
0x0093: 0x040a, # CYRILLIC CAPITAL LETTER NJE
0x0094: 0x045b, # CYRILLIC SMALL LETTER TSHE
0x0095: 0x040b, # CYRILLIC CAPITAL LETTER TSHE
0x0096: 0x045c, # CYRILLIC SMALL LETTER KJE
0x0097: 0x040c, # CYRILLIC CAPITAL LETTER KJE
0x0098: 0x045e, # CYRILLIC SMALL LETTER SHORT U
0x0099: 0x040e, # CYRILLIC CAPITAL LETTER SHORT U
0x009a: 0x045f, # CYRILLIC SMALL LETTER DZHE
0x009b: 0x040f, # CYRILLIC CAPITAL LETTER DZHE
0x009c: 0x044e, # CYRILLIC SMALL LETTER YU
0x009d: 0x042e, # CYRILLIC CAPITAL LETTER YU
0x009e: 0x044a, # CYRILLIC SMALL LETTER HARD SIGN
0x009f: 0x042a, # CYRILLIC CAPITAL LETTER HARD SIGN
0x00a0: 0x0430, # CYRILLIC SMALL LETTER A
0x00a1: 0x0410, # CYRILLIC CAPITAL LETTER A
0x00a2: 0x0431, # CYRILLIC SMALL LETTER BE
0x00a3: 0x0411, # CYRILLIC CAPITAL LETTER BE
0x00a4: 0x0446, # CYRILLIC SMALL LETTER TSE
0x00a5: 0x0426, # CYRILLIC CAPITAL LETTER TSE
0x00a6: 0x0434, # CYRILLIC SMALL LETTER DE
0x00a7: 0x0414, # CYRILLIC CAPITAL LETTER DE
0x00a8: 0x0435, # CYRILLIC SMALL LETTER IE
0x00a9: 0x0415, # CYRILLIC CAPITAL LETTER IE
0x00aa: 0x0444, # CYRILLIC SMALL LETTER EF
0x00ab: 0x0424, # CYRILLIC CAPITAL LETTER EF
0x00ac: 0x0433, # CYRILLIC SMALL LETTER GHE
0x00ad: 0x0413, # CYRILLIC CAPITAL LETTER GHE
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x0445, # CYRILLIC SMALL LETTER HA
0x00b6: 0x0425, # CYRILLIC CAPITAL LETTER HA
0x00b7: 0x0438, # CYRILLIC SMALL LETTER I
0x00b8: 0x0418, # CYRILLIC CAPITAL LETTER I
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x0439, # CYRILLIC SMALL LETTER SHORT I
0x00be: 0x0419, # CYRILLIC CAPITAL LETTER SHORT I
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x043a, # CYRILLIC SMALL LETTER KA
0x00c7: 0x041a, # CYRILLIC CAPITAL LETTER KA
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x00a4, # CURRENCY SIGN
0x00d0: 0x043b, # CYRILLIC SMALL LETTER EL
0x00d1: 0x041b, # CYRILLIC CAPITAL LETTER EL
0x00d2: 0x043c, # CYRILLIC SMALL LETTER EM
0x00d3: 0x041c, # CYRILLIC CAPITAL LETTER EM
0x00d4: 0x043d, # CYRILLIC SMALL LETTER EN
0x00d5: 0x041d, # CYRILLIC CAPITAL LETTER EN
0x00d6: 0x043e, # CYRILLIC SMALL LETTER O
0x00d7: 0x041e, # CYRILLIC CAPITAL LETTER O
0x00d8: 0x043f, # CYRILLIC SMALL LETTER PE
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x041f, # CYRILLIC CAPITAL LETTER PE
0x00de: 0x044f, # CYRILLIC SMALL LETTER YA
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x042f, # CYRILLIC CAPITAL LETTER YA
0x00e1: 0x0440, # CYRILLIC SMALL LETTER ER
0x00e2: 0x0420, # CYRILLIC CAPITAL LETTER ER
0x00e3: 0x0441, # CYRILLIC SMALL LETTER ES
0x00e4: 0x0421, # CYRILLIC CAPITAL LETTER ES
0x00e5: 0x0442, # CYRILLIC SMALL LETTER TE
0x00e6: 0x0422, # CYRILLIC CAPITAL LETTER TE
0x00e7: 0x0443, # CYRILLIC SMALL LETTER U
0x00e8: 0x0423, # CYRILLIC CAPITAL LETTER U
0x00e9: 0x0436, # CYRILLIC SMALL LETTER ZHE
0x00ea: 0x0416, # CYRILLIC CAPITAL LETTER ZHE
0x00eb: 0x0432, # CYRILLIC SMALL LETTER VE
0x00ec: 0x0412, # CYRILLIC CAPITAL LETTER VE
0x00ed: 0x044c, # CYRILLIC SMALL LETTER SOFT SIGN
0x00ee: 0x042c, # CYRILLIC CAPITAL LETTER SOFT SIGN
0x00ef: 0x2116, # NUMERO SIGN
0x00f0: 0x00ad, # SOFT HYPHEN
0x00f1: 0x044b, # CYRILLIC SMALL LETTER YERU
0x00f2: 0x042b, # CYRILLIC CAPITAL LETTER YERU
0x00f3: 0x0437, # CYRILLIC SMALL LETTER ZE
0x00f4: 0x0417, # CYRILLIC CAPITAL LETTER ZE
0x00f5: 0x0448, # CYRILLIC SMALL LETTER SHA
0x00f6: 0x0428, # CYRILLIC CAPITAL LETTER SHA
0x00f7: 0x044d, # CYRILLIC SMALL LETTER E
0x00f8: 0x042d, # CYRILLIC CAPITAL LETTER E
0x00f9: 0x0449, # CYRILLIC SMALL LETTER SHCHA
0x00fa: 0x0429, # CYRILLIC CAPITAL LETTER SHCHA
0x00fb: 0x0447, # CYRILLIC SMALL LETTER CHE
0x00fc: 0x0427, # CYRILLIC CAPITAL LETTER CHE
0x00fd: 0x00a7, # SECTION SIGN
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
u'\x00' # 0x0000 -> NULL
u'\x01' # 0x0001 -> START OF HEADING
u'\x02' # 0x0002 -> START OF TEXT
u'\x03' # 0x0003 -> END OF TEXT
u'\x04' # 0x0004 -> END OF TRANSMISSION
u'\x05' # 0x0005 -> ENQUIRY
u'\x06' # 0x0006 -> ACKNOWLEDGE
u'\x07' # 0x0007 -> BELL
u'\x08' # 0x0008 -> BACKSPACE
u'\t' # 0x0009 -> HORIZONTAL TABULATION
u'\n' # 0x000a -> LINE FEED
u'\x0b' # 0x000b -> VERTICAL TABULATION
u'\x0c' # 0x000c -> FORM FEED
u'\r' # 0x000d -> CARRIAGE RETURN
u'\x0e' # 0x000e -> SHIFT OUT
u'\x0f' # 0x000f -> SHIFT IN
u'\x10' # 0x0010 -> DATA LINK ESCAPE
u'\x11' # 0x0011 -> DEVICE CONTROL ONE
u'\x12' # 0x0012 -> DEVICE CONTROL TWO
u'\x13' # 0x0013 -> DEVICE CONTROL THREE
u'\x14' # 0x0014 -> DEVICE CONTROL FOUR
u'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x0016 -> SYNCHRONOUS IDLE
u'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x0018 -> CANCEL
u'\x19' # 0x0019 -> END OF MEDIUM
u'\x1a' # 0x001a -> SUBSTITUTE
u'\x1b' # 0x001b -> ESCAPE
u'\x1c' # 0x001c -> FILE SEPARATOR
u'\x1d' # 0x001d -> GROUP SEPARATOR
u'\x1e' # 0x001e -> RECORD SEPARATOR
u'\x1f' # 0x001f -> UNIT SEPARATOR
u' ' # 0x0020 -> SPACE
u'!' # 0x0021 -> EXCLAMATION MARK
u'"' # 0x0022 -> QUOTATION MARK
u'#' # 0x0023 -> NUMBER SIGN
u'$' # 0x0024 -> DOLLAR SIGN
u'%' # 0x0025 -> PERCENT SIGN
u'&' # 0x0026 -> AMPERSAND
u"'" # 0x0027 -> APOSTROPHE
u'(' # 0x0028 -> LEFT PARENTHESIS
u')' # 0x0029 -> RIGHT PARENTHESIS
u'*' # 0x002a -> ASTERISK
u'+' # 0x002b -> PLUS SIGN
u',' # 0x002c -> COMMA
u'-' # 0x002d -> HYPHEN-MINUS
u'.' # 0x002e -> FULL STOP
u'/' # 0x002f -> SOLIDUS
u'0' # 0x0030 -> DIGIT ZERO
u'1' # 0x0031 -> DIGIT ONE
u'2' # 0x0032 -> DIGIT TWO
u'3' # 0x0033 -> DIGIT THREE
u'4' # 0x0034 -> DIGIT FOUR
u'5' # 0x0035 -> DIGIT FIVE
u'6' # 0x0036 -> DIGIT SIX
u'7' # 0x0037 -> DIGIT SEVEN
u'8' # 0x0038 -> DIGIT EIGHT
u'9' # 0x0039 -> DIGIT NINE
u':' # 0x003a -> COLON
u';' # 0x003b -> SEMICOLON
u'<' # 0x003c -> LESS-THAN SIGN
u'=' # 0x003d -> EQUALS SIGN
u'>' # 0x003e -> GREATER-THAN SIGN
u'?' # 0x003f -> QUESTION MARK
u'@' # 0x0040 -> COMMERCIAL AT
u'A' # 0x0041 -> LATIN CAPITAL LETTER A
u'B' # 0x0042 -> LATIN CAPITAL LETTER B
u'C' # 0x0043 -> LATIN CAPITAL LETTER C
u'D' # 0x0044 -> LATIN CAPITAL LETTER D
u'E' # 0x0045 -> LATIN CAPITAL LETTER E
u'F' # 0x0046 -> LATIN CAPITAL LETTER F
u'G' # 0x0047 -> LATIN CAPITAL LETTER G
u'H' # 0x0048 -> LATIN CAPITAL LETTER H
u'I' # 0x0049 -> LATIN CAPITAL LETTER I
u'J' # 0x004a -> LATIN CAPITAL LETTER J
u'K' # 0x004b -> LATIN CAPITAL LETTER K
u'L' # 0x004c -> LATIN CAPITAL LETTER L
u'M' # 0x004d -> LATIN CAPITAL LETTER M
u'N' # 0x004e -> LATIN CAPITAL LETTER N
u'O' # 0x004f -> LATIN CAPITAL LETTER O
u'P' # 0x0050 -> LATIN CAPITAL LETTER P
u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
u'R' # 0x0052 -> LATIN CAPITAL LETTER R
u'S' # 0x0053 -> LATIN CAPITAL LETTER S
u'T' # 0x0054 -> LATIN CAPITAL LETTER T
u'U' # 0x0055 -> LATIN CAPITAL LETTER U
u'V' # 0x0056 -> LATIN CAPITAL LETTER V
u'W' # 0x0057 -> LATIN CAPITAL LETTER W
u'X' # 0x0058 -> LATIN CAPITAL LETTER X
u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
u'Z' # 0x005a -> LATIN CAPITAL LETTER Z
u'[' # 0x005b -> LEFT SQUARE BRACKET
u'\\' # 0x005c -> REVERSE SOLIDUS
u']' # 0x005d -> RIGHT SQUARE BRACKET
u'^' # 0x005e -> CIRCUMFLEX ACCENT
u'_' # 0x005f -> LOW LINE
u'`' # 0x0060 -> GRAVE ACCENT
u'a' # 0x0061 -> LATIN SMALL LETTER A
u'b' # 0x0062 -> LATIN SMALL LETTER B
u'c' # 0x0063 -> LATIN SMALL LETTER C
u'd' # 0x0064 -> LATIN SMALL LETTER D
u'e' # 0x0065 -> LATIN SMALL LETTER E
u'f' # 0x0066 -> LATIN SMALL LETTER F
u'g' # 0x0067 -> LATIN SMALL LETTER G
u'h' # 0x0068 -> LATIN SMALL LETTER H
u'i' # 0x0069 -> LATIN SMALL LETTER I
u'j' # 0x006a -> LATIN SMALL LETTER J
u'k' # 0x006b -> LATIN SMALL LETTER K
u'l' # 0x006c -> LATIN SMALL LETTER L
u'm' # 0x006d -> LATIN SMALL LETTER M
u'n' # 0x006e -> LATIN SMALL LETTER N
u'o' # 0x006f -> LATIN SMALL LETTER O
u'p' # 0x0070 -> LATIN SMALL LETTER P
u'q' # 0x0071 -> LATIN SMALL LETTER Q
u'r' # 0x0072 -> LATIN SMALL LETTER R
u's' # 0x0073 -> LATIN SMALL LETTER S
u't' # 0x0074 -> LATIN SMALL LETTER T
u'u' # 0x0075 -> LATIN SMALL LETTER U
u'v' # 0x0076 -> LATIN SMALL LETTER V
u'w' # 0x0077 -> LATIN SMALL LETTER W
u'x' # 0x0078 -> LATIN SMALL LETTER X
u'y' # 0x0079 -> LATIN SMALL LETTER Y
u'z' # 0x007a -> LATIN SMALL LETTER Z
u'{' # 0x007b -> LEFT CURLY BRACKET
u'|' # 0x007c -> VERTICAL LINE
u'}' # 0x007d -> RIGHT CURLY BRACKET
u'~' # 0x007e -> TILDE
u'\x7f' # 0x007f -> DELETE
u'\u0452' # 0x0080 -> CYRILLIC SMALL LETTER DJE
u'\u0402' # 0x0081 -> CYRILLIC CAPITAL LETTER DJE
u'\u0453' # 0x0082 -> CYRILLIC SMALL LETTER GJE
u'\u0403' # 0x0083 -> CYRILLIC CAPITAL LETTER GJE
u'\u0451' # 0x0084 -> CYRILLIC SMALL LETTER IO
u'\u0401' # 0x0085 -> CYRILLIC CAPITAL LETTER IO
u'\u0454' # 0x0086 -> CYRILLIC SMALL LETTER UKRAINIAN IE
u'\u0404' # 0x0087 -> CYRILLIC CAPITAL LETTER UKRAINIAN IE
u'\u0455' # 0x0088 -> CYRILLIC SMALL LETTER DZE
u'\u0405' # 0x0089 -> CYRILLIC CAPITAL LETTER DZE
u'\u0456' # 0x008a -> CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
u'\u0406' # 0x008b -> CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
u'\u0457' # 0x008c -> CYRILLIC SMALL LETTER YI
u'\u0407' # 0x008d -> CYRILLIC CAPITAL LETTER YI
u'\u0458' # 0x008e -> CYRILLIC SMALL LETTER JE
u'\u0408' # 0x008f -> CYRILLIC CAPITAL LETTER JE
u'\u0459' # 0x0090 -> CYRILLIC SMALL LETTER LJE
u'\u0409' # 0x0091 -> CYRILLIC CAPITAL LETTER LJE
u'\u045a' # 0x0092 -> CYRILLIC SMALL LETTER NJE
u'\u040a' # 0x0093 -> CYRILLIC CAPITAL LETTER NJE
u'\u045b' # 0x0094 -> CYRILLIC SMALL LETTER TSHE
u'\u040b' # 0x0095 -> CYRILLIC CAPITAL LETTER TSHE
u'\u045c' # 0x0096 -> CYRILLIC SMALL LETTER KJE
u'\u040c' # 0x0097 -> CYRILLIC CAPITAL LETTER KJE
u'\u045e' # 0x0098 -> CYRILLIC SMALL LETTER SHORT U
u'\u040e' # 0x0099 -> CYRILLIC CAPITAL LETTER SHORT U
u'\u045f' # 0x009a -> CYRILLIC SMALL LETTER DZHE
u'\u040f' # 0x009b -> CYRILLIC CAPITAL LETTER DZHE
u'\u044e' # 0x009c -> CYRILLIC SMALL LETTER YU
u'\u042e' # 0x009d -> CYRILLIC CAPITAL LETTER YU
u'\u044a' # 0x009e -> CYRILLIC SMALL LETTER HARD SIGN
u'\u042a' # 0x009f -> CYRILLIC CAPITAL LETTER HARD SIGN
u'\u0430' # 0x00a0 -> CYRILLIC SMALL LETTER A
u'\u0410' # 0x00a1 -> CYRILLIC CAPITAL LETTER A
u'\u0431' # 0x00a2 -> CYRILLIC SMALL LETTER BE
u'\u0411' # 0x00a3 -> CYRILLIC CAPITAL LETTER BE
u'\u0446' # 0x00a4 -> CYRILLIC SMALL LETTER TSE
u'\u0426' # 0x00a5 -> CYRILLIC CAPITAL LETTER TSE
u'\u0434' # 0x00a6 -> CYRILLIC SMALL LETTER DE
u'\u0414' # 0x00a7 -> CYRILLIC CAPITAL LETTER DE
u'\u0435' # 0x00a8 -> CYRILLIC SMALL LETTER IE
u'\u0415' # 0x00a9 -> CYRILLIC CAPITAL LETTER IE
u'\u0444' # 0x00aa -> CYRILLIC SMALL LETTER EF
u'\u0424' # 0x00ab -> CYRILLIC CAPITAL LETTER EF
u'\u0433' # 0x00ac -> CYRILLIC SMALL LETTER GHE
u'\u0413' # 0x00ad -> CYRILLIC CAPITAL LETTER GHE
u'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2591' # 0x00b0 -> LIGHT SHADE
u'\u2592' # 0x00b1 -> MEDIUM SHADE
u'\u2593' # 0x00b2 -> DARK SHADE
u'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
u'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
u'\u0445' # 0x00b5 -> CYRILLIC SMALL LETTER HA
u'\u0425' # 0x00b6 -> CYRILLIC CAPITAL LETTER HA
u'\u0438' # 0x00b7 -> CYRILLIC SMALL LETTER I
u'\u0418' # 0x00b8 -> CYRILLIC CAPITAL LETTER I
u'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
u'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
u'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
u'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
u'\u0439' # 0x00bd -> CYRILLIC SMALL LETTER SHORT I
u'\u0419' # 0x00be -> CYRILLIC CAPITAL LETTER SHORT I
u'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
u'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
u'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
u'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
u'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
u'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
u'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
u'\u043a' # 0x00c6 -> CYRILLIC SMALL LETTER KA
u'\u041a' # 0x00c7 -> CYRILLIC CAPITAL LETTER KA
u'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
u'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
u'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
u'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
u'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
u'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
u'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
u'\xa4' # 0x00cf -> CURRENCY SIGN
u'\u043b' # 0x00d0 -> CYRILLIC SMALL LETTER EL
u'\u041b' # 0x00d1 -> CYRILLIC CAPITAL LETTER EL
u'\u043c' # 0x00d2 -> CYRILLIC SMALL LETTER EM
u'\u041c' # 0x00d3 -> CYRILLIC CAPITAL LETTER EM
u'\u043d' # 0x00d4 -> CYRILLIC SMALL LETTER EN
u'\u041d' # 0x00d5 -> CYRILLIC CAPITAL LETTER EN
u'\u043e' # 0x00d6 -> CYRILLIC SMALL LETTER O
u'\u041e' # 0x00d7 -> CYRILLIC CAPITAL LETTER O
u'\u043f' # 0x00d8 -> CYRILLIC SMALL LETTER PE
u'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
u'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
u'\u2588' # 0x00db -> FULL BLOCK
u'\u2584' # 0x00dc -> LOWER HALF BLOCK
u'\u041f' # 0x00dd -> CYRILLIC CAPITAL LETTER PE
u'\u044f' # 0x00de -> CYRILLIC SMALL LETTER YA
u'\u2580' # 0x00df -> UPPER HALF BLOCK
u'\u042f' # 0x00e0 -> CYRILLIC CAPITAL LETTER YA
u'\u0440' # 0x00e1 -> CYRILLIC SMALL LETTER ER
u'\u0420' # 0x00e2 -> CYRILLIC CAPITAL LETTER ER
u'\u0441' # 0x00e3 -> CYRILLIC SMALL LETTER ES
u'\u0421' # 0x00e4 -> CYRILLIC CAPITAL LETTER ES
u'\u0442' # 0x00e5 -> CYRILLIC SMALL LETTER TE
u'\u0422' # 0x00e6 -> CYRILLIC CAPITAL LETTER TE
u'\u0443' # 0x00e7 -> CYRILLIC SMALL LETTER U
u'\u0423' # 0x00e8 -> CYRILLIC CAPITAL LETTER U
u'\u0436' # 0x00e9 -> CYRILLIC SMALL LETTER ZHE
u'\u0416' # 0x00ea -> CYRILLIC CAPITAL LETTER ZHE
u'\u0432' # 0x00eb -> CYRILLIC SMALL LETTER VE
u'\u0412' # 0x00ec -> CYRILLIC CAPITAL LETTER VE
u'\u044c' # 0x00ed -> CYRILLIC SMALL LETTER SOFT SIGN
u'\u042c' # 0x00ee -> CYRILLIC CAPITAL LETTER SOFT SIGN
u'\u2116' # 0x00ef -> NUMERO SIGN
u'\xad' # 0x00f0 -> SOFT HYPHEN
u'\u044b' # 0x00f1 -> CYRILLIC SMALL LETTER YERU
u'\u042b' # 0x00f2 -> CYRILLIC CAPITAL LETTER YERU
u'\u0437' # 0x00f3 -> CYRILLIC SMALL LETTER ZE
u'\u0417' # 0x00f4 -> CYRILLIC CAPITAL LETTER ZE
u'\u0448' # 0x00f5 -> CYRILLIC SMALL LETTER SHA
u'\u0428' # 0x00f6 -> CYRILLIC CAPITAL LETTER SHA
u'\u044d' # 0x00f7 -> CYRILLIC SMALL LETTER E
u'\u042d' # 0x00f8 -> CYRILLIC CAPITAL LETTER E
u'\u0449' # 0x00f9 -> CYRILLIC SMALL LETTER SHCHA
u'\u0429' # 0x00fa -> CYRILLIC CAPITAL LETTER SHCHA
u'\u0447' # 0x00fb -> CYRILLIC SMALL LETTER CHE
u'\u0427' # 0x00fc -> CYRILLIC CAPITAL LETTER CHE
u'\xa7' # 0x00fd -> SECTION SIGN
u'\u25a0' # 0x00fe -> BLACK SQUARE
u'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a4: 0x00cf, # CURRENCY SIGN
0x00a7: 0x00fd, # SECTION SIGN
0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00ad: 0x00f0, # SOFT HYPHEN
0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x0401: 0x0085, # CYRILLIC CAPITAL LETTER IO
0x0402: 0x0081, # CYRILLIC CAPITAL LETTER DJE
0x0403: 0x0083, # CYRILLIC CAPITAL LETTER GJE
0x0404: 0x0087, # CYRILLIC CAPITAL LETTER UKRAINIAN IE
0x0405: 0x0089, # CYRILLIC CAPITAL LETTER DZE
0x0406: 0x008b, # CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
0x0407: 0x008d, # CYRILLIC CAPITAL LETTER YI
0x0408: 0x008f, # CYRILLIC CAPITAL LETTER JE
0x0409: 0x0091, # CYRILLIC CAPITAL LETTER LJE
0x040a: 0x0093, # CYRILLIC CAPITAL LETTER NJE
0x040b: 0x0095, # CYRILLIC CAPITAL LETTER TSHE
0x040c: 0x0097, # CYRILLIC CAPITAL LETTER KJE
0x040e: 0x0099, # CYRILLIC CAPITAL LETTER SHORT U
0x040f: 0x009b, # CYRILLIC CAPITAL LETTER DZHE
0x0410: 0x00a1, # CYRILLIC CAPITAL LETTER A
0x0411: 0x00a3, # CYRILLIC CAPITAL LETTER BE
0x0412: 0x00ec, # CYRILLIC CAPITAL LETTER VE
0x0413: 0x00ad, # CYRILLIC CAPITAL LETTER GHE
0x0414: 0x00a7, # CYRILLIC CAPITAL LETTER DE
0x0415: 0x00a9, # CYRILLIC CAPITAL LETTER IE
0x0416: 0x00ea, # CYRILLIC CAPITAL LETTER ZHE
0x0417: 0x00f4, # CYRILLIC CAPITAL LETTER ZE
0x0418: 0x00b8, # CYRILLIC CAPITAL LETTER I
0x0419: 0x00be, # CYRILLIC CAPITAL LETTER SHORT I
0x041a: 0x00c7, # CYRILLIC CAPITAL LETTER KA
0x041b: 0x00d1, # CYRILLIC CAPITAL LETTER EL
0x041c: 0x00d3, # CYRILLIC CAPITAL LETTER EM
0x041d: 0x00d5, # CYRILLIC CAPITAL LETTER EN
0x041e: 0x00d7, # CYRILLIC CAPITAL LETTER O
0x041f: 0x00dd, # CYRILLIC CAPITAL LETTER PE
0x0420: 0x00e2, # CYRILLIC CAPITAL LETTER ER
0x0421: 0x00e4, # CYRILLIC CAPITAL LETTER ES
0x0422: 0x00e6, # CYRILLIC CAPITAL LETTER TE
0x0423: 0x00e8, # CYRILLIC CAPITAL LETTER U
0x0424: 0x00ab, # CYRILLIC CAPITAL LETTER EF
0x0425: 0x00b6, # CYRILLIC CAPITAL LETTER HA
0x0426: 0x00a5, # CYRILLIC CAPITAL LETTER TSE
0x0427: 0x00fc, # CYRILLIC CAPITAL LETTER CHE
0x0428: 0x00f6, # CYRILLIC CAPITAL LETTER SHA
0x0429: 0x00fa, # CYRILLIC CAPITAL LETTER SHCHA
0x042a: 0x009f, # CYRILLIC CAPITAL LETTER HARD SIGN
0x042b: 0x00f2, # CYRILLIC CAPITAL LETTER YERU
0x042c: 0x00ee, # CYRILLIC CAPITAL LETTER SOFT SIGN
0x042d: 0x00f8, # CYRILLIC CAPITAL LETTER E
0x042e: 0x009d, # CYRILLIC CAPITAL LETTER YU
0x042f: 0x00e0, # CYRILLIC CAPITAL LETTER YA
0x0430: 0x00a0, # CYRILLIC SMALL LETTER A
0x0431: 0x00a2, # CYRILLIC SMALL LETTER BE
0x0432: 0x00eb, # CYRILLIC SMALL LETTER VE
0x0433: 0x00ac, # CYRILLIC SMALL LETTER GHE
0x0434: 0x00a6, # CYRILLIC SMALL LETTER DE
0x0435: 0x00a8, # CYRILLIC SMALL LETTER IE
0x0436: 0x00e9, # CYRILLIC SMALL LETTER ZHE
0x0437: 0x00f3, # CYRILLIC SMALL LETTER ZE
0x0438: 0x00b7, # CYRILLIC SMALL LETTER I
0x0439: 0x00bd, # CYRILLIC SMALL LETTER SHORT I
0x043a: 0x00c6, # CYRILLIC SMALL LETTER KA
0x043b: 0x00d0, # CYRILLIC SMALL LETTER EL
0x043c: 0x00d2, # CYRILLIC SMALL LETTER EM
0x043d: 0x00d4, # CYRILLIC SMALL LETTER EN
0x043e: 0x00d6, # CYRILLIC SMALL LETTER O
0x043f: 0x00d8, # CYRILLIC SMALL LETTER PE
0x0440: 0x00e1, # CYRILLIC SMALL LETTER ER
0x0441: 0x00e3, # CYRILLIC SMALL LETTER ES
0x0442: 0x00e5, # CYRILLIC SMALL LETTER TE
0x0443: 0x00e7, # CYRILLIC SMALL LETTER U
0x0444: 0x00aa, # CYRILLIC SMALL LETTER EF
0x0445: 0x00b5, # CYRILLIC SMALL LETTER HA
0x0446: 0x00a4, # CYRILLIC SMALL LETTER TSE
0x0447: 0x00fb, # CYRILLIC SMALL LETTER CHE
0x0448: 0x00f5, # CYRILLIC SMALL LETTER SHA
0x0449: 0x00f9, # CYRILLIC SMALL LETTER SHCHA
0x044a: 0x009e, # CYRILLIC SMALL LETTER HARD SIGN
0x044b: 0x00f1, # CYRILLIC SMALL LETTER YERU
0x044c: 0x00ed, # CYRILLIC SMALL LETTER SOFT SIGN
0x044d: 0x00f7, # CYRILLIC SMALL LETTER E
0x044e: 0x009c, # CYRILLIC SMALL LETTER YU
0x044f: 0x00de, # CYRILLIC SMALL LETTER YA
0x0451: 0x0084, # CYRILLIC SMALL LETTER IO
0x0452: 0x0080, # CYRILLIC SMALL LETTER DJE
0x0453: 0x0082, # CYRILLIC SMALL LETTER GJE
0x0454: 0x0086, # CYRILLIC SMALL LETTER UKRAINIAN IE
0x0455: 0x0088, # CYRILLIC SMALL LETTER DZE
0x0456: 0x008a, # CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
0x0457: 0x008c, # CYRILLIC SMALL LETTER YI
0x0458: 0x008e, # CYRILLIC SMALL LETTER JE
0x0459: 0x0090, # CYRILLIC SMALL LETTER LJE
0x045a: 0x0092, # CYRILLIC SMALL LETTER NJE
0x045b: 0x0094, # CYRILLIC SMALL LETTER TSHE
0x045c: 0x0096, # CYRILLIC SMALL LETTER KJE
0x045e: 0x0098, # CYRILLIC SMALL LETTER SHORT U
0x045f: 0x009a, # CYRILLIC SMALL LETTER DZHE
0x2116: 0x00ef, # NUMERO SIGN
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
| zephyrplugins/zephyr | zephyr.plugin.jython/jython2.5.2rc3/Lib/encodings/cp855.py | Python | epl-1.0 | 34,106 | 0.019615 |
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
'''
__author__ = 'Marko A. Rodriguez (http://markorodriguez.com)'
import unittest
from unittest import TestCase
import six
from gremlin_python.statics import long
from gremlin_python.structure.graph import Edge
from gremlin_python.structure.graph import Property
from gremlin_python.structure.graph import Vertex
from gremlin_python.structure.graph import VertexProperty
from gremlin_python.structure.graph import Path
class TestGraph(TestCase):
def test_graph_objects(self):
vertex = Vertex(1)
assert "v[1]" == str(vertex)
assert "vertex" == vertex.label
assert "person" == Vertex(1, "person").label
assert vertex == Vertex(1)
#
edge = Edge(2, Vertex(1), "said", Vertex("hello", "phrase"))
assert "e[2][1-said->hello]" == str(edge)
assert Vertex(1) == edge.outV
assert Vertex("hello") == edge.inV
assert "said" == edge.label
assert "phrase" == edge.inV.label
assert edge.inV != edge.outV
#
vertex_property = VertexProperty(long(24), "name", "marko")
assert "vp[name->marko]" == str(vertex_property)
assert "name" == vertex_property.label
assert "name" == vertex_property.key
assert "marko" == vertex_property.value
assert long(24) == vertex_property.id
assert isinstance(vertex_property.id, long)
assert vertex_property == VertexProperty(long(24), "name", "marko")
#
property = Property("age", 29)
assert "p[age->29]" == str(property)
assert "age" == property.key
assert 29 == property.value
assert isinstance(property.value, int)
assert property == Property("age", 29)
if not six.PY3:
assert property != Property("age", long(29))
#
for i in [vertex, edge, vertex_property, property]:
for j in [vertex, edge, vertex_property, property]:
if type(i) != type(j):
assert i != j
else:
assert i == j
assert i.__hash__() == hash(i)
def test_path(self):
path = Path([set(["a", "b"]), set(["c", "b"]), set([])], [1, Vertex(1), "hello"])
assert "[1, v[1], 'hello']" == str(path)
assert 1 == path["a"]
assert Vertex(1) == path["c"]
assert [1, Vertex(1)] == path["b"]
assert path[0] == 1
assert path[1] == Vertex(1)
assert path[2] == "hello"
assert 3 == len(path)
assert "hello" in path
assert "goodbye" not in path
assert Vertex(1) in path
assert Vertex(123) not in path
#
try:
temp = path[3]
raise Exception("Accessing beyond the list index should throw an index error")
except IndexError:
pass
#
try:
temp = path["zz"]
raise Exception("Accessing nothing should throw a key error")
except KeyError:
pass
#
try:
temp = path[1:2]
raise Exception("Accessing using slices should throw a type error")
except TypeError:
pass
#
assert path == path
assert hash(path) == hash(path)
path2 = Path([set(["a", "b"]), set(["c", "b"]), set([])], [1, Vertex(1), "hello"])
assert path == path2
assert hash(path) == hash(path2)
assert path != Path([set(["a"]), set(["c", "b"]), set([])], [1, Vertex(1), "hello"])
assert path != Path([set(["a", "b"]), set(["c", "b"]), set([])], [3, Vertex(1), "hello"])
if __name__ == '__main__':
unittest.main()
| samiunn/incubator-tinkerpop | gremlin-python/src/main/jython/tests/structure/test_graph.py | Python | apache-2.0 | 4,417 | 0.001132 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class RouteFiltersOperations(object):
"""RouteFiltersOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_05_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
route_filter_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
route_filter_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified route filter.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_filter_name: The name of the route filter.
:type route_filter_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
route_filter_name=route_filter_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
route_filter_name, # type: str
expand=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "_models.RouteFilter"
"""Gets the specified route filter.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_filter_name: The name of the route filter.
:type route_filter_name: str
:param expand: Expands referenced express route bgp peering resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RouteFilter, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_05_01.models.RouteFilter
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilter"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RouteFilter', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}'} # type: ignore
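    # Illustrative sketch (added; not part of the generated client): these
    # operations are normally reached through the top-level management client.
    # The imports and placeholder names below are assumptions about the
    # azure-identity and azure-mgmt-network packages, not taken from this file:
    #
    #     from azure.identity import DefaultAzureCredential
    #     from azure.mgmt.network import NetworkManagementClient
    #     client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
    #     route_filter = client.route_filters.get("my-rg", "my-filter")
    #     client.route_filters.begin_delete("my-rg", "my-filter").result()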
def _create_or_update_initial(
self,
resource_group_name, # type: str
route_filter_name, # type: str
route_filter_parameters, # type: "_models.RouteFilter"
**kwargs # type: Any
):
# type: (...) -> "_models.RouteFilter"
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilter"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(route_filter_parameters, 'RouteFilter')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('RouteFilter', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('RouteFilter', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
route_filter_name, # type: str
route_filter_parameters, # type: "_models.RouteFilter"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.RouteFilter"]
"""Creates or updates a route filter in a specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_filter_name: The name of the route filter.
:type route_filter_name: str
:param route_filter_parameters: Parameters supplied to the create or update route filter
operation.
:type route_filter_parameters: ~azure.mgmt.network.v2020_05_01.models.RouteFilter
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either RouteFilter or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_05_01.models.RouteFilter]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilter"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
route_filter_name=route_filter_name,
route_filter_parameters=route_filter_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('RouteFilter', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}'} # type: ignore
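    # Illustrative call pattern (client construction, attribute name and
    # argument values are assumptions, not part of the generated code): the
    # LROPoller returned above is typically driven to completion via result().
    #
    #   poller = network_client.route_filters.begin_create_or_update(
    #       "my-resource-group", "my-route-filter", route_filter_parameters)
    #   route_filter = poller.result()  # blocks until the operation finishes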
def update_tags(
self,
resource_group_name, # type: str
route_filter_name, # type: str
parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> "_models.RouteFilter"
"""Updates tags of a route filter.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_filter_name: The name of the route filter.
:type route_filter_name: str
:param parameters: Parameters supplied to update route filter tags.
:type parameters: ~azure.mgmt.network.v2020_05_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RouteFilter, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_05_01.models.RouteFilter
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilter"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RouteFilter', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}'} # type: ignore
def list_by_resource_group(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.RouteFilterListResult"]
"""Gets all route filters in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RouteFilterListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_05_01.models.RouteFilterListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilterListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('RouteFilterListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters'} # type: ignore
def list(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.RouteFilterListResult"]
"""Gets all route filters in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RouteFilterListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_05_01.models.RouteFilterListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilterListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('RouteFilterListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/routeFilters'} # type: ignore
| Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_05_01/operations/_route_filters_operations.py | Python | mit | 27,222 | 0.004629 |
def foo():
# type: () -> int
print(42)
return 42
res = fo<caret>o() | jwren/intellij-community | python/testData/refactoring/inlineFunction/removingTypeComment/main.py | Python | apache-2.0 | 81 | 0.037037 |
# encoding: utf-8
__author__ = "Nils Tobias Schmidt"
__email__ = "schmidt89 at informatik.uni-marburg.de"
'''
Holds some constants/settings related to celery.
'''
from androlyze.settings import *
############################################################
#---Max retry wait time
############################################################
# maximum wait time for database open
CELERY_DATABASE_OPEN_RETRY_MAX_TIME = 32
# maximum wait time for import script error
CELERY_IMPORT_SCRIPTS_ERROR_RETRY_MAX_TIME = 0
# maximum wait time for database storage
CELERY_DATABASE_STORE_RETRY_MAX_TIME = 120
############################################################
#---Result backend constants
# used to get information from callback handlers
############################################################
CELERY_RESULT_BACKEND_KEY_RESULT = "result"
CELERY_RESULT_BACKEND_KEY_TRACEBACK = "traceback"
CELERY_RESULT_BACKEND_KEY_STATUS = "status"
############################################################
#---Other
############################################################
# value for retrying until success
CELERY_RETRY_INFINITE = None
def get_analyze_task_name():
from androlyze.analyze.distributed.tasks.AnalyzeTask import AnalyzeTask
return AnalyzeTask.name
| nachtmaar/androlyze | androlyze/celery/CeleryConstants.py | Python | mit | 1,276 | 0.003135 |
#!/usr/bin/env python2
# coding=utf-8
"""
Defines gitver commands
"""
import re
import os
import sys
from string import Template
from termcolors import term, bold
from git import get_repo_info
from gitver.storage import KVStore
from sanity import check_gitignore
from defines import CFGDIR, PRJ_ROOT, CFGDIRNAME
from version import gitver_version, gitver_buildid
# file where to store NEXT strings <=> TAG user-defined mappings
NEXT_STORE_FILE = os.path.join(CFGDIR, ".next_store")
TPLDIR = os.path.join(CFGDIR, 'templates')
user_version_matcher = r"v{0,1}(?P<maj>\d+)\.(?P<min>\d+)\.(?P<patch>\d+)" \
r"(?:\.(?P<revision>\d+))?$"
#
# helpers
#
def template_path(name):
"""
Constructs and returns the absolute path for the specified template file
name.
"""
return os.path.join(TPLDIR, name)
def parse_templates(cfg, templates, repo, next_custom, preview):
"""
Parse one or more templates, substitute placeholder variables with
real values and write the result to the file specified in the template.
If preview is True, then the output will be written to the stdout while
informative messages will be output to the stderr.
"""
for t in templates.split(' '):
tpath = template_path(t)
if os.path.exists(tpath):
with open(tpath, 'r') as fp:
lines = fp.readlines()
if len(lines) < 2:
term.err("The template \"" + t + "\" is not valid, aborting.")
return
if not lines[0].startswith('#'):
term.err("The template \"" + t + "\" doesn't define any valid "
"output, aborting.")
return
output = str(lines[0]).strip(' #\n')
# resolve relative paths to the project's root
if not os.path.isabs(output):
output = os.path.join(PRJ_ROOT, output)
outdir = os.path.dirname(output)
if not os.path.exists(outdir):
term.err("The template output directory \"" + outdir +
"\" doesn't exists.")
term.info("Processing template \"" + bold(t) + "\" for " + output +
"...")
lines = lines[1:]
xformed = Template("".join(lines))
vstring = build_version_string(cfg, repo, False, next_custom)
args = build_format_args(cfg, repo, next_custom)
keywords = {
'CURRENT_VERSION': vstring,
'MAJOR': args['maj'],
'MINOR': args['min'],
'PATCH': args['patch'],
'REV': args['rev'],
'REV_PREFIX': args['rev_prefix'],
'BUILD_ID': args['build_id'],
'FULL_BUILD_ID': args['build_id_full'],
'COMMIT_COUNT': args['commit_count'],
'COMMIT_COUNT_STR':
str(args['commit_count']) if args['commit_count'] > 0 else '',
'COMMIT_COUNT_PREFIX': args['commit_count_prefix'],
'META_PR': args['meta_pr'],
'META_PR_PREFIX': args['meta_pr_prefix']
}
try:
res = xformed.substitute(keywords)
except KeyError as e:
term.err("Unknown key \"" + e.message + "\" found, aborting.")
sys.exit(1)
if not preview:
try:
fp = open(output, 'w')
fp.write(res)
fp.close()
except IOError:
term.err("Couldn't write file \"" + output + "\"")
sys.exit(1)
else:
term.out(res)
wrote_bytes = len(res) if preview else os.stat(output).st_size
term.info("Done, " + str(wrote_bytes) + " bytes written.")
else:
term.err("Couldn't find the \"" + t + "\" template")
sys.exit(1)
def parse_user_next_stable(user):
"""
Parse the specified user-defined string containing the next stable version
numbers and returns the discretized matches in a dictionary.
"""
try:
data = re.match(user_version_matcher, user).groupdict()
if len(data) < 3:
raise AttributeError
except AttributeError:
return False
return data
def build_format_args(cfg, repo_info, next_custom=None):
"""
Builds the formatting arguments by processing the specified repository
information and returns them.
If a tag defines pre-release metadata, this will have the precedence
over any existing user-defined string.
"""
in_next = repo_info['count'] > 0
has_next_custom = next_custom is not None and len(next_custom) > 0
vmaj = repo_info['maj']
vmin = repo_info['min']
vpatch = repo_info['patch']
vrev = repo_info['rev']
vcount = repo_info['count']
vpr = repo_info['pr']
vbuildid = repo_info['build-id']
has_pr = vpr is not None
has_rev = vrev is not None
# pre-release metadata in a tag has precedence over user-specified
# NEXT strings
if in_next and has_next_custom and not has_pr:
u = parse_user_next_stable(next_custom)
if not u:
term.err("Invalid custom NEXT version numbers detected!")
sys.exit(1)
vmaj = u['maj']
vmin = u['min']
vpatch = u['patch']
vrev = u['revision']
has_rev = vrev is not None
meta_pr = vpr if has_pr else \
cfg['default_meta_pr_in_next'] if in_next and has_next_custom else \
cfg['default_meta_pr_in_next_no_next'] if in_next else ''
args = {
'maj': vmaj,
'min': vmin,
'patch': vpatch,
'rev': vrev if has_rev else '',
'rev_prefix': '.' if has_rev else '',
'meta_pr': meta_pr,
'meta_pr_prefix': cfg['meta_pr_prefix'] if len(meta_pr) > 0 else '',
'commit_count': vcount if vcount > 0 else '',
'commit_count_prefix': cfg['commit_count_prefix'] if vcount > 0 else '',
'build_id': vbuildid,
'build_id_full': repo_info['full-build-id']
}
return args
def build_version_string(cfg, repo, promote=False, next_custom=None):
"""
Builds the final version string by processing the specified repository
information, optionally handling version promotion.
Version promotion will just return the user-specified next version string,
if any is present, else an empty string will be returned.
"""
in_next = repo['count'] > 0
has_next_custom = next_custom is not None and len(next_custom) > 0
if promote:
if has_next_custom:
# simulates next real version after proper tagging
version = next_custom
return version
else:
return ''
fmt = cfg['format'] if not in_next else cfg['format_next']
return fmt % build_format_args(cfg, repo, next_custom)
#
# commands
#
def cmd_version(cfg, args):
"""
Generates gitver's version string and license information and prints it
to the stdout.
"""
v = ('v' + gitver_version) if gitver_version is not None else 'n/a'
b = gitver_buildid if gitver_buildid is not None else 'n/a'
term.out("This is gitver " + bold(v))
term.out("Full build ID is " + bold(b))
from gitver import __license__
term.out(__license__)
def cmd_init(cfg, args):
"""
Initializes the current repository by creating the gitver's configuration
directory and creating the default configuration file, if none is present.
Multiple executions of this command will regenerate the default
configuration file whenever it's not found.
"""
from config import create_default_configuration_file
i = 0
if not os.path.exists(CFGDIR):
i += 1
os.makedirs(CFGDIR)
if not os.path.exists(TPLDIR):
i += 1
os.makedirs(TPLDIR)
# try create the default configuration file
wrote_cfg = create_default_configuration_file()
if wrote_cfg:
term.out("gitver has been initialized and configured.")
else:
term.warn("gitver couldn't create the default configuration file, "
"does it already exist?")
def cmd_current(cfg, args):
"""
Generates the current version string, depending on the state of the
repository and prints it to the stdout.
"""
next_store = KVStore(NEXT_STORE_FILE)
repo_info = get_repo_info()
last_tag = repo_info['last-tag']
has_next_custom = next_store.has(last_tag)
next_custom = next_store.get(last_tag) if has_next_custom else None
term.out(build_version_string(cfg, repo_info, False, next_custom))
def cmd_info(cfg, args):
"""
Generates version string and repository information and prints it to the
stdout.
"""
next_store = KVStore(NEXT_STORE_FILE)
repo_info = get_repo_info()
last_tag = repo_info['last-tag']
has_next_custom = next_store.has(last_tag)
next_custom = next_store.get(last_tag) if has_next_custom else None
if has_next_custom:
nvn = term.next(next_custom)
else:
nvn = "none, defaulting to " + \
term.next("-" + cfg['default_meta_pr_in_next_no_next']) + \
" suffix"
term.out("Most recent tag: " + term.tag(last_tag))
if repo_info['pr'] is None and repo_info['count'] > 0:
term.out("Using NEXT defined as: " + nvn)
term.out("(Pre-release metadata: none)")
elif repo_info['pr'] is not None:
term.out("(NEXT defined as: " + nvn + ")")
term.out("Using pre-release metadata: " +
term.tag(str(repo_info['pr'])))
term.out("Current build ID: " + term.tag(repo_info['full-build-id']))
promoted = build_version_string(cfg, repo_info, True, next_custom)
term.out(
"Current version: " + "v" +
term.ver(build_version_string(
cfg, repo_info, False, next_custom)) +
(" => v" + term.prom(promoted) if len(promoted) > 0 else '')
)
def cmd_list_templates(cfg, args):
"""
Generates a list of available templates by inspecting the gitver's template
directory and prints it to the stdout.
"""
tpls = [f for f in os.listdir(TPLDIR) if os.path.isfile(template_path(f))]
if len(tpls) > 0:
term.out("Available templates:")
for t in tpls:
term.out(" " + bold(t) + " (" + template_path(t) + ")")
else:
term.out("No templates available in " + TPLDIR)
def __cmd_build_template(cfg, args, preview=False):
"""
Internal-only function used for avoiding code duplication between
template-operating functions.
See cmd_build_template and cmd_preview_template for the full docs.
"""
next_store = KVStore(NEXT_STORE_FILE)
repo_info = get_repo_info()
last_tag = repo_info['last-tag']
has_next_custom = next_store.has(last_tag)
next_custom = next_store.get(last_tag) if has_next_custom else None
parse_templates(cfg, args.templates, repo_info, next_custom, preview)
def cmd_build_template(cfg, args):
"""
Performs placeholder variables substitution on the templates specified by
the @param args parameter and write the result to each respective output
file specified by the template itself.
"""
__cmd_build_template(cfg, args)
def cmd_preview_template(cfg, args):
"""
Performs placeholder variables substitution on the templates specified by
the @param args parameter and prints the result to the stdout.
"""
__cmd_build_template(cfg, args, True)
def cmd_next(cfg, args):
"""
Sets and defines the next stable version string for the most recent and
reachable tag.
    The string should be supplied in the format "maj.min.patch[.revision]",
    where square brackets denote an optional value.
All values are expected to be decimal numbers without leading zeros.
"""
next_store = KVStore(NEXT_STORE_FILE)
repo_info = get_repo_info()
last_tag = repo_info['last-tag']
vn = args.next_version_numbers
user = parse_user_next_stable(vn)
if not user:
term.err("Please specify valid version numbers.\nThe expected "
"format is <MAJ>.<MIN>.<PATCH>[.<REVISION>], e.g. v0.0.1, "
"0.0.1 or 0.0.2.1")
sys.exit(1)
custom = "%d.%d.%d" % (int(user['maj']), int(user['min']), int(user['patch']))
if user['revision'] is not None:
custom += ".%d" % (int(user['revision']))
next_store.set(last_tag, custom).save()
term.out("Set NEXT version string to " + term.next(custom) +
" for the current tag " + term.tag(last_tag))
def cmd_clean(cfg, args):
"""
Removes the user-defined next stable version for the most recent and
reachable tag or for the tag specified by the @param args parameter.
"""
next_store = KVStore(NEXT_STORE_FILE)
if len(args.tag) > 0:
tag = args.tag
else:
repo_info = get_repo_info()
tag = repo_info['last-tag']
has_custom = next_store.has(tag)
next_custom = next_store.get(tag) if has_custom else None
if has_custom:
next_store.rm(tag).save()
term.out("Cleaned up custom string version \"" + next_custom +
"\" for tag \"" + tag + "\"")
else:
term.out("No custom string version found for tag \"" + tag + "\"")
def cmd_cleanall(cfg, args):
"""
Removes ALL user-defined next stable versions.
"""
if os.path.exists(NEXT_STORE_FILE):
os.unlink(NEXT_STORE_FILE)
term.out("All previously set custom strings have been removed.")
else:
term.out("No NEXT custom strings found.")
def cmd_list_next(cfg, args):
"""
Generates a list of all user-defined next stable versions and prints them
to the stdout.
"""
next_store = KVStore(NEXT_STORE_FILE)
repo_info = get_repo_info()
last_tag = repo_info['last-tag']
has_next_custom = next_store.has(last_tag)
if not next_store.empty():
def print_item(k, v):
term.out(" %s => %s" % (term.tag(k), term.next(v)) +
(' (*)' if k == last_tag else ''))
term.out("Currently set NEXT custom strings (*=most recent and "
"reachable tag):")
for tag, vstring in sorted(next_store.items()):
print_item(tag, vstring)
if not has_next_custom:
print_item(last_tag, '<undefined>')
else:
term.out("No NEXT custom strings set.")
def cmd_check_gitignore(cfg, args):
"""
Provides a way to ensure that at least one line in the .gitignore file for
the current repository defines the '.gitver' directory in some way.
This means that even a definition such as "!.gitver" will pass the check,
    but this implies some reasoning has been made before declaring something like
this.
"""
if check_gitignore():
term.out("Your .gitignore file looks fine.")
else:
term.out("Your .gitignore file doesn't define any rule for the " +
CFGDIRNAME + "\nconfiguration directory: it's recommended to "
"exclude it from\nthe repository, unless you know what you "
"are doing. If you are not\nsure, add this line to your "
".gitignore file:\n\n " + CFGDIRNAME + "\n")
| manuelbua/gitver | gitver/commands.py | Python | apache-2.0 | 15,445 | 0.000129 |
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 2 19:02:52 2015
@author: piotr at nicecircuits.com
"""
from libraryManager.symbol import symbol
from libraryManager.symbolPrimitive import *
from libraryManager.defaults import defaults
from libraryManager.common import *
class symbolIC(symbol):
"""
IC symbol generator
Generate symbol with pinsLeft, pinsRight pins on each side
pinsLeft, pinsRight: list of ["pin name", "pin number", type]
width: width of symbol rectangle
"""
def __init__(self, name, pinsLeft, pinsRight, width, refDes=defaults.icRefDes,\
showPinNames=True, showPinNumbers=True):
super().__init__(name, refDes, showPinNames, showPinNumbers)
# body
height = (max(len(pinsLeft), len(pinsRight))+1)*100
offset = 50 if (height % 200) > 0 else 0
if width % 200 >0:
width = (width//200+1) * 200
self.log.debug("IC symbol body: pins: %d, %d; height: %d, offset: %d" %\
(len(pinsLeft), len(pinsRight), height, offset))
self.primitives.append(symbolRectangle(0, position=[0,offset],\
dimensions=[width, height], filled=fillType.background))
# pins
pinLength = 200
pins = [pinsLeft, pinsRight]
for x in range(2):
y = height/2-100+offset
for p in pins[x]:
if p:
if isinstance(p[2],str):
p[2]=pinType.fromStr[p[2]]
self.pins.append(symbolPin(str(p[0]), str(p[1]), [(width/2+pinLength)*(1 if x>0 else -1),y],\
pinLength, p[2], rotation=180 if x>0 else 0))
y = y-100
self.nameObject.position=[0, height/2 + self.nameObject.height + offset]
self.valueObject.position=[0, -(height/2 + self.valueObject.height) + offset]
class symbolICquad(symbol):
"""
Quad IC symbol generator
Generate symbol with pins[] on each side
    pins: list of 4 lists (one per side, possibly empty) of ["pin name", "pin number", type]
          top, right, bottom, left, clockwise
size: size of symbol rectangle (auto if 0)
"""
def __init__(self, name, pins, size=0, refDes=defaults.icRefDes,\
showPinNames=True, showPinNumbers=True):
super().__init__(name, refDes, showPinNames, showPinNumbers)
# body
if size==0:
size = (len(pins[0])+5)*100
offset = 50 if (size % 200) > 0 else 0
self.log.debug("IC quad symbol body: pins: %d; size: %d, offset: %d" %\
(sum([len(p) for p in pins]), size, offset))
self.primitives.append(symbolRectangle(0, position=[offset,offset],\
dimensions=[size, size], filled=fillType.background))
# pins
pinLength = 200
for side in range(4):
pos = [-len(pins[0])*50+50, size/2+pinLength]
rot = -side*90
for p in pins[side]:
if p:
self.pins.append(symbolPin(p[0], p[1],
translatePoints(rotatePoints([pos],rot),[offset,offset])[0],\
pinLength, p[2], rotation=(rot-90)%360))
pos[0] = pos[0]+100
# self.nameObject.position=[0, height/2 + self.nameObject.height + offset]
# self.valueObject.position=[0, -(height/2 + self.valueObject.height) + offset]
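# Minimal usage sketch (the pin data and the pin type placeholder below are
# assumptions, not taken from the library): one pin per side, clockwise
# from the top.
#
#   pins = [[["VCC", "1", some_pin_type]],   # top
#           [["OUT", "2", some_pin_type]],   # right
#           [["GND", "3", some_pin_type]],   # bottom
#           [["IN",  "4", some_pin_type]]]   # left
#   sym = symbolICquad("demoQuad", pins)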
| NiceCircuits/pcbLibraryManager | src/pcbLibraryManager/symbols/symbolsIC.py | Python | cc0-1.0 | 3,334 | 0.013197 |
import sklearn.cross_validation as cv
import sklearn.dummy as dummy
from sklearn.mixture import GMM
from sklearn.hmm import GMMHMM
from sklearn import linear_model, naive_bayes
import collections
import itertools
import pandas as pd
from testResults import TestResults
from counters import *
import utils as utils
class Model():
__metaclass__ = ABCMeta
params = {}
isSklearn = True
def __init__(self, params, verbose=False):
self.params = params
self.verbose = verbose
def printv(self, arg, title=None):
if self.verbose:
if title is not None:
print title
print arg
@property
def name(self):
return self._name
@abstractmethod
def _train(self, data):
"""Returns a trained model."""
pass
def _test(self, model, testData, resultObj):
"""Compares predictions made by specified model against test data.
Returns a TestResults object.
"""
# restrict test data to principal component features
features = self.params['features']
test = np.array(testData[features])
# predict a dialog sequence using test data
# sklearn counts from 0 so add 1...
if self.isSklearn:
pred = [int(r) + 1 for r in list(model.predict(test))]
else:
pred = [int(r) for r in list(model.predict(test))]
# extract true ratings from test data
true = [int(rating) for rating in testData['rating'].values.tolist()]
resultObj.compare(true, pred)
return resultObj
def loocv(self, data):
"""Leave-one-out cross validation using given data.
        Returns a TestResults object, where results are averages from the
cross validation steps.
"""
mask = cv.LeaveOneLabelOut(data['label'].values)
results = TestResults(self.name, verbose=self.verbose)
for trainMask, testMask in mask:
# training
trainingData = data.loc[trainMask]
self.printv(trainingData, "training data:")
model = self._train(trainingData)
# testing
testData = data.loc[testMask]
self.printv(testData, "test data:")
# leave p labels out
for label, testGroup in testData.groupby("label"):
results = self._test(model, testGroup, results)
return results
def kfoldscv(self, data, folds):
"""K-folds cross validation using given data and number of folds.
        Returns a TestResults object, where results are averages from the
cross validation steps.
"""
results = TestResults(self.name, verbose=self.verbose)
labels = list(np.unique(data['label'].values))
for tr, te in cv.KFold(len(labels), n_folds=folds):
trainD = data[data['label'].isin([labels[i] for i in tr])]
testD = data[data['label'].isin([labels[i] for i in te])]
self.printv(trainD, "training data:")
self.printv(testD, "test data:")
model = self._train(trainD)
for label, testGroup in testD.groupby("label"):
results = self._test(model, testGroup, results)
return results
def setFeatures(self, features):
self.params['features'] = features
class Dummy(Model):
_name = "dummy"
def _train(self, data):
if 'constant' in self.params.keys():
model = dummy.DummyClassifier(strategy=self.params['strategy'],
constant=self.params['constant'])
else:
model = dummy.DummyClassifier(strategy=self.params['strategy'])
d = np.array(zip(*[data[f].values for f in self.params['features']]))
y = np.array(data['rating'].values)
model.fit(d, y)
return model
class Gmm(Model):
"""A Gaussian mixture model.
Parameters are number of mixture components (num_mixc) and
covariance type (cov_type). Example:
    model = Gmm(params={'num_mixc': 3,
                        'cov_type': 'diag'})
"""
_name = "GMM"
def _train(self, data):
"""Trains a Gaussian mixture model, using the sklearn implementation."""
# parameters
features = self.params['features']
num_mixc = self.params['num_mixc']
cov_type = self.params['cov_type']
# prepare data shape
d = np.array(zip(*[data[f].values for f in features]))
# choose high number of EM-iterations to get constant results
gmm = GMM(num_mixc, cov_type, n_iter=300)
gmm.fit(d)
return gmm
class Gmmhmm(Model):
"""A hidden Markov model with Gaussian mixture emissions.
Parameters are number of mixture components (num_mixc), covariance type
(cov_type) and states (states). One Gaussian mixture model is created for
each state. Example:
model = Gmmhmm(params = {'num_mixc': 3,
'cov_type': 'diag',
'states': [1,2,3,4,5]})
"""
_name = "GMM-HMM"
def _train(self, data):
"""Trains a GMMHMM model, using the sklearn implementation and maximum-
likelihood estimates as HMM parameters (Hmm.mle(...)).
"""
# parameters
features = self.params['features']
num_mixc = self.params['num_mixc']
cov_type = self.params['cov_type']
states = self.params['states']
# train one GMM for each state
mixes = list()
for state in states:
# select data with current state label
d = data[data.rating == state]
# prepare data shape
d = np.array(zip(*[d[f].values for f in features]))
# init GMM
gmm = GMM(num_mixc, cov_type)
# train
gmm.fit(d)
mixes.append(gmm)
# train HMM with init, trans, GMMs=mixes
mle = Hmm.mle(MatrixCounterNoEmissions, data, states)
model = GMMHMM(n_components=len(states), init_params='', gmms=mixes)
model.transmat_ = mle.transition
model.startprob_ = mle.initial
return model
class Ols(Model):
""" Ordinary least squares regression """
_name = "OLS"
isSklearn = True
def _train(self, data):
features = self.params['features']
X = np.array(zip(*[data[f].values for f in features]))
y = np.array(data['rating'])
model = linear_model.LinearRegression()
model.fit(X, y)
return model
class LogisticRegression(Model):
""" Logistic Regression """
_name = "Logit"
isSklearn = True
def _train(self, data):
features = self.params['features']
X = np.array(zip(*[data[f].values for f in features]))
y = np.array(data['rating'])
model = linear_model.LogisticRegression(class_weight=self.params['class_weight'])
model.fit(X, y)
return model
class GaussianNaiveBayes(Model):
""" Gaussian Naive Bayes... """
_name = "G-NB"
isSklearn = True
def _train(self, data):
features = self.params['features']
X = np.array(zip(*[data[f].values for f in features]))
y = np.array(data['rating'])
model = naive_bayes.GaussianNB()
model.fit(X, y)
return model
class MultinomialNaiveBayes(Model):
""" Multinomial Naive Bayes... """
_name = "M-NB"
isSklearn = True
def _train(self, data):
features = self.params['features']
X = np.array(zip(*[data[f].values for f in features]))
y = np.array(data['rating'])
model = naive_bayes.MultinomialNB(alpha=self.params['alpha'],
fit_prior=self.params['fit_prior'])
model.fit(X, y)
return model
class Hmm(Model):
"""A hidden Markov model, using the Nltk implementation and maximum-
likelihood parameter estimates.
"""
_name = "HMM"
isSklearn = False
Parameters = collections.namedtuple(
'Parameters', 'initial transition emission emissionAlph')
class NltkWrapper():
def __init__(self, states, mle):
self.model = nltk.HiddenMarkovModelTagger(mle.emissionAlph,
states,
mle.transition,
mle.emission,
mle.initial)
def predict(self, obs):
tagged = self.model.tag([tuple(o) for o in obs])
return [val[1] for val in tagged]
def _train(self, data):
features = self.params['features']
states = self.params['states']
# calculate maximum-likelihood parameter estimates
mle = Hmm.mle_multipleFeatures(NltkCounter, data, states, features, self.verbose)
# create nltk HMM
model = Hmm.NltkWrapper(states, mle)
return model
@staticmethod
def mle(counterClass, data, stateAlphabet, feature=False):
""" Calculate maximum likelihood estimates for the HMM parameters
transitions probabilites, emission probabilites, and initial state
probabilites.
"""
f = feature is not False
states = utils.dfToSequences(data, ['rating'])
if f:
emissionAlphabet = pd.unique(data[feature].values.ravel())
emissions = utils.dfToSequences(data, [feature])
else:
emissionAlphabet = None
counter = counterClass(stateAlphabet, emissionAlphabet, states)
# count for each state sequence
for k, seq in enumerate(states):
if f: emi = emissions[k]
# for each state transition
for i, current in enumerate(seq):
# count(current, next, first, emission)
if f:
emission = emi[i]
else:
emission = False
next = seq[i + 1] if i < len(seq) - 1 else False
counter.count(i, current, next, emission)
return Hmm.Parameters(
initial=counter.getInitialProb(),
transition=counter.getTransitionProb(),
emission=counter.getEmissionProb(),
emissionAlph=emissionAlphabet
)
@staticmethod
def mle_multipleFeatures(counterClass, data, stateAlphabet, features, verbose=False):
""" Calculate maximum likelihood estimates of HMM parameters.
        Parameters are transition probabilities, emission probabilities and
        initial state probabilities.
        This method allows specifying multiple features and combines multiple
emission features assuming conditional independence:
P(feat1=a & feat2=b|state) = P(feat1=a|state) * P(feat2=b|state)
"""
p = lambda feat: Hmm.mle(DictCounter, data, stateAlphabet, feat)
counter = counterClass(stateAlphabet, [], False)
# calculate conditional probabilites for each feature & corresponding
# emission alphabet entry..
# P(feat_i=emm_ij|state_k) forall: I features, J_i emissions, K states
# ps = {feature:emmission distribution}
emission_probs = [p(f).emission for f in features]
# calculate inital state probabilites, transition probabilites using
# first/any feature
mle_single = Hmm.mle(counterClass, data, stateAlphabet, features[0])
initial_probs = mle_single.initial
transition_probs = mle_single.transition
# combine the emission alphabets of all given features
emissionAlphabet = list()
for f in features:
emissionAlphabet.append(pd.unique(data[f].values.ravel()))
# calculate all emission combinations
# and according probabilities per state
for comb in list(itertools.product(*emissionAlphabet)):
counter.addEmissionCombination(tuple(comb))
for state in stateAlphabet:
# for each individual prob of each feature
for emission, featNum in zip(comb, xrange(0, len(emission_probs))):
prob = emission_probs[featNum][state][emission]
counter.addCombinedEmissionProb(state, tuple(comb), prob)
if verbose:
print("Initial Probabilities")
printDictProbDist(initial_probs)
print("Transition Probabilities")
printCondDictProbDist(transition_probs)
print("Emission Probabilities")
printCondDictProbDist(counter.getCombinedEmissionProb())
return Hmm.Parameters(
initial=initial_probs,
transition=transition_probs,
emission=counter.getCombinedEmissionProb(),
emissionAlph=counter.getEmissionCombinations()
)
| phihes/sds-models | sdsModels/models.py | Python | mit | 12,892 | 0.000853 |
"""
WSGI config for polychart project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| Polychart/builder | server/polychart/wsgi.py | Python | agpl-3.0 | 1,058 | 0 |
#!/bin/env python
import os, sys
def usage():
print "%s WORK_DIR [num_results]" % sys.argv[0]
sys.exit(1)
try:
work_dir = sys.argv[1]
except:
print "you must specify your DenyHosts WORK_DIR"
usage()
try:
num = int(sys.argv[2])
except:
num = 10
fname = os.path.join(work_dir, "users-invalid")
try:
fp = open(fname, "r")
except:
print fname, "does not exist"
sys.exit(1)
d = {}
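# Each users-invalid line is expected to look like "username:attempts:timestamp"
# (illustrative example: "admin:17:2014-05-02"), so "admin" would be grouped
# under 17 attempts below.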
for line in fp:
try:
foo = line.split(":")
username = foo[0]
attempts = int(foo[1])
# timestamp = foo[2].strip()
except:
continue
l = d.get(attempts, [])
l.append(username)
d[attempts] = l
fp.close()
keys = d.keys()
keys.sort()
keys.reverse()
i = 0
for key in keys:
l = d.get(key)
for username in l:
i += 1
print username
if i >= num: break
if i >= num: break
| mobiledayadmin/DenyHosts | scripts/restricted_from_invalid.py | Python | gpl-2.0 | 884 | 0.013575 |
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
# We use the inorder traversal to find which elements lie to the left and right
# of the current element, and the postorder traversal (consumed from the end)
# to pick the current root, constructing the right subtree before the left.
class Solution:
def buildTree(self, inorder: List[int], postorder: List[int]) -> TreeNode:
def helper(inorderL, inorderR):
# base case
if inorderL >= inorderR:
return None
nonlocal postorder
curr = postorder.pop()
root = TreeNode(curr)
currPos = inorderMap[curr]
root.right = helper(currPos+1, inorderR)
root.left = helper(inorderL, currPos)
return root
inorderMap = {v:k for k, v in enumerate(inorder)}
return helper(0, len(inorder)) | saisankargochhayat/algo_quest | leetcode/106. Construct Binary Tree from Inorder and Postorder Traversal/soln.py | Python | apache-2.0 | 978 | 0.00818 |
#!/usr/bin/python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Run this script from the root of the repository to update all translations from
transifex.
It will do the following automatically:
- fetch all translations using the tx tool
- post-process them into valid and committable format
- remove invalid control characters
- remove location tags (makes diffs less noisy)
TODO:
- auto-add new translations to the build system according to the translation process
'''
from __future__ import division, print_function
import subprocess
import re
import sys
import os
import io
import xml.etree.ElementTree as ET
# Name of transifex tool
TX = 'tx'
# Name of source language file
SOURCE_LANG = 'moneta_en.ts'
# Directory with locale files
LOCALE_DIR = 'src/qt/locale'
def check_at_repository_root():
if not os.path.exists('.git'):
print('No .git directory found')
print('Execute this script at the root of the repository', file=sys.stderr)
exit(1)
def fetch_all_translations():
if subprocess.call([TX, 'pull', '-f']):
print('Error while fetching translations', file=sys.stderr)
exit(1)
def find_format_specifiers(s):
'''Find all format specifiers in a string.'''
pos = 0
specifiers = []
while True:
percent = s.find('%', pos)
if percent < 0:
break
specifiers.append(s[percent+1])
pos = percent+2
return specifiers
def split_format_specifiers(specifiers):
'''Split format specifiers between numeric (Qt) and others (strprintf)'''
numeric = []
other = []
for s in specifiers:
if s in {'1','2','3','4','5','6','7','8','9'}:
numeric.append(s)
else:
other.append(s)
# numeric (Qt) can be present in any order, others (strprintf) must be in specified order
return set(numeric),other
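# Illustrative behaviour (example strings, not from the translation files):
#   find_format_specifiers("Sent %1 of %2 bytes")    -> ['1', '2']
#   split_format_specifiers(['1', '2'])              -> ({'1', '2'}, [])
#   find_format_specifiers("copied %d files to %s")  -> ['d', 's']
#   split_format_specifiers(['d', 's'])              -> (set(), ['d', 's'])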
def sanitize_string(s):
'''Sanitize string for printing'''
return s.replace('\n',' ')
def check_format_specifiers(source, translation, errors):
source_f = split_format_specifiers(find_format_specifiers(source))
# assert that no source messages contain both Qt and strprintf format specifiers
# if this fails, go change the source as this is hacky and confusing!
assert(not(source_f[0] and source_f[1]))
try:
translation_f = split_format_specifiers(find_format_specifiers(translation))
except IndexError:
errors.append("Parse error in translation '%s'" % sanitize_string(translation))
return False
else:
if source_f != translation_f:
errors.append("Mismatch between '%s' and '%s'" % (sanitize_string(source), sanitize_string(translation)))
return False
return True
def all_ts_files(suffix=''):
for filename in os.listdir(LOCALE_DIR):
# process only language files, and do not process source language
if not filename.endswith('.ts'+suffix) or filename == SOURCE_LANG+suffix:
continue
if suffix: # remove provided suffix
filename = filename[0:-len(suffix)]
filepath = os.path.join(LOCALE_DIR, filename)
yield(filename, filepath)
FIX_RE = re.compile(b'[\x00-\x09\x0b\x0c\x0e-\x1f]')
def remove_invalid_characters(s):
'''Remove invalid characters from translation string'''
return FIX_RE.sub(b'', s)
# Override cdata escape function to make our output match Qt's (optional, just for cleaner diffs for
# comparison, disable by default)
_orig_escape_cdata = None
def escape_cdata(text):
text = _orig_escape_cdata(text)
text = text.replace("'", ''')
text = text.replace('"', '"')
return text
def postprocess_translations(reduce_diff_hacks=False):
print('Checking and postprocessing...')
if reduce_diff_hacks:
global _orig_escape_cdata
_orig_escape_cdata = ET._escape_cdata
ET._escape_cdata = escape_cdata
for (filename,filepath) in all_ts_files():
os.rename(filepath, filepath+'.orig')
have_errors = False
for (filename,filepath) in all_ts_files('.orig'):
# pre-fixups to cope with transifex output
parser = ET.XMLParser(encoding='utf-8') # need to override encoding because 'utf8' is not understood only 'utf-8'
with open(filepath + '.orig', 'rb') as f:
data = f.read()
# remove control characters; this must be done over the entire file otherwise the XML parser will fail
data = remove_invalid_characters(data)
tree = ET.parse(io.BytesIO(data), parser=parser)
# iterate over all messages in file
root = tree.getroot()
for context in root.findall('context'):
for message in context.findall('message'):
numerus = message.get('numerus') == 'yes'
source = message.find('source').text
translation_node = message.find('translation')
# pick all numerusforms
if numerus:
translations = [i.text for i in translation_node.findall('numerusform')]
else:
translations = [translation_node.text]
for translation in translations:
if translation is None:
continue
errors = []
valid = check_format_specifiers(source, translation, errors)
for error in errors:
print('%s: %s' % (filename, error))
if not valid: # set type to unfinished and clear string if invalid
translation_node.clear()
translation_node.set('type', 'unfinished')
have_errors = True
# Remove location tags
for location in message.findall('location'):
message.remove(location)
# Remove entire message if it is an unfinished translation
if translation_node.get('type') == 'unfinished':
context.remove(message)
# write fixed-up tree
# if diff reduction requested, replace some XML to 'sanitize' to qt formatting
if reduce_diff_hacks:
out = io.BytesIO()
tree.write(out, encoding='utf-8')
out = out.getvalue()
out = out.replace(b' />', b'/>')
with open(filepath, 'wb') as f:
f.write(out)
else:
tree.write(filepath, encoding='utf-8')
return have_errors
if __name__ == '__main__':
check_at_repository_root()
fetch_all_translations()
postprocess_translations()
| moneta-project/moneta-2.0.1.0 | contrib/devtools/update-translations.py | Python | mit | 6,780 | 0.00649 |
from __future__ import absolute_import, print_function, division
from pony.py23compat import basestring
from functools import wraps
from contextlib import contextmanager
from pony.orm.core import Database
from pony.utils import import_module
def raises_exception(exc_class, msg=None):
def decorator(func):
def wrapper(self, *args, **kwargs):
try:
func(self, *args, **kwargs)
self.fail("expected exception %s wasn't raised" % exc_class.__name__)
except exc_class as e:
if not e.args: self.assertEqual(msg, None)
elif msg is not None:
self.assertEqual(e.args[0], msg, "incorrect exception message. expected '%s', got '%s'" % (msg, e.args[0]))
wrapper.__name__ = func.__name__
return wrapper
return decorator
@contextmanager
def raises_if(test, cond, exc_class, exc_msg=None):
try:
yield
except exc_class as e:
test.assertTrue(cond)
if exc_msg is None: pass
elif exc_msg.startswith('...') and exc_msg != '...':
if exc_msg.endswith('...'):
test.assertIn(exc_msg[3:-3], str(e))
else:
test.assertTrue(str(e).endswith(exc_msg[3:]))
elif exc_msg.endswith('...'):
test.assertTrue(str(e).startswith(exc_msg[:-3]))
else:
test.assertEqual(str(e), exc_msg)
else:
test.assertFalse(cond)
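# Illustrative usage (hypothetical test code, not from this test suite):
#
#   with raises_if(test, x < 0, ValueError, 'value must be non-negative'):
#       do_something(x)
#
# The exception is required only when the condition holds; a leading '...' in
# exc_msg turns the comparison into a suffix check, a trailing '...' into a
# prefix check, and '...' on both ends into a substring check.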
def flatten(x):
result = []
for el in x:
if hasattr(el, "__iter__") and not isinstance(el, basestring):
result.extend(flatten(el))
else:
result.append(el)
return result
class TestConnection(object):
def __init__(con, database):
con.database = database
if database and database.provider_name == 'postgres':
con.autocommit = True
def commit(con):
pass
def rollback(con):
pass
def cursor(con):
return test_cursor
class TestCursor(object):
def __init__(cursor):
cursor.description = []
cursor.rowcount = 0
def execute(cursor, sql, args=None):
pass
def fetchone(cursor):
return None
def fetchmany(cursor, size):
return []
def fetchall(cursor):
return []
test_cursor = TestCursor()
class TestPool(object):
def __init__(pool, database):
pool.database = database
def connect(pool):
return TestConnection(pool.database)
def release(pool, con):
pass
def drop(pool, con):
pass
def disconnect(pool):
pass
class TestDatabase(Database):
real_provider_name = None
raw_server_version = None
sql = None
def bind(self, provider_name, *args, **kwargs):
if self.real_provider_name is not None:
provider_name = self.real_provider_name
self.provider_name = provider_name
provider_module = import_module('pony.orm.dbproviders.' + provider_name)
provider_cls = provider_module.provider_cls
raw_server_version = self.raw_server_version
if raw_server_version is None:
if provider_name == 'sqlite': raw_server_version = '3.7.17'
elif provider_name in ('postgres', 'pygresql'): raw_server_version = '9.2'
elif provider_name == 'oracle': raw_server_version = '11.2.0.2.0'
elif provider_name == 'mysql': raw_server_version = '5.6.11'
else: assert False, provider_name # pragma: no cover
t = [ int(component) for component in raw_server_version.split('.') ]
if len(t) == 2: t.append(0)
server_version = tuple(t)
if provider_name in ('postgres', 'pygresql'):
server_version = int('%d%02d%02d' % server_version)
class TestProvider(provider_cls):
def inspect_connection(provider, connection):
pass
TestProvider.server_version = server_version
kwargs['pony_check_connection'] = False
kwargs['pony_pool_mockup'] = TestPool(self)
Database.bind(self, TestProvider, *args, **kwargs)
def _execute(database, sql, globals, locals, frame_depth):
assert False # pragma: no cover
def _exec_sql(database, sql, arguments=None, returning_id=False):
assert type(arguments) is not list and not returning_id
database.sql = sql
database.arguments = arguments
return test_cursor
def generate_mapping(database, filename=None, check_tables=True, create_tables=False):
return Database.generate_mapping(database, filename, create_tables=False)
| Ahmad31/Web_Flask_Cassandra | flask/lib/python2.7/site-packages/pony/orm/tests/testutils.py | Python | apache-2.0 | 4,752 | 0.008207 |
from __future__ import absolute_import
from __future__ import print_function
from typing import Any, Dict, Optional
if False:
import zerver.tornado.event_queue
descriptors_by_handler_id = {} # type: Dict[int, zerver.tornado.event_queue.ClientDescriptor]
def get_descriptor_by_handler_id(handler_id):
# type: (int) -> zerver.tornado.event_queue.ClientDescriptor
return descriptors_by_handler_id.get(handler_id)
def set_descriptor_by_handler_id(handler_id, client_descriptor):
# type: (int, zerver.tornado.event_queue.ClientDescriptor) -> None
descriptors_by_handler_id[handler_id] = client_descriptor
def clear_descriptor_by_handler_id(handler_id, client_descriptor):
# type: (int, Optional[zerver.tornado.event_queue.ClientDescriptor]) -> None
del descriptors_by_handler_id[handler_id]
| christi3k/zulip | zerver/tornado/descriptors.py | Python | apache-2.0 | 821 | 0.00609 |
from pycp2k.inputsection import InputSection
class _basis4(InputSection):
def __init__(self):
InputSection.__init__(self)
self.Default_keyword = []
self._name = "BASIS"
self._repeated_default_keywords = {'Default_keyword': 'DEFAULT_KEYWORD'}
self._attributes = ['Default_keyword']
| SINGROUP/pycp2k | pycp2k/classes/_basis4.py | Python | lgpl-3.0 | 328 | 0.006098 |
import io
import sys
import pytest
import progressbar
def test_nowrap():
# Make sure we definitely unwrap
for i in range(5):
progressbar.streams.unwrap(stderr=True, stdout=True)
stdout = sys.stdout
stderr = sys.stderr
progressbar.streams.wrap()
assert stdout == sys.stdout
assert stderr == sys.stderr
progressbar.streams.unwrap()
assert stdout == sys.stdout
assert stderr == sys.stderr
# Make sure we definitely unwrap
for i in range(5):
progressbar.streams.unwrap(stderr=True, stdout=True)
def test_wrap():
# Make sure we definitely unwrap
for i in range(5):
progressbar.streams.unwrap(stderr=True, stdout=True)
stdout = sys.stdout
stderr = sys.stderr
progressbar.streams.wrap(stderr=True, stdout=True)
assert stdout != sys.stdout
assert stderr != sys.stderr
# Wrap again
stdout = sys.stdout
stderr = sys.stderr
progressbar.streams.wrap(stderr=True, stdout=True)
assert stdout == sys.stdout
assert stderr == sys.stderr
# Make sure we definitely unwrap
for i in range(5):
progressbar.streams.unwrap(stderr=True, stdout=True)
def test_excepthook():
progressbar.streams.wrap(stderr=True, stdout=True)
try:
raise RuntimeError()
except RuntimeError:
progressbar.streams.excepthook(*sys.exc_info())
progressbar.streams.unwrap_excepthook()
progressbar.streams.unwrap_excepthook()
def test_fd_as_io_stream():
stream = io.StringIO()
with progressbar.ProgressBar(fd=stream) as pb:
for i in range(101):
pb.update(i)
stream.close()
def test_no_newlines():
kwargs = dict(
redirect_stderr=True,
redirect_stdout=True,
line_breaks=False,
is_terminal=True,
)
with progressbar.ProgressBar(**kwargs) as bar:
for i in range(5):
bar.update(i)
for i in range(5, 10):
try:
print('\n\n', file=progressbar.streams.stdout)
print('\n\n', file=progressbar.streams.stderr)
except ValueError:
pass
bar.update(i)
@pytest.mark.parametrize('stream', [sys.__stdout__, sys.__stderr__])
def test_fd_as_standard_streams(stream):
with progressbar.ProgressBar(fd=stream) as pb:
for i in range(101):
pb.update(i)
| WoLpH/python-progressbar | tests/test_stream.py | Python | bsd-3-clause | 2,391 | 0 |
'''
Designs oligos for a pre RNA-seq selection method
'''
### imports ###
import sys
import os
import numpy as np
def readFastaFile(fastaFilePath):
'''
Given a path to a multiline fasta file, reads the file, returning two lists - one containing the sequences, the other containing the headers
inputs: path to a fasta file
outputs: a list of the sequences, a list of the sequence headers
'''
sequences = []
headers = []
with open(fastaFilePath) as f:
data = f.readlines()
sequence = ""
for line in data:
if ">" in line:
header = line.replace(">", "").strip()
headers.append(header)
if not sequence == "":
sequences.append(sequence.upper())
sequence = ""
else:
sequence += line.strip()
sequences.append(sequence.upper())
return sequences, headers
def makeOligos(targetSequences, targetLength, outputPath):
'''
    Gives all unique k-mers of the target length that appear in the target sequences
inputs: a list of sequences, length of k-mers, path to write output files
outputs: writes the designed oligos to a Fasta file
'''
seenOligos = set()
for i in range(len(targetSequences)):
currentSeq = targetSequences[i]
for j in range(len(targetSequences[i]) - targetLength):
oligo = currentSeq[ j : j + targetLength ]
seenOligos.add(oligo)
# write fasta files
oligos = list(seenOligos)
for i in range(len(oligos)):
outFile = open(outputPath + "/" + oligos[i] + ".fa", "w")
for j in range(1):
outFile.write(">" + str(j) + "\n")
outFile.write(oligos[i] + "\n")
outFile.close()
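# Illustrative behaviour (toy input, not real sequence data): with
# targetSequences = ["ACGTAC"] and targetLength = 3, the code above writes one
# FASTA file each for "ACG", "CGT" and "GTA" (the window starting at the final
# possible position is skipped because the loop range stops at len - targetLength).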
if __name__ == "__main__":
targetDirectoryPath = sys.argv[1] # path to a directory containing fasta files giving the sequences we want the oligos to hybridize to
targetLength = int(sys.argv[2]) # desired length of oligos
outputPath = sys.argv[3] # path to write output files
    # initialize lists
allTargetSequences = []
allTargetHeaders = []
# read in sequences
print("reading target files")
for targetFile in os.listdir(targetDirectoryPath):
print(targetFile)
targetSequences, targetHeaders = readFastaFile(targetDirectoryPath + "/" + targetFile)
allTargetSequences += targetSequences
allTargetHeaders += targetHeaders
print("writing oligo fasta files")
    makeOligos(allTargetSequences, targetLength, outputPath)
| jenhantao/preRNA-seq_OligoDesigner | makeAllOligos.py | Python | bsd-2-clause | 2,270 | 0.031718 |
import pytest
import salt.engines
from tests.support.mock import MagicMock, patch
def test_engine_module_name():
engine = salt.engines.Engine({}, "foobar.start", {}, {}, {}, {}, name="foobar")
assert engine.name == "foobar"
def test_engine_title_set():
engine = salt.engines.Engine({}, "foobar.start", {}, {}, {}, {}, name="foobar")
with patch("salt.utils.process.appendproctitle", MagicMock()) as mm:
with pytest.raises(KeyError):
# The method does not exist so a KeyError will be raised.
engine.run()
mm.assert_called_with("foobar")
| saltstack/salt | tests/pytests/unit/engines/test_engines.py | Python | apache-2.0 | 595 | 0.003361 |
import os
import re
import json
def createSpeciesJson(source_data_path):
# create the species.json file using data from the specified path
# traverse directories looking for dirs named "1km". If it's
# path matches this pattern:
# .../<taxon-name>/models/<species-name>/1km
# then record that as a species / taxon record.
# here's a regex to test for species dirs:
one_km_regex = re.compile(r'/(\w+)/species/(\w+)/1km$')
# we'll get the species common name from here:
common_names = {}
cn_file = os.path.join(os.path.dirname(__file__), 'all_species.json')
try:
# try reading in the list of sci-to-common species names
with open(cn_file) as f:
common_names = json.load(f)
    except Exception:
# give up on common names if we can't read them
common_names = {}
#
# okay, ready to check for modelled species
#
species_list = {}
for dir, subdirs, files in os.walk(source_data_path):
match = one_km_regex.search(dir)
if match:
taxon = match.group(1)
sci_name = match.group(2).replace('_', ' ')
species_list[sci_name] = {
"commonNames": common_names.get(sci_name, [""]),
"group": taxon
}
# if we found a species dir, we don't need to keep
# os.walk()ing into its descendent dirs
subdirs[:] = []
# now save our species list
json_path = os.path.join(os.path.dirname(__file__), 'species.json')
with open(json_path, 'w') as json_file:
json.dump(species_list, json_file, sort_keys = True, indent = 4)
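# Illustrative example (paths and names are hypothetical): a directory such as
#   <source_data_path>/reptiles/species/Tiliqua_rugosa/1km/
# produces a species.json entry like
#   "Tiliqua rugosa": {"commonNames": [...], "group": "reptiles"}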
def createBiodiversityJson(source_data_path):
# create the biodiversity.json file using data from the specified path
# traverse directories looking for "deciles" dirs.
# If a dir's path matches this pattern:
# .../<taxon-name>/biodiversity/deciles
# then record that as a taxon / biodiversity record.
# here's a regex to test for biodiv dirs:
biodiv_regex = re.compile(r'/(\w+)/biodiversity/deciles$')
biodiv_list = {}
for dir, subdirs, files in os.walk(source_data_path):
match = biodiv_regex.search(dir)
if match:
taxon = match.group(1)
biodiv_list[taxon] = {
"group": taxon
}
# if we found a biodiv dir, we don't need to keep
# os.walk()ing into its descendent dirs
subdirs[:] = []
# now save our biodiv list
json_path = os.path.join(os.path.dirname(__file__), 'biodiversity.json')
with open(json_path, 'w') as json_file:
json.dump(biodiv_list, json_file, sort_keys = True, indent = 4)
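# Typical usage (path is illustrative):
#   createSpeciesJson('/data/climas/source')
#   createBiodiversityJson('/data/climas/source')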
| jcu-eresearch/climas-ng | webapp/climasng/data/datafinder.py | Python | apache-2.0 | 2,719 | 0.004046 |
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
import MDAnalysis as mda
import numpy as np
import pytest
from numpy.testing import assert_equal
from MDAnalysis.lib.mdamath import triclinic_vectors
from MDAnalysisTests.datafiles import (DMS)
class TestDMSReader(object):
@pytest.fixture()
def universe(self):
return mda.Universe(DMS)
@pytest.fixture()
def ts(self, universe):
return universe.trajectory.ts
def test_global_cell(self, ts):
assert ts.dimensions is None
def test_velocities(self, ts):
assert_equal(hasattr(ts, "_velocities"), False)
def test_number_of_coords(self, universe):
# Desired value taken from VMD
# Info) Atoms: 3341
assert_equal(len(universe.atoms), 3341)
def test_coords_atom_0(self, universe):
# Desired coordinates taken directly from the SQLite file. Check unit
# conversion
coords_0 = np.array([-11.0530004501343,
26.6800003051758,
12.7419996261597, ],
dtype=np.float32)
assert_equal(universe.atoms[0].position, coords_0)
def test_n_frames(self, universe):
assert_equal(universe.trajectory.n_frames, 1,
"wrong number of frames in pdb")
def test_time(self, universe):
assert_equal(universe.trajectory.time, 0.0,
"wrong time of the frame")
def test_frame(self, universe):
assert_equal(universe.trajectory.frame, 0, "wrong frame number "
"(0-based, should be 0 for single frame readers)")
def test_frame_index_0(self, universe):
universe.trajectory[0]
assert_equal(universe.trajectory.ts.frame, 0,
"frame number for frame index 0 should be 0")
def test_frame_index_1_raises_IndexError(self, universe):
with pytest.raises(IndexError):
universe.trajectory[1]
| MDAnalysis/mdanalysis | testsuite/MDAnalysisTests/coordinates/test_dms.py | Python | gpl-2.0 | 3,032 | 0.00033 |
#############
# ECE 612 Spring 2017
# Joe Parrish
#
# Use the same logic from SpectrumTester.py to generate multiple sine waves
# but write that output to a .wav file for file based testing of the project code
#############
import wave
import argparse
import numpy as np
def generate_sample_file(test_freqs, test_amps, chunk=4096, samplerate=44100):
filename = 'Sample'
x = np.arange(chunk)
y = np.zeros(chunk)
for test_freq,test_amp in zip(test_freqs,test_amps):
filename += '_' + str(test_freq) + 'Hz@' + str(test_amp)
y = np.add(y, np.sin(2 * np.pi * test_freq * x / samplerate) * test_amp)
filename += '.wav'
y = y.astype('i2')
wave_writer = wave.open(filename, mode='wb')
wave_writer.setnchannels(1)
wave_writer.setsampwidth(2)
wave_writer.setframerate(samplerate)
    # repeat the chunk 8 times so the file has some duration, then close to
    # finalize the wav header
    for _ in range(0, 8):
        wave_writer.writeframes(y)
    wave_writer.close()
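# Example usage (values are illustrative):
#   generate_sample_file([440, 880], [8000, 4000])
#   -> writes Sample_440Hz@8000_880Hz@4000.wav to the current directory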
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Write a wave file containing Numpy generated sine waves')
parser.add_argument('--freqs', nargs='+', type=int)
parser.add_argument('--amps', nargs='+', type=int)
args = parser.parse_args()
generate_sample_file(args.freqs, args.amps)
| parrisha/raspi-visualizer | samples/WavGenerator.py | Python | mit | 1,233 | 0.024331 |
__author__ = 'Andrew Williamson <[email protected]>'
| Atothendrew/XcodeBuildIncrementer | XcodeBuildIncrementer/__init__.py | Python | mit | 60 | 0 |
# -*- coding: utf-8 -*-
"""Init and utils."""
from zope.i18nmessageid import MessageFactory
_ = MessageFactory('dpf.sitetheme')
def initialize(context):
"""Initializer called when used as a Zope 2 product."""
| a25kk/dpf | src/dpf.sitetheme/dpf/sitetheme/__init__.py | Python | mit | 217 | 0 |
# Copyright (c) Mathias Kaerlev 2012.
# This file is part of Anaconda.
# Anaconda is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Anaconda is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Anaconda. If not, see <http://www.gnu.org/licenses/>.
from mmfparser.bytereader import ByteReader
from mmfparser.loader import DataLoader
class ExtData(DataLoader):
name = None
data = None
def read(self, reader):
if self.settings.get('old', False):
self.filename = reader.readString()
self.data = reader.read()
else:
reader.checkDefault(reader.readInt(), 0)
reader.checkDefault(reader.read(), '')
def write(self, reader):
reader.writeInt(0)
__all__ = ['ExtData'] | joaormatos/anaconda | mmfparser/data/chunkloaders/extdata.py | Python | gpl-3.0 | 1,201 | 0.004163 |
# -*- coding: utf-8 -*-
from django.contrib.auth.models import AbstractUser
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
@python_2_unicode_compatible
class User(AbstractUser):
# First Name and Last Name do not cover name patterns
# around the globe.
name = models.CharField(_("Name of User"), blank=True, max_length=255)
def __str__(self):
return self.username
| GeoMatDigital/django-geomat | geomat/users/models.py | Python | bsd-3-clause | 493 | 0 |
__author__ = 'Volodya'
from model.recordfields import RecordFields
import random
import string
import os.path
import jsonpickle
import getopt
import sys
try:
opts, args = getopt.getopt(sys.argv[1:], "n:f:", ['number of records', 'file'])
except getopt.GetoptError as err:
    # getopt has no usage() helper; report the parse error instead
    print(err)
sys.exit(2)
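# Example invocation (values are illustrative):
#   python generator/record.py -n 10 -f data/records.json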
n = 5
f = 'data/records.json'
for o, a in opts:
if o == '-n':
n = int(a)
elif o == '-f':
f = a
def random_string(prefix, maxlen):
symbols = string.ascii_letters + string.digits + string.punctuation + " " * 10
return prefix + "".join([random.choice(symbols) for i in range(random.randrange(maxlen))])
testdata = [RecordFields(firstname='', lastname='', middlename='',
nickname='', company='', address='')] + [
RecordFields(firstname=random_string('firstname', 10), lastname=random_string('lastname', 10), middlename=random_string('middlename', 10),
                 nickname=random_string('nickname', 10), company=random_string('company', 10), address=random_string('address', 10))
for i in range(n)
]
file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", f)
with open(file, 'w') as out:
jsonpickle.set_encoder_options('json', indent=2)
out.write(jsonpickle.encode(testdata)) | Spasley/python | generator/record.py | Python | apache-2.0 | 1,285 | 0.005447 |
from django.apps import AppConfig
class ResearchersConfig(AppConfig):
name = 'researchers'
| luisen14/treatment-tracking-project | treatment_tracker/researchers/apps.py | Python | apache-2.0 | 97 | 0 |
#
# Copyright (C) 2017 FreeIPA Contributors. See COPYING for license
#
from __future__ import absolute_import
import binascii
import os
import psutil
import re
import subprocess
import textwrap
import pytest
from unittest.mock import patch, mock_open
from ipaplatform.paths import paths
from ipapython import ipautil
from ipapython.admintool import ScriptError
from ipaserver.install import installutils
from ipaserver.install import ipa_backup
from ipaserver.install import ipa_restore
GPG_GENKEY = textwrap.dedent("""
%echo Generating a standard key
Key-Type: RSA
Key-Length: 2048
Name-Real: IPA Backup
Name-Comment: IPA Backup
Name-Email: [email protected]
Expire-Date: 0
Passphrase: {passphrase}
%commit
%echo done
""")
@pytest.fixture
def gpgkey(request, tempdir):
passphrase = "Secret123"
gnupghome = os.path.join(tempdir, "gnupg")
os.makedirs(gnupghome, 0o700)
# provide clean env for gpg test
env = os.environ.copy()
orig_gnupghome = env.get('GNUPGHOME')
env['GNUPGHOME'] = gnupghome
env['LC_ALL'] = 'C.UTF-8'
env['LANGUAGE'] = 'C'
devnull = open(os.devnull, 'w')
# allow passing passphrases to agent
with open(os.path.join(gnupghome, "gpg-agent.conf"), 'w') as f:
f.write("verbose\n")
f.write("allow-preset-passphrase\n")
# daemonize agent (detach from the console and run in the background)
subprocess.Popen(
[paths.GPG_AGENT, '--batch', '--daemon'],
env=env, stdout=devnull, stderr=devnull
)
def fin():
if orig_gnupghome is not None:
os.environ['GNUPGHOME'] = orig_gnupghome
else:
os.environ.pop('GNUPGHOME', None)
subprocess.run(
[paths.GPG_CONF, '--kill', 'all'],
check=True,
env=env,
)
request.addfinalizer(fin)
# create public / private key pair
keygen = os.path.join(gnupghome, 'keygen')
with open(keygen, 'w') as f:
f.write(GPG_GENKEY.format(passphrase=passphrase))
subprocess.check_call(
[paths.GPG2, '--batch', '--gen-key', keygen],
env=env, stdout=devnull, stderr=devnull
)
# get keygrip of private key
out = subprocess.check_output(
[paths.GPG2, "--list-secret-keys", "--with-keygrip"],
env=env, stderr=subprocess.STDOUT
)
mo = re.search("Keygrip = ([A-Z0-9]{32,})", out.decode('utf-8'))
if mo is None:
raise ValueError(out.decode('utf-8'))
keygrip = mo.group(1)
# unlock private key
cmd = "PRESET_PASSPHRASE {} -1 {}".format(
keygrip,
binascii.hexlify(passphrase.encode('utf-8')).decode('utf-8')
)
subprocess.check_call(
[paths.GPG_CONNECT_AGENT, cmd, "/bye"],
env=env, stdout=devnull, stderr=devnull
)
# set env for the rest of the progress
os.environ['GNUPGHOME'] = gnupghome
def test_gpg_encrypt(tempdir):
src = os.path.join(tempdir, "data.txt")
encrypted = os.path.join(tempdir, "data.gpg")
decrypted = os.path.join(tempdir, "data.out")
passwd = 'Secret123'
payload = 'Dummy text\n'
with open(src, 'w') as f:
f.write(payload)
installutils.encrypt_file(src, encrypted, password=passwd)
assert os.path.isfile(encrypted)
installutils.decrypt_file(encrypted, decrypted, password=passwd)
assert os.path.isfile(decrypted)
with open(decrypted) as f:
assert f.read() == payload
with pytest.raises(ipautil.CalledProcessError):
installutils.decrypt_file(encrypted, decrypted, password='invalid')
def test_gpg_asymmetric(tempdir, gpgkey):
src = os.path.join(tempdir, "asymmetric.txt")
encrypted = src + ".gpg"
payload = 'Dummy text\n'
with open(src, 'w') as f:
f.write(payload)
ipa_backup.encrypt_file(src, remove_original=True)
assert os.path.isfile(encrypted)
assert not os.path.exists(src)
ipa_restore.decrypt_file(tempdir, encrypted)
assert os.path.isfile(src)
with open(src) as f:
assert f.read() == payload
@pytest.mark.parametrize(
"platform, expected",
[
("fedora", "fedora"),
("fedora_container", "fedora"),
("fedora_containers", "fedora_containers"),
("fedoracontainer", "fedoracontainer"),
("rhel", "rhel"),
("rhel_container", "rhel"),
]
)
def test_get_current_platform(monkeypatch, platform, expected):
monkeypatch.setattr(installutils.ipaplatform, "NAME", platform)
assert installutils.get_current_platform() == expected
# The mock_exists in the following tests mocks that the cgroups
# files exist even in non-containers. The values are provided by
# mock_open_multi.
@patch('ipaserver.install.installutils.in_container')
@patch('os.path.exists')
def test_in_container_no_cgroup(mock_exists, mock_in_container):
"""
In a container in a container without cgroups, can't detect RAM
"""
mock_in_container.return_value = True
mock_exists.side_effect = [False, False]
with pytest.raises(ScriptError):
installutils.check_available_memory(False)
def mock_open_multi(*contents):
"""Mock opening multiple files.
For our purposes the first read is limit, second is usage.
Note: this overrides *all* opens so if you use pdb then you will
need to extend the list by 2.
"""
mock_files = [
mock_open(read_data=content).return_value for content in contents
]
mock_multi = mock_open()
mock_multi.side_effect = mock_files
return mock_multi
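# e.g. mock_open_multi(RAM_OK, RAM_CA_USED) makes the first open() read the
# limit and the second read the usage, as in the decorated tests below.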
RAM_OK = str(1800 * 1000 * 1000)
RAM_CA_USED = str(150 * 1000 * 1000)
RAM_MOSTLY_USED = str(1500 * 1000 * 1000)
RAM_NOT_OK = str(10 * 1000 * 1000)
@patch('ipaserver.install.installutils.in_container')
@patch('builtins.open', mock_open_multi(RAM_NOT_OK, "0"))
@patch('os.path.exists')
def test_in_container_insufficient_ram(mock_exists, mock_in_container):
"""In a container with insufficient RAM and zero used"""
mock_in_container.return_value = True
mock_exists.side_effect = [True, True]
with pytest.raises(ScriptError):
installutils.check_available_memory(True)
@patch('ipaserver.install.installutils.in_container')
@patch('builtins.open', mock_open_multi(RAM_OK, RAM_CA_USED))
@patch('os.path.exists')
def test_in_container_ram_ok_no_ca(mock_exists, mock_in_container):
"""In a container with just enough RAM to install w/o a CA"""
mock_in_container.return_value = True
mock_exists.side_effect = [True, True]
installutils.check_available_memory(False)
@patch('ipaserver.install.installutils.in_container')
@patch('builtins.open', mock_open_multi(RAM_OK, RAM_MOSTLY_USED))
@patch('os.path.exists')
def test_in_container_insufficient_ram_with_ca(mock_exists, mock_in_container):
"""In a container and just miss the minimum RAM required"""
mock_in_container.return_value = True
mock_exists.side_effect = [True, True]
with pytest.raises(ScriptError):
installutils.check_available_memory(True)
@patch('ipaserver.install.installutils.in_container')
@patch('psutil.virtual_memory')
def test_not_container_insufficient_ram_with_ca(mock_psutil, mock_in_container):
"""Not a container and insufficient RAM"""
mock_in_container.return_value = False
fake_memory = psutil._pslinux.svmem
fake_memory.available = int(RAM_NOT_OK)
mock_psutil.return_value = fake_memory
with pytest.raises(ScriptError):
installutils.check_available_memory(True)
@patch('ipaserver.install.installutils.in_container')
@patch('psutil.virtual_memory')
def test_not_container_ram_ok(mock_psutil, mock_in_container):
"""Not a container and sufficient RAM"""
mock_in_container.return_value = False
fake_memory = psutil._pslinux.svmem
fake_memory.available = int(RAM_OK)
mock_psutil.return_value = fake_memory
installutils.check_available_memory(True)
| encukou/freeipa | ipatests/test_ipaserver/test_install/test_installutils.py | Python | gpl-3.0 | 7,877 | 0.000127 |
import requests
from bs4 import BeautifulSoup
def getMemes():
memep1URL="http://www.quickmeme.com/page/1/"
memep2URL="http://www.quickmeme.com/page/2/"
memep3URL="http://www.quickmeme.com/page/3/"
memep4URL="http://www.quickmeme.com/page/4/"
memep5URL="http://www.quickmeme.com/page/5/"
memep6URL="http://www.quickmeme.com/page/6/"
memep7URL="http://www.quickmeme.com/page/7/"
memep8URL="http://www.quickmeme.com/page/8/"
memep9URL="http://www.quickmeme.com/page/9/"
memep10URL="http://www.quickmeme.com/page/10/"
r_memep1=requests.get(memep1URL)
r_memep2=requests.get(memep2URL)
r_memep3=requests.get(memep3URL)
r_memep4=requests.get(memep4URL)
r_memep5=requests.get(memep5URL)
r_memep6=requests.get(memep6URL)
r_memep7=requests.get(memep7URL)
r_memep8=requests.get(memep8URL)
r_memep9=requests.get(memep9URL)
r_memep10=requests.get(memep10URL)
memep1Soup=BeautifulSoup(r_memep1.content, "lxml")
memep2Soup=BeautifulSoup(r_memep2.content, "lxml")
memep3Soup=BeautifulSoup(r_memep3.content, "lxml")
memep4Soup=BeautifulSoup(r_memep4.content, "lxml")
memep5Soup=BeautifulSoup(r_memep5.content, "lxml")
memep6Soup=BeautifulSoup(r_memep6.content, "lxml")
memep7Soup=BeautifulSoup(r_memep7.content, "lxml")
memep8Soup=BeautifulSoup(r_memep8.content, "lxml")
memep9Soup=BeautifulSoup(r_memep9.content, "lxml")
memep10Soup=BeautifulSoup(r_memep10.content, "lxml")
memep1Links=memep1Soup.find_all("a")
memep2Links=memep2Soup.find_all("a")
memep3Links=memep3Soup.find_all("a")
memep4Links=memep4Soup.find_all("a")
memep5Links=memep5Soup.find_all("a")
memep6Links=memep6Soup.find_all("a")
memep7Links=memep7Soup.find_all("a")
memep8Links=memep8Soup.find_all("a")
memep9Links=memep9Soup.find_all("a")
memep10Links=memep10Soup.find_all("a")
linkList=[memep1Links,memep2Links,memep3Links,memep4Links,memep5Links,memep6Links,memep7Links,
memep8Links,memep9Links,memep10Links]
memeLinks=[]
pointer=0
listLength=len(linkList)
for i in range(listLength):
for link in linkList[pointer]:
tempURL=link.get("href")
if tempURL is None:
continue
if tempURL.startswith("/p/"):
memeFix='http://www.quickmeme.com'
memeResolved=memeFix+tempURL
dictionary={}
dictionary['category']='Memes'
dictionary['url']=memeResolved
                dictionary['title']=link.get_text().replace('\n','').replace('\t','')
if dictionary['title'] and not dictionary['title'].isspace():
memeLinks.append(dictionary)
pointer+=1
return memeLinks
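# Illustrative output: getMemes() returns a list of dicts such as
#   {'category': 'Memes', 'url': 'http://www.quickmeme.com/p/<id>', 'title': '<caption>'}
# The other get* scrapers below return the same structure for their categories.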
def getPolitics():
nytimesURL="http://www.nytimes.com/pages/politics"
cnnURL="http://www.cnn.com/specials/politics/national-politics"
foxnewsURL="http://www.foxnews.com/politics"
#cbsnewsURL="http://www.cbsnews.com/politics"
abcnewsURL="http://www.abcnews.go.com/Politics/Election"
reutersnewsURL="http://www.reuters.com/politics"
bbcnewsURL="http://www.bbc.com/news/election/us2016"
#yahoonewsURL="https://www.yahoo.com/news/politics"
nbcnewsURL="http://www.nbcnews.com/politics/2016-election"
usatodaynewsURL="http://www.usatoday.com/section/global/elections-2016/"
#huffingtonpostnewsURL="http://www.huffingtonpost.com/section/politics"
timenewsURL="http://www.time.com/politics"
washingtonpostnewsURL="http://www.washingtonpost.com/politics/"
guardiannewsURL="https://www.theguardian.com/us-news/us-elections-2016"
#wsjnewsURL="http://www.wsj.com/news/politics"
#latimesnewsURL="http://www.latimes.com/politics"
nydailynewsURL="http://www.nydailynews.com/news/politics"
#chicagotribunenewsURL="http://www.chicagotribune.com/news/nationworld/politics/"
r_nytimes=requests.get(nytimesURL)
r_CNN=requests.get(cnnURL)
r_foxnews=requests.get(foxnewsURL)
#r_cbsnews=requests.get(cbsnewsURL)
r_abcnews=requests.get(abcnewsURL)
r_reutersnews=requests.get(reutersnewsURL)
r_bbcnews=requests.get(bbcnewsURL)
#r_yahoonews=requests.get(yahoonewsURL)
r_nbcnews=requests.get(nbcnewsURL)
r_usatodaynews=requests.get(usatodaynewsURL)
#r_huffingtonpostnews=requests.get(huffingtonpostnewsURL)
r_timenews=requests.get(timenewsURL)
r_washingtonpostnews=requests.get(washingtonpostnewsURL)
r_guardiannews=requests.get(guardiannewsURL)
#r_wsjnews=requests.get(wsjnewsURL)
#r_latimesnews=requests.get(latimesnewsURL)
r_nydailynews=requests.get(nydailynewsURL)
#r_chicagotribunenews=requests.get(chicagotribunenewsURL)
nytimesSoup=BeautifulSoup(r_nytimes.content, "lxml")
cnnSoup=BeautifulSoup(r_CNN.content, "lxml")
foxnewsSoup=BeautifulSoup(r_foxnews.content, "lxml")
#cbsnewsSoup=BeautifulSoup(r_cbsnews.content, "lxml")
abcnewsSoup=BeautifulSoup(r_abcnews.content, "lxml")
reutersnewsSoup=BeautifulSoup(r_reutersnews.content, "lxml")
bbcnewsSoup=BeautifulSoup(r_bbcnews.content, "lxml")
#yahoonewsSoup=BeautifulSoup(r_yahoonews.content, "lxml")
nbcnewsSoup=BeautifulSoup(r_nbcnews.content, "lxml")
usatodaynewsSoup=BeautifulSoup(r_usatodaynews.content, "lxml")
#huffingtonpostnewsSoup=BeautifulSoup(r_huffingtonpostnews.content, "lxml")
timenewsSoup=BeautifulSoup(r_timenews.content, "lxml")
washingtonpostnewsSoup=BeautifulSoup(r_washingtonpostnews.content, "lxml")
guardiannewsSoup=BeautifulSoup(r_guardiannews.content, "lxml")
#wsjnewsSoup=BeautifulSoup(r_wsjnews.content, "lxml")
#latimesnewsSoup=BeautifulSoup(r_latimesnews.content, "lxml")
nydailynewsSoup=BeautifulSoup(r_nydailynews.content, "lxml")
#chicagotribunenewsSoup=BeautifulSoup(r_chicagotribunenews.content, "lxml")
nytimesLinks=nytimesSoup.find_all("a")
cnnLinks=cnnSoup.find_all("a")
foxnewsLinks=foxnewsSoup.find_all("a")
#cbsnewsLinks=cbsnewsSoup.find_all("a")
abcnewsLinks=abcnewsSoup.find_all("a")
reutersnewsLinks=reutersnewsSoup.find_all("a")
bbcnewsLinks=bbcnewsSoup.find_all("a")
#yahoonewsLinks=yahoonewsSoup.find_all("a")
nbcnewsLinks=nbcnewsSoup.find_all("a")
usatodaynewsLinks=usatodaynewsSoup.find_all("a")
#huffingtonpostnewsLinks=huffingtonpostnewsSoup.find_all("a")
timenewsLinks=timenewsSoup.find_all("a")
washingtonpostnewsLinks=washingtonpostnewsSoup.find_all("a")
guardiannewsLinks=guardiannewsSoup.find_all("a")
#wsjnewsLinks=wsjnewsSoup.find_all("a")
#latimesnewsLinks=latimesnewsSoup.find_all("a")
nydailynewsLinks=nydailynewsSoup.find_all("a")
#chicagotribunenewsLinks=chicagotribunenewsSoup.find_all("a")
linkList=[nytimesLinks,cnnLinks,foxnewsLinks,abcnewsLinks,reutersnewsLinks,
bbcnewsLinks,nbcnewsLinks,usatodaynewsLinks,timenewsLinks,
washingtonpostnewsLinks,guardiannewsLinks,nydailynewsLinks]
politicsLinks=[]
pointer=0
listLength=len(linkList)
for i in range(listLength):
for link in linkList[pointer]:
tempURL=link.get("href")
if tempURL is None:
continue
if tempURL.startswith("http://www.nytimes.com/2016/"):
dictionary={}
dictionary['category']='Politics'
dictionary['url']=tempURL
dictionary['title']=link.get_text().replace('\n','')
if dictionary['title'] and not dictionary['title'].isspace():
politicsLinks.append(dictionary)
if tempURL.startswith("http://www.foxnews.com/politics/2016/10/"):
dictionary={}
dictionary['category']='Politics'
dictionary['url']=tempURL
dictionary['title']=link.get_text().replace('\n','')
if dictionary['title'] and not dictionary['title'].isspace():
politicsLinks.append(dictionary)
if tempURL.startswith("/2016/10/") or tempURL.startswith("/politics/2016/10/"):
cnnFix='http://www.cnn.com'
cnnResolved=cnnFix+tempURL
dictionary={}
dictionary['category']='Politics'
dictionary['url']=cnnResolved
                dictionary['title']=link.get_text().replace('\n','').replace('\t','')
if dictionary['title'] and not dictionary['title'].isspace():
politicsLinks.append(dictionary)
if tempURL.startswith('http://abcnews.go.com/Politics/'):
                dictionary={}
                dictionary['category']='Politics'
                dictionary['url']=tempURL
dictionary['title']=link.get_text().replace('\n','')
if dictionary['title'] and not dictionary['title'].isspace():
politicsLinks.append(dictionary)
if tempURL.startswith("/article/us-usa-election"):
reutersFix='http://www.reuters.com'
reutersResolved=reutersFix+tempURL
dictionary={}
dictionary['category']='Politics'
dictionary['url']=reutersResolved
dictionary['title']=link.get_text().replace('\n','')
if dictionary['title'] and not dictionary['title'].isspace():
politicsLinks.append(dictionary)
if tempURL.startswith("http://time.com/453") or tempURL.startswith("http://time.com/454"):
dictionary={}
dictionary['category']='Politics'
dictionary['url']=tempURL
dictionary['title']=link.get_text().replace('\n','')
if dictionary['title'] and not dictionary['title'].isspace():
politicsLinks.append(dictionary)
if tempURL.startswith("https://www.washingtonpost.com/news/post-politics/wp/2016/") or tempURL.startswith("https://www.washingtonpost.com/news/the-fix/wp/2016/"):
dictionary={}
dictionary['category']='Politics'
dictionary['url']=tempURL
dictionary['title']=link.get_text().replace('\n','')
if dictionary['title'] and not dictionary['title'].isspace():
politicsLinks.append(dictionary)
if tempURL.startswith("https://www.theguardian.com/us-news/2016/"):
dictionary={}
dictionary['category']='Politics'
dictionary['url']=tempURL
dictionary['title']=link.get_text().replace('\n','')
if dictionary['title'] and not dictionary['title'].isspace():
politicsLinks.append(dictionary)
if tempURL.startswith("http://www.nydailynews.com/news/politics"):
dictionary={}
dictionary['category']='Politics'
dictionary['url']=tempURL
dictionary['title']=link.get_text().replace('\n','')
if dictionary['title'] and not dictionary['title'].isspace():
politicsLinks.append(dictionary)
pointer+=1
return politicsLinks
def getBusiness():
nytimesURL="http://www.nytimes.com/pages/business"
cnnURL="http://www.money.cnn.com"
foxnewsURL="http://www.foxbusiness.com"
#cbsnewsURL="http://www.cbsnews.com/politics"
abcnewsURL="http://www.abcnews.go.com/Business"
reutersnewsURL="http://www.reuters.com/finance"
bbcnewsURL="http://www.bbc.com/news/business"
#yahoonewsURL="https://www.yahoo.com/news/politics"
nbcnewsURL="http://www.nbcnews.com/business"
usatodaynewsURL="http://www.usatoday.com/money/"
#huffingtonpostnewsURL="http://www.huffingtonpost.com/section/business"
timenewsURL="http://www.time.com/business"
washingtonpostnewsURL="https://www.washingtonpost.com/business/"
guardiannewsURL="https://www.theguardian.com/us/business"
#wsjnewsURL="http://www.wsj.com/news/business"
#latimesnewsURL="http://www.latimes.com/politics"
#nydailynewsURL="http://www.nydailynews.com/news/politics"
#chicagotribunenewsURL="http://www.chicagotribune.com/business"
r_nytimes=requests.get(nytimesURL)
r_CNN=requests.get(cnnURL)
r_foxnews=requests.get(foxnewsURL)
#r_cbsnews=requests.get(cbsnewsURL)
r_abcnews=requests.get(abcnewsURL)
r_reutersnews=requests.get(reutersnewsURL)
r_bbcnews=requests.get(bbcnewsURL)
#r_yahoonews=requests.get(yahoonewsURL)
r_nbcnews=requests.get(nbcnewsURL)
r_usatodaynews=requests.get(usatodaynewsURL)
#r_huffingtonpostnews=requests.get(huffingtonpostnewsURL)
r_timenews=requests.get(timenewsURL)
r_washingtonpostnews=requests.get(washingtonpostnewsURL)
r_guardiannews=requests.get(guardiannewsURL)
#r_wsjnews=requests.get(wsjnewsURL)
#r_latimesnews=requests.get(latimesnewsURL)
#r_nydailynews=requests.get(nydailynewsURL)
#r_chicagotribunenews=requests.get(chicagotribunenewsURL)
nytimesSoup=BeautifulSoup(r_nytimes.content, "lxml")
cnnSoup=BeautifulSoup(r_CNN.content, "lxml")
foxnewsSoup=BeautifulSoup(r_foxnews.content, "lxml")
#cbsnewsSoup=BeautifulSoup(r_cbsnews.content, "lxml")
abcnewsSoup=BeautifulSoup(r_abcnews.content, "lxml")
reutersnewsSoup=BeautifulSoup(r_reutersnews.content, "lxml")
bbcnewsSoup=BeautifulSoup(r_bbcnews.content, "lxml")
#yahoonewsSoup=BeautifulSoup(r_yahoonews.content, "lxml")
nbcnewsSoup=BeautifulSoup(r_nbcnews.content, "lxml")
usatodaynewsSoup=BeautifulSoup(r_usatodaynews.content, "lxml")
#huffingtonpostnewsSoup=BeautifulSoup(r_huffingtonpostnews.content, "lxml")
timenewsSoup=BeautifulSoup(r_timenews.content, "lxml")
washingtonpostnewsSoup=BeautifulSoup(r_washingtonpostnews.content, "lxml")
guardiannewsSoup=BeautifulSoup(r_guardiannews.content, "lxml")
#wsjnewsSoup=BeautifulSoup(r_wsjnews.content, "lxml")
#latimesnewsSoup=BeautifulSoup(r_latimesnews.content, "lxml")
#nydailynewsSoup=BeautifulSoup(r_nydailynews.content, "lxml")
#chicagotribunenewsSoup=BeautifulSoup(r_chicagotribunenews.content, "lxml")
nytimesLinks=nytimesSoup.find_all("a")
cnnLinks=cnnSoup.find_all("a")
foxnewsLinks=foxnewsSoup.find_all("a")
#cbsnewsLinks=cbsnewsSoup.find_all("a")
abcnewsLinks=abcnewsSoup.find_all("a")
reutersnewsLinks=reutersnewsSoup.find_all("a")
bbcnewsLinks=bbcnewsSoup.find_all("a")
#yahoonewsLinks=yahoonewsSoup.find_all("a")
nbcnewsLinks=nbcnewsSoup.find_all("a")
usatodaynewsLinks=usatodaynewsSoup.find_all("a")
#huffingtonpostnewsLinks=huffingtonpostnewsSoup.find_all("a")
timenewsLinks=timenewsSoup.find_all("a")
washingtonpostnewsLinks=washingtonpostnewsSoup.find_all("a")
guardiannewsLinks=guardiannewsSoup.find_all("a")
#wsjnewsLinks=wsjnewsSoup.find_all("a")
#latimesnewsLinks=latimesnewsSoup.find_all("a")
#nydailynewsLinks=nydailynewsSoup.find_all("a")
#chicagotribunenewsLinks=chicagotribunenewsSoup.find_all("a")
linkList=[nytimesLinks,cnnLinks,foxnewsLinks,abcnewsLinks,reutersnewsLinks,
bbcnewsLinks,nbcnewsLinks,usatodaynewsLinks,timenewsLinks,
washingtonpostnewsLinks,guardiannewsLinks]
businessLinks=[]
pointer=0
listLength=len(linkList)
for i in range(listLength):
for link in linkList[pointer]:
tempURL=link.get("href")
if tempURL is None:
continue
if tempURL.startswith("http://www.nytimes.com/2016/"):
dictionary={}
dictionary['category']='Business'
dictionary['url']=tempURL
dictionary['title']=link.get_text().replace('\n','')
if dictionary['title'] and not dictionary['title'].isspace():
businessLinks.append(dictionary)
if tempURL.startswith("/markets/2016") or tempURL.startswith("/politics/2016") or tempURL.startswith("/features/2016") or tempURL.startswith("/investing/2016"):
foxFix='http://www.foxbusiness.com'
foxResolve=foxFix+tempURL
dictionary={}
dictionary['category']='Business'
dictionary['url']=foxResolve
dictionary['title']=link.get_text().replace('\n','')
if dictionary['title'] and not dictionary['title'].isspace():
businessLinks.append(dictionary)
if tempURL.startswith("/2016/10/"):
cnnFix='http://www.money.cnn.com'
cnnResolved=cnnFix+tempURL
dictionary={}
dictionary['category']='Business'
dictionary['url']=cnnResolved
                dictionary['title']=link.get_text().replace('\n','').replace('\t','')
if dictionary['title'] and not dictionary['title'].isspace():
businessLinks.append(dictionary)
if tempURL.startswith('http://abcnews.go.com/Business/'):
dictionary={}
dictionary['category']='Business'
dictionary['url']=tempURL
dictionary['title']=link.get_text().replace('\n','')
if dictionary['title'] and not dictionary['title'].isspace():
businessLinks.append(dictionary)
if tempURL.startswith("/article/"):
reutersFix='http://www.reuters.com'
reutersResolved=reutersFix+tempURL
dictionary={}
dictionary['category']='Business'
dictionary['url']=reutersResolved
dictionary['title']=link.get_text().replace('\n','')
if dictionary['title'] and not dictionary['title'].isspace():
businessLinks.append(dictionary)
if tempURL.startswith("/news/business-1") or tempURL.startswith("/news/business-2") or tempURL.startswith("/news/business-3"):
bbcFix='http://www.bbc.com'
bbcResolved=bbcFix+tempURL
dictionary={}
dictionary['category']='Business'
dictionary['url']=bbcResolved
dictionary['title']=link.get_text().replace('\n','')
if dictionary['title'] and not dictionary['title'].isspace():
businessLinks.append(dictionary)
if tempURL.startswith("/business/consumer/") or tempURL.startswith("/business/personal-finance/") or tempURL.startswith("/business/economy/") or tempURL.startswith("/business/markets/"):
nbcFix='http://www.nbcnews.com'
nbcResolved=nbcFix+tempURL
dictionary={}
dictionary['category']='Business'
dictionary['url']=nbcResolved
dictionary['title']=link.get_text().replace('\n','')
if dictionary['title'] and not dictionary['title'].isspace():
businessLinks.append(dictionary)
if tempURL.startswith("/story/money/"):
usatodayFix='http://www.usatoday.com'
usatodayResolved=usatodayFix+tempURL
dictionary={}
dictionary['category']='Business'
dictionary['url']=usatodayResolved
dictionary['title']=link.get_text().replace('\n','')
if dictionary['title'] and not dictionary['title'].isspace():
businessLinks.append(dictionary)
if tempURL.startswith("http://time.com/453") or tempURL.startswith("http://time.com/454"):
dictionary={}
dictionary['category']='Business'
dictionary['url']=tempURL
dictionary['title']=link.get_text().replace('\n','')
if dictionary['title'] and not dictionary['title'].isspace():
businessLinks.append(dictionary)
if tempURL.startswith("https://www.washingtonpost.com/news/business/economy/") or tempURL.startswith("https://www.washingtonpost.com/news/on-small-business/wp/2016/")or tempURL.startswith("https://www.washingtonpost.com/news/wonk/wp/2016/") or tempURL.startswith("https://www.washingtonpost.com/news/on-leadership/wp/2016/") or tempURL.startswith("https://www.washingtonpost.com/news/get-there/wp/2016/") or tempURL.startswith("https://www.washingtonpost.com/news/the-swtich/wp/2016/") or tempURL.startswith("https://www.washingtonpost.com/news/capital-business/wp/2016/"):
dictionary={}
dictionary['category']='Business'
dictionary['url']=tempURL
dictionary['title']=link.get_text().replace('\n','')
if dictionary['title'] and not dictionary['title'].isspace():
businessLinks.append(dictionary)
if tempURL.startswith("https://www.theguardian.com/business/2016/") or tempURL.startswith("https://www.theguardian.com/us-news/2016/") or tempURL.startswith("https://www.theguardian.com/technology/2016/") or tempURL.startswith("https://www.theguardian.com/environment/2016/") or tempURL.startswith("https://www.theguardian.com/world/2016/") or tempURL.startswith("https://www.theguardian.com/sustainable-business/2016/"):
dictionary={}
dictionary['category']='Business'
dictionary['url']=tempURL
dictionary['title']=link.get_text().replace('\n','')
if dictionary['title'] and not dictionary['title'].isspace():
businessLinks.append(dictionary)
pointer+=1
return businessLinks
def getWorldNews():
nytimesURL="http://www.nytimes.com/pages/world"
cnnURL="http://www.cnn.com/world"
foxnewsURL="http://www.foxnews.com/world"
#cbsnewsURL="http://www.cbsnews.com/politics"
abcnewsURL="http://www.abcnews.go.com/International"
reutersnewsURL="http://www.reuters.com/news/world"
bbcnewsURL="http://www.bbc.com/news/world"
#yahoonewsURL="https://www.yahoo.com/news/politics"
nbcnewsURL="http://www.nbcnews.com/news/world"
usatodaynewsURL="http://www.usatoday.com/news/world"
#huffingtonpostnewsURL="http://www.huffingtonpost.com/section/business"
timenewsURL="http://www.time.com/world"
#washingtonpostnewsURL="https://www.washingtonpost.com/world/"
guardiannewsURL="https://www.theguardian.com/world"
#wsjnewsURL="http://www.wsj.com/news/business"
#latimesnewsURL="http://www.latimes.com/politics"
#nydailynewsURL="http://www.nydailynews.com/news/politics"
#chicagotribunenewsURL="http://www.chicagotribune.com/business"
r_nytimes=requests.get(nytimesURL)
r_CNN=requests.get(cnnURL)
r_foxnews=requests.get(foxnewsURL)
#r_cbsnews=requests.get(cbsnewsURL)
r_abcnews=requests.get(abcnewsURL)
r_reutersnews=requests.get(reutersnewsURL)
r_bbcnews=requests.get(bbcnewsURL)
#r_yahoonews=requests.get(yahoonewsURL)
r_nbcnews=requests.get(nbcnewsURL)
r_usatodaynews=requests.get(usatodaynewsURL)
#r_huffingtonpostnews=requests.get(huffingtonpostnewsURL)
r_timenews=requests.get(timenewsURL)
#r_washingtonpostnews=requests.get(washingtonpostnewsURL)
r_guardiannews=requests.get(guardiannewsURL)
#r_wsjnews=requests.get(wsjnewsURL)
#r_latimesnews=requests.get(latimesnewsURL)
#r_nydailynews=requests.get(nydailynewsURL)
#r_chicagotribunenews=requests.get(chicagotribunenewsURL)
nytimesSoup=BeautifulSoup(r_nytimes.content, "lxml")
cnnSoup=BeautifulSoup(r_CNN.content, "lxml")
foxnewsSoup=BeautifulSoup(r_foxnews.content, "lxml")
#cbsnewsSoup=BeautifulSoup(r_cbsnews.content, "lxml")
abcnewsSoup=BeautifulSoup(r_abcnews.content, "lxml")
reutersnewsSoup=BeautifulSoup(r_reutersnews.content, "lxml")
bbcnewsSoup=BeautifulSoup(r_bbcnews.content, "lxml")
#yahoonewsSoup=BeautifulSoup(r_yahoonews.content, "lxml")
nbcnewsSoup=BeautifulSoup(r_nbcnews.content, "lxml")
usatodaynewsSoup=BeautifulSoup(r_usatodaynews.content, "lxml")
#huffingtonpostnewsSoup=BeautifulSoup(r_huffingtonpostnews.content, "lxml")
timenewsSoup=BeautifulSoup(r_timenews.content, "lxml")
#washingtonpostnewsSoup=BeautifulSoup(r_washingtonpostnews.content, "lxml")
guardiannewsSoup=BeautifulSoup(r_guardiannews.content, "lxml")
#wsjnewsSoup=BeautifulSoup(r_wsjnews.content, "lxml")
#latimesnewsSoup=BeautifulSoup(r_latimesnews.content, "lxml")
#nydailynewsSoup=BeautifulSoup(r_nydailynews.content, "lxml")
#chicagotribunenewsSoup=BeautifulSoup(r_chicagotribunenews.content, "lxml")
nytimesLinks=nytimesSoup.find_all("a")
cnnLinks=cnnSoup.find_all("a")
foxnewsLinks=foxnewsSoup.find_all("a")
#cbsnewsLinks=cbsnewsSoup.find_all("a")
abcnewsLinks=abcnewsSoup.find_all("a")
reutersnewsLinks=reutersnewsSoup.find_all("a")
bbcnewsLinks=bbcnewsSoup.find_all("a")
#yahoonewsLinks=yahoonewsSoup.find_all("a")
nbcnewsLinks=nbcnewsSoup.find_all("a")
usatodaynewsLinks=usatodaynewsSoup.find_all("a")
#huffingtonpostnewsLinks=huffingtonpostnewsSoup.find_all("a")
timenewsLinks=timenewsSoup.find_all("a")
#washingtonpostnewsLinks=washingtonpostnewsSoup.find_all("a")
guardiannewsLinks=guardiannewsSoup.find_all("a")
#wsjnewsLinks=wsjnewsSoup.find_all("a")
#latimesnewsLinks=latimesnewsSoup.find_all("a")
#nydailynewsLinks=nydailynewsSoup.find_all("a")
#chicagotribunenewsLinks=chicagotribunenewsSoup.find_all("a")
linkList=[nytimesLinks,cnnLinks,foxnewsLinks,abcnewsLinks,reutersnewsLinks,
bbcnewsLinks,nbcnewsLinks,usatodaynewsLinks,timenewsLinks,guardiannewsLinks]
worldNewsLinks=[]
pointer=0
listLength=len(linkList)
for i in range(listLength):
for link in linkList[pointer]:
tempURL=link.get("href")
if tempURL is None:
continue
if tempURL.startswith("http://www.nytimes.com/2016/"):
dictionary={}
dictionary['category']='World_News'
dictionary['url']=tempURL
dictionary['title']=link.get_text().replace('\n','')
if dictionary['title'] and not dictionary['title'].isspace():
worldNewsLinks.append(dictionary)
if tempURL.startswith("/world/2016/10/"):
foxFix='http://www.foxnews.com'
foxResolve=foxFix+tempURL
dictionary={}
dictionary['category']='World_News'
dictionary['url']=foxResolve
dictionary['title']=link.get_text().replace('\n','')
if dictionary['title'] and not dictionary['title'].isspace():
worldNewsLinks.append(dictionary)
if tempURL.startswith("/2016/10/"):
cnnFix='http://www.cnn.com'
cnnResolved=cnnFix+tempURL
dictionary={}
dictionary['category']='World_News'
dictionary['url']=cnnResolved
                dictionary['title']=link.get_text().replace('\n','').replace('\t','')
if dictionary['title'] and not dictionary['title'].isspace():
worldNewsLinks.append(dictionary)
if tempURL.startswith("/International/wireStory"):
abcFix='http://www.abcnews.go.com'
abcResolved=abcFix+tempURL
dictionary={}
dictionary['category']='World_News'
dictionary['url']=abcResolved
                dictionary['title']=link.get_text().replace('\n','').replace('\t','')
if dictionary['title'] and not dictionary['title'].isspace():
worldNewsLinks.append(dictionary)
if tempURL.startswith("/article/") or tempURL.startswith("/video/2016/10/"):
reutersFix='http://www.reuters.com'
reutersResolved=reutersFix+tempURL
dictionary={}
dictionary['category']='World_News'
dictionary['url']=reutersResolved
dictionary['title']=link.get_text().replace('\n','')
if dictionary['title'] and not dictionary['title'].isspace():
worldNewsLinks.append(dictionary)
if tempURL.startswith("/news/world-africa") or tempURL.startswith("/news/world-europe") or tempURL.startswith("/news/world-us"):
bbcFix='http://www.bbc.com'
bbcResolved=bbcFix+tempURL
dictionary={}
dictionary['category']='World_News'
dictionary['url']=bbcResolved
dictionary['title']=link.get_text().replace('\n','')
if dictionary['title'] and not dictionary['title'].isspace():
worldNewsLinks.append(dictionary)
if tempURL.startswith("/news/world/"):
nbcFix='http://www.nbcnews.com'
nbcResolved=nbcFix+tempURL
dictionary={}
dictionary['category']='World_News'
dictionary['url']=nbcResolved
dictionary['title']=link.get_text().replace('\n','')
if dictionary['title'] and not dictionary['title'].isspace():
worldNewsLinks.append(dictionary)
if tempURL.startswith("/story/news/world/2016/10/"):
usatodayFix='http://www.usatoday.com'
usatodayResolved=usatodayFix+tempURL
dictionary={}
dictionary['category']='World_News'
dictionary['url']=usatodayResolved
dictionary['title']=link.get_text().replace('\n','')
if dictionary['title'] and not dictionary['title'].isspace():
worldNewsLinks.append(dictionary)
if tempURL.startswith("http://time.com/453") or tempURL.startswith("http://time.com/454"):
dictionary={}
dictionary['category']='World_News'
dictionary['url']=tempURL
dictionary['title']=link.get_text().replace('\n','')
if dictionary['title'] and not dictionary['title'].isspace():
worldNewsLinks.append(dictionary)
if tempURL.startswith("https://www.theguardian.com/world/2016/oct/"):
dictionary={}
dictionary['category']='World_News'
dictionary['url']=tempURL
dictionary['title']=link.get_text().replace('\n','')
if dictionary['title'] and not dictionary['title'].isspace():
worldNewsLinks.append(dictionary)
pointer+=1
return worldNewsLinks
def getUSNews():
nytimesURL="http://www.nytimes.com/section/us"
cnnURL="http://www.cnn.com/us"
foxnewsURL="http://www.foxnews.com/us"
#cbsnewsURL="http://www.cbsnews.com/politics"
abcnewsURL="http://www.abcnews.go.com/US"
reutersnewsURL="http://www.reuters.com/news/us"
bbcnewsURL="http://www.bbc.com/news/world/us_and_canada"
#yahoonewsURL="https://www.yahoo.com/news/politics"
nbcnewsURL="http://www.nbcnews.com/news/us-news"
usatodaynewsURL="http://www.usatoday.com/news/nation"
#huffingtonpostnewsURL="http://www.huffingtonpost.com/section/business"
timenewsURL="http://www.time.com/us"
#washingtonpostnewsURL="https://www.washingtonpost.com/world/"
guardiannewsURL="https://www.theguardian.com/us"
#wsjnewsURL="http://www.wsj.com/news/business"
#latimesnewsURL="http://www.latimes.com/politics"
#nydailynewsURL="http://www.nydailynews.com/news/politics"
#chicagotribunenewsURL="http://www.chicagotribune.com/business"
r_nytimes=requests.get(nytimesURL)
r_CNN=requests.get(cnnURL)
r_foxnews=requests.get(foxnewsURL)
#r_cbsnews=requests.get(cbsnewsURL)
r_abcnews=requests.get(abcnewsURL)
r_reutersnews=requests.get(reutersnewsURL)
r_bbcnews=requests.get(bbcnewsURL)
#r_yahoonews=requests.get(yahoonewsURL)
r_nbcnews=requests.get(nbcnewsURL)
r_usatodaynews=requests.get(usatodaynewsURL)
#r_huffingtonpostnews=requests.get(huffingtonpostnewsURL)
r_timenews=requests.get(timenewsURL)
#r_washingtonpostnews=requests.get(washingtonpostnewsURL)
r_guardiannews=requests.get(guardiannewsURL)
#r_wsjnews=requests.get(wsjnewsURL)
#r_latimesnews=requests.get(latimesnewsURL)
#r_nydailynews=requests.get(nydailynewsURL)
#r_chicagotribunenews=requests.get(chicagotribunenewsURL)
nytimesSoup=BeautifulSoup(r_nytimes.content, "lxml")
cnnSoup=BeautifulSoup(r_CNN.content, "lxml")
foxnewsSoup=BeautifulSoup(r_foxnews.content, "lxml")
#cbsnewsSoup=BeautifulSoup(r_cbsnews.content, "lxml")
abcnewsSoup=BeautifulSoup(r_abcnews.content, "lxml")
reutersnewsSoup=BeautifulSoup(r_reutersnews.content, "lxml")
bbcnewsSoup=BeautifulSoup(r_bbcnews.content, "lxml")
#yahoonewsSoup=BeautifulSoup(r_yahoonews.content, "lxml")
nbcnewsSoup=BeautifulSoup(r_nbcnews.content, "lxml")
usatodaynewsSoup=BeautifulSoup(r_usatodaynews.content, "lxml")
#huffingtonpostnewsSoup=BeautifulSoup(r_huffingtonpostnews.content, "lxml")
timenewsSoup=BeautifulSoup(r_timenews.content, "lxml")
#washingtonpostnewsSoup=BeautifulSoup(r_washingtonpostnews.content, "lxml")
guardiannewsSoup=BeautifulSoup(r_guardiannews.content, "lxml")
#wsjnewsSoup=BeautifulSoup(r_wsjnews.content, "lxml")
#latimesnewsSoup=BeautifulSoup(r_latimesnews.content, "lxml")
#nydailynewsSoup=BeautifulSoup(r_nydailynews.content, "lxml")
#chicagotribunenewsSoup=BeautifulSoup(r_chicagotribunenews.content, "lxml")
nytimesLinks=nytimesSoup.find_all("a")
cnnLinks=cnnSoup.find_all("a")
foxnewsLinks=foxnewsSoup.find_all("a")
#cbsnewsLinks=cbsnewsSoup.find_all("a")
abcnewsLinks=abcnewsSoup.find_all("a")
reutersnewsLinks=reutersnewsSoup.find_all("a")
bbcnewsLinks=bbcnewsSoup.find_all("a")
#yahoonewsLinks=yahoonewsSoup.find_all("a")
nbcnewsLinks=nbcnewsSoup.find_all("a")
usatodaynewsLinks=usatodaynewsSoup.find_all("a")
#huffingtonpostnewsLinks=huffingtonpostnewsSoup.find_all("a")
timenewsLinks=timenewsSoup.find_all("a")
#washingtonpostnewsLinks=washingtonpostnewsSoup.find_all("a")
guardiannewsLinks=guardiannewsSoup.find_all("a")
#wsjnewsLinks=wsjnewsSoup.find_all("a")
#latimesnewsLinks=latimesnewsSoup.find_all("a")
#nydailynewsLinks=nydailynewsSoup.find_all("a")
#chicagotribunenewsLinks=chicagotribunenewsSoup.find_all("a")
linkList=[nytimesLinks,cnnLinks,foxnewsLinks,abcnewsLinks,reutersnewsLinks,
bbcnewsLinks,nbcnewsLinks,usatodaynewsLinks,timenewsLinks,guardiannewsLinks]
USNewsLinks=[]
pointer=0
listLength=len(linkList)
for i in range(listLength):
for link in linkList[pointer]:
tempURL=link.get("href")
if tempURL is None:
continue
if tempURL.startswith("http://www.nytimes.com/2016/"):
dictionary={}
dictionary['category']='US_News'
dictionary['url']=tempURL
dictionary['title']=link.get_text().replace('\n','')
if dictionary['title'] and not dictionary['title'].isspace():
USNewsLinks.append(dictionary)
if tempURL.startswith("/us/2016/10/"):
foxFix='http://www.foxnews.com'
foxResolve=foxFix+tempURL
dictionary={}
dictionary['category']='US_News'
dictionary['url']=foxResolve
dictionary['title']=link.get_text().replace('\n','')
if dictionary['title'] and not dictionary['title'].isspace():
USNewsLinks.append(dictionary)
if tempURL.startswith("/2016/10/"):
cnnFix='http://www.cnn.com'
cnnResolved=cnnFix+tempURL
dictionary={}
dictionary['category']='US_News'
dictionary['url']=cnnResolved
                dictionary['title']=link.get_text().replace('\n','').replace('\t','')
if dictionary['title'] and not dictionary['title'].isspace():
USNewsLinks.append(dictionary)
if tempURL.startswith("/US/wireStory/"):
abcFix='http://www.abcnews.go.com'
abcResolved=abcFix+tempURL
dictionary={}
dictionary['category']='US_News'
dictionary['url']=abcResolved
                dictionary['title']=link.get_text().replace('\n','').replace('\t','')
if dictionary['title'] and not dictionary['title'].isspace():
USNewsLinks.append(dictionary)
if tempURL.startswith("/article/") or tempURL.startswith("/video/2016/10/"):
reutersFix='http://www.reuters.com'
reutersResolved=reutersFix+tempURL
dictionary={}
dictionary['category']='US_News'
dictionary['url']=reutersResolved
dictionary['title']=link.get_text().replace('\n','')
if dictionary['title'] and not dictionary['title'].isspace():
USNewsLinks.append(dictionary)
if tempURL.startswith("/news/election-us") or tempURL.startswith("/news/world-us"):
bbcFix='http://www.bbc.com'
bbcResolved=bbcFix+tempURL
dictionary={}
dictionary['category']='US_News'
dictionary['url']=bbcResolved
dictionary['title']=link.get_text().replace('\n','')
if dictionary['title'] and not dictionary['title'].isspace():
USNewsLinks.append(dictionary)
if tempURL.startswith("/news/us-news/"):
nbcFix='http://www.nbcnews.com'
nbcResolved=nbcFix+tempURL
dictionary={}
dictionary['category']='US_News'
dictionary['url']=nbcResolved
dictionary['title']=link.get_text().replace('\n','')
if dictionary['title'] and not dictionary['title'].isspace():
USNewsLinks.append(dictionary)
if tempURL.startswith("/story/news/nation-now/2016/10/"):
usatodayFix='http://www.usatoday.com'
usatodayResolved=usatodayFix+tempURL
dictionary={}
dictionary['category']='US_News'
dictionary['url']=usatodayResolved
dictionary['title']=link.get_text().replace('\n','')
if dictionary['title'] and not dictionary['title'].isspace():
USNewsLinks.append(dictionary)
if tempURL.startswith("http://time.com/453") or tempURL.startswith("http://time.com/454"):
dictionary={}
dictionary['category']='US_News'
dictionary['url']=tempURL
dictionary['title']=link.get_text().replace('\n','')
if dictionary['title'] and not dictionary['title'].isspace():
USNewsLinks.append(dictionary)
if tempURL.startswith("https://www.theguardian.com/us-news/2016/oct/"):
dictionary={}
dictionary['category']='US_News'
dictionary['url']=tempURL
dictionary['title']=link.get_text().replace('\n','')
if dictionary['title'] and not dictionary['title'].isspace():
USNewsLinks.append(dictionary)
pointer+=1
return USNewsLinks
def getEntertainment():
nytimesURL="http://www.nytimes.com/section/arts"
cnnURL="http://www.cnn.com/entertainment"
foxnewsURL="http://www.foxnews.com/entertainment"
#cbsnewsURL="http://www.cbsnews.com/politics"
abcnewsURL="http://www.abcnews.go.com/Entertainment"
reutersnewsURL="http://www.reuters.com/news/entertainment"
bbcnewsURL="http://www.bbc.com/news/entertainment_and_arts"
#yahoonewsURL="https://www.yahoo.com/news/politics"
#nbcnewsURL="http://www.nbcnews.com/pop-culture"
usatodaynewsURL="http://www.usatoday.com/life"
#huffingtonpostnewsURL="http://www.huffingtonpost.com/section/business"
timenewsURL="http://www.time.com/entertainment"
#washingtonpostnewsURL="https://www.washingtonpost.com/world/"
guardiannewsURL="https://www.theguardian.com/us/culture"
#wsjnewsURL="http://www.wsj.com/news/business"
#latimesnewsURL="http://www.latimes.com/politics"
#nydailynewsURL="http://www.nydailynews.com/news/politics"
#chicagotribunenewsURL="http://www.chicagotribune.com/business"
r_nytimes=requests.get(nytimesURL)
r_CNN=requests.get(cnnURL)
r_foxnews=requests.get(foxnewsURL)
#r_cbsnews=requests.get(cbsnewsURL)
r_abcnews=requests.get(abcnewsURL)
r_reutersnews=requests.get(reutersnewsURL)
r_bbcnews=requests.get(bbcnewsURL)
#r_yahoonews=requests.get(yahoonewsURL)
#r_nbcnews=requests.get(nbcnewsURL)
r_usatodaynews=requests.get(usatodaynewsURL)
#r_huffingtonpostnews=requests.get(huffingtonpostnewsURL)
r_timenews=requests.get(timenewsURL)
#r_washingtonpostnews=requests.get(washingtonpostnewsURL)
r_guardiannews=requests.get(guardiannewsURL)
#r_wsjnews=requests.get(wsjnewsURL)
#r_latimesnews=requests.get(latimesnewsURL)
#r_nydailynews=requests.get(nydailynewsURL)
#r_chicagotribunenews=requests.get(chicagotribunenewsURL)
nytimesSoup=BeautifulSoup(r_nytimes.content, "lxml")
cnnSoup=BeautifulSoup(r_CNN.content, "lxml")
foxnewsSoup=BeautifulSoup(r_foxnews.content, "lxml")
#cbsnewsSoup=BeautifulSoup(r_cbsnews.content, "lxml")
abcnewsSoup=BeautifulSoup(r_abcnews.content, "lxml")
reutersnewsSoup=BeautifulSoup(r_reutersnews.content, "lxml")
bbcnewsSoup=BeautifulSoup(r_bbcnews.content, "lxml")
#yahoonewsSoup=BeautifulSoup(r_yahoonews.content, "lxml")
#nbcnewsSoup=BeautifulSoup(r_nbcnews.content, "lxml")
usatodaynewsSoup=BeautifulSoup(r_usatodaynews.content, "lxml")
#huffingtonpostnewsSoup=BeautifulSoup(r_huffingtonpostnews.content, "lxml")
timenewsSoup=BeautifulSoup(r_timenews.content, "lxml")
#washingtonpostnewsSoup=BeautifulSoup(r_washingtonpostnews.content, "lxml")
guardiannewsSoup=BeautifulSoup(r_guardiannews.content, "lxml")
#wsjnewsSoup=BeautifulSoup(r_wsjnews.content, "lxml")
#latimesnewsSoup=BeautifulSoup(r_latimesnews.content, "lxml")
#nydailynewsSoup=BeautifulSoup(r_nydailynews.content, "lxml")
#chicagotribunenewsSoup=BeautifulSoup(r_chicagotribunenews.content, "lxml")
nytimesLinks=nytimesSoup.find_all("a")
cnnLinks=cnnSoup.find_all("a")
foxnewsLinks=foxnewsSoup.find_all("a")
#cbsnewsLinks=cbsnewsSoup.find_all("a")
abcnewsLinks=abcnewsSoup.find_all("a")
reutersnewsLinks=reutersnewsSoup.find_all("a")
bbcnewsLinks=bbcnewsSoup.find_all("a")
#yahoonewsLinks=yahoonewsSoup.find_all("a")
#nbcnewsLinks=nbcnewsSoup.find_all("a")
usatodaynewsLinks=usatodaynewsSoup.find_all("a")
#huffingtonpostnewsLinks=huffingtonpostnewsSoup.find_all("a")
timenewsLinks=timenewsSoup.find_all("a")
#washingtonpostnewsLinks=washingtonpostnewsSoup.find_all("a")
guardiannewsLinks=guardiannewsSoup.find_all("a")
#wsjnewsLinks=wsjnewsSoup.find_all("a")
#latimesnewsLinks=latimesnewsSoup.find_all("a")
#nydailynewsLinks=nydailynewsSoup.find_all("a")
#chicagotribunenewsLinks=chicagotribunenewsSoup.find_all("a")
linkList=[nytimesLinks,cnnLinks,foxnewsLinks,abcnewsLinks,reutersnewsLinks,
bbcnewsLinks,usatodaynewsLinks,timenewsLinks,guardiannewsLinks]
entertainmentLinks=[]
pointer=0
listLength=len(linkList)
for i in range(listLength):
for link in linkList[pointer]:
tempURL=link.get("href")
if tempURL is None:
continue
if tempURL.startswith("http://www.nytimes.com/2016/"):
dictionary={}
dictionary['category']='Entertainment'
dictionary['url']=tempURL
dictionary['title']=link.get_text().replace('\n','')
if dictionary['title'] and not dictionary['title'].isspace():
entertainmentLinks.append(dictionary)
if tempURL.startswith("/entertainment/2016/10/"):
foxFix='http://www.foxnews.com'
foxResolve=foxFix+tempURL
dictionary={}
dictionary['category']='Entertainment'
dictionary['url']=foxResolve
dictionary['title']=link.get_text().replace('\n','')
if dictionary['title'] and not dictionary['title'].isspace():
entertainmentLinks.append(dictionary)
if tempURL.startswith("/2016/10/"):
cnnFix='http://www.cnn.com'
cnnResolved=cnnFix+tempURL
dictionary={}
dictionary['category']='Entertainment'
dictionary['url']=cnnResolved
                dictionary['title']=link.get_text().replace('\n','').replace('\t','')
if dictionary['title'] and not dictionary['title'].isspace():
entertainmentLinks.append(dictionary)
if tempURL.startswith("/Entertainment/"):
abcFix='http://www.abcnews.go.com'
abcResolved=abcFix+tempURL
dictionary={}
dictionary['category']='Entertainment'
dictionary['url']=abcResolved
                dictionary['title']=link.get_text().replace('\n','').replace('\t','')
if dictionary['title'] and not dictionary['title'].isspace():
entertainmentLinks.append(dictionary)
if tempURL.startswith("/article/") or tempURL.startswith("/video/2016/10/"):
reutersFix='http://www.reuters.com'
reutersResolved=reutersFix+tempURL
dictionary={}
dictionary['category']='Entertainment'
dictionary['url']=reutersResolved
dictionary['title']=link.get_text().replace('\n','')
if dictionary['title'] and not dictionary['title'].isspace():
entertainmentLinks.append(dictionary)
if tempURL.startswith("/news/entertainment-arts"):
bbcFix='http://www.bbc.com'
bbcResolved=bbcFix+tempURL
dictionary={}
dictionary['category']='Entertainment'
dictionary['url']=bbcResolved
dictionary['title']=link.get_text().replace('\n','')
if dictionary['title'] and not dictionary['title'].isspace():
entertainmentLinks.append(dictionary)
if tempURL.startswith("/pop-culture/"):
nbcFix='http://www.nbcnews.com'
nbcResolved=nbcFix+tempURL
dictionary={}
dictionary['category']='Entertainment'
dictionary['url']=nbcResolved
dictionary['title']=link.get_text().replace('\n','')
if dictionary['title'] and not dictionary['title'].isspace():
entertainmentLinks.append(dictionary)
if tempURL.startswith("/story/life/2016/10/"):
usatodayFix='http://www.usatoday.com'
usatodayResolved=usatodayFix+tempURL
dictionary={}
dictionary['category']='Entertainment'
dictionary['url']=usatodayResolved
dictionary['title']=link.get_text().replace('\n','')
if dictionary['title'] and not dictionary['title'].isspace():
entertainmentLinks.append(dictionary)
if tempURL.startswith("http://time.com/453") or tempURL.startswith("http://time.com/454"):
dictionary={}
dictionary['category']='Entertainment'
dictionary['url']=tempURL
dictionary['title']=link.get_text().replace('\n','')
if dictionary['title'] and not dictionary['title'].isspace():
entertainmentLinks.append(dictionary)
if tempURL.startswith("https://www.theguardian.com/tv-and-radio/2016/oct/") or tempURL.startswith("https://www.theguardian.com/music/2016/oct/") or tempURL.startswith("https://www.theguardian.com/books/2016/oct/") or tempURL.startswith("https://www.theguardian.com/culture/2016/oct/"):
dictionary={}
dictionary['category']='Entertainment'
dictionary['url']=tempURL
dictionary['title']=link.get_text().replace('\n','')
if dictionary['title'] and not dictionary['title'].isspace():
entertainmentLinks.append(dictionary)
pointer+=1
return entertainmentLinks
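# Possible refactor (sketch only; the crawlers in this file do not use it): every
# category function repeats the same "turn an <a> tag into an article dict" block.
# A helper like the one below could replace those blocks; the names makeArticle
# and base are hypothetical and not part of the original code.
def makeArticle(link, category, base=''):
    # Resolve relative hrefs against an optional base URL and normalize the title
    href = link.get("href")
    if href is None:
        return None
    title = link.get_text().replace('\n', '').replace('\t', '').strip()
    if title:
        return {'category': category, 'url': base + href, 'title': title}
    return None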
def getSports():
nytimesURL="http://www.nytimes.com/section/sports"
cnnURL="http://www.bleacherrepot.com"
#foxnewsURL="http://www.foxnews.com/entertainment"
#cbsnewsURL="http://www.cbsnews.com/politics"
abcnewsURL="http://www.abcnews.go.com/Sports"
reutersnewsURL="http://www.reuters.com/news/sports"
#bbcnewsURL="http://www.bbc.com/sport"
yahoonewsURL="https://www.yahoo.com/news/politics"
nbcnewsURL="http://www.nbcsports.com/"
usatodaynewsURL="http://www.usatoday.com/sports"
#huffingtonpostnewsURL="http://www.huffingtonpost.com/section/business"
timenewsURL="http://www.time.com/sports"
#washingtonpostnewsURL="https://www.washingtonpost.com/world/"
guardiannewsURL="https://www.theguardian.com/us/sport"
#wsjnewsURL="http://www.wsj.com/news/business"
#latimesnewsURL="http://www.latimes.com/politics"
#nydailynewsURL="http://www.nydailynews.com/news/politics"
#chicagotribunenewsURL="http://www.chicagotribune.com/business"
r_nytimes=requests.get(nytimesURL)
r_CNN=requests.get(cnnURL)
#r_foxnews=requests.get(foxnewsURL)
#r_cbsnews=requests.get(cbsnewsURL)
r_abcnews=requests.get(abcnewsURL)
r_reutersnews=requests.get(reutersnewsURL)
#r_bbcnews=requests.get(bbcnewsURL)
#r_yahoonews=requests.get(yahoonewsURL)
r_nbcnews=requests.get(nbcnewsURL)
r_usatodaynews=requests.get(usatodaynewsURL)
#r_huffingtonpostnews=requests.get(huffingtonpostnewsURL)
r_timenews=requests.get(timenewsURL)
#r_washingtonpostnews=requests.get(washingtonpostnewsURL)
r_guardiannews=requests.get(guardiannewsURL)
#r_wsjnews=requests.get(wsjnewsURL)
#r_latimesnews=requests.get(latimesnewsURL)
#r_nydailynews=requests.get(nydailynewsURL)
#r_chicagotribunenews=requests.get(chicagotribunenewsURL)
nytimesSoup=BeautifulSoup(r_nytimes.content, "lxml")
cnnSoup=BeautifulSoup(r_CNN.content, "lxml")
#foxnewsSoup=BeautifulSoup(r_foxnews.content, "lxml")
#cbsnewsSoup=BeautifulSoup(r_cbsnews.content, "lxml")
abcnewsSoup=BeautifulSoup(r_abcnews.content, "lxml")
reutersnewsSoup=BeautifulSoup(r_reutersnews.content, "lxml")
#bbcnewsSoup=BeautifulSoup(r_bbcnews.content, "lxml")
#yahoonewsSoup=BeautifulSoup(r_yahoonews.content, "lxml")
nbcnewsSoup=BeautifulSoup(r_nbcnews.content, "lxml")
usatodaynewsSoup=BeautifulSoup(r_usatodaynews.content, "lxml")
#huffingtonpostnewsSoup=BeautifulSoup(r_huffingtonpostnews.content, "lxml")
timenewsSoup=BeautifulSoup(r_timenews.content, "lxml")
#washingtonpostnewsSoup=BeautifulSoup(r_washingtonpostnews.content, "lxml")
guardiannewsSoup=BeautifulSoup(r_guardiannews.content, "lxml")
#wsjnewsSoup=BeautifulSoup(r_wsjnews.content, "lxml")
#latimesnewsSoup=BeautifulSoup(r_latimesnews.content, "lxml")
#nydailynewsSoup=BeautifulSoup(r_nydailynews.content, "lxml")
#chicagotribunenewsSoup=BeautifulSoup(r_chicagotribunenews.content, "lxml")
nytimesLinks=nytimesSoup.find_all("a")
cnnLinks=cnnSoup.find_all("a")
#foxnewsLinks=foxnewsSoup.find_all("a")
#cbsnewsLinks=cbsnewsSoup.find_all("a")
abcnewsLinks=abcnewsSoup.find_all("a")
reutersnewsLinks=reutersnewsSoup.find_all("a")
#bbcnewsLinks=bbcnewsSoup.find_all("a")
#yahoonewsLinks=yahoonewsSoup.find_all("a")
nbcnewsLinks=nbcnewsSoup.find_all("a")
usatodaynewsLinks=usatodaynewsSoup.find_all("a")
#huffingtonpostnewsLinks=huffingtonpostnewsSoup.find_all("a")
timenewsLinks=timenewsSoup.find_all("a")
#washingtonpostnewsLinks=washingtonpostnewsSoup.find_all("a")
guardiannewsLinks=guardiannewsSoup.find_all("a")
#wsjnewsLinks=wsjnewsSoup.find_all("a")
#latimesnewsLinks=latimesnewsSoup.find_all("a")
#nydailynewsLinks=nydailynewsSoup.find_all("a")
#chicagotribunenewsLinks=chicagotribunenewsSoup.find_all("a")
linkList=[nytimesLinks,cnnLinks,abcnewsLinks,reutersnewsLinks,
nbcnewsLinks,usatodaynewsLinks,timenewsLinks,guardiannewsLinks]
sportsLinks=[]
pointer=0
listLength=len(linkList)
for i in range(listLength):
for link in linkList[pointer]:
tempURL=link.get("href")
if tempURL is None:
continue
if tempURL.startswith("http://www.nytimes.com/2016/"):
dictionary={}
dictionary['category']='Sports'
dictionary['url']=tempURL
dictionary['title']=link.get_text().replace('\n','')
if dictionary['title'] and not dictionary['title'].isspace():
sportsLinks.append(dictionary)
if tempURL.startswith("/articles/267"):
cnnFix='http://www.bleacherreport.com'
cnnResolved=cnnFix+tempURL
dictionary={}
dictionary['category']='Sports'
dictionary['url']=cnnResolved
                dictionary['title']=link.get_text().replace('\n','').replace('\t','')
if dictionary['title'] and not dictionary['title'].isspace():
sportsLinks.append(dictionary)
if tempURL.startswith("/Sports/wireStory/"):
abcFix='http://www.abcnews.go.com'
abcResolved=abcFix+tempURL
dictionary={}
dictionary['category']='Sports'
dictionary['url']=abcResolved
                dictionary['title']=link.get_text().replace('\n','').replace('\t','')
if dictionary['title'] and not dictionary['title'].isspace():
sportsLinks.append(dictionary)
if tempURL.startswith("/article/"):
reutersFix='http://www.reuters.com'
reutersResolved=reutersFix+tempURL
dictionary={}
dictionary['category']='Sports'
dictionary['url']=reutersResolved
dictionary['title']=link.get_text().replace('\n','')
if dictionary['title'] and not dictionary['title'].isspace():
sportsLinks.append(dictionary)
if tempURL.startswith("/news/entertainment-arts"):
bbcFix='http://www.bbc.com'
bbcResolved=bbcFix+tempURL
dictionary={}
dictionary['category']='Sports'
dictionary['url']=bbcResolved
dictionary['title']=link.get_text().replace('\n','')
if dictionary['title'] and not dictionary['title'].isspace():
sportsLinks.append(dictionary)
if tempURL.startswith("http://nhl.nbcsports.com/2016/10/") or tempURL.startswith("http://mlb.nbcsports.com/2016/10/") or tempURL.startswith("http://nascar.nbcsports.com/2016/10/") or tempURL.startswith("http://nhl.nbcsports.com/2016/10/") or tempURL.startswith("http://soccer.nbcsports.com/2016/10/") or tempURL.startswith("http://nba.nbcsports.com/2016/10/"):
nbcFix='http://www.nbcsports.com'
nbcResolved=nbcFix+tempURL
dictionary={}
dictionary['category']='Sports'
dictionary['url']=nbcResolved
dictionary['title']=link.get_text().replace('\n','')
if dictionary['title'] and not dictionary['title'].isspace():
sportsLinks.append(dictionary)
if tempURL.startswith("/story/sports/mlb/2016/10/") or tempURL.startswith("/story/sports/ncaaf/2016/10/") or tempURL.startswith("/story/sports/nfl/2016/10/") or tempURL.startswith("/story/sports/nba/2016/10/") or tempURL.startswith("/story/sports/nascar/2016/10/"):
usatodayFix='http://www.usatoday.com'
usatodayResolved=usatodayFix+tempURL
dictionary={}
dictionary['category']='Sports'
dictionary['url']=usatodayResolved
dictionary['title']=link.get_text().replace('\n','')
if dictionary['title'] and not dictionary['title'].isspace():
sportsLinks.append(dictionary)
if tempURL.startswith("http://time.com/453") or tempURL.startswith("http://time.com/454"):
dictionary={}
dictionary['category']='Sports'
dictionary['url']=tempURL
dictionary['title']=link.get_text().replace('\n','')
if dictionary['title'] and not dictionary['title'].isspace():
sportsLinks.append(dictionary)
if tempURL.startswith("https://www.theguardian.com/sport/2016/oct/"):
dictionary={}
dictionary['category']='Sports'
dictionary['url']=tempURL
dictionary['title']=link.get_text().replace('\n','')
if dictionary['title'] and not dictionary['title'].isspace():
sportsLinks.append(dictionary)
pointer+=1
return sportsLinks
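# getTech() follows the same scrape-and-filter pattern as the functions above,
# applied to each outlet's technology section.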
def getTech():
nytimesURL="http://www.nytimes.com/pages/technology"
cnnURL="http://www.money.cnn.com/technology"
foxnewsURL="http://www.foxnews.com/tech"
#cbsnewsURL="http://www.cbsnews.com/politics"
abcnewsURL="http://www.abcnews.go.com/Technology"
reutersnewsURL="http://www.reuters.com/news/technology"
bbcnewsURL="http://www.bbc.com/news/technology"
#yahoonewsURL="https://www.yahoo.com/news/politics"
#nbcnewsURL="http://www.nbcnews.com/tech"
usatodaynewsURL="http://www.usatoday.com/tech"
#huffingtonpostnewsURL="http://www.huffingtonpost.com/section/business"
timenewsURL="http://www.time.com/tech"
#washingtonpostnewsURL="https://www.washingtonpost.com/world/"
guardiannewsURL="https://www.theguardian.com/us/technology"
#wsjnewsURL="http://www.wsj.com/news/business"
#latimesnewsURL="http://www.latimes.com/politics"
#nydailynewsURL="http://www.nydailynews.com/news/politics"
#chicagotribunenewsURL="http://www.chicagotribune.com/business"
r_nytimes=requests.get(nytimesURL)
r_CNN=requests.get(cnnURL)
r_foxnews=requests.get(foxnewsURL)
#r_cbsnews=requests.get(cbsnewsURL)
r_abcnews=requests.get(abcnewsURL)
r_reutersnews=requests.get(reutersnewsURL)
r_bbcnews=requests.get(bbcnewsURL)
#r_yahoonews=requests.get(yahoonewsURL)
#r_nbcnews=requests.get(nbcnewsURL)
r_usatodaynews=requests.get(usatodaynewsURL)
#r_huffingtonpostnews=requests.get(huffingtonpostnewsURL)
r_timenews=requests.get(timenewsURL)
#r_washingtonpostnews=requests.get(washingtonpostnewsURL)
r_guardiannews=requests.get(guardiannewsURL)
#r_wsjnews=requests.get(wsjnewsURL)
#r_latimesnews=requests.get(latimesnewsURL)
#r_nydailynews=requests.get(nydailynewsURL)
#r_chicagotribunenews=requests.get(chicagotribunenewsURL)
nytimesSoup=BeautifulSoup(r_nytimes.content, "lxml")
cnnSoup=BeautifulSoup(r_CNN.content, "lxml")
foxnewsSoup=BeautifulSoup(r_foxnews.content, "lxml")
#cbsnewsSoup=BeautifulSoup(r_cbsnews.content, "lxml")
abcnewsSoup=BeautifulSoup(r_abcnews.content, "lxml")
reutersnewsSoup=BeautifulSoup(r_reutersnews.content, "lxml")
bbcnewsSoup=BeautifulSoup(r_bbcnews.content, "lxml")
#yahoonewsSoup=BeautifulSoup(r_yahoonews.content, "lxml")
#nbcnewsSoup=BeautifulSoup(r_nbcnews.content, "lxml")
usatodaynewsSoup=BeautifulSoup(r_usatodaynews.content, "lxml")
#huffingtonpostnewsSoup=BeautifulSoup(r_huffingtonpostnews.content, "lxml")
timenewsSoup=BeautifulSoup(r_timenews.content, "lxml")
#washingtonpostnewsSoup=BeautifulSoup(r_washingtonpostnews.content, "lxml")
guardiannewsSoup=BeautifulSoup(r_guardiannews.content, "lxml")
#wsjnewsSoup=BeautifulSoup(r_wsjnews.content, "lxml")
#latimesnewsSoup=BeautifulSoup(r_latimesnews.content, "lxml")
#nydailynewsSoup=BeautifulSoup(r_nydailynews.content, "lxml")
#chicagotribunenewsSoup=BeautifulSoup(r_chicagotribunenews.content, "lxml")
nytimesLinks=nytimesSoup.find_all("a")
cnnLinks=cnnSoup.find_all("a")
foxnewsLinks=foxnewsSoup.find_all("a")
#cbsnewsLinks=cbsnewsSoup.find_all("a")
abcnewsLinks=abcnewsSoup.find_all("a")
reutersnewsLinks=reutersnewsSoup.find_all("a")
bbcnewsLinks=bbcnewsSoup.find_all("a")
#yahoonewsLinks=yahoonewsSoup.find_all("a")
#nbcnewsLinks=nbcnewsSoup.find_all("a")
usatodaynewsLinks=usatodaynewsSoup.find_all("a")
#huffingtonpostnewsLinks=huffingtonpostnewsSoup.find_all("a")
timenewsLinks=timenewsSoup.find_all("a")
#washingtonpostnewsLinks=washingtonpostnewsSoup.find_all("a")
guardiannewsLinks=guardiannewsSoup.find_all("a")
#wsjnewsLinks=wsjnewsSoup.find_all("a")
#latimesnewsLinks=latimesnewsSoup.find_all("a")
#nydailynewsLinks=nydailynewsSoup.find_all("a")
#chicagotribunenewsLinks=chicagotribunenewsSoup.find_all("a")
linkList=[nytimesLinks,cnnLinks,foxnewsLinks,abcnewsLinks,reutersnewsLinks,
bbcnewsLinks,usatodaynewsLinks,timenewsLinks,guardiannewsLinks]
techLinks=[]
pointer=0
listLength=len(linkList)
for i in range(listLength):
for link in linkList[pointer]:
tempURL=link.get("href")
if tempURL is None:
continue
if tempURL.startswith("http://www.nytimes.com/2016/"):
dictionary={}
dictionary['category']='Technology'
dictionary['url']=tempURL
dictionary['title']=link.get_text().replace('\n','')
if dictionary['title'] and not dictionary['title'].isspace():
techLinks.append(dictionary)
if tempURL.startswith("http://www.foxnews.com/tech/2016/10/"):
foxFix='http://www.foxnews.com'
foxResolve=foxFix+tempURL
dictionary={}
dictionary['category']='Technology'
dictionary['url']=foxResolve
dictionary['title']=link.get_text().replace('\n','')
if dictionary['title'] and not dictionary['title'].isspace():
techLinks.append(dictionary)
if tempURL.startswith("/2016/10/"):
cnnFix='http://www.money.cnn.com'
cnnResolved=cnnFix+tempURL
dictionary={}
dictionary['category']='Technology'
dictionary['url']=cnnResolved
                dictionary['title']=link.get_text().replace('\n','').replace('\t','')
if dictionary['title'] and not dictionary['title'].isspace():
techLinks.append(dictionary)
if tempURL.startswith("/Technology/wireStory/"):
abcFix='http://www.abcnews.go.com'
abcResolved=abcFix+tempURL
dictionary={}
dictionary['category']='Technology'
dictionary['url']=abcResolved
                dictionary['title']=link.get_text().replace('\n','').replace('\t','')
if dictionary['title'] and not dictionary['title'].isspace():
techLinks.append(dictionary)
if tempURL.startswith("/article/") or tempURL.startswith("/video/2016/10/"):
reutersFix='http://www.reuters.com'
reutersResolved=reutersFix+tempURL
dictionary={}
dictionary['category']='Technology'
dictionary['url']=reutersResolved
dictionary['title']=link.get_text().replace('\n','')
if dictionary['title'] and not dictionary['title'].isspace():
techLinks.append(dictionary)
if tempURL.startswith("/news/technology-"):
bbcFix='http://www.bbc.com'
bbcResolved=bbcFix+tempURL
dictionary={}
dictionary['category']='Technology'
dictionary['url']=bbcResolved
dictionary['title']=link.get_text().replace('\n','')
if dictionary['title'] and not dictionary['title'].isspace():
techLinks.append(dictionary)
if tempURL.startswith("/story/tech/2016/10/") or tempURL.startswith("/story/tech/news/2016/10/"):
usatodayFix='http://www.usatoday.com'
usatodayResolved=usatodayFix+tempURL
dictionary={}
dictionary['category']='Technology'
dictionary['url']=usatodayResolved
dictionary['title']=link.get_text().replace('\n','')
if dictionary['title'] and not dictionary['title'].isspace():
techLinks.append(dictionary)
if tempURL.startswith("http://time.com/453") or tempURL.startswith("http://time.com/454"):
dictionary={}
dictionary['category']='Technology'
dictionary['url']=tempURL
dictionary['title']=link.get_text().replace('\n','')
if dictionary['title'] and not dictionary['title'].isspace():
techLinks.append(dictionary)
if tempURL.startswith("https://www.theguardian.com/technology/2016/oct/"):
dictionary={}
dictionary['category']='Technology'
dictionary['url']=tempURL
dictionary['title']=link.get_text().replace('\n','')
if dictionary['title'] and not dictionary['title'].isspace():
techLinks.append(dictionary)
pointer+=1
return techLinks
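# Module-level smoke test: run each crawler once; the returned link lists are discarded here.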
getPolitics()
getBusiness()
getMemes()
getWorldNews()
getUSNews()
getEntertainment()
getSports()
getTech()
| Jolopy/MizzyChan | mizzychan/app/crawl/rucrawler.py | Python | gpl-3.0 | 67,999 | 0.018721 |
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2018 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import (absolute_import, division, print_function)
import time
from ansible.module_utils.connection import Connection
checkpoint_argument_spec_for_objects = dict(
auto_publish_session=dict(type='bool'),
wait_for_task=dict(type='bool', default=True),
state=dict(type='str', choices=['present', 'absent'], default='present'),
version=dict(type='str')
)
checkpoint_argument_spec_for_facts = dict(
version=dict(type='str')
)
checkpoint_argument_spec_for_commands = dict(
wait_for_task=dict(type='bool', default=True),
version=dict(type='str')
)
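# Typical usage (illustrative sketch, not part of this utility file): a module merges
# one of the argument specs above with its own fields, e.g.
#   argument_spec = dict(name=dict(type='str', required=True))
#   argument_spec.update(checkpoint_argument_spec_for_objects)
#   module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)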
delete_params = ['name', 'uid', 'layer', 'exception-group-name', 'rule-name']
# send the request to checkpoint
def send_request(connection, version, url, payload=None):
code, response = connection.send_request('/web_api/' + version + url, payload)
return code, response
# get the payload from the user parameters
def is_checkpoint_param(parameter):
if parameter == 'auto_publish_session' or \
parameter == 'state' or \
parameter == 'wait_for_task' or \
parameter == 'version':
return False
return True
# build the payload from the parameters that have a value (not None) and are also Check Point API parameters
def get_payload_from_parameters(params):
payload = {}
for parameter in params:
parameter_value = params[parameter]
if parameter_value and is_checkpoint_param(parameter):
if isinstance(parameter_value, dict):
payload[parameter.replace("_", "-")] = get_payload_from_parameters(parameter_value)
elif isinstance(parameter_value, list) and len(parameter_value) != 0 and isinstance(parameter_value[0], dict):
payload_list = []
for element_dict in parameter_value:
payload_list.append(get_payload_from_parameters(element_dict))
payload[parameter.replace("_", "-")] = payload_list
else:
payload[parameter.replace("_", "-")] = parameter_value
return payload
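# Example (illustrative): {'ip_address': '192.0.2.1', 'state': 'present', 'comments': None}
# becomes {'ip-address': '192.0.2.1'} -- parameters without a value and control parameters
# such as 'state' are filtered out, and underscores become hyphens to match the
# Check Point Web API field names.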
# wait for task
def wait_for_task(module, version, connection, task_id):
task_id_payload = {'task-id': task_id}
task_complete = False
current_iteration = 0
max_num_iterations = 300
# As long as there is a task in progress
while not task_complete and current_iteration < max_num_iterations:
current_iteration += 1
# Check the status of the task
code, response = send_request(connection, version, 'show-task', task_id_payload)
attempts_counter = 0
while code != 200:
if attempts_counter < 5:
attempts_counter += 1
time.sleep(2)
code, response = send_request(connection, version, 'show-task', task_id_payload)
else:
response['message'] = "ERROR: Failed to handle asynchronous tasks as synchronous, tasks result is" \
" undefined.\n" + response['message']
module.fail_json(msg=response)
# Count the number of tasks that are not in-progress
completed_tasks = 0
for task in response['tasks']:
if task['status'] == 'failed':
module.fail_json(msg='Task {0} with task id {1} failed. Look at the logs for more details'
.format(task['task-name'], task['task-id']))
if task['status'] == 'in progress':
break
completed_tasks += 1
# Are we done? check if all tasks are completed
if completed_tasks == len(response["tasks"]):
task_complete = True
else:
time.sleep(2) # Wait for two seconds
if not task_complete:
module.fail_json(msg="ERROR: Timeout.\nTask-id: {0}.".format(task_id_payload['task-id']))
# handle the publish command, and wait for it to end if the user asked for that
def handle_publish(module, connection, version):
if module.params['auto_publish_session']:
publish_code, publish_response = send_request(connection, version, 'publish')
if publish_code != 200:
module.fail_json(msg=publish_response)
if module.params['wait_for_task']:
wait_for_task(module, version, connection, publish_response['task-id'])
# handle a command
def api_command(module, command):
payload = get_payload_from_parameters(module.params)
connection = Connection(module._socket_path)
    # if the user specified a version, add it to the url
version = ('v' + module.params['version'] + '/') if module.params.get('version') else ''
code, response = send_request(connection, version, command, payload)
result = {'changed': True}
if code == 200:
if module.params['wait_for_task']:
if 'task-id' in response:
wait_for_task(module, version, connection, response['task-id'])
elif 'tasks' in response:
for task_id in response['tasks']:
wait_for_task(module, version, connection, task_id)
result[command] = response
else:
module.fail_json(msg='Checkpoint device returned error {0} with message {1}'.format(code, response))
return result
# handle api call facts
def api_call_facts(module, api_call_object, api_call_object_plural_version):
payload = get_payload_from_parameters(module.params)
connection = Connection(module._socket_path)
    # if the user specified a version, add it to the url
version = ('v' + module.params['version'] + '/') if module.params['version'] else ''
# if there is neither name nor uid, the API command will be in plural version (e.g. show-hosts instead of show-host)
if payload.get("name") is None and payload.get("uid") is None:
api_call_object = api_call_object_plural_version
code, response = send_request(connection, version, 'show-' + api_call_object, payload)
if code != 200:
module.fail_json(msg='Checkpoint device returned error {0} with message {1}'.format(code, response))
result = {api_call_object: response}
return result
# handle api call
def api_call(module, api_call_object):
payload = get_payload_from_parameters(module.params)
connection = Connection(module._socket_path)
result = {'changed': False}
if module.check_mode:
return result
    # if the user specified a version, add it to the url
version = ('v' + module.params['version'] + '/') if module.params.get('version') else ''
payload_for_equals = {'type': api_call_object, 'params': payload}
equals_code, equals_response = send_request(connection, version, 'equals', payload_for_equals)
result['checkpoint_session_uid'] = connection.get_session_uid()
# if code is 400 (bad request) or 500 (internal error) - fail
if equals_code == 400 or equals_code == 500:
module.fail_json(msg=equals_response)
if equals_code == 404 and equals_response['code'] == 'generic_err_command_not_found':
module.fail_json(msg='Relevant hotfix is not installed on Check Point server. See sk114661 on Check Point Support Center.')
if module.params['state'] == 'present':
if equals_code == 200:
if not equals_response['equals']:
code, response = send_request(connection, version, 'set-' + api_call_object, payload)
if code != 200:
module.fail_json(msg=response)
handle_publish(module, connection, version)
result['changed'] = True
result[api_call_object] = response
else:
                # objects are equal, so no set request is needed
pass
elif equals_code == 404:
code, response = send_request(connection, version, 'add-' + api_call_object, payload)
if code != 200:
module.fail_json(msg=response)
handle_publish(module, connection, version)
result['changed'] = True
result[api_call_object] = response
elif module.params['state'] == 'absent':
if equals_code == 200:
payload_for_delete = get_copy_payload_with_some_params(payload, delete_params)
code, response = send_request(connection, version, 'delete-' + api_call_object, payload_for_delete)
if code != 200:
module.fail_json(msg=response)
handle_publish(module, connection, version)
result['changed'] = True
elif equals_code == 404:
            # no need to delete because the object does not exist
pass
return result
# get the position in integer format
def get_number_from_position(payload, connection, version):
if 'position' in payload:
position = payload['position']
else:
return None
    # This code is relevant if we decide to support 'top' and 'bottom' positions
# position_number = None
# # if position is not int, convert it to int. There are several cases: "top"
# if position == 'top':
# position_number = 1
# elif position == 'bottom':
# payload_for_show_access_rulebase = {'name': payload['layer'], 'limit': 0}
# code, response = send_request(connection, version, 'show-access-rulebase', payload_for_show_access_rulebase)
# position_number = response['total']
# elif isinstance(position, str):
# # here position is a number in format str (e.g. "5" and not 5)
# position_number = int(position)
# else:
# # here position suppose to be int
# position_number = position
#
# return position_number
return int(position)
# check whether the 'position' param (if the user provided one) matches between the existing object and the user input
def is_equals_with_position_param(payload, connection, version, api_call_object):
position_number = get_number_from_position(payload, connection, version)
    # if there is no position param, it is vacuously equal
if position_number is None:
return True
payload_for_show_access_rulebase = {'name': payload['layer'], 'offset': position_number - 1, 'limit': 1}
rulebase_command = 'show-' + api_call_object.split('-')[0] + '-rulebase'
    # for threat-exception, the payload and the command differ slightly
if api_call_object == 'threat-exception':
payload_for_show_access_rulebase['rule-name'] = payload['rule-name']
rulebase_command = 'show-threat-rule-exception-rulebase'
code, response = send_request(connection, version, rulebase_command, payload_for_show_access_rulebase)
    # if true, there is no rule at the position the user specified, so return False; when we later try to set
    # the rule, the API server will raise the relevant error
if response['total'] < position_number:
return False
rule = response['rulebase'][0]
while 'rulebase' in rule:
rule = rule['rulebase'][0]
    # if the names of the existing rule and the user input rule are equal, their positions are equal, so return
    # True; there cannot be another rule with this name, otherwise the 'equals' command would have failed
if rule['name'] == payload['name']:
return True
else:
return False
# get copy of the payload without some of the params
def get_copy_payload_without_some_params(payload, params_to_remove):
copy_payload = dict(payload)
for param in params_to_remove:
if param in copy_payload:
del copy_payload[param]
return copy_payload
# get copy of the payload with only some of the params
def get_copy_payload_with_some_params(payload, params_to_insert):
copy_payload = {}
for param in params_to_insert:
if param in payload:
copy_payload[param] = payload[param]
return copy_payload
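# Example (illustrative): get_copy_payload_with_some_params({'name': 'h1', 'color': 'red'}, delete_params)
# returns {'name': 'h1'} -- only the identifying fields needed for a delete request are kept.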
# check equality across all params, including action and position
def is_equals_with_all_params(payload, connection, version, api_call_object, is_access_rule):
if is_access_rule and 'action' in payload:
payload_for_show = get_copy_payload_with_some_params(payload, ['name', 'uid', 'layer'])
code, response = send_request(connection, version, 'show-' + api_call_object, payload_for_show)
exist_action = response['action']['name']
if exist_action != payload['action']:
return False
if not is_equals_with_position_param(payload, connection, version, api_call_object):
return False
return True
# handle api call for rule
def api_call_for_rule(module, api_call_object):
    is_access_rule = 'access' in api_call_object
payload = get_payload_from_parameters(module.params)
connection = Connection(module._socket_path)
result = {'changed': False}
if module.check_mode:
return result
    # if the user specified a version, add it to the url
version = ('v' + module.params['version'] + '/') if module.params.get('version') else ''
if is_access_rule:
copy_payload_without_some_params = get_copy_payload_without_some_params(payload, ['action', 'position'])
else:
copy_payload_without_some_params = get_copy_payload_without_some_params(payload, ['position'])
payload_for_equals = {'type': api_call_object, 'params': copy_payload_without_some_params}
equals_code, equals_response = send_request(connection, version, 'equals', payload_for_equals)
result['checkpoint_session_uid'] = connection.get_session_uid()
# if code is 400 (bad request) or 500 (internal error) - fail
if equals_code == 400 or equals_code == 500:
module.fail_json(msg=equals_response)
if equals_code == 404 and equals_response['code'] == 'generic_err_command_not_found':
module.fail_json(msg='Relevant hotfix is not installed on Check Point server. See sk114661 on Check Point Support Center.')
if module.params['state'] == 'present':
if equals_code == 200:
if equals_response['equals']:
if not is_equals_with_all_params(payload, connection, version, api_call_object, is_access_rule):
equals_response['equals'] = False
if not equals_response['equals']:
                # if the user passed 'position' and a 'set' command is needed, rename the param to 'new-position'
if 'position' in payload:
payload['new-position'] = payload['position']
del payload['position']
code, response = send_request(connection, version, 'set-' + api_call_object, payload)
if code != 200:
module.fail_json(msg=response)
handle_publish(module, connection, version)
result['changed'] = True
result[api_call_object] = response
else:
                # objects are equal, so no set request is needed
pass
elif equals_code == 404:
code, response = send_request(connection, version, 'add-' + api_call_object, payload)
if code != 200:
module.fail_json(msg=response)
handle_publish(module, connection, version)
result['changed'] = True
result[api_call_object] = response
elif module.params['state'] == 'absent':
if equals_code == 200:
payload_for_delete = get_copy_payload_with_some_params(payload, delete_params)
code, response = send_request(connection, version, 'delete-' + api_call_object, payload_for_delete)
if code != 200:
module.fail_json(msg=response)
handle_publish(module, connection, version)
result['changed'] = True
elif equals_code == 404:
            # no need to delete because the object does not exist
pass
return result
# handle api call facts for rule
def api_call_facts_for_rule(module, api_call_object, api_call_object_plural_version):
payload = get_payload_from_parameters(module.params)
connection = Connection(module._socket_path)
    # if the user specified a version, add it to the url
version = ('v' + module.params['version'] + '/') if module.params['version'] else ''
# if there is neither name nor uid, the API command will be in plural version (e.g. show-hosts instead of show-host)
if payload.get("layer") is None:
api_call_object = api_call_object_plural_version
code, response = send_request(connection, version, 'show-' + api_call_object, payload)
if code != 200:
module.fail_json(msg='Checkpoint device returned error {0} with message {1}'.format(code, response))
result = {api_call_object: response}
return result
# The code from here to EOF will be deprecated when Rikis' modules are deprecated
checkpoint_argument_spec = dict(auto_publish_session=dict(type='bool', default=True),
policy_package=dict(type='str', default='standard'),
auto_install_policy=dict(type='bool', default=True),
targets=dict(type='list')
)
def publish(connection, uid=None):
payload = None
if uid:
payload = {'uid': uid}
connection.send_request('/web_api/publish', payload)
def discard(connection, uid=None):
payload = None
if uid:
payload = {'uid': uid}
connection.send_request('/web_api/discard', payload)
def install_policy(connection, policy_package, targets):
payload = {'policy-package': policy_package,
'targets': targets}
connection.send_request('/web_api/install-policy', payload)
| thaim/ansible | lib/ansible/module_utils/network/checkpoint/checkpoint.py | Python | mit | 19,463 | 0.003853 |
# -*- coding: utf-8 -*-
"""
pidSim.py
A simulation of a vision control to steering PID loop accounting for communication and
processing latency and variation; demonstrates the impact of variation
to successful control when the control variable (CV) has direct influence on
the process variable (PV)
This allows students to experiment with how different elements in the scaling
of a control loop affect performance, this focusing efforts on successful
design.
The model consists of a PID processing software with an asynchronous alignment
with a camera frame which is also asynchronous to image processing software.
Communication latency and jitter are planned as well as image processing impacts.
A plot at the end shows a sample over some specified duration.
The initial conditions of the file represents a case that won't work well until
it is correct by improvements in the constants and image processing rates
Copyright (c) 2016 - RocketRedNeck.com RocketRedNeck.net
RocketRedNeck and MIT Licenses
RocketRedNeck hereby grants license for others to copy and modify this source code for
whatever purpose others deem worthy as long as RocketRedNeck is given credit
where credit is due and you leave RocketRedNeck out of it for all other nefarious purposes.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
****************************************************************************************************
"""
import matplotlib.pyplot as plot
import numpy as np
tmax_sec = 5.0
dt_sec = 0.001
ts_sec = np.arange(0.0, tmax_sec, dt_sec)
nmax = len(ts_sec)  # equivalent to round(tmax_sec/dt_sec)
ns = range(0, nmax)
kp = 0.3 # Proportional gain
ki = 0.03 # Integral gain
kd = 0.0 # Derivative gain
kg = 1.0 # Plant (Process) gain
tau_sec = 0.1
sp = np.zeros(nmax) # Will initialize after first image processed
err = np.zeros(nmax)
intErr = np.zeros(nmax)
derrdt = np.zeros(nmax)
lastErr = 0.0
G = np.zeros(nmax) # Process output to be measured
exp = np.exp(-dt_sec/tau_sec)
# Model of the pid task via a java util.timer
# We add a random normal variation for task wakeup since the util.timer
# can only assure that the task wakes up no earlier than scheduled.
# Empirical measurement of the task latency is required for accurate
# modeling, but for now we can just assume about a 10% average
pidPeriod_sec = 0.02
pidPeriod_index = round(pidPeriod_sec / dt_sec)
pidStart_index = 0 # "time" that PID computation started
pidDuration_sec = 0.001 # Time to complete PID calculation (models software latency)
pidDuration_index = round(pidDuration_sec / dt_sec)
pidEnd_index = pidStart_index + pidDuration_index # "time" that PID computation ended
pidMinJitter_sec = 0.000 # Minimum Random task jitter
pidMinJitter_index = round(pidMinJitter_sec / dt_sec)
pidMaxJitter_sec = 0.000 # Maximum Random task jitter
pidMaxJitter_index = round(pidMaxJitter_sec / dt_sec)
pidMeanJitter_index = round((pidMaxJitter_index + pidMinJitter_index)/2)
pidStdDevJitter_index = round((pidMaxJitter_index - pidMinJitter_index) / 3)
cvPid = np.zeros(nmax) # Initial value of cv coming from PID calculation
# The first communication link is assumed to be a CAN bus
# The bus overhead is assumed to be a total fixed time
# not exceeding about 1 ms for up to four (4) messages going to four (4)
# separate motors (including any increases for bit stuffing); in other words
# we assume something like 100 bits per message all mastered from the same
# location on a 1 Mbps bus.
# The underlying software is assumed to be some queue processing task that
# wakes upon a posted message. A complete review of the path is needed to
# assess whether to task that actually receives the posted message awakens
# immediately (higher priority) or must time slice with all other concurrent
# tasks. If communication tasking is forced to wait for an available cycle
# it is possible that an indeterminate delay may occur at the post-to-wire
# boundary; also, the communication tasking must post all messages queued
# to the wire in close sequence otherwise the motors will be out of phase
# We can inject an estimate of communication jitter as a whole using a
# simple normal distribution
comm0Start_index = 0 # "time" that first communication bus starts
comm0Delay_sec = 0.001 # Time to complete communication (MUST BE LESS THAN PID PERIOD)
comm0Delay_index = round(comm0Delay_sec / dt_sec)
comm0End_index = comm0Start_index + comm0Delay_index
comm0MinJitter_sec = 0.000
comm0MinJitter_index = round(comm0MinJitter_sec / dt_sec)
comm0MaxJitter_sec = 0.000
comm0MaxJitter_index = round(comm0MaxJitter_sec / dt_sec)
comm0MeanJitter_index = round((comm0MaxJitter_index + comm0MinJitter_index)/2)
comm0StdDevJitter_index = round((comm0MaxJitter_index - comm0MinJitter_index) / 3)
cvComm0 = np.zeros(nmax) # cv value delayed for first communication bus
camOffset_sec = 0.0 # Offset to represent asynchronous camera start
camOffset_index = round(camOffset_sec / dt_sec)
camStart_index = camOffset_index # "time" that camera runs
camRate_Hz = 30 # Camera frame rate
camPeriod_sec = 1.0/camRate_Hz
camPeriod_index = round(camPeriod_sec / dt_sec)
camEnd_index = camStart_index + camPeriod_index
camImage_index = round((camStart_index + camEnd_index) / 2) # Time associated with center of image
pvCam = np.zeros(nmax) # process variable delayed for camera framing
# The second communication bus is polled by the imaging software
# The time that the imaging software starts is asynchronous to the
# other system components, and it will not execute again until the
# image processing completes (which itself has some variation)
comm1Start_index = 0 # "time" that second communication bus starts
comm1Delay_sec = 0.020 # Time to complete communication
comm1Delay_index = round(comm1Delay_sec / dt_sec)
comm1End_index = comm1Start_index + comm1Delay_index
comm1MinJitter_sec = 0.000
comm1MinJitter_index = round(comm1MinJitter_sec / dt_sec)
comm1MaxJitter_sec = 0.000
comm1MaxJitter_index = round(comm1MaxJitter_sec / dt_sec)
comm1MeanJitter_index = round((comm1MaxJitter_index + comm1MinJitter_index)/2)
comm1StdDevJitter_index = round((comm1MaxJitter_index - comm1MinJitter_index) / 3)
pvComm1 = np.zeros(nmax) # pv value delayed for second communication bus
# Image processing consists of a bounded, but variable process
# The content of the image and the operating environment will cause the
# associated software to vary; we will use emprical estimates for a current
# approach and will assume the variation has a normal distribution with a
# 3-sigma distribution between the upper and lower limits
pvImageStart_index = 0
pvImageMaxRate_Hz = 5.0
pvImageMinRate_Hz = 3.0
pvImageRateSigma = 3
pvImageMaxDuration_sec = 1.0 / pvImageMinRate_Hz
pvImageMinDuration_sec = 1.0 / pvImageMaxRate_Hz
pvImageMaxDuration_index = round(pvImageMaxDuration_sec / dt_sec)
pvImageMinDuration_index = round(pvImageMinDuration_sec / dt_sec)
pvImageMeanDuration_index = round((pvImageMinDuration_index + pvImageMaxDuration_index)/2)
pvImageStdDevDuration_index = round((pvImageMaxDuration_index - pvImageMinDuration_index) / pvImageRateSigma)
pvImageEnd_index = pvImageStart_index + 2*pvImageMaxDuration_index
pvImage = np.zeros(nmax)
# Final communication link between image processing and the PID
comm2Start_index = 2*pvImageMaxDuration_index # "time" that third communication bus starts (always after image processing)
comm2Delay_sec = 0.020 # Time to complete communication
comm2Delay_index = round(comm2Delay_sec / dt_sec)
comm2End_index = comm2Start_index + comm1Delay_index
comm2Jitter_sec = 0.0 # Later we will add a "random" jitter that delays communication
comm2Jitter_index = round(comm2Jitter_sec / dt_sec)
pvComm2 = np.zeros(nmax) # pv value delayed for third communication bus
pvFinal = np.zeros(nmax)
for n in ns:
# Only run the PID calculation on a period boundary
# i.e., this branch represents the task scheduled on a boundary
# When jitter is enabled we will occasionally add a delay
# representing a late task start (independent of measurement jitter)
# We assume here that the task is delayed and not immediately preempted
# and thus able to make full use of its time slice
if (pidStdDevJitter_index == 0):
pidJitter_index = 0
else:
pidJitter_index = round(np.random.normal(pidMeanJitter_index, pidStdDevJitter_index))
if ((n % (pidPeriod_index + pidJitter_index)) == 0):
#print("@ " + str(n) + " pid start")
pidStart_index = n
pidEnd_index = pidStart_index + pidDuration_index
# Once we get going, we can compute the error as the
# difference of the setpoint and the latest output
# of the process variable (delivered after all sensor and
# communication delays)
if (n > 0):
err[n] = sp[n] - pvFinal[n-1]
# Assume we currently have no way of directly measuring derr
# so we use the err measurement to estimate the error rate
# In this sense, the error rate is an average over the
# previous interval of time since we last looked, thus the
# error rate is in the past
derrdt[n] = (err[n] - err[n-1]) / pidPeriod_sec
# Integrate the error (i.e., add it up)
intErr[n] = intErr[n-1] + err[n]
# Compute the control variable by summing the PID parts
# When the pidEnd_index is reached, the output will be
# forwarded to the communication sequence
cvPid[n] = (kp * err[n]) + (ki * intErr[n]) + (kd * derrdt[n])
elif (n > 0): # Previous output is held until the next task wakeup time
err[n] = err[n-1]
derrdt[n] = derrdt[n-1]
intErr[n] = intErr[n-1]
cvPid[n] = cvPid[n-1]
# Initiate communication delay
if (n == pidEnd_index):
#print("@ " + str(n) + " pid end = " + str(cvPid[n]))
comm0Start_index = n
if (comm0StdDevJitter_index == 0):
comm0Jitter_index = 0
else:
comm0Jitter_index = round(np.random.normal(comm0MeanJitter_index, comm0StdDevJitter_index))
comm0End_index = comm0Start_index + comm0Delay_index + comm0Jitter_index
# When communication delay has been met, move the information along
if (n == comm0End_index):
cvComm0[comm0End_index] = cvPid[comm0Start_index]
#print("@ " + str(n) + " comm0 end = " + str(cvComm0[comm0End_index]))
elif (n > 0): # Otherwise, just hold the previous command
cvComm0[n] = cvComm0[n-1]
# Currently just model the motor, gears, and kinematics as a simple
# time constant without limits
# We will likely improve this fidelity later by adding limiting
# The kinematics (physics) runs "continuously" so we update it
# every time step
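    # With exp = e^(-dt_sec/tau_sec), the update below is the exact zero-order-hold
    # discretization of the first-order lag tau*dG/dt + G = kg*u, i.e.
    #   G[n] = kg*u[n]*(1 - exp) + G[n-1]*exp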
G[n] = (kg * cvComm0[n] * (1.0 - exp)) + (G[n-1] * exp)
# Next is the sensor delay, communication, processing, and communication
# on the return path
# The process output will be sensed by a camera and delivered at the
# camera frame rate; the frame interval is asynchronous to all other
# processing periods.
# We currently assume insignificant jitter in the camera rate
# We also are neglecting any blur occuring due to motion
#
# However, we will pick a point midway in the frame to represent
    # the time of the relevant image data; depending on the simulation
    # time step and the modeled camera frame rate, this can cause a jitter
    # of up to one time step
if ((n % camPeriod_index) == camOffset_index):
#print("@ " + str(n) + " camera start")
camStart_index = n
camEnd_index = camStart_index + camPeriod_index
camImage_index = round((camStart_index + camEnd_index)/2) # Midpoint in time
# This is a point in time associated with the center pixel of
# the image. For now we will just assume that the item we will measure in the
# image is at the same point in time as the image center.
# Reality is that the difference is small and only important for
# very rapid target motion
# While the center point of the image time is important for averaging
# state on the image data, the frame will not be deliverable until the
# entire frame is ready for the next communication boundary (when the frame
# can be fetched)
if (n == (camEnd_index-1)):
pvCam[camStart_index:camEnd_index] = G[camImage_index]
#print("@ " + str(n) + " camera = " + str(G[camImage_index]))
# Image processing is assumed to operate as fast as it can
# but will have asynchronous start and duration will vary based on
# image content with a well defined lower and upper limit.
#
# The modeling is a small communication delay followed by a variable
# image processing delay; we will model a small normal distribution in
# time but will not model imaging errors
if (n == comm1Start_index):
#print("@ " + str(n) + " COMM1 start")
if (comm1StdDevJitter_index == 0):
comm1Jitter_index = 0
else:
comm1Jitter_index = round(np.random.normal(comm1MeanJitter_index, comm1StdDevJitter_index))
comm1End_index = comm1Start_index + comm1Delay_index + comm1Jitter_index
# Whichever image frame is available will now be forwarded
    # We back up one camera period from when communication starts because the
# image information won't be available while a frame is being sampled
# The information is placed in the outgoing comm1 buffer at the end of
# communication, effectively delaying the image information and keeping
# the boundaries aynchronous to the resolution of the time step.
if (n == comm1End_index):
if (comm1Start_index >= camPeriod_index):
pvComm1[comm1End_index] = pvCam[comm1Start_index - camPeriod_index]
else:
pvComm1[comm1End_index] = pvCam[comm1Start_index]
#print("@ " + str(n) + " COMM1 end = " + str(pvComm1[comm1End_index]))
# Now that communication has completed, the image processing
# can start; here we represent a variable processing latency
# as a normal distribution between a min and max time assumed
# to be 3-sigma limit
# This is not a precise model of the statistical variation
# of actual image processing, but rather just enough variation
# to observe the impact to a control loop (if any)
pvImageStart_index = comm1End_index
if (pvImageStdDevDuration_index == 0):
pvImageJitter_index = pvImageMeanDuration_index
else:
pvImageJitter_index = round(np.random.normal(pvImageMeanDuration_index, pvImageStdDevDuration_index))
pvImageEnd_index = pvImageStart_index + pvImageJitter_index
elif (n > 0):
pvComm1[n] = pvComm1[n-1]
# When image processing is complete, we can begin to send the result
# to the final communication link and then restart the second comm link
# to read the camera again
if (n == pvImageEnd_index):
pvImage[pvImageEnd_index] = pvComm1[comm1End_index]
#print("@ " + str(n) + " IMAGE PROCESSING end = " + str(pvImage[pvImageEnd_index]))
comm2Start_index = pvImageEnd_index
elif (n > 0):
pvImage[n] = pvImage[n-1]
if (n == comm2Start_index):
comm2End_index = comm2Start_index + comm2Delay_index
#print("@ " + str(n) + " COMM2 start --> end = " + str(comm2End_index))
if (n == comm2End_index):
pvComm2[comm2End_index] = pvImage[comm2Start_index]
#print("@ " + str(n) + " COMM2 end = " + str(pvComm2[comm2End_index]))
comm1Start_index = comm2End_index + 1 # Restart image processing immediately
# Enforce causality
# We delay the awareness of the set point until after the first
# image is processed and communicated; it is only at that moment
# the system becomes aware of the error
if (n < nmax-1):
sp[n+1] = 1.0
elif (n > 0):
pvComm2[n] = pvComm2[n-1]
if (n < nmax-1):
sp[n+1] = sp[n]
pvFinal[n] = pvComm2[n]
plot.figure(1)
plot.cla()
plot.grid()
plot.plot(ts_sec,sp,label='sp')
plot.plot(ts_sec,err,label='err')
#plot.plot(ts_sec,cvPid,label='cvPid')
#plot.plot(ts_sec,cvComm0,'o',label='cvComm0')
plot.plot(ts_sec,G,label='G')
plot.plot(ts_sec,pvCam,label='CameraFrame')
plot.plot(ts_sec,pvComm1,label='CamComm+ImageProcessing')
plot.plot(ts_sec,pvImage,label='NetworkTableStart')
plot.plot(ts_sec,pvComm2,label='NetworkTableEnd')
#plot.plot(ts_sec,pvFinal,label='pvFinal')
#plot.legend()
plot.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
ncol=2, mode="expand", borderaxespad=0.)
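# plot.show() is needed to display the figure when this script runs outside an
# interactive backend (e.g. a plain `python pidSim.py` run); it is harmless in
# interactive sessions such as IPython or Spyder.
plot.show()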
| RocketRedNeck/PythonPlayground | pidSim.py | Python | mit | 18,070 | 0.008356 |