# Dataset dump of source files. Row fields: repo_name | path | copies | size |
# content | license | hash | line_mean | line_max | alpha_frac | autogenerated.
# Each file below is introduced by a header line giving its repo, path, and license.
# ---- repo: semonte/intellij-community | path: python/helpers/sphinxcontrib/napoleon/docstring.py | license: apache-2.0 ----
# -*- coding: utf-8 -*-
# Copyright 2014 Rob Ruana
# Licensed under the BSD License, see LICENSE file for details.
"""Classes for docstring parsing and formatting."""
import collections
import inspect
import re
import sys
from six.moves import range
from pockets import modify_iter
from six import string_types
_directive_regex = re.compile(r'\.\. \S+::')
_google_section_regex = re.compile(r'^(\s|\w)+:\s*$')
_google_typed_arg_regex = re.compile(r'\s*(.+?)\s*\(\s*(.+?)\s*\)')
_numpy_section_regex = re.compile(r'^[=\-`:\'"~^_*+#<>]{2,}\s*$')
_xref_regex = re.compile(r'(:\w+:\S+:`.+?`|:\S+:`.+?`|`.+?`)')
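# Example: the typed-argument pattern above pulls the name and type out of a
# Google-style field such as "arg1 (int)":
#     _google_typed_arg_regex.match('arg1 (int)').groups()  ->  ('arg1', 'int')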
class GoogleDocstring(object):
"""Convert Google style docstrings to reStructuredText.
Parameters
----------
docstring : str or List[str]
The docstring to parse, given either as a string or split into
individual lines.
config : Optional[sphinxcontrib.napoleon.Config or sphinx.config.Config]
The configuration settings to use. If not given, defaults to the
config object on `app`; or if `app` is not given, defaults to
a new `sphinxcontrib.napoleon.Config` object.
See Also
--------
:class:`sphinxcontrib.napoleon.Config`
Other Parameters
----------------
app : Optional[sphinx.application.Sphinx]
Application object representing the Sphinx process.
what : Optional[str]
A string specifying the type of the object to which the docstring
belongs. Valid values: "module", "class", "exception", "function",
"method", "attribute".
name : Optional[str]
The fully qualified name of the object.
obj : module, class, exception, function, method, or attribute
The object to which the docstring belongs.
options : Optional[sphinx.ext.autodoc.Options]
The options given to the directive: an object with attributes
inherited_members, undoc_members, show_inheritance and noindex that
are True if the flag option of same name was given to the auto
directive.
Example
-------
>>> from sphinxcontrib.napoleon import Config
>>> config = Config(napoleon_use_param=True, napoleon_use_rtype=True)
>>> docstring = '''One line summary.
...
... Extended description.
...
... Args:
... arg1(int): Description of `arg1`
... arg2(str): Description of `arg2`
... Returns:
... str: Description of return value.
... '''
>>> print(GoogleDocstring(docstring, config))
One line summary.
<BLANKLINE>
Extended description.
<BLANKLINE>
:param arg1: Description of `arg1`
:type arg1: int
:param arg2: Description of `arg2`
:type arg2: str
<BLANKLINE>
:returns: Description of return value.
:rtype: str
<BLANKLINE>
"""
def __init__(self, docstring, config=None, app=None, what='', name='',
obj=None, options=None):
self._config = config
self._app = app
if not self._config:
from sphinxcontrib.napoleon import Config
self._config = self._app and self._app.config or Config()
if not what:
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif isinstance(obj, collections.Callable):
what = 'function'
else:
what = 'object'
self._what = what
self._name = name
self._obj = obj
self._opt = options
if isinstance(docstring, string_types):
docstring = docstring.splitlines()
self._lines = docstring
self._line_iter = modify_iter(docstring, modifier=lambda s: s.rstrip())
self._parsed_lines = []
self._is_in_section = False
self._section_indent = 0
if not hasattr(self, '_directive_sections'):
self._directive_sections = []
if not hasattr(self, '_sections'):
self._sections = {
'args': self._parse_parameters_section,
'arguments': self._parse_parameters_section,
'attributes': self._parse_attributes_section,
'example': self._parse_examples_section,
'examples': self._parse_examples_section,
'keyword args': self._parse_keyword_arguments_section,
'keyword arguments': self._parse_keyword_arguments_section,
'methods': self._parse_methods_section,
'note': self._parse_note_section,
'notes': self._parse_notes_section,
'other parameters': self._parse_other_parameters_section,
'parameters': self._parse_parameters_section,
'return': self._parse_returns_section,
'returns': self._parse_returns_section,
'raises': self._parse_raises_section,
'references': self._parse_references_section,
'see also': self._parse_see_also_section,
'warning': self._parse_warning_section,
'warnings': self._parse_warning_section,
'warns': self._parse_warns_section,
'yield': self._parse_yields_section,
'yields': self._parse_yields_section,
}
self._parse()
def __str__(self):
"""Return the parsed docstring in reStructuredText format.
Returns
-------
str
UTF-8 encoded version of the docstring.
"""
if sys.version_info[0] >= 3:
return self.__unicode__()
else:
return self.__unicode__().encode('utf8')
def __unicode__(self):
"""Return the parsed docstring in reStructuredText format.
Returns
-------
unicode
Unicode version of the docstring.
"""
return u'\n'.join(self.lines())
def lines(self):
"""Return the parsed lines of the docstring in reStructuredText format.
Returns
-------
List[str]
The lines of the docstring in a list.
"""
return self._parsed_lines
def _consume_indented_block(self, indent=1):
lines = []
line = self._line_iter.peek()
while(not self._is_section_break() and
(not line or self._is_indented(line, indent))):
lines.append(next(self._line_iter))
line = self._line_iter.peek()
return lines
def _consume_contiguous(self):
lines = []
while (self._line_iter.has_next() and
self._line_iter.peek() and
not self._is_section_header()):
lines.append(next(self._line_iter))
return lines
def _consume_empty(self):
lines = []
line = self._line_iter.peek()
while self._line_iter.has_next() and not line:
lines.append(next(self._line_iter))
line = self._line_iter.peek()
return lines
def _consume_field(self, parse_type=True, prefer_type=False):
line = next(self._line_iter)
before, colon, after = self._partition_field_on_colon(line)
_name, _type, _desc = before, '', after
if parse_type:
match = _google_typed_arg_regex.match(before)
if match:
_name = match.group(1)
_type = match.group(2)
if _name[:2] == '**':
_name = r'\*\*'+_name[2:]
elif _name[:1] == '*':
_name = r'\*'+_name[1:]
if prefer_type and not _type:
_type, _name = _name, _type
indent = self._get_indent(line) + 1
_desc = [_desc] + self._dedent(self._consume_indented_block(indent))
_desc = self.__class__(_desc, self._config).lines()
return _name, _type, _desc
def _consume_fields(self, parse_type=True, prefer_type=False):
self._consume_empty()
fields = []
while not self._is_section_break():
_name, _type, _desc = self._consume_field(parse_type, prefer_type)
if _name or _type or _desc:
fields.append((_name, _type, _desc,))
return fields
def _consume_inline_attribute(self):
line = next(self._line_iter)
_type, colon, _desc = self._partition_field_on_colon(line)
if not colon:
_type, _desc = _desc, _type
_desc = [_desc] + self._dedent(self._consume_to_end())
_desc = self.__class__(_desc, self._config).lines()
return _type, _desc
def _consume_returns_section(self):
lines = self._dedent(self._consume_to_next_section())
if lines:
before, colon, after = self._partition_field_on_colon(lines[0])
_name, _type, _desc = '', '', lines
if colon:
if after:
_desc = [after] + lines[1:]
else:
_desc = lines[1:]
match = _google_typed_arg_regex.match(before)
if match:
_name = match.group(1)
_type = match.group(2)
else:
_type = before
_desc = self.__class__(_desc, self._config).lines()
return [(_name, _type, _desc,)]
else:
return []
def _consume_usage_section(self):
lines = self._dedent(self._consume_to_next_section())
return lines
def _consume_section_header(self):
section = next(self._line_iter)
stripped_section = section.strip(':')
if stripped_section.lower() in self._sections:
section = stripped_section
return section
def _consume_to_end(self):
lines = []
while self._line_iter.has_next():
lines.append(next(self._line_iter))
return lines
def _consume_to_next_section(self):
self._consume_empty()
lines = []
while not self._is_section_break():
lines.append(next(self._line_iter))
return lines + self._consume_empty()
def _dedent(self, lines, full=False):
if full:
return [line.lstrip() for line in lines]
else:
min_indent = self._get_min_indent(lines)
return [line[min_indent:] for line in lines]
def _format_admonition(self, admonition, lines):
lines = self._strip_empty(lines)
if len(lines) == 1:
return ['.. %s:: %s' % (admonition, lines[0].strip()), '']
elif lines:
lines = self._indent(self._dedent(lines), 3)
return ['.. %s::' % admonition, ''] + lines + ['']
else:
return ['.. %s::' % admonition, '']
def _format_block(self, prefix, lines, padding=None):
if lines:
if padding is None:
padding = ' ' * len(prefix)
result_lines = []
for i, line in enumerate(lines):
if i == 0:
result_lines.append((prefix + line).rstrip())
elif line:
result_lines.append(padding + line)
else:
result_lines.append('')
return result_lines
else:
return [prefix]
def _format_field(self, _name, _type, _desc):
_desc = self._strip_empty(_desc)
has_desc = any(_desc)
separator = has_desc and ' -- ' or ''
if _name:
if _type:
if '`' in _type:
field = '**%s** (%s)%s' % (_name, _type, separator)
else:
field = '**%s** (*%s*)%s' % (_name, _type, separator)
else:
field = '**%s**%s' % (_name, separator)
elif _type:
if '`' in _type:
field = '%s%s' % (_type, separator)
else:
field = '*%s*%s' % (_type, separator)
else:
field = ''
if has_desc:
return [field + _desc[0]] + _desc[1:]
else:
return [field]
def _format_fields(self, field_type, fields):
field_type = ':%s:' % field_type.strip()
padding = ' ' * len(field_type)
multi = len(fields) > 1
lines = []
for _name, _type, _desc in fields:
field = self._format_field(_name, _type, _desc)
if multi:
if lines:
lines.extend(self._format_block(padding + ' * ', field))
else:
lines.extend(self._format_block(field_type + ' * ', field))
else:
lines.extend(self._format_block(field_type + ' ', field))
if lines and lines[-1]:
lines.append('')
return lines
def _get_current_indent(self, peek_ahead=0):
line = self._line_iter.peek(peek_ahead + 1)[peek_ahead]
while line != self._line_iter.sentinel:
if line:
return self._get_indent(line)
peek_ahead += 1
line = self._line_iter.peek(peek_ahead + 1)[peek_ahead]
return 0
def _get_indent(self, line):
for i, s in enumerate(line):
if not s.isspace():
return i
return len(line)
def _get_min_indent(self, lines):
min_indent = None
for line in lines:
if line:
indent = self._get_indent(line)
if min_indent is None:
min_indent = indent
elif indent < min_indent:
min_indent = indent
return min_indent or 0
def _indent(self, lines, n=4):
return [(' ' * n) + line for line in lines]
def _is_indented(self, line, indent=1):
for i, s in enumerate(line):
if i >= indent:
return True
elif not s.isspace():
return False
return False
def _is_section_header(self):
section = self._line_iter.peek().lower()
match = _google_section_regex.match(section)
if match and section.strip(':') in self._sections:
header_indent = self._get_indent(section)
section_indent = self._get_current_indent(peek_ahead=1)
return section_indent > header_indent
elif self._directive_sections:
if _directive_regex.match(section):
for directive_section in self._directive_sections:
if section.startswith(directive_section):
return True
return False
def _is_section_break(self):
line = self._line_iter.peek()
return (not self._line_iter.has_next() or
self._is_section_header() or
(self._is_in_section and
line and
not self._is_indented(line, self._section_indent)))
def _parse(self):
self._parsed_lines = self._consume_empty()
if self._name and (self._what == 'attribute' or self._what == 'data'):
self._parsed_lines.extend(self._parse_attribute_docstring())
return
while self._line_iter.has_next():
if self._is_section_header():
try:
section = self._consume_section_header()
self._is_in_section = True
self._section_indent = self._get_current_indent()
if _directive_regex.match(section):
lines = [section] + self._consume_to_next_section()
else:
lines = self._sections[section.lower()](section)
finally:
self._is_in_section = False
self._section_indent = 0
else:
if not self._parsed_lines:
lines = self._consume_contiguous() + self._consume_empty()
else:
lines = self._consume_to_next_section()
self._parsed_lines.extend(lines)
def _parse_attribute_docstring(self):
_type, _desc = self._consume_inline_attribute()
return self._format_field('', _type, _desc)
def _parse_attributes_section(self, section):
lines = []
for _name, _type, _desc in self._consume_fields():
if self._config.napoleon_use_ivar:
field = ':ivar %s: ' % _name
lines.extend(self._format_block(field, _desc))
if _type:
lines.append(':vartype %s: %s' % (_name, _type))
else:
lines.extend(['.. attribute:: ' + _name, ''])
field = self._format_field('', _type, _desc)
lines.extend(self._indent(field, 3))
lines.append('')
if self._config.napoleon_use_ivar:
lines.append('')
return lines
def _parse_examples_section(self, section):
use_admonition = self._config.napoleon_use_admonition_for_examples
return self._parse_generic_section(section, use_admonition)
def _parse_usage_section(self, section):
header = ['.. rubric:: Usage:', '']
block = ['.. code-block:: python', '']
lines = self._consume_usage_section()
lines = self._indent(lines, 3)
return header + block + lines + ['']
def _parse_generic_section(self, section, use_admonition):
lines = self._strip_empty(self._consume_to_next_section())
lines = self._dedent(lines)
if use_admonition:
header = '.. admonition:: %s' % section
lines = self._indent(lines, 3)
else:
header = '.. rubric:: %s' % section
if lines:
return [header, ''] + lines + ['']
else:
return [header, '']
def _parse_keyword_arguments_section(self, section):
return self._format_fields('Keyword Arguments', self._consume_fields())
def _parse_methods_section(self, section):
lines = []
for _name, _, _desc in self._consume_fields(parse_type=False):
lines.append('.. method:: %s' % _name)
if _desc:
lines.extend([''] + self._indent(_desc, 3))
lines.append('')
return lines
def _parse_note_section(self, section):
lines = self._consume_to_next_section()
return self._format_admonition('note', lines)
def _parse_notes_section(self, section):
use_admonition = self._config.napoleon_use_admonition_for_notes
return self._parse_generic_section('Notes', use_admonition)
def _parse_other_parameters_section(self, section):
return self._format_fields('Other Parameters', self._consume_fields())
def _parse_parameters_section(self, section):
fields = self._consume_fields()
if self._config.napoleon_use_param:
lines = []
for _name, _type, _desc in fields:
field = ':param %s: ' % _name
lines.extend(self._format_block(field, _desc))
if _type:
lines.append(':type %s: %s' % (_name, _type))
return lines + ['']
else:
return self._format_fields('Parameters', fields)
def _parse_raises_section(self, section):
fields = self._consume_fields(parse_type=False, prefer_type=True)
field_type = ':raises:'
padding = ' ' * len(field_type)
multi = len(fields) > 1
lines = []
for _, _type, _desc in fields:
_desc = self._strip_empty(_desc)
has_desc = any(_desc)
separator = has_desc and ' -- ' or ''
if _type:
has_refs = '`' in _type or ':' in _type
has_space = any(c in ' \t\n\v\f ' for c in _type)
if not has_refs and not has_space:
_type = ':exc:`%s`%s' % (_type, separator)
elif has_desc and has_space:
_type = '*%s*%s' % (_type, separator)
else:
_type = '%s%s' % (_type, separator)
if has_desc:
field = [_type + _desc[0]] + _desc[1:]
else:
field = [_type]
else:
field = _desc
if multi:
if lines:
lines.extend(self._format_block(padding + ' * ', field))
else:
lines.extend(self._format_block(field_type + ' * ', field))
else:
lines.extend(self._format_block(field_type + ' ', field))
if lines and lines[-1]:
lines.append('')
return lines
def _parse_references_section(self, section):
use_admonition = self._config.napoleon_use_admonition_for_references
return self._parse_generic_section('References', use_admonition)
def _parse_returns_section(self, section):
fields = self._consume_returns_section()
multi = len(fields) > 1
if multi:
use_rtype = False
else:
use_rtype = self._config.napoleon_use_rtype
lines = []
for _name, _type, _desc in fields:
if use_rtype:
field = self._format_field(_name, '', _desc)
else:
field = self._format_field(_name, _type, _desc)
if multi:
if lines:
lines.extend(self._format_block(' * ', field))
else:
lines.extend(self._format_block(':returns: * ', field))
else:
lines.extend(self._format_block(':returns: ', field))
if _type and use_rtype:
lines.extend([':rtype: %s' % _type, ''])
if lines and lines[-1]:
lines.append('')
return lines
def _parse_see_also_section(self, section):
lines = self._consume_to_next_section()
return self._format_admonition('seealso', lines)
def _parse_warning_section(self, section):
lines = self._consume_to_next_section()
return self._format_admonition('warning', lines)
def _parse_warns_section(self, section):
return self._format_fields('Warns', self._consume_fields())
def _parse_yields_section(self, section):
fields = self._consume_returns_section()
return self._format_fields('Yields', fields)
def _partition_field_on_colon(self, line):
before_colon = []
after_colon = []
colon = ''
found_colon = False
for i, source in enumerate(_xref_regex.split(line)):
if found_colon:
after_colon.append(source)
else:
if (i % 2) == 0 and ":" in source:
found_colon = True
before, colon, after = source.partition(":")
before_colon.append(before)
after_colon.append(after)
else:
before_colon.append(source)
return ("".join(before_colon).strip(),
colon,
"".join(after_colon).strip())
def _strip_empty(self, lines):
if lines:
start = -1
for i, line in enumerate(lines):
if line:
start = i
break
if start == -1:
lines = []
end = -1
for i in reversed(range(len(lines))):
line = lines[i]
if line:
end = i
break
if start > 0 or end + 1 < len(lines):
lines = lines[start:end + 1]
return lines
class NumpyDocstring(GoogleDocstring):
"""Convert NumPy style docstrings to reStructuredText.
Parameters
----------
docstring : str or List[str]
The docstring to parse, given either as a string or split into
individual lines.
config : Optional[sphinxcontrib.napoleon.Config or sphinx.config.Config]
The configuration settings to use. If not given, defaults to the
config object on `app`; or if `app` is not given, defaults to
a new `sphinxcontrib.napoleon.Config` object.
See Also
--------
:class:`sphinxcontrib.napoleon.Config`
Other Parameters
----------------
app : Optional[sphinx.application.Sphinx]
Application object representing the Sphinx process.
what : Optional[str]
A string specifying the type of the object to which the docstring
belongs. Valid values: "module", "class", "exception", "function",
"method", "attribute".
name : Optional[str]
The fully qualified name of the object.
obj : module, class, exception, function, method, or attribute
The object to which the docstring belongs.
options : Optional[sphinx.ext.autodoc.Options]
The options given to the directive: an object with attributes
inherited_members, undoc_members, show_inheritance and noindex that
are True if the flag option of same name was given to the auto
directive.
Example
-------
>>> from sphinxcontrib.napoleon import Config
>>> config = Config(napoleon_use_param=True, napoleon_use_rtype=True)
>>> docstring = '''One line summary.
...
... Extended description.
...
... Parameters
... ----------
... arg1 : int
... Description of `arg1`
... arg2 : str
... Description of `arg2`
... Returns
... -------
... str
... Description of return value.
... '''
>>> print(NumpyDocstring(docstring, config))
One line summary.
<BLANKLINE>
Extended description.
<BLANKLINE>
:param arg1: Description of `arg1`
:type arg1: int
:param arg2: Description of `arg2`
:type arg2: str
<BLANKLINE>
:returns: Description of return value.
:rtype: str
<BLANKLINE>
Methods
-------
__str__()
Return the parsed docstring in reStructuredText format.
Returns
-------
str
UTF-8 encoded version of the docstring.
__unicode__()
Return the parsed docstring in reStructuredText format.
Returns
-------
unicode
Unicode version of the docstring.
lines()
Return the parsed lines of the docstring in reStructuredText format.
Returns
-------
List[str]
The lines of the docstring in a list.
"""
def __init__(self, docstring, config=None, app=None, what='', name='',
obj=None, options=None):
self._directive_sections = ['.. index::']
super(NumpyDocstring, self).__init__(docstring, config, app, what,
name, obj, options)
def _consume_field(self, parse_type=True, prefer_type=False):
line = next(self._line_iter)
if parse_type:
_name, _, _type = self._partition_field_on_colon(line)
else:
_name, _type = line, ''
_name, _type = _name.strip(), _type.strip()
if prefer_type and not _type:
_type, _name = _name, _type
if _name[:2] == '**':
_name = r'\*\*'+_name[2:]
elif _name[:1] == '*':
_name = r'\*'+_name[1:]
indent = self._get_indent(line)
_desc = self._dedent(self._consume_indented_block(indent + 1))
_desc = self.__class__(_desc, self._config).lines()
return _name, _type, _desc
def _consume_returns_section(self):
return self._consume_fields(prefer_type=True)
def _consume_section_header(self):
section = next(self._line_iter)
if not _directive_regex.match(section):
# Consume the header underline
next(self._line_iter)
return section
def _is_section_break(self):
line1, line2 = self._line_iter.peek(2)
return (not self._line_iter.has_next() or
self._is_section_header() or
['', ''] == [line1, line2] or
(self._is_in_section and
line1 and
not self._is_indented(line1, self._section_indent)))
def _is_section_header(self):
section, underline = self._line_iter.peek(2)
section = section.lower()
if section in self._sections and isinstance(underline, string_types):
return bool(_numpy_section_regex.match(underline))
elif self._directive_sections:
if _directive_regex.match(section):
for directive_section in self._directive_sections:
if section.startswith(directive_section):
return True
return False
_name_rgx = re.compile(r"^\s*(:(?P<role>\w+):`(?P<name>[a-zA-Z0-9_.-]+)`|"
r" (?P<name2>[a-zA-Z0-9_.-]+))\s*", re.X)
def _parse_see_also_section(self, section):
lines = self._consume_to_next_section()
try:
return self._parse_numpydoc_see_also_section(lines)
except ValueError:
return self._format_admonition('seealso', lines)
def _parse_numpydoc_see_also_section(self, content):
"""
Derived from the NumpyDoc implementation of _parse_see_also.
See Also
--------
func_name : Descriptive text
continued text
another_func_name : Descriptive text
func_name1, func_name2, :meth:`func_name`, func_name3
"""
items = []
def parse_item_name(text):
"""Match ':role:`name`' or 'name'"""
m = self._name_rgx.match(text)
if m:
g = m.groups()
if g[1] is None:
return g[3], None
else:
return g[2], g[1]
raise ValueError("%s is not a item name" % text)
def push_item(name, rest):
if not name:
return
name, role = parse_item_name(name)
items.append((name, list(rest), role))
del rest[:]
current_func = None
rest = []
for line in content:
if not line.strip():
continue
m = self._name_rgx.match(line)
if m and line[m.end():].strip().startswith(':'):
push_item(current_func, rest)
current_func, line = line[:m.end()], line[m.end():]
rest = [line.split(':', 1)[1].strip()]
if not rest[0]:
rest = []
elif not line.startswith(' '):
push_item(current_func, rest)
current_func = None
if ',' in line:
for func in line.split(','):
if func.strip():
push_item(func, [])
elif line.strip():
current_func = line
elif current_func is not None:
rest.append(line.strip())
push_item(current_func, rest)
if not items:
return []
roles = {
'method': 'meth',
'meth': 'meth',
'function': 'func',
'func': 'func',
'class': 'class',
'exception': 'exc',
'exc': 'exc',
'object': 'obj',
'obj': 'obj',
'module': 'mod',
'mod': 'mod',
'data': 'data',
'constant': 'const',
'const': 'const',
'attribute': 'attr',
'attr': 'attr'
}
if self._what is None:
func_role = 'obj'
else:
func_role = roles.get(self._what, '')
lines = []
last_had_desc = True
for func, desc, role in items:
if role:
link = ':%s:`%s`' % (role, func)
elif func_role:
link = ':%s:`%s`' % (func_role, func)
else:
link = "`%s`_" % func
if desc or last_had_desc:
lines += ['']
lines += [link]
else:
lines[-1] += ", %s" % link
if desc:
lines += self._indent([' '.join(desc)])
last_had_desc = True
else:
last_had_desc = False
lines += ['']
return self._format_admonition('seealso', lines)
# ---- repo: ypid/debops-playbooks | path: playbooks/lookup_plugins/lists.py | license: gpl-3.0 ----
# (c) 2015, Hartmut Goebel <[email protected]>
# Based on `runner/lookup_plugins/items.py` for Ansible
# (c) 2012, Michael DeHaan <[email protected]>
#
# This file is part of Debops.
# This file is NOT part of Ansible yet.
#
# Debops is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Debops. If not, see <http://www.gnu.org/licenses/>.
'''
This file implements the `with_lists` lookup filter for Ansible. Unlike
`with_items`, this one does *not* flatten the lists passed to it.
Example:
- debug: msg="{{item.0}} -- {{item.1}} -- {{item.2}}"
with_lists:
- ["General", "Verbosity", "0"]
- ["Mapping", "Nobody-User", "nobody"]
- ["Mapping", "Nobody-Group", "nogroup"]
Output (shortened):
"msg": "General -- Verbosity -- 0"
"msg": "Mapping -- Nobody-User -- nobody"
"msg": "Mapping -- Nobody-Group -- nogroup"
'''
import ansible.utils as utils
import ansible.errors as errors
try:
from ansible.plugins.lookup import LookupBase
except ImportError:
LookupBase = object
class LookupModule(LookupBase):
def __init__(self, basedir=None, **kwargs):
self.basedir = basedir
def run(self, terms, inject=None, **kwargs):
terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
if not isinstance(terms, (list, set)):
raise errors.AnsibleError("with_list expects a list or a set")
for i, elem in enumerate(terms):
if not isinstance(elem, (list, tuple)):
raise errors.AnsibleError("with_list expects a list (or a set) of lists or tuples, but elem %i is not")
return terms
# ---- repo: LevinJ/ud730-Deep-Learning | path: A3_regularization/p4_mulitlayer.py | license: gpl-2.0 ----
from __future__ import print_function
import numpy as np
import tensorflow as tf
from A2_fullyconnected.p1_relulayer import HiddenRelu
import math
import utility.logger_tool
import logging
class Mulilayer_HiddenRelu(HiddenRelu):
def __init__(self):
HiddenRelu.__init__(self)
self.num_steps = 120 * 1000
self.train_subset = 519.090e+3
self.keep_prob = 0.9
return
def setupOptimizer(self):
global_step = tf.Variable(0, trainable=False)
starter_learning_rate = 0.2
decay_steps = 3500
decay_rate = 0.86
learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step,
decay_steps, decay_rate, staircase=True)
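# With staircase=True this schedule works out to
#     learning_rate = 0.2 * 0.86 ** floor(global_step / 3500)
# i.e. the rate is cut by 14% every 3500 minibatch steps.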
self.optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(self.loss, global_step=global_step)
return
def getTempModelOutput(self, dataset, keep_prob):
# Training computation.
# We multiply the inputs with the weight matrix, and add biases. We compute
# the softmax and cross-entropy (it's one operation in TensorFlow, because
# it's very common, and it can be optimized). We take the average of this
# cross-entropy across all training examples: that's our loss.
# res = tf.matmul(dataset, self.weights) + self.biases
# h_layer1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
a_layer1 = dataset
z_layer2 = tf.matmul(a_layer1, self.weights_layer1 ) + self.biases_layer1
a_layer2 = tf.nn.relu(z_layer2)
a_layer2 = tf.nn.dropout(a_layer2, keep_prob)
z_layer3 = tf.matmul(a_layer2 , self.weights_layer2 ) + self.biases_layer2
a_layer3 = tf.nn.relu(z_layer3)
a_layer3 = tf.nn.dropout(a_layer3, keep_prob)
z_layer4 = tf.matmul(a_layer3 , self.weights_layer3 ) + self.biases_layer3
a_layer4 = tf.nn.relu(z_layer4)
a_layer4 = tf.nn.dropout(a_layer4, keep_prob)
z_layer5 = tf.matmul(a_layer4 , self.weights_layer4 ) + self.biases_layer4
a_layer5 = tf.nn.relu(z_layer5)
a_layer5 = tf.nn.dropout(a_layer5, keep_prob)
z_layer6 = tf.matmul(a_layer5 , self.weights_layer5 ) + self.biases_layer5
return z_layer6
def setupVariables(self):
layer1Num = self.image_size * self.image_size
layer2Num = 1024
layer3Num = 300
layer4Num = 50
layer5Num = 50
layer6Num = self.num_labels
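# Fully-connected stack: input -> 1024 -> 300 -> 50 -> 50 -> num_labels,
# with ReLU and dropout between layers (see getTempModelOutput above).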
# Variables.
# These are the parameters that we are going to be training. The weight
# matrix will be initialized using random valued following a (truncated)
# normal distribution. The biases get initialized to zero.
self.weights_layer1 = tf.Variable(tf.truncated_normal([layer1Num, layer2Num], stddev=1 / math.sqrt(float(layer1Num))))
self.biases_layer1 = tf.Variable(tf.zeros([layer2Num]))
self.weights_layer2 = tf.Variable(tf.truncated_normal([layer2Num, layer3Num], stddev=1 / math.sqrt(float(layer2Num))))
self.biases_layer2 = tf.Variable(tf.zeros([layer3Num]))
self.weights_layer3 = tf.Variable(tf.truncated_normal([layer3Num, layer4Num], stddev=1 / math.sqrt(float(layer3Num))))
self.biases_layer3 = tf.Variable(tf.zeros([layer4Num]))
#
self.weights_layer4 = tf.Variable(tf.truncated_normal([layer4Num, layer5Num], stddev=1 / math.sqrt(float(layer4Num))))
self.biases_layer4 = tf.Variable(tf.zeros([layer5Num]))
self.weights_layer5 = tf.Variable(tf.truncated_normal([layer5Num, layer6Num], stddev=1 / math.sqrt(float(layer5Num))))
self.biases_layer5 = tf.Variable(tf.zeros([layer6Num]))
return
if __name__ == "__main__":
_=utility.logger_tool.Logger(filename='logs/Mulilayer_HiddenRelu.log',filemode='w',level=logging.DEBUG)
obj= Mulilayer_HiddenRelu()
obj.run()
# ---- repo: adrientetar/robofab | path: Lib/robofab/tools/remote.py | license: bsd-3-clause ----
"""Remote control for MacOS FontLab.
initFontLabRemote() registers a callback for appleevents and
runFontLabRemote() sends the code from a different application,
such as a Mac Python IDE or Python interpreter.
"""
from robofab.world import world
if world.inFontLab and world.mac is not None:
from Carbon import AE as _AE
else:
import sys
from aetools import TalkTo
class FontLab(TalkTo):
pass
__all__ = ['initFontLabRemote', 'runFontLabRemote']
def _executePython(theAppleEvent, theReply):
import aetools
import io
import traceback
import sys
parms, attrs = aetools.unpackevent(theAppleEvent)
source = parms.get("----")
if source is None:
return
stdout = io.StringIO()
#print "<executing remote command>"
save = sys.stdout, sys.stderr
sys.stdout = sys.stderr = stdout
namespace = {}
try:
try:
exec(source, namespace)
except:
traceback.print_exc()
finally:
sys.stdout, sys.stderr = save
output = stdout.getvalue()
aetools.packevent(theReply, {"----": output})
_imported = False
def initFontLabRemote():
"""Call this in FontLab at startup of the application to switch on the remote."""
print("FontLabRemote is on.")
_AE.AEInstallEventHandler("Rfab", "exec", _executePython)
if world.inFontLab and world.mac is not None:
initFontLabRemote()
def runFontLabRemote(code):
"""Call this in the MacOS Python IDE to make FontLab execute the code."""
fl = FontLab("FLab", start=1)
ae, parms, attrs = fl.send("Rfab", "exec", {"----": code})
output = parms.get("----")
return output
# GlyphTransmit
# Convert a glyph to a string using digestPen, transmit string, unpack string with pointpen.
#
def Glyph2String(glyph):
from robofab.pens.digestPen import DigestPointPen
import pickle
p = DigestPointPen(glyph)
glyph.drawPoints(p)
info = {}
info['name'] = glyph.name
info['width'] = glyph.width
info['points'] = p.getDigest()
return str(pickle.dumps(info))
def String2Glyph(gString, penClass, font):
import pickle
if gString is None:
return None
info = pickle.loads(gString)
name = info['name']
if not name in list(font.keys()):
glyph = font.newGlyph(name)
else:
glyph = font[name]
pen = penClass(glyph)
for p in info['points']:
if p == "beginPath":
pen.beginPath()
elif p == "endPath":
pen.endPath()
else:
pt, type = p
pen.addPoint(pt, type)
glyph.width = info['width']
glyph.update()
return glyph
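# Note: the transmitted string is simply a pickled dict with the keys 'name',
# 'width' and 'points', e.g. {'name': 'A', 'width': 500, 'points': [...]}
# (the glyph name 'A' and width 500 are illustrative values only).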
_makeFLGlyph = """
from robofab.world import CurrentFont
from robofab.tools.remote import receiveGlyph
code = '''%s'''
receiveGlyph(code, CurrentFont())
"""
def transmitGlyph(glyph):
from robofab.world import world
if world.inFontLab and world.mac is not None:
# we're in fontlab, on a mac
print(Glyph2String(glyph))
pass
else:
remoteProgram = _makeFLGlyph%Glyph2String(glyph)
print("remoteProgram", remoteProgram)
return runFontLabRemote(remoteProgram)
def receiveGlyph(glyphString, font=None):
from robofab.world import world
if world.inFontLab and world.mac is not None:
# we're in fontlab, on a mac
from robofab.pens.flPen import FLPointPen
print(String2Glyph(glyphString, FLPointPen, font))
pass
else:
from robofab.pens.rfUFOPen import RFUFOPointPen
print(String2Glyph(glyphString, RFUFOPointPen, font))
#
# command to tell FontLab to open a UFO and save it as a vfb
def os9PathConvert(path):
"""Attempt to convert a unix style path to a Mac OS9 style path.
No support for relative paths!
"""
if path.find("/Volumes") == 0:
# it's on the volumes list, some sort of external volume
path = path[len("/Volumes")+1:]
elif path[0] == "/":
# a dir on the root volume
path = path[1:]
new = path.replace("/", ":")
return new
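# Examples of the conversion above (paths are illustrative only):
#     "/Volumes/Work/font.ufo"  ->  "Work:font.ufo"
#     "/Users/me/font.ufo"      ->  "Users:me:font.ufo"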
_remoteUFOImportProgram = """
from robofab.objects.objectsFL import NewFont
import os.path
destinationPathVFB = "%(destinationPathVFB)s"
font = NewFont()
font.readUFO("%(sourcePathUFO)s", doProgress=True)
font.update()
font.save(destinationPathVFB)
print font, "done"
font.close()
"""
def makeVFB(sourcePathUFO, destinationPathVFB=None):
"""FontLab convenience function to import a UFO and save it as a VFB"""
import os
fl = FontLab("FLab", start=1)
if destinationPathVFB is None:
destinationPathVFB = os.path.splitext(sourcePathUFO)[0]+".vfb"
src9 = os9PathConvert(sourcePathUFO)
dst9 = os9PathConvert(destinationPathVFB)
code = _remoteUFOImportProgram%{'sourcePathUFO': src9, 'destinationPathVFB':dst9}
ae, parms, attrs = fl.send("Rfab", "exec", {"----": code})
output = parms.get("----")
return output
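# Usage sketch (a hedged example, not part of the original module).
def _remote_example():
    """Drive the remote from a Python IDE outside FontLab.
    Assumes FontLab is running on the same Mac and has already called
    initFontLabRemote() at startup; the code string is purely illustrative.
    """
    source = "\n".join([
        "from robofab.world import CurrentFont",
        "font = CurrentFont()",
        "print(len(font))",
    ])
    # Sends the code over Apple Events; returns whatever FontLab printed.
    return runFontLabRemote(source)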
# ---- repo: hronoses/vispy | path: vispy/ext/_bundled/png.py ----
#!/usr/bin/env python
# png.py - PNG encoder/decoder in pure Python
#
# Copyright (C) 2006 Johann C. Rocholl <[email protected]>
# Portions Copyright (C) 2009 David Jones <[email protected]>
# And probably portions Copyright (C) 2006 Nicko van Someren <[email protected]>
#
# Original concept by Johann C. Rocholl.
#
# LICENCE (MIT)
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Adapted for vispy
# http://www.python.org/doc/2.2.3/whatsnew/node5.html
from __future__ import generators
__version__ = "0.0.17"
from array import array
import math
from functools import reduce  # Python 3 compatibility: reduce is no longer a builtin
# http://www.python.org/doc/2.4.4/lib/module-operator.html
import operator
import struct
import zlib
# http://www.python.org/doc/2.4.4/lib/module-warnings.html
import warnings
from .six.moves import map as imap
from .six import string_types
import itertools
__all__ = ['Image', 'Reader', 'Writer', 'write_chunks', 'from_array']
# The PNG signature.
# http://www.w3.org/TR/PNG/#5PNG-file-signature
_signature = struct.pack('8B', 137, 80, 78, 71, 13, 10, 26, 10)
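# These eight bytes are b'\x89PNG\r\n\x1a\n', the fixed signature that begins
# every PNG file.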
_adam7 = ((0, 0, 8, 8),
(4, 0, 8, 8),
(0, 4, 4, 8),
(2, 0, 4, 4),
(0, 2, 2, 4),
(1, 0, 2, 2),
(0, 1, 1, 2))
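# Each entry is (xstart, ystart, xstep, ystep) for one of the seven Adam7
# reduced passes (used by the interlaced scanline generators below).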
def group(s, n):
# See http://www.python.org/doc/2.6/library/functions.html#zip
return zip(*[iter(s)]*n)
def isarray(x):
"""Same as ``isinstance(x, array)`` except on Python 2.2, where it
always returns ``False``. This helps PyPNG work on Python 2.2.
"""
try:
return isinstance(x, array)
except TypeError:
# Because on Python 2.2 array.array is not a type.
return False
def tostring(row):
""" Python3 definition, array.tostring() is deprecated in Python3
"""
return row.tobytes()
# Conditionally convert to bytes. Works on Python 2 and Python 3.
try:
bytes('', 'ascii')
def strtobytes(x): return bytes(x, 'iso8859-1')
def bytestostr(x): return str(x, 'iso8859-1')
except (NameError, TypeError):
# We get NameError when bytes() does not exist (most Python
# 2.x versions), and TypeError when bytes() exists but is on
# Python 2.x (when it is an alias for str() and takes at most
# one argument).
strtobytes = str
bytestostr = str
def interleave_planes(ipixels, apixels, ipsize, apsize):
"""
Interleave (colour) planes, e.g. RGB + A = RGBA.
Return an array of pixels consisting of the `ipsize` elements of
data from each pixel in `ipixels` followed by the `apsize` elements
of data from each pixel in `apixels`. Conventionally `ipixels`
and `apixels` are byte arrays so the sizes are bytes, but it
actually works with any arrays of the same type. The returned
array is the same type as the input arrays which should be the
same type as each other.
"""
itotal = len(ipixels)
atotal = len(apixels)
newtotal = itotal + atotal
newpsize = ipsize + apsize
# Set up the output buffer
# See http://www.python.org/doc/2.4.4/lib/module-array.html#l2h-1356
out = array(ipixels.typecode)
# It's annoying that there is no cheap way to set the array size :-(
out.extend(ipixels)
out.extend(apixels)
# Interleave in the pixel data
for i in range(ipsize):
out[i:newtotal:newpsize] = ipixels[i:itotal:ipsize]
for i in range(apsize):
out[i+ipsize:newtotal:newpsize] = apixels[i:atotal:apsize]
return out
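# A small illustration (added as an example, not part of the original module).
def _interleave_example():
    """Interleave two RGB pixels with their alpha plane to give RGBA,
    i.e. 3 + 1 values per pixel.
    """
    rgb = array('B', [255, 0, 0, 0, 255, 0])    # two RGB pixels
    alpha = array('B', [128, 64])                # their alpha values
    # returns array('B', [255, 0, 0, 128, 0, 255, 0, 64])
    return interleave_planes(rgb, alpha, 3, 1)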
def check_palette(palette):
"""Check a palette argument (to the :class:`Writer` class)
for validity. Returns the palette as a list if okay; raises an
exception otherwise.
"""
# None is the default and is allowed.
if palette is None:
return None
p = list(palette)
if not (0 < len(p) <= 256):
raise ValueError("a palette must have between 1 and 256 entries")
seen_triple = False
for i,t in enumerate(p):
if len(t) not in (3,4):
raise ValueError(
"palette entry %d: entries must be 3- or 4-tuples." % i)
if len(t) == 3:
seen_triple = True
if seen_triple and len(t) == 4:
raise ValueError(
"palette entry %d: all 4-tuples must precede all 3-tuples" % i)
for x in t:
if int(x) != x or not(0 <= x <= 255):
raise ValueError(
"palette entry %d: values must be integer: 0 <= x <= 255" % i)
return p
def check_sizes(size, width, height):
"""Check that these arguments, in supplied, are consistent.
Return a (width, height) pair.
"""
if not size:
return width, height
if len(size) != 2:
raise ValueError(
"size argument should be a pair (width, height)")
if width is not None and width != size[0]:
raise ValueError(
"size[0] (%r) and width (%r) should match when both are used."
% (size[0], width))
if height is not None and height != size[1]:
raise ValueError(
"size[1] (%r) and height (%r) should match when both are used."
% (size[1], height))
return size
def check_color(c, greyscale, which):
"""Checks that a colour argument for transparent or
background options is the right form. Returns the colour
(which, if it's a bare integer, is "corrected" to a 1-tuple).
"""
if c is None:
return c
if greyscale:
try:
l = len(c)
except TypeError:
c = (c,)
if len(c) != 1:
raise ValueError("%s for greyscale must be 1-tuple" %
which)
if not isinteger(c[0]):
raise ValueError(
"%s colour for greyscale must be integer" % which)
else:
if not (len(c) == 3 and
isinteger(c[0]) and
isinteger(c[1]) and
isinteger(c[2])):
raise ValueError(
"%s colour must be a triple of integers" % which)
return c
class Error(Exception):
def __str__(self):
return self.__class__.__name__ + ': ' + ' '.join(self.args)
class FormatError(Error):
"""Problem with input file format. In other words, PNG file does
not conform to the specification in some way and is invalid.
"""
class ChunkError(FormatError):
pass
class Writer:
"""
PNG encoder in pure Python.
"""
def __init__(self, width=None, height=None,
size=None,
greyscale=False,
alpha=False,
bitdepth=8,
palette=None,
transparent=None,
background=None,
gamma=None,
compression=None,
interlace=False,
bytes_per_sample=None, # deprecated
planes=None,
colormap=None,
maxval=None,
chunk_limit=2**20):
"""
Create a PNG encoder object.
Arguments:
width, height
Image size in pixels, as two separate arguments.
size
Image size (w,h) in pixels, as single argument.
greyscale
Input data is greyscale, not RGB.
alpha
Input data has alpha channel (RGBA or LA).
bitdepth
Bit depth: from 1 to 16.
palette
Create a palette for a colour mapped image (colour type 3).
transparent
Specify a transparent colour (create a ``tRNS`` chunk).
background
Specify a default background colour (create a ``bKGD`` chunk).
gamma
Specify a gamma value (create a ``gAMA`` chunk).
compression
zlib compression level: 0 (none) to 9 (more compressed);
default: -1 or None.
interlace
Create an interlaced image.
chunk_limit
Write multiple ``IDAT`` chunks to save memory.
The image size (in pixels) can be specified either by using the
`width` and `height` arguments, or with the single `size`
argument. If `size` is used it should be a pair (*width*,
*height*).
`greyscale` and `alpha` are booleans that specify whether
an image is greyscale (or colour), and whether it has an
alpha channel (or not).
`bitdepth` specifies the bit depth of the source pixel values.
Each source pixel value must be an integer between 0 and
``2**bitdepth-1``. For example, 8-bit images have values
between 0 and 255. PNG only stores images with bit depths of
1,2,4,8, or 16. When `bitdepth` is not one of these values,
the next highest valid bit depth is selected, and an ``sBIT``
(significant bits) chunk is generated that specifies the
original precision of the source image. In this case the
supplied pixel values will be rescaled to fit the range of
the selected bit depth.
The details of which bit depth / colour model combinations the
PNG file format supports directly, are somewhat arcane
(refer to the PNG specification for full details). Briefly:
"small" bit depths (1,2,4) are only allowed with greyscale and
colour mapped images; colour mapped images cannot have bit depth
16.
For colour mapped images (in other words, when the `palette`
argument is specified) the `bitdepth` argument must match one of
the valid PNG bit depths: 1, 2, 4, or 8. (It is valid to have a
PNG image with a palette and an ``sBIT`` chunk, but the meaning
is slightly different; it would be awkward to press the
`bitdepth` argument into service for this.)
The `palette` option, when specified, causes a colour mapped
image to be created: the PNG colour type is set to 3; greyscale
must not be set; alpha must not be set; transparent must not be
set; the bit depth must be 1,2,4, or 8. When a colour mapped
image is created, the pixel values are palette indexes and
the `bitdepth` argument specifies the size of these indexes
(not the size of the colour values in the palette).
The palette argument value should be a sequence of 3- or
4-tuples. 3-tuples specify RGB palette entries; 4-tuples
specify RGBA palette entries. If both 4-tuples and 3-tuples
appear in the sequence then all the 4-tuples must come
before all the 3-tuples. A ``PLTE`` chunk is created; if there
are 4-tuples then a ``tRNS`` chunk is created as well. The
``PLTE`` chunk will contain all the RGB triples in the same
sequence; the ``tRNS`` chunk will contain the alpha channel for
all the 4-tuples, in the same sequence. Palette entries
are always 8-bit.
If specified, the `transparent` and `background` parameters must
be a tuple with three integer values for red, green, blue, or
a simple integer (or singleton tuple) for a greyscale image.
If specified, the `gamma` parameter must be a positive number
(generally, a float). A ``gAMA`` chunk will be created.
Note that this will not change the values of the pixels as
they appear in the PNG file, they are assumed to have already
been converted appropriately for the gamma specified.
The `compression` argument specifies the compression level to
be used by the ``zlib`` module. Values from 1 to 9 specify
compression, with 9 being "more compressed" (usually smaller
and slower, but it doesn't always work out that way). 0 means
no compression. -1 and ``None`` both mean that the default
level of compression will be picked by the ``zlib`` module
(which is generally acceptable).
If `interlace` is true then an interlaced image is created
(using PNG's so far only interlace method, *Adam7*). This does
not affect how the pixels should be presented to the encoder,
rather it changes how they are arranged into the PNG file.
On slow connexions interlaced images can be partially decoded
by the browser to give a rough view of the image that is
successively refined as more image data appears.
.. note ::
Enabling the `interlace` option requires the entire image
to be processed in working memory.
`chunk_limit` is used to limit the amount of memory used whilst
compressing the image. In order to avoid using large amounts of
memory, multiple ``IDAT`` chunks may be created.
"""
# At the moment the `planes` argument is ignored;
# its purpose is to act as a dummy so that
# ``Writer(x, y, **info)`` works, where `info` is a dictionary
# returned by Reader.read and friends.
# Ditto for `colormap`.
width, height = check_sizes(size, width, height)
del size
if width <= 0 or height <= 0:
raise ValueError("width and height must be greater than zero")
if not isinteger(width) or not isinteger(height):
raise ValueError("width and height must be integers")
# http://www.w3.org/TR/PNG/#7Integers-and-byte-order
if width > 2**32-1 or height > 2**32-1:
raise ValueError("width and height cannot exceed 2**32-1")
if alpha and transparent is not None:
raise ValueError(
"transparent colour not allowed with alpha channel")
if bytes_per_sample is not None:
warnings.warn('please use bitdepth instead of bytes_per_sample',
DeprecationWarning)
if bytes_per_sample not in (0.125, 0.25, 0.5, 1, 2):
raise ValueError(
"bytes per sample must be .125, .25, .5, 1, or 2")
bitdepth = int(8*bytes_per_sample)
del bytes_per_sample
if not isinteger(bitdepth) or bitdepth < 1 or 16 < bitdepth:
raise ValueError("bitdepth (%r) must be a positive integer <= 16" %
bitdepth)
self.rescale = None
if palette:
if bitdepth not in (1,2,4,8):
raise ValueError("with palette, bitdepth must be 1, 2, 4, or 8")
if transparent is not None:
raise ValueError("transparent and palette not compatible")
if alpha:
raise ValueError("alpha and palette not compatible")
if greyscale:
raise ValueError("greyscale and palette not compatible")
else:
# No palette, check for sBIT chunk generation.
if alpha or not greyscale:
if bitdepth not in (8,16):
targetbitdepth = (8,16)[bitdepth > 8]
self.rescale = (bitdepth, targetbitdepth)
bitdepth = targetbitdepth
del targetbitdepth
else:
assert greyscale
assert not alpha
if bitdepth not in (1,2,4,8,16):
if bitdepth > 8:
targetbitdepth = 16
elif bitdepth == 3:
targetbitdepth = 4
else:
assert bitdepth in (5,6,7)
targetbitdepth = 8
self.rescale = (bitdepth, targetbitdepth)
bitdepth = targetbitdepth
del targetbitdepth
if bitdepth < 8 and (alpha or not greyscale and not palette):
raise ValueError(
"bitdepth < 8 only permitted with greyscale or palette")
if bitdepth > 8 and palette:
raise ValueError(
"bit depth must be 8 or less for images with palette")
transparent = check_color(transparent, greyscale, 'transparent')
background = check_color(background, greyscale, 'background')
# It's important that the true boolean values (greyscale, alpha,
# colormap, interlace) are converted to bool because Iverson's
# convention is relied upon later on.
self.width = width
self.height = height
self.transparent = transparent
self.background = background
self.gamma = gamma
self.greyscale = bool(greyscale)
self.alpha = bool(alpha)
self.colormap = bool(palette)
self.bitdepth = int(bitdepth)
self.compression = compression
self.chunk_limit = chunk_limit
self.interlace = bool(interlace)
self.palette = check_palette(palette)
self.color_type = 4*self.alpha + 2*(not greyscale) + 1*self.colormap
assert self.color_type in (0,2,3,4,6)
self.color_planes = (3,1)[self.greyscale or self.colormap]
self.planes = self.color_planes + self.alpha
# :todo: fix for bitdepth < 8
self.psize = (self.bitdepth/8) * self.planes
def make_palette(self):
"""Create the byte sequences for a ``PLTE`` and if necessary a
``tRNS`` chunk. Returned as a pair (*p*, *t*). *t* will be
``None`` if no ``tRNS`` chunk is necessary.
"""
p = array('B')
t = array('B')
for x in self.palette:
p.extend(x[0:3])
if len(x) > 3:
t.append(x[3])
p = tostring(p)
t = tostring(t)
if t:
return p,t
return p,None
def write(self, outfile, rows):
"""Write a PNG image to the output file. `rows` should be
an iterable that yields each row in boxed row flat pixel
format. The rows should be the rows of the original image,
so there should be ``self.height`` rows of ``self.width *
self.planes`` values. If `interlace` is specified (when
creating the instance), then an interlaced PNG file will
be written. Supply the rows in the normal image order;
the interlacing is carried out internally.
.. note ::
Interlacing will require the entire image to be in working
memory.
"""
if self.interlace:
fmt = 'BH'[self.bitdepth > 8]
a = array(fmt, itertools.chain(*rows))
return self.write_array(outfile, a)
nrows = self.write_passes(outfile, rows)
if nrows != self.height:
raise ValueError(
"rows supplied (%d) does not match height (%d)" %
(nrows, self.height))
def write_passes(self, outfile, rows, packed=False):
"""
Write a PNG image to the output file.
Most users are expected to find the :meth:`write` or
:meth:`write_array` method more convenient.
The rows should be given to this method in the order that
they appear in the output file. For straightlaced images,
this is the usual top to bottom ordering, but for interlaced
images the rows should have already been interlaced before
passing them to this function.
`rows` should be an iterable that yields each row. When
`packed` is ``False`` the rows should be in boxed row flat pixel
format; when `packed` is ``True`` each row should be a packed
sequence of bytes.
"""
# http://www.w3.org/TR/PNG/#5PNG-file-signature
outfile.write(_signature)
# http://www.w3.org/TR/PNG/#11IHDR
write_chunk(outfile, 'IHDR',
struct.pack("!2I5B", self.width, self.height,
self.bitdepth, self.color_type,
0, 0, self.interlace))
# See :chunk:order
# http://www.w3.org/TR/PNG/#11gAMA
if self.gamma is not None:
write_chunk(outfile, 'gAMA',
struct.pack("!L", int(round(self.gamma*1e5))))
# See :chunk:order
# http://www.w3.org/TR/PNG/#11sBIT
if self.rescale:
write_chunk(outfile, 'sBIT',
struct.pack('%dB' % self.planes,
*[self.rescale[0]]*self.planes))
# :chunk:order: Without a palette (PLTE chunk), ordering is
# relatively relaxed. With one, gAMA chunk must precede PLTE
# chunk which must precede tRNS and bKGD.
# See http://www.w3.org/TR/PNG/#5ChunkOrdering
if self.palette:
p,t = self.make_palette()
write_chunk(outfile, 'PLTE', p)
if t:
# tRNS chunk is optional. Only needed if palette entries
# have alpha.
write_chunk(outfile, 'tRNS', t)
# http://www.w3.org/TR/PNG/#11tRNS
if self.transparent is not None:
if self.greyscale:
write_chunk(outfile, 'tRNS',
struct.pack("!1H", *self.transparent))
else:
write_chunk(outfile, 'tRNS',
struct.pack("!3H", *self.transparent))
# http://www.w3.org/TR/PNG/#11bKGD
if self.background is not None:
if self.greyscale:
write_chunk(outfile, 'bKGD',
struct.pack("!1H", *self.background))
else:
write_chunk(outfile, 'bKGD',
struct.pack("!3H", *self.background))
# http://www.w3.org/TR/PNG/#11IDAT
if self.compression is not None:
compressor = zlib.compressobj(self.compression)
else:
compressor = zlib.compressobj()
# Choose an extend function based on the bitdepth. The extend
# function packs/decomposes the pixel values into bytes and
# stuffs them onto the data array.
data = array('B')
if self.bitdepth == 8 or packed:
extend = data.extend
elif self.bitdepth == 16:
# Decompose into bytes
def extend(sl):
fmt = '!%dH' % len(sl)
data.extend(array('B', struct.pack(fmt, *sl)))
else:
# Pack into bytes
assert self.bitdepth < 8
# samples per byte
spb = int(8/self.bitdepth)
def extend(sl):
a = array('B', sl)
# Adding padding bytes so we can group into a whole
# number of spb-tuples.
l = float(len(a))
extra = math.ceil(l / float(spb))*spb - l
a.extend([0]*int(extra))
# Pack into bytes
l = group(a, spb)
l = map(lambda e: reduce(lambda x,y:
(x << self.bitdepth) + y, e), l)
data.extend(l)
if self.rescale:
oldextend = extend
factor = \
float(2**self.rescale[1]-1) / float(2**self.rescale[0]-1)
def extend(sl):
oldextend(map(lambda x: int(round(factor*x)), sl))
# Build the first row, testing mostly to see if we need to
# change the extend function to cope with NumPy integer types
# (they cause our ordinary definition of extend to fail, so we
# wrap it). See
# http://code.google.com/p/pypng/issues/detail?id=44
enumrows = enumerate(rows)
del rows
# First row's filter type.
data.append(0)
# :todo: Certain exceptions in the call to ``next()`` or the
# following try would indicate no row data supplied.
# Should catch.
i, row = next(enumrows)
try:
# If this fails...
extend(row)
except:
# ... try a version that converts the values to int first.
# Not only does this work for the (slightly broken) NumPy
# types, there are probably lots of other, unknown, "nearly"
# int types it works for.
def wrapmapint(f):
return lambda sl: f(map(int, sl))
extend = wrapmapint(extend)
del wrapmapint
extend(row)
for i,row in enumrows:
# Add "None" filter type. Currently, it's essential that
# this filter type be used for every scanline as we do not
# mark the first row of a reduced pass image; that means we
# could accidentally compute the wrong filtered scanline if
# we used "up", "average", or "paeth" on such a line.
data.append(0)
extend(row)
if len(data) > self.chunk_limit:
compressed = compressor.compress(tostring(data))
if len(compressed):
write_chunk(outfile, 'IDAT', compressed)
# Because of our very witty definition of ``extend``,
# above, we must re-use the same ``data`` object. Hence
# we use ``del`` to empty this one, rather than create a
# fresh one (which would be my natural FP instinct).
del data[:]
if len(data):
compressed = compressor.compress(tostring(data))
else:
compressed = strtobytes('')
flushed = compressor.flush()
if len(compressed) or len(flushed):
write_chunk(outfile, 'IDAT', compressed + flushed)
# http://www.w3.org/TR/PNG/#11IEND
write_chunk(outfile, 'IEND')
return i+1
def write_array(self, outfile, pixels):
"""
Write an array in flat row flat pixel format as a PNG file on
the output file. See also :meth:`write` method.
"""
if self.interlace:
self.write_passes(outfile, self.array_scanlines_interlace(pixels))
else:
self.write_passes(outfile, self.array_scanlines(pixels))
def write_packed(self, outfile, rows):
"""
Write PNG file to `outfile`. The pixel data comes from `rows`
which should be in boxed row packed format. Each row should be
a sequence of packed bytes.
Technically, this method does work for interlaced images but it
is best avoided. For interlaced images, the rows should be
presented in the order that they appear in the file.
This method should not be used when the source image bit depth
is not one naturally supported by PNG; the bit depth should be
1, 2, 4, 8, or 16.
"""
if self.rescale:
raise Error("write_packed method not suitable for bit depth %d" %
self.rescale[0])
return self.write_passes(outfile, rows, packed=True)
def convert_pnm(self, infile, outfile):
"""
Convert a PNM file containing raw pixel data into a PNG file
with the parameters set in the writer object. Works for
(binary) PGM, PPM, and PAM formats.
"""
if self.interlace:
pixels = array('B')
pixels.fromfile(infile,
(self.bitdepth/8) * self.color_planes *
self.width * self.height)
self.write_passes(outfile, self.array_scanlines_interlace(pixels))
else:
self.write_passes(outfile, self.file_scanlines(infile))
def convert_ppm_and_pgm(self, ppmfile, pgmfile, outfile):
"""
Convert a PPM and PGM file containing raw pixel data into a
PNG outfile with the parameters set in the writer object.
"""
pixels = array('B')
pixels.fromfile(ppmfile,
(self.bitdepth/8) * self.color_planes *
self.width * self.height)
apixels = array('B')
apixels.fromfile(pgmfile,
(self.bitdepth/8) *
self.width * self.height)
pixels = interleave_planes(pixels, apixels,
(self.bitdepth/8) * self.color_planes,
(self.bitdepth/8))
if self.interlace:
self.write_passes(outfile, self.array_scanlines_interlace(pixels))
else:
self.write_passes(outfile, self.array_scanlines(pixels))
def file_scanlines(self, infile):
"""
Generates boxed rows in flat pixel format, from the input file
`infile`. It assumes that the input file is in a "Netpbm-like"
binary format, and is positioned at the beginning of the first
pixel. The number of pixels to read is taken from the image
dimensions (`width`, `height`, `planes`) and the number of bytes
per value is implied by the image `bitdepth`.
"""
# Values per row
vpr = self.width * self.planes
row_bytes = vpr
if self.bitdepth > 8:
assert self.bitdepth == 16
row_bytes *= 2
fmt = '>%dH' % vpr
def line():
return array('H', struct.unpack(fmt, infile.read(row_bytes)))
else:
def line():
scanline = array('B', infile.read(row_bytes))
return scanline
for y in range(self.height):
yield line()
def array_scanlines(self, pixels):
"""
Generates boxed rows (flat pixels) from flat rows (flat pixels)
in an array.
"""
# Values per row
vpr = self.width * self.planes
stop = 0
for y in range(self.height):
start = stop
stop = start + vpr
yield pixels[start:stop]
def array_scanlines_interlace(self, pixels):
"""
Generator for interlaced scanlines from an array. `pixels` is
the full source image in flat row flat pixel format. The
generator yields each scanline of the reduced passes in turn, in
boxed row flat pixel format.
"""
# http://www.w3.org/TR/PNG/#8InterlaceMethods
# Array type.
fmt = 'BH'[self.bitdepth > 8]
# Value per row
vpr = self.width * self.planes
for xstart, ystart, xstep, ystep in _adam7:
if xstart >= self.width:
continue
# Pixels per row (of reduced image)
ppr = int(math.ceil((self.width-xstart)/float(xstep)))
# number of values in reduced image row.
row_len = ppr*self.planes
for y in range(ystart, self.height, ystep):
if xstep == 1:
offset = y * vpr
yield pixels[offset:offset+vpr]
else:
row = array(fmt)
# There's no easier way to set the length of an array
row.extend(pixels[0:row_len])
offset = y * vpr + xstart * self.planes
end_offset = (y+1) * vpr
skip = self.planes * xstep
for i in range(self.planes):
row[i::self.planes] = \
pixels[offset+i:end_offset:skip]
yield row
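# Illustrative sketch only (not part of the original library): one way the
# write_packed method above might be driven. For a 1-bit greyscale image
# 8 pixels wide, each row is exactly one packed byte, with the most
# significant bit being the leftmost pixel.
def _example_write_packed(path='packed.png'):
    w = Writer(width=8, height=2, greyscale=True, bitdepth=1)
    f = open(path, 'wb')
    try:
        # Two rows: 00001111 and 11110000.
        w.write_packed(f, [array('B', [0x0f]), array('B', [0xf0])])
    finally:
        f.close()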
def write_chunk(outfile, tag, data=strtobytes('')):
"""
Write a PNG chunk to the output file, including length and
checksum.
"""
# http://www.w3.org/TR/PNG/#5Chunk-layout
outfile.write(struct.pack("!I", len(data)))
tag = strtobytes(tag)
outfile.write(tag)
outfile.write(data)
checksum = zlib.crc32(tag)
checksum = zlib.crc32(data, checksum)
checksum &= 2**32-1
outfile.write(struct.pack("!I", checksum))
def write_chunks(out, chunks):
"""Create a PNG file by writing out the chunks."""
out.write(_signature)
for chunk in chunks:
write_chunk(out, *chunk)
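# Illustrative sketch only (not part of the original library): write_chunk and
# write_chunks are enough to assemble a complete 1x1 greyscale PNG by hand.
# The IDAT payload is a single scanline: a filter-type byte of 0 followed by
# one 8-bit sample.
def _example_write_chunks(path='tiny.png'):
    ihdr = struct.pack('!2I5B', 1, 1, 8, 0, 0, 0, 0)
    idat = zlib.compress(strtobytes('\x00\xff'))
    out = open(path, 'wb')
    try:
        write_chunks(out, [('IHDR', ihdr),
                           ('IDAT', idat),
                           ('IEND', strtobytes(''))])
    finally:
        out.close()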
def filter_scanline(type, line, fo, prev=None):
"""Apply a scanline filter to a scanline. `type` specifies the
filter type (0 to 4); `line` specifies the current (unfiltered)
scanline as a sequence of bytes; `prev` specifies the previous
(unfiltered) scanline as a sequence of bytes. `fo` specifies the
    filter offset; normally this is the size of a pixel in bytes (the number
of bytes per sample times the number of channels), but when this is
< 1 (for bit depths < 8) then the filter offset is 1.
"""
assert 0 <= type < 5
# The output array. Which, pathetically, we extend one-byte at a
# time (fortunately this is linear).
out = array('B', [type])
def sub():
ai = -fo
for x in line:
if ai >= 0:
x = (x - line[ai]) & 0xff
out.append(x)
ai += 1
def up():
for i,x in enumerate(line):
x = (x - prev[i]) & 0xff
out.append(x)
def average():
ai = -fo
for i,x in enumerate(line):
if ai >= 0:
x = (x - ((line[ai] + prev[i]) >> 1)) & 0xff
else:
x = (x - (prev[i] >> 1)) & 0xff
out.append(x)
ai += 1
def paeth():
# http://www.w3.org/TR/PNG/#9Filter-type-4-Paeth
ai = -fo # also used for ci
for i,x in enumerate(line):
a = 0
b = prev[i]
c = 0
if ai >= 0:
a = line[ai]
c = prev[ai]
p = a + b - c
pa = abs(p - a)
pb = abs(p - b)
pc = abs(p - c)
if pa <= pb and pa <= pc:
Pr = a
elif pb <= pc:
Pr = b
else:
Pr = c
x = (x - Pr) & 0xff
out.append(x)
ai += 1
if not prev:
# We're on the first line. Some of the filters can be reduced
# to simpler cases which makes handling the line "off the top"
# of the image simpler. "up" becomes "none"; "paeth" becomes
# "left" (non-trivial, but true). "average" needs to be handled
# specially.
if type == 2: # "up"
type = 0
elif type == 3:
prev = [0]*len(line)
elif type == 4: # "paeth"
type = 1
if type == 0:
out.extend(line)
elif type == 1:
sub()
elif type == 2:
up()
elif type == 3:
average()
else: # type == 4
paeth()
return out
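# Illustrative sketch only (not part of the original library): filtering a
# 3-pixel 8-bit greyscale scanline with the "sub" filter (type 1). The filter
# offset `fo` is 1 because each pixel occupies one byte.
def _example_filter_scanline():
    line = array('B', [10, 20, 30])
    prev = array('B', [0, 0, 0])
    # Result starts with the filter-type byte: array('B', [1, 10, 10, 10]).
    return filter_scanline(1, line, 1, prev)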
def from_array(a, mode=None, info={}):
"""Create a PNG :class:`Image` object from a 2- or 3-dimensional
array. One application of this function is easy PIL-style saving:
``png.from_array(pixels, 'L').save('foo.png')``.
    .. note ::
The use of the term *3-dimensional* is for marketing purposes
only. It doesn't actually work. Please bear with us. Meanwhile
enjoy the complimentary snacks (on request) and please use a
2-dimensional array.
Unless they are specified using the *info* parameter, the PNG's
height and width are taken from the array size. For a 3 dimensional
array the first axis is the height; the second axis is the width;
and the third axis is the channel number. Thus an RGB image that is
16 pixels high and 8 wide will use an array that is 16x8x3. For 2
dimensional arrays the first axis is the height, but the second axis
is ``width*channels``, so an RGB image that is 16 pixels high and 8
wide will use a 2-dimensional array that is 16x24 (each row will be
8*3==24 sample values).
*mode* is a string that specifies the image colour format in a
PIL-style mode. It can be:
``'L'``
greyscale (1 channel)
``'LA'``
greyscale with alpha (2 channel)
``'RGB'``
colour image (3 channel)
``'RGBA'``
colour image with alpha (4 channel)
The mode string can also specify the bit depth (overriding how this
function normally derives the bit depth, see below). Appending
``';16'`` to the mode will cause the PNG to be 16 bits per channel;
any decimal from 1 to 16 can be used to specify the bit depth.
When a 2-dimensional array is used *mode* determines how many
channels the image has, and so allows the width to be derived from
the second array dimension.
The array is expected to be a ``numpy`` array, but it can be any
suitable Python sequence. For example, a list of lists can be used:
``png.from_array([[0, 255, 0], [255, 0, 255]], 'L')``. The exact
rules are: ``len(a)`` gives the first dimension, height;
``len(a[0])`` gives the second dimension; ``len(a[0][0])`` gives the
third dimension, unless an exception is raised in which case a
2-dimensional array is assumed. It's slightly more complicated than
that because an iterator of rows can be used, and it all still
works. Using an iterator allows data to be streamed efficiently.
The bit depth of the PNG is normally taken from the array element's
datatype (but if *mode* specifies a bitdepth then that is used
instead). The array element's datatype is determined in a way which
is supposed to work both for ``numpy`` arrays and for Python
``array.array`` objects. A 1 byte datatype will give a bit depth of
8, a 2 byte datatype will give a bit depth of 16. If the datatype
does not have an implicit size, for example it is a plain Python
list of lists, as above, then a default of 8 is used.
The *info* parameter is a dictionary that can be used to specify
metadata (in the same style as the arguments to the
:class:``png.Writer`` class). For this function the keys that are
useful are:
height
overrides the height derived from the array dimensions and allows
*a* to be an iterable.
width
overrides the width derived from the array dimensions.
bitdepth
overrides the bit depth derived from the element datatype (but
must match *mode* if that also specifies a bit depth).
Generally anything specified in the
*info* dictionary will override any implicit choices that this
function would otherwise make, but must match any explicit ones.
For example, if the *info* dictionary has a ``greyscale`` key then
this must be true when mode is ``'L'`` or ``'LA'`` and false when
mode is ``'RGB'`` or ``'RGBA'``.
"""
# We abuse the *info* parameter by modifying it. Take a copy here.
# (Also typechecks *info* to some extent).
info = dict(info)
# Syntax check mode string.
bitdepth = None
try:
# Assign the 'L' or 'RGBA' part to `gotmode`.
if mode.startswith('L'):
gotmode = 'L'
mode = mode[1:]
elif mode.startswith('RGB'):
gotmode = 'RGB'
mode = mode[3:]
else:
raise Error()
if mode.startswith('A'):
gotmode += 'A'
mode = mode[1:]
# Skip any optional ';'
while mode.startswith(';'):
mode = mode[1:]
# Parse optional bitdepth
if mode:
try:
bitdepth = int(mode)
except (TypeError, ValueError):
raise Error()
except Error:
raise Error("mode string should be 'RGB' or 'L;16' or similar.")
mode = gotmode
# Get bitdepth from *mode* if possible.
if bitdepth:
if info.get('bitdepth') and bitdepth != info['bitdepth']:
raise Error("mode bitdepth (%d) should match info bitdepth (%d)." %
(bitdepth, info['bitdepth']))
info['bitdepth'] = bitdepth
# Fill in and/or check entries in *info*.
# Dimensions.
if 'size' in info:
# Check width, height, size all match where used.
for dimension,axis in [('width', 0), ('height', 1)]:
if dimension in info:
if info[dimension] != info['size'][axis]:
raise Error(
"info[%r] should match info['size'][%r]." %
(dimension, axis))
info['width'],info['height'] = info['size']
if 'height' not in info:
try:
l = len(a)
except TypeError:
raise Error(
"len(a) does not work, supply info['height'] instead.")
info['height'] = l
# Colour format.
if 'greyscale' in info:
if bool(info['greyscale']) != ('L' in mode):
raise Error("info['greyscale'] should match mode.")
info['greyscale'] = 'L' in mode
if 'alpha' in info:
if bool(info['alpha']) != ('A' in mode):
raise Error("info['alpha'] should match mode.")
info['alpha'] = 'A' in mode
planes = len(mode)
if 'planes' in info:
if info['planes'] != planes:
raise Error("info['planes'] should match mode.")
    # In order to work out whether the array is 2D or 3D we need its
# first row, which requires that we take a copy of its iterator.
# We may also need the first row to derive width and bitdepth.
a,t = itertools.tee(a)
row = t.next()
del t
try:
row[0][0]
threed = True
testelement = row[0]
except (IndexError, TypeError):
threed = False
testelement = row
if 'width' not in info:
if threed:
width = len(row)
else:
width = len(row) // planes
info['width'] = width
# Not implemented yet
assert not threed
if 'bitdepth' not in info:
try:
dtype = testelement.dtype
# goto the "else:" clause. Sorry.
except AttributeError:
try:
# Try a Python array.array.
bitdepth = 8 * testelement.itemsize
except AttributeError:
# We can't determine it from the array element's
# datatype, use a default of 8.
bitdepth = 8
else:
# If we got here without exception, we now assume that
# the array is a numpy array.
if dtype.kind == 'b':
bitdepth = 1
else:
bitdepth = 8 * dtype.itemsize
info['bitdepth'] = bitdepth
for thing in 'width height bitdepth greyscale alpha'.split():
assert thing in info
return Image(a, info)
# So that refugees from PIL feel more at home. Not documented.
fromarray = from_array
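# Illustrative sketch only (not part of the original library): when the rows
# are supplied as an iterator, len(a) is unavailable, so the height (and here
# also the width and bitdepth) must be supplied through the *info* dictionary,
# as described in the from_array docstring above.
def _example_from_array_stream():
    rows = iter([[0, 255], [255, 0]])
    return from_array(rows, 'L',
                      info={'height': 2, 'width': 2, 'bitdepth': 8})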
class Image:
"""A PNG image. You can create an :class:`Image` object from
an array of pixels by calling :meth:`png.from_array`. It can be
saved to disk with the :meth:`save` method.
"""
def __init__(self, rows, info):
"""
.. note ::
The constructor is not public. Please do not call it.
"""
self.rows = rows
self.info = info
def save(self, file):
"""Save the image to *file*. If *file* looks like an open file
descriptor then it is used, otherwise it is treated as a
filename and a fresh file is opened.
In general, you can only call this method once; after it has
been called the first time and the PNG image has been saved, the
source data will have been streamed, and cannot be streamed
again.
"""
w = Writer(**self.info)
try:
file.write
def close(): pass
except AttributeError:
file = open(file, 'wb')
def close(): file.close()
try:
w.write(file, self.rows)
finally:
close()
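# Illustrative sketch only (not part of the original library): Image.save
# accepts either a filename or any object with a ``write`` method; a file-like
# object is written to directly and is not closed.
def _example_image_save_fileobj():
    from io import BytesIO
    buf = BytesIO()
    from_array([[0, 255], [255, 0]], 'L').save(buf)
    return buf.getvalue()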
class _readable:
"""
A simple file-like interface for strings and arrays.
"""
def __init__(self, buf):
self.buf = buf
self.offset = 0
def read(self, n):
r = self.buf[self.offset:self.offset+n]
if isarray(r):
r = r.tostring()
self.offset += n
return r
class Reader:
"""
PNG decoder in pure Python.
"""
def __init__(self, _guess=None, **kw):
"""
Create a PNG decoder object.
The constructor expects exactly one keyword argument. If you
supply a positional argument instead, it will guess the input
type. You can choose among the following keyword arguments:
filename
Name of input file (a PNG file).
file
A file-like object (object with a read() method).
bytes
``array`` or ``string`` with PNG data.
"""
if ((_guess is not None and len(kw) != 0) or
(_guess is None and len(kw) != 1)):
raise TypeError("Reader() takes exactly 1 argument")
# Will be the first 8 bytes, later on. See validate_signature.
self.signature = None
self.transparent = None
# A pair of (len,type) if a chunk has been read but its data and
# checksum have not (in other words the file position is just
# past the 4 bytes that specify the chunk type). See preamble
# method for how this is used.
self.atchunk = None
if _guess is not None:
if isarray(_guess):
kw["bytes"] = _guess
elif isinstance(_guess, string_types):
kw["filename"] = _guess
elif hasattr(_guess, 'read'):
kw["file"] = _guess
if "filename" in kw:
self.file = open(kw["filename"], "rb")
elif "file" in kw:
self.file = kw["file"]
elif "bytes" in kw:
self.file = _readable(kw["bytes"])
else:
raise TypeError("expecting filename, file or bytes array")
def chunk(self, seek=None, lenient=False):
"""
Read the next PNG chunk from the input file; returns a
(*type*,*data*) tuple. *type* is the chunk's type as a string
(all PNG chunk types are 4 characters long). *data* is the
chunk's data content, as a string.
If the optional `seek` argument is
specified then it will keep reading chunks until it either runs
out of file or finds the type specified by the argument. Note
that in general the order of chunks in PNGs is unspecified, so
using `seek` can cause you to miss chunks.
If the optional `lenient` argument evaluates to True,
checksum failures will raise warnings rather than exceptions.
"""
self.validate_signature()
while True:
# http://www.w3.org/TR/PNG/#5Chunk-layout
if not self.atchunk:
self.atchunk = self.chunklentype()
length,type = self.atchunk
self.atchunk = None
data = self.file.read(length)
if len(data) != length:
raise ChunkError('Chunk %s too short for required %i octets.'
% (type, length))
checksum = self.file.read(4)
if len(checksum) != 4:
                raise ValueError('Chunk %s too short for checksum.' % type)
if seek and type != seek:
continue
verify = zlib.crc32(strtobytes(type))
verify = zlib.crc32(data, verify)
# Whether the output from zlib.crc32 is signed or not varies
# according to hideous implementation details, see
# http://bugs.python.org/issue1202 .
# We coerce it to be positive here (in a way which works on
# Python 2.3 and older).
verify &= 2**32 - 1
verify = struct.pack('!I', verify)
if checksum != verify:
(a, ) = struct.unpack('!I', checksum)
(b, ) = struct.unpack('!I', verify)
message = "Checksum error in %s chunk: 0x%08X != 0x%08X." % (type, a, b)
if lenient:
warnings.warn(message, RuntimeWarning)
else:
raise ChunkError(message)
return type, data
def chunks(self):
"""Return an iterator that will yield each chunk as a
(*chunktype*, *content*) pair.
"""
while True:
t,v = self.chunk()
yield t,v
if t == 'IEND':
break
def undo_filter(self, filter_type, scanline, previous):
"""Undo the filter for a scanline. `scanline` is a sequence of
bytes that does not include the initial filter type byte.
`previous` is decoded previous scanline (for straightlaced
images this is the previous pixel row, but for interlaced
images, it is the previous scanline in the reduced image, which
in general is not the previous pixel row in the final image).
When there is no previous scanline (the first row of a
straightlaced image, or the first row in one of the passes in an
interlaced image), then this argument should be ``None``.
The scanline will have the effects of filtering removed, and the
result will be returned as a fresh sequence of bytes.
"""
# :todo: Would it be better to update scanline in place?
# Yes, with the Cython extension making the undo_filter fast,
# updating scanline inplace makes the code 3 times faster
# (reading 50 images of 800x800 went from 40s to 16s)
result = scanline
if filter_type == 0:
return result
if filter_type not in (1,2,3,4):
raise FormatError('Invalid PNG Filter Type.'
' See http://www.w3.org/TR/2003/REC-PNG-20031110/#9Filters .')
# Filter unit. The stride from one pixel to the corresponding
# byte from the previous pixel. Normally this is the pixel
# size in bytes, but when this is smaller than 1, the previous
# byte is used instead.
fu = max(1, self.psize)
# For the first line of a pass, synthesize a dummy previous
# line. An alternative approach would be to observe that on the
# first line 'up' is the same as 'null', 'paeth' is the same
# as 'sub', with only 'average' requiring any special case.
if not previous:
previous = array('B', [0]*len(scanline))
def sub():
"""Undo sub filter."""
ai = 0
# Loop starts at index fu. Observe that the initial part
# of the result is already filled in correctly with
# scanline.
for i in range(fu, len(result)):
x = scanline[i]
a = result[ai]
result[i] = (x + a) & 0xff
ai += 1
def up():
"""Undo up filter."""
for i in range(len(result)):
x = scanline[i]
b = previous[i]
result[i] = (x + b) & 0xff
def average():
"""Undo average filter."""
ai = -fu
for i in range(len(result)):
x = scanline[i]
if ai < 0:
a = 0
else:
a = result[ai]
b = previous[i]
result[i] = (x + ((a + b) >> 1)) & 0xff
ai += 1
def paeth():
"""Undo Paeth filter."""
# Also used for ci.
ai = -fu
for i in range(len(result)):
x = scanline[i]
if ai < 0:
a = c = 0
else:
a = result[ai]
c = previous[ai]
b = previous[i]
p = a + b - c
pa = abs(p - a)
pb = abs(p - b)
pc = abs(p - c)
if pa <= pb and pa <= pc:
pr = a
elif pb <= pc:
pr = b
else:
pr = c
result[i] = (x + pr) & 0xff
ai += 1
# Call appropriate filter algorithm. Note that 0 has already
# been dealt with.
(None,
pngfilters.undo_filter_sub,
pngfilters.undo_filter_up,
pngfilters.undo_filter_average,
pngfilters.undo_filter_paeth)[filter_type](fu, scanline, previous, result)
return result
def deinterlace(self, raw):
"""
Read raw pixel data, undo filters, deinterlace, and flatten.
Return in flat row flat pixel format.
"""
# Values per row (of the target image)
vpr = self.width * self.planes
# Make a result array, and make it big enough. Interleaving
# writes to the output array randomly (well, not quite), so the
# entire output array must be in memory.
fmt = 'BH'[self.bitdepth > 8]
a = array(fmt, [0]*vpr*self.height)
source_offset = 0
for xstart, ystart, xstep, ystep in _adam7:
if xstart >= self.width:
continue
# The previous (reconstructed) scanline. None at the
# beginning of a pass to indicate that there is no previous
# line.
recon = None
# Pixels per row (reduced pass image)
ppr = int(math.ceil((self.width-xstart)/float(xstep)))
# Row size in bytes for this pass.
row_size = int(math.ceil(self.psize * ppr))
for y in range(ystart, self.height, ystep):
filter_type = raw[source_offset]
source_offset += 1
scanline = raw[source_offset:source_offset+row_size]
source_offset += row_size
recon = self.undo_filter(filter_type, scanline, recon)
# Convert so that there is one element per pixel value
flat = self.serialtoflat(recon, ppr)
if xstep == 1:
assert xstart == 0
offset = y * vpr
a[offset:offset+vpr] = flat
else:
offset = y * vpr + xstart * self.planes
end_offset = (y+1) * vpr
skip = self.planes * xstep
for i in range(self.planes):
a[offset+i:end_offset:skip] = \
flat[i::self.planes]
return a
def iterboxed(self, rows):
"""Iterator that yields each scanline in boxed row flat pixel
format. `rows` should be an iterator that yields the bytes of
each row in turn.
"""
def asvalues(raw):
"""Convert a row of raw bytes into a flat row. Result will
be a freshly allocated object, not shared with
argument.
"""
if self.bitdepth == 8:
return array('B', raw)
if self.bitdepth == 16:
raw = tostring(raw)
return array('H', struct.unpack('!%dH' % (len(raw)//2), raw))
assert self.bitdepth < 8
width = self.width
# Samples per byte
spb = 8//self.bitdepth
out = array('B')
mask = 2**self.bitdepth - 1
shifts = map(self.bitdepth.__mul__, reversed(range(spb)))
for o in raw:
out.extend(map(lambda i: mask&(o>>i), shifts))
return out[:width]
return imap(asvalues, rows)
def serialtoflat(self, bytes, width=None):
"""Convert serial format (byte stream) pixel data to flat row
flat pixel.
"""
if self.bitdepth == 8:
return bytes
if self.bitdepth == 16:
bytes = tostring(bytes)
return array('H',
struct.unpack('!%dH' % (len(bytes)//2), bytes))
assert self.bitdepth < 8
if width is None:
width = self.width
# Samples per byte
spb = 8//self.bitdepth
out = array('B')
mask = 2**self.bitdepth - 1
shifts = map(self.bitdepth.__mul__, reversed(range(spb)))
l = width
for o in bytes:
out.extend([(mask&(o>>s)) for s in shifts][:l])
l -= spb
if l <= 0:
l = width
return out
def iterstraight(self, raw):
"""Iterator that undoes the effect of filtering, and yields
each row in serialised format (as a sequence of bytes).
Assumes input is straightlaced. `raw` should be an iterable
that yields the raw bytes in chunks of arbitrary size.
"""
# length of row, in bytes
rb = self.row_bytes
a = array('B')
# The previous (reconstructed) scanline. None indicates first
# line of image.
recon = None
for some in raw:
a.extend(some)
while len(a) >= rb + 1:
filter_type = a[0]
scanline = a[1:rb+1]
del a[:rb+1]
recon = self.undo_filter(filter_type, scanline, recon)
yield recon
if len(a) != 0:
# :file:format We get here with a file format error:
# when the available bytes (after decompressing) do not
# pack into exact rows.
raise FormatError(
'Wrong size for decompressed IDAT chunk.')
assert len(a) == 0
def validate_signature(self):
"""If signature (header) has not been read then read and
validate it; otherwise do nothing.
"""
if self.signature:
return
self.signature = self.file.read(8)
if self.signature != _signature:
raise FormatError("PNG file has invalid signature.")
def preamble(self, lenient=False):
"""
Extract the image metadata by reading the initial part of
the PNG file up to the start of the ``IDAT`` chunk. All the
chunks that precede the ``IDAT`` chunk are read and either
processed for metadata or discarded.
If the optional `lenient` argument evaluates to True, checksum
failures will raise warnings rather than exceptions.
"""
self.validate_signature()
while True:
if not self.atchunk:
self.atchunk = self.chunklentype()
if self.atchunk is None:
raise FormatError(
'This PNG file has no IDAT chunks.')
if self.atchunk[1] == 'IDAT':
return
self.process_chunk(lenient=lenient)
def chunklentype(self):
"""Reads just enough of the input to determine the next
chunk's length and type, returned as a (*length*, *type*) pair
where *type* is a string. If there are no more chunks, ``None``
is returned.
"""
x = self.file.read(8)
if not x:
return None
if len(x) != 8:
raise FormatError(
'End of file whilst reading chunk length and type.')
length,type = struct.unpack('!I4s', x)
type = bytestostr(type)
if length > 2**31-1:
raise FormatError('Chunk %s is too large: %d.' % (type,length))
return length,type
def process_chunk(self, lenient=False):
"""Process the next chunk and its data. This only processes the
following chunk types, all others are ignored: ``IHDR``,
``PLTE``, ``bKGD``, ``tRNS``, ``gAMA``, ``sBIT``.
If the optional `lenient` argument evaluates to True,
checksum failures will raise warnings rather than exceptions.
"""
type, data = self.chunk(lenient=lenient)
method = '_process_' + type
m = getattr(self, method, None)
if m:
m(data)
def _process_IHDR(self, data):
# http://www.w3.org/TR/PNG/#11IHDR
if len(data) != 13:
raise FormatError('IHDR chunk has incorrect length.')
(self.width, self.height, self.bitdepth, self.color_type,
self.compression, self.filter,
self.interlace) = struct.unpack("!2I5B", data)
check_bitdepth_colortype(self.bitdepth, self.color_type)
if self.compression != 0:
raise Error("unknown compression method %d" % self.compression)
if self.filter != 0:
raise FormatError("Unknown filter method %d,"
" see http://www.w3.org/TR/2003/REC-PNG-20031110/#9Filters ."
% self.filter)
if self.interlace not in (0,1):
raise FormatError("Unknown interlace method %d,"
" see http://www.w3.org/TR/2003/REC-PNG-20031110/#8InterlaceMethods ."
% self.interlace)
# Derived values
# http://www.w3.org/TR/PNG/#6Colour-values
colormap = bool(self.color_type & 1)
greyscale = not (self.color_type & 2)
alpha = bool(self.color_type & 4)
color_planes = (3,1)[greyscale or colormap]
planes = color_planes + alpha
self.colormap = colormap
self.greyscale = greyscale
self.alpha = alpha
self.color_planes = color_planes
self.planes = planes
self.psize = float(self.bitdepth)/float(8) * planes
if int(self.psize) == self.psize:
self.psize = int(self.psize)
self.row_bytes = int(math.ceil(self.width * self.psize))
# Stores PLTE chunk if present, and is used to check
# chunk ordering constraints.
self.plte = None
# Stores tRNS chunk if present, and is used to check chunk
# ordering constraints.
self.trns = None
# Stores sbit chunk if present.
self.sbit = None
def _process_PLTE(self, data):
# http://www.w3.org/TR/PNG/#11PLTE
if self.plte:
warnings.warn("Multiple PLTE chunks present.")
self.plte = data
if len(data) % 3 != 0:
raise FormatError(
"PLTE chunk's length should be a multiple of 3.")
if len(data) > (2**self.bitdepth)*3:
raise FormatError("PLTE chunk is too long.")
if len(data) == 0:
raise FormatError("Empty PLTE is not allowed.")
def _process_bKGD(self, data):
try:
if self.colormap:
if not self.plte:
warnings.warn(
"PLTE chunk is required before bKGD chunk.")
self.background = struct.unpack('B', data)
else:
self.background = struct.unpack("!%dH" % self.color_planes,
data)
except struct.error:
raise FormatError("bKGD chunk has incorrect length.")
def _process_tRNS(self, data):
# http://www.w3.org/TR/PNG/#11tRNS
self.trns = data
if self.colormap:
if not self.plte:
warnings.warn("PLTE chunk is required before tRNS chunk.")
else:
if len(data) > len(self.plte)/3:
# Was warning, but promoted to Error as it
# would otherwise cause pain later on.
raise FormatError("tRNS chunk is too long.")
else:
if self.alpha:
raise FormatError(
"tRNS chunk is not valid with colour type %d." %
self.color_type)
try:
self.transparent = \
struct.unpack("!%dH" % self.color_planes, data)
except struct.error:
raise FormatError("tRNS chunk has incorrect length.")
def _process_gAMA(self, data):
try:
self.gamma = struct.unpack("!L", data)[0] / 100000.0
except struct.error:
raise FormatError("gAMA chunk has incorrect length.")
def _process_sBIT(self, data):
self.sbit = data
if (self.colormap and len(data) != 3 or
not self.colormap and len(data) != self.planes):
raise FormatError("sBIT chunk has incorrect length.")
def read(self, lenient=False):
"""
Read the PNG file and decode it. Returns (`width`, `height`,
`pixels`, `metadata`).
May use excessive memory.
`pixels` are returned in boxed row flat pixel format.
If the optional `lenient` argument evaluates to True,
checksum failures will raise warnings rather than exceptions.
"""
def iteridat():
"""Iterator that yields all the ``IDAT`` chunks as strings."""
while True:
try:
type, data = self.chunk(lenient=lenient)
except ValueError as e:
raise ChunkError(e.args[0])
if type == 'IEND':
# http://www.w3.org/TR/PNG/#11IEND
break
if type != 'IDAT':
continue
# type == 'IDAT'
# http://www.w3.org/TR/PNG/#11IDAT
if self.colormap and not self.plte:
warnings.warn("PLTE chunk is required before IDAT chunk")
yield data
def iterdecomp(idat):
"""Iterator that yields decompressed strings. `idat` should
be an iterator that yields the ``IDAT`` chunk data.
"""
# Currently, with no max_length parameter to decompress,
# this routine will do one yield per IDAT chunk: Not very
# incremental.
d = zlib.decompressobj()
# Each IDAT chunk is passed to the decompressor, then any
# remaining state is decompressed out.
for data in idat:
# :todo: add a max_length argument here to limit output
# size.
yield array('B', d.decompress(data))
yield array('B', d.flush())
self.preamble(lenient=lenient)
raw = iterdecomp(iteridat())
if self.interlace:
raw = array('B', itertools.chain(*raw))
arraycode = 'BH'[self.bitdepth>8]
# Like :meth:`group` but producing an array.array object for
# each row.
pixels = imap(lambda *row: array(arraycode, row),
*[iter(self.deinterlace(raw))]*self.width*self.planes)
else:
pixels = self.iterboxed(self.iterstraight(raw))
meta = dict()
for attr in 'greyscale alpha planes bitdepth interlace'.split():
meta[attr] = getattr(self, attr)
meta['size'] = (self.width, self.height)
for attr in 'gamma transparent background'.split():
a = getattr(self, attr, None)
if a is not None:
meta[attr] = a
if self.plte:
meta['palette'] = self.palette()
return self.width, self.height, pixels, meta
def read_flat(self):
"""
Read a PNG file and decode it into flat row flat pixel format.
Returns (*width*, *height*, *pixels*, *metadata*).
May use excessive memory.
`pixels` are returned in flat row flat pixel format.
See also the :meth:`read` method which returns pixels in the
more stream-friendly boxed row flat pixel format.
"""
x, y, pixel, meta = self.read()
arraycode = 'BH'[meta['bitdepth']>8]
pixel = array(arraycode, itertools.chain(*pixel))
return x, y, pixel, meta
def palette(self, alpha='natural'):
"""Returns a palette that is a sequence of 3-tuples or 4-tuples,
synthesizing it from the ``PLTE`` and ``tRNS`` chunks. These
chunks should have already been processed (for example, by
calling the :meth:`preamble` method). All the tuples are the
same size: 3-tuples if there is no ``tRNS`` chunk, 4-tuples when
there is a ``tRNS`` chunk. Assumes that the image is colour type
3 and therefore a ``PLTE`` chunk is required.
If the `alpha` argument is ``'force'`` then an alpha channel is
always added, forcing the result to be a sequence of 4-tuples.
"""
if not self.plte:
raise FormatError(
"Required PLTE chunk is missing in colour type 3 image.")
plte = group(array('B', self.plte), 3)
if self.trns or alpha == 'force':
trns = array('B', self.trns or '')
trns.extend([255]*(len(plte)-len(trns)))
plte = map(operator.add, plte, group(trns, 1))
return plte
def asDirect(self):
"""Returns the image data as a direct representation of an
``x * y * planes`` array. This method is intended to remove the
need for callers to deal with palettes and transparency
themselves. Images with a palette (colour type 3)
are converted to RGB or RGBA; images with transparency (a
``tRNS`` chunk) are converted to LA or RGBA as appropriate.
When returned in this format the pixel values represent the
colour value directly without needing to refer to palettes or
transparency information.
Like the :meth:`read` method this method returns a 4-tuple:
(*width*, *height*, *pixels*, *meta*)
This method normally returns pixel values with the bit depth
they have in the source image, but when the source PNG has an
``sBIT`` chunk it is inspected and can reduce the bit depth of
the result pixels; pixel values will be reduced according to
the bit depth specified in the ``sBIT`` chunk (PNG nerds should
note a single result bit depth is used for all channels; the
maximum of the ones specified in the ``sBIT`` chunk. An RGB565
image will be rescaled to 6-bit RGB666).
The *meta* dictionary that is returned reflects the `direct`
format and not the original source image. For example, an RGB
source image with a ``tRNS`` chunk to represent a transparent
colour, will have ``planes=3`` and ``alpha=False`` for the
source image, but the *meta* dictionary returned by this method
will have ``planes=4`` and ``alpha=True`` because an alpha
channel is synthesized and added.
*pixels* is the pixel data in boxed row flat pixel format (just
like the :meth:`read` method).
All the other aspects of the image data are not changed.
"""
self.preamble()
# Simple case, no conversion necessary.
if not self.colormap and not self.trns and not self.sbit:
return self.read()
x,y,pixels,meta = self.read()
if self.colormap:
meta['colormap'] = False
meta['alpha'] = bool(self.trns)
meta['bitdepth'] = 8
meta['planes'] = 3 + bool(self.trns)
plte = self.palette()
def iterpal(pixels):
for row in pixels:
row = map(plte.__getitem__, row)
yield array('B', itertools.chain(*row))
pixels = iterpal(pixels)
elif self.trns:
# It would be nice if there was some reasonable way
# of doing this without generating a whole load of
# intermediate tuples. But tuples does seem like the
# easiest way, with no other way clearly much simpler or
# much faster. (Actually, the L to LA conversion could
# perhaps go faster (all those 1-tuples!), but I still
# wonder whether the code proliferation is worth it)
it = self.transparent
maxval = 2**meta['bitdepth']-1
planes = meta['planes']
meta['alpha'] = True
meta['planes'] += 1
typecode = 'BH'[meta['bitdepth']>8]
def itertrns(pixels):
for row in pixels:
# For each row we group it into pixels, then form a
# characterisation vector that says whether each
# pixel is opaque or not. Then we convert
# True/False to 0/maxval (by multiplication),
# and add it as the extra channel.
row = group(row, planes)
opa = map(it.__ne__, row)
opa = map(maxval.__mul__, opa)
opa = zip(opa) # convert to 1-tuples
yield array(typecode,
itertools.chain(*map(operator.add, row, opa)))
pixels = itertrns(pixels)
targetbitdepth = None
if self.sbit:
sbit = struct.unpack('%dB' % len(self.sbit), self.sbit)
targetbitdepth = max(sbit)
if targetbitdepth > meta['bitdepth']:
raise Error('sBIT chunk %r exceeds bitdepth %d' %
(sbit,self.bitdepth))
if min(sbit) <= 0:
raise Error('sBIT chunk %r has a 0-entry' % sbit)
if targetbitdepth == meta['bitdepth']:
targetbitdepth = None
if targetbitdepth:
shift = meta['bitdepth'] - targetbitdepth
meta['bitdepth'] = targetbitdepth
def itershift(pixels):
for row in pixels:
yield map(shift.__rrshift__, row)
pixels = itershift(pixels)
return x,y,pixels,meta
def asFloat(self, maxval=1.0):
"""Return image pixels as per :meth:`asDirect` method, but scale
all pixel values to be floating point values between 0.0 and
*maxval*.
"""
x,y,pixels,info = self.asDirect()
sourcemaxval = 2**info['bitdepth']-1
del info['bitdepth']
info['maxval'] = float(maxval)
factor = float(maxval)/float(sourcemaxval)
def iterfloat():
for row in pixels:
yield map(factor.__mul__, row)
return x,y,iterfloat(),info
def _as_rescale(self, get, targetbitdepth):
"""Helper used by :meth:`asRGB8` and :meth:`asRGBA8`."""
width,height,pixels,meta = get()
maxval = 2**meta['bitdepth'] - 1
targetmaxval = 2**targetbitdepth - 1
factor = float(targetmaxval) / float(maxval)
meta['bitdepth'] = targetbitdepth
def iterscale():
for row in pixels:
yield map(lambda x: int(round(x*factor)), row)
if maxval == targetmaxval:
return width, height, pixels, meta
else:
return width, height, iterscale(), meta
def asRGB8(self):
"""Return the image data as an RGB pixels with 8-bits per
sample. This is like the :meth:`asRGB` method except that
this method additionally rescales the values so that they
are all between 0 and 255 (8-bit). In the case where the
source image has a bit depth < 8 the transformation preserves
all the information; where the source image has bit depth
> 8, then rescaling to 8-bit values loses precision. No
dithering is performed. Like :meth:`asRGB`, an alpha channel
in the source image will raise an exception.
This function returns a 4-tuple:
(*width*, *height*, *pixels*, *metadata*).
*width*, *height*, *metadata* are as per the
:meth:`read` method.
*pixels* is the pixel data in boxed row flat pixel format.
"""
return self._as_rescale(self.asRGB, 8)
def asRGBA8(self):
"""Return the image data as RGBA pixels with 8-bits per
sample. This method is similar to :meth:`asRGB8` and
:meth:`asRGBA`: The result pixels have an alpha channel, *and*
values are rescaled to the range 0 to 255. The alpha channel is
synthesized if necessary (with a small speed penalty).
"""
return self._as_rescale(self.asRGBA, 8)
def asRGB(self):
"""Return image as RGB pixels. RGB colour images are passed
through unchanged; greyscales are expanded into RGB
triplets (there is a small speed overhead for doing this).
An alpha channel in the source image will raise an
exception.
The return values are as for the :meth:`read` method
except that the *metadata* reflect the returned pixels, not the
source image. In particular, for this method
``metadata['greyscale']`` will be ``False``.
"""
width,height,pixels,meta = self.asDirect()
if meta['alpha']:
raise Error("will not convert image with alpha channel to RGB")
if not meta['greyscale']:
return width,height,pixels,meta
meta['greyscale'] = False
typecode = 'BH'[meta['bitdepth'] > 8]
def iterrgb():
for row in pixels:
a = array(typecode, [0]) * 3 * width
for i in range(3):
a[i::3] = row
yield a
return width,height,iterrgb(),meta
def asRGBA(self):
"""Return image as RGBA pixels. Greyscales are expanded into
RGB triplets; an alpha channel is synthesized if necessary.
The return values are as for the :meth:`read` method
except that the *metadata* reflect the returned pixels, not the
source image. In particular, for this method
``metadata['greyscale']`` will be ``False``, and
``metadata['alpha']`` will be ``True``.
"""
width,height,pixels,meta = self.asDirect()
if meta['alpha'] and not meta['greyscale']:
return width,height,pixels,meta
typecode = 'BH'[meta['bitdepth'] > 8]
maxval = 2**meta['bitdepth'] - 1
maxbuffer = struct.pack('=' + typecode, maxval) * 4 * width
def newarray():
return array(typecode, maxbuffer)
if meta['alpha'] and meta['greyscale']:
# LA to RGBA
def convert():
for row in pixels:
# Create a fresh target row, then copy L channel
# into first three target channels, and A channel
# into fourth channel.
a = newarray()
pngfilters.convert_la_to_rgba(row, a)
yield a
elif meta['greyscale']:
# L to RGBA
def convert():
for row in pixels:
a = newarray()
pngfilters.convert_l_to_rgba(row, a)
yield a
else:
assert not meta['alpha'] and not meta['greyscale']
# RGB to RGBA
def convert():
for row in pixels:
a = newarray()
pngfilters.convert_rgb_to_rgba(row, a)
yield a
meta['alpha'] = True
meta['greyscale'] = False
return width,height,convert(),meta
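# Illustrative sketch only (not part of the original library): decode a PNG
# from a filename and convert it to 8-bit RGBA in boxed row flat pixel format,
# using the Reader methods documented above.
def _example_read_rgba8(path):
    r = Reader(filename=path)
    width, height, pixels, meta = r.asRGBA8()
    return width, height, [list(row) for row in pixels], meta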
def check_bitdepth_colortype(bitdepth, colortype):
"""Check that `bitdepth` and `colortype` are both valid,
and specified in a valid combination. Returns if valid,
    raises FormatError if not valid.
"""
if bitdepth not in (1,2,4,8,16):
raise FormatError("invalid bit depth %d" % bitdepth)
if colortype not in (0,2,3,4,6):
raise FormatError("invalid colour type %d" % colortype)
# Check indexed (palettized) images have 8 or fewer bits
# per pixel; check only indexed or greyscale images have
# fewer than 8 bits per pixel.
if colortype & 1 and bitdepth > 8:
raise FormatError(
"Indexed images (colour type %d) cannot"
" have bitdepth > 8 (bit depth %d)."
" See http://www.w3.org/TR/2003/REC-PNG-20031110/#table111 ."
            % (colortype, bitdepth))
if bitdepth < 8 and colortype not in (0,3):
raise FormatError("Illegal combination of bit depth (%d)"
" and colour type (%d)."
" See http://www.w3.org/TR/2003/REC-PNG-20031110/#table111 ."
% (bitdepth, colortype))
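# Illustrative sketch only (not part of the original library): colour type 3
# (indexed) is fine at bit depth 8, but bit depths above 8 are rejected for
# indexed images, as the table in the PNG specification requires.
def _example_check_bitdepth_colortype():
    check_bitdepth_colortype(8, 3)          # valid combination, returns None
    try:
        check_bitdepth_colortype(16, 3)     # invalid: indexed with 16 bits
    except FormatError:
        pass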
def isinteger(x):
try:
return int(x) == x
except (TypeError, ValueError):
return False
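# Illustrative sketch only (not part of the original library): undoing the
# "sub" filter reverses what filter_scanline(1, ...) produced above. The
# filter unit is 1 for 8-bit greyscale, and the undo is performed in place on
# ``result`` by the pngfilters class defined just below.
def _example_undo_filter_sub():
    filtered = array('B', [10, 10, 10])   # sub-filtered bytes, no type byte
    result = array('B', filtered)
    pngfilters.undo_filter_sub(1, filtered, None, result)
    return result                          # array('B', [10, 20, 30])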
class pngfilters(object):
def undo_filter_sub(filter_unit, scanline, previous, result):
"""Undo sub filter."""
ai = 0
        # Loop starts at index filter_unit. Observe that the initial part
# of the result is already filled in correctly with
# scanline.
for i in range(filter_unit, len(result)):
x = scanline[i]
a = result[ai]
result[i] = (x + a) & 0xff
ai += 1
undo_filter_sub = staticmethod(undo_filter_sub)
def undo_filter_up(filter_unit, scanline, previous, result):
"""Undo up filter."""
for i in range(len(result)):
x = scanline[i]
b = previous[i]
result[i] = (x + b) & 0xff
undo_filter_up = staticmethod(undo_filter_up)
def undo_filter_average(filter_unit, scanline, previous, result):
"""Undo up filter."""
ai = -filter_unit
for i in range(len(result)):
x = scanline[i]
if ai < 0:
a = 0
else:
a = result[ai]
b = previous[i]
result[i] = (x + ((a + b) >> 1)) & 0xff
ai += 1
undo_filter_average = staticmethod(undo_filter_average)
def undo_filter_paeth(filter_unit, scanline, previous, result):
"""Undo Paeth filter."""
# Also used for ci.
ai = -filter_unit
for i in range(len(result)):
x = scanline[i]
if ai < 0:
a = c = 0
else:
a = result[ai]
c = previous[ai]
b = previous[i]
p = a + b - c
pa = abs(p - a)
pb = abs(p - b)
pc = abs(p - c)
if pa <= pb and pa <= pc:
pr = a
elif pb <= pc:
pr = b
else:
pr = c
result[i] = (x + pr) & 0xff
ai += 1
undo_filter_paeth = staticmethod(undo_filter_paeth)
def convert_la_to_rgba(row, result):
for i in range(3):
result[i::4] = row[0::2]
result[3::4] = row[1::2]
convert_la_to_rgba = staticmethod(convert_la_to_rgba)
def convert_l_to_rgba(row, result):
"""Convert a grayscale image to RGBA. This method assumes
the alpha channel in result is already correctly
initialized.
"""
for i in range(3):
result[i::4] = row
convert_l_to_rgba = staticmethod(convert_l_to_rgba)
def convert_rgb_to_rgba(row, result):
"""Convert an RGB image to RGBA. This method assumes the
alpha channel in result is already correctly initialized.
"""
for i in range(3):
result[i::4] = row[i::3]
convert_rgb_to_rgba = staticmethod(convert_rgb_to_rgba)
| bsd-3-clause | 7,352,109,194,574,598,000 | 36.71693 | 88 | 0.559999 | false |
noroutine/ansible | lib/ansible/modules/storage/netapp/sf_snapshot_schedule_manager.py | 43 | 13003 | #!/usr/bin/python
# (c) 2017, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: sf_snapshot_schedule_manager
short_description: Manage SolidFire snapshot schedules
extends_documentation_fragment:
- netapp.solidfire
version_added: '2.3'
author: Sumit Kumar ([email protected])
description:
    - Create, destroy, or update snapshot schedules on SolidFire
options:
state:
description:
- Whether the specified schedule should exist or not.
required: true
choices: ['present', 'absent']
paused:
description:
- Pause / Resume a schedule.
required: false
recurring:
description:
- Should the schedule recur?
required: false
time_interval_days:
description: Time interval in days.
required: false
default: 1
time_interval_hours:
description: Time interval in hours.
required: false
default: 0
time_interval_minutes:
description: Time interval in minutes.
required: false
default: 0
name:
description:
- Name for the snapshot schedule.
required: true
snapshot_name:
description:
- Name for the created snapshots.
required: false
volumes:
description:
- Volume IDs that you want to set the snapshot schedule for.
- At least 1 volume ID is required for creating a new schedule.
- required when C(state=present)
required: false
retention:
description:
- Retention period for the snapshot.
- Format is 'HH:mm:ss'.
required: false
schedule_id:
description:
- The schedule ID for the schedule that you want to update or delete.
required: false
starting_date:
description:
- Starting date for the schedule.
- Required when C(state=present).
        - Please use two '-' in the above format, otherwise you may see an error such as C(TypeError, is not JSON serializable).
- "Format: C(2016--12--01T00:00:00Z)"
required: false
'''
EXAMPLES = """
- name: Create Snapshot schedule
sf_snapshot_schedule_manager:
hostname: "{{ solidfire_hostname }}"
username: "{{ solidfire_username }}"
password: "{{ solidfire_password }}"
state: present
name: Schedule_A
time_interval_days: 1
starting_date: 2016--12--01T00:00:00Z
volumes: 7
- name: Update Snapshot schedule
sf_snapshot_schedule_manager:
hostname: "{{ solidfire_hostname }}"
username: "{{ solidfire_username }}"
password: "{{ solidfire_password }}"
state: present
schedule_id: 6
recurring: True
snapshot_name: AnsibleSnapshots
- name: Delete Snapshot schedule
sf_snapshot_schedule_manager:
hostname: "{{ solidfire_hostname }}"
username: "{{ solidfire_username }}"
password: "{{ solidfire_password }}"
state: absent
schedule_id: 6
"""
RETURN = """
schedule_id:
description: Schedule ID of the newly created schedule
returned: success
type: string
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
HAS_SF_SDK = netapp_utils.has_sf_sdk()
class SolidFireSnapShotSchedule(object):
def __init__(self):
self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
self.argument_spec.update(dict(
state=dict(required=True, choices=['present', 'absent']),
name=dict(required=True, type='str'),
time_interval_days=dict(required=False, type='int', default=1),
time_interval_hours=dict(required=False, type='int', default=0),
time_interval_minutes=dict(required=False, type='int', default=0),
paused=dict(required=False, type='bool'),
recurring=dict(required=False, type='bool'),
starting_date=dict(type='str'),
snapshot_name=dict(required=False, type='str'),
volumes=dict(required=False, type='list'),
retention=dict(required=False, type='str'),
schedule_id=dict(type='int'),
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
required_if=[
('state', 'present', ['starting_date', 'volumes'])
],
supports_check_mode=True
)
p = self.module.params
# set up state variables
self.state = p['state']
self.name = p['name']
# self.interval = p['interval']
self.time_interval_days = p['time_interval_days']
self.time_interval_hours = p['time_interval_hours']
self.time_interval_minutes = p['time_interval_minutes']
self.paused = p['paused']
self.recurring = p['recurring']
self.starting_date = p['starting_date']
if self.starting_date is not None:
self.starting_date = self.starting_date.replace("--", "-")
self.snapshot_name = p['snapshot_name']
self.volumes = p['volumes']
self.retention = p['retention']
self.schedule_id = p['schedule_id']
self.create_schedule_result = None
if HAS_SF_SDK is False:
self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
else:
self.sfe = netapp_utils.create_sf_connection(module=self.module)
def get_schedule(self):
schedule_list = self.sfe.list_schedules()
for schedule in schedule_list.schedules:
if schedule.name == self.name:
# Update self.schedule_id:
if self.schedule_id is not None:
if schedule.schedule_id == self.schedule_id:
return schedule
else:
self.schedule_id = schedule.schedule_id
return schedule
return None
def create_schedule(self):
try:
sched = netapp_utils.Schedule()
# if self.interval == 'time_interval':
sched.frequency = netapp_utils.TimeIntervalFrequency(days=self.time_interval_days,
hours=self.time_interval_hours,
minutes=self.time_interval_minutes)
# Create schedule
sched.name = self.name
sched.schedule_info = netapp_utils.ScheduleInfo(
volume_ids=self.volumes,
snapshot_name=self.snapshot_name,
retention=self.retention
)
sched.paused = self.paused
sched.recurring = self.recurring
sched.starting_date = self.starting_date
self.create_schedule_result = self.sfe.create_schedule(schedule=sched)
except Exception as e:
self.module.fail_json(msg='Error creating schedule %s: %s' % (self.name, to_native(e)),
exception=traceback.format_exc())
def delete_schedule(self):
try:
get_schedule_result = self.sfe.get_schedule(schedule_id=self.schedule_id)
sched = get_schedule_result.schedule
sched.to_be_deleted = True
self.sfe.modify_schedule(schedule=sched)
except Exception as e:
self.module.fail_json(msg='Error deleting schedule %s: %s' % (self.name, to_native(e)),
exception=traceback.format_exc())
def update_schedule(self):
try:
get_schedule_result = self.sfe.get_schedule(schedule_id=self.schedule_id)
sched = get_schedule_result.schedule
# Update schedule properties
# if self.interval == 'time_interval':
temp_frequency = netapp_utils.TimeIntervalFrequency(days=self.time_interval_days,
hours=self.time_interval_hours,
minutes=self.time_interval_minutes)
if sched.frequency.days != temp_frequency.days or \
sched.frequency.hours != temp_frequency.hours \
or sched.frequency.minutes != temp_frequency.minutes:
sched.frequency = temp_frequency
sched.name = self.name
if self.volumes is not None:
sched.schedule_info.volume_ids = self.volumes
if self.retention is not None:
sched.schedule_info.retention = self.retention
if self.snapshot_name is not None:
sched.schedule_info.snapshot_name = self.snapshot_name
if self.paused is not None:
sched.paused = self.paused
if self.recurring is not None:
sched.recurring = self.recurring
if self.starting_date is not None:
sched.starting_date = self.starting_date
# Make API call
self.sfe.modify_schedule(schedule=sched)
except Exception as e:
self.module.fail_json(msg='Error updating schedule %s: %s' % (self.name, to_native(e)),
exception=traceback.format_exc())
def apply(self):
changed = False
schedule_exists = False
update_schedule = False
schedule_detail = self.get_schedule()
if schedule_detail:
schedule_exists = True
if self.state == 'absent':
changed = True
elif self.state == 'present':
                # Check if we need to update the schedule
if self.retention is not None and schedule_detail.schedule_info.retention != self.retention:
update_schedule = True
changed = True
elif schedule_detail.name != self.name:
update_schedule = True
changed = True
elif self.snapshot_name is not None and schedule_detail.schedule_info.snapshot_name != self.snapshot_name:
update_schedule = True
changed = True
elif self.volumes is not None and schedule_detail.schedule_info.volume_ids != self.volumes:
update_schedule = True
changed = True
elif self.paused is not None and schedule_detail.paused != self.paused:
update_schedule = True
changed = True
elif self.recurring is not None and schedule_detail.recurring != self.recurring:
update_schedule = True
changed = True
elif self.starting_date is not None and schedule_detail.starting_date != self.starting_date:
update_schedule = True
changed = True
elif self.time_interval_minutes is not None or self.time_interval_hours is not None \
or self.time_interval_days is not None:
temp_frequency = netapp_utils.TimeIntervalFrequency(days=self.time_interval_days,
hours=self.time_interval_hours,
minutes=self.time_interval_minutes)
if schedule_detail.frequency.days != temp_frequency.days or \
schedule_detail.frequency.hours != temp_frequency.hours \
or schedule_detail.frequency.minutes != temp_frequency.minutes:
update_schedule = True
changed = True
else:
if self.state == 'present':
changed = True
if changed:
if self.module.check_mode:
# Skip changes
pass
else:
if self.state == 'present':
if not schedule_exists:
self.create_schedule()
elif update_schedule:
self.update_schedule()
elif self.state == 'absent':
self.delete_schedule()
if self.create_schedule_result is not None:
self.module.exit_json(changed=changed, schedule_id=self.create_schedule_result.schedule_id)
else:
self.module.exit_json(changed=changed)
def main():
v = SolidFireSnapShotSchedule()
v.apply()
if __name__ == '__main__':
main()
| gpl-3.0 | -1,707,390,404,245,752,600 | 32.774026 | 123 | 0.561024 | false |
lsqtongxin/django | django/contrib/admin/helpers.py | 146 | 14347 | from __future__ import unicode_literals
import warnings
from django import forms
from django.conf import settings
from django.contrib.admin.templatetags.admin_static import static
from django.contrib.admin.utils import (
display_for_field, flatten_fieldsets, help_text_for_field, label_for_field,
lookup_field,
)
from django.core.exceptions import ObjectDoesNotExist
from django.db.models.fields.related import ManyToManyRel
from django.forms.utils import flatatt
from django.template.defaultfilters import capfirst, linebreaksbr
from django.utils import six
from django.utils.deprecation import RemovedInDjango110Warning
from django.utils.encoding import force_text, smart_text
from django.utils.functional import cached_property
from django.utils.html import conditional_escape, format_html
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
ACTION_CHECKBOX_NAME = '_selected_action'
class ActionForm(forms.Form):
action = forms.ChoiceField(label=_('Action:'))
select_across = forms.BooleanField(label='', required=False, initial=0,
widget=forms.HiddenInput({'class': 'select-across'}))
checkbox = forms.CheckboxInput({'class': 'action-select'}, lambda value: False)
class AdminForm(object):
def __init__(self, form, fieldsets, prepopulated_fields, readonly_fields=None, model_admin=None):
self.form, self.fieldsets = form, fieldsets
self.prepopulated_fields = [{
'field': form[field_name],
'dependencies': [form[f] for f in dependencies]
} for field_name, dependencies in prepopulated_fields.items()]
self.model_admin = model_admin
if readonly_fields is None:
readonly_fields = ()
self.readonly_fields = readonly_fields
def __iter__(self):
for name, options in self.fieldsets:
yield Fieldset(
self.form, name,
readonly_fields=self.readonly_fields,
model_admin=self.model_admin,
**options
)
def _media(self):
media = self.form.media
for fs in self:
media = media + fs.media
return media
media = property(_media)
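# A minimal usage sketch (illustrative, not part of Django): wrap a bound
# ModelForm in AdminForm much as the admin change view does, then walk its
# fieldsets, lines and fields. The field names 'title', 'body' and 'slug' are
# assumptions about the form being wrapped.
def _example_adminform_iteration(form):
    admin_form = AdminForm(
        form,
        fieldsets=[(None, {'fields': ['title', 'body', 'slug']})],
        prepopulated_fields={'slug': ['title']},
        readonly_fields=(),
    )
    names = []
    for fieldset in admin_form:
        for line in fieldset:
            for admin_field in line:
                names.append(admin_field.field.name)
    return names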
class Fieldset(object):
def __init__(self, form, name=None, readonly_fields=(), fields=(), classes=(),
description=None, model_admin=None):
self.form = form
self.name, self.fields = name, fields
self.classes = ' '.join(classes)
self.description = description
self.model_admin = model_admin
self.readonly_fields = readonly_fields
def _media(self):
if 'collapse' in self.classes:
extra = '' if settings.DEBUG else '.min'
js = ['vendor/jquery/jquery%s.js' % extra,
'jquery.init.js',
'collapse%s.js' % extra]
return forms.Media(js=[static('admin/js/%s' % url) for url in js])
return forms.Media()
media = property(_media)
def __iter__(self):
for field in self.fields:
yield Fieldline(self.form, field, self.readonly_fields, model_admin=self.model_admin)
class Fieldline(object):
def __init__(self, form, field, readonly_fields=None, model_admin=None):
self.form = form # A django.forms.Form instance
if not hasattr(field, "__iter__") or isinstance(field, six.text_type):
self.fields = [field]
else:
self.fields = field
self.has_visible_field = not all(field in self.form.fields and
self.form.fields[field].widget.is_hidden
for field in self.fields)
self.model_admin = model_admin
if readonly_fields is None:
readonly_fields = ()
self.readonly_fields = readonly_fields
def __iter__(self):
for i, field in enumerate(self.fields):
if field in self.readonly_fields:
yield AdminReadonlyField(self.form, field, is_first=(i == 0),
model_admin=self.model_admin)
else:
yield AdminField(self.form, field, is_first=(i == 0))
def errors(self):
return mark_safe(
'\n'.join(self.form[f].errors.as_ul()
for f in self.fields if f not in self.readonly_fields).strip('\n')
)
class AdminField(object):
def __init__(self, form, field, is_first):
self.field = form[field] # A django.forms.BoundField instance
self.is_first = is_first # Whether this field is first on the line
self.is_checkbox = isinstance(self.field.field.widget, forms.CheckboxInput)
self.is_readonly = False
def label_tag(self):
classes = []
contents = conditional_escape(force_text(self.field.label))
if self.is_checkbox:
classes.append('vCheckboxLabel')
if self.field.field.required:
classes.append('required')
if not self.is_first:
classes.append('inline')
attrs = {'class': ' '.join(classes)} if classes else {}
# checkboxes should not have a label suffix as the checkbox appears
# to the left of the label.
return self.field.label_tag(contents=mark_safe(contents), attrs=attrs,
label_suffix='' if self.is_checkbox else None)
def errors(self):
return mark_safe(self.field.errors.as_ul())
class AdminReadonlyField(object):
def __init__(self, form, field, is_first, model_admin=None):
# Make self.field look a little bit like a field. This means that
# {{ field.name }} must be a useful class name to identify the field.
# For convenience, store other field-related data here too.
if callable(field):
class_name = field.__name__ if field.__name__ != '<lambda>' else ''
else:
class_name = field
if form._meta.labels and class_name in form._meta.labels:
label = form._meta.labels[class_name]
else:
label = label_for_field(field, form._meta.model, model_admin)
if form._meta.help_texts and class_name in form._meta.help_texts:
help_text = form._meta.help_texts[class_name]
else:
help_text = help_text_for_field(class_name, form._meta.model)
self.field = {
'name': class_name,
'label': label,
'help_text': help_text,
'field': field,
}
self.form = form
self.model_admin = model_admin
self.is_first = is_first
self.is_checkbox = False
self.is_readonly = True
self.empty_value_display = model_admin.get_empty_value_display()
def label_tag(self):
attrs = {}
if not self.is_first:
attrs["class"] = "inline"
label = self.field['label']
return format_html('<label{}>{}:</label>',
flatatt(attrs),
capfirst(force_text(label)))
def contents(self):
from django.contrib.admin.templatetags.admin_list import _boolean_icon
field, obj, model_admin = self.field['field'], self.form.instance, self.model_admin
try:
f, attr, value = lookup_field(field, obj, model_admin)
except (AttributeError, ValueError, ObjectDoesNotExist):
result_repr = self.empty_value_display
else:
if f is None:
boolean = getattr(attr, "boolean", False)
if boolean:
result_repr = _boolean_icon(value)
else:
result_repr = smart_text(value)
if getattr(attr, "allow_tags", False):
result_repr = mark_safe(result_repr)
else:
result_repr = linebreaksbr(result_repr)
else:
if isinstance(f.remote_field, ManyToManyRel) and value is not None:
result_repr = ", ".join(map(six.text_type, value.all()))
else:
result_repr = display_for_field(value, f, self.empty_value_display)
return conditional_escape(result_repr)
class InlineAdminFormSet(object):
"""
A wrapper around an inline formset for use in the admin system.
"""
def __init__(self, inline, formset, fieldsets, prepopulated_fields=None,
readonly_fields=None, model_admin=None):
self.opts = inline
self.formset = formset
self.fieldsets = fieldsets
self.model_admin = model_admin
if readonly_fields is None:
readonly_fields = ()
self.readonly_fields = readonly_fields
if prepopulated_fields is None:
prepopulated_fields = {}
self.prepopulated_fields = prepopulated_fields
def __iter__(self):
for form, original in zip(self.formset.initial_forms, self.formset.get_queryset()):
view_on_site_url = self.opts.get_view_on_site_url(original)
yield InlineAdminForm(self.formset, form, self.fieldsets,
self.prepopulated_fields, original, self.readonly_fields,
model_admin=self.opts, view_on_site_url=view_on_site_url)
for form in self.formset.extra_forms:
yield InlineAdminForm(self.formset, form, self.fieldsets,
self.prepopulated_fields, None, self.readonly_fields,
model_admin=self.opts)
yield InlineAdminForm(self.formset, self.formset.empty_form,
self.fieldsets, self.prepopulated_fields, None,
self.readonly_fields, model_admin=self.opts)
def fields(self):
fk = getattr(self.formset, "fk", None)
for i, field_name in enumerate(flatten_fieldsets(self.fieldsets)):
if fk and fk.name == field_name:
continue
if field_name in self.readonly_fields:
yield {
'label': label_for_field(field_name, self.opts.model, self.opts),
'widget': {
'is_hidden': False
},
'required': False,
'help_text': help_text_for_field(field_name, self.opts.model),
}
else:
yield self.formset.form.base_fields[field_name]
def _media(self):
media = self.opts.media + self.formset.media
for fs in self:
media = media + fs.media
return media
media = property(_media)
class InlineAdminForm(AdminForm):
"""
A wrapper around an inline form for use in the admin system.
"""
def __init__(self, formset, form, fieldsets, prepopulated_fields, original,
readonly_fields=None, model_admin=None, view_on_site_url=None):
self.formset = formset
self.model_admin = model_admin
self.original = original
self.show_url = original and view_on_site_url is not None
self.absolute_url = view_on_site_url
super(InlineAdminForm, self).__init__(form, fieldsets, prepopulated_fields,
readonly_fields, model_admin)
@cached_property
def original_content_type_id(self):
warnings.warn(
'InlineAdminForm.original_content_type_id is deprecated and will be '
'removed in Django 1.10. If you were using this attribute to construct '
'the "view on site" URL, use the `absolute_url` attribute instead.',
RemovedInDjango110Warning, stacklevel=2
)
if self.original is not None:
# Since this module gets imported in the application's root package,
# it cannot import models from other applications at the module level.
from django.contrib.contenttypes.models import ContentType
return ContentType.objects.get_for_model(self.original).pk
raise AttributeError
def __iter__(self):
for name, options in self.fieldsets:
yield InlineFieldset(self.formset, self.form, name,
self.readonly_fields, model_admin=self.model_admin, **options)
def needs_explicit_pk_field(self):
# Auto fields are editable (oddly), so need to check for auto or non-editable pk
if self.form._meta.model._meta.has_auto_field or not self.form._meta.model._meta.pk.editable:
return True
# Also search any parents for an auto field. (The pk info is propagated to child
# models so that does not need to be checked in parents.)
for parent in self.form._meta.model._meta.get_parent_list():
if parent._meta.has_auto_field:
return True
return False
def pk_field(self):
return AdminField(self.form, self.formset._pk_field.name, False)
def fk_field(self):
fk = getattr(self.formset, "fk", None)
if fk:
return AdminField(self.form, fk.name, False)
else:
return ""
def deletion_field(self):
from django.forms.formsets import DELETION_FIELD_NAME
return AdminField(self.form, DELETION_FIELD_NAME, False)
def ordering_field(self):
from django.forms.formsets import ORDERING_FIELD_NAME
return AdminField(self.form, ORDERING_FIELD_NAME, False)
class InlineFieldset(Fieldset):
def __init__(self, formset, *args, **kwargs):
self.formset = formset
super(InlineFieldset, self).__init__(*args, **kwargs)
def __iter__(self):
fk = getattr(self.formset, "fk", None)
for field in self.fields:
if fk and fk.name == field:
continue
yield Fieldline(self.form, field, self.readonly_fields,
model_admin=self.model_admin)
class AdminErrorList(forms.utils.ErrorList):
"""
Stores all errors for the form/formsets in an add/change stage view.
"""
def __init__(self, form, inline_formsets):
super(AdminErrorList, self).__init__()
if form.is_bound:
self.extend(form.errors.values())
for inline_formset in inline_formsets:
self.extend(inline_formset.non_form_errors())
for errors_in_inline_form in inline_formset.errors:
self.extend(errors_in_inline_form.values())
| bsd-3-clause | 7,409,968,927,151,770,000 | 38.852778 | 101 | 0.60382 | false |
poojavade/Genomics_Docker | Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/Pillow-2.3.0-py2.7-linux-x86_64.egg/PIL/PsdImagePlugin.py | 14 | 7371 | #
# The Python Imaging Library
# $Id$
#
# Adobe PSD 2.5/3.0 file handling
#
# History:
# 1995-09-01 fl Created
# 1997-01-03 fl Read most PSD images
# 1997-01-18 fl Fixed P and CMYK support
# 2001-10-21 fl Added seek/tell support (for layers)
#
# Copyright (c) 1997-2001 by Secret Labs AB.
# Copyright (c) 1995-2001 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
__version__ = "0.4"
from PIL import Image, ImageFile, ImagePalette, _binary
MODES = {
# (photoshop mode, bits) -> (pil mode, required channels)
(0, 1): ("1", 1),
(0, 8): ("L", 1),
(1, 8): ("L", 1),
(2, 8): ("P", 1),
(3, 8): ("RGB", 3),
(4, 8): ("CMYK", 4),
(7, 8): ("L", 1), # FIXME: multilayer
(8, 8): ("L", 1), # duotone
(9, 8): ("LAB", 3)
}
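# Illustrative lookup (an assumption drawn from the table above, not original
# code): an 8-bit RGB file is stored with photoshop mode 3, so
#
#     MODES[(3, 8)] == ("RGB", 3)
#
# i.e. it opens as PIL mode "RGB" and must provide at least 3 channels.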
#
# helpers
i8 = _binary.i8
i16 = _binary.i16be
i32 = _binary.i32be
# --------------------------------------------------------------------
# read PSD images
def _accept(prefix):
return prefix[:4] == b"8BPS"
##
# Image plugin for Photoshop images.
class PsdImageFile(ImageFile.ImageFile):
format = "PSD"
format_description = "Adobe Photoshop"
def _open(self):
read = self.fp.read
#
# header
s = read(26)
if s[:4] != b"8BPS" or i16(s[4:]) != 1:
raise SyntaxError("not a PSD file")
psd_bits = i16(s[22:])
psd_channels = i16(s[12:])
psd_mode = i16(s[24:])
mode, channels = MODES[(psd_mode, psd_bits)]
if channels > psd_channels:
raise IOError("not enough channels")
self.mode = mode
self.size = i32(s[18:]), i32(s[14:])
#
# color mode data
size = i32(read(4))
if size:
data = read(size)
if mode == "P" and size == 768:
self.palette = ImagePalette.raw("RGB;L", data)
#
# image resources
self.resources = []
size = i32(read(4))
if size:
# load resources
end = self.fp.tell() + size
while self.fp.tell() < end:
signature = read(4)
id = i16(read(2))
name = read(i8(read(1)))
if not (len(name) & 1):
read(1) # padding
data = read(i32(read(4)))
if (len(data) & 1):
read(1) # padding
self.resources.append((id, name, data))
if id == 1039: # ICC profile
self.info["icc_profile"] = data
#
# layer and mask information
self.layers = []
size = i32(read(4))
if size:
end = self.fp.tell() + size
size = i32(read(4))
if size:
self.layers = _layerinfo(self.fp)
self.fp.seek(end)
#
# image descriptor
self.tile = _maketile(self.fp, mode, (0, 0) + self.size, channels)
# keep the file open
self._fp = self.fp
self.frame = 0
def seek(self, layer):
# seek to given layer (1..max)
if layer == self.frame:
return
try:
if layer <= 0:
raise IndexError
name, mode, bbox, tile = self.layers[layer-1]
self.mode = mode
self.tile = tile
self.frame = layer
self.fp = self._fp
return name, bbox
except IndexError:
raise EOFError("no such layer")
def tell(self):
# return layer number (0=image, 1..max=layers)
return self.frame
def load_prepare(self):
# create image memory if necessary
if not self.im or\
self.im.mode != self.mode or self.im.size != self.size:
self.im = Image.core.fill(self.mode, self.size, 0)
# create palette (optional)
if self.mode == "P":
Image.Image.load(self)
def _layerinfo(file):
# read layerinfo block
layers = []
read = file.read
for i in range(abs(i16(read(2)))):
# bounding box
y0 = i32(read(4)); x0 = i32(read(4))
y1 = i32(read(4)); x1 = i32(read(4))
# image info
info = []
mode = []
types = list(range(i16(read(2))))
if len(types) > 4:
continue
for i in types:
type = i16(read(2))
if type == 65535:
m = "A"
else:
m = "RGBA"[type]
mode.append(m)
size = i32(read(4))
info.append((m, size))
# figure out the image mode
mode.sort()
if mode == ["R"]:
mode = "L"
elif mode == ["B", "G", "R"]:
mode = "RGB"
elif mode == ["A", "B", "G", "R"]:
mode = "RGBA"
else:
mode = None # unknown
# skip over blend flags and extra information
filler = read(12)
name = ""
size = i32(read(4))
combined = 0
if size:
length = i32(read(4))
if length:
mask_y = i32(read(4)); mask_x = i32(read(4))
mask_h = i32(read(4)) - mask_y; mask_w = i32(read(4)) - mask_x
file.seek(length - 16, 1)
combined += length + 4
length = i32(read(4))
if length:
file.seek(length, 1)
combined += length + 4
length = i8(read(1))
if length:
# Don't know the proper encoding, Latin-1 should be a good guess
name = read(length).decode('latin-1', 'replace')
combined += length + 1
file.seek(size - combined, 1)
layers.append((name, mode, (x0, y0, x1, y1)))
# get tiles
i = 0
for name, mode, bbox in layers:
tile = []
for m in mode:
t = _maketile(file, m, bbox, 1)
if t:
tile.extend(t)
layers[i] = name, mode, bbox, tile
i = i + 1
return layers
def _maketile(file, mode, bbox, channels):
tile = None
read = file.read
compression = i16(read(2))
xsize = bbox[2] - bbox[0]
ysize = bbox[3] - bbox[1]
offset = file.tell()
if compression == 0:
#
# raw compression
tile = []
for channel in range(channels):
layer = mode[channel]
if mode == "CMYK":
layer = layer + ";I"
tile.append(("raw", bbox, offset, layer))
offset = offset + xsize*ysize
elif compression == 1:
#
# packbits compression
i = 0
tile = []
bytecount = read(channels * ysize * 2)
offset = file.tell()
for channel in range(channels):
layer = mode[channel]
if mode == "CMYK":
layer = layer + ";I"
tile.append(
("packbits", bbox, offset, layer)
)
for y in range(ysize):
offset = offset + i16(bytecount[i:i+2])
i = i + 2
file.seek(offset)
if offset & 1:
read(1) # padding
return tile
# --------------------------------------------------------------------
# registry
Image.register_open("PSD", PsdImageFile, _accept)
Image.register_extension("PSD", ".psd")
| apache-2.0 | -1,871,297,079,637,771,800 | 23.986441 | 80 | 0.464659 | false |
RadioFreeAsia/RDacity | lib-src/lv2/lv2/plugins/eg-midigate.lv2/waflib/Tools/c.py | 329 | 1066 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
from waflib import TaskGen,Task,Utils
from waflib.Tools import c_preproc
from waflib.Tools.ccroot import link_task,stlink_task
@TaskGen.extension('.c')
def c_hook(self,node):
return self.create_compiled_task('c',node)
class c(Task.Task):
run_str='${CC} ${ARCH_ST:ARCH} ${CFLAGS} ${CPPFLAGS} ${FRAMEWORKPATH_ST:FRAMEWORKPATH} ${CPPPATH_ST:INCPATHS} ${DEFINES_ST:DEFINES} ${CC_SRC_F}${SRC} ${CC_TGT_F}${TGT}'
vars=['CCDEPS']
ext_in=['.h']
scan=c_preproc.scan
class cprogram(link_task):
run_str='${LINK_CC} ${LINKFLAGS} ${CCLNK_SRC_F}${SRC} ${CCLNK_TGT_F}${TGT[0].abspath()} ${RPATH_ST:RPATH} ${FRAMEWORKPATH_ST:FRAMEWORKPATH} ${FRAMEWORK_ST:FRAMEWORK} ${ARCH_ST:ARCH} ${STLIB_MARKER} ${STLIBPATH_ST:STLIBPATH} ${STLIB_ST:STLIB} ${SHLIB_MARKER} ${LIBPATH_ST:LIBPATH} ${LIB_ST:LIB}'
ext_out=['.bin']
vars=['LINKDEPS']
inst_to='${BINDIR}'
class cshlib(cprogram):
inst_to='${LIBDIR}'
class cstlib(stlink_task):
pass
| gpl-2.0 | -1,920,577,208,043,450,000 | 43.416667 | 295 | 0.699812 | false |
0jpq0/kbengine | kbe/res/scripts/common/Lib/ctypes/test/test_structures.py | 79 | 15780 | import unittest
from ctypes import *
from ctypes.test import need_symbol
from struct import calcsize
import _testcapi
class SubclassesTest(unittest.TestCase):
def test_subclass(self):
class X(Structure):
_fields_ = [("a", c_int)]
class Y(X):
_fields_ = [("b", c_int)]
class Z(X):
pass
self.assertEqual(sizeof(X), sizeof(c_int))
self.assertEqual(sizeof(Y), sizeof(c_int)*2)
self.assertEqual(sizeof(Z), sizeof(c_int))
self.assertEqual(X._fields_, [("a", c_int)])
self.assertEqual(Y._fields_, [("b", c_int)])
self.assertEqual(Z._fields_, [("a", c_int)])
def test_subclass_delayed(self):
class X(Structure):
pass
self.assertEqual(sizeof(X), 0)
X._fields_ = [("a", c_int)]
class Y(X):
pass
self.assertEqual(sizeof(Y), sizeof(X))
Y._fields_ = [("b", c_int)]
class Z(X):
pass
self.assertEqual(sizeof(X), sizeof(c_int))
self.assertEqual(sizeof(Y), sizeof(c_int)*2)
self.assertEqual(sizeof(Z), sizeof(c_int))
self.assertEqual(X._fields_, [("a", c_int)])
self.assertEqual(Y._fields_, [("b", c_int)])
self.assertEqual(Z._fields_, [("a", c_int)])
class StructureTestCase(unittest.TestCase):
formats = {"c": c_char,
"b": c_byte,
"B": c_ubyte,
"h": c_short,
"H": c_ushort,
"i": c_int,
"I": c_uint,
"l": c_long,
"L": c_ulong,
"q": c_longlong,
"Q": c_ulonglong,
"f": c_float,
"d": c_double,
}
def test_simple_structs(self):
for code, tp in self.formats.items():
class X(Structure):
_fields_ = [("x", c_char),
("y", tp)]
self.assertEqual((sizeof(X), code),
(calcsize("c%c0%c" % (code, code)), code))
def test_unions(self):
for code, tp in self.formats.items():
class X(Union):
_fields_ = [("x", c_char),
("y", tp)]
self.assertEqual((sizeof(X), code),
(calcsize("%c" % (code)), code))
def test_struct_alignment(self):
class X(Structure):
_fields_ = [("x", c_char * 3)]
self.assertEqual(alignment(X), calcsize("s"))
self.assertEqual(sizeof(X), calcsize("3s"))
class Y(Structure):
_fields_ = [("x", c_char * 3),
("y", c_int)]
self.assertEqual(alignment(Y), alignment(c_int))
self.assertEqual(sizeof(Y), calcsize("3si"))
class SI(Structure):
_fields_ = [("a", X),
("b", Y)]
self.assertEqual(alignment(SI), max(alignment(Y), alignment(X)))
self.assertEqual(sizeof(SI), calcsize("3s0i 3si 0i"))
class IS(Structure):
_fields_ = [("b", Y),
("a", X)]
self.assertEqual(alignment(SI), max(alignment(X), alignment(Y)))
self.assertEqual(sizeof(IS), calcsize("3si 3s 0i"))
class XX(Structure):
_fields_ = [("a", X),
("b", X)]
self.assertEqual(alignment(XX), alignment(X))
self.assertEqual(sizeof(XX), calcsize("3s 3s 0s"))
def test_emtpy(self):
# I had problems with these
#
# Although these are pathological cases: Empty Structures!
class X(Structure):
_fields_ = []
class Y(Union):
_fields_ = []
# Is this really the correct alignment, or should it be 0?
self.assertTrue(alignment(X) == alignment(Y) == 1)
self.assertTrue(sizeof(X) == sizeof(Y) == 0)
class XX(Structure):
_fields_ = [("a", X),
("b", X)]
self.assertEqual(alignment(XX), 1)
self.assertEqual(sizeof(XX), 0)
def test_fields(self):
        # test the offset and size attributes of Structure/Union fields.
class X(Structure):
_fields_ = [("x", c_int),
("y", c_char)]
self.assertEqual(X.x.offset, 0)
self.assertEqual(X.x.size, sizeof(c_int))
self.assertEqual(X.y.offset, sizeof(c_int))
self.assertEqual(X.y.size, sizeof(c_char))
# readonly
self.assertRaises((TypeError, AttributeError), setattr, X.x, "offset", 92)
self.assertRaises((TypeError, AttributeError), setattr, X.x, "size", 92)
class X(Union):
_fields_ = [("x", c_int),
("y", c_char)]
self.assertEqual(X.x.offset, 0)
self.assertEqual(X.x.size, sizeof(c_int))
self.assertEqual(X.y.offset, 0)
self.assertEqual(X.y.size, sizeof(c_char))
# readonly
self.assertRaises((TypeError, AttributeError), setattr, X.x, "offset", 92)
self.assertRaises((TypeError, AttributeError), setattr, X.x, "size", 92)
# XXX Should we check nested data types also?
# offset is always relative to the class...
def test_packed(self):
class X(Structure):
_fields_ = [("a", c_byte),
("b", c_longlong)]
_pack_ = 1
self.assertEqual(sizeof(X), 9)
self.assertEqual(X.b.offset, 1)
class X(Structure):
_fields_ = [("a", c_byte),
("b", c_longlong)]
_pack_ = 2
self.assertEqual(sizeof(X), 10)
self.assertEqual(X.b.offset, 2)
import struct
longlong_size = struct.calcsize("q")
longlong_align = struct.calcsize("bq") - longlong_size
class X(Structure):
_fields_ = [("a", c_byte),
("b", c_longlong)]
_pack_ = 4
self.assertEqual(sizeof(X), min(4, longlong_align) + longlong_size)
self.assertEqual(X.b.offset, min(4, longlong_align))
class X(Structure):
_fields_ = [("a", c_byte),
("b", c_longlong)]
_pack_ = 8
self.assertEqual(sizeof(X), min(8, longlong_align) + longlong_size)
self.assertEqual(X.b.offset, min(8, longlong_align))
d = {"_fields_": [("a", "b"),
("b", "q")],
"_pack_": -1}
self.assertRaises(ValueError, type(Structure), "X", (Structure,), d)
# Issue 15989
d = {"_fields_": [("a", c_byte)],
"_pack_": _testcapi.INT_MAX + 1}
self.assertRaises(ValueError, type(Structure), "X", (Structure,), d)
d = {"_fields_": [("a", c_byte)],
"_pack_": _testcapi.UINT_MAX + 2}
self.assertRaises(ValueError, type(Structure), "X", (Structure,), d)
def test_initializers(self):
class Person(Structure):
_fields_ = [("name", c_char*6),
("age", c_int)]
self.assertRaises(TypeError, Person, 42)
self.assertRaises(ValueError, Person, b"asldkjaslkdjaslkdj")
self.assertRaises(TypeError, Person, "Name", "HI")
# short enough
self.assertEqual(Person(b"12345", 5).name, b"12345")
# exact fit
self.assertEqual(Person(b"123456", 5).name, b"123456")
# too long
self.assertRaises(ValueError, Person, b"1234567", 5)
def test_conflicting_initializers(self):
class POINT(Structure):
_fields_ = [("x", c_int), ("y", c_int)]
# conflicting positional and keyword args
self.assertRaises(TypeError, POINT, 2, 3, x=4)
self.assertRaises(TypeError, POINT, 2, 3, y=4)
# too many initializers
self.assertRaises(TypeError, POINT, 2, 3, 4)
def test_keyword_initializers(self):
class POINT(Structure):
_fields_ = [("x", c_int), ("y", c_int)]
pt = POINT(1, 2)
self.assertEqual((pt.x, pt.y), (1, 2))
pt = POINT(y=2, x=1)
self.assertEqual((pt.x, pt.y), (1, 2))
def test_invalid_field_types(self):
class POINT(Structure):
pass
self.assertRaises(TypeError, setattr, POINT, "_fields_", [("x", 1), ("y", 2)])
def test_invalid_name(self):
# field name must be string
def declare_with_name(name):
class S(Structure):
_fields_ = [(name, c_int)]
self.assertRaises(TypeError, declare_with_name, b"x")
def test_intarray_fields(self):
class SomeInts(Structure):
_fields_ = [("a", c_int * 4)]
# can use tuple to initialize array (but not list!)
self.assertEqual(SomeInts((1, 2)).a[:], [1, 2, 0, 0])
self.assertEqual(SomeInts((1, 2)).a[::], [1, 2, 0, 0])
self.assertEqual(SomeInts((1, 2)).a[::-1], [0, 0, 2, 1])
self.assertEqual(SomeInts((1, 2)).a[::2], [1, 0])
self.assertEqual(SomeInts((1, 2)).a[1:5:6], [2])
self.assertEqual(SomeInts((1, 2)).a[6:4:-1], [])
self.assertEqual(SomeInts((1, 2, 3, 4)).a[:], [1, 2, 3, 4])
self.assertEqual(SomeInts((1, 2, 3, 4)).a[::], [1, 2, 3, 4])
# too long
        # XXX Should this raise ValueError, not RuntimeError?
self.assertRaises(RuntimeError, SomeInts, (1, 2, 3, 4, 5))
def test_nested_initializers(self):
# test initializing nested structures
class Phone(Structure):
_fields_ = [("areacode", c_char*6),
("number", c_char*12)]
class Person(Structure):
_fields_ = [("name", c_char * 12),
("phone", Phone),
("age", c_int)]
p = Person(b"Someone", (b"1234", b"5678"), 5)
self.assertEqual(p.name, b"Someone")
self.assertEqual(p.phone.areacode, b"1234")
self.assertEqual(p.phone.number, b"5678")
self.assertEqual(p.age, 5)
@need_symbol('c_wchar')
def test_structures_with_wchar(self):
class PersonW(Structure):
_fields_ = [("name", c_wchar * 12),
("age", c_int)]
p = PersonW("Someone \xe9")
self.assertEqual(p.name, "Someone \xe9")
self.assertEqual(PersonW("1234567890").name, "1234567890")
self.assertEqual(PersonW("12345678901").name, "12345678901")
# exact fit
self.assertEqual(PersonW("123456789012").name, "123456789012")
#too long
self.assertRaises(ValueError, PersonW, "1234567890123")
def test_init_errors(self):
class Phone(Structure):
_fields_ = [("areacode", c_char*6),
("number", c_char*12)]
class Person(Structure):
_fields_ = [("name", c_char * 12),
("phone", Phone),
("age", c_int)]
cls, msg = self.get_except(Person, b"Someone", (1, 2))
self.assertEqual(cls, RuntimeError)
self.assertEqual(msg,
"(Phone) <class 'TypeError'>: "
"expected bytes, int found")
cls, msg = self.get_except(Person, b"Someone", (b"a", b"b", b"c"))
self.assertEqual(cls, RuntimeError)
if issubclass(Exception, object):
self.assertEqual(msg,
"(Phone) <class 'TypeError'>: too many initializers")
else:
self.assertEqual(msg, "(Phone) TypeError: too many initializers")
def test_huge_field_name(self):
# issue12881: segfault with large structure field names
def create_class(length):
class S(Structure):
_fields_ = [('x' * length, c_int)]
for length in [10 ** i for i in range(0, 8)]:
try:
create_class(length)
except MemoryError:
# MemoryErrors are OK, we just don't want to segfault
pass
def get_except(self, func, *args):
try:
func(*args)
except Exception as detail:
return detail.__class__, str(detail)
@unittest.skip('test disabled')
def test_subclass_creation(self):
meta = type(Structure)
# same as 'class X(Structure): pass'
# fails, since we need either a _fields_ or a _abstract_ attribute
cls, msg = self.get_except(meta, "X", (Structure,), {})
self.assertEqual((cls, msg),
(AttributeError, "class must define a '_fields_' attribute"))
def test_abstract_class(self):
class X(Structure):
_abstract_ = "something"
# try 'X()'
cls, msg = self.get_except(eval, "X()", locals())
self.assertEqual((cls, msg), (TypeError, "abstract class"))
def test_methods(self):
## class X(Structure):
## _fields_ = []
self.assertIn("in_dll", dir(type(Structure)))
self.assertIn("from_address", dir(type(Structure)))
self.assertIn("in_dll", dir(type(Structure)))
def test_positional_args(self):
# see also http://bugs.python.org/issue5042
class W(Structure):
_fields_ = [("a", c_int), ("b", c_int)]
class X(W):
_fields_ = [("c", c_int)]
class Y(X):
pass
class Z(Y):
_fields_ = [("d", c_int), ("e", c_int), ("f", c_int)]
z = Z(1, 2, 3, 4, 5, 6)
self.assertEqual((z.a, z.b, z.c, z.d, z.e, z.f),
(1, 2, 3, 4, 5, 6))
z = Z(1)
self.assertEqual((z.a, z.b, z.c, z.d, z.e, z.f),
(1, 0, 0, 0, 0, 0))
self.assertRaises(TypeError, lambda: Z(1, 2, 3, 4, 5, 6, 7))
class PointerMemberTestCase(unittest.TestCase):
def test(self):
# a Structure with a POINTER field
class S(Structure):
_fields_ = [("array", POINTER(c_int))]
s = S()
# We can assign arrays of the correct type
s.array = (c_int * 3)(1, 2, 3)
items = [s.array[i] for i in range(3)]
self.assertEqual(items, [1, 2, 3])
# The following are bugs, but are included here because the unittests
# also describe the current behaviour.
#
# This fails with SystemError: bad arg to internal function
# or with IndexError (with a patch I have)
s.array[0] = 42
items = [s.array[i] for i in range(3)]
self.assertEqual(items, [42, 2, 3])
s.array[0] = 1
## s.array[1] = 42
items = [s.array[i] for i in range(3)]
self.assertEqual(items, [1, 2, 3])
def test_none_to_pointer_fields(self):
class S(Structure):
_fields_ = [("x", c_int),
("p", POINTER(c_int))]
s = S()
s.x = 12345678
s.p = None
self.assertEqual(s.x, 12345678)
class TestRecursiveStructure(unittest.TestCase):
def test_contains_itself(self):
class Recursive(Structure):
pass
try:
Recursive._fields_ = [("next", Recursive)]
except AttributeError as details:
self.assertIn("Structure or union cannot contain itself",
str(details))
else:
self.fail("Structure or union cannot contain itself")
def test_vice_versa(self):
class First(Structure):
pass
class Second(Structure):
pass
First._fields_ = [("second", Second)]
try:
Second._fields_ = [("first", First)]
except AttributeError as details:
self.assertIn("_fields_ is final", str(details))
else:
self.fail("AttributeError not raised")
if __name__ == '__main__':
unittest.main()
| lgpl-3.0 | -9,189,950,607,011,268,000 | 32.717949 | 86 | 0.510266 | false |
Bootz/multicore-opimization | llvm/tools/clang/utils/ABITest/Enumeration.py | 110 | 7814 | """Utilities for enumeration of finite and countably infinite sets.
"""
###
# Countable iteration
# Simplifies some calculations
class Aleph0(int):
_singleton = None
def __new__(type):
if type._singleton is None:
type._singleton = int.__new__(type)
return type._singleton
def __repr__(self): return '<aleph0>'
def __str__(self): return 'inf'
def __cmp__(self, b):
return 1
def __sub__(self, b):
raise ValueError,"Cannot subtract aleph0"
__rsub__ = __sub__
def __add__(self, b):
return self
__radd__ = __add__
def __mul__(self, b):
if b == 0: return b
return self
__rmul__ = __mul__
def __floordiv__(self, b):
if b == 0: raise ZeroDivisionError
return self
__rfloordiv__ = __floordiv__
__truediv__ = __floordiv__
__rtuediv__ = __floordiv__
__div__ = __floordiv__
__rdiv__ = __floordiv__
def __pow__(self, b):
if b == 0: return 1
return self
aleph0 = Aleph0()
def base(line):
return line*(line+1)//2
def pairToN((x,y)):
line,index = x+y,y
return base(line)+index
def getNthPairInfo(N):
# Avoid various singularities
if N==0:
return (0,0)
# Gallop to find bounds for line
line = 1
next = 2
while base(next)<=N:
line = next
next = line << 1
# Binary search for starting line
lo = line
hi = line<<1
while lo + 1 != hi:
#assert base(lo) <= N < base(hi)
mid = (lo + hi)>>1
if base(mid)<=N:
lo = mid
else:
hi = mid
line = lo
return line, N - base(line)
def getNthPair(N):
line,index = getNthPairInfo(N)
return (line - index, index)
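# Worked example (illustrative, not part of the original module): pairs are
# enumerated along the anti-diagonals of the quarter-plane, and pairToN is the
# inverse of getNthPair:
#
#     >>> [getNthPair(i) for i in range(6)]
#     [(0, 0), (1, 0), (0, 1), (2, 0), (1, 1), (0, 2)]
#     >>> all(pairToN(getNthPair(i)) == i for i in range(100))
#     True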
def getNthPairBounded(N,W=aleph0,H=aleph0,useDivmod=False):
"""getNthPairBounded(N, W, H) -> (x, y)
Return the N-th pair such that 0 <= x < W and 0 <= y < H."""
if W <= 0 or H <= 0:
raise ValueError,"Invalid bounds"
elif N >= W*H:
raise ValueError,"Invalid input (out of bounds)"
# Simple case...
if W is aleph0 and H is aleph0:
return getNthPair(N)
# Otherwise simplify by assuming W < H
if H < W:
x,y = getNthPairBounded(N,H,W,useDivmod=useDivmod)
return y,x
if useDivmod:
return N%W,N//W
else:
# Conceptually we want to slide a diagonal line across a
# rectangle. This gives more interesting results for large
# bounds than using divmod.
# If in lower left, just return as usual
cornerSize = base(W)
if N < cornerSize:
return getNthPair(N)
# Otherwise if in upper right, subtract from corner
if H is not aleph0:
M = W*H - N - 1
if M < cornerSize:
x,y = getNthPair(M)
return (W-1-x,H-1-y)
# Otherwise, compile line and index from number of times we
# wrap.
N = N - cornerSize
index,offset = N%W,N//W
# p = (W-1, 1+offset) + (-1,1)*index
return (W-1-index, 1+offset+index)
def getNthPairBoundedChecked(N,W=aleph0,H=aleph0,useDivmod=False,GNP=getNthPairBounded):
x,y = GNP(N,W,H,useDivmod)
assert 0 <= x < W and 0 <= y < H
return x,y
def getNthNTuple(N, W, H=aleph0, useLeftToRight=False):
"""getNthNTuple(N, W, H) -> (x_0, x_1, ..., x_W)
Return the N-th W-tuple, where for 0 <= x_i < H."""
if useLeftToRight:
elts = [None]*W
for i in range(W):
elts[i],N = getNthPairBounded(N, H)
return tuple(elts)
else:
if W==0:
return ()
elif W==1:
return (N,)
elif W==2:
return getNthPairBounded(N, H, H)
else:
LW,RW = W//2, W - (W//2)
L,R = getNthPairBounded(N, H**LW, H**RW)
return (getNthNTuple(L,LW,H=H,useLeftToRight=useLeftToRight) +
getNthNTuple(R,RW,H=H,useLeftToRight=useLeftToRight))
def getNthNTupleChecked(N, W, H=aleph0, useLeftToRight=False, GNT=getNthNTuple):
t = GNT(N,W,H,useLeftToRight)
assert len(t) == W
for i in t:
assert i < H
return t
def getNthTuple(N, maxSize=aleph0, maxElement=aleph0, useDivmod=False, useLeftToRight=False):
"""getNthTuple(N, maxSize, maxElement) -> x
Return the N-th tuple where len(x) < maxSize and for y in x, 0 <=
y < maxElement."""
# All zero sized tuples are isomorphic, don't ya know.
if N == 0:
return ()
N -= 1
if maxElement is not aleph0:
if maxSize is aleph0:
raise NotImplementedError,'Max element size without max size unhandled'
bounds = [maxElement**i for i in range(1, maxSize+1)]
S,M = getNthPairVariableBounds(N, bounds)
else:
S,M = getNthPairBounded(N, maxSize, useDivmod=useDivmod)
return getNthNTuple(M, S+1, maxElement, useLeftToRight=useLeftToRight)
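# Illustrative enumeration (assumed behaviour with the default unbounded
# arguments): index 0 is the empty tuple, after which tuple sizes and element
# values are interleaved through the pairing functions above:
#
#     >>> [getNthTuple(i) for i in range(5)]
#     [(), (0,), (0, 0), (1,), (0, 0, 0)]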
def getNthTupleChecked(N, maxSize=aleph0, maxElement=aleph0,
useDivmod=False, useLeftToRight=False, GNT=getNthTuple):
# FIXME: maxsize is inclusive
t = GNT(N,maxSize,maxElement,useDivmod,useLeftToRight)
assert len(t) <= maxSize
for i in t:
assert i < maxElement
return t
def getNthPairVariableBounds(N, bounds):
"""getNthPairVariableBounds(N, bounds) -> (x, y)
Given a finite list of bounds (which may be finite or aleph0),
return the N-th pair such that 0 <= x < len(bounds) and 0 <= y <
bounds[x]."""
if not bounds:
raise ValueError,"Invalid bounds"
if not (0 <= N < sum(bounds)):
raise ValueError,"Invalid input (out of bounds)"
level = 0
active = range(len(bounds))
active.sort(key=lambda i: bounds[i])
prevLevel = 0
for i,index in enumerate(active):
level = bounds[index]
W = len(active) - i
if level is aleph0:
H = aleph0
else:
H = level - prevLevel
levelSize = W*H
if N<levelSize: # Found the level
idelta,delta = getNthPairBounded(N, W, H)
return active[i+idelta],prevLevel+delta
else:
N -= levelSize
prevLevel = level
else:
        raise RuntimeError,"Unexpected loop completion"
def getNthPairVariableBoundsChecked(N, bounds, GNVP=getNthPairVariableBounds):
x,y = GNVP(N,bounds)
assert 0 <= x < len(bounds) and 0 <= y < bounds[x]
return (x,y)
###
def testPairs():
W = 3
H = 6
a = [[' ' for x in range(10)] for y in range(10)]
b = [[' ' for x in range(10)] for y in range(10)]
for i in range(min(W*H,40)):
x,y = getNthPairBounded(i,W,H)
x2,y2 = getNthPairBounded(i,W,H,useDivmod=True)
print i,(x,y),(x2,y2)
a[y][x] = '%2d'%i
b[y2][x2] = '%2d'%i
print '-- a --'
for ln in a[::-1]:
if ''.join(ln).strip():
print ' '.join(ln)
print '-- b --'
for ln in b[::-1]:
if ''.join(ln).strip():
print ' '.join(ln)
def testPairsVB():
bounds = [2,2,4,aleph0,5,aleph0]
a = [[' ' for x in range(15)] for y in range(15)]
b = [[' ' for x in range(15)] for y in range(15)]
for i in range(min(sum(bounds),40)):
x,y = getNthPairVariableBounds(i, bounds)
print i,(x,y)
a[y][x] = '%2d'%i
print '-- a --'
for ln in a[::-1]:
if ''.join(ln).strip():
print ' '.join(ln)
###
# Toggle to use checked versions of enumeration routines.
if False:
getNthPairVariableBounds = getNthPairVariableBoundsChecked
getNthPairBounded = getNthPairBoundedChecked
getNthNTuple = getNthNTupleChecked
getNthTuple = getNthTupleChecked
if __name__ == '__main__':
testPairs()
testPairsVB()
| gpl-3.0 | 5,023,564,937,907,231,000 | 27.311594 | 93 | 0.559381 | false |
jepler/linuxcnc-mirror | lib/python/qtvcp/plugins/simplewidgets_plugin.py | 3 | 20659 | #!/usr/bin/env python
from PyQt5 import QtCore, QtGui
from PyQt5.QtDesigner import QPyDesignerCustomWidgetPlugin
from qtvcp.widgets.simple_widgets import PushButton
from qtvcp.widgets.simple_widgets import CheckBox
from qtvcp.widgets.simple_widgets import RadioButton
from qtvcp.widgets.simple_widgets import LCDNumber
from qtvcp.widgets.simple_widgets import Slider
from qtvcp.widgets.simple_widgets import GridLayout
from qtvcp.widgets.simple_widgets import Dial
from qtvcp.widgets.general_hal_output import GeneralHALOutput
from qtvcp.widgets.general_hal_input import GeneralHALInput
from qtvcp.widgets.xembed import XEmbed
from qtvcp.widgets.radio_axis_selector import RadioAxisSelector
from qtvcp.widgets.axis_tool_button import AxisToolButton
from qtvcp.widgets.offset_tool_button import OffsetToolButton
from qtvcp.widgets.file_manager import FileManager
from qtvcp.widgets.image_switcher import ImageSwitcher
from qtvcp.widgets.image_switcher import StatusImageSwitcher
from qtvcp.widgets.machine_log import MachineLog
from qtvcp.widgets.qtvcp_icons import Icon
ICON = Icon()
####################################
# PUSHBUTTON
####################################
class PushButtonPlugin(QPyDesignerCustomWidgetPlugin):
# The __init__() method is only used to set up the plugin and define its
# initialized variable.
def __init__(self, parent = None):
super(PushButtonPlugin, self).__init__(parent)
self.initialized = False
# The initialize() and isInitialized() methods allow the plugin to set up
# any required resources, ensuring that this can only happen once for each
# plugin.
def initialize(self, formEditor):
if self.initialized:
return
self.initialized = True
def isInitialized(self):
return self.initialized
# This factory method creates new instances of our custom widget
def createWidget(self, parent):
return PushButton(parent)
# This method returns the name of the custom widget class
def name(self):
return "PushButton"
# Returns the name of the group in Qt Designer's widget box
def group(self):
return "Linuxcnc - HAL"
# Returns the icon
def icon(self):
return QtGui.QIcon(QtGui.QPixmap(ICON.get_path('pushbutton')))
# Returns a tool tip short description
def toolTip(self):
return "Push button widget"
# Returns a short description of the custom widget for use in a "What's
# This?" help message for the widget.
def whatsThis(self):
return ""
# Returns True if the custom widget acts as a container for other widgets;
def isContainer(self):
return False
# Returns an XML description of a custom widget instance that describes
# default values for its properties.
def domXml(self):
return '<widget class="PushButton" name="pushbutton" />\n'
# Returns the module containing the custom widget class. It may include
# a module path.
def includeFile(self):
return "qtvcp.widgets.simple_widgets"
####################################
# CHECKBUTTON
####################################
class CheckBoxPlugin(QPyDesignerCustomWidgetPlugin):
def __init__(self, parent = None):
super(CheckBoxPlugin, self).__init__(parent)
self.initialized = False
def initialize(self, formEditor):
if self.initialized:
return
self.initialized = True
def isInitialized(self):
return self.initialized
def createWidget(self, parent):
return CheckBox(parent)
def name(self):
return "CheckBox"
def group(self):
return "Linuxcnc - HAL"
def icon(self):
return QtGui.QIcon(QtGui.QPixmap(ICON.get_path('checkbox')))
def toolTip(self):
return "HAL Checkbox widget"
def whatsThis(self):
return ""
def isContainer(self):
return False
def domXml(self):
return '<widget class="CheckBox" name="checkbox" />\n'
def includeFile(self):
return "qtvcp.widgets.simple_widgets"
####################################
# RADIOBUTTON
####################################
class RadioButtonPlugin(QPyDesignerCustomWidgetPlugin):
def __init__(self, parent = None):
super(RadioButtonPlugin, self).__init__(parent)
self.initialized = False
def initialize(self, formEditor):
if self.initialized:
return
self.initialized = True
def isInitialized(self):
return self.initialized
def createWidget(self, parent):
return RadioButton(parent)
def name(self):
return "RadioButton"
def group(self):
return "Linuxcnc - HAL"
def icon(self):
return QtGui.QIcon(QtGui.QPixmap(ICON.get_path('radiobutton')))
def toolTip(self):
return "HAL Radiobutton widget"
def whatsThis(self):
return ""
def isContainer(self):
return False
def domXml(self):
return '<widget class="RadioButton" name="radiobutton" />\n'
def includeFile(self):
return "qtvcp.widgets.simple_widgets"
####################################
# LCD Display
####################################
class LCDNumberPlugin(QPyDesignerCustomWidgetPlugin):
def __init__(self, parent = None):
super(LCDNumberPlugin, self).__init__(parent)
self.initialized = False
def initialize(self, formEditor):
if self.initialized:
return
self.initialized = True
def isInitialized(self):
return self.initialized
def createWidget(self, parent):
return LCDNumber(parent)
def name(self):
return "LCDNumber"
def group(self):
return "Linuxcnc - HAL"
def icon(self):
return QtGui.QIcon(QtGui.QPixmap(ICON.get_path('LCDNumber')))
def toolTip(self):
return "HAL LCD Display widget"
def whatsThis(self):
return ""
def isContainer(self):
return False
def domXml(self):
return '<widget class="LCDNumber" name="lcdnumber" />\n'
def includeFile(self):
return "qtvcp.widgets.simple_widgets"
####################################
# Slider
####################################
class SliderPlugin(QPyDesignerCustomWidgetPlugin):
def __init__(self, parent = None):
super(SliderPlugin, self).__init__(parent)
self.initialized = False
def initialize(self, formEditor):
if self.initialized:
return
self.initialized = True
def isInitialized(self):
return self.initialized
def createWidget(self, parent):
return Slider(parent)
def name(self):
return "Slider"
def group(self):
return "Linuxcnc - HAL"
def icon(self):
return QtGui.QIcon(QtGui.QPixmap(ICON.get_path('slider')))
def toolTip(self):
return "HAL Slider widget"
def whatsThis(self):
return ""
def isContainer(self):
return False
def domXml(self):
return("""
<widget class="Slider" name="slider">
<property name=\"maximum\">
<number>100</number>
</property>
<property name=\"orientation\">
<enum>Qt::Horizontal</enum>
</property>
</widget>
""")
def includeFile(self):
return "qtvcp.widgets.simple_widgets"
####################################
# GridLayout
####################################
class LcncGridLayoutPlugin(QPyDesignerCustomWidgetPlugin):
def __init__(self, parent = None):
super(LcncGridLayoutPlugin, self).__init__(parent)
self.initialized = False
def initialize(self, formEditor):
if self.initialized:
return
self.initialized = True
def isInitialized(self):
return self.initialized
def createWidget(self, parent):
return GridLayout(parent)
def name(self):
return "GridLayout"
def group(self):
return "Linuxcnc - HAL"
def icon(self):
return QtGui.QIcon(QtGui.QPixmap(ICON.get_path('gridlayout')))
def toolTip(self):
return "HAL enable/disable GridLayout widget"
def whatsThis(self):
return ""
def isContainer(self):
return True
def domXml(self):
return '<widget class="GridLayout" name="gridlayout" />\n'
def includeFile(self):
return "qtvcp.widgets.simple_widgets"
####################################
# GeneralHALOutput
####################################
class GeneralHALOutputPlugin(QPyDesignerCustomWidgetPlugin):
def __init__(self, parent = None):
super(GeneralHALOutputPlugin, self).__init__(parent)
self.initialized = False
def initialize(self, formEditor):
if self.initialized:
return
self.initialized = True
def isInitialized(self):
return self.initialized
def createWidget(self, parent):
return GeneralHALOutput(parent)
def name(self):
return "GeneralHALOutput"
def group(self):
return "Linuxcnc - HAL"
def icon(self):
return QtGui.QIcon(QtGui.QPixmap(ICON.get_path('generalhaloutput')))
def toolTip(self):
return "Generalized HAL Output Pin Widget"
def whatsThis(self):
return "Used to add HAl Pins to Arbritrary widgets"
def isContainer(self):
return True
def domXml(self):
return '<widget class="GeneralHALOutput" name="generalhaloutput" />\n'
def includeFile(self):
return "qtvcp.widgets.general_hal_output"
####################################
# GeneralHALInput
####################################
class GeneralHALInputPlugin(QPyDesignerCustomWidgetPlugin):
def __init__(self, parent = None):
super(GeneralHALInputPlugin, self).__init__(parent)
self.initialized = False
def initialize(self, formEditor):
if self.initialized:
return
self.initialized = True
def isInitialized(self):
return self.initialized
def createWidget(self, parent):
return GeneralHALInput(parent)
def name(self):
return "GeneralHALInput"
def group(self):
return "Linuxcnc - HAL"
def icon(self):
return QtGui.QIcon(QtGui.QPixmap(ICON.get_path('generalhalinput')))
def toolTip(self):
return "Generalized HAL Input Pin Widget"
def whatsThis(self):
return "Used to add HAl Pins to Arbritrary widgets"
def isContainer(self):
return True
def domXml(self):
return '<widget class="GeneralHALInput" name="generalhalinput" />\n'
def includeFile(self):
return "qtvcp.widgets.general_hal_input"
####################################
# XEmbed
####################################
class XEmbedPlugin(QPyDesignerCustomWidgetPlugin):
def __init__(self, parent = None):
super(XEmbedPlugin, self).__init__(parent)
self.initialized = False
def initialize(self, formEditor):
if self.initialized:
return
self.initialized = True
def isInitialized(self):
return self.initialized
def createWidget(self, parent):
return XEmbed(parent)
def name(self):
return "XEmbed"
def group(self):
return "Linuxcnc - HAL"
def icon(self):
return QtGui.QIcon(QtGui.QPixmap(ICON.get_path('xembed')))
def toolTip(self):
return "Widget for embedding thirdparty programs"
def whatsThis(self):
return ""
def isContainer(self):
return True
def domXml(self):
return '<widget class="XEmbed" name="xembed" />\n'
def includeFile(self):
return "qtvcp.widgets.xembed"
####################################
# RadioAxisSelector
####################################
class RadioAxisSelectorPlugin(QPyDesignerCustomWidgetPlugin):
def __init__(self, parent = None):
super(RadioAxisSelectorPlugin, self).__init__(parent)
self.initialized = False
def initialize(self, formEditor):
if self.initialized:
return
self.initialized = True
def isInitialized(self):
return self.initialized
def createWidget(self, parent):
return RadioAxisSelector(parent)
def name(self):
return "RadioAxisSelector"
def group(self):
return "Linuxcnc - Controller"
def icon(self):
return QtGui.QIcon(QtGui.QPixmap(ICON.get_path('radioaxisselector')))
def toolTip(self):
return "RadioAxisSelector widget"
def whatsThis(self):
return ""
def isContainer(self):
return False
def domXml(self):
return '<widget class="RadioAxisSelector" name="radioaxisselector" />\n'
def includeFile(self):
return "qtvcp.widgets.radio_axis_selector"
####################################
# AxisToolButton
####################################
class AxisToolButtonPlugin(QPyDesignerCustomWidgetPlugin):
def __init__(self, parent = None):
super(AxisToolButtonPlugin, self).__init__(parent)
self.initialized = False
def initialize(self, formEditor):
if self.initialized:
return
self.initialized = True
def isInitialized(self):
return self.initialized
def createWidget(self, parent):
return AxisToolButton(parent)
def name(self):
return "AxisToolButton"
def group(self):
return "Linuxcnc - Controller"
def icon(self):
return QtGui.QIcon(QtGui.QPixmap(ICON.get_path('axistoolbutton')))
def toolTip(self):
return "Button for selecting an Axis and setting the Origin"
def whatsThis(self):
return ""
def isContainer(self):
return False
def domXml(self):
return '<widget class="AxisToolButton" name="axistoolbutton" />\n'
def includeFile(self):
return "qtvcp.widgets.axis_tool_button"
####################################
# OffsetToolButton
####################################
class OffsetToolButtonPlugin(QPyDesignerCustomWidgetPlugin):
def __init__(self, parent = None):
super(OffsetToolButtonPlugin, self).__init__(parent)
self.initialized = False
def initialize(self, formEditor):
if self.initialized:
return
self.initialized = True
def isInitialized(self):
return self.initialized
def createWidget(self, parent):
return OffsetToolButton(parent)
def name(self):
return "OffsetToolButton"
def group(self):
return "Linuxcnc - Controller"
def icon(self):
return QtGui.QIcon(QtGui.QPixmap(ICON.get_path('offsettoolbutton')))
def toolTip(self):
return "Button for selecting an Axis and setting the Origin"
def whatsThis(self):
return ""
def isContainer(self):
return False
def domXml(self):
return '<widget class="OffsetToolButton" name="offsettoolbutton" />\n'
def includeFile(self):
return "qtvcp.widgets.offset_tool_button"
####################################
# FileManager
####################################
class FileManagerPlugin(QPyDesignerCustomWidgetPlugin):
def __init__(self, parent = None):
super(FileManagerPlugin, self).__init__(parent)
self.initialized = False
def initialize(self, formEditor):
if self.initialized:
return
self.initialized = True
def isInitialized(self):
return self.initialized
def createWidget(self, parent):
return FileManager(parent)
def name(self):
return "FileManager"
def group(self):
return "Linuxcnc - Controller"
def icon(self):
return QtGui.QIcon(QtGui.QPixmap(ICON.get_path('filemanager')))
def toolTip(self):
return "Button for selecting an Axis and setting the Origin"
def whatsThis(self):
return ""
def isContainer(self):
return False
def domXml(self):
return '<widget class="FileManager" name="filemanager" />\n'
def includeFile(self):
return "qtvcp.widgets.file_manager"
####################################
# ImageSwitcher
####################################
class ImageSwitcherPlugin(QPyDesignerCustomWidgetPlugin):
def __init__(self, parent = None):
super(ImageSwitcherPlugin, self).__init__(parent)
self.initialized = False
def initialize(self, formEditor):
if self.initialized:
return
self.initialized = True
def isInitialized(self):
return self.initialized
def createWidget(self, parent):
w = ImageSwitcher(parent)
w._designerInit()
return w
def name(self):
return "ImageSwitcher"
def group(self):
return "Linuxcnc - HAL"
def icon(self):
return QtGui.QIcon(QtGui.QPixmap(ICON.get_path('imageswitcher')))
def toolTip(self):
return "Label that switches between different images"
def whatsThis(self):
return ""
def isContainer(self):
return False
def domXml(self):
return '''<widget class="ImageSwitcher" name="imageswitcher">
<property name="geometry">
<rect>
<x>10</x>
<y>0</y>
<width>50</width>
<height>50</height>
</rect>
</property>
</widget>'''
def includeFile(self):
return "qtvcp.widgets.image_switcher"
####################################
# StatusImageSwitcher
####################################
class StatusImageSwitcherPlugin(QPyDesignerCustomWidgetPlugin):
def __init__(self, parent = None):
super(StatusImageSwitcherPlugin, self).__init__(parent)
self.initialized = False
def initialize(self, formEditor):
if self.initialized:
return
self.initialized = True
def isInitialized(self):
return self.initialized
def createWidget(self, parent):
w = StatusImageSwitcher(parent)
w._designerInit()
return w
def name(self):
return "StatusImageSwitcher"
def group(self):
return "Linuxcnc - Controller"
def icon(self):
return QtGui.QIcon(QtGui.QPixmap(ICON.get_path('statusimageswitcher')))
def toolTip(self):
return "Label that switches between different Images based on status"
def whatsThis(self):
return ""
def isContainer(self):
return False
def domXml(self):
return '''<widget class="StatusImageSwitcher" name="statusimageswitcher"></widget>'''
def includeFile(self):
return "qtvcp.widgets.image_switcher"
####################################
# Dial
####################################
class DialPlugin(QPyDesignerCustomWidgetPlugin):
def __init__(self, parent = None):
super(DialPlugin, self).__init__(parent)
self.initialized = False
def initialize(self, formEditor):
if self.initialized:
return
self.initialized = True
def isInitialized(self):
return self.initialized
def createWidget(self, parent):
return Dial(parent)
def name(self):
return "Dial"
def group(self):
return "Linuxcnc - HAL"
def icon(self):
return QtGui.QIcon(QtGui.QPixmap(ICON.get_path('dial')))
def toolTip(self):
return "Handwheel Widget"
def whatsThis(self):
return ""
def isContainer(self):
return False
def domXml(self):
return '''<widget class="Dial" name="dial"></widget>'''
def includeFile(self):
return "qtvcp.widgets.simple_widgets"
####################################
# MachineLog
####################################
class MachineLogPlugin(QPyDesignerCustomWidgetPlugin):
def __init__(self, parent = None):
super(MachineLogPlugin, self).__init__(parent)
self.initialized = False
def initialize(self, formEditor):
if self.initialized:
return
self.initialized = True
def isInitialized(self):
return self.initialized
def createWidget(self, parent):
return MachineLog(parent)
def name(self):
return "MachineLog"
def group(self):
return "Linuxcnc - Controller"
def icon(self):
return QtGui.QIcon(QtGui.QPixmap(ICON.get_path('machinelog')))
def toolTip(self):
return "Machine Log Display Widget"
def whatsThis(self):
return ""
def isContainer(self):
return False
def domXml(self):
return '''<widget class="MachineLog" name="machinelog"></widget>'''
def includeFile(self):
return "qtvcp.widgets.machine_log"
| lgpl-2.1 | -5,714,704,274,526,275,000 | 32.267311 | 93 | 0.608452 | false |
epall/selenium | firefox/src/py/firefoxlauncher.py | 4 | 2957 | # Copyright 2008-2009 WebDriver committers
# Copyright 2008-2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Launches the firefox and does necessary preparation like
installing the extension"""
from subprocess import Popen
from subprocess import PIPE
import logging
import shutil
import tempfile
import time
import platform
import os
from extensionconnection import ExtensionConnection
from firefox_profile import FirefoxProfile
import utils
MAX_START_ATTEMPTS = 20
class FirefoxLauncher(object):
"""Launches the firefox browser."""
def __init__(self):
self.extension_connection = ExtensionConnection()
self._start_cmd = utils.get_firefox_start_cmd()
self.process = None
def launch_browser(self, profile):
"""Launches the browser for the given profile name.
It is assumed the profile already exists.
"""
self.profile = profile
while self.extension_connection.is_connectable():
logging.info("Browser already running, kill it")
self.extension_connection.connect_and_quit()
time.sleep(1)
self._start_from_profile_path(profile.path)
attempts = 0
while not self.extension_connection.is_connectable():
attempts += 1
if attempts > MAX_START_ATTEMPTS:
                raise RuntimeError("Unable to start Firefox")
self._start_from_profile_path(profile.path)
time.sleep(1)
def _lock_file_exists(self):
return os.path.exists(os.path.join(self.profile.path, ".parentlock"))
def kill(self):
"""Kill the browser.
This is useful when the browser is stuck.
"""
try:
if self.process:
os.kill(self.process.pid, 9)
except AttributeError:
# kill may not be available under windows environment
pass
def _start_from_profile_path(self, path):
os.environ["XRE_PROFILE_PATH"] = path
self.process = Popen([self._start_cmd, "-no-remote", "--verbose"])
def _wait_until_connectable(self):
"""Blocks until the extension is connectable in the firefox."""
while not self.extension_connection.is_connectable():
logging.debug("Waiting for browser to launch...")
if self.process.returncode:
# Browser has exited
return False
time.sleep(1)
return True
| apache-2.0 | -3,428,735,263,898,588,700 | 32.602273 | 77 | 0.660467 | false |
coderb0t/CouchPotatoServer | couchpotato/core/media/_base/providers/torrent/sceneaccess.py | 44 | 5755 | import traceback
from bs4 import BeautifulSoup
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.providers.torrent.base import TorrentProvider
log = CPLog(__name__)
class Base(TorrentProvider):
urls = {
'test': 'https://www.sceneaccess.eu/',
'login': 'https://www.sceneaccess.eu/login',
'login_check': 'https://www.sceneaccess.eu/inbox',
'detail': 'https://www.sceneaccess.eu/details?id=%s',
'search': 'https://www.sceneaccess.eu/browse?c%d=%d',
'archive': 'https://www.sceneaccess.eu/archive?&c%d=%d',
'download': 'https://www.sceneaccess.eu/%s',
}
http_time_between_calls = 1 # Seconds
def _searchOnTitle(self, title, media, quality, results):
url = self.buildUrl(title, media, quality)
data = self.getHTMLData(url)
if data:
html = BeautifulSoup(data)
try:
resultsTable = html.find('table', attrs = {'id': 'torrents-table'})
if resultsTable is None:
return
entries = resultsTable.find_all('tr', attrs = {'class': 'tt_row'})
for result in entries:
link = result.find('td', attrs = {'class': 'ttr_name'}).find('a')
url = result.find('td', attrs = {'class': 'td_dl'}).find('a')
seeders = result.find('td', attrs = {'class': 'ttr_seeders'}).find('a')
leechers = result.find('td', attrs = {'class': 'ttr_leechers'}).find('a')
torrent_id = link['href'].replace('details?id=', '')
results.append({
'id': torrent_id,
'name': link['title'],
'url': self.urls['download'] % url['href'],
'detail_url': self.urls['detail'] % torrent_id,
'size': self.parseSize(result.find('td', attrs = {'class': 'ttr_size'}).contents[0]),
'seeders': tryInt(seeders.string) if seeders else 0,
'leechers': tryInt(leechers.string) if leechers else 0,
'get_more_info': self.getMoreInfo,
})
except:
log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
def getMoreInfo(self, item):
full_description = self.getCache('sceneaccess.%s' % item['id'], item['detail_url'], cache_timeout = 25920000)
html = BeautifulSoup(full_description)
nfo_pre = html.find('div', attrs = {'id': 'details_table'})
description = toUnicode(nfo_pre.text) if nfo_pre else ''
item['description'] = description
return item
# Login
def getLoginParams(self):
return {
'username': self.conf('username'),
'password': self.conf('password'),
'submit': 'come on in',
}
def loginSuccess(self, output):
return '/inbox' in output.lower()
loginCheckSuccess = loginSuccess
config = [{
'name': 'sceneaccess',
'groups': [
{
'tab': 'searcher',
'list': 'torrent_providers',
'name': 'SceneAccess',
'description': '<a href="https://sceneaccess.eu/">SceneAccess</a>',
'wizard': True,
'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAAABnRSTlMAAAAAAABupgeRAAACT0lEQVR4AYVQS0sbURidO3OTmajJ5FElTTOkPmZ01GhHrIq0aoWAj1Vc+A/cuRMXbl24V9SlCGqrLhVFCrooEhCp2BAx0mobTY2kaR7qmOm87EXL1EWxh29xL+c7nPMdgGHYO5bF/gdbefnr6WlbWRnxluMwAB4Z0uEgXa7nwaDL7+/RNPzxbYvb/XJ0FBYVfd/ayh0fQ4qCGEHcm0KLRZUk7Pb2YRJPRwcsKMidnKD3t9VVT3s7BDh+z5FOZ3Vfn3h+Hltfx00mRRSRWFcUmmVNhYVqPn8dj3va2oh+txvcQRVF9ebm1fi4k+dRFbosY5rm4Hk7xxULQnJnx93S4g0EIEEQRoDLo6PrWEw8Pc0eHLwYGopMTDirqlJ7eyhYYGHhfgfHCcKYksZGVB/NcXI2mw6HhZERqrjYTNPHi4tFPh8aJIYIhgPlcCRDoZLW1s75+Z/7+59nZ/OJhLWigqAoKZX6Mjf3dXkZ3pydGYLc4aEoCCkInzQ1fRobS2xuvllaonkedfArnY5OTdGVldBkOADgqq2Nr6z8CIWaJietDHOhKB+HhwFKC6Gnq4ukKJvP9zcSbjYDXbeVlkKzuZBhnnV3e3t6UOmaJO0ODibW1hB1GYkg8R/gup7Z3TVZLJ5AILW9LcZiVpYtYBhw16O3t7cauckyeF9Tgz0ATpL2+nopmWycmbnY2LiKRjFk6/d7+/vRJfl4HGzV1T0UIM43MGBvaIBWK/YvwM5w+IMgGH8tkyEgvIpE7M3Nt6qqZrNyOq1kMmouh455Ggz+BhKY4GEc2CfwAAAAAElFTkSuQmCC',
'options': [
{
'name': 'enabled',
'type': 'enabler',
'default': False,
},
{
'name': 'username',
'default': '',
},
{
'name': 'password',
'default': '',
'type': 'password',
},
{
'name': 'seed_ratio',
'label': 'Seed ratio',
'type': 'float',
'default': 1,
'description': 'Will not be (re)moved until this seed ratio is met.',
},
{
'name': 'seed_time',
'label': 'Seed time',
'type': 'int',
'default': 40,
'description': 'Will not be (re)moved until this seed time (in hours) is met.',
},
{
'name': 'extra_score',
'advanced': True,
'label': 'Extra Score',
'type': 'int',
'default': 20,
'description': 'Starting score for each release found via this provider.',
}
],
},
],
}]
| gpl-3.0 | 535,981,053,093,615,200 | 41.316176 | 911 | 0.544744 | false |
gayancliyanage/karma | scripts/travis_after_all.py | 93 | 3418 | import os
import json
import time
import logging
try:
    import urllib.request as urllib2
except ImportError:
    import urllib2
# reduce() is used below to aggregate job results; it is not a builtin on
# Python 3, and functools.reduce works on both Python 2.6+ and Python 3.
from functools import reduce
log = logging.getLogger("travis.leader")
log.addHandler(logging.StreamHandler())
log.setLevel(logging.INFO)
TRAVIS_JOB_NUMBER = 'TRAVIS_JOB_NUMBER'
TRAVIS_BUILD_ID = 'TRAVIS_BUILD_ID'
POLLING_INTERVAL = 'LEADER_POLLING_INTERVAL'
build_id = os.getenv(TRAVIS_BUILD_ID)
polling_interval = int(os.getenv(POLLING_INTERVAL, '5'))
# Assume the first job in the matrix is the leader
is_leader = lambda job_number: job_number.endswith('.1')
if not os.getenv(TRAVIS_JOB_NUMBER):
# seems even for builds with only one job, this won't get here
    log.fatal("Don't use leader election for a build without a matrix")
exit(1)
elif is_leader(os.getenv(TRAVIS_JOB_NUMBER)):
log.info("This is a leader")
else:
#since python is subprocess, env variables are exported back via file
with open(".to_export_back", "w") as export_var:
export_var.write("BUILD_MINION=YES")
log.info("This is a minion")
exit(0)
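# Flow summary: the job whose number ends in ".1" is treated as the leader;
# every other job records BUILD_MINION=YES in .to_export_back and exits
# immediately, while the leader continues below and polls the Travis API until
# the rest of the build matrix has finished.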
class MatrixElement(object):
def __init__(self, json_raw):
self.is_finished = json_raw['finished_at'] is not None
self.is_succeeded = json_raw['result'] == 0
self.number = json_raw['number']
self.is_leader = is_leader(self.number)
def matrix_snapshot():
"""
:return: Matrix List
"""
response = urllib2.build_opener().open("https://api.travis-ci.org/builds/{0}".format(build_id)).read()
raw_json = json.loads(response)
matrix_without_leader = [MatrixElement(element) for element in raw_json["matrix"]]
return matrix_without_leader
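# Note: despite its name, the list returned above contains every matrix
# element, including the leader; callers filter on el.is_leader themselves.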
def wait_others_to_finish():
def others_finished():
"""
        Checks whether all the other jobs have finished.
        The leader itself is excluded, since it is still running this code.
:return: tuple(True or False, List of not finished jobs)
"""
snapshot = matrix_snapshot()
finished = [el.is_finished for el in snapshot if not el.is_leader]
return reduce(lambda a, b: a and b, finished), [el.number for el in snapshot if
not el.is_leader and not el.is_finished]
while True:
finished, waiting_list = others_finished()
if finished: break
log.info("Leader waits for minions {0}...".format(waiting_list)) # just in case do not get "silence timeout"
time.sleep(polling_interval)
try:
wait_others_to_finish()
final_snapshot = matrix_snapshot()
log.info("Final Results: {0}".format([(e.number, e.is_succeeded) for e in final_snapshot]))
BUILD_AGGREGATE_STATUS = 'BUILD_AGGREGATE_STATUS'
others_snapshot = [el for el in final_snapshot if not el.is_leader]
if reduce(lambda a, b: a and b, [e.is_succeeded for e in others_snapshot]):
os.environ[BUILD_AGGREGATE_STATUS] = "others_succeeded"
elif reduce(lambda a, b: a and b, [not e.is_succeeded for e in others_snapshot]):
log.error("Others Failed")
os.environ[BUILD_AGGREGATE_STATUS] = "others_failed"
else:
log.warn("Others Unknown")
os.environ[BUILD_AGGREGATE_STATUS] = "unknown"
#since python is subprocess, env variables are exported back via file
with open(".to_export_back", "w") as export_var:
export_var.write("BUILD_LEADER=YES {0}={1}".format(BUILD_AGGREGATE_STATUS, os.environ[BUILD_AGGREGATE_STATUS]))
except Exception as e:
log.fatal(e) | mit | 6,423,600,169,622,113,000 | 34.247423 | 119 | 0.661791 | false |
sigurdga/nidarholm | projects/views.py | 1 | 1754 | from django.views.generic.list_detail import object_list, object_detail
from django.shortcuts import get_object_or_404, render_to_response
from django.template.context import RequestContext
from django.http import HttpResponseRedirect
from projects.models import Project
from projects.forms import ProjectForm
def project_list(request):
queryset = Project.objects.for_user(request.user)
return object_list(request, queryset)
def project_detail(request, slug):
queryset = Project.objects.for_user(request.user)
return object_detail(request, queryset, slug=slug)
def new_project(request):
if request.method == 'POST':
project = Project()
form = ProjectForm(request.POST, instance=project)
if form.is_valid():
form.save(commit=False)
project.user = request.user
project.admingroup = request.organization.admingroup
project.save()
form.save_m2m()
return HttpResponseRedirect(project.get_absolute_url())
else:
project = Project()
form = ProjectForm(instance=project)
return render_to_response('projects/new_project.html', {'form': form}, context_instance=RequestContext(request))
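# new_project and edit_project intentionally render the same template; only the
# bound Project instance (a fresh one vs. one fetched by slug) differs.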
def edit_project(request, slug):
if request.method == 'POST':
project = get_object_or_404(Project, slug=slug)
form = ProjectForm(request.POST, instance=project)
if form.is_valid():
form.save()
return HttpResponseRedirect(project.get_absolute_url())
else:
project = get_object_or_404(Project, slug=slug)
form = ProjectForm(instance=project)
return render_to_response('projects/new_project.html', {'form': form}, context_instance=RequestContext(request))
| agpl-3.0 | 8,512,307,491,745,753,000 | 35.541667 | 116 | 0.690422 | false |
RackSec/ansible | lib/ansible/modules/network/nxos/nxos_igmp.py | 50 | 7592 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: nxos_igmp
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages IGMP global configuration.
description:
- Manages IGMP global configuration configuration settings.
author:
- Jason Edelman (@jedelman8)
- Gabriele Gerbino (@GGabriele)
notes:
- When C(state=default), all supported params will be reset to a
default state.
- If restart is set to true with other params set, the restart will happen
last, i.e. after the configuration takes place.
options:
flush_routes:
description:
- Removes routes when the IGMP process is restarted. By default,
routes are not flushed.
required: false
default: null
choices: ['true', 'false']
enforce_rtr_alert:
description:
- Enables or disables the enforce router alert option check for
IGMPv2 and IGMPv3 packets.
required: false
default: null
choices: ['true', 'false']
restart:
description:
- Restarts the igmp process (using an exec config command).
required: false
default: null
choices: ['true', 'false']
state:
description:
- Manages desired state of the resource.
required: false
default: present
choices: ['present', 'default']
'''
EXAMPLES = '''
- name: Default igmp global params (all params except restart)
nxos_igmp:
state: default
host: "{{ inventory_hostname }}"
- name: Ensure the following igmp global config exists on the device
nxos_igmp:
flush_routes: true
enforce_rtr_alert: true
host: "{{ inventory_hostname }}"
- name: Restart the igmp process
nxos_igmp:
restart: true
host: "{{ inventory_hostname }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: verbose mode
type: dict
sample: {"enforce_rtr_alert": true, "flush_routes": true}
existing:
description: k/v pairs of existing IGMP configuration
returned: verbose mode
type: dict
sample: {"enforce_rtr_alert": true, "flush_routes": false}
end_state:
description: k/v pairs of IGMP configuration after module execution
returned: verbose mode
type: dict
sample: {"enforce_rtr_alert": true, "flush_routes": true}
updates:
description: commands sent to the device
returned: always
type: list
sample: ["ip igmp flush-routes"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
import re
from ansible.module_utils.nxos import get_config, load_config, run_commands
from ansible.module_utils.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netcfg import CustomNetworkConfig
PARAM_TO_COMMAND_KEYMAP = {
'flush_routes': 'ip igmp flush-routes',
'enforce_rtr_alert': 'ip igmp enforce-router-alert'
}
def get_value(arg, config):
REGEX = re.compile(r'{0}\s*$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
value = False
try:
if REGEX.search(config):
value = True
except TypeError:
value = False
return value
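# Illustrative example: if the running config contains the line
# "ip igmp flush-routes", then get_value('flush_routes', config) returns True.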
def get_existing(module, args):
existing = {}
config = str(get_config(module))
for arg in args:
existing[arg] = get_value(arg, config)
return existing
def invoke(name, *args, **kwargs):
func = globals().get(name)
if func:
return func(*args, **kwargs)
def get_commands(module, existing, proposed, candidate):
commands = list()
proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, proposed)
existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing)
if module.params['state'] == 'default':
for key, value in proposed_commands.items():
if existing_commands.get(key):
commands.append('no {0}'.format(key))
else:
for key, value in proposed_commands.items():
if value is True:
commands.append(key)
else:
if existing_commands.get(key):
commands.append('no {0}'.format(key))
if module.params['restart']:
commands.append('restart igmp')
if commands:
parents = []
candidate.add(commands, parents=parents)
def apply_key_map(key_map, table):
new_dict = {}
for key, value in table.items():
new_key = key_map.get(key)
if new_key:
value = table.get(key)
if value:
new_dict[new_key] = value
else:
new_dict[new_key] = value
return new_dict
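# Illustrative example: apply_key_map(PARAM_TO_COMMAND_KEYMAP,
# {'flush_routes': True}) yields {'ip igmp flush-routes': True}.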
def main():
argument_spec = dict(
flush_routes=dict(type='bool'),
enforce_rtr_alert=dict(type='bool'),
restart=dict(type='bool', default=False),
state=dict(choices=['present', 'default'], default='present'),
include_defaults=dict(default=False),
config=dict(),
save=dict(type='bool', default=False)
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
state = module.params['state']
restart = module.params['restart']
if (state == 'default' and (module.params['flush_routes'] is not None or
module.params['enforce_rtr_alert'] is not None)):
module.fail_json(msg='When state=default other params have no effect.')
args = [
"flush_routes",
"enforce_rtr_alert",
]
existing = invoke('get_existing', module, args)
end_state = existing
proposed = dict((k, v) for k, v in module.params.items()
if v is not None and k in args)
proposed_args = proposed.copy()
if state == 'default':
proposed_args = dict((k, False) for k in args)
result = {}
if (state == 'present' or (state == 'default' and
True in existing.values()) or restart):
candidate = CustomNetworkConfig(indent=3)
invoke('get_commands', module, existing, proposed_args, candidate)
response = load_config(module, candidate)
result.update(response)
else:
result['updates'] = []
if restart:
proposed['restart'] = restart
result['connected'] = module.connected
if module._verbosity > 0:
end_state = invoke('get_existing', module, args)
result['end_state'] = end_state
result['existing'] = existing
result['proposed'] = proposed
result['warnings'] = warnings
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 | 5,146,778,391,365,201,000 | 28.426357 | 79 | 0.630005 | false |
pymedusa/SickRage | ext/oauthlib/oauth1/rfc5849/endpoints/access_token.py | 7 | 9405 | # -*- coding: utf-8 -*-
"""
oauthlib.oauth1.rfc5849.endpoints.access_token
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module is an implementation of the access token provider logic of
OAuth 1.0 RFC 5849. It validates the correctness of access token requests,
creates and persists tokens as well as create the proper response to be
returned to the client.
"""
from __future__ import absolute_import, unicode_literals
import logging
from oauthlib.common import urlencode
from .. import errors
from .base import BaseEndpoint
log = logging.getLogger(__name__)
class AccessTokenEndpoint(BaseEndpoint):
"""An endpoint responsible for providing OAuth 1 access tokens.
Typical use is to instantiate with a request validator and invoke the
``create_access_token_response`` from a view function. The tuple returned
has all information necessary (body, status, headers) to quickly form
and return a proper response. See :doc:`/oauth1/validator` for details on which
validator methods to implement for this endpoint.
"""
def create_access_token(self, request, credentials):
"""Create and save a new access token.
Similar to OAuth 2, indication of granted scopes will be included as a
space separated list in ``oauth_authorized_realms``.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:returns: The token as an urlencoded string.
"""
request.realms = self.request_validator.get_realms(
request.resource_owner_key, request)
token = {
'oauth_token': self.token_generator(),
'oauth_token_secret': self.token_generator(),
# Backport the authorized scopes indication used in OAuth2
'oauth_authorized_realms': ' '.join(request.realms)
}
token.update(credentials)
self.request_validator.save_access_token(token, request)
return urlencode(token.items())
def create_access_token_response(self, uri, http_method='GET', body=None,
headers=None, credentials=None):
"""Create an access token response, with a new request token if valid.
:param uri: The full URI of the token request.
:param http_method: A valid HTTP verb, i.e. GET, POST, PUT, HEAD, etc.
:param body: The request body as a string.
:param headers: The request headers as a dict.
:param credentials: A list of extra credentials to include in the token.
:returns: A tuple of 3 elements.
1. A dict of headers to set on the response.
2. The response body as a string.
3. The response status code as an integer.
An example of a valid request::
>>> from your_validator import your_validator
>>> from oauthlib.oauth1 import AccessTokenEndpoint
>>> endpoint = AccessTokenEndpoint(your_validator)
>>> h, b, s = endpoint.create_access_token_response(
... 'https://your.provider/access_token?foo=bar',
... headers={
... 'Authorization': 'OAuth oauth_token=234lsdkf....'
... },
... credentials={
... 'my_specific': 'argument',
... })
>>> h
{'Content-Type': 'application/x-www-form-urlencoded'}
>>> b
'oauth_token=lsdkfol23w54jlksdef&oauth_token_secret=qwe089234lkjsdf&oauth_authorized_realms=movies+pics&my_specific=argument'
>>> s
200
        A response to an invalid request would have a different body and status::
>>> b
'error=invalid_request&description=missing+resource+owner+key'
>>> s
400
        The same goes for an unauthorized request::
>>> b
''
>>> s
401
"""
resp_headers = {'Content-Type': 'application/x-www-form-urlencoded'}
try:
request = self._create_request(uri, http_method, body, headers)
valid, processed_request = self.validate_access_token_request(
request)
if valid:
token = self.create_access_token(request, credentials or {})
self.request_validator.invalidate_request_token(
request.client_key,
request.resource_owner_key,
request)
return resp_headers, token, 200
else:
return {}, None, 401
except errors.OAuth1Error as e:
return resp_headers, e.urlencoded, e.status_code
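    # Outcome summary for create_access_token_response: a valid request yields
    # (headers, urlencoded token, 200); a well-formed but unauthorized request
    # yields ({}, None, 401); malformed requests surface as OAuth1Error and are
    # returned urlencoded with the error's own status code.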
def validate_access_token_request(self, request):
"""Validate an access token request.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:raises: OAuth1Error if the request is invalid.
:returns: A tuple of 2 elements.
1. The validation result (True or False).
2. The request object.
"""
self._check_transport_security(request)
self._check_mandatory_parameters(request)
if not request.resource_owner_key:
raise errors.InvalidRequestError(
description='Missing resource owner.')
if not self.request_validator.check_request_token(
request.resource_owner_key):
raise errors.InvalidRequestError(
description='Invalid resource owner key format.')
if not request.verifier:
raise errors.InvalidRequestError(
description='Missing verifier.')
if not self.request_validator.check_verifier(request.verifier):
raise errors.InvalidRequestError(
description='Invalid verifier format.')
if not self.request_validator.validate_timestamp_and_nonce(
request.client_key, request.timestamp, request.nonce, request,
request_token=request.resource_owner_key):
return False, request
# The server SHOULD return a 401 (Unauthorized) status code when
# receiving a request with invalid client credentials.
# Note: This is postponed in order to avoid timing attacks, instead
# a dummy client is assigned and used to maintain near constant
# time request verification.
#
# Note that early exit would enable client enumeration
valid_client = self.request_validator.validate_client_key(
request.client_key, request)
if not valid_client:
request.client_key = self.request_validator.dummy_client
# The server SHOULD return a 401 (Unauthorized) status code when
# receiving a request with invalid or expired token.
# Note: This is postponed in order to avoid timing attacks, instead
# a dummy token is assigned and used to maintain near constant
# time request verification.
#
# Note that early exit would enable resource owner enumeration
valid_resource_owner = self.request_validator.validate_request_token(
request.client_key, request.resource_owner_key, request)
if not valid_resource_owner:
request.resource_owner_key = self.request_validator.dummy_request_token
# The server MUST verify (Section 3.2) the validity of the request,
# ensure that the resource owner has authorized the provisioning of
# token credentials to the client, and ensure that the temporary
# credentials have not expired or been used before. The server MUST
# also verify the verification code received from the client.
# .. _`Section 3.2`: https://tools.ietf.org/html/rfc5849#section-3.2
#
# Note that early exit would enable resource owner authorization
        # verifier enumeration.
valid_verifier = self.request_validator.validate_verifier(
request.client_key,
request.resource_owner_key,
request.verifier,
request)
valid_signature = self._check_signature(request, is_token_request=True)
# log the results to the validator_log
# this lets us handle internal reporting and analysis
request.validator_log['client'] = valid_client
request.validator_log['resource_owner'] = valid_resource_owner
request.validator_log['verifier'] = valid_verifier
request.validator_log['signature'] = valid_signature
# We delay checking validity until the very end, using dummy values for
# calculations and fetching secrets/keys to ensure the flow of every
# request remains almost identical regardless of whether valid values
# have been supplied. This ensures near constant time execution and
# prevents malicious users from guessing sensitive information
v = all((valid_client, valid_resource_owner, valid_verifier,
valid_signature))
if not v:
log.info("[Failure] request verification failed.")
log.info("Valid client:, %s", valid_client)
log.info("Valid token:, %s", valid_resource_owner)
log.info("Valid verifier:, %s", valid_verifier)
log.info("Valid signature:, %s", valid_signature)
return v, request
| gpl-3.0 | -6,871,182,156,631,930,000 | 42.341014 | 137 | 0.624668 | false |
ej2/pixelpuncher | pixelpuncher/npc/utils/conversation.py | 1 | 5304 | import random
from annoying.functions import get_object_or_None
from datetime import datetime
from pixelpuncher.game.utils.message import add_game_message
from pixelpuncher.game.utils.messages import pixels_dropped_message, xp_gained_message, item_given, \
location_unlocked_message
from pixelpuncher.item.utils import add_item_type_to_player
from pixelpuncher.location.utils import unlock_location
from pixelpuncher.npc.models import ResponseTrigger, Triggers, ResponseHandlers, ResponseLog
from pixelpuncher.npc.utils.relationships import get_relationship, adjust_relationship_score
from pixelpuncher.player.utils.collections import check_collections
MERCHANT_GREETINGS = [
"Welcome to {}!",
"Welcome!",
"Can I help you?",
"Welcome, can I help you?",
"Welcome, what can I do to serve you?",
"Hello, welcome to {}!",
"Buy somethin' will ya!"
]
NPC_GREETINGS = [
"Yes?",
"What's up?",
"Hey.",
"Hello.",
"How ya doing?",
"What's happening?",
"What's going on?",
"How are things?",
]
NPC_GREETINGS_FORMAL = [
"State your business.",
"May I help you?",
"Good morning.",
"Good evening.",
]
def get_merchant_greeting(location):
    # The merchant-specific greeting below is currently unreachable; the
    # generic NPC greeting is returned instead.
    return get_saying(location.npc.name, NPC_GREETINGS)
    # greeting = str(random.choice(MERCHANT_GREETINGS)).format(location.name)
    # return '{} says "{}"'.format(location.npc.name, greeting)
#
# def get_merchant_greeting(location):
# greeting = random.choice(GREETINGS)
# return str(greeting).format(location.name)
#
def get_npc_greeting(npc):
return get_saying(npc.name, NPC_GREETINGS)
def get_formal_npc_greeting(npc):
return get_saying(npc.name, NPC_GREETINGS_FORMAL)
def get_saying(name, saying_list):
saying = str(random.choice(saying_list))
return '{} says "{}"'.format(name, saying)
def parse_trigger_text(text):
output_text = ""
if text.lower().startswith("ask about"):
trigger_type = Triggers.ASK
output_text = text[9:]
elif text.lower().startswith("ask"):
trigger_type = Triggers.ASK
output_text = text[3:]
elif text.lower().startswith("tell"):
trigger_type = Triggers.TELL
output_text = text[4:]
elif text.lower().startswith("help"):
trigger_type = Triggers.HELP
output_text = text[4:]
else:
trigger_type = Triggers.ACKNOWLEDGE
output_text = text
return trigger_type, output_text.strip()
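# Illustrative examples of the parsing above:
#   "ask about the shipment" -> (Triggers.ASK, "the shipment")
#   "tell her the rumour"    -> (Triggers.TELL, "her the rumour")
#   any other text           -> (Triggers.ACKNOWLEDGE, the stripped text)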
def get_response(player, npc, trigger_text, trigger_type):
response = None
trigger = get_object_or_None(
ResponseTrigger, npcs__in=[npc], trigger_text=trigger_text, trigger_type=trigger_type.upper())
if trigger:
if trigger.responses.count() == 1:
return trigger.responses.all()[0]
else:
if trigger.response_handler == ResponseHandlers.Random:
response = random.choice(trigger.responses.all())
return response
elif trigger.response_handler == ResponseHandlers.Ordered:
last_response = get_last_response(player, trigger)
if last_response:
next_priority = last_response.priority + 1
next_response = trigger.responses.filter(priority=next_priority)
if next_response.count() > 0:
response = next_response[0]
else:
response = trigger.responses.filter(priority=1)[0]
if response:
log_response(player, response, trigger)
return response
else:
return last_response
else:
return None
def response_reward(response, player, npc):
if response.health_change != 0:
add_game_message(player, player.adjust_health(response.health_change))
if response.energy_change != 0:
add_game_message(player, player.adjust_energy(response.energy_change))
if response.pixels_change != 0:
player.pixels += response.pixels_change
add_game_message(player, pixels_dropped_message(response.pixels_change))
    if response.xp_change > 0:
        player.xp += response.xp_change
        # Report the XP that was gained, not the pixel change.
        add_game_message(player, xp_gained_message(response.xp_change))
for item_type in response.reward_items.all():
item = add_item_type_to_player(item_type, player)
add_game_message(player, item_given(npc, item))
check_collections(player, item_type)
player.save()
if response.location_unlock:
unlock_location(player, response.location_unlock)
add_game_message(player, location_unlocked_message(response.location_unlock.name))
if response.relationship_points != 0:
relationship = get_relationship(player, npc)
adjust_relationship_score(relationship, response.relationship_points)
def log_response(player, response, trigger):
log = ResponseLog(player=player, response=response, response_trigger=trigger, response_date=datetime.now())
log.save()
return log
def get_last_response(player, trigger):
logs = ResponseLog.objects.filter(player=player, response_trigger=trigger).order_by("-response__priority")
if logs.count() > 0:
return logs[0].response
else:
return None
| bsd-3-clause | 164,031,938,155,241,660 | 30.384615 | 111 | 0.652526 | false |
code-google-com/cortex-vfx | test/IECore/MeshNormalsOpTest.py | 8 | 3581 | ##########################################################################
#
# Copyright (c) 2008-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
from IECore import *
import math
class MeshNormalsOpTest( unittest.TestCase ) :
def testPlane( self ) :
p = MeshPrimitive.createPlane( Box2f( V2f( -1 ), V2f( 1 ) ) )
if "N" in p :
del p["N"]
self.assert_( not "N" in p )
pp = MeshNormalsOp()( input=p )
self.assert_( "N" in pp )
self.assertEqual( pp["N"].interpolation, PrimitiveVariable.Interpolation.Vertex )
normals = pp["N"].data
self.assert_( normals.isInstanceOf( V3fVectorData.staticTypeId() ) )
self.assertEqual( normals.size(), pp.variableSize( PrimitiveVariable.Interpolation.Vertex ) )
self.assertEqual( normals.getInterpretation(), GeometricData.Interpretation.Normal )
for n in normals :
self.assertEqual( n, V3f( 0, 0, 1 ) )
def testOnlyNAdded( self ) :
p = MeshPrimitive.createPlane( Box2f( V2f( -1 ), V2f( 1 ) ) )
pp = MeshNormalsOp()( input=p )
del pp["N"]
self.assertEqual( pp, p )
def testSphere( self ) :
s = Reader.create( "test/IECore/data/cobFiles/pSphereShape1.cob" ).read()
del s["N"]
self.assert_( not "N" in s )
ss = MeshNormalsOp()( input=s )
self.assert_( "N" in ss )
self.assertEqual( ss["N"].interpolation, PrimitiveVariable.Interpolation.Vertex )
normals = ss["N"].data
self.assert_( normals.isInstanceOf( V3fVectorData.staticTypeId() ) )
self.assertEqual( normals.size(), ss.variableSize( PrimitiveVariable.Interpolation.Vertex ) )
self.assertEqual( normals.getInterpretation(), GeometricData.Interpretation.Normal )
points = ss["P"].data
for i in range( 0, normals.size() ) :
self.assert_( math.fabs( normals[i].length() - 1 ) < 0.001 )
p = points[i].normalize()
self.assert_( normals[i].dot( p ) > 0.99 )
self.assert_( normals[i].dot( p ) < 1.01 )
if __name__ == "__main__":
unittest.main()
| bsd-3-clause | -1,982,644,797,105,634,300 | 36.302083 | 95 | 0.67914 | false |
PetrDlouhy/django | django/utils/six.py | 408 | 30194 | """Utilities for writing code that runs on Python 2 and 3"""
# Copyright (c) 2010-2015 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
import functools
import itertools
import operator
import sys
import types
__author__ = "Benjamin Peterson <[email protected]>"
__version__ = "1.9.0"
# Useful for very coarse version differentiation.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY3:
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
MAXSIZE = sys.maxsize
else:
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
if sys.platform.startswith("java"):
# Jython always uses 32 bits.
MAXSIZE = int((1 << 31) - 1)
else:
# It's possible to have sizeof(long) != sizeof(Py_ssize_t).
class X(object):
def __len__(self):
return 1 << 31
try:
len(X())
except OverflowError:
# 32-bit
MAXSIZE = int((1 << 31) - 1)
else:
# 64-bit
MAXSIZE = int((1 << 63) - 1)
del X
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
"""Import module, returning the module after the last dot."""
__import__(name)
return sys.modules[name]
class _LazyDescr(object):
def __init__(self, name):
self.name = name
def __get__(self, obj, tp):
result = self._resolve()
setattr(obj, self.name, result) # Invokes __set__.
try:
# This is a bit ugly, but it avoids running this again by
# removing this descriptor.
delattr(obj.__class__, self.name)
except AttributeError:
pass
return result
class MovedModule(_LazyDescr):
def __init__(self, name, old, new=None):
super(MovedModule, self).__init__(name)
if PY3:
if new is None:
new = name
self.mod = new
else:
self.mod = old
def _resolve(self):
return _import_module(self.mod)
def __getattr__(self, attr):
_module = self._resolve()
value = getattr(_module, attr)
setattr(self, attr, value)
return value
class _LazyModule(types.ModuleType):
def __init__(self, name):
super(_LazyModule, self).__init__(name)
self.__doc__ = self.__class__.__doc__
def __dir__(self):
attrs = ["__doc__", "__name__"]
attrs += [attr.name for attr in self._moved_attributes]
return attrs
# Subclasses should override this
_moved_attributes = []
class MovedAttribute(_LazyDescr):
def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
super(MovedAttribute, self).__init__(name)
if PY3:
if new_mod is None:
new_mod = name
self.mod = new_mod
if new_attr is None:
if old_attr is None:
new_attr = name
else:
new_attr = old_attr
self.attr = new_attr
else:
self.mod = old_mod
if old_attr is None:
old_attr = name
self.attr = old_attr
def _resolve(self):
module = _import_module(self.mod)
return getattr(module, self.attr)
class _SixMetaPathImporter(object):
"""
A meta path importer to import six.moves and its submodules.
This class implements a PEP302 finder and loader. It should be compatible
with Python 2.5 and all existing versions of Python3
"""
def __init__(self, six_module_name):
self.name = six_module_name
self.known_modules = {}
def _add_module(self, mod, *fullnames):
for fullname in fullnames:
self.known_modules[self.name + "." + fullname] = mod
def _get_module(self, fullname):
return self.known_modules[self.name + "." + fullname]
def find_module(self, fullname, path=None):
if fullname in self.known_modules:
return self
return None
def __get_module(self, fullname):
try:
return self.known_modules[fullname]
except KeyError:
raise ImportError("This loader does not know module " + fullname)
def load_module(self, fullname):
try:
# in case of a reload
return sys.modules[fullname]
except KeyError:
pass
mod = self.__get_module(fullname)
if isinstance(mod, MovedModule):
mod = mod._resolve()
else:
mod.__loader__ = self
sys.modules[fullname] = mod
return mod
def is_package(self, fullname):
"""
Return true, if the named module is a package.
We need this method to get correct spec objects with
Python 3.4 (see PEP451)
"""
return hasattr(self.__get_module(fullname), "__path__")
def get_code(self, fullname):
"""Return None
Required, if is_package is implemented"""
self.__get_module(fullname) # eventually raises ImportError
return None
get_source = get_code # same as get_code
_importer = _SixMetaPathImporter(__name__)
class _MovedItems(_LazyModule):
"""Lazy loading of moved objects"""
__path__ = [] # mark as package
_moved_attributes = [
MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
MovedAttribute("intern", "__builtin__", "sys"),
MovedAttribute("map", "itertools", "builtins", "imap", "map"),
MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("reload_module", "__builtin__", "imp", "reload"),
MovedAttribute("reduce", "__builtin__", "functools"),
MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
MovedAttribute("StringIO", "StringIO", "io"),
MovedAttribute("UserDict", "UserDict", "collections"),
MovedAttribute("UserList", "UserList", "collections"),
MovedAttribute("UserString", "UserString", "collections"),
MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
MovedModule("builtins", "__builtin__"),
MovedModule("configparser", "ConfigParser"),
MovedModule("copyreg", "copy_reg"),
MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
MovedModule("http_cookies", "Cookie", "http.cookies"),
MovedModule("html_entities", "htmlentitydefs", "html.entities"),
MovedModule("html_parser", "HTMLParser", "html.parser"),
MovedModule("http_client", "httplib", "http.client"),
MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
MovedModule("cPickle", "cPickle", "pickle"),
MovedModule("queue", "Queue"),
MovedModule("reprlib", "repr"),
MovedModule("socketserver", "SocketServer"),
MovedModule("_thread", "thread", "_thread"),
MovedModule("tkinter", "Tkinter"),
MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
MovedModule("tkinter_colorchooser", "tkColorChooser",
"tkinter.colorchooser"),
MovedModule("tkinter_commondialog", "tkCommonDialog",
"tkinter.commondialog"),
MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
MovedModule("tkinter_font", "tkFont", "tkinter.font"),
MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
"tkinter.simpledialog"),
MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
MovedModule("winreg", "_winreg"),
]
for attr in _moved_attributes:
setattr(_MovedItems, attr.name, attr)
if isinstance(attr, MovedModule):
_importer._add_module(attr, "moves." + attr.name)
del attr
_MovedItems._moved_attributes = _moved_attributes
moves = _MovedItems(__name__ + ".moves")
_importer._add_module(moves, "moves")
class Module_six_moves_urllib_parse(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_parse"""
_urllib_parse_moved_attributes = [
MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
MovedAttribute("urljoin", "urlparse", "urllib.parse"),
MovedAttribute("urlparse", "urlparse", "urllib.parse"),
MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
MovedAttribute("quote", "urllib", "urllib.parse"),
MovedAttribute("quote_plus", "urllib", "urllib.parse"),
MovedAttribute("unquote", "urllib", "urllib.parse"),
MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
MovedAttribute("urlencode", "urllib", "urllib.parse"),
MovedAttribute("splitquery", "urllib", "urllib.parse"),
MovedAttribute("splittag", "urllib", "urllib.parse"),
MovedAttribute("splituser", "urllib", "urllib.parse"),
MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
MovedAttribute("uses_params", "urlparse", "urllib.parse"),
MovedAttribute("uses_query", "urlparse", "urllib.parse"),
MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
]
for attr in _urllib_parse_moved_attributes:
setattr(Module_six_moves_urllib_parse, attr.name, attr)
del attr
Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
"moves.urllib_parse", "moves.urllib.parse")
class Module_six_moves_urllib_error(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_error"""
_urllib_error_moved_attributes = [
MovedAttribute("URLError", "urllib2", "urllib.error"),
MovedAttribute("HTTPError", "urllib2", "urllib.error"),
MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
]
for attr in _urllib_error_moved_attributes:
setattr(Module_six_moves_urllib_error, attr.name, attr)
del attr
Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
"moves.urllib_error", "moves.urllib.error")
class Module_six_moves_urllib_request(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_request"""
_urllib_request_moved_attributes = [
MovedAttribute("urlopen", "urllib2", "urllib.request"),
MovedAttribute("install_opener", "urllib2", "urllib.request"),
MovedAttribute("build_opener", "urllib2", "urllib.request"),
MovedAttribute("pathname2url", "urllib", "urllib.request"),
MovedAttribute("url2pathname", "urllib", "urllib.request"),
MovedAttribute("getproxies", "urllib", "urllib.request"),
MovedAttribute("Request", "urllib2", "urllib.request"),
MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
MovedAttribute("FileHandler", "urllib2", "urllib.request"),
MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
MovedAttribute("urlretrieve", "urllib", "urllib.request"),
MovedAttribute("urlcleanup", "urllib", "urllib.request"),
MovedAttribute("URLopener", "urllib", "urllib.request"),
MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
]
for attr in _urllib_request_moved_attributes:
setattr(Module_six_moves_urllib_request, attr.name, attr)
del attr
Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
"moves.urllib_request", "moves.urllib.request")
class Module_six_moves_urllib_response(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_response"""
_urllib_response_moved_attributes = [
MovedAttribute("addbase", "urllib", "urllib.response"),
MovedAttribute("addclosehook", "urllib", "urllib.response"),
MovedAttribute("addinfo", "urllib", "urllib.response"),
MovedAttribute("addinfourl", "urllib", "urllib.response"),
]
for attr in _urllib_response_moved_attributes:
setattr(Module_six_moves_urllib_response, attr.name, attr)
del attr
Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
"moves.urllib_response", "moves.urllib.response")
class Module_six_moves_urllib_robotparser(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_robotparser"""
_urllib_robotparser_moved_attributes = [
MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
]
for attr in _urllib_robotparser_moved_attributes:
setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
del attr
Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
"moves.urllib_robotparser", "moves.urllib.robotparser")
class Module_six_moves_urllib(types.ModuleType):
"""Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
__path__ = [] # mark as package
parse = _importer._get_module("moves.urllib_parse")
error = _importer._get_module("moves.urllib_error")
request = _importer._get_module("moves.urllib_request")
response = _importer._get_module("moves.urllib_response")
robotparser = _importer._get_module("moves.urllib_robotparser")
def __dir__(self):
return ['parse', 'error', 'request', 'response', 'robotparser']
_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
"moves.urllib")
def add_move(move):
"""Add an item to six.moves."""
setattr(_MovedItems, move.name, move)
def remove_move(name):
"""Remove item from six.moves."""
try:
delattr(_MovedItems, name)
except AttributeError:
try:
del moves.__dict__[name]
except KeyError:
raise AttributeError("no such move, %r" % (name,))
if PY3:
_meth_func = "__func__"
_meth_self = "__self__"
_func_closure = "__closure__"
_func_code = "__code__"
_func_defaults = "__defaults__"
_func_globals = "__globals__"
else:
_meth_func = "im_func"
_meth_self = "im_self"
_func_closure = "func_closure"
_func_code = "func_code"
_func_defaults = "func_defaults"
_func_globals = "func_globals"
try:
advance_iterator = next
except NameError:
def advance_iterator(it):
return it.next()
next = advance_iterator
try:
callable = callable
except NameError:
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
if PY3:
def get_unbound_function(unbound):
return unbound
create_bound_method = types.MethodType
Iterator = object
else:
def get_unbound_function(unbound):
return unbound.im_func
def create_bound_method(func, obj):
return types.MethodType(func, obj, obj.__class__)
class Iterator(object):
def next(self):
return type(self).__next__(self)
callable = callable
_add_doc(get_unbound_function,
"""Get the function out of a possibly unbound function""")
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)
if PY3:
def iterkeys(d, **kw):
return iter(d.keys(**kw))
def itervalues(d, **kw):
return iter(d.values(**kw))
def iteritems(d, **kw):
return iter(d.items(**kw))
def iterlists(d, **kw):
return iter(d.lists(**kw))
viewkeys = operator.methodcaller("keys")
viewvalues = operator.methodcaller("values")
viewitems = operator.methodcaller("items")
else:
def iterkeys(d, **kw):
return iter(d.iterkeys(**kw))
def itervalues(d, **kw):
return iter(d.itervalues(**kw))
def iteritems(d, **kw):
return iter(d.iteritems(**kw))
def iterlists(d, **kw):
return iter(d.iterlists(**kw))
viewkeys = operator.methodcaller("viewkeys")
viewvalues = operator.methodcaller("viewvalues")
viewitems = operator.methodcaller("viewitems")
_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
_add_doc(iteritems,
"Return an iterator over the (key, value) pairs of a dictionary.")
_add_doc(iterlists,
"Return an iterator over the (key, [values]) pairs of a dictionary.")
if PY3:
def b(s):
return s.encode("latin-1")
def u(s):
return s
unichr = chr
if sys.version_info[1] <= 1:
def int2byte(i):
return bytes((i,))
else:
# This is about 2x faster than the implementation above on 3.2+
int2byte = operator.methodcaller("to_bytes", 1, "big")
byte2int = operator.itemgetter(0)
indexbytes = operator.getitem
iterbytes = iter
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
_assertCountEqual = "assertCountEqual"
_assertRaisesRegex = "assertRaisesRegex"
_assertRegex = "assertRegex"
else:
def b(s):
return s
# Workaround for standalone backslash
def u(s):
return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
unichr = unichr
int2byte = chr
def byte2int(bs):
return ord(bs[0])
def indexbytes(buf, i):
return ord(buf[i])
iterbytes = functools.partial(itertools.imap, ord)
import StringIO
StringIO = BytesIO = StringIO.StringIO
_assertCountEqual = "assertItemsEqual"
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
def assertCountEqual(self, *args, **kwargs):
return getattr(self, _assertCountEqual)(*args, **kwargs)
def assertRaisesRegex(self, *args, **kwargs):
return getattr(self, _assertRaisesRegex)(*args, **kwargs)
def assertRegex(self, *args, **kwargs):
return getattr(self, _assertRegex)(*args, **kwargs)
if PY3:
exec_ = getattr(moves.builtins, "exec")
def reraise(tp, value, tb=None):
if value is None:
value = tp()
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
else:
def exec_(_code_, _globs_=None, _locs_=None):
"""Execute code in a namespace."""
if _globs_ is None:
frame = sys._getframe(1)
_globs_ = frame.f_globals
if _locs_ is None:
_locs_ = frame.f_locals
del frame
elif _locs_ is None:
_locs_ = _globs_
exec("""exec _code_ in _globs_, _locs_""")
exec_("""def reraise(tp, value, tb=None):
raise tp, value, tb
""")
if sys.version_info[:2] == (3, 2):
exec_("""def raise_from(value, from_value):
if from_value is None:
raise value
raise value from from_value
""")
elif sys.version_info[:2] > (3, 2):
exec_("""def raise_from(value, from_value):
raise value from from_value
""")
else:
def raise_from(value, from_value):
raise value
print_ = getattr(moves.builtins, "print", None)
if print_ is None:
def print_(*args, **kwargs):
"""The new-style print function for Python 2.4 and 2.5."""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
# If the file has an encoding, encode unicode with it.
if (isinstance(fp, file) and
isinstance(data, unicode) and
fp.encoding is not None):
errors = getattr(fp, "errors", None)
if errors is None:
errors = "strict"
data = data.encode(fp.encoding, errors)
fp.write(data)
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
newline = unicode("\n")
space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
if sys.version_info[:2] < (3, 3):
_print = print_
def print_(*args, **kwargs):
fp = kwargs.get("file", sys.stdout)
flush = kwargs.pop("flush", False)
_print(*args, **kwargs)
if flush and fp is not None:
fp.flush()
_add_doc(reraise, """Reraise an exception.""")
if sys.version_info[0:2] < (3, 4):
def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
def wrapper(f):
f = functools.wraps(wrapped, assigned, updated)(f)
f.__wrapped__ = wrapped
return f
return wrapper
else:
wraps = functools.wraps
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
# This requires a bit of explanation: the basic idea is to make a dummy
# metaclass for one level of class instantiation that replaces itself with
# the actual metaclass.
class metaclass(meta):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
return type.__new__(metaclass, 'temporary_class', (), {})
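# Illustrative usage (not part of six itself):
#   class MyClass(with_metaclass(Meta, Base)):
#       pass
# works unchanged on Python 2 and 3, avoiding the __metaclass__/metaclass=
# syntax difference.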
def add_metaclass(metaclass):
"""Class decorator for creating a class with a metaclass."""
def wrapper(cls):
orig_vars = cls.__dict__.copy()
slots = orig_vars.get('__slots__')
if slots is not None:
if isinstance(slots, str):
slots = [slots]
for slots_var in slots:
orig_vars.pop(slots_var)
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper
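# Illustrative usage (not part of six itself):
#   @add_metaclass(Meta)
#   class MyClass(Base):
#       pass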
def python_2_unicode_compatible(klass):
"""
A decorator that defines __unicode__ and __str__ methods under Python 2.
Under Python 3 it does nothing.
To support Python 2 and 3 with a single code base, define a __str__ method
returning text and apply this decorator to the class.
"""
if PY2:
if '__str__' not in klass.__dict__:
raise ValueError("@python_2_unicode_compatible cannot be applied "
"to %s because it doesn't define __str__()." %
klass.__name__)
klass.__unicode__ = klass.__str__
klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
return klass
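# Illustrative usage (not part of six itself):
#   @python_2_unicode_compatible
#   class Article(object):
#       def __str__(self):
#           return self.title  # must return text, not bytes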
# Complete the moves implementation.
# This code is at the end of this module to speed up module loading.
# Turn this module into a package.
__path__ = [] # required for PEP 302 and PEP 451
__package__ = __name__ # see PEP 366 @ReservedAssignment
if globals().get("__spec__") is not None:
__spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable
# Remove other six meta path importers, since they cause problems. This can
# happen if six is removed from sys.modules and then reloaded. (Setuptools does
# this for some reason.)
if sys.meta_path:
for i, importer in enumerate(sys.meta_path):
# Here's some real nastiness: Another "instance" of the six module might
# be floating around. Therefore, we can't use isinstance() to check for
# the six meta path importer, since the other six instance will have
# inserted an importer with different class.
if (type(importer).__name__ == "_SixMetaPathImporter" and
importer.name == __name__):
del sys.meta_path[i]
break
del i, importer
# Finally, add the importer to the meta path import hook.
sys.meta_path.append(_importer)
### Additional customizations for Django ###
if PY3:
memoryview = memoryview
buffer_types = (bytes, bytearray, memoryview)
else:
# memoryview and buffer are not strictly equivalent, but should be fine for
# django core usage (mainly BinaryField). However, Jython doesn't support
# buffer (see http://bugs.jython.org/issue1521), so we have to be careful.
if sys.platform.startswith('java'):
memoryview = memoryview
else:
memoryview = buffer
buffer_types = (bytearray, memoryview)
| bsd-3-clause | 2,425,962,393,310,686,000 | 34.355972 | 98 | 0.633073 | false |
kosz85/django | tests/forms_tests/tests/test_validators.py | 111 | 2210 | import re
from unittest import TestCase
from django import forms
from django.core import validators
from django.core.exceptions import ValidationError
class TestFieldWithValidators(TestCase):
def test_all_errors_get_reported(self):
class UserForm(forms.Form):
full_name = forms.CharField(
max_length=50,
validators=[
validators.validate_integer,
validators.validate_email,
]
)
string = forms.CharField(
max_length=50,
validators=[
validators.RegexValidator(
regex='^[a-zA-Z]*$',
message="Letters only.",
)
]
)
ignore_case_string = forms.CharField(
max_length=50,
validators=[
validators.RegexValidator(
regex='^[a-z]*$',
message="Letters only.",
flags=re.IGNORECASE,
)
]
)
form = UserForm({
'full_name': 'not int nor mail',
'string': '2 is not correct',
'ignore_case_string': "IgnORE Case strIng",
})
with self.assertRaises(ValidationError) as e:
form.fields['full_name'].clean('not int nor mail')
self.assertEqual(2, len(e.exception.messages))
self.assertFalse(form.is_valid())
        self.assertEqual(form.errors['string'], ["Letters only."])
        self.assertEqual(form.errors['ignore_case_string'], ["Letters only."])
def test_field_validators_can_be_any_iterable(self):
class UserForm(forms.Form):
full_name = forms.CharField(
max_length=50,
validators=(
validators.validate_integer,
validators.validate_email,
)
)
form = UserForm({'full_name': 'not int nor mail'})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['full_name'], ['Enter a valid integer.', 'Enter a valid email address.'])
| bsd-3-clause | -8,180,420,647,220,429,000 | 33.53125 | 110 | 0.503167 | false |
dhodhala88/Bosch1 | weblate/trans/management/commands/loadpo.py | 9 | 1599 | # -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2015 Michal Čihař <[email protected]>
#
# This file is part of Weblate <http://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from weblate.trans.management.commands import WeblateLangCommand
from optparse import make_option
from django.db import transaction
class Command(WeblateLangCommand):
help = '(re)loads translations from disk'
option_list = WeblateLangCommand.option_list + (
make_option(
'--force',
action='store_true',
dest='force',
default=False,
help='Force rereading files even when they should be up to date'
),
)
def handle(self, *args, **options):
langs = None
if options['lang'] is not None:
langs = options['lang'].split(',')
for subproject in self.get_subprojects(*args, **options):
with transaction.atomic():
subproject.create_translations(options['force'], langs)
| gpl-3.0 | -1,130,772,112,330,005,600 | 35.272727 | 76 | 0.674812 | false |
pidah/st2contrib | packs/tesla/actions/lib/parsers.py | 6 | 1045 | from pytesla import Vehicle
__all__ = [
'FieldLists',
'ResultSets'
]
class FieldLists(object):
"""
The lists of fields we want to return for each class
"""
VEHICLE = ['id', 'vin', 'mobile_enabled',
'charge_state', 'climate_state',
'drive_state', 'vehicle_state']
class ResultSets(object):
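    """
    Helpers that flatten pytesla objects into plain dictionaries, keeping only
    the fields listed in FieldLists, so action results can be returned as
    simple, serializable data.
    """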
def selector(self, output):
if isinstance(output, Vehicle):
return self.parse(output, FieldLists.VEHICLE)
else:
return output
def formatter(self, output):
formatted = []
if isinstance(output, list):
for o in output:
formatted.append(self.selector(o))
else:
formatted = self.selector(output)
return formatted
def _getval(self, obj, field):
return self.selector(getattr(obj, field))
def parse(self, output, field_list):
instance_data = {field: self._getval(output, field)
for field in field_list}
return instance_data
| apache-2.0 | -863,106,859,584,771,500 | 24.487805 | 59 | 0.570335 | false |
glorizen/nupic | examples/opf/experiments/missing_record/make_datasets.py | 34 | 4661 | #! /usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Generate artificial datasets for the multi-step prediction experiments
"""
import os
import random
import datetime
from optparse import OptionParser
from nupic.data.file_record_stream import FileRecordStream
def _generateSimple(filename="simple.csv", numSequences=1, elementsPerSeq=3,
numRepeats=10):
""" Generate a simple dataset. This contains a bunch of non-overlapping
sequences.
At the end of the dataset, we introduce missing records so that test
code can insure that the model didn't get confused by them.
Parameters:
----------------------------------------------------
filename: name of the file to produce, including extension. It will
be created in a 'datasets' sub-directory within the
directory containing this script.
numSequences: how many sequences to generate
elementsPerSeq: length of each sequence
numRepeats: how many times to repeat each sequence in the output
"""
# Create the output file
scriptDir = os.path.dirname(__file__)
pathname = os.path.join(scriptDir, 'datasets', filename)
print "Creating %s..." % (pathname)
fields = [('timestamp', 'datetime', 'T'),
('field1', 'string', ''),
('field2', 'float', '')]
outFile = FileRecordStream(pathname, write=True, fields=fields)
# Create the sequences
sequences = []
for i in range(numSequences):
seq = [x for x in range(i*elementsPerSeq, (i+1)*elementsPerSeq)]
sequences.append(seq)
# Write out the sequences in random order
seqIdxs = []
for i in range(numRepeats):
seqIdxs += range(numSequences)
random.shuffle(seqIdxs)
# Put 1 hour between each record
timestamp = datetime.datetime(year=2012, month=1, day=1, hour=0, minute=0,
second=0)
timeDelta = datetime.timedelta(hours=1)
# Write out the sequences without missing records
for seqIdx in seqIdxs:
seq = sequences[seqIdx]
for x in seq:
outFile.appendRecord([timestamp, str(x), x])
timestamp += timeDelta
# Now, write some out with missing records
for seqIdx in seqIdxs:
seq = sequences[seqIdx]
for i,x in enumerate(seq):
if i != 1:
outFile.appendRecord([timestamp, str(x), x])
timestamp += timeDelta
for seqIdx in seqIdxs:
seq = sequences[seqIdx]
for i,x in enumerate(seq):
if i != 1:
outFile.appendRecord([timestamp, str(x), x])
timestamp += timeDelta
# Write out some more of the sequences *without* missing records
for seqIdx in seqIdxs:
seq = sequences[seqIdx]
for x in seq:
outFile.appendRecord([timestamp, str(x), x])
timestamp += timeDelta
outFile.close()
if __name__ == '__main__':
helpString = \
"""%prog [options]
Generate artificial datasets for testing multi-step prediction """
# ============================================================================
# Process command line arguments
parser = OptionParser(helpString)
parser.add_option("--verbosity", default=0, type="int",
help="Verbosity level, either 0, 1, 2, or 3 [default: %default].")
(options, args) = parser.parse_args()
if len(args) != 0:
parser.error("No arguments accepted")
# Set random seed
random.seed(42)
# Create the dataset directory if necessary
datasetsDir = os.path.join(os.path.dirname(__file__), 'datasets')
if not os.path.exists(datasetsDir):
os.mkdir(datasetsDir)
# Generate the sample datasets
_generateSimple('simple_0.csv', numSequences=1, elementsPerSeq=3,
numRepeats=10)
| agpl-3.0 | -1,233,084,006,512,125,200 | 30.493243 | 80 | 0.634199 | false |
notfier/touristique | data/tests/test_views.py | 1 | 3784 | from django.core.urlresolvers import reverse
from rest_framework import status
from rest_framework.authtoken.models import Token
from rest_framework.test import APITestCase
from data.tests.factories import DepartmentFactory
from registration.models import User
from ..models import Department
class TestDepartment(APITestCase):
def setUp(self):
self.department_one = DepartmentFactory()
self.department_two = DepartmentFactory()
user = User.objects.create(first_name='Bo', last_name='Devchik', email='[email protected]')
token_key = Token.objects.get(user=user).key
self.client.credentials(HTTP_AUTHORIZATION='Token {0}'.format(token_key))
def test_unauthorized_user(self):
# sign out
self.client.credentials(HTTP_AUTHORIZATION='')
response = self.client.get(reverse('data-get-departments'))
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertEqual(
response.data.get('detail'),
'Authentication credentials were not provided.'
)
def test_get_departments_list(self):
response = self.client.get(reverse('data-get-departments'))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data), 2)
self.assertIn(self.department_two.address, str(response.content))
self.assertIn(self.department_one.name, str(response.content))
class TestDepartmentInfo(APITestCase):
def setUp(self):
self.department_one = DepartmentFactory()
user = User.objects.create(first_name='Bo', last_name='Devchik', email='[email protected]')
token_key = Token.objects.get(user=user).key
self.client.credentials(HTTP_AUTHORIZATION='Token {0}'.format(token_key))
def test_unauthorized_user(self):
# sign out
self.client.credentials(HTTP_AUTHORIZATION='')
response = self.client.get(reverse('data-get-department'), data={
'department_pk': self.department_one.pk
})
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertEqual(
response.data.get('detail'),
'Authentication credentials were not provided.'
)
def test_get_department_info(self):
response = self.client.get(reverse('data-get-department'), data={
'department_pk': self.department_one.pk
})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertIn(self.department_one.name, str(response.content))
self.assertIn(self.department_one.address, str(response.content))
def test_change_department_info(self):
response = self.client.put(reverse('data-get-department'), data={
'department_pk': self.department_one.pk,
'phone': '777777',
'address': 'Carrer de l\'Avenir',
'name': self.department_one.name,
'zip_code': self.department_one.zip_code
})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertIn('777777', str(response.content))
self.assertIn('Carrer de l\'Avenir', str(response.content))
def test_create_department(self):
response = self.client.post(reverse('data-get-department'), data={
'phone': '777777',
'address': 'Carrer de l\'Avenir',
'name': 'Parabola',
'zip_code': '77777'
})
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertIn('777777', str(response.content))
self.assertIn('Carrer de l\'Avenir', str(response.content))
self.assertIn('Parabola', str(response.content))
self.assertEqual(Department.objects.count(), 2)
| mit | 363,379,996,320,904,260 | 37.612245 | 98 | 0.65777 | false |
MadCat34/Sick-Beard | cherrypy/wsgiserver/ssl_builtin.py | 74 | 2542 | """A library for integrating Python's builtin ssl module with CherryPy.
The ssl module must be importable for SSL functionality.
To use this module, set CherryPyWSGIServer.ssl_adapter to an instance of
BuiltinSSLAdapter.
ssl_adapter.certificate: the filename of the server SSL certificate.
ssl_adapter.private_key: the filename of the server's private key file.
"""
try:
import ssl
except ImportError:
ssl = None
from cherrypy import wsgiserver
class BuiltinSSLAdapter(wsgiserver.SSLAdapter):
"""A wrapper for integrating Python's builtin ssl module with CherryPy."""
def __init__(self, certificate, private_key, certificate_chain=None):
if ssl is None:
raise ImportError("You must install the ssl module to use HTTPS.")
self.certificate = certificate
self.private_key = private_key
self.certificate_chain = certificate_chain
def bind(self, sock):
"""Wrap and return the given socket."""
return sock
def wrap(self, sock):
"""Wrap and return the given socket, plus WSGI environ entries."""
try:
s = ssl.wrap_socket(sock, do_handshake_on_connect=True,
server_side=True, certfile=self.certificate,
keyfile=self.private_key, ssl_version=ssl.PROTOCOL_SSLv23)
except ssl.SSLError, e:
if e.errno == ssl.SSL_ERROR_EOF:
# This is almost certainly due to the cherrypy engine
# 'pinging' the socket to assert it's connectable;
# the 'ping' isn't SSL.
return None, {}
elif e.errno == ssl.SSL_ERROR_SSL:
if e.args[1].endswith('http request'):
# The client is speaking HTTP to an HTTPS server.
raise wsgiserver.NoSSLError
raise
return s, self.get_environ(s)
# TODO: fill this out more with mod ssl env
def get_environ(self, sock):
"""Create WSGI environ entries to be merged into each request."""
cipher = sock.cipher()
ssl_environ = {
"wsgi.url_scheme": "https",
"HTTPS": "on",
'SSL_PROTOCOL': cipher[1],
'SSL_CIPHER': cipher[0]
## SSL_VERSION_INTERFACE string The mod_ssl program version
## SSL_VERSION_LIBRARY string The OpenSSL program version
}
return ssl_environ
def makefile(self, sock, mode='r', bufsize= -1):
return wsgiserver.CP_fileobject(sock, mode, bufsize)
| gpl-3.0 | -922,521,747,266,302,300 | 35.84058 | 78 | 0.609756 | false |
HewlettPackard/oneview-redfish-toolkit | oneview_redfish_toolkit/api/processor.py | 1 | 2778 | # -*- coding: utf-8 -*-
# Copyright (2018) Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oneview_redfish_toolkit.api.redfish_json_validator \
import RedfishJsonValidator
from oneview_redfish_toolkit.api.resource_block_collection \
import ResourceBlockCollection
import oneview_redfish_toolkit.api.status_mapping as status_mapping
class Processor(RedfishJsonValidator):
"""Creates a Processor Redfish dict
Populates self.redfish with some hardcoded Processor
values and data retrieved from Oneview.
"""
SCHEMA_NAME = 'Processor'
def __init__(self, server_hardware, processor_id):
"""Processor constructor
Populates self.redfish with the some common contents
and data from OneView server hardware.
Args:
server_hardware: server hardware dict from OneView
processor_id: processor identifier
"""
super().__init__(self.SCHEMA_NAME)
self.redfish["@odata.type"] = self.get_odata_type()
self.redfish["Id"] = processor_id
self.redfish["Name"] = "Processor " + processor_id
self.redfish["Status"] = dict()
state, health = status_mapping.\
get_redfish_server_hardware_status_struct(server_hardware)
self.redfish["Status"]["State"] = state
self.redfish["Status"]["Health"] = health
self.redfish["ProcessorType"] = "CPU"
self.redfish["Model"] = server_hardware["processorType"]
self.redfish["MaxSpeedMHz"] = server_hardware["processorSpeedMhz"]
self.redfish["TotalCores"] = server_hardware["processorCoreCount"]
self._fill_links(server_hardware)
self.redfish["@odata.context"] = \
"/redfish/v1/$metadata#Processor.Processor"
self.redfish["@odata.id"] = \
ResourceBlockCollection.BASE_URI + "/" \
+ server_hardware["uuid"] + "/Systems/1/Processors/" + processor_id
self._validate()
def _fill_links(self, server_hardware):
self.redfish["Links"] = dict()
self.redfish["Links"]["Chassis"] = dict()
self.redfish["Links"]["Chassis"]["@odata.id"] = \
"/redfish/v1/Chassis/" + server_hardware["uuid"]
| apache-2.0 | 1,042,905,216,736,022,400 | 37.054795 | 79 | 0.660907 | false |
john-wang-metro/metro-openerp | metro_project/project_gtd.py | 2 | 1195 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields,osv
from openerp.addons.base_status.base_stage import base_stage
from openerp.tools.translate import _
from lxml import etree
from openerp import netsvc
| agpl-3.0 | 6,040,038,014,424,057,000 | 43.259259 | 80 | 0.635146 | false |
lunafeng/django | tests/gis_tests/tests.py | 281 | 3127 | import sys
import unittest
from django.core.exceptions import ImproperlyConfigured
from django.db import ProgrammingError
from django.utils import six
try:
from django.contrib.gis.db.backends.postgis.operations import PostGISOperations
HAS_POSTGRES = True
except ImportError:
HAS_POSTGRES = False
except ImproperlyConfigured as e:
# If psycopg is installed but not geos, the import path hits
# django.contrib.gis.geometry.backend which will "helpfully" convert
# an ImportError into an ImproperlyConfigured.
# Here, we make sure we're only catching this specific case and not another
# ImproperlyConfigured one.
if e.args and e.args[0].startswith('Could not import user-defined GEOMETRY_BACKEND'):
HAS_POSTGRES = False
else:
six.reraise(*sys.exc_info())
if HAS_POSTGRES:
class FakeConnection(object):
def __init__(self):
self.settings_dict = {
'NAME': 'test',
}
class FakePostGISOperations(PostGISOperations):
def __init__(self, version=None):
self.version = version
self.connection = FakeConnection()
def _get_postgis_func(self, func):
if func == 'postgis_lib_version':
if self.version is None:
raise ProgrammingError
else:
return self.version
elif func == 'version':
pass
else:
raise NotImplementedError('This function was not expected to be called')
@unittest.skipUnless(HAS_POSTGRES, "The psycopg2 driver is needed for these tests")
class TestPostgisVersionCheck(unittest.TestCase):
"""
Tests that the postgis version check parses correctly the version numbers
"""
def test_get_version(self):
expect = '1.0.0'
ops = FakePostGISOperations(expect)
actual = ops.postgis_lib_version()
self.assertEqual(expect, actual)
def test_version_classic_tuple(self):
expect = ('1.2.3', 1, 2, 3)
ops = FakePostGISOperations(expect[0])
actual = ops.postgis_version_tuple()
self.assertEqual(expect, actual)
def test_version_dev_tuple(self):
expect = ('1.2.3dev', 1, 2, 3)
ops = FakePostGISOperations(expect[0])
actual = ops.postgis_version_tuple()
self.assertEqual(expect, actual)
def test_valid_version_numbers(self):
versions = [
('1.3.0', 1, 3, 0),
('2.1.1', 2, 1, 1),
('2.2.0dev', 2, 2, 0),
]
for version in versions:
ops = FakePostGISOperations(version[0])
actual = ops.spatial_version
self.assertEqual(version[1:], actual)
def test_invalid_version_numbers(self):
versions = ['nope', '123']
for version in versions:
ops = FakePostGISOperations(version)
self.assertRaises(Exception, lambda: ops.spatial_version)
def test_no_version_number(self):
ops = FakePostGISOperations()
self.assertRaises(ImproperlyConfigured, lambda: ops.spatial_version)
| bsd-3-clause | 949,587,669,800,330,800 | 32.265957 | 89 | 0.622642 | false |
simod/geonode | geonode/contrib/worldmap/gazetteer/management/commands/updategazetteer.py | 2 | 1208 | from django.core.management import BaseCommand
from geonode.gazetteer.models import GazetteerEntry
from geonode.maps.models import Layer
class Command(BaseCommand):
help = """
Assigns usernames to all gazetteer features that do not have an associated
username yet.
"""
args = '[none]'
# TODO update this to new version
def handle(self, *args, **kwargs):
gaz_layers = GazetteerEntry.objects.filter(
username__isnull=True).values('layer_name').distinct()
print ("Found %d layers in gazetteer with unasssigned users") % len(gaz_layers)
for gl in gaz_layers:
lname = gl['layer_name']
try:
layer = Layer.objects.get(name=lname)
username = layer.owner.username
print("Assigning features for %s to %s") % (layer.name, username)
GazetteerEntry.objects.filter(
layer_name__exact=lname).update(username=username)
except Layer.DoesNotExist:
print("Layer %s no longer exists, removing from gazetteer" % lname)
GazetteerEntry.objects.filter(layer_name__exact=lname).delete()
print("Complete")
| gpl-3.0 | 5,616,339,821,686,497,000 | 39.266667 | 87 | 0.626656 | false |
geoffarnold/mercador-sub | doc/source/conf.py | 1 | 2460 | # -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
#'sphinx.ext.intersphinx',
'oslosphinx'
]
# autodoc generation is a bit aggressive and a nuisance when doing heavy
# text edit cycles.
# execute "export SPHINX_DEBUG=1" in your terminal to disable
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'mercador-sub'
copyright = u'2013, OpenStack Foundation'
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
# html_static_path = ['static']
# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % project
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index',
'%s.tex' % project,
u'%s Documentation' % project,
u'OpenStack Foundation', 'manual'),
]
# Example configuration for intersphinx: refer to the Python standard library.
#intersphinx_mapping = {'http://docs.python.org/': None}
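# To build the documentation with this configuration (assumed layout, run from
# the repository root; requires sphinx and oslosphinx to be installed):
#
#   sphinx-build -b html doc/source doc/build/html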
| apache-2.0 | 250,598,950,635,894,560 | 31.8 | 79 | 0.689837 | false |
yoer/hue | desktop/core/ext-py/lxml/src/lxml/html/_html5builder.py | 36 | 3124 | """
This module implements a tree builder for html5lib that generates lxml
html element trees. This module uses camelCase as it follows the
html5lib style guide.
"""
from html5lib.treebuilders import _base, etree as etree_builders
from lxml import html, etree
class DocumentType(object):
def __init__(self, name, publicId, systemId):
self.name = name
self.publicId = publicId
self.systemId = systemId
class Document(object):
def __init__(self):
self._elementTree = None
self.childNodes = []
def appendChild(self, element):
self._elementTree.getroot().addnext(element._element)
class TreeBuilder(_base.TreeBuilder):
documentClass = Document
doctypeClass = DocumentType
elementClass = None
commentClass = None
fragmentClass = Document
def __init__(self):
html_builder = etree_builders.getETreeModule(html, fullTree=False)
etree_builder = etree_builders.getETreeModule(etree, fullTree=False)
self.elementClass = html_builder.Element
self.commentClass = etree_builder.Comment
_base.TreeBuilder.__init__(self)
def reset(self):
_base.TreeBuilder.reset(self)
self.rootInserted = False
self.initialComments = []
self.doctype = None
def getDocument(self):
return self.document._elementTree
def getFragment(self):
fragment = []
element = self.openElements[0]._element
if element.text:
fragment.append(element.text)
fragment.extend(element.getchildren())
if element.tail:
fragment.append(element.tail)
return fragment
def insertDoctype(self, name, publicId, systemId):
doctype = self.doctypeClass(name, publicId, systemId)
self.doctype = doctype
def insertComment(self, data, parent=None):
if not self.rootInserted:
self.initialComments.append(data)
else:
_base.TreeBuilder.insertComment(self, data, parent)
def insertRoot(self, name):
buf = []
if self.doctype and self.doctype.name:
buf.append('<!DOCTYPE %s' % self.doctype.name)
if self.doctype.publicId is not None or self.doctype.systemId is not None:
buf.append(' PUBLIC "%s" "%s"' % (self.doctype.publicId,
self.doctype.systemId))
buf.append('>')
buf.append('<html></html>')
root = html.fromstring(u''.join(buf))
# Append the initial comments:
for comment in self.initialComments:
root.addprevious(etree.Comment(comment))
# Create the root document and add the ElementTree to it
self.document = self.documentClass()
self.document._elementTree = root.getroottree()
# Add the root element to the internal child/open data structures
root_element = self.elementClass(name)
root_element._element = root
self.document.childNodes.append(root_element)
self.openElements.append(root_element)
self.rootInserted = True
| apache-2.0 | 7,704,361,501,429,851,000 | 31.541667 | 86 | 0.637004 | false |
crosswalk-project/chromium-crosswalk-efl | gpu/gles2_conform_support/generate_gles2_conform_tests.py | 139 | 1430 | #!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""code generator for OpenGL ES 2.0 conformance tests."""
import os
import re
import sys
def ReadFileAsLines(filename):
"""Reads a file, removing blank lines and lines that start with #"""
file = open(filename, "r")
raw_lines = file.readlines()
file.close()
lines = []
for line in raw_lines:
line = line.strip()
if len(line) > 0 and not line.startswith("#"):
lines.append(line)
return lines
def GenerateTests(file):
"""Generates gles2_conform_test_autogen.cc"""
tests = ReadFileAsLines(
"../../third_party/gles2_conform/GTF_ES/glsl/GTF/mustpass_es20.run")
file.write("""
#include "gpu/gles2_conform_support/gles2_conform_test.h"
#include "testing/gtest/include/gtest/gtest.h"
""")
for test in tests:
file.write("""
TEST(GLES2ConformTest, %(name)s) {
EXPECT_TRUE(RunGLES2ConformTest("%(path)s"));
}
""" % {
"name": re.sub(r'[^A-Za-z0-9]', '_', test),
"path": test,
})
def main(argv):
"""This is the main function."""
if len(argv) >= 1:
dir = argv[0]
else:
dir = '.'
file = open(os.path.join(dir, 'gles2_conform_test_autogen.cc'), 'wb')
GenerateTests(file)
file.close()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| bsd-3-clause | -7,319,833,296,315,713,000 | 21.698413 | 74 | 0.638462 | false |
salfab/CouchPotatoServer | libs/tmdb3/pager.py | 10 | 3292 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#-----------------------
# Name: pager.py List-like structure designed for handling paged results
# Python Library
# Author: Raymond Wagner
#-----------------------
from collections import Sequence, Iterator
class PagedIterator( Iterator ):
def __init__(self, parent):
self._parent = parent
self._index = -1
self._len = len(parent)
def __iter__(self):
return self
def next(self):
self._index += 1
if self._index == self._len:
raise StopIteration
return self._parent[self._index]
class UnpagedData( object ):
def copy(self):
return self.__class__()
def __mul__(self, other):
return (self.copy() for a in range(other))
def __rmul__(self, other):
return (self.copy() for a in range(other))
class PagedList( Sequence ):
"""
List-like object, with support for automatically grabbing additional
pages from a data source.
"""
_iter_class = None
def __iter__(self):
if self._iter_class is None:
self._iter_class = type(self.__class__.__name__ + 'Iterator',
(PagedIterator,), {})
return self._iter_class(self)
def __len__(self):
try:
return self._len
        except AttributeError:  # _len is only set once a subclass records the total
return len(self._data)
def __init__(self, iterable, pagesize=20):
self._data = list(iterable)
self._pagesize = pagesize
def __getitem__(self, index):
if isinstance(index, slice):
return [self[x] for x in xrange(*index.indices(len(self)))]
if index >= len(self):
raise IndexError("list index outside range")
if (index >= len(self._data)) \
or isinstance(self._data[index], UnpagedData):
self._populatepage(index/self._pagesize + 1)
return self._data[index]
def __setitem__(self, index, value):
raise NotImplementedError
def __delitem__(self, index):
raise NotImplementedError
def __contains__(self, item):
raise NotImplementedError
def _populatepage(self, page):
pagestart = (page-1) * self._pagesize
if len(self._data) < pagestart:
self._data.extend(UnpagedData()*(pagestart-len(self._data)))
if len(self._data) == pagestart:
self._data.extend(self._getpage(page))
else:
for data in self._getpage(page):
self._data[pagestart] = data
pagestart += 1
def _getpage(self, page):
raise NotImplementedError("PagedList._getpage() must be provided "+\
"by subclass")
class PagedRequest( PagedList ):
"""
Derived PageList that provides a list-like object with automatic paging
intended for use with search requests.
"""
def __init__(self, request, handler=None):
self._request = request
if handler: self._handler = handler
super(PagedRequest, self).__init__(self._getpage(1), 20)
def _getpage(self, page):
req = self._request.new(page=page)
res = req.readJSON()
self._len = res['total_results']
for item in res['results']:
yield self._handler(item)
| gpl-3.0 | 7,884,298,967,934,325,000 | 29.201835 | 76 | 0.561968 | false |
appi147/Jarvis | jarviscli/plugins/timeconv.py | 1 | 4129 | from __future__ import division
from plugin import plugin
@plugin('timeconv')
class timeconv():
"""
timeconv Documentation.
timeconv is a time converter.
Supports: picosecond, nanosecond, microsecond, millisecond, second, minute, hour, day, week, month, year
Usage: The input time measurement units are:
ps : picosecond,
ns : nanosecond,
        mus : microsecond,
        ms : millisecond,
s : second,
min : minute,
h : hour,
d : day,
wk : week,
mon : month,
yr : year
First you will be asked to enter the amount you want to convert.
Second you will be asked to enter the time measurement unit of the amount.
And then you will be asked to enter to which time measurement unit you want to convert.
"""
time_units = [
"yr", "mon", "wk", "d", "h", "min", "s", "ms", "mus", "ns", "ps"
]
units = {
"ps": "picosecond",
"ns": "nanosecond",
"mus": "microsecond",
"ms": "millisecond",
"s": "second",
"min": "minute",
"h": "hour",
"d": "day",
"wk": "week",
"mon": "month",
"yr": "year"
}
units_data = {
"yr2mon": 12,
"mon2wk": 4.34812141,
"wk2d": 7,
"d2h": 24,
"h2min": 60,
"min2s": 60,
"s2ms": 1000,
"ms2mus": 1000,
"mus2ns": 1000,
"ns2ps": 1000
}
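    # Worked example (comment only, not used by the code): converting hours to
    # seconds walks the chain h -> min -> s, so the accumulated multiplier is
    # units_data["h2min"] * units_data["min2s"] = 60 * 60 = 3600; converting
    # the other way round uses the reciprocal, 1 / 3600.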
def __call__(self, jarvis, s):
while True:
amount = jarvis.input_number('Enter an amount: ')
from_unit = self.get_units(jarvis, 'Enter from which unit: ')
to_unit = self.get_units(jarvis, 'Enter to which unit: ')
if (from_unit != to_unit):
break
else:
jarvis.say('Please enter different units')
convamount = self.time_convert(jarvis, amount, from_unit, to_unit)
precision = 0
if (convamount.is_integer() is False):
precision = jarvis.input_number("Please enter precision (max:12): ")
while True:
if (precision.is_integer() and precision <= 12):
break
else:
precision = jarvis.input_number("Please enter an integer (max:12): ")
convamount = round(convamount, int(precision))
outputText = self.txt_build(amount, convamount, from_unit, to_unit)
jarvis.say(outputText)
def time_convert(self, jarvis, amount, fr, to):
for i in range(len(self.time_units)):
if (self.time_units[i] == fr):
start = i
if (self.time_units[i] == to):
end = i
if ((end - start) > 0):
reverse = False
if ((end - start) < 0):
reverse = True
tmp = start
start = end
end = tmp
multiplier = 1
convamount = multiplier
for i in range(start, end, 1):
kbuild = self.time_units[i] + "2" + self.time_units[i + 1]
multiplier = multiplier * self.units_data.get(kbuild)
        multiplier = round(multiplier, 17)
if reverse:
convamount = (1 / multiplier) * amount
else:
convamount = multiplier * amount
convamount = round(convamount, 12)
return convamount
def get_units(self, jarvis, prompt):
while True:
u = jarvis.input(prompt).lower()
if u in self.time_units:
return u
else:
prompt = 'Please enter a valid unit: '
continue
def txt_build(self, amount, convamount, from_unit, to_unit):
if (amount == 1):
fromdisp = self.units.get(from_unit)
else:
fromdisp = self.units.get(from_unit) + "s"
if (convamount == 1):
todisp = self.units.get(to_unit)
else:
todisp = self.units.get(to_unit) + "s"
txt = str(amount) + " " + fromdisp + " is equal to " + str(convamount) + " " + todisp
return txt
| mit | 1,450,973,150,460,500,700 | 26.711409 | 108 | 0.507387 | false |
TRESCLOUD/odoopub | openerp/addons/base/tests/test_orm.py | 24 | 17148 | from collections import defaultdict
from openerp.tools import mute_logger
from openerp.tests import common
UID = common.ADMIN_USER_ID
DB = common.DB
class TestORM(common.TransactionCase):
""" test special behaviors of ORM CRUD functions
TODO: use real Exceptions types instead of Exception """
def setUp(self):
super(TestORM, self).setUp()
cr, uid = self.cr, self.uid
self.partner = self.registry('res.partner')
self.users = self.registry('res.users')
self.p1 = self.partner.name_create(cr, uid, 'W')[0]
self.p2 = self.partner.name_create(cr, uid, 'Y')[0]
self.ir_rule = self.registry('ir.rule')
# sample unprivileged user
employee_gid = self.ref('base.group_user')
self.uid2 = self.users.create(cr, uid, {'name': 'test user', 'login': 'test', 'groups_id': [4,employee_gid]})
@mute_logger('openerp.models')
def testAccessDeletedRecords(self):
""" Verify that accessing deleted records works as expected """
cr, uid, uid2, p1, p2 = self.cr, self.uid, self.uid2, self.p1, self.p2
self.partner.unlink(cr, uid, [p1])
# read() is expected to skip deleted records because our API is not
# transactional for a sequence of search()->read() performed from the
# client-side... a concurrent deletion could therefore cause spurious
# exceptions even when simply opening a list view!
        # /!\ Using unprivileged user to detect former side effects of ir.rules!
self.assertEqual([{'id': p2, 'name': 'Y'}], self.partner.read(cr, uid2, [p1,p2], ['name']), "read() should skip deleted records")
self.assertEqual([], self.partner.read(cr, uid2, [p1], ['name']), "read() should skip deleted records")
# Deleting an already deleted record should be simply ignored
self.assertTrue(self.partner.unlink(cr, uid, [p1]), "Re-deleting should be a no-op")
# Updating an already deleted record should raise, even as admin
with self.assertRaises(Exception):
self.partner.write(cr, uid, [p1], {'name': 'foo'})
@mute_logger('openerp.models')
def testAccessFilteredRecords(self):
""" Verify that accessing filtered records works as expected for non-admin user """
cr, uid, uid2, p1, p2 = self.cr, self.uid, self.uid2, self.p1, self.p2
partner_model = self.registry('ir.model').search(cr, uid, [('model','=','res.partner')])[0]
self.ir_rule.create(cr, uid, {'name': 'Y is invisible',
'domain_force': [('id', '!=', p1)],
'model_id': partner_model})
# search as unprivileged user
partners = self.partner.search(cr, uid2, [])
self.assertFalse(p1 in partners, "W should not be visible...")
self.assertTrue(p2 in partners, "... but Y should be visible")
# read as unprivileged user
with self.assertRaises(Exception):
self.partner.read(cr, uid2, [p1], ['name'])
# write as unprivileged user
with self.assertRaises(Exception):
self.partner.write(cr, uid2, [p1], {'name': 'foo'})
# unlink as unprivileged user
with self.assertRaises(Exception):
self.partner.unlink(cr, uid2, [p1])
# Prepare mixed case
self.partner.unlink(cr, uid, [p2])
# read mixed records: some deleted and some filtered
with self.assertRaises(Exception):
self.partner.read(cr, uid2, [p1,p2], ['name'])
# delete mixed records: some deleted and some filtered
with self.assertRaises(Exception):
self.partner.unlink(cr, uid2, [p1,p2])
def test_multi_read(self):
record_id = self.partner.create(self.cr, UID, {'name': 'MyPartner1'})
records = self.partner.read(self.cr, UID, [record_id])
self.assertIsInstance(records, list)
def test_one_read(self):
record_id = self.partner.create(self.cr, UID, {'name': 'MyPartner1'})
record = self.partner.read(self.cr, UID, record_id)
self.assertIsInstance(record, dict)
@mute_logger('openerp.models')
def test_search_read(self):
# simple search_read
self.partner.create(self.cr, UID, {'name': 'MyPartner1'})
found = self.partner.search_read(self.cr, UID, [['name', '=', 'MyPartner1']], ['name'])
self.assertEqual(len(found), 1)
self.assertEqual(found[0]['name'], 'MyPartner1')
self.assertTrue('id' in found[0])
# search_read correct order
self.partner.create(self.cr, UID, {'name': 'MyPartner2'})
found = self.partner.search_read(self.cr, UID, [['name', 'like', 'MyPartner']], ['name'], order="name")
self.assertEqual(len(found), 2)
self.assertEqual(found[0]['name'], 'MyPartner1')
self.assertEqual(found[1]['name'], 'MyPartner2')
found = self.partner.search_read(self.cr, UID, [['name', 'like', 'MyPartner']], ['name'], order="name desc")
self.assertEqual(len(found), 2)
self.assertEqual(found[0]['name'], 'MyPartner2')
self.assertEqual(found[1]['name'], 'MyPartner1')
# search_read that finds nothing
found = self.partner.search_read(self.cr, UID, [['name', '=', 'Does not exists']], ['name'])
self.assertEqual(len(found), 0)
def test_groupby_date(self):
partners = dict(
A='2012-11-19',
B='2012-12-17',
C='2012-12-31',
D='2013-01-07',
E='2013-01-14',
F='2013-01-28',
G='2013-02-11',
)
all_partners = []
partners_by_day = defaultdict(set)
partners_by_month = defaultdict(set)
partners_by_year = defaultdict(set)
for name, date in partners.items():
p = self.partner.create(self.cr, UID, dict(name=name, date=date))
all_partners.append(p)
partners_by_day[date].add(p)
partners_by_month[date.rsplit('-', 1)[0]].add(p)
partners_by_year[date.split('-', 1)[0]].add(p)
def read_group(interval, domain=None):
main_domain = [('id', 'in', all_partners)]
if domain:
domain = ['&'] + main_domain + domain
else:
domain = main_domain
rg = self.partner.read_group(self.cr, self.uid, domain, ['date'], 'date' + ':' + interval)
result = {}
for r in rg:
result[r['date:' + interval]] = set(self.partner.search(self.cr, self.uid, r['__domain']))
return result
self.assertEqual(len(read_group('day')), len(partners_by_day))
self.assertEqual(len(read_group('month')), len(partners_by_month))
self.assertEqual(len(read_group('year')), len(partners_by_year))
rg = self.partner.read_group(self.cr, self.uid, [('id', 'in', all_partners)],
['date'], ['date:month', 'date:day'], lazy=False)
self.assertEqual(len(rg), len(all_partners))
class TestInherits(common.TransactionCase):
""" test the behavior of the orm for models that use _inherits;
specifically: res.users, that inherits from res.partner
"""
def setUp(self):
super(TestInherits, self).setUp()
self.partner = self.registry('res.partner')
self.user = self.registry('res.users')
def test_create(self):
""" creating a user should automatically create a new partner """
partners_before = self.partner.search(self.cr, UID, [])
foo_id = self.user.create(self.cr, UID, {'name': 'Foo', 'login': 'foo', 'password': 'foo'})
foo = self.user.browse(self.cr, UID, foo_id)
self.assertNotIn(foo.partner_id.id, partners_before)
def test_create_with_ancestor(self):
""" creating a user with a specific 'partner_id' should not create a new partner """
par_id = self.partner.create(self.cr, UID, {'name': 'Foo'})
partners_before = self.partner.search(self.cr, UID, [])
foo_id = self.user.create(self.cr, UID, {'partner_id': par_id, 'login': 'foo', 'password': 'foo'})
partners_after = self.partner.search(self.cr, UID, [])
self.assertEqual(set(partners_before), set(partners_after))
foo = self.user.browse(self.cr, UID, foo_id)
self.assertEqual(foo.name, 'Foo')
self.assertEqual(foo.partner_id.id, par_id)
@mute_logger('openerp.models')
def test_read(self):
""" inherited fields should be read without any indirection """
foo_id = self.user.create(self.cr, UID, {'name': 'Foo', 'login': 'foo', 'password': 'foo'})
foo_values, = self.user.read(self.cr, UID, [foo_id])
partner_id = foo_values['partner_id'][0]
partner_values, = self.partner.read(self.cr, UID, [partner_id])
self.assertEqual(foo_values['name'], partner_values['name'])
foo = self.user.browse(self.cr, UID, foo_id)
self.assertEqual(foo.name, foo.partner_id.name)
@mute_logger('openerp.models')
def test_copy(self):
""" copying a user should automatically copy its partner, too """
foo_id = self.user.create(self.cr, UID, {'name': 'Foo', 'login': 'foo', 'password': 'foo'})
foo_before, = self.user.read(self.cr, UID, [foo_id])
del foo_before['__last_update']
bar_id = self.user.copy(self.cr, UID, foo_id, {'login': 'bar', 'password': 'bar'})
foo_after, = self.user.read(self.cr, UID, [foo_id])
del foo_after['__last_update']
self.assertEqual(foo_before, foo_after)
foo, bar = self.user.browse(self.cr, UID, [foo_id, bar_id])
self.assertEqual(bar.login, 'bar')
self.assertNotEqual(foo.id, bar.id)
self.assertNotEqual(foo.partner_id.id, bar.partner_id.id)
@mute_logger('openerp.models')
def test_copy_with_ancestor(self):
""" copying a user with 'parent_id' in defaults should not duplicate the partner """
foo_id = self.user.create(self.cr, UID, {'name': 'Foo', 'login': 'foo', 'password': 'foo',
'login_date': '2016-01-01'})
par_id = self.partner.create(self.cr, UID, {'name': 'Bar'})
foo_before, = self.user.read(self.cr, UID, [foo_id])
del foo_before['__last_update']
partners_before = self.partner.search(self.cr, UID, [])
bar_id = self.user.copy(self.cr, UID, foo_id, {'partner_id': par_id, 'login': 'bar'})
foo_after, = self.user.read(self.cr, UID, [foo_id])
del foo_after['__last_update']
partners_after = self.partner.search(self.cr, UID, [])
self.assertEqual(foo_before, foo_after)
self.assertEqual(set(partners_before), set(partners_after))
foo, bar = self.user.browse(self.cr, UID, [foo_id, bar_id])
self.assertNotEqual(foo.id, bar.id)
self.assertEqual(bar.partner_id.id, par_id)
self.assertEqual(bar.login, 'bar', "login is given from copy parameters")
self.assertEqual(bar.login_date, foo.login_date, "login_date copied from original record")
self.assertEqual(bar.name, 'Bar', "name is given from specific partner")
CREATE = lambda values: (0, False, values)
UPDATE = lambda id, values: (1, id, values)
DELETE = lambda id: (2, id, False)
FORGET = lambda id: (3, id, False)
LINK_TO = lambda id: (4, id, False)
DELETE_ALL = lambda: (5, False, False)
REPLACE_WITH = lambda ids: (6, False, ids)
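# The integer codes above are the standard one2many/many2many command triplets
# understood by the ORM: 0 = create, 1 = update, 2 = delete, 3 = forget (unlink
# from the relation), 4 = link an existing record, 5 = unlink all, 6 = replace
# the relation with a list of ids.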
def sorted_by_id(list_of_dicts):
"sort dictionaries by their 'id' field; useful for comparisons"
return sorted(list_of_dicts, key=lambda d: d.get('id'))
class TestO2MSerialization(common.TransactionCase):
""" test the orm method 'write' on one2many fields """
def setUp(self):
super(TestO2MSerialization, self).setUp()
self.partner = self.registry('res.partner')
def test_no_command(self):
" empty list of commands yields an empty list of records "
results = self.partner.resolve_2many_commands(
self.cr, UID, 'child_ids', [])
self.assertEqual(results, [])
def test_CREATE_commands(self):
" returns the VALUES dict as-is "
values = [{'foo': 'bar'}, {'foo': 'baz'}, {'foo': 'baq'}]
results = self.partner.resolve_2many_commands(
self.cr, UID, 'child_ids', map(CREATE, values))
self.assertEqual(results, values)
def test_LINK_TO_command(self):
" reads the records from the database, records are returned with their ids. "
ids = [
self.partner.create(self.cr, UID, {'name': 'foo'}),
self.partner.create(self.cr, UID, {'name': 'bar'}),
self.partner.create(self.cr, UID, {'name': 'baz'})
]
commands = map(LINK_TO, ids)
results = self.partner.resolve_2many_commands(
self.cr, UID, 'child_ids', commands, ['name'])
self.assertEqual(sorted_by_id(results), sorted_by_id([
{'id': ids[0], 'name': 'foo'},
{'id': ids[1], 'name': 'bar'},
{'id': ids[2], 'name': 'baz'}
]))
def test_bare_ids_command(self):
" same as the equivalent LINK_TO commands "
ids = [
self.partner.create(self.cr, UID, {'name': 'foo'}),
self.partner.create(self.cr, UID, {'name': 'bar'}),
self.partner.create(self.cr, UID, {'name': 'baz'})
]
results = self.partner.resolve_2many_commands(
self.cr, UID, 'child_ids', ids, ['name'])
self.assertEqual(sorted_by_id(results), sorted_by_id([
{'id': ids[0], 'name': 'foo'},
{'id': ids[1], 'name': 'bar'},
{'id': ids[2], 'name': 'baz'}
]))
def test_UPDATE_command(self):
" take the in-db records and merge the provided information in "
id_foo = self.partner.create(self.cr, UID, {'name': 'foo'})
id_bar = self.partner.create(self.cr, UID, {'name': 'bar'})
id_baz = self.partner.create(self.cr, UID, {'name': 'baz', 'city': 'tag'})
results = self.partner.resolve_2many_commands(
self.cr, UID, 'child_ids', [
LINK_TO(id_foo),
UPDATE(id_bar, {'name': 'qux', 'city': 'tagtag'}),
UPDATE(id_baz, {'name': 'quux'})
], ['name', 'city'])
self.assertEqual(sorted_by_id(results), sorted_by_id([
{'id': id_foo, 'name': 'foo', 'city': False},
{'id': id_bar, 'name': 'qux', 'city': 'tagtag'},
{'id': id_baz, 'name': 'quux', 'city': 'tag'}
]))
def test_DELETE_command(self):
" deleted records are not returned at all. "
ids = [
self.partner.create(self.cr, UID, {'name': 'foo'}),
self.partner.create(self.cr, UID, {'name': 'bar'}),
self.partner.create(self.cr, UID, {'name': 'baz'})
]
commands = [DELETE(ids[0]), DELETE(ids[1]), DELETE(ids[2])]
results = self.partner.resolve_2many_commands(
self.cr, UID, 'child_ids', commands, ['name'])
self.assertEqual(results, [])
def test_mixed_commands(self):
ids = [
self.partner.create(self.cr, UID, {'name': name})
for name in ['NObar', 'baz', 'qux', 'NOquux', 'NOcorge', 'garply']
]
results = self.partner.resolve_2many_commands(
self.cr, UID, 'child_ids', [
CREATE({'name': 'foo'}),
UPDATE(ids[0], {'name': 'bar'}),
LINK_TO(ids[1]),
DELETE(ids[2]),
UPDATE(ids[3], {'name': 'quux',}),
UPDATE(ids[4], {'name': 'corge'}),
CREATE({'name': 'grault'}),
LINK_TO(ids[5])
], ['name'])
self.assertEqual(sorted_by_id(results), sorted_by_id([
{'name': 'foo'},
{'id': ids[0], 'name': 'bar'},
{'id': ids[1], 'name': 'baz'},
{'id': ids[3], 'name': 'quux'},
{'id': ids[4], 'name': 'corge'},
{'name': 'grault'},
{'id': ids[5], 'name': 'garply'}
]))
def test_LINK_TO_pairs(self):
"LINK_TO commands can be written as pairs, instead of triplets"
ids = [
self.partner.create(self.cr, UID, {'name': 'foo'}),
self.partner.create(self.cr, UID, {'name': 'bar'}),
self.partner.create(self.cr, UID, {'name': 'baz'})
]
commands = map(lambda id: (4, id), ids)
results = self.partner.resolve_2many_commands(
self.cr, UID, 'child_ids', commands, ['name'])
self.assertEqual(sorted_by_id(results), sorted_by_id([
{'id': ids[0], 'name': 'foo'},
{'id': ids[1], 'name': 'bar'},
{'id': ids[2], 'name': 'baz'}
]))
def test_singleton_commands(self):
"DELETE_ALL can appear as a singleton"
results = self.partner.resolve_2many_commands(
self.cr, UID, 'child_ids', [DELETE_ALL()], ['name'])
self.assertEqual(results, [])
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 2,005,618,308,776,809,000 | 41.763092 | 137 | 0.570154 | false |
mitodl/salt-extensions | extensions/_utils/yamldumper.py | 1 | 2433 | # -*- coding: utf-8 -*-
'''
salt.utils.yamldumper
~~~~~~~~~~~~~~~~~~~~~
'''
# pylint: disable=W0232
# class has no __init__ method
from __future__ import absolute_import
try:
from yaml import CDumper as Dumper
from yaml import CSafeDumper as SafeDumper
except ImportError:
from yaml import Dumper
from yaml import SafeDumper
import yaml
import collections
from salt.utils.odict import OrderedDict
try:
from ioflo.aid.odicting import odict # pylint: disable=E0611
HAS_IOFLO = True
except ImportError:
odict = None
HAS_IOFLO = False
class IndentMixin(Dumper):
'''
Mixin that improves YAML dumped list readability
by indenting them by two spaces,
instead of being flush with the key they are under.
'''
def increase_indent(self, flow=False, indentless=False):
return super(IndentMixin, self).increase_indent(flow, False)
class OrderedDumper(Dumper):
'''
A YAML dumper that represents python OrderedDict as simple YAML map.
'''
class SafeOrderedDumper(SafeDumper):
'''
A YAML safe dumper that represents python OrderedDict as simple YAML map.
'''
class IndentedSafeOrderedDumper(IndentMixin, SafeOrderedDumper):
'''
A YAML safe dumper that represents python OrderedDict as simple YAML map,
and also indents lists by two spaces.
'''
pass
def represent_ordereddict(dumper, data):
return dumper.represent_dict(list(data.items()))
OrderedDumper.add_representer(OrderedDict, represent_ordereddict)
SafeOrderedDumper.add_representer(OrderedDict, represent_ordereddict)
OrderedDumper.add_representer(
collections.defaultdict,
yaml.representer.SafeRepresenter.represent_dict
)
SafeOrderedDumper.add_representer(
collections.defaultdict,
yaml.representer.SafeRepresenter.represent_dict
)
if HAS_IOFLO:
OrderedDumper.add_representer(odict, represent_ordereddict)
SafeOrderedDumper.add_representer(odict, represent_ordereddict)
def get_dumper(dumper_name):
return {
'OrderedDumper': OrderedDumper,
'SafeOrderedDumper': SafeOrderedDumper,
'IndentedSafeOrderedDumper': IndentedSafeOrderedDumper,
}.get(dumper_name)
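# Example (a sketch, not part of the original module): dumping an OrderedDict
# with key order preserved instead of PyYAML's python-specific object tags.
#
#   from collections import OrderedDict
#   data = OrderedDict([('first', 1), ('second', 2)])
#   yaml.dump(data, Dumper=get_dumper('OrderedDumper'), default_flow_style=False)
#   # -> "first: 1\nsecond: 2\n"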
def safe_dump(data, stream=None, **kwargs):
'''
Use a custom dumper to ensure that defaultdict and OrderedDict are
represented properly
'''
return yaml.dump(data, stream, Dumper=SafeOrderedDumper, **kwargs) | bsd-3-clause | 8,198,978,793,737,928,000 | 24.354167 | 77 | 0.719688 | false |
MatthewCox/dotfiles | weechat/python/spell_correction.py | 1 | 33422 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2013-2019 by nils_2 <[email protected]>
#
# a simple spell correction for a "misspelled" word
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# 2019-03-01: nils_2, (freenode.#weechat)
# 0.9 : fix bug with auto popup 'spell_suggestion' item (StarlitGhost)
#
# 2019-02-24: nils_2, (freenode.#weechat)
#       0.8.1: fix ValueError when a misspelled word doesn't have a suggestion and you move cursor in input_line
#
# 2019-02-08: nils_2, (freenode.#weechat)
# 0.8 : add: make addword function shortkey compatible (ldlework)
# : -> https://github.com/weechat/weechat/pull/1288 (WeeChat >=2.4)
# : add: selected-suggestion color
# : -> https://github.com/weechat/scripts/issues/203
# : support for "spell" plugin (WeeChat >=2.5)
# : item "spell_suggest" renamed to "spell_suggestion"
# : -> https://github.com/weechat/weechat/issues/1299
#
# 2013-10-04: nils_2, (freenode.#weechat)
# 0.7 : add: addword function (idea by Nei)
# : localvar wasn't removed after /input return
#
# 2013-10-04: nils_2, (freenode.#weechat)
# 0.6 : add: new option replace_mode
# : add: support of /eval (weechat >= 0.4.2)
# : fix: typo in help
#
# 2013-06-27: nils_2, (freenode.#weechat)
# 0.5 : fix: bug with root input bar
#
# 2013-02-16: nils_2, (freenode.#weechat)
# 0.4 : bug with empty localvar removed (reported by swimmer)
#
# 2013-01-31: nils_2, (freenode.#weechat)
# 0.3 : using new info "aspell_dict" (weechat >= 0.4.1)
#
# 2013-01-25: nils_2, (freenode.#weechat)
# 0.2 : make script compatible with Python 3.x
#
# 2012-01-13: nils_2, (freenode.#weechat)
# 0.1 : - initial release -
#
# requires: WeeChat version >= 0.4.0
# better use latest stable or devel version : http://www.weechat.org/download/
#
#
# Development is currently hosted at
# https://github.com/weechatter/weechat-scripts
#
try:
import weechat, re #sys
except Exception:
print("This script must be run under WeeChat.")
print("Get WeeChat now at: http://www.weechat.org/")
quit()
SCRIPT_NAME = "spell_correction"
SCRIPT_AUTHOR = "nils_2 <[email protected]>"
SCRIPT_VERSION = "0.9"
SCRIPT_LICENSE = "GPL"
SCRIPT_DESC = "a spell correction script to use with spell/aspell plugin"
OPTIONS = { 'auto_pop_up_item' : ('off','automatic pop-up suggestion item on a misspelled word'),
'auto_replace' : ('on','replaces misspelled word with selected suggestion, automatically. If you use "off" you will have to bind command "/%s replace" to a key' % SCRIPT_NAME),
'catch_input_completion' : ('on','will catch the input_complete commands [TAB-key]'),
'eat_input_char' : ('on','will eat the next char you type, after replacing a misspelled word'),
'suggest_item' : ('${white}%S${default}', 'item format (%S = suggestion, %D = dict). Colors are allowed with format "${color}". note: since WeeChat 0.4.2 content is evaluated, see /help eval.'),
'hide_single_dict' : ('on','will hide dict in item if you have a single dict for buffer only'),
'complete_near' : ('0','show suggestions item only if you are n-chars near the misspelled word (0 = off). Using \'replace_mode\' cursor has to be n-chars near misspelled word to cycle through suggestions.'),
'replace_mode' : ('off','misspelled word will be replaced directly by suggestions. Use option \'complete_near\' to specify range and item \'spell_suggestion\' to show possible suggestions.'),
}
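# Example key bindings (assumed, adjust to taste): when 'catch_input_completion'
# is off, the /spell_correction command can be bound to keys to cycle through
# suggestions and apply the selected one ("previous" cycles backwards,
# "replace" applies the suggestion, as the option help above describes):
#
#   /key bind meta-s /spell_correction
#   /key bind meta-S /spell_correction previous
#   /key bind meta-r /spell_correction replace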
Hooks = {'catch_input_completion': '', 'catch_input_return': ''}
regex_color=re.compile('\$\{([^\{\}]+)\}')
regex_optional_tags=re.compile('%\{[^\{\}]+\}')
multiline_input = 0
plugin_name = "spell" # WeeChat >= 2.5
old_plugin_name = "aspell" # WeeChat < 2.5
# ================================[ weechat options & description ]===============================
def init_options():
for option,value in OPTIONS.items():
if not weechat.config_is_set_plugin(option):
weechat.config_set_plugin(option, value[0])
OPTIONS[option] = value[0]
else:
OPTIONS[option] = weechat.config_get_plugin(option)
weechat.config_set_desc_plugin(option, '%s (default: "%s")' % (value[1], value[0]))
def toggle_refresh(pointer, name, value):
global OPTIONS
option = name[len('plugins.var.python.' + SCRIPT_NAME + '.'):] # get optionname
OPTIONS[option] = value # save new value
if OPTIONS['catch_input_completion'].lower() == "off":
if Hooks['catch_input_completion']:
weechat.unhook(Hooks['catch_input_completion'])
Hooks['catch_input_completion'] = ''
weechat.unhook(Hooks['catch_input_return'])
Hooks['catch_input_return'] = ''
elif OPTIONS['catch_input_completion'].lower() == "on":
if not Hooks['catch_input_completion']:
Hooks['catch_input_completion'] = weechat.hook_command_run('/input complete*', 'input_complete_cb', '')
Hooks['catch_input_return'] = weechat.hook_command_run('/input return', 'input_return_cb', '')
return weechat.WEECHAT_RC_OK
# ================================[ hooks() ]===============================
# called from command and when TAB is pressed
def auto_suggest_cmd_cb(data, buffer, args):
arguments = args.split(' ')
input_line = weechat.buffer_get_string(buffer, 'input')
weechat.buffer_set(buffer, 'localvar_set_spell_correction_suggest_input_line', '%s' % input_line)
if args.lower() == 'replace':
replace_misspelled_word(buffer)
return weechat.WEECHAT_RC_OK
# if not weechat.buffer_get_string(buffer,'localvar_spell_correction_suggest_item'):
# return weechat.WEECHAT_RC_OK
tab_complete,position,aspell_suggest_item = get_position_and_suggest_item(buffer)
if not position:
position = -1
if arguments[0].lower() == 'addword' and len(arguments) >= 2:
found_dicts = get_aspell_dict_for(buffer)
if len(found_dicts.split(",")) == 1 and len(arguments) == 2:
word = arguments[1]
weechat.command("","/%s addword %s" % (plugin_name,word) )
elif arguments[1] in found_dicts.split(",") and len(arguments) == 3:
word = arguments[2]
weechat.command("","/%s addword %s %s" % (plugin_name,arguments[1],word))
# get localvar for misspelled_word and suggestions from buffer or return
localvar_aspell_suggest = get_localvar_aspell_suggest(buffer)
if not localvar_aspell_suggest:
return weechat.WEECHAT_RC_OK
if arguments[0].lower() == 'addword' and len(arguments) == 1:
found_dicts = get_aspell_dict_for(buffer)
if not ":" in localvar_aspell_suggest and not "," in found_dicts:
weechat.command("","/%s addword %s" % (plugin_name,localvar_aspell_suggest))
else:
misspelled_word,aspell_suggestions = localvar_aspell_suggest.split(':')
weechat.command("","/%s addword %s" % (plugin_name,misspelled_word))
return weechat.WEECHAT_RC_OK
if not ":" in localvar_aspell_suggest:
return weechat.WEECHAT_RC_OK
misspelled_word,aspell_suggestions = localvar_aspell_suggest.split(':')
aspell_suggestions = aspell_suggestions.replace('/',',')
aspell_suggestion_list = aspell_suggestions.split(',')
if len(aspell_suggestion_list) == 0:
position = -1
weechat.bar_item_update('spell_correction')
weechat.bar_item_update('spell_suggestion')
return weechat.WEECHAT_RC_OK
# append an empty entry to suggestions to quit without changes.
if OPTIONS['auto_replace'].lower() == "on":
aspell_suggestion_list.append('')
position = int(position)
# cycle backwards through suggestions
if args == '/input complete_previous' or args == 'previous':
# position <= -1? go to last suggestion
if position <= -1:
position = len(aspell_suggestion_list)-1
position -= 1
# cycle forward through suggestions
else:
if position >= len(aspell_suggestion_list)-1:
position = 0
else:
position += 1
# 2 = TAB or command is called
weechat.buffer_set(buffer, 'localvar_set_spell_correction_suggest_item', '%s:%s:%s' % ('2',str(position),aspell_suggestion_list[position]))
weechat.bar_item_update('spell_correction')
return weechat.WEECHAT_RC_OK
# spell_correction_suggest_item:
def show_spell_correction_item_cb (data, item, window):
# check for root input bar!
if not window:
window = weechat.current_window()
# weechat.buffer_get_string(buffer,'localvar_spell_correction_suggest_item'):
buffer = weechat.window_get_pointer(window,"buffer")
if buffer == '':
return ''
tab_complete,position,aspell_suggest_item = get_position_and_suggest_item(buffer)
if not position or not aspell_suggest_item:
return ''
config_spell_suggest_item = weechat.config_get_plugin('suggest_item')
dict_found = search_dict(buffer,position)
if dict_found:
if config_spell_suggest_item:
show_item = config_spell_suggest_item.replace('%S',aspell_suggest_item)
show_item = show_item.replace('%D',dict_found)
show_item = substitute_colors(show_item)
return '%s' % (show_item)
else:
return aspell_suggest_item
else:
if config_spell_suggest_item:
show_item = config_spell_suggest_item.replace('%S',aspell_suggest_item)
if weechat.config_get_plugin('hide_single_dict').lower() == 'off':
show_item = show_item.replace('%D',get_aspell_dict_for(buffer))
else:
show_item = show_item.replace('%D','').rstrip()
show_item = substitute_colors(show_item)
return '%s' % (show_item)
return aspell_suggest_item
# if a suggestion is selected and you edit input line, then replace misspelled word!
def input_text_changed_cb(data, signal, signal_data):
global multiline_input
if multiline_input == '1':
return weechat.WEECHAT_RC_OK
buffer = signal_data
if not buffer:
return weechat.WEECHAT_RC_OK
tab_complete,position,aspell_suggest_item = get_position_and_suggest_item(buffer)
if not position or not aspell_suggest_item:
cursor_pos = weechat.buffer_get_integer(buffer,'input_pos')
if get_localvar_aspell_suggest(buffer) and cursor_pos >0: # save cursor position of misspelled word
weechat.buffer_set(buffer, 'localvar_set_current_cursor_pos', '%s' % cursor_pos)
else:
saved_cursor_pos = weechat.buffer_get_string(buffer, 'localvar_current_cursor_pos')
if saved_cursor_pos != '':
if int(cursor_pos) > int(saved_cursor_pos) + int(OPTIONS['complete_near']) + 3: # +3 to be sure!
delete_localvar_replace_mode(buffer)
return weechat.WEECHAT_RC_OK
# 1 = cursor etc., 2 = TAB, 3 = replace_mode
if tab_complete != '0':
if not aspell_suggest_item:
aspell_suggest_item = ''
weechat.buffer_set(buffer, 'localvar_set_spell_correction_suggest_item', '%s:%s:%s' % ('0',position,aspell_suggest_item))
return weechat.WEECHAT_RC_OK
if OPTIONS['auto_replace'].lower() == "on":
replace_misspelled_word(buffer)
return weechat.WEECHAT_RC_OK
# weechat.buffer_set(buffer, 'localvar_set_spell_correction_suggest_item', '%s:%s:' % ('0','-1'))
weechat.bar_item_update('spell_correction')
weechat.bar_item_update('spell_suggestion')
return weechat.WEECHAT_RC_OK
# also remove localvar_suggest_item
def replace_misspelled_word(buffer):
input_line = weechat.buffer_get_string(buffer, 'localvar_spell_correction_suggest_input_line')
if not input_line:
# remove spell_correction item
weechat.buffer_set(buffer, 'localvar_del_spell_correction_suggest_item', '')
weechat.bar_item_update('spell_correction')
weechat.bar_item_update('spell_suggestion')
return
if OPTIONS['eat_input_char'].lower() == 'off' or input_line == '':
input_pos = weechat.buffer_get_integer(buffer,'input_pos')
# check cursor position
if len(input_line) < int(input_pos) or input_line[int(input_pos)-1] == ' ' or input_line == '':
input_line = weechat.buffer_get_string(buffer, 'input')
weechat.buffer_set(buffer, 'localvar_del_spell_correction_suggest_input_line', '')
localvar_aspell_suggest = get_localvar_aspell_suggest(buffer)
# localvar_aspell_suggest = word,word2/wort,wort2
if localvar_aspell_suggest:
misspelled_word,aspell_suggestions = localvar_aspell_suggest.split(':')
aspell_suggestions = aspell_suggestions.replace('/',',')
aspell_suggestion_list = aspell_suggestions.split(',')
else:
return
tab_complete,position,aspell_suggest_item = get_position_and_suggest_item(buffer)
if not position or not aspell_suggest_item:
return
position = int(position)
input_line = input_line.replace(misspelled_word, aspell_suggestion_list[position])
if input_line[-2:] == ' ':
input_line = input_line.rstrip()
input_line = input_line + ' '
weechat.buffer_set(buffer,'input',input_line)
# set new cursor position. check if suggestion is longer or smaller than misspelled word
input_pos = weechat.buffer_get_integer(buffer,'input_pos') + 1
length_misspelled_word = len(misspelled_word)
length_suggestion_word = len(aspell_suggestion_list[position])
if length_misspelled_word < length_suggestion_word:
difference = length_suggestion_word - length_misspelled_word
new_position = input_pos + difference + 1
weechat.buffer_set(buffer,'input_pos',str(new_position))
weechat.buffer_set(buffer, 'localvar_del_spell_correction_suggest_item', '')
weechat.bar_item_update('spell_suggestion')
weechat.bar_item_update('spell_correction')
# ================================[ subroutines ]===============================
# get aspell dict for suggestion
def search_dict(buffer,position):
localvar_aspell_suggest = get_localvar_aspell_suggest(buffer)
dicts_found = localvar_aspell_suggest.count("/")
if not dicts_found:
return 0
# aspell.dict.full_name = en_GB,de_DE-neu
# localvar_dict = en_GB,de_DE-neu
dictionary = get_aspell_dict_for(buffer)
if not dictionary:
return 0
dictionary_list = dictionary.split(',')
    # more than one dict?
if len(dictionary_list) > 1:
undef,aspell_suggestions = localvar_aspell_suggest.split(':')
dictionary = aspell_suggestions.split('/')
words = 0
i = -1
for a in dictionary:
i += 1
words += a.count(',')+1
if words > int(position):
break
return dictionary_list[i]
# format of localvar aspell_suggest (using two dicts): diehs:die hs,die-hs,dies/dies,Diebs,Viehs
def get_localvar_aspell_suggest(buffer):
return weechat.buffer_get_string(buffer, 'localvar_%s_suggest' % plugin_name)
def get_aspell_dict_for(buffer):
    # this should never happen, but just to be sure. Otherwise WeeChat would crash
if buffer == '':
return ''
if int(version) >= 0x00040100:
return weechat.info_get("%s_dict" % plugin_name, buffer)
    # this is a "simple" work-around and it only works for buffers with a given dictionary
    # no fallback for a partial name like "aspell.dict.irc". Get your hands on WeeChat 0.4.1
    full_name = weechat.buffer_get_string(buffer, 'full_name')
    return weechat.config_string(weechat.config_get('%s.dict.%s' % (plugin_name, full_name)))
def substitute_colors(text):
if int(version) >= 0x00040200:
return weechat.string_eval_expression(text,{},{},{})
# substitute colors in output
return re.sub(regex_color, lambda match: weechat.color(match.group(1)), text)
def get_position_and_suggest_item(buffer):
if weechat.buffer_get_string(buffer,'localvar_spell_correction_suggest_item'):
tab_complete,position,aspell_suggest_item = weechat.buffer_get_string(buffer,'localvar_spell_correction_suggest_item').split(':',2)
return (tab_complete,position,aspell_suggest_item)
else:
return ('', '', '')
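# Illustrative example of the localvar format parsed above (values assumed):
#   'localvar_spell_correction_suggest_item' = '2:1:becoming'
#   -> returns ('2', '1', 'becoming'), i.e. (tab_complete, position, suggestion)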
def aspell_suggest_cb(data, signal, signal_data):
buffer = signal_data
if OPTIONS['replace_mode'].lower() == 'on':
localvar_aspell_suggest = get_localvar_aspell_suggest(buffer)
if localvar_aspell_suggest:
            # aspell says the suggested word is also misspelled. Check whether we already have a suggestion list; if so, don't use the new misspelled word!
if weechat.buffer_get_string(buffer,'localvar_inline_suggestions'):
return weechat.WEECHAT_RC_OK
if not ":" in localvar_aspell_suggest:
return weechat.WEECHAT_RC_OK
misspelled_word,aspell_suggestions = localvar_aspell_suggest.split(':')
aspell_suggestions = aspell_suggestions.replace('/',',')
weechat.buffer_set(buffer, 'localvar_set_inline_suggestions', '%s:%s:%s' % ('2','0',aspell_suggestions))
weechat.bar_item_update('spell_suggestion')
return weechat.WEECHAT_RC_OK
if OPTIONS['auto_pop_up_item'].lower() == 'on':
auto_suggest_cmd_cb('', buffer, '')
weechat.buffer_set(buffer, 'localvar_del_spell_correction_suggest_input_line', '')
weechat.bar_item_update('spell_suggestion')
return weechat.WEECHAT_RC_OK
def get_last_position_of_misspelled_word(misspelled_word, buffer):
input_pos = weechat.buffer_get_integer(buffer,'input_pos')
input_line = weechat.buffer_get_string(buffer, 'input')
x = input_line.rfind(misspelled_word, 0, int(input_pos))
y = x + len(misspelled_word)
return x, y, input_pos
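# Illustrative example (values assumed): with input "this is a tpyo here",
# misspelled_word "tpyo" and input_pos 15, rfind() locates the word at 10,
# so the function returns (10, 14, 15) = (begin, end, cursor position).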
# this is a work-around for multiline
def multiline_cb(data, signal, signal_data):
global multiline_input
multiline_input = signal_data
# if multiline_input == '1':
# buffer = weechat.window_get_pointer(weechat.current_window(),"buffer")
# input_line = weechat.buffer_get_string(buffer, 'input')
# else:
# buffer = weechat.window_get_pointer(weechat.current_window(),"buffer")
# input_line_bak = weechat.buffer_get_string(buffer, 'input')
# if input_line != input_line_bak:
# input_text_changed_cb('','',buffer)
return weechat.WEECHAT_RC_OK
# ================================[ hook_keys() ]===============================
# TAB key pressed?
def input_complete_cb(data, buffer, command):
# check if a misspelled word already exists!
localvar_aspell_suggest = get_localvar_aspell_suggest(buffer)
if not localvar_aspell_suggest and not weechat.buffer_get_string(buffer,'localvar_inline_replace_mode'):
return weechat.WEECHAT_RC_OK
# first [TAB] on a misspelled word in "replace mode"
if OPTIONS['replace_mode'].lower() == "on" and not weechat.buffer_get_string(buffer,'localvar_inline_replace_mode') and int(OPTIONS['complete_near']) >= 0:
weechat.buffer_set(buffer, 'localvar_set_inline_replace_mode', '1')
if not ":" in localvar_aspell_suggest:
return weechat.WEECHAT_RC_OK
misspelled_word,aspell_suggestions = localvar_aspell_suggest.split(':')
begin_last_position, end_last_position, input_pos = get_last_position_of_misspelled_word(misspelled_word, buffer)
# maybe nick completion?
if begin_last_position == -1:
delete_localvar_replace_mode(buffer)
return weechat.WEECHAT_RC_OK
if input_pos - end_last_position > int(OPTIONS['complete_near']):
delete_localvar_replace_mode(buffer)
return weechat.WEECHAT_RC_OK
aspell_suggestions = aspell_suggestions.replace('/',',')
weechat.buffer_set(buffer, 'localvar_set_inline_suggestions', '%s:%s:%s' % ('2','0',aspell_suggestions))
weechat.buffer_set(buffer, 'localvar_set_save_position_of_word', '%s:%s' % (begin_last_position, end_last_position))
inline_suggestions = aspell_suggestions.split(',')
input_line = weechat.buffer_get_string(buffer, 'input')
input_line = input_line[:begin_last_position] + inline_suggestions[0] + input_line[end_last_position:]
# input_line = input_line.replace(misspelled_word, inline_suggestions[0])
word_differ = 0
if len(misspelled_word) > len(inline_suggestions[0]):
word_differ = len(misspelled_word) - len(inline_suggestions[0])
else:
word_differ = len(inline_suggestions[0]) - len(misspelled_word)
if input_line[-2:] == ' ':
input_line = input_line.rstrip()
input_line = input_line + ' '
weechat.buffer_set(buffer,'input',input_line)
input_pos = int(input_pos) + word_differ
weechat.buffer_set(buffer,'input_pos',str(input_pos))
weechat.bar_item_update('spell_suggestion')
return weechat.WEECHAT_RC_OK
# after first [TAB] on a misspelled word in "replace mode"
if OPTIONS['replace_mode'].lower() == "on" and weechat.buffer_get_string(buffer,'localvar_inline_replace_mode') == "1" and int(OPTIONS['complete_near']) >= 0:
if not ":" in weechat.buffer_get_string(buffer,'localvar_inline_suggestions'):
return weechat.WEECHAT_RC_OK
tab_complete,position,aspell_suggest_items = weechat.buffer_get_string(buffer,'localvar_inline_suggestions').split(':',2)
if not position or not aspell_suggest_items:
weechat.buffer_set(buffer, 'localvar_del_inline_replace_mode', '')
return weechat.WEECHAT_RC_OK
inline_suggestions = aspell_suggest_items.split(',')
position = int(position)
previous_position = position
# cycle backwards through suggestions
if command == '/input complete_previous':
# position <= -1? go to last suggestion
if position <= -1:
position = len(inline_suggestions)-1
else:
position -= 1
# cycle forward through suggestions
elif command == '/input complete_next':
if position >= len(inline_suggestions)-1:
position = 0
else:
position += 1
begin_last_position, end_last_position, input_pos = get_last_position_of_misspelled_word(inline_suggestions[previous_position], buffer)
if input_pos - end_last_position > int(OPTIONS['complete_near']):
delete_localvar_replace_mode(buffer)
return weechat.WEECHAT_RC_OK
input_line = weechat.buffer_get_string(buffer, 'input')
input_line = input_line[:begin_last_position] + inline_suggestions[position] + input_line[end_last_position:]
# input_line = input_line.replace(inline_suggestions[previous_position], inline_suggestions[position])
word_differ = 0
if len(inline_suggestions[previous_position]) > len(inline_suggestions[position]):
word_differ = len(inline_suggestions[previous_position]) - len(inline_suggestions[position])
else:
word_differ = len(inline_suggestions[position]) - len(inline_suggestions[previous_position])
if input_line[-2:] == ' ':
input_line = input_line.rstrip()
input_line = input_line + ' '
weechat.buffer_set(buffer,'input',input_line)
input_pos = int(input_pos) + word_differ
weechat.buffer_set(buffer,'input_pos',str(input_pos))
weechat.buffer_set(buffer, 'localvar_set_inline_suggestions', '%s:%s:%s' % ('2',str(position),aspell_suggest_items))
weechat.bar_item_update('spell_suggestion')
return weechat.WEECHAT_RC_OK
if int(OPTIONS['complete_near']) > 0:
if not ":" in localvar_aspell_suggest:
weechat.bar_item_update('spell_suggestion')
return weechat.WEECHAT_RC_OK
misspelled_word,aspell_suggestions = localvar_aspell_suggest.split(':')
begin_last_position, end_last_position, input_pos = get_last_position_of_misspelled_word(misspelled_word, buffer)
if input_pos - end_last_position > int(OPTIONS['complete_near']):
weechat.bar_item_update('spell_suggestion')
return weechat.WEECHAT_RC_OK
tab_complete,position,aspell_suggest_item = get_position_and_suggest_item(buffer)
weechat.buffer_set(buffer, 'localvar_set_spell_correction_suggest_item', '%s:%s:%s' % ('2',position,aspell_suggest_item))
auto_suggest_cmd_cb('', buffer, command)
weechat.bar_item_update('spell_suggestion')
return weechat.WEECHAT_RC_OK
def delete_localvar_replace_mode(buffer):
if OPTIONS['replace_mode'].lower() == "on":
weechat.buffer_set(buffer, 'localvar_del_inline_replace_mode', '')
weechat.buffer_set(buffer, 'localvar_del_inline_suggestions', '')
weechat.buffer_set(buffer, 'localvar_del_save_position_of_word', '')
weechat.buffer_set(buffer, 'localvar_del_current_cursor_pos', '')
weechat.bar_item_update('spell_suggestion')
# if a suggestion is selected and you press [RETURN] replace misspelled word!
def input_return_cb(data, signal, signal_data):
buffer = signal
tab_complete,position,aspell_suggest_item = get_position_and_suggest_item(buffer)
if not position or not aspell_suggest_item:
delete_localvar_replace_mode(buffer)
return weechat.WEECHAT_RC_OK
if OPTIONS['auto_replace'].lower() == "on" and aspell_suggest_item:
replace_misspelled_word(buffer)
delete_localvar_replace_mode(buffer)
return weechat.WEECHAT_RC_OK
# /input delete_*
def input_delete_cb(data, signal, signal_data):
buffer = signal
delete_localvar_replace_mode(buffer)
weechat.buffer_set(buffer, 'localvar_del_spell_correction_suggest_item', '')
weechat.buffer_set(buffer, 'localvar_del_spell_correction_suggest_input_line', '')
weechat.bar_item_update('spell_correction')
weechat.bar_item_update('spell_suggestion')
return weechat.WEECHAT_RC_OK
# /input move_* (cursor position)
def input_move_cb(data, signal, signal_data):
buffer = signal
if OPTIONS['replace_mode'].lower() == "on" and weechat.buffer_get_string(buffer,'localvar_inline_replace_mode') == "1":
delete_localvar_replace_mode(buffer)
weechat.buffer_set(buffer, 'localvar_del_spell_correction_suggest_item', '')
return weechat.WEECHAT_RC_OK
tab_complete,position,aspell_suggest_item = get_position_and_suggest_item(buffer)
localvar_aspell_suggest = get_localvar_aspell_suggest(buffer)
if not localvar_aspell_suggest or not ":" in localvar_aspell_suggest:
return weechat.WEECHAT_RC_OK
misspelled_word,aspell_suggestions = localvar_aspell_suggest.split(':')
if not aspell_suggest_item in aspell_suggestions:
aspell_suggestion_list = aspell_suggestions.split(',',1)
weechat.buffer_set(buffer, 'localvar_set_spell_correction_suggest_item', '%s:%s:%s' % ('1',0,aspell_suggestion_list[0]))
weechat.bar_item_update('spell_correction')
return weechat.WEECHAT_RC_OK
weechat.buffer_set(buffer, 'localvar_set_spell_correction_suggest_item', '%s:%s:%s' % ('1',position,aspell_suggest_item))
return weechat.WEECHAT_RC_OK
def show_spell_suggestion_item_cb (data, item, window):
buffer = weechat.window_get_pointer(window,"buffer")
if buffer == '':
return ''
if OPTIONS['replace_mode'].lower() == "on":
if not weechat.buffer_get_string(buffer,'localvar_inline_suggestions'):
return ''
tab_complete,position,aspell_suggest_items = weechat.buffer_get_string(buffer,'localvar_inline_suggestions').split(':',2)
localvar_aspell_suggest = "dummy:%s" % aspell_suggest_items
# return aspell_suggest_items
else:
tab_complete,position,aspell_suggest_item = get_position_and_suggest_item(buffer)
localvar_aspell_suggest = get_localvar_aspell_suggest(buffer)
# localvar_aspell_suggest = word,word2/wort,wort2
if localvar_aspell_suggest:
misspelled_word,aspell_suggestions = localvar_aspell_suggest.split(':')
aspell_suggestions_orig = aspell_suggestions
aspell_suggestions = aspell_suggestions.replace('/',',')
aspell_suggestion_list = aspell_suggestions.split(',')
if not position:
return ''
if position == "-1":
return aspell_suggestions_orig
if int(position) < len(aspell_suggestion_list):
reset_color = weechat.color('reset')
color = weechat.color(weechat.config_color(weechat.config_get("%s.color.misspelled" % plugin_name)))
new_word = aspell_suggestion_list[int(position)].replace(aspell_suggestion_list[int(position)],'%s%s%s' % (color, aspell_suggestion_list[int(position)], reset_color))
aspell_suggestion_list[int(position)] = new_word # replace word with colored word
aspell_suggestions_orig = ','.join(map(str, aspell_suggestion_list))
else:
return ''
return aspell_suggestions_orig
def window_switch_cb(data, signal, signal_data):
weechat.bar_item_update('spell_correction')
return weechat.WEECHAT_RC_OK
def buffer_switch_cb(data, signal, signal_data):
weechat.bar_item_update('spell_correction')
return weechat.WEECHAT_RC_OK
# ================================[ check for nick ]===============================
def weechat_nicklist_search_nick(buffer, nick):
return weechat.nicklist_search_nick(buffer, "", nick)
# ================================[ main ]===============================
if __name__ == "__main__":
if weechat.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION, SCRIPT_LICENSE, SCRIPT_DESC, '', ''):
version = weechat.info_get("version_number", "") or 0
if int(version) < 0x00040000:
weechat.prnt('','%s%s %s' % (weechat.prefix('error'),SCRIPT_NAME,': needs version 0.4.0 or higher'))
weechat.command('','/wait 1ms /python unload %s' % SCRIPT_NAME)
if int(version) < 0x02050000:
plugin_name = old_plugin_name
SCRIPT_HELP = """addword : add a word to personal aspell dictionary (does not work with multiple dicts)
previous : to cycle through previous suggestions
replace : to replace misspelled word
Quick start:
You should add the script item "spell_correction" to a bar (I suggest using the input bar).
On a misspelled word, press TAB to cycle through suggestions. Press any key while a suggestion
is shown to replace the misspelled word with the currently displayed suggestion.
Also check script options: /fset %(s)s
IMPORTANT:
"%(p)s.check.suggestions" option has to be set to a value >= 0 (default: -1 (off)).
"%(p)s.color.misspelled" option is used to highlight current suggestion in "%(p)s-suggestion" item
With "%(p)s.check.real_time" enabled, nick completion will not work until all misspelled words
in the input line have been replaced.
""" %dict(p=plugin_name, s=SCRIPT_NAME)
weechat.hook_command(SCRIPT_NAME, SCRIPT_DESC, 'addword <word>||previous||replace',
SCRIPT_HELP+
'',
'previous|replace|addword',
'auto_suggest_cmd_cb', '')
init_options()
weechat.hook_command_run('/input delete_*', 'input_delete_cb', '')
weechat.hook_command_run('/input move*', 'input_move_cb', '')
weechat.hook_signal ('input_text_changed', 'input_text_changed_cb', '')
# multiline workaround
weechat.hook_signal('input_flow_free', 'multiline_cb', '')
weechat.hook_signal ('%s_suggest' % plugin_name, 'aspell_suggest_cb', '')
weechat.hook_signal ('buffer_switch', 'buffer_switch_cb','')
weechat.hook_signal ('window_switch', 'window_switch_cb','')
if OPTIONS['catch_input_completion'].lower() == "on":
Hooks['catch_input_completion'] = weechat.hook_command_run('/input complete*', 'input_complete_cb', '')
Hooks['catch_input_return'] = weechat.hook_command_run('/input return', 'input_return_cb', '')
weechat.hook_config('plugins.var.python.' + SCRIPT_NAME + '.*', 'toggle_refresh', '')
weechat.bar_item_new('spell_correction', 'show_spell_correction_item_cb', '')
weechat.bar_item_new('spell_suggestion', 'show_spell_suggestion_item_cb', '')
| mit | 8,494,495,120,535,851,000 | 45.355062 | 236 | 0.641254 | false |
rogerthat-platform/rogerthat-backend | src/rogerthat/bizz/job/re_index_service_identities.py | 1 | 1824 | # -*- coding: utf-8 -*-
# Copyright 2017 GIG Technology NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.3@@
from google.appengine.api import search
from google.appengine.ext import db
from rogerthat.bizz.job import run_job
from rogerthat.bizz.service import SERVICE_INDEX, SERVICE_LOCATION_INDEX, re_index
from rogerthat.consts import HIGH_LOAD_WORKER_QUEUE
from rogerthat.dal.profile import is_trial_service
from rogerthat.models import ServiceIdentity
from rogerthat.utils.service import get_service_user_from_service_identity_user
def drop_index(index):
while True:
search_result = index.get_range(ids_only=True)
ids = [r.doc_id for r in search_result.results]
if not ids:
break
index.delete(ids)
def job(queue=HIGH_LOAD_WORKER_QUEUE):
svc_index = search.Index(name=SERVICE_INDEX)
loc_index = search.Index(name=SERVICE_LOCATION_INDEX)
drop_index(svc_index)
drop_index(loc_index)
run_job(query, [], worker, [], worker_queue=queue)
def worker(si_key):
si = db.get(si_key)
service_user = get_service_user_from_service_identity_user(si.service_identity_user)
if is_trial_service(service_user):
return
re_index(si.service_identity_user)
def query():
return ServiceIdentity.all(keys_only=True)
| apache-2.0 | 8,081,620,779,431,046,000 | 31 | 88 | 0.72807 | false |
pegasusict/AMM | lib/reportbuilder.py | 1 | 1941 | #!/usr/bin/env python3
"""
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* Audiophiles Music Manager Build 20180119 VER0.0.0PREALPHA *
* (C)2017 Mattijs Snepvangers [email protected] *
* lib/reportbuider.py Report Builder VER0.0.0PREALPHA *
* License: MIT Please keep my name in the credits *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
"""
class report_builder:
    def __init__(self):
        """initialising report generator"""
        self.reportdata = {}
        self._valid_subjects = ["purged_files", "reclaimed space"]
    def update(self, kwargs):
        """reportdata collector"""
        for key, value in kwargs.items():
            if key not in self._valid_subjects:
                raise TypeError("update received an unknown subject: "
                                "{}".format(key))
            if key not in self.reportdata:
                self.reportdata[key] = value
            elif isinstance(self.reportdata[key], (int, float)):
                self.reportdata[key] += value
            else:
                self.reportdata[key] = self.reportdata[key] + value
    def render_report(self, reportType):
"""reportbuilder info, more to come"""
###TODO### devise a way to generate the report properly
#if reportType == "display":
# print '### display template'
#elif reportType == "html":
# print '### html template'
#else: # reportType = "text"
# print '### text template'
pass
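# Illustrative usage (assumed; not part of the original module):
#   rb = report_builder()
#   rb.update({"purged_files": 3})
#   rb.update({"purged_files": 2, "reclaimed space": 2048})  # numeric values accumulate
#   rb.render_report("text")   # rendering itself is still a TODO stub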
def main():
"""just in case somebody wants to test this file by itself"""
print("It works!!! ;-)")
###TODO### do something with the various methods/functions of this file
# standard boilerplate
if __name__ == '__main__':
main()
| mit | 5,438,956,111,110,056,000 | 37.058824 | 79 | 0.49356 | false |
endlessm/chromium-browser | third_party/llvm/lldb/examples/python/x86_64_target_definition.py | 6 | 20727 | #!/usr/bin/python
#===-- x86_64_target_definition.py -----------------------------*- C++ -*-===//
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===//
#----------------------------------------------------------------------
# DESCRIPTION
#
# This file can be used with the following setting:
# plugin.process.gdb-remote.target-definition-file
# This setting should be used when you are trying to connect to a
# remote GDB server that doesn't support any of the register discovery
# packets that LLDB normally uses.
#
# Why is this necessary? LLDB doesn't require a new build of LLDB that
# targets each new architecture you will debug with. Instead, all
# architectures are supported and LLDB relies on extra GDB server
# packets to discover the target we are connecting to so that it can
# show the right registers for each target. This allows the GDB server
# to change and add new registers without requiring a new LLDB build
# just so we can see new registers.
#
# This file implements the x86_64 registers for the darwin version of
# GDB and allows you to connect to servers that use this register set.
#
# USAGE
#
# (lldb) settings set plugin.process.gdb-remote.target-definition-file /path/to/x86_64_target_definition.py
# (lldb) gdb-remote other.baz.com:1234
#
# The target definition file will get used if and only if the
# qRegisterInfo packets are not supported when connecting to a remote
# GDB server.
#----------------------------------------------------------------------
from lldb import *
# Compiler and DWARF register numbers
name_to_gcc_dwarf_regnum = {
'rax': 0,
'rdx': 1,
'rcx': 2,
'rbx': 3,
'rsi': 4,
'rdi': 5,
'rbp': 6,
'rsp': 7,
'r8': 8,
'r9': 9,
'r10': 10,
'r11': 11,
'r12': 12,
'r13': 13,
'r14': 14,
'r15': 15,
'rip': 16,
'xmm0': 17,
'xmm1': 18,
'xmm2': 19,
'xmm3': 20,
'xmm4': 21,
'xmm5': 22,
'xmm6': 23,
'xmm7': 24,
'xmm8': 25,
'xmm9': 26,
'xmm10': 27,
'xmm11': 28,
'xmm12': 29,
'xmm13': 30,
'xmm14': 31,
'xmm15': 32,
'stmm0': 33,
'stmm1': 34,
'stmm2': 35,
'stmm3': 36,
'stmm4': 37,
'stmm5': 38,
'stmm6': 39,
    'stmm7': 40,
'ymm0': 41,
'ymm1': 42,
'ymm2': 43,
'ymm3': 44,
'ymm4': 45,
'ymm5': 46,
'ymm6': 47,
'ymm7': 48,
'ymm8': 49,
    'ymm9': 50,
    'ymm10': 51,
    'ymm11': 52,
    'ymm12': 53,
    'ymm13': 54,
    'ymm14': 55,
    'ymm15': 56
}
name_to_gdb_regnum = {
'rax': 0,
'rbx': 1,
'rcx': 2,
'rdx': 3,
'rsi': 4,
'rdi': 5,
'rbp': 6,
'rsp': 7,
'r8': 8,
'r9': 9,
'r10': 10,
'r11': 11,
'r12': 12,
'r13': 13,
'r14': 14,
'r15': 15,
'rip': 16,
'rflags': 17,
'cs': 18,
'ss': 19,
'ds': 20,
'es': 21,
'fs': 22,
'gs': 23,
'stmm0': 24,
'stmm1': 25,
'stmm2': 26,
'stmm3': 27,
'stmm4': 28,
'stmm5': 29,
'stmm6': 30,
'stmm7': 31,
'fctrl': 32,
'fstat': 33,
'ftag': 34,
'fiseg': 35,
'fioff': 36,
'foseg': 37,
'fooff': 38,
'fop': 39,
'xmm0': 40,
'xmm1': 41,
'xmm2': 42,
'xmm3': 43,
'xmm4': 44,
'xmm5': 45,
'xmm6': 46,
'xmm7': 47,
'xmm8': 48,
'xmm9': 49,
'xmm10': 50,
'xmm11': 51,
'xmm12': 52,
'xmm13': 53,
'xmm14': 54,
'xmm15': 55,
'mxcsr': 56,
'ymm0': 57,
'ymm1': 58,
'ymm2': 59,
'ymm3': 60,
'ymm4': 61,
'ymm5': 62,
'ymm6': 63,
'ymm7': 64,
'ymm8': 65,
'ymm9': 66,
'ymm10': 67,
'ymm11': 68,
'ymm12': 69,
'ymm13': 70,
'ymm14': 71,
'ymm15': 72
}
name_to_generic_regnum = {
'rip': LLDB_REGNUM_GENERIC_PC,
'rsp': LLDB_REGNUM_GENERIC_SP,
'rbp': LLDB_REGNUM_GENERIC_FP,
'rdi': LLDB_REGNUM_GENERIC_ARG1,
'rsi': LLDB_REGNUM_GENERIC_ARG2,
'rdx': LLDB_REGNUM_GENERIC_ARG3,
'rcx': LLDB_REGNUM_GENERIC_ARG4,
'r8': LLDB_REGNUM_GENERIC_ARG5,
'r9': LLDB_REGNUM_GENERIC_ARG6
}
def get_reg_num(reg_num_dict, reg_name):
if reg_name in reg_num_dict:
return reg_num_dict[reg_name]
return LLDB_INVALID_REGNUM
x86_64_register_infos = [
{'name': 'rax',
'set': 0,
'bitsize': 64,
'encoding': eEncodingUint,
'format': eFormatAddressInfo},
{'name': 'rbx',
'set': 0,
'bitsize': 64,
'encoding': eEncodingUint,
'format': eFormatAddressInfo},
{'name': 'rcx', 'set': 0, 'bitsize': 64, 'encoding': eEncodingUint,
'format': eFormatAddressInfo, 'alt-name': 'arg4'},
{'name': 'rdx', 'set': 0, 'bitsize': 64, 'encoding': eEncodingUint,
'format': eFormatAddressInfo, 'alt-name': 'arg3'},
{'name': 'rsi', 'set': 0, 'bitsize': 64, 'encoding': eEncodingUint,
'format': eFormatAddressInfo, 'alt-name': 'arg2'},
{'name': 'rdi', 'set': 0, 'bitsize': 64, 'encoding': eEncodingUint,
'format': eFormatAddressInfo, 'alt-name': 'arg1'},
{'name': 'rbp', 'set': 0, 'bitsize': 64, 'encoding': eEncodingUint,
'format': eFormatAddressInfo, 'alt-name': 'fp'},
{'name': 'rsp', 'set': 0, 'bitsize': 64, 'encoding': eEncodingUint,
'format': eFormatAddressInfo, 'alt-name': 'sp'},
{'name': 'r8', 'set': 0, 'bitsize': 64, 'encoding': eEncodingUint,
'format': eFormatAddressInfo, 'alt-name': 'arg5'},
{'name': 'r9', 'set': 0, 'bitsize': 64, 'encoding': eEncodingUint,
'format': eFormatAddressInfo, 'alt-name': 'arg6'},
{'name': 'r10',
'set': 0,
'bitsize': 64,
'encoding': eEncodingUint,
'format': eFormatAddressInfo},
{'name': 'r11',
'set': 0,
'bitsize': 64,
'encoding': eEncodingUint,
'format': eFormatAddressInfo},
{'name': 'r12',
'set': 0,
'bitsize': 64,
'encoding': eEncodingUint,
'format': eFormatAddressInfo},
{'name': 'r13',
'set': 0,
'bitsize': 64,
'encoding': eEncodingUint,
'format': eFormatAddressInfo},
{'name': 'r14',
'set': 0,
'bitsize': 64,
'encoding': eEncodingUint,
'format': eFormatAddressInfo},
{'name': 'r15',
'set': 0,
'bitsize': 64,
'encoding': eEncodingUint,
'format': eFormatAddressInfo},
{'name': 'rip', 'set': 0, 'bitsize': 64, 'encoding': eEncodingUint,
'format': eFormatAddressInfo, 'alt-name': 'pc'},
{'name': 'rflags', 'set': 0, 'bitsize': 32,
'encoding': eEncodingUint, 'format': eFormatHex},
{'name': 'cs', 'set': 0, 'bitsize': 32,
'encoding': eEncodingUint, 'format': eFormatHex},
{'name': 'ss', 'set': 0, 'bitsize': 32,
'encoding': eEncodingUint, 'format': eFormatHex},
{'name': 'ds', 'set': 0, 'bitsize': 32,
'encoding': eEncodingUint, 'format': eFormatHex},
{'name': 'es', 'set': 0, 'bitsize': 32,
'encoding': eEncodingUint, 'format': eFormatHex},
{'name': 'fs', 'set': 0, 'bitsize': 32,
'encoding': eEncodingUint, 'format': eFormatHex},
{'name': 'gs', 'set': 0, 'bitsize': 32,
'encoding': eEncodingUint, 'format': eFormatHex},
{'name': 'stmm0',
'set': 1,
'bitsize': 80,
'encoding': eEncodingVector,
'format': eFormatVectorOfUInt8},
{'name': 'stmm1',
'set': 1,
'bitsize': 80,
'encoding': eEncodingVector,
'format': eFormatVectorOfUInt8},
{'name': 'stmm2',
'set': 1,
'bitsize': 80,
'encoding': eEncodingVector,
'format': eFormatVectorOfUInt8},
{'name': 'stmm3',
'set': 1,
'bitsize': 80,
'encoding': eEncodingVector,
'format': eFormatVectorOfUInt8},
{'name': 'stmm4',
'set': 1,
'bitsize': 80,
'encoding': eEncodingVector,
'format': eFormatVectorOfUInt8},
{'name': 'stmm5',
'set': 1,
'bitsize': 80,
'encoding': eEncodingVector,
'format': eFormatVectorOfUInt8},
{'name': 'stmm6',
'set': 1,
'bitsize': 80,
'encoding': eEncodingVector,
'format': eFormatVectorOfUInt8},
{'name': 'stmm7',
'set': 1,
'bitsize': 80,
'encoding': eEncodingVector,
'format': eFormatVectorOfUInt8},
{'name': 'fctrl', 'set': 1, 'bitsize': 32,
'encoding': eEncodingUint, 'format': eFormatHex},
{'name': 'fstat', 'set': 1, 'bitsize': 32,
'encoding': eEncodingUint, 'format': eFormatHex},
{'name': 'ftag', 'set': 1, 'bitsize': 32,
'encoding': eEncodingUint, 'format': eFormatHex},
{'name': 'fiseg', 'set': 1, 'bitsize': 32,
'encoding': eEncodingUint, 'format': eFormatHex},
{'name': 'fioff', 'set': 1, 'bitsize': 32,
'encoding': eEncodingUint, 'format': eFormatHex},
{'name': 'foseg', 'set': 1, 'bitsize': 32,
'encoding': eEncodingUint, 'format': eFormatHex},
{'name': 'fooff', 'set': 1, 'bitsize': 32,
'encoding': eEncodingUint, 'format': eFormatHex},
{'name': 'fop', 'set': 1, 'bitsize': 32,
'encoding': eEncodingUint, 'format': eFormatHex},
{'name': 'xmm0',
'set': 1,
'bitsize': 128,
'encoding': eEncodingVector,
'format': eFormatVectorOfUInt8},
{'name': 'xmm1',
'set': 1,
'bitsize': 128,
'encoding': eEncodingVector,
'format': eFormatVectorOfUInt8},
{'name': 'xmm2',
'set': 1,
'bitsize': 128,
'encoding': eEncodingVector,
'format': eFormatVectorOfUInt8},
{'name': 'xmm3',
'set': 1,
'bitsize': 128,
'encoding': eEncodingVector,
'format': eFormatVectorOfUInt8},
{'name': 'xmm4',
'set': 1,
'bitsize': 128,
'encoding': eEncodingVector,
'format': eFormatVectorOfUInt8},
{'name': 'xmm5',
'set': 1,
'bitsize': 128,
'encoding': eEncodingVector,
'format': eFormatVectorOfUInt8},
{'name': 'xmm6',
'set': 1,
'bitsize': 128,
'encoding': eEncodingVector,
'format': eFormatVectorOfUInt8},
{'name': 'xmm7',
'set': 1,
'bitsize': 128,
'encoding': eEncodingVector,
'format': eFormatVectorOfUInt8},
{'name': 'xmm8',
'set': 1,
'bitsize': 128,
'encoding': eEncodingVector,
'format': eFormatVectorOfUInt8},
{'name': 'xmm9',
'set': 1,
'bitsize': 128,
'encoding': eEncodingVector,
'format': eFormatVectorOfUInt8},
{'name': 'xmm10',
'set': 1,
'bitsize': 128,
'encoding': eEncodingVector,
'format': eFormatVectorOfUInt8},
{'name': 'xmm11',
'set': 1,
'bitsize': 128,
'encoding': eEncodingVector,
'format': eFormatVectorOfUInt8},
{'name': 'xmm12',
'set': 1,
'bitsize': 128,
'encoding': eEncodingVector,
'format': eFormatVectorOfUInt8},
{'name': 'xmm13',
'set': 1,
'bitsize': 128,
'encoding': eEncodingVector,
'format': eFormatVectorOfUInt8},
{'name': 'xmm14',
'set': 1,
'bitsize': 128,
'encoding': eEncodingVector,
'format': eFormatVectorOfUInt8},
{'name': 'xmm15',
'set': 1,
'bitsize': 128,
'encoding': eEncodingVector,
'format': eFormatVectorOfUInt8},
{'name': 'mxcsr', 'set': 1, 'bitsize': 32,
'encoding': eEncodingUint, 'format': eFormatHex},
    # Registers that are contained in or composed of one or more other
    # registers
{'name': 'eax',
'set': 0,
'bitsize': 32,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'rax[31:0]'},
{'name': 'ebx',
'set': 0,
'bitsize': 32,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'rbx[31:0]'},
{'name': 'ecx',
'set': 0,
'bitsize': 32,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'rcx[31:0]'},
{'name': 'edx',
'set': 0,
'bitsize': 32,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'rdx[31:0]'},
{'name': 'edi',
'set': 0,
'bitsize': 32,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'rdi[31:0]'},
{'name': 'esi',
'set': 0,
'bitsize': 32,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'rsi[31:0]'},
{'name': 'ebp',
'set': 0,
'bitsize': 32,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'rbp[31:0]'},
{'name': 'esp',
'set': 0,
'bitsize': 32,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'rsp[31:0]'},
{'name': 'r8d',
'set': 0,
'bitsize': 32,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'r8[31:0]'},
{'name': 'r9d',
'set': 0,
'bitsize': 32,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'r9[31:0]'},
{'name': 'r10d',
'set': 0,
'bitsize': 32,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'r10[31:0]'},
{'name': 'r11d',
'set': 0,
'bitsize': 32,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'r11[31:0]'},
{'name': 'r12d',
'set': 0,
'bitsize': 32,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'r12[31:0]'},
{'name': 'r13d',
'set': 0,
'bitsize': 32,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'r13[31:0]'},
{'name': 'r14d',
'set': 0,
'bitsize': 32,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'r14[31:0]'},
{'name': 'r15d',
'set': 0,
'bitsize': 32,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'r15[31:0]'},
{'name': 'ax',
'set': 0,
'bitsize': 16,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'rax[15:0]'},
{'name': 'bx',
'set': 0,
'bitsize': 16,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'rbx[15:0]'},
{'name': 'cx',
'set': 0,
'bitsize': 16,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'rcx[15:0]'},
{'name': 'dx',
'set': 0,
'bitsize': 16,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'rdx[15:0]'},
{'name': 'di',
'set': 0,
'bitsize': 16,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'rdi[15:0]'},
{'name': 'si',
'set': 0,
'bitsize': 16,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'rsi[15:0]'},
{'name': 'bp',
'set': 0,
'bitsize': 16,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'rbp[15:0]'},
{'name': 'sp',
'set': 0,
'bitsize': 16,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'rsp[15:0]'},
{'name': 'r8w',
'set': 0,
'bitsize': 16,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'r8[15:0]'},
{'name': 'r9w',
'set': 0,
'bitsize': 16,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'r9[15:0]'},
{'name': 'r10w',
'set': 0,
'bitsize': 16,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'r10[15:0]'},
{'name': 'r11w',
'set': 0,
'bitsize': 16,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'r11[15:0]'},
{'name': 'r12w',
'set': 0,
'bitsize': 16,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'r12[15:0]'},
{'name': 'r13w',
'set': 0,
'bitsize': 16,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'r13[15:0]'},
{'name': 'r14w',
'set': 0,
'bitsize': 16,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'r14[15:0]'},
{'name': 'r15w',
'set': 0,
'bitsize': 16,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'r15[15:0]'},
{'name': 'ah',
'set': 0,
'bitsize': 8,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'rax[15:8]'},
{'name': 'bh',
'set': 0,
'bitsize': 8,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'rbx[15:8]'},
{'name': 'ch',
'set': 0,
'bitsize': 8,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'rcx[15:8]'},
{'name': 'dh',
'set': 0,
'bitsize': 8,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'rdx[15:8]'},
{'name': 'al',
'set': 0,
'bitsize': 8,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'rax[7:0]'},
{'name': 'bl',
'set': 0,
'bitsize': 8,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'rbx[7:0]'},
{'name': 'cl',
'set': 0,
'bitsize': 8,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'rcx[7:0]'},
{'name': 'dl',
'set': 0,
'bitsize': 8,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'rdx[7:0]'},
{'name': 'dil',
'set': 0,
'bitsize': 8,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'rdi[7:0]'},
{'name': 'sil',
'set': 0,
'bitsize': 8,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'rsi[7:0]'},
{'name': 'bpl',
'set': 0,
'bitsize': 8,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'rbp[7:0]'},
{'name': 'spl',
'set': 0,
'bitsize': 8,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'rsp[7:0]'},
{'name': 'r8l',
'set': 0,
'bitsize': 8,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'r8[7:0]'},
{'name': 'r9l',
'set': 0,
'bitsize': 8,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'r9[7:0]'},
{'name': 'r10l',
'set': 0,
'bitsize': 8,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'r10[7:0]'},
{'name': 'r11l',
'set': 0,
'bitsize': 8,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'r11[7:0]'},
{'name': 'r12l',
'set': 0,
'bitsize': 8,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'r12[7:0]'},
{'name': 'r13l',
'set': 0,
'bitsize': 8,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'r13[7:0]'},
{'name': 'r14l',
'set': 0,
'bitsize': 8,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'r14[7:0]'},
{'name': 'r15l',
'set': 0,
'bitsize': 8,
'encoding': eEncodingUint,
'format': eFormatHex,
'slice': 'r15[7:0]'},
]
g_target_definition = None
def get_target_definition():
global g_target_definition
if g_target_definition is None:
g_target_definition = {}
offset = 0
for reg_info in x86_64_register_infos:
reg_name = reg_info['name']
# Only fill in the offset if there is no 'slice' in the register
# info
if 'slice' not in reg_info and 'composite' not in reg_info:
reg_info['offset'] = offset
offset += reg_info['bitsize'] // 8
# Set the GCC/DWARF register number for this register if it has one
reg_num = get_reg_num(name_to_gcc_dwarf_regnum, reg_name)
if reg_num != LLDB_INVALID_REGNUM:
reg_info['gcc'] = reg_num
reg_info['dwarf'] = reg_num
# Set the generic register number for this register if it has one
reg_num = get_reg_num(name_to_generic_regnum, reg_name)
if reg_num != LLDB_INVALID_REGNUM:
reg_info['generic'] = reg_num
# Set the GDB register number for this register if it has one
reg_num = get_reg_num(name_to_gdb_regnum, reg_name)
if reg_num != LLDB_INVALID_REGNUM:
reg_info['gdb'] = reg_num
g_target_definition['sets'] = [
'General Purpose Registers',
'Floating Point Registers']
g_target_definition['registers'] = x86_64_register_infos
g_target_definition[
'host-info'] = {'triple': 'x86_64-apple-macosx', 'endian': eByteOrderLittle}
g_target_definition['g-packet-size'] = offset
return g_target_definition
def get_dynamic_setting(target, setting_name):
if setting_name == 'gdb-server-target-definition':
return get_target_definition()
| bsd-3-clause | 656,548,904,977,199,700 | 25.675676 | 107 | 0.522459 | false |
bxshi/gem5 | src/arch/x86/isa/insts/general_purpose/compare_and_test/compare.py | 91 | 3017 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
def macroop CMP_R_M
{
ld t1, seg, sib, disp
sub t0, reg, t1, flags=(OF, SF, ZF, AF, PF, CF)
};
def macroop CMP_R_P
{
rdip t7
ld t1, seg, riprel, disp
sub t0, reg, t1, flags=(OF, SF, ZF, AF, PF, CF)
};
def macroop CMP_M_I
{
limm t2, imm
ld t1, seg, sib, disp
sub t0, t1, t2, flags=(OF, SF, ZF, AF, PF, CF)
};
def macroop CMP_P_I
{
limm t2, imm
rdip t7
ld t1, seg, riprel, disp
sub t0, t1, t2, flags=(OF, SF, ZF, AF, PF, CF)
};
def macroop CMP_M_R
{
ld t1, seg, sib, disp
sub t0, t1, reg, flags=(OF, SF, ZF, AF, PF, CF)
};
def macroop CMP_P_R
{
rdip t7
ld t1, seg, riprel, disp
sub t0, t1, reg, flags=(OF, SF, ZF, AF, PF, CF)
};
def macroop CMP_R_R
{
sub t0, reg, regm, flags=(OF, SF, ZF, AF, PF, CF)
};
def macroop CMP_R_I
{
limm t1, imm
sub t0, reg, t1, flags=(OF, SF, ZF, AF, PF, CF)
};
'''
| bsd-3-clause | 1,639,766,114,973,048,600 | 32.522222 | 72 | 0.721246 | false |
Lukc/ospace-lukc | server/lib/medusa/mime_type_table.py | 4 | 3941 | # -*- Python -*-
# Converted by ./convert_mime_type_table.py from:
# /usr/src2/apache_1.2b6/conf/mime.types
#
content_type_map = \
{
'ai': 'application/postscript',
'aif': 'audio/x-aiff',
'aifc': 'audio/x-aiff',
'aiff': 'audio/x-aiff',
'au': 'audio/basic',
'avi': 'video/x-msvideo',
'bcpio': 'application/x-bcpio',
'bin': 'application/octet-stream',
'cdf': 'application/x-netcdf',
'class': 'application/octet-stream',
'cpio': 'application/x-cpio',
'cpt': 'application/mac-compactpro',
'csh': 'application/x-csh',
'dcr': 'application/x-director',
'dir': 'application/x-director',
'dms': 'application/octet-stream',
'doc': 'application/msword',
'dvi': 'application/x-dvi',
'dxr': 'application/x-director',
'eps': 'application/postscript',
'etx': 'text/x-setext',
'exe': 'application/octet-stream',
'gif': 'image/gif',
'gtar': 'application/x-gtar',
'gz': 'application/x-gzip',
'hdf': 'application/x-hdf',
'hqx': 'application/mac-binhex40',
'htm': 'text/html',
'html': 'text/html',
'ice': 'x-conference/x-cooltalk',
'ief': 'image/ief',
'jpe': 'image/jpeg',
'jpeg': 'image/jpeg',
'jpg': 'image/jpeg',
'kar': 'audio/midi',
'latex': 'application/x-latex',
'lha': 'application/octet-stream',
'lzh': 'application/octet-stream',
'man': 'application/x-troff-man',
'me': 'application/x-troff-me',
'mid': 'audio/midi',
'midi': 'audio/midi',
'mif': 'application/x-mif',
'mov': 'video/quicktime',
'movie': 'video/x-sgi-movie',
'mp2': 'audio/mpeg',
'mpe': 'video/mpeg',
'mpeg': 'video/mpeg',
'mpg': 'video/mpeg',
'mpga': 'audio/mpeg',
'mp3': 'audio/mpeg',
'ms': 'application/x-troff-ms',
'nc': 'application/x-netcdf',
'oda': 'application/oda',
'pbm': 'image/x-portable-bitmap',
'pdb': 'chemical/x-pdb',
'pdf': 'application/pdf',
'pgm': 'image/x-portable-graymap',
'png': 'image/png',
'pnm': 'image/x-portable-anymap',
'ppm': 'image/x-portable-pixmap',
'ppt': 'application/powerpoint',
'ps': 'application/postscript',
'qt': 'video/quicktime',
'ra': 'audio/x-realaudio',
'ram': 'audio/x-pn-realaudio',
'ras': 'image/x-cmu-raster',
'rgb': 'image/x-rgb',
'roff': 'application/x-troff',
'rpm': 'audio/x-pn-realaudio-plugin',
'rtf': 'application/rtf',
'rtx': 'text/richtext',
'sgm': 'text/x-sgml',
'sgml': 'text/x-sgml',
'sh': 'application/x-sh',
'shar': 'application/x-shar',
'sit': 'application/x-stuffit',
'skd': 'application/x-koan',
'skm': 'application/x-koan',
'skp': 'application/x-koan',
'skt': 'application/x-koan',
'snd': 'audio/basic',
'src': 'application/x-wais-source',
'sv4cpio': 'application/x-sv4cpio',
'sv4crc': 'application/x-sv4crc',
't': 'application/x-troff',
'tar': 'application/x-tar',
'tcl': 'application/x-tcl',
'tex': 'application/x-tex',
'texi': 'application/x-texinfo',
'texinfo': 'application/x-texinfo',
'tif': 'image/tiff',
'tiff': 'image/tiff',
'tr': 'application/x-troff',
'tsv': 'text/tab-separated-values',
'txt': 'text/plain',
'ustar': 'application/x-ustar',
'vcd': 'application/x-cdlink',
'vrml': 'x-world/x-vrml',
'wav': 'audio/x-wav',
'wrl': 'x-world/x-vrml',
'xbm': 'image/x-xbitmap',
'xpm': 'image/x-xpixmap',
'xwd': 'image/x-xwindowdump',
'xyz': 'chemical/x-pdb',
'zip': 'application/zip',
}
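# Illustrative lookup (assumed usage): map a file extension to its content type.
#   content_type_map.get('html')                             # -> 'text/html'
#   content_type_map.get('foo', 'application/octet-stream')  # fallback for unknown extensions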
| gpl-2.0 | 4,761,771,291,149,950,000 | 33.876106 | 49 | 0.512814 | false |
KousikaGanesh/purchaseandInventory | openerp/tools/lru.py | 204 | 2946 | # -*- coding: utf-8 -*-
# taken from http://code.activestate.com/recipes/252524-length-limited-o1-lru-cache-implementation/
import threading
from func import synchronized
__all__ = ['LRU']
class LRUNode(object):
__slots__ = ['prev', 'next', 'me']
def __init__(self, prev, me):
self.prev = prev
self.me = me
self.next = None
class LRU(object):
"""
Implementation of a length-limited O(1) LRU queue.
Built for and used by PyPE:
http://pype.sourceforge.net
Copyright 2003 Josiah Carlson.
"""
def __init__(self, count, pairs=[]):
self._lock = threading.RLock()
self.count = max(count, 1)
self.d = {}
self.first = None
self.last = None
for key, value in pairs:
self[key] = value
@synchronized()
def __contains__(self, obj):
return obj in self.d
@synchronized()
def __getitem__(self, obj):
a = self.d[obj].me
self[a[0]] = a[1]
return a[1]
@synchronized()
def __setitem__(self, obj, val):
if obj in self.d:
del self[obj]
nobj = LRUNode(self.last, (obj, val))
if self.first is None:
self.first = nobj
if self.last:
self.last.next = nobj
self.last = nobj
self.d[obj] = nobj
if len(self.d) > self.count:
if self.first == self.last:
self.first = None
self.last = None
return
a = self.first
a.next.prev = None
self.first = a.next
a.next = None
del self.d[a.me[0]]
del a
@synchronized()
def __delitem__(self, obj):
nobj = self.d[obj]
if nobj.prev:
nobj.prev.next = nobj.next
else:
self.first = nobj.next
if nobj.next:
nobj.next.prev = nobj.prev
else:
self.last = nobj.prev
del self.d[obj]
@synchronized()
def __iter__(self):
cur = self.first
while cur is not None:
cur2 = cur.next
yield cur.me[1]
cur = cur2
@synchronized()
def __len__(self):
return len(self.d)
@synchronized()
def iteritems(self):
cur = self.first
while cur is not None:
cur2 = cur.next
yield cur.me
cur = cur2
@synchronized()
def iterkeys(self):
return iter(self.d)
@synchronized()
def itervalues(self):
for i,j in self.iteritems():
yield j
@synchronized()
def keys(self):
return self.d.keys()
@synchronized()
def pop(self,key):
v=self[key]
del self[key]
return v
@synchronized()
def clear(self):
self.d = {}
self.first = None
self.last = None
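# Illustrative usage (assumed; not part of the original module):
#   cache = LRU(2)
#   cache['a'] = 1
#   cache['b'] = 2
#   cache['c'] = 3     # capacity exceeded: 'a' (least recently used) is evicted
#   'a' in cache       # -> False
#   cache['b']         # -> 2, and marks 'b' as most recently used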
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 8,181,878,062,574,327,000 | 23.147541 | 99 | 0.510523 | false |
gloaec/trifle | src/trifle/anyconfig/backend/tests/backends.py | 1 | 1243 | #
# Copyright (C) 2012 Satoru SATOH <ssato @ redhat.com>
# License: MIT
#
import anyconfig.backend.backends as T
import unittest
class Test_00_pure_functions(unittest.TestCase):
def test_10_find_by_file(self):
ini_cf = "/a/b/c.ini"
unknown_cf = "/a/b/c.xyz"
jsn_cfs = ["/a/b/c.jsn", "/a/b/c.json", "/a/b/c.js"]
yml_cfs = ["/a/b/c.yml", "/a/b/c.yaml"]
        # NOTE: assumes find_by_file() returns the matching parser class.
        self.assertEqual(T.find_by_file(ini_cf), T.BINI.IniConfigParser)
        self.assertTrue(T.find_by_file(unknown_cf) is None)
        for f in jsn_cfs:
            self.assertEqual(T.find_by_file(f), T.BJSON.JsonConfigParser)
        for f in yml_cfs:
            self.assertEqual(T.find_by_file(f), T.BYAML.YamlConfigParser)
def test_20_find_by_type(self):
ini_t = "ini"
jsn_t = "json"
yml_t = "yaml"
unknown_t = "unknown_type"
        self.assertEqual(T.find_by_type(ini_t), T.BINI.IniConfigParser)
        self.assertEqual(T.find_by_type(jsn_t), T.BJSON.JsonConfigParser)
        self.assertEqual(T.find_by_type(yml_t), T.BYAML.YamlConfigParser)
self.assertTrue(T.find_by_type(unknown_t) is None)
def test_30_list_types(self):
types = T.list_types()
self.assertTrue(isinstance(types, list))
        self.assertTrue(bool(types))  # ensure it's not empty.
# vim:sw=4:ts=4:et:
| gpl-3.0 | -6,018,841,246,328,181,000 | 27.906977 | 61 | 0.600965 | false |
JioCloud/nova | nova/objects/numa.py | 24 | 8397 | # Copyright 2014 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils
from nova import exception
from nova.objects import base
from nova.objects import fields
from nova.virt import hardware
def all_things_equal(obj_a, obj_b):
for name in obj_a.fields:
set_a = obj_a.obj_attr_is_set(name)
set_b = obj_b.obj_attr_is_set(name)
if set_a != set_b:
return False
elif not set_a:
continue
if getattr(obj_a, name) != getattr(obj_b, name):
return False
return True
# TODO(berrange): Remove NovaObjectDictCompat
@base.NovaObjectRegistry.register
class NUMACell(base.NovaObject,
base.NovaObjectDictCompat):
# Version 1.0: Initial version
# Version 1.1: Added pinned_cpus and siblings fields
# Version 1.2: Added mempages field
VERSION = '1.2'
fields = {
'id': fields.IntegerField(read_only=True),
'cpuset': fields.SetOfIntegersField(),
'memory': fields.IntegerField(),
'cpu_usage': fields.IntegerField(default=0),
'memory_usage': fields.IntegerField(default=0),
'pinned_cpus': fields.SetOfIntegersField(),
'siblings': fields.ListOfSetsOfIntegersField(),
'mempages': fields.ListOfObjectsField('NUMAPagesTopology'),
}
obj_relationships = {
'mempages': [('1.2', '1.0')]
}
def __eq__(self, other):
return all_things_equal(self, other)
def __ne__(self, other):
return not (self == other)
@property
def free_cpus(self):
return self.cpuset - self.pinned_cpus or set()
@property
def free_siblings(self):
return [sibling_set & self.free_cpus
for sibling_set in self.siblings]
@property
def avail_cpus(self):
return len(self.free_cpus)
@property
def avail_memory(self):
return self.memory - self.memory_usage
def pin_cpus(self, cpus):
if self.pinned_cpus & cpus:
raise exception.CPUPinningInvalid(requested=list(cpus),
pinned=list(self.pinned_cpus))
self.pinned_cpus |= cpus
def unpin_cpus(self, cpus):
if (self.pinned_cpus & cpus) != cpus:
raise exception.CPUPinningInvalid(requested=list(cpus),
pinned=list(self.pinned_cpus))
self.pinned_cpus -= cpus
def _to_dict(self):
return {
'id': self.id,
'cpus': hardware.format_cpu_spec(
self.cpuset, allow_ranges=False),
'mem': {
'total': self.memory,
'used': self.memory_usage},
'cpu_usage': self.cpu_usage}
@classmethod
def _from_dict(cls, data_dict):
cpuset = hardware.parse_cpu_spec(
data_dict.get('cpus', ''))
cpu_usage = data_dict.get('cpu_usage', 0)
memory = data_dict.get('mem', {}).get('total', 0)
memory_usage = data_dict.get('mem', {}).get('used', 0)
cell_id = data_dict.get('id')
return cls(id=cell_id, cpuset=cpuset, memory=memory,
cpu_usage=cpu_usage, memory_usage=memory_usage,
mempages=[], pinned_cpus=set([]), siblings=[])
def can_fit_hugepages(self, pagesize, memory):
"""Returns whether memory can fit into hugepages size
        :param pagesize: a page size in KiB
:param memory: a memory size asked to fit in KiB
:returns: whether memory can fit in hugepages
:raises: MemoryPageSizeNotSupported if page size not supported
"""
for pages in self.mempages:
if pages.size_kb == pagesize:
return (memory <= pages.free_kb and
(memory % pages.size_kb) == 0)
raise exception.MemoryPageSizeNotSupported(pagesize=pagesize)
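# Illustrative example for can_fit_hugepages() (values assumed): with a cell whose
# mempages contain NUMAPagesTopology(size_kb=2048, total=512, used=0),
#   cell.can_fit_hugepages(2048, 4096)   # -> True  (two free 2 MiB pages cover 4096 KiB)
#   cell.can_fit_hugepages(2048, 4097)   # -> False (not a multiple of the page size)
#   cell.can_fit_hugepages(4, 4096)      # raises MemoryPageSizeNotSupported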
# TODO(berrange): Remove NovaObjectDictCompat
@base.NovaObjectRegistry.register
class NUMAPagesTopology(base.NovaObject,
base.NovaObjectDictCompat):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'size_kb': fields.IntegerField(),
'total': fields.IntegerField(),
'used': fields.IntegerField(default=0),
}
def __eq__(self, other):
return all_things_equal(self, other)
def __ne__(self, other):
return not (self == other)
@property
def free(self):
"""Returns the number of avail pages."""
return self.total - self.used
@property
def free_kb(self):
"""Returns the avail memory size in KiB."""
return self.free * self.size_kb
# TODO(berrange): Remove NovaObjectDictCompat
@base.NovaObjectRegistry.register
class NUMATopology(base.NovaObject,
base.NovaObjectDictCompat):
# Version 1.0: Initial version
# Version 1.1: Update NUMACell to 1.1
# Version 1.2: Update NUMACell to 1.2
VERSION = '1.2'
fields = {
'cells': fields.ListOfObjectsField('NUMACell'),
}
obj_relationships = {
'cells': [('1.0', '1.0'), ('1.1', '1.1'), ('1.2', '1.2')]
}
@classmethod
def obj_from_primitive(cls, primitive):
if 'nova_object.name' in primitive:
obj_topology = super(NUMATopology, cls).obj_from_primitive(
primitive)
else:
# NOTE(sahid): This compatibility code needs to stay until we can
# guarantee that there are no cases of the old format stored in
# the database (or forever, if we can never guarantee that).
obj_topology = NUMATopology._from_dict(primitive)
return obj_topology
def _to_json(self):
return jsonutils.dumps(self.obj_to_primitive())
@classmethod
def obj_from_db_obj(cls, db_obj):
return cls.obj_from_primitive(
jsonutils.loads(db_obj))
def __len__(self):
"""Defined so that boolean testing works the same as for lists."""
return len(self.cells)
def _to_dict(self):
# TODO(sahid): needs to be removed.
return {'cells': [cell._to_dict() for cell in self.cells]}
@classmethod
def _from_dict(cls, data_dict):
return cls(cells=[
NUMACell._from_dict(cell_dict)
for cell_dict in data_dict.get('cells', [])])
@base.NovaObjectRegistry.register
class NUMATopologyLimits(base.NovaObject):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'cpu_allocation_ratio': fields.FloatField(),
'ram_allocation_ratio': fields.FloatField(),
}
def to_dict_legacy(self, host_topology):
cells = []
for cell in host_topology.cells:
cells.append(
{'cpus': hardware.format_cpu_spec(
cell.cpuset, allow_ranges=False),
'mem': {'total': cell.memory,
'limit': cell.memory * self.ram_allocation_ratio},
'cpu_limit': len(cell.cpuset) * self.cpu_allocation_ratio,
'id': cell.id})
return {'cells': cells}
@classmethod
def obj_from_db_obj(cls, db_obj):
if 'nova_object.name' in db_obj:
obj_topology = cls.obj_from_primitive(db_obj)
else:
# NOTE(sahid): This compatibility code needs to stay until we can
# guarantee that all compute nodes are using RPC API => 3.40.
cell = db_obj['cells'][0]
ram_ratio = cell['mem']['limit'] / float(cell['mem']['total'])
cpu_ratio = cell['cpu_limit'] / float(len(hardware.parse_cpu_spec(
cell['cpus'])))
obj_topology = NUMATopologyLimits(
cpu_allocation_ratio=cpu_ratio,
ram_allocation_ratio=ram_ratio)
return obj_topology
| apache-2.0 | -7,834,457,552,893,495,000 | 32.321429 | 78 | 0.590092 | false |
AlgoHunt/nerual_style_transfer | nets/nets_factory.py | 31 | 5146 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains a factory for building various models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tensorflow as tf
from nets import alexnet
from nets import cifarnet
from nets import inception
from nets import lenet
from nets import mobilenet_v1
from nets import overfeat
from nets import resnet_v1
from nets import resnet_v2
from nets import vgg
slim = tf.contrib.slim
networks_map = {'alexnet_v2': alexnet.alexnet_v2,
'cifarnet': cifarnet.cifarnet,
'overfeat': overfeat.overfeat,
'vgg_a': vgg.vgg_a,
'vgg_16': vgg.vgg_16,
'vgg_19': vgg.vgg_19,
'inception_v1': inception.inception_v1,
'inception_v2': inception.inception_v2,
'inception_v3': inception.inception_v3,
'inception_v4': inception.inception_v4,
'inception_resnet_v2': inception.inception_resnet_v2,
'lenet': lenet.lenet,
'resnet_v1_50': resnet_v1.resnet_v1_50,
'resnet_v1_101': resnet_v1.resnet_v1_101,
'resnet_v1_152': resnet_v1.resnet_v1_152,
'resnet_v1_200': resnet_v1.resnet_v1_200,
'resnet_v2_50': resnet_v2.resnet_v2_50,
'resnet_v2_101': resnet_v2.resnet_v2_101,
'resnet_v2_152': resnet_v2.resnet_v2_152,
'resnet_v2_200': resnet_v2.resnet_v2_200,
'mobilenet_v1': mobilenet_v1.mobilenet_v1,
'mobilenet_v1_075': mobilenet_v1.mobilenet_v1_075,
'mobilenet_v1_050': mobilenet_v1.mobilenet_v1_050,
'mobilenet_v1_025': mobilenet_v1.mobilenet_v1_025,
}
arg_scopes_map = {'alexnet_v2': alexnet.alexnet_v2_arg_scope,
'cifarnet': cifarnet.cifarnet_arg_scope,
'overfeat': overfeat.overfeat_arg_scope,
'vgg_a': vgg.vgg_arg_scope,
'vgg_16': vgg.vgg_arg_scope,
'vgg_19': vgg.vgg_arg_scope,
'inception_v1': inception.inception_v3_arg_scope,
'inception_v2': inception.inception_v3_arg_scope,
'inception_v3': inception.inception_v3_arg_scope,
'inception_v4': inception.inception_v4_arg_scope,
'inception_resnet_v2':
inception.inception_resnet_v2_arg_scope,
'lenet': lenet.lenet_arg_scope,
'resnet_v1_50': resnet_v1.resnet_arg_scope,
'resnet_v1_101': resnet_v1.resnet_arg_scope,
'resnet_v1_152': resnet_v1.resnet_arg_scope,
'resnet_v1_200': resnet_v1.resnet_arg_scope,
'resnet_v2_50': resnet_v2.resnet_arg_scope,
'resnet_v2_101': resnet_v2.resnet_arg_scope,
'resnet_v2_152': resnet_v2.resnet_arg_scope,
'resnet_v2_200': resnet_v2.resnet_arg_scope,
'mobilenet_v1': mobilenet_v1.mobilenet_v1_arg_scope,
'mobilenet_v1_075': mobilenet_v1.mobilenet_v1_arg_scope,
'mobilenet_v1_050': mobilenet_v1.mobilenet_v1_arg_scope,
'mobilenet_v1_025': mobilenet_v1.mobilenet_v1_arg_scope,
}
def get_network_fn(name, num_classes, weight_decay=0.0, is_training=False):
"""Returns a network_fn such as `logits, end_points = network_fn(images)`.
Args:
name: The name of the network.
num_classes: The number of classes to use for classification.
weight_decay: The l2 coefficient for the model weights.
is_training: `True` if the model is being used for training and `False`
otherwise.
Returns:
network_fn: A function that applies the model to a batch of images. It has
the following signature:
logits, end_points = network_fn(images)
Raises:
ValueError: If network `name` is not recognized.
"""
if name not in networks_map:
    raise ValueError('Name of network unknown: %s' % name)
func = networks_map[name]
@functools.wraps(func)
def network_fn(images):
arg_scope = arg_scopes_map[name](weight_decay=weight_decay)
with slim.arg_scope(arg_scope):
return func(images, num_classes, is_training=is_training)
if hasattr(func, 'default_image_size'):
network_fn.default_image_size = func.default_image_size
return network_fn
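# Illustrative usage sketch (not part of the original file; assumes the TF-Slim
# nets listed above are importable and that the chosen network defines
# default_image_size):
#   network_fn = get_network_fn('vgg_16', num_classes=1000, is_training=False)
#   size = network_fn.default_image_size
#   images = tf.placeholder(tf.float32, [None, size, size, 3])
#   logits, end_points = network_fn(images)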
| apache-2.0 | -3,492,424,506,681,660,000 | 42.610169 | 80 | 0.608239 | false |
Pikecillo/genna | external/4Suite-XML-1.0.2/test/Xml/Core/__init__.py | 1 | 1056 | __revision__ = '$Id: __init__.py,v 1.12 2005/11/15 02:22:41 jkloth Exp $'
def PreprocessFiles(dirs, files):
"""
PreprocessFiles(dirs, files) -> (dirs, files)
This function is responsible for sorting and trimming the
file and directory lists as needed for proper testing.
"""
from Ft.Lib.TestSuite import RemoveTests, SortTests
ignored_files = ['test_domlette_readers',
'test_domlette_interfaces',
'test_domlette_memory',
'test_domlette_writers',
'test_catalog',
'test_ranges', # only supported by 4DOM
'test_get_all_ns',
'test_string_strip',
'test_split_qname',
]
RemoveTests(files, ignored_files)
ordered_files = ['test_domlette', 'test_saxlette']
SortTests(files, ordered_files)
ignored_dirs = []
RemoveTests(dirs, ignored_dirs)
ordered_dirs = []
SortTests(dirs, ordered_dirs)
return (dirs, files)
| gpl-2.0 | -1,441,631,121,599,679,000 | 31 | 73 | 0.557765 | false |
groutr/conda-tools | src/conda_tools/environment/history.py | 1 | 7101 | """
Adapted from conda/history.py
Licensed under BSD 3-clause license.
"""
from __future__ import print_function
import re
import time
from json import loads
from os.path import isfile, join
from functools import lru_cache
from ..common import lazyproperty
class CondaHistoryException(Exception):
pass
class CondaHistoryWarning(Warning):
pass
def dist2pair(dist):
dist = str(dist)
if dist.endswith(']'):
dist = dist.split('[', 1)[0]
if dist.endswith('.tar.bz2'):
dist = dist[:-8]
parts = dist.split('::', 1)
return 'defaults' if len(parts) < 2 else parts[0], parts[-1]
def dist2quad(dist):
channel, dist = dist2pair(dist)
parts = dist.rsplit('-', 2) + ['', '']
return (parts[0], parts[1], parts[2], channel)
def is_diff(content):
return any(s.startswith(('-', '+')) for s in content)
def pretty_diff(diff):
added = {}
removed = {}
for s in diff:
fn = s[1:]
name, version, _, channel = dist2quad(fn)
if channel != 'defaults':
version += ' (%s)' % channel
if s.startswith('-'):
removed[name.lower()] = version
elif s.startswith('+'):
added[name.lower()] = version
changed = set(added) & set(removed)
for name in sorted(changed):
yield ' %s {%s -> %s}' % (name, removed[name], added[name])
for name in sorted(set(removed) - changed):
yield '-%s-%s' % (name, removed[name])
for name in sorted(set(added) - changed):
yield '+%s-%s' % (name, added[name])
def pretty_content(content):
if is_diff(content):
return pretty_diff(content)
else:
return iter(sorted(content))
class History(object):
def __init__(self, prefix):
meta_dir = join(prefix, 'conda-meta')
self.path = join(meta_dir, 'history')
@lazyproperty
def _parse(self):
"""
parse the history file and return a list of
tuples(datetime strings, set of distributions/diffs, comments)
"""
res = []
if not isfile(self.path):
return res
sep_pat = re.compile(r'==>\s*(.+?)\s*<==')
with open(self.path, 'r') as f:
lines = f.read().splitlines()
for line in lines:
line = line.strip()
if not line:
continue
m = sep_pat.match(line)
if m:
res.append((m.group(1), set(), []))
elif line.startswith('#'):
res[-1][2].append(line)
else:
res[-1][1].add(line)
return res
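    # Illustrative excerpt of the conda-meta/history format parsed above
    # (assumed layout, not taken from a real environment):
    #   ==> 2016-01-01 12:00:00 <==
    #   # cmd: conda install numpy
    #   +defaults::numpy-1.11.0-py35_0
    #   -defaults::numpy-1.10.4-py35_0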
@lazyproperty
def get_user_requests(self):
"""
return a list of user requested items. Each item is a dict with the
following keys:
'date': the date and time running the command
'cmd': a list of argv of the actual command which was run
'action': install/remove/update
'specs': the specs being used
"""
res = []
com_pat = re.compile(r'#\s*cmd:\s*(.+)')
spec_pat = re.compile(r'#\s*(\w+)\s*specs:\s*(.+)')
for dt, unused_cont, comments in self._parse:
item = {'date': dt}
for line in comments:
m = com_pat.match(line)
if m:
argv = m.group(1).split()
if argv[0].endswith('conda'):
argv[0] = 'conda'
item['cmd'] = argv
m = spec_pat.match(line)
if m:
action, specs = m.groups()
item['action'] = action
item['specs'] = loads(specs.replace("'", "\""))
if 'cmd' in item:
res.append(item)
return res
@lazyproperty
def construct_states(self):
"""
return a list of tuples(datetime strings, set of distributions)
"""
res = []
cur = set([])
for dt, cont, unused_com in self._parse:
if not is_diff(cont):
cur = cont
else:
for s in cont:
if s.startswith('-'):
cur.discard(s[1:])
elif s.startswith('+'):
cur.add(s[1:])
else:
raise CondaHistoryException('Did not expect: %s' % s)
res.append((dt, cur.copy()))
return res
def get_state(self, rev=-1):
"""
return the state, i.e. the set of distributions, for a given revision,
defaults to latest (which is the same as the current state when
the log file is up-to-date)
"""
states = self.construct_states
if not states:
return set([])
times, pkgs = zip(*states)
return pkgs[rev]
def print_log(self):
for i, (date, content, unused_com) in enumerate(self._parse):
print('%s (rev %d)' % (date, i))
for line in pretty_content(content):
print(' %s' % line)
print()
@lazyproperty
def object_log(self):
result = []
for i, (date, content, unused_com) in enumerate(self._parse):
# Based on Mateusz's code; provides more details about the
# history event
event = {
'date': date,
'rev': i,
'install': [],
'remove': [],
'upgrade': [],
'downgrade': []
}
added = {}
removed = {}
if is_diff(content):
for pkg in content:
name, version, build, channel = dist2quad(pkg[1:])
if pkg.startswith('+'):
added[name.lower()] = (version, build, channel)
elif pkg.startswith('-'):
removed[name.lower()] = (version, build, channel)
changed = set(added) & set(removed)
for name in sorted(changed):
old = removed[name]
new = added[name]
details = {
'old': '-'.join((name,) + old),
'new': '-'.join((name,) + new)
}
if new > old:
event['upgrade'].append(details)
else:
event['downgrade'].append(details)
for name in sorted(set(removed) - changed):
event['remove'].append('-'.join((name,) + removed[name]))
for name in sorted(set(added) - changed):
event['install'].append('-'.join((name,) + added[name]))
else:
for pkg in sorted(content):
event['install'].append(pkg)
result.append(event)
return result
def __repr__(self):
return 'History({}) @ {}'.format(self.path, hex(id(self)))
def __str__(self):
return 'History({})'.format(self.path)
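# Hypothetical usage sketch (the environment prefix below is an assumption):
#   hist = History('/opt/conda/envs/py35')
#   hist.print_log()
#   current_dists = hist.get_state()  # set of dists at the latest revision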
| bsd-3-clause | -1,287,298,767,413,866,800 | 30.281938 | 78 | 0.47782 | false |
jordanemedlock/psychtruths | temboo/Library/Basecamp/UpdateEntry.py | 5 | 5739 | # -*- coding: utf-8 -*-
###############################################################################
#
# UpdateEntry
# Updates a calendar event or milestone in a project you specify.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class UpdateEntry(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the UpdateEntry Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(UpdateEntry, self).__init__(temboo_session, '/Library/Basecamp/UpdateEntry')
def new_input_set(self):
return UpdateEntryInputSet()
def _make_result_set(self, result, path):
return UpdateEntryResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return UpdateEntryChoreographyExecution(session, exec_id, path)
class UpdateEntryInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the UpdateEntry
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AccountName(self, value):
"""
Set the value of the AccountName input for this Choreo. ((required, string) A valid Basecamp account name. This is the first part of the account's URL.)
"""
super(UpdateEntryInputSet, self)._set_input('AccountName', value)
def set_EndDate(self, value):
"""
Set the value of the EndDate input for this Choreo. ((required, date) The new end date for the updated entry, in the format YYYY-MM-DD.)
"""
super(UpdateEntryInputSet, self)._set_input('EndDate', value)
def set_EntryID(self, value):
"""
Set the value of the EntryID input for this Choreo. ((required, integer) The ID for the calendar entry to update.)
"""
super(UpdateEntryInputSet, self)._set_input('EntryID', value)
def set_Password(self, value):
"""
Set the value of the Password input for this Choreo. ((required, password) The Basecamp account password. Use the value 'X' when specifying an API Key for the Username input.)
"""
super(UpdateEntryInputSet, self)._set_input('Password', value)
def set_ProjectID(self, value):
"""
Set the value of the ProjectID input for this Choreo. ((required, integer) The ID of the project with the calendar entry to update.)
"""
super(UpdateEntryInputSet, self)._set_input('ProjectID', value)
def set_ResponsibleParty(self, value):
"""
Set the value of the ResponsibleParty input for this Choreo. ((optional, any) The user ID or company ID (preceded by a “c”, as in "c1234") to reassign the entry to. Applies only to "Milestone" entry types.)
"""
super(UpdateEntryInputSet, self)._set_input('ResponsibleParty', value)
def set_StartDate(self, value):
"""
Set the value of the StartDate input for this Choreo. ((optional, date) The new start date for the updated entry, in the format YYYY-MM-DD.)
"""
super(UpdateEntryInputSet, self)._set_input('StartDate', value)
def set_Title(self, value):
"""
Set the value of the Title input for this Choreo. ((optional, string) The new title for the updated entry.)
"""
super(UpdateEntryInputSet, self)._set_input('Title', value)
def set_Type(self, value):
"""
Set the value of the Type input for this Choreo. ((optional, string) The new type for the updated entry, either "CalendarEvent" (the default) or "Milestone".)
"""
super(UpdateEntryInputSet, self)._set_input('Type', value)
def set_Username(self, value):
"""
Set the value of the Username input for this Choreo. ((required, string) A Basecamp account username or API Key.)
"""
super(UpdateEntryInputSet, self)._set_input('Username', value)
class UpdateEntryResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the UpdateEntry Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((xml) The response returned from Basecamp.)
"""
return self._output.get('Response', None)
def get_TemplateOutput(self):
"""
Retrieve the value for the "TemplateOutput" output from this Choreo execution. (The request created from the input template.)
"""
return self._output.get('TemplateOutput', None)
class UpdateEntryChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return UpdateEntryResultSet(response, path)
| apache-2.0 | 3,255,442,861,134,114,300 | 42.44697 | 214 | 0.662075 | false |
youprofit/zato | code/zato-web-admin/src/zato/admin/web/forms/load_balancer.py | 7 | 3670 | # -*- coding: utf-8 -*-
"""
Copyright (C) 2010 Dariusz Suchojad <dsuch at zato.io>
Licensed under LGPLv3, see LICENSE.txt for terms and conditions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# stdlib
from operator import itemgetter
# Django
from django import forms
# Zato
from zato.common.haproxy import timeouts, http_log
from zato.common.util import make_repr
def populate_choices(form, fields_choices):
""" A convenience function used in several places for populating a given
form's SELECT choices.
"""
for field_name, choices in fields_choices:
form.fields[field_name].choices = []
choices = sorted(choices.items(), key=itemgetter(0))
for choice_id, choice_info in choices:
choice_name = choice_info[1]
form.fields[field_name].choices.append([choice_id, choice_name])
class ManageLoadBalancerForm(forms.Form):
""" Form for the graphical management of HAProxy.
"""
global_log_host = forms.CharField(widget=forms.TextInput(attrs={"class":"required", "style":"width:70%"}))
global_log_port = forms.CharField(widget=forms.TextInput(attrs={"class":"required validate-digits", "style":"width:70%"}))
global_log_facility = forms.CharField(widget=forms.TextInput(attrs={"class":"required", "style":"width:70%"}))
global_log_level = forms.CharField(widget=forms.TextInput(attrs={"class":"required", "style":"width:70%"}))
timeout_connect = forms.CharField(widget=forms.TextInput(attrs={"class":"required validate-digits", "style":"width:30%"}))
timeout_client = forms.CharField(widget=forms.TextInput(attrs={"class":"required validate-digits", "style":"width:30%"}))
timeout_server = forms.CharField(widget=forms.TextInput(attrs={"class":"required validate-digits", "style":"width:30%"}))
http_plain_bind_address = forms.CharField(widget=forms.TextInput(attrs={"class":"required", "style":"width:70%"}))
http_plain_bind_port = forms.CharField(widget=forms.TextInput(attrs={"class":"required validate-digits", "style":"width:30%"}))
http_plain_log_http_requests = forms.ChoiceField()
http_plain_maxconn = forms.CharField(widget=forms.TextInput(attrs={"class":"required validate-digits", "style":"width:30%"}))
http_plain_monitor_uri = forms.CharField(widget=forms.TextInput(attrs={"class":"required", "style":"width:70%"}))
def __init__(self, initial={}):
super(ManageLoadBalancerForm, self).__init__(initial=initial)
fields_choices = (
("http_plain_log_http_requests", http_log),
)
populate_choices(self, fields_choices)
def __repr__(self):
return make_repr(self)
class ManageLoadBalancerSourceCodeForm(forms.Form):
""" Form for the source code-level management of HAProxy.
"""
source_code = forms.CharField(widget=forms.Textarea(attrs={"style":"overflow:auto; width:100%; white-space: pre-wrap;height:400px"}))
class RemoteCommandForm(forms.Form):
""" Form for the direct interface to HAProxy's commands.
"""
command = forms.ChoiceField()
timeout = forms.ChoiceField()
extra = forms.CharField(widget=forms.TextInput(attrs={"style":"width:40%"}))
result = forms.CharField(widget=forms.Textarea(attrs={"style":"overflow:auto; width:100%; white-space: pre-wrap;height:400px"}))
def __init__(self, commands, initial={}):
super(RemoteCommandForm, self).__init__(initial=initial)
fields_choices = (
("command", commands),
("timeout", timeouts),
)
populate_choices(self, fields_choices)
def __repr__(self):
return make_repr(self)
| gpl-3.0 | 4,184,276,006,662,553,600 | 42.690476 | 137 | 0.681744 | false |
gVallverdu/pymatgen | pymatgen/alchemy/materials.py | 4 | 14429 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module provides various representations of transformed structures. A
TransformedStructure is a structure that has been modified by undergoing a
series of transformations.
"""
import os
import re
import json
import datetime
from monty.json import MontyDecoder, jsanitize
from pymatgen.core.structure import Structure
from pymatgen.io.cif import CifParser
from pymatgen.io.vasp.inputs import Poscar
from monty.json import MSONable
from pymatgen.io.vasp.sets import MPRelaxSet
from warnings import warn
__author__ = "Shyue Ping Ong, Will Richards"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "[email protected]"
__date__ = "Mar 2, 2012"
dec = MontyDecoder()
class TransformedStructure(MSONable):
"""
Container object for new structures that include history of
transformations.
Each transformed structure is made up of a sequence of structures with
associated transformation history.
"""
def __init__(self, structure, transformations=None, history=None,
other_parameters=None):
"""
Initializes a transformed structure from a structure.
Args:
structure (Structure): Input structure
transformations ([Transformations]): List of transformations to
apply.
history (list): Previous history.
other_parameters (dict): Additional parameters to be added.
"""
self.final_structure = structure
self.history = history or []
self.other_parameters = other_parameters or {}
self._undone = []
transformations = transformations or []
for t in transformations:
self.append_transformation(t)
def undo_last_change(self):
"""
Undo the last change in the TransformedStructure.
Raises:
IndexError: If already at the oldest change.
"""
if len(self.history) == 0:
raise IndexError("Can't undo. Already at oldest change.")
if 'input_structure' not in self.history[-1]:
raise IndexError("Can't undo. Latest history has no "
"input_structure")
h = self.history.pop()
self._undone.append((h, self.final_structure))
s = h["input_structure"]
if isinstance(s, dict):
s = Structure.from_dict(s)
self.final_structure = s
def redo_next_change(self):
"""
Redo the last undone change in the TransformedStructure.
Raises:
IndexError: If already at the latest change.
"""
if len(self._undone) == 0:
raise IndexError("Can't redo. Already at latest change.")
h, s = self._undone.pop()
self.history.append(h)
self.final_structure = s
def __getattr__(self, name):
s = object.__getattribute__(self, 'final_structure')
return getattr(s, name)
def __len__(self):
return len(self.history)
def append_transformation(self, transformation, return_alternatives=False,
clear_redo=True):
"""
Appends a transformation to the TransformedStructure.
Args:
transformation: Transformation to append
return_alternatives: Whether to return alternative
TransformedStructures for one-to-many transformations.
return_alternatives can be a number, which stipulates the
total number of structures to return.
clear_redo: Boolean indicating whether to clear the redo list.
By default, this is True, meaning any appends clears the
history of undoing. However, when using append_transformation
to do a redo, the redo list should not be cleared to allow
multiple redos.
"""
if clear_redo:
self._undone = []
if return_alternatives and transformation.is_one_to_many:
ranked_list = transformation.apply_transformation(
self.final_structure, return_ranked_list=return_alternatives)
input_structure = self.final_structure.as_dict()
alts = []
for x in ranked_list[1:]:
s = x.pop("structure")
actual_transformation = x.pop("transformation", transformation)
hdict = actual_transformation.as_dict()
hdict["input_structure"] = input_structure
hdict["output_parameters"] = x
self.final_structure = s
d = self.as_dict()
d['history'].append(hdict)
d['final_structure'] = s.as_dict()
alts.append(TransformedStructure.from_dict(d))
x = ranked_list[0]
s = x.pop("structure")
actual_transformation = x.pop("transformation", transformation)
hdict = actual_transformation.as_dict()
hdict["input_structure"] = self.final_structure.as_dict()
hdict["output_parameters"] = x
self.history.append(hdict)
self.final_structure = s
return alts
else:
s = transformation.apply_transformation(self.final_structure)
hdict = transformation.as_dict()
hdict["input_structure"] = self.final_structure.as_dict()
hdict["output_parameters"] = {}
self.history.append(hdict)
self.final_structure = s
def append_filter(self, structure_filter):
"""
Adds a filter.
Args:
            structure_filter (StructureFilter): A filter implementing the
                AbstractStructureFilter API. Tells the transmuter what structures
to retain.
"""
hdict = structure_filter.as_dict()
hdict["input_structure"] = self.final_structure.as_dict()
self.history.append(hdict)
def extend_transformations(self, transformations,
return_alternatives=False):
"""
Extends a sequence of transformations to the TransformedStructure.
Args:
transformations: Sequence of Transformations
return_alternatives: Whether to return alternative
TransformedStructures for one-to-many transformations.
return_alternatives can be a number, which stipulates the
total number of structures to return.
"""
for t in transformations:
self.append_transformation(t,
return_alternatives=return_alternatives)
def get_vasp_input(self, vasp_input_set=MPRelaxSet, **kwargs):
"""
Returns VASP input as a dict of vasp objects.
Args:
vasp_input_set (pymatgen.io.vaspio_set.VaspInputSet): input set
to create vasp input files from structures
"""
d = vasp_input_set(self.final_structure, **kwargs).get_vasp_input()
d["transformations.json"] = json.dumps(self.as_dict())
return d
def write_vasp_input(self, vasp_input_set=MPRelaxSet, output_dir=".",
create_directory=True, **kwargs):
r"""
Writes VASP input to an output_dir.
Args:
vasp_input_set:
pymatgen.io.vaspio_set.VaspInputSet like object that creates
vasp input files from structures
output_dir: Directory to output files
create_directory: Create the directory if not present. Defaults to
True.
**kwargs: All keyword args supported by the VASP input set.
"""
vasp_input_set(self.final_structure, **kwargs).write_input(
output_dir, make_dir_if_not_present=create_directory)
with open(os.path.join(output_dir, "transformations.json"), "w") as fp:
json.dump(self.as_dict(), fp)
def __str__(self):
output = ["Current structure", "------------",
str(self.final_structure),
"\nHistory",
"------------"]
for h in self.history:
h.pop('input_structure', None)
output.append(str(h))
output.append("\nOther parameters")
output.append("------------")
output.append(str(self.other_parameters))
return "\n".join(output)
def set_parameter(self, key, value):
"""
Set a parameter
:param key: The string key
:param value: The value.
"""
self.other_parameters[key] = value
@property
def was_modified(self):
"""
        Boolean describing whether the last transformation on the structure
        made any alterations to it. One example of when this would return
        False is performing a substitution transformation on a structure
        when the specie to replace isn't present in the structure.
"""
return not self.final_structure == self.structures[-2]
@property
def structures(self):
"""
Copy of all structures in the TransformedStructure. A
structure is stored after every single transformation.
"""
hstructs = [Structure.from_dict(s['input_structure'])
for s in self.history if 'input_structure' in s]
return hstructs + [self.final_structure]
@staticmethod
def from_cif_string(cif_string, transformations=None, primitive=True,
occupancy_tolerance=1.):
"""
Generates TransformedStructure from a cif string.
Args:
cif_string (str): Input cif string. Should contain only one
structure. For cifs containing multiple structures, please use
CifTransmuter.
transformations ([Transformations]): Sequence of transformations
to be applied to the input structure.
primitive (bool): Option to set if the primitive cell should be
extracted. Defaults to True. However, there are certain
instances where you might want to use a non-primitive cell,
e.g., if you are trying to generate all possible orderings of
partial removals or order a disordered structure.
occupancy_tolerance (float): If total occupancy of a site is
between 1 and occupancy_tolerance, the occupancies will be
scaled down to 1.
Returns:
TransformedStructure
"""
parser = CifParser.from_string(cif_string, occupancy_tolerance)
raw_string = re.sub(r"'", "\"", cif_string)
cif_dict = parser.as_dict()
cif_keys = list(cif_dict.keys())
s = parser.get_structures(primitive)[0]
partial_cif = cif_dict[cif_keys[0]]
if "_database_code_ICSD" in partial_cif:
source = partial_cif["_database_code_ICSD"] + "-ICSD"
else:
source = "uploaded cif"
source_info = {"source": source,
"datetime": str(datetime.datetime.now()),
"original_file": raw_string,
"cif_data": cif_dict[cif_keys[0]]}
return TransformedStructure(s, transformations, history=[source_info])
@staticmethod
def from_poscar_string(poscar_string, transformations=None):
"""
Generates TransformedStructure from a poscar string.
Args:
poscar_string (str): Input POSCAR string.
transformations ([Transformations]): Sequence of transformations
to be applied to the input structure.
"""
p = Poscar.from_string(poscar_string)
if not p.true_names:
raise ValueError("Transformation can be craeted only from POSCAR "
"strings with proper VASP5 element symbols.")
raw_string = re.sub(r"'", "\"", poscar_string)
s = p.structure
source_info = {"source": "POSCAR",
"datetime": str(datetime.datetime.now()),
"original_file": raw_string}
return TransformedStructure(s, transformations, history=[source_info])
def as_dict(self):
"""
Dict representation of the TransformedStructure.
"""
d = self.final_structure.as_dict()
d["@module"] = self.__class__.__module__
d["@class"] = self.__class__.__name__
d["history"] = jsanitize(self.history)
d["version"] = __version__
d["last_modified"] = str(datetime.datetime.utcnow())
d["other_parameters"] = jsanitize(self.other_parameters)
return d
@classmethod
def from_dict(cls, d):
"""
Creates a TransformedStructure from a dict.
"""
s = Structure.from_dict(d)
return cls(s, history=d["history"],
other_parameters=d.get("other_parameters", None))
def to_snl(self, authors, **kwargs):
"""
Generate SNL from TransformedStructure.
:param authors: List of authors
:param **kwargs: All kwargs supported by StructureNL.
:return: StructureNL
"""
if self.other_parameters:
warn('Data in TransformedStructure.other_parameters discarded '
'during type conversion to SNL')
hist = []
for h in self.history:
snl_metadata = h.pop('_snl', {})
hist.append({'name': snl_metadata.pop('name', 'pymatgen'),
'url': snl_metadata.pop(
'url', 'http://pypi.python.org/pypi/pymatgen'),
'description': h})
from pymatgen.util.provenance import StructureNL
return StructureNL(self.final_structure, authors, history=hist, **kwargs)
@classmethod
def from_snl(cls, snl):
"""
Create TransformedStructure from SNL.
Args:
snl (StructureNL): Starting snl
Returns:
TransformedStructure
"""
hist = []
for h in snl.history:
d = h.description
d['_snl'] = {'url': h.url, 'name': h.name}
hist.append(d)
return cls(snl.structure, history=hist)
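# Minimal usage sketch (illustrative; assumes a Structure instance and the
# standard pymatgen transformations package are available):
#   from pymatgen.transformations.standard_transformations import (
#       SupercellTransformation)
#   ts = TransformedStructure(
#       structure, [SupercellTransformation.from_scaling_factors(2, 2, 2)])
#   ts.write_vasp_input(output_dir="vasp_run")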
| mit | 7,620,490,389,961,376,000 | 36.673629 | 81 | 0.585903 | false |
misterhat/youtube-dl | youtube_dl/extractor/hellporno.py | 153 | 2279 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
js_to_json,
remove_end,
)
class HellPornoIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?hellporno\.com/videos/(?P<id>[^/]+)'
_TEST = {
'url': 'http://hellporno.com/videos/dixie-is-posing-with-naked-ass-very-erotic/',
'md5': '1fee339c610d2049699ef2aa699439f1',
'info_dict': {
'id': '149116',
'display_id': 'dixie-is-posing-with-naked-ass-very-erotic',
'ext': 'mp4',
'title': 'Dixie is posing with naked ass very erotic',
'thumbnail': 're:https?://.*\.jpg$',
'age_limit': 18,
}
}
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
title = remove_end(self._html_search_regex(
r'<title>([^<]+)</title>', webpage, 'title'), ' - Hell Porno')
flashvars = self._parse_json(self._search_regex(
r'var\s+flashvars\s*=\s*({.+?});', webpage, 'flashvars'),
display_id, transform_source=js_to_json)
video_id = flashvars.get('video_id')
thumbnail = flashvars.get('preview_url')
ext = flashvars.get('postfix', '.mp4')[1:]
formats = []
for video_url_key in ['video_url', 'video_alt_url']:
video_url = flashvars.get(video_url_key)
if not video_url:
continue
video_text = flashvars.get('%s_text' % video_url_key)
fmt = {
'url': video_url,
'ext': ext,
'format_id': video_text,
}
m = re.search(r'^(?P<height>\d+)[pP]', video_text)
if m:
fmt['height'] = int(m.group('height'))
formats.append(fmt)
self._sort_formats(formats)
categories = self._html_search_meta(
'keywords', webpage, 'categories', default='').split(',')
return {
'id': video_id,
'display_id': display_id,
'title': title,
'thumbnail': thumbnail,
'categories': categories,
'age_limit': 18,
'formats': formats,
}
| unlicense | -5,847,943,721,423,577,000 | 31.098592 | 89 | 0.51075 | false |
farmisen/electron | script/create-dist.py | 65 | 5723 | #!/usr/bin/env python
import os
import re
import shutil
import subprocess
import sys
import stat
from lib.config import LIBCHROMIUMCONTENT_COMMIT, BASE_URL, PLATFORM, \
get_target_arch, get_chromedriver_version
from lib.util import scoped_cwd, rm_rf, get_atom_shell_version, make_zip, \
execute, atom_gyp
ATOM_SHELL_VERSION = get_atom_shell_version()
SOURCE_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
DIST_DIR = os.path.join(SOURCE_ROOT, 'dist')
OUT_DIR = os.path.join(SOURCE_ROOT, 'out', 'R')
CHROMIUM_DIR = os.path.join(SOURCE_ROOT, 'vendor', 'brightray', 'vendor',
'download', 'libchromiumcontent', 'static_library')
PROJECT_NAME = atom_gyp()['project_name%']
PRODUCT_NAME = atom_gyp()['product_name%']
TARGET_BINARIES = {
'darwin': [
],
'win32': [
'{0}.exe'.format(PROJECT_NAME), # 'electron.exe'
'content_shell.pak',
'd3dcompiler_47.dll',
'icudtl.dat',
'libEGL.dll',
'libGLESv2.dll',
'msvcp120.dll',
'msvcr120.dll',
'node.dll',
'pdf.dll',
'content_resources_200_percent.pak',
'ui_resources_200_percent.pak',
'xinput1_3.dll',
'natives_blob.bin',
'snapshot_blob.bin',
'vccorlib120.dll',
],
'linux': [
PROJECT_NAME, # 'electron'
'content_shell.pak',
'icudtl.dat',
'libnode.so',
'natives_blob.bin',
'snapshot_blob.bin',
],
}
TARGET_DIRECTORIES = {
'darwin': [
'{0}.app'.format(PRODUCT_NAME),
],
'win32': [
'resources',
'locales',
],
'linux': [
'resources',
'locales',
],
}
SYSTEM_LIBRARIES = [
'libgcrypt.so',
'libnotify.so',
]
def main():
rm_rf(DIST_DIR)
os.makedirs(DIST_DIR)
target_arch = get_target_arch()
force_build()
create_symbols()
copy_binaries()
copy_chrome_binary('chromedriver')
copy_chrome_binary('mksnapshot')
copy_license()
if PLATFORM == 'linux':
strip_binaries()
if target_arch != 'arm':
copy_system_libraries()
create_version()
create_dist_zip()
create_chrome_binary_zip('chromedriver', get_chromedriver_version())
create_chrome_binary_zip('mksnapshot', ATOM_SHELL_VERSION)
create_symbols_zip()
def force_build():
build = os.path.join(SOURCE_ROOT, 'script', 'build.py')
execute([sys.executable, build, '-c', 'Release'])
def copy_binaries():
for binary in TARGET_BINARIES[PLATFORM]:
shutil.copy2(os.path.join(OUT_DIR, binary), DIST_DIR)
for directory in TARGET_DIRECTORIES[PLATFORM]:
shutil.copytree(os.path.join(OUT_DIR, directory),
os.path.join(DIST_DIR, directory),
symlinks=True)
def copy_chrome_binary(binary):
if PLATFORM == 'win32':
binary += '.exe'
src = os.path.join(CHROMIUM_DIR, binary)
dest = os.path.join(DIST_DIR, binary)
# Copy file and keep the executable bit.
shutil.copyfile(src, dest)
os.chmod(dest, os.stat(dest).st_mode | stat.S_IEXEC)
def copy_license():
shutil.copy2(os.path.join(SOURCE_ROOT, 'LICENSE'), DIST_DIR)
def strip_binaries():
if get_target_arch() == 'arm':
strip = 'arm-linux-gnueabihf-strip'
else:
strip = 'strip'
for binary in TARGET_BINARIES[PLATFORM]:
if binary.endswith('.so') or '.' not in binary:
execute([strip, os.path.join(DIST_DIR, binary)])
def copy_system_libraries():
  executable_path = os.path.join(OUT_DIR, PROJECT_NAME) # out/R/electron
ldd = execute(['ldd', executable_path])
lib_re = re.compile('\t(.*) => (.+) \(.*\)$')
for line in ldd.splitlines():
m = lib_re.match(line)
if not m:
continue
for i, library in enumerate(SYSTEM_LIBRARIES):
real_library = m.group(1)
if real_library.startswith(library):
shutil.copyfile(m.group(2), os.path.join(DIST_DIR, real_library))
SYSTEM_LIBRARIES[i] = real_library
def create_version():
version_path = os.path.join(SOURCE_ROOT, 'dist', 'version')
with open(version_path, 'w') as version_file:
version_file.write(ATOM_SHELL_VERSION)
def create_symbols():
destination = os.path.join(DIST_DIR, '{0}.breakpad.syms'.format(PROJECT_NAME))
dump_symbols = os.path.join(SOURCE_ROOT, 'script', 'dump-symbols.py')
execute([sys.executable, dump_symbols, destination])
def create_dist_zip():
dist_name = '{0}-{1}-{2}-{3}.zip'.format(PROJECT_NAME, ATOM_SHELL_VERSION,
PLATFORM, get_target_arch())
zip_file = os.path.join(SOURCE_ROOT, 'dist', dist_name)
with scoped_cwd(DIST_DIR):
files = TARGET_BINARIES[PLATFORM] + ['LICENSE', 'version']
if PLATFORM == 'linux':
files += [lib for lib in SYSTEM_LIBRARIES if os.path.exists(lib)]
dirs = TARGET_DIRECTORIES[PLATFORM]
make_zip(zip_file, files, dirs)
def create_chrome_binary_zip(binary, version):
dist_name = '{0}-{1}-{2}-{3}.zip'.format(binary, version, PLATFORM,
get_target_arch())
zip_file = os.path.join(SOURCE_ROOT, 'dist', dist_name)
with scoped_cwd(DIST_DIR):
files = ['LICENSE']
if PLATFORM == 'win32':
files += [binary + '.exe']
else:
files += [binary]
make_zip(zip_file, files, [])
def create_symbols_zip():
dist_name = '{0}-{1}-{2}-{3}-symbols.zip'.format(PROJECT_NAME,
ATOM_SHELL_VERSION,
PLATFORM,
get_target_arch())
zip_file = os.path.join(SOURCE_ROOT, 'dist', dist_name)
with scoped_cwd(DIST_DIR):
files = ['LICENSE', 'version']
dirs = ['{0}.breakpad.syms'.format(PROJECT_NAME)]
make_zip(zip_file, files, dirs)
if __name__ == '__main__':
sys.exit(main())
| mit | 6,543,523,820,929,492,000 | 26.252381 | 80 | 0.608597 | false |
jorik041/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/checkout/scm/scm_mock.py | 122 | 4889 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from webkitpy.common.checkout.scm import CommitMessage
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.executive_mock import MockExecutive
class MockSCM(object):
def __init__(self, filesystem=None, executive=None):
self.checkout_root = "/mock-checkout"
self.added_paths = set()
self._filesystem = filesystem or MockFileSystem()
self._executive = executive or MockExecutive()
def add(self, destination_path):
self.add_list([destination_path])
def add_list(self, destination_paths):
self.added_paths.update(set(destination_paths))
def has_working_directory_changes(self):
return False
def discard_working_directory_changes(self):
pass
def supports_local_commits(self):
return True
def has_local_commits(self):
return False
def discard_local_commits(self):
pass
def discard_local_changes(self):
pass
def exists(self, path):
# TestRealMain.test_real_main (and several other rebaseline tests) are sensitive to this return value.
# We should make those tests more robust, but for now we just return True always (since no test needs otherwise).
return True
def absolute_path(self, *comps):
return self._filesystem.join(self.checkout_root, *comps)
def changed_files(self, git_commit=None):
return ["MockFile1"]
def changed_files_for_revision(self, revision):
return ["MockFile1"]
def head_svn_revision(self):
return '1234'
def svn_revision(self, path):
return '5678'
def timestamp_of_revision(self, path, revision):
return '2013-02-01 08:48:05 +0000'
def create_patch(self, git_commit, changed_files=None):
return "Patch1"
def commit_ids_from_commitish_arguments(self, args):
return ["Commitish1", "Commitish2"]
def committer_email_for_revision(self, revision):
return "[email protected]"
def commit_locally_with_message(self, message):
pass
def commit_with_message(self, message, username=None, password=None, git_commit=None, force_squash=False, changed_files=None):
pass
def merge_base(self, git_commit):
return None
def commit_message_for_local_commit(self, commit_id):
if commit_id == "Commitish1":
return CommitMessage("CommitMessage1\n" \
"https://bugs.example.org/show_bug.cgi?id=50000\n")
if commit_id == "Commitish2":
return CommitMessage("CommitMessage2\n" \
"https://bugs.example.org/show_bug.cgi?id=50001\n")
raise Exception("Bogus commit_id in commit_message_for_local_commit.")
def diff_for_file(self, path, log=None):
return path + '-diff'
def diff_for_revision(self, revision):
return "DiffForRevision%s\nhttp://bugs.webkit.org/show_bug.cgi?id=12345" % revision
def show_head(self, path):
return path
def svn_revision_from_commit_text(self, commit_text):
return "49824"
def delete(self, path):
return self.delete_list([path])
def delete_list(self, paths):
if not self._filesystem:
return
for path in paths:
if self._filesystem.exists(path):
self._filesystem.remove(path)
| bsd-3-clause | 8,801,293,066,691,935,000 | 35.214815 | 130 | 0.691348 | false |
TeamExodus/kernel_google_msm | tools/perf/scripts/python/syscall-counts.py | 11181 | 1522 | # system call counts
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts.py [comm]\n";
for_comm = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"-----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
print "%-40s %10d\n" % (syscall_name(id), val),
| gpl-2.0 | 6,754,878,308,238,543,000 | 24.79661 | 77 | 0.622865 | false |
nazoking/DNC-tensorflow | dnc/controller.py | 1 | 9060 | import tensorflow as tf
import numpy as np
class BaseController:
def __init__(self, input_size, output_size, memory_read_heads, memory_word_size, batch_size=1):
"""
constructs a controller as described in the DNC paper:
http://www.nature.com/nature/journal/vaop/ncurrent/full/nature20101.html
Parameters:
----------
input_size: int
the size of the data input vector
output_size: int
the size of the data output vector
memory_read_heads: int
            the number of read heads in the associated external memory
memory_word_size: int
the size of the word in the associated external memory
batch_size: int
the size of the input data batch [optional, usually set by the DNC object]
"""
self.input_size = input_size
self.output_size = output_size
self.read_heads = memory_read_heads
self.word_size = memory_word_size
self.batch_size = batch_size
# indicates if the internal neural network is recurrent
# by the existence of recurrent_update and get_state methods
has_recurrent_update = callable(getattr(self, 'update_state', None))
has_get_state = callable(getattr(self, 'get_state', None))
self.has_recurrent_nn = has_recurrent_update and has_get_state
        # the actual size of the neural network input after flattening and
        # concatenating the input vector with the previously read vectors from memory
self.nn_input_size = self.word_size * self.read_heads + self.input_size
self.interface_vector_size = self.word_size * self.read_heads + 3 * self.word_size + 5 * self.read_heads + 3
# define network vars
with tf.name_scope("controller"):
self.network_vars()
nn_output_size = None
with tf.variable_scope("shape_inference"):
nn_output_size = self.get_nn_output_size()
        initial_std = lambda in_nodes: min(1e-2, np.sqrt(2.0 / in_nodes))
# defining internal weights of the controller
self.interface_weights = tf.Variable(
tf.truncated_normal([nn_output_size, self.interface_vector_size], stddev=initial_std(nn_output_size)),
name='interface_weights'
)
self.nn_output_weights = tf.Variable(
tf.truncated_normal([nn_output_size, self.output_size], stddev=initial_std(nn_output_size)),
name='nn_output_weights'
)
self.mem_output_weights = tf.Variable(
tf.truncated_normal([self.word_size * self.read_heads, self.output_size], stddev=initial_std(self.word_size * self.read_heads)),
name='mem_output_weights'
)
def network_vars(self):
"""
defines the variables needed by the internal neural network
[the variables should be attributes of the class, i.e. self.*]
"""
raise NotImplementedError("network_vars is not implemented")
def network_op(self, X):
"""
defines the controller's internal neural network operation
Parameters:
----------
        X: Tensor (batch_size, word_size * read_heads + input_size)
the input data concatenated with the previously read vectors from memory
Returns: Tensor (batch_size, nn_output_size)
"""
raise NotImplementedError("network_op method is not implemented")
def get_nn_output_size(self):
"""
        retrieves the output size of the defined neural network
Returns: int
the output's size
Raises: ValueError
"""
input_vector = np.zeros([self.batch_size, self.nn_input_size], dtype=np.float32)
output_vector = None
if self.has_recurrent_nn:
output_vector,_ = self.network_op(input_vector, self.get_state())
else:
output_vector = self.network_op(input_vector)
shape = output_vector.get_shape().as_list()
if len(shape) > 2:
raise ValueError("Expected the neural network to output a 1D vector, but got %dD" % (len(shape) - 1))
else:
return shape[1]
def parse_interface_vector(self, interface_vector):
"""
        parses the flat interface_vector into its various components with their
        correct shapes
Parameters:
----------
interface_vector: Tensor (batch_size, interface_vector_size)
            the flattened interface vector to be parsed
Returns: dict
a dictionary with the components of the interface_vector parsed
"""
parsed = {}
r_keys_end = self.word_size * self.read_heads
r_strengths_end = r_keys_end + self.read_heads
w_key_end = r_strengths_end + self.word_size
erase_end = w_key_end + 1 + self.word_size
write_end = erase_end + self.word_size
free_end = write_end + self.read_heads
r_keys_shape = (-1, self.word_size, self.read_heads)
r_strengths_shape = (-1, self.read_heads)
w_key_shape = (-1, self.word_size, 1)
write_shape = erase_shape = (-1, self.word_size)
free_shape = (-1, self.read_heads)
modes_shape = (-1, 3, self.read_heads)
# parsing the vector into its individual components
parsed['read_keys'] = tf.reshape(interface_vector[:, :r_keys_end], r_keys_shape)
parsed['read_strengths'] = tf.reshape(interface_vector[:, r_keys_end:r_strengths_end], r_strengths_shape)
parsed['write_key'] = tf.reshape(interface_vector[:, r_strengths_end:w_key_end], w_key_shape)
parsed['write_strength'] = tf.reshape(interface_vector[:, w_key_end], (-1, 1))
parsed['erase_vector'] = tf.reshape(interface_vector[:, w_key_end + 1:erase_end], erase_shape)
parsed['write_vector'] = tf.reshape(interface_vector[:, erase_end:write_end], write_shape)
parsed['free_gates'] = tf.reshape(interface_vector[:, write_end:free_end], free_shape)
parsed['allocation_gate'] = tf.expand_dims(interface_vector[:, free_end], 1)
parsed['write_gate'] = tf.expand_dims(interface_vector[:, free_end + 1], 1)
parsed['read_modes'] = tf.reshape(interface_vector[:, free_end + 2:], modes_shape)
# transforming the components to ensure they're in the right ranges
parsed['read_strengths'] = 1 + tf.nn.softplus(parsed['read_strengths'])
parsed['write_strength'] = 1 + tf.nn.softplus(parsed['write_strength'])
parsed['erase_vector'] = tf.nn.sigmoid(parsed['erase_vector'])
parsed['free_gates'] = tf.nn.sigmoid(parsed['free_gates'])
parsed['allocation_gate'] = tf.nn.sigmoid(parsed['allocation_gate'])
parsed['write_gate'] = tf.nn.sigmoid(parsed['write_gate'])
parsed['read_modes'] = tf.nn.softmax(parsed['read_modes'], 1)
return parsed
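    # Layout of the flat interface vector implied by the slicing above, with
    # W = word_size and R = read_heads:
    #   read keys (W*R) | read strengths (R) | write key (W) | write strength (1)
    #   erase vector (W) | write vector (W) | free gates (R)
    #   allocation gate (1) | write gate (1) | read modes (3*R)
    # Total: W*R + 3*W + 5*R + 3, matching self.interface_vector_size.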
def process_input(self, X, last_read_vectors, state=None):
"""
processes input data through the controller network and returns the
pre-output and interface_vector
Parameters:
----------
X: Tensor (batch_size, input_size)
the input data batch
last_read_vectors: (batch_size, word_size, read_heads)
the last batch of read vectors from memory
state: Tuple
state vectors if the network is recurrent
Returns: Tuple
pre-output: Tensor (batch_size, output_size)
parsed_interface_vector: dict
"""
flat_read_vectors = tf.reshape(last_read_vectors, (-1, self.word_size * self.read_heads))
complete_input = tf.concat(1, [X, flat_read_vectors])
nn_output, nn_state = None, None
if self.has_recurrent_nn:
nn_output, nn_state = self.network_op(complete_input, state)
else:
nn_output = self.network_op(complete_input)
pre_output = tf.matmul(nn_output, self.nn_output_weights)
interface = tf.matmul(nn_output, self.interface_weights)
parsed_interface = self.parse_interface_vector(interface)
if self.has_recurrent_nn:
return pre_output, parsed_interface, nn_state
else:
return pre_output, parsed_interface
def final_output(self, pre_output, new_read_vectors):
"""
        returns the final output by taking recent memory changes into account
Parameters:
----------
pre_output: Tensor (batch_size, output_size)
            the output vector from the input processing step
new_read_vectors: Tensor (batch_size, words_size, read_heads)
the newly read vectors from the updated memory
Returns: Tensor (batch_size, output_size)
"""
flat_read_vectors = tf.reshape(new_read_vectors, (-1, self.word_size * self.read_heads))
final_output = pre_output + tf.matmul(flat_read_vectors, self.mem_output_weights)
return final_output
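# Sketch of a minimal feedforward subclass (illustrative only; the hidden size
# and initialization are assumptions, not part of the original project):
#   class FeedforwardController(BaseController):
#       def network_vars(self):
#           self.W = tf.Variable(
#               tf.truncated_normal([self.nn_input_size, 128], stddev=1e-2))
#           self.b = tf.Variable(tf.zeros([128]))
#       def network_op(self, X):
#           return tf.nn.relu(tf.matmul(X, self.W) + self.b)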
| mit | 1,699,878,530,885,769,500 | 39.266667 | 145 | 0.613797 | false |
brezerk/taverna | userauth/tests.py | 2 | 1309 | # -*- coding: utf-8 -*-
# Copyright (C) 2010 by Alexey S. Malakhov <[email protected]>
# Opium <[email protected]>
#
# This file is part of Taverna
#
# Taverna is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Taverna is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Taverna. If not, see <http://www.gnu.org/licenses/>.
"""
This file demonstrates two different styles of tests (one doctest and one
unittest). These will both pass when you run "manage.py test".
Replace these with more appropriate tests for your application.
"""
from django.test import TestCase
class SimpleTest(TestCase):
def test_basic_addition(self):
"""
Tests that 1 + 1 always equals 2.
"""
self.failUnlessEqual(1 + 1, 2)
__test__ = {"doctest": """
Another way to test that 1 + 1 is equal to 2.
>>> 1 + 1 == 2
True
"""}
| gpl-3.0 | 726,098,631,586,460,300 | 29.44186 | 73 | 0.692895 | false |
kvar/ansible | test/units/modules/network/nso/test_nso_verify.py | 40 | 5452 | #
# Copyright (c) 2017 Cisco and/or its affiliates.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
import json
from units.compat.mock import patch
from ansible.modules.network.nso import nso_verify
from . import nso_module
from .nso_module import MockResponse
from units.modules.utils import set_module_args
class TestNsoVerify(nso_module.TestNsoModule):
module = nso_verify
@patch('ansible.module_utils.network.nso.nso.open_url')
def test_nso_verify_empty_data(self, open_url_mock):
calls = [
MockResponse('login', {}, 200, '{}', {'set-cookie': 'id'}),
MockResponse('get_system_setting', {'operation': 'version'}, 200, '{"result": "4.4.3"}'),
MockResponse('logout', {}, 200, '{"result": {}}'),
]
open_url_mock.side_effect = lambda *args, **kwargs: nso_module.mock_call(calls, *args, **kwargs)
data = {}
set_module_args({
'username': 'user', 'password': 'password',
'url': 'http://localhost:8080/jsonrpc', 'data': data
})
self.execute_module(changed=False)
self.assertEqual(0, len(calls))
@patch('ansible.module_utils.network.nso.nso.open_url')
def test_nso_verify_violation(self, open_url_mock):
devices_schema = nso_module.load_fixture('devices_schema.json')
device_schema = nso_module.load_fixture('device_schema.json')
description_schema = nso_module.load_fixture('description_schema.json')
calls = [
MockResponse('login', {}, 200, '{}', {'set-cookie': 'id'}),
MockResponse('get_system_setting', {'operation': 'version'}, 200, '{"result": "4.5.0"}'),
MockResponse('get_module_prefix_map', {}, 200, '{"result": {"tailf-ncs": "ncs"}}'),
MockResponse('new_trans', {'mode': 'read'}, 200, '{"result": {"th": 1}}'),
MockResponse('get_schema', {'path': '/ncs:devices'}, 200, '{"result": %s}' % (json.dumps(devices_schema, ))),
MockResponse('get_schema', {'path': '/ncs:devices/device'}, 200, '{"result": %s}' % (json.dumps(device_schema, ))),
MockResponse('exists', {'path': '/ncs:devices/device{ce0}'}, 200, '{"result": {"exists": true}}'),
MockResponse('get_value', {'path': '/ncs:devices/device{ce0}/description'}, 200, '{"result": {"value": "In Violation"}}'),
MockResponse('get_schema', {'path': '/ncs:devices/device/description'}, 200, '{"result": %s}' % (json.dumps(description_schema, ))),
MockResponse('logout', {}, 200, '{"result": {}}'),
]
open_url_mock.side_effect = lambda *args, **kwargs: nso_module.mock_call(calls, *args, **kwargs)
data = nso_module.load_fixture('verify_violation_data.json')
set_module_args({
'username': 'user', 'password': 'password',
'url': 'http://localhost:8080/jsonrpc', 'data': data
})
self.execute_module(failed=True, violations=[
{'path': '/ncs:devices/device{ce0}/description', 'expected-value': 'Example Device', 'value': 'In Violation'},
])
self.assertEqual(0, len(calls))
@patch('ansible.module_utils.network.nso.nso.open_url')
def test_nso_verify_ok(self, open_url_mock):
devices_schema = nso_module.load_fixture('devices_schema.json')
device_schema = nso_module.load_fixture('device_schema.json')
calls = [
MockResponse('login', {}, 200, '{}', {'set-cookie': 'id'}),
MockResponse('get_system_setting', {'operation': 'version'}, 200, '{"result": "4.5.0"}'),
MockResponse('get_module_prefix_map', {}, 200, '{"result": {"tailf-ncs": "ncs"}}'),
MockResponse('new_trans', {'mode': 'read'}, 200, '{"result": {"th": 1}}'),
MockResponse('get_schema', {'path': '/ncs:devices'}, 200, '{"result": %s}' % (json.dumps(devices_schema, ))),
MockResponse('get_schema', {'path': '/ncs:devices/device'}, 200, '{"result": %s}' % (json.dumps(device_schema, ))),
MockResponse('exists', {'path': '/ncs:devices/device{ce0}'}, 200, '{"result": {"exists": true}}'),
MockResponse('get_value', {'path': '/ncs:devices/device{ce0}/description'}, 200, '{"result": {"value": "Example Device"}}'),
MockResponse('logout', {}, 200, '{"result": {}}'),
]
open_url_mock.side_effect = lambda *args, **kwargs: nso_module.mock_call(calls, *args, **kwargs)
data = nso_module.load_fixture('verify_violation_data.json')
set_module_args({
'username': 'user', 'password': 'password',
'url': 'http://localhost:8080/jsonrpc', 'data': data,
'validate_certs': False
})
self.execute_module(changed=False)
self.assertEqual(0, len(calls))
| gpl-3.0 | 8,700,889,084,996,924,000 | 49.018349 | 144 | 0.602715 | false |
vlinhd11/vlinhd11-android-scripting | python/src/Lib/_LWPCookieJar.py | 267 | 6553 | """Load / save to libwww-perl (LWP) format files.
Actually, the format is slightly extended from that used by LWP's
(libwww-perl's) HTTP::Cookies, to avoid losing some RFC 2965 information
not recorded by LWP.
It uses the version string "2.0", though really there isn't an LWP Cookies
2.0 format. This indicates that there is extra information in here
(domain_dot and port_spec) while still being compatible with
libwww-perl, I hope.
"""
import time, re
from cookielib import (_warn_unhandled_exception, FileCookieJar, LoadError,
Cookie, MISSING_FILENAME_TEXT,
join_header_words, split_header_words,
iso2time, time2isoz)
def lwp_cookie_str(cookie):
"""Return string representation of Cookie in an the LWP cookie file format.
Actually, the format is extended a bit -- see module docstring.
"""
h = [(cookie.name, cookie.value),
("path", cookie.path),
("domain", cookie.domain)]
if cookie.port is not None: h.append(("port", cookie.port))
if cookie.path_specified: h.append(("path_spec", None))
if cookie.port_specified: h.append(("port_spec", None))
if cookie.domain_initial_dot: h.append(("domain_dot", None))
if cookie.secure: h.append(("secure", None))
if cookie.expires: h.append(("expires",
time2isoz(float(cookie.expires))))
if cookie.discard: h.append(("discard", None))
if cookie.comment: h.append(("comment", cookie.comment))
if cookie.comment_url: h.append(("commenturl", cookie.comment_url))
keys = cookie._rest.keys()
keys.sort()
for k in keys:
h.append((k, str(cookie._rest[k])))
h.append(("version", str(cookie.version)))
return join_header_words([h])
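# Illustrative output (an assumption, not part of the original module): for a
# session cookie named "sid" on domain ".example.com", lwp_cookie_str() yields
# roughly
#     sid=abc123; path="/"; domain=".example.com"; path_spec; domain_dot;
#     discard; version=0
# and LWPCookieJar.save() below prefixes each such line with "Set-Cookie3: ".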
class LWPCookieJar(FileCookieJar):
"""
    The LWPCookieJar saves a sequence of "Set-Cookie3" lines.
    "Set-Cookie3" is the format used by the libwww-perl library, not known
to be compatible with any browser, but which is easy to read and
doesn't lose information about RFC 2965 cookies.
Additional methods
as_lwp_str(ignore_discard=True, ignore_expired=True)
"""
def as_lwp_str(self, ignore_discard=True, ignore_expires=True):
"""Return cookies as a string of "\n"-separated "Set-Cookie3" headers.
ignore_discard and ignore_expires: see docstring for FileCookieJar.save
"""
now = time.time()
r = []
for cookie in self:
if not ignore_discard and cookie.discard:
continue
if not ignore_expires and cookie.is_expired(now):
continue
r.append("Set-Cookie3: %s" % lwp_cookie_str(cookie))
return "\n".join(r+[""])
def save(self, filename=None, ignore_discard=False, ignore_expires=False):
if filename is None:
if self.filename is not None: filename = self.filename
else: raise ValueError(MISSING_FILENAME_TEXT)
f = open(filename, "w")
try:
# There really isn't an LWP Cookies 2.0 format, but this indicates
# that there is extra information in here (domain_dot and
# port_spec) while still being compatible with libwww-perl, I hope.
f.write("#LWP-Cookies-2.0\n")
f.write(self.as_lwp_str(ignore_discard, ignore_expires))
finally:
f.close()
def _really_load(self, f, filename, ignore_discard, ignore_expires):
magic = f.readline()
if not re.search(self.magic_re, magic):
msg = ("%r does not look like a Set-Cookie3 (LWP) format "
"file" % filename)
raise LoadError(msg)
now = time.time()
header = "Set-Cookie3:"
boolean_attrs = ("port_spec", "path_spec", "domain_dot",
"secure", "discard")
value_attrs = ("version",
"port", "path", "domain",
"expires",
"comment", "commenturl")
try:
while 1:
line = f.readline()
if line == "": break
if not line.startswith(header):
continue
line = line[len(header):].strip()
for data in split_header_words([line]):
name, value = data[0]
standard = {}
rest = {}
for k in boolean_attrs:
standard[k] = False
for k, v in data[1:]:
if k is not None:
lc = k.lower()
else:
lc = None
# don't lose case distinction for unknown fields
if (lc in value_attrs) or (lc in boolean_attrs):
k = lc
if k in boolean_attrs:
if v is None: v = True
standard[k] = v
elif k in value_attrs:
standard[k] = v
else:
rest[k] = v
h = standard.get
expires = h("expires")
discard = h("discard")
if expires is not None:
expires = iso2time(expires)
if expires is None:
discard = True
domain = h("domain")
domain_specified = domain.startswith(".")
c = Cookie(h("version"), name, value,
h("port"), h("port_spec"),
domain, domain_specified, h("domain_dot"),
h("path"), h("path_spec"),
h("secure"),
expires,
discard,
h("comment"),
h("commenturl"),
rest)
if not ignore_discard and c.discard:
continue
if not ignore_expires and c.is_expired(now):
continue
self.set_cookie(c)
except IOError:
raise
except Exception:
_warn_unhandled_exception()
raise LoadError("invalid Set-Cookie3 format file %r: %r" %
(filename, line))
| apache-2.0 | 4,973,137,202,819,715,000 | 37.547059 | 79 | 0.504502 | false |
tedder/ansible | test/units/modules/network/f5/test_bigip_pool.py | 25 | 17853 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_pool import ApiParameters
from library.modules.bigip_pool import ModuleParameters
from library.modules.bigip_pool import ModuleManager
from library.modules.bigip_pool import ArgumentSpec
from library.module_utils.network.f5.common import F5ModuleError
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.compat.mock import patch
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_pool import ApiParameters
from ansible.modules.network.f5.bigip_pool import ModuleParameters
from ansible.modules.network.f5.bigip_pool import ModuleManager
from ansible.modules.network.f5.bigip_pool import ArgumentSpec
from ansible.module_utils.network.f5.common import F5ModuleError
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.compat.mock import patch
from units.modules.utils import set_module_args
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
monitor_type='m_of_n',
monitors=['/Common/Fake', '/Common/Fake2'],
quorum=1,
slow_ramp_time=200,
reselect_tries=5,
service_down_action='drop'
)
p = ModuleParameters(params=args)
assert p.monitor_type == 'm_of_n'
assert p.quorum == 1
assert p.monitors == 'min 1 of { /Common/Fake /Common/Fake2 }'
assert p.slow_ramp_time == 200
assert p.reselect_tries == 5
assert p.service_down_action == 'drop'
def test_api_parameters(self):
args = dict(
monitor="/Common/Fake and /Common/Fake2 ",
slowRampTime=200,
reselectTries=5,
serviceDownAction='drop'
)
p = ApiParameters(params=args)
assert p.monitors == '/Common/Fake and /Common/Fake2'
assert p.slow_ramp_time == 200
assert p.reselect_tries == 5
assert p.service_down_action == 'drop'
def test_unknown_module_lb_method(self):
args = dict(
lb_method='obscure_hyphenated_fake_method',
)
with pytest.raises(F5ModuleError):
p = ModuleParameters(params=args)
assert p.lb_method == 'foo'
def test_unknown_api_lb_method(self):
args = dict(
            loadBalancingMode='obscure_hyphenated_fake_method'
)
with pytest.raises(F5ModuleError):
p = ApiParameters(params=args)
assert p.lb_method == 'foo'
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
def test_create_pool(self, *args):
set_module_args(dict(
pool='fake_pool',
description='fakepool',
service_down_action='drop',
lb_method='round-robin',
partition='Common',
slow_ramp_time=10,
reselect_tries=1,
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode,
mutually_exclusive=self.spec.mutually_exclusive,
required_one_of=self.spec.required_one_of
)
mm = ModuleManager(module=module)
mm.create_on_device = Mock(return_value=True)
mm.exists = Mock(return_value=False)
results = mm.exec_module()
assert results['changed'] is True
assert results['name'] == 'fake_pool'
assert results['description'] == 'fakepool'
assert results['service_down_action'] == 'drop'
assert results['lb_method'] == 'round-robin'
assert results['slow_ramp_time'] == 10
assert results['reselect_tries'] == 1
def test_create_pool_monitor_type_missing(self, *args):
set_module_args(dict(
pool='fake_pool',
lb_method='round-robin',
partition='Common',
monitors=['/Common/tcp', '/Common/http'],
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode,
mutually_exclusive=self.spec.mutually_exclusive,
required_one_of=self.spec.required_one_of
)
mm = ModuleManager(module=module)
mm.create_on_device = Mock(return_value=True)
mm.exists = Mock(return_value=False)
results = mm.exec_module()
assert results['changed'] is True
assert results['name'] == 'fake_pool'
assert results['monitors'] == ['/Common/http', '/Common/tcp']
assert results['monitor_type'] == 'and_list'
def test_create_pool_monitors_missing(self, *args):
set_module_args(dict(
pool='fake_pool',
lb_method='round-robin',
partition='Common',
monitor_type='and_list',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode,
mutually_exclusive=self.spec.mutually_exclusive,
required_one_of=self.spec.required_one_of
)
mm = ModuleManager(module=module)
mm.create_on_device = Mock(return_value=True)
mm.exists = Mock(return_value=False)
msg = "The 'monitors' parameter cannot be empty when " \
"'monitor_type' parameter is specified"
with pytest.raises(F5ModuleError) as err:
mm.exec_module()
assert str(err.value) == msg
def test_create_pool_quorum_missing(self, *args):
set_module_args(dict(
pool='fake_pool',
lb_method='round-robin',
partition='Common',
monitor_type='m_of_n',
monitors=['/Common/tcp', '/Common/http'],
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode,
mutually_exclusive=self.spec.mutually_exclusive,
required_one_of=self.spec.required_one_of
)
mm = ModuleManager(module=module)
mm.create_on_device = Mock(return_value=True)
mm.exists = Mock(return_value=False)
msg = "Quorum value must be specified with monitor_type 'm_of_n'."
with pytest.raises(F5ModuleError) as err:
mm.exec_module()
assert str(err.value) == msg
def test_create_pool_monitor_and_list(self, *args):
set_module_args(dict(
pool='fake_pool',
partition='Common',
monitor_type='and_list',
monitors=['/Common/tcp', '/Common/http'],
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode,
mutually_exclusive=self.spec.mutually_exclusive,
required_one_of=self.spec.required_one_of
)
mm = ModuleManager(module=module)
mm.create_on_device = Mock(return_value=True)
mm.exists = Mock(return_value=False)
results = mm.exec_module()
assert results['changed'] is True
assert results['name'] == 'fake_pool'
assert results['monitors'] == ['/Common/http', '/Common/tcp']
assert results['monitor_type'] == 'and_list'
def test_create_pool_monitor_m_of_n(self, *args):
set_module_args(dict(
pool='fake_pool',
partition='Common',
monitor_type='m_of_n',
quorum=1,
monitors=['/Common/tcp', '/Common/http'],
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode,
mutually_exclusive=self.spec.mutually_exclusive,
required_one_of=self.spec.required_one_of
)
mm = ModuleManager(module=module)
mm.create_on_device = Mock(return_value=True)
mm.exists = Mock(return_value=False)
results = mm.exec_module()
assert results['changed'] is True
assert results['name'] == 'fake_pool'
assert results['monitors'] == ['/Common/http', '/Common/tcp']
assert results['monitor_type'] == 'm_of_n'
def test_update_monitors(self, *args):
set_module_args(dict(
name='test_pool',
partition='Common',
monitor_type='and_list',
monitors=['/Common/http', '/Common/tcp'],
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode,
mutually_exclusive=self.spec.mutually_exclusive,
required_one_of=self.spec.required_one_of
)
mm = ModuleManager(module=module)
current = ApiParameters(params=load_fixture('load_ltm_pool.json'))
mm.update_on_device = Mock(return_value=True)
mm.exists = Mock(return_value=True)
mm.read_current_from_device = Mock(return_value=current)
results = mm.exec_module()
assert results['changed'] is True
assert results['monitor_type'] == 'and_list'
def test_create_pool_monitor_and_list_no_partition(self, *args):
set_module_args(dict(
pool='fake_pool',
monitor_type='and_list',
monitors=['tcp', 'http'],
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode,
mutually_exclusive=self.spec.mutually_exclusive,
required_one_of=self.spec.required_one_of
)
mm = ModuleManager(module=module)
mm.create_on_device = Mock(return_value=True)
mm.exists = Mock(return_value=False)
results = mm.exec_module()
assert results['changed'] is True
assert results['name'] == 'fake_pool'
assert results['monitors'] == ['/Common/http', '/Common/tcp']
assert results['monitor_type'] == 'and_list'
def test_create_pool_monitor_m_of_n_no_partition(self, *args):
set_module_args(dict(
pool='fake_pool',
monitor_type='m_of_n',
quorum=1,
monitors=['tcp', 'http'],
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode,
mutually_exclusive=self.spec.mutually_exclusive,
required_one_of=self.spec.required_one_of
)
mm = ModuleManager(module=module)
mm.create_on_device = Mock(return_value=True)
mm.exists = Mock(return_value=False)
results = mm.exec_module()
assert results['changed'] is True
assert results['name'] == 'fake_pool'
assert results['monitors'] == ['/Common/http', '/Common/tcp']
assert results['monitor_type'] == 'm_of_n'
def test_create_pool_monitor_and_list_custom_partition(self, *args):
set_module_args(dict(
pool='fake_pool',
partition='Testing',
monitor_type='and_list',
monitors=['tcp', 'http'],
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode,
mutually_exclusive=self.spec.mutually_exclusive,
required_one_of=self.spec.required_one_of
)
mm = ModuleManager(module=module)
mm.create_on_device = Mock(return_value=True)
mm.exists = Mock(return_value=False)
results = mm.exec_module()
assert results['changed'] is True
assert results['name'] == 'fake_pool'
assert results['monitors'] == ['/Testing/http', '/Testing/tcp']
assert results['monitor_type'] == 'and_list'
def test_create_pool_monitor_m_of_n_custom_partition(self, *args):
set_module_args(dict(
pool='fake_pool',
partition='Testing',
monitor_type='m_of_n',
quorum=1,
monitors=['tcp', 'http'],
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode,
mutually_exclusive=self.spec.mutually_exclusive,
required_one_of=self.spec.required_one_of
)
mm = ModuleManager(module=module)
mm.create_on_device = Mock(return_value=True)
mm.exists = Mock(return_value=False)
results = mm.exec_module()
assert results['changed'] is True
assert results['name'] == 'fake_pool'
assert results['monitors'] == ['/Testing/http', '/Testing/tcp']
assert results['monitor_type'] == 'm_of_n'
def test_create_pool_with_metadata(self, *args):
set_module_args(dict(
pool='fake_pool',
metadata=dict(ansible='2.4'),
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode,
mutually_exclusive=self.spec.mutually_exclusive,
required_one_of=self.spec.required_one_of
)
mm = ModuleManager(module=module)
mm.create_on_device = Mock(return_value=True)
mm.exists = Mock(return_value=False)
results = mm.exec_module()
assert results['changed'] is True
assert results['name'] == 'fake_pool'
assert 'metadata' in results
assert 'ansible' in results['metadata']
assert results['metadata']['ansible'] == '2.4'
def test_create_aggregate_pools(self, *args):
set_module_args(dict(
aggregate=[
dict(
pool='fake_pool',
description='fakepool',
service_down_action='drop',
lb_method='round-robin',
partition='Common',
slow_ramp_time=10,
reselect_tries=1,
),
dict(
pool='fake_pool2',
description='fakepool2',
service_down_action='drop',
lb_method='predictive-node',
partition='Common',
slow_ramp_time=110,
reselect_tries=2,
)
],
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode,
mutually_exclusive=self.spec.mutually_exclusive,
required_one_of=self.spec.required_one_of
)
mm = ModuleManager(module=module)
mm.create_on_device = Mock(return_value=True)
mm.exists = Mock(return_value=False)
results = mm.exec_module()
assert results['changed'] is True
| gpl-3.0 | 5,349,073,661,277,531,000 | 31.578467 | 91 | 0.563211 | false |
nfredrik/pyModelStuff | pymodel/StateCoverage.py | 2 | 2493 | """
StateCoverage: choose the (aname, args) whose next state has been used least
"""
import sys
import random
# Tester state is a bag of states: [ ( state , n of times used ), ... ]
# Implement bag of states as list of pairs, not dictionary with state keys
# because our states are themselves dictionaries, which are not hashable.
coverage = list()
# Functions for maintaining coverage
def additems(coverage, enabled):
# coverage(after) is empty if coverage(before) is empty and enabled is empty
# 'if' prevents dup of keys in coverage(before) but might be dups in enabled
coverage += [(next, 0) for (a,args,result,next,properties) in enabled
if next not in [ s for (s,n) in coverage] ]
def inbag(coverage, x):
# False if coverage is empty, but doesn't crash
return x in [ s for (s,n) in coverage ]
def count(coverage, x):
    # always return first occurrence, do dups matter?
# index [0] fails if list is empty, is that possible?
return [ n for (xitem,n) in coverage if xitem == x ][0]
def incr(coverage, x):
# get index and replace there, always update first index, do dups matter?
xs = [ s for (s,n) in coverage ]
i = xs.index(x) # raise ValueError if x not in xs, is that possible?
coverage[i] = (x, count(coverage, x) + 1)
def select_action(enabled):
"""
Choose the action + args whose next state has been used the least
If more than one action has been used that many of times, choose randomly
"""
# print 'enabled %s, coverage: %s' % (enabled, coverage)
if not enabled: # empty
return (None, None)
else:
additems(coverage, enabled)
# next line fails if enabled is empty - but it isn't, see above
# count in next line fails if coverage is empty - possible?
# no, because additems (above) will execute, and enabled is not empty
least = min([ count(coverage,next)
for (aname,args,result,next,properties) in enabled ])
aleast = [(aname,args,next)
for (aname,args,result,next,properties) in enabled
if count(coverage, next) == least]
# next line fails if aleast is empty - is that possible?
# could be possible if none in enabled results in next state in least
# BUT that's not possible because least (above) is built using enabled
(aname,args,next) = random.choice(aleast)
incr(coverage,next)
return (aname,args)
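# Minimal usage sketch (an assumption, not part of the original module): the
# PyModel tester passes select_action a list of enabled transitions, each a
# tuple (aname, args, result, next_state, properties).
if __name__ == '__main__':
    demo_enabled = [('Inc', (1,), None, {'x': 1}, {}),
                    ('Dec', (1,), None, {'x': -1}, {})]
    for _ in range(3):
        # repeatedly favours whichever next state has been visited least
        print(select_action(demo_enabled))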
| bsd-3-clause | 4,410,946,048,620,479,000 | 38.868852 | 78 | 0.647012 | false |
duncanmmacleod/gwpy | gwpy/timeseries/io/hdf5.py | 3 | 4214 | # -*- coding: utf-8 -*-
# Copyright (C) Duncan Macleod (2014-2020)
#
# This file is part of GWpy.
#
# GWpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWpy. If not, see <http://www.gnu.org/licenses/>.
"""This module attaches the HDF5 input output methods to the TimeSeries.
"""
from astropy import units
from ...io import registry as io_registry
from ...io.hdf5 import (identify_hdf5, with_read_hdf5, with_write_hdf5)
from ...types.io.hdf5 import (read_hdf5_array, write_hdf5_series)
from .. import (TimeSeries, TimeSeriesDict,
StateVector, StateVectorDict)
SEC_UNIT = units.second
__author__ = 'Duncan Macleod <[email protected]>'
# -- read ---------------------------------------------------------------------
def read_hdf5_timeseries(h5f, path=None, start=None, end=None, **kwargs):
"""Read a `TimeSeries` from HDF5
"""
# read data
kwargs.setdefault('array_type', TimeSeries)
series = read_hdf5_array(h5f, path=path, **kwargs)
# crop if needed
if start is not None:
start = max(start, series.span[0])
if end is not None:
end = min(end, series.span[1])
if start is not None or end is not None:
return series.crop(start, end)
return series
def _is_timeseries_dataset(dataset):
"""Returns `True` if a dataset contains `TimeSeries` data
"""
return SEC_UNIT.is_equivalent(dataset.attrs.get('xunit', 'undef'))
@with_read_hdf5
def read_hdf5_dict(h5f, names=None, group=None, **kwargs):
"""Read a `TimeSeriesDict` from HDF5
"""
# find group from which to read
if group:
h5g = h5f[group]
else:
h5g = h5f
# find list of names to read
if names is None:
names = [key for key in h5g if _is_timeseries_dataset(h5g[key])]
# read names
out = kwargs.pop('dict_type', TimeSeriesDict)()
kwargs.setdefault('array_type', out.EntryClass)
for name in names:
out[name] = read_hdf5_timeseries(h5g[name], **kwargs)
return out
def read_hdf5_factory(data_class):
if issubclass(data_class, dict):
def read_(*args, **kwargs):
kwargs.setdefault('dict_type', data_class)
return read_hdf5_dict(*args, **kwargs)
else:
def read_(*args, **kwargs):
kwargs.setdefault('array_type', data_class)
return read_hdf5_timeseries(*args, **kwargs)
return read_
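# Usage note (an assumption, not part of this module): once the readers are
# registered below, they are reached through the usual unified-I/O interface,
# e.g. something like
#     TimeSeries.read('data.h5', path='mychannel', format='hdf5')
# where 'data.h5' and 'mychannel' are hypothetical file and dataset names.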
# -- write --------------------------------------------------------------------
@with_write_hdf5
def write_hdf5_dict(tsdict, h5f, group=None, **kwargs):
"""Write a `TimeSeriesBaseDict` to HDF5
Each series in the dict is written as a dataset in the group
"""
# create group if needed
if group and group not in h5f:
h5g = h5f.create_group(group)
elif group:
h5g = h5f[group]
else:
h5g = h5f
# write each timeseries
kwargs.setdefault('format', 'hdf5')
for key, series in tsdict.items():
series.write(h5g, path=str(key), **kwargs)
# -- register -----------------------------------------------------------------
# series classes
for series_class in (TimeSeries, StateVector):
reader = read_hdf5_factory(series_class)
io_registry.register_reader('hdf5', series_class, reader)
io_registry.register_writer('hdf5', series_class, write_hdf5_series)
io_registry.register_identifier('hdf5', series_class, identify_hdf5)
# dict classes
for dict_class in (TimeSeriesDict, StateVectorDict):
reader = read_hdf5_factory(dict_class)
io_registry.register_reader('hdf5', dict_class, reader)
io_registry.register_writer('hdf5', dict_class, write_hdf5_dict)
io_registry.register_identifier('hdf5', dict_class, identify_hdf5)
| gpl-3.0 | 3,434,838,635,407,927,000 | 31.167939 | 79 | 0.635738 | false |
cchurch/ansible | test/units/modules/network/fortimanager/test_fmgr_fwpol_package.py | 38 | 4044 | # Copyright 2018 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
from ansible.module_utils.network.fortimanager.fortimanager import FortiManagerHandler
import pytest
try:
from ansible.modules.network.fortimanager import fmgr_fwpol_package
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
def load_fixtures():
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures') + "/{filename}.json".format(
filename=os.path.splitext(os.path.basename(__file__))[0])
try:
with open(fixture_path, "r") as fixture_file:
fixture_data = json.load(fixture_file)
except IOError:
return []
return [fixture_data]
@pytest.fixture(autouse=True)
def module_mock(mocker):
connection_class_mock = mocker.patch('ansible.module_utils.basic.AnsibleModule')
return connection_class_mock
@pytest.fixture(autouse=True)
def connection_mock(mocker):
connection_class_mock = mocker.patch('ansible.modules.network.fortimanager.fmgr_fwpol_package.Connection')
return connection_class_mock
@pytest.fixture(scope="function", params=load_fixtures())
def fixture_data(request):
func_name = request.function.__name__.replace("test_", "")
return request.param.get(func_name, None)
fmg_instance = FortiManagerHandler(connection_mock, module_mock)
def test_fmgr_fwpol_package(fixture_data, mocker):
mocker.patch("ansible.module_utils.network.fortimanager.fortimanager.FortiManagerHandler.process_request",
side_effect=fixture_data)
# Test using fixture 1 #
output = fmgr_fwpol_package.fmgr_fwpol_package(fmg_instance, fixture_data[0]['paramgram_used'])
assert output['raw_response']['status']['code'] == 0
# Test using fixture 2 #
output = fmgr_fwpol_package.fmgr_fwpol_package(fmg_instance, fixture_data[1]['paramgram_used'])
assert output['raw_response']['status']['code'] == 0
# Test using fixture 3 #
output = fmgr_fwpol_package.fmgr_fwpol_package(fmg_instance, fixture_data[2]['paramgram_used'])
assert output['raw_response']['status']['code'] == 0
# Test using fixture 4 #
output = fmgr_fwpol_package.fmgr_fwpol_package(fmg_instance, fixture_data[3]['paramgram_used'])
assert output['raw_response']['status']['code'] == 0
def test_fmgr_fwpol_package_folder(fixture_data, mocker):
mocker.patch("ansible.module_utils.network.fortimanager.fortimanager.FortiManagerHandler.process_request",
side_effect=fixture_data)
# Test using fixture 1 #
output = fmgr_fwpol_package.fmgr_fwpol_package_folder(fmg_instance, fixture_data[0]['paramgram_used'])
assert output['raw_response']['status']['code'] == 0
# Test using fixture 2 #
output = fmgr_fwpol_package.fmgr_fwpol_package_folder(fmg_instance, fixture_data[1]['paramgram_used'])
assert output['raw_response']['status']['code'] == 0
# Test using fixture 3 #
output = fmgr_fwpol_package.fmgr_fwpol_package_folder(fmg_instance, fixture_data[2]['paramgram_used'])
assert output['raw_response']['status']['code'] == 0
# Test using fixture 4 #
output = fmgr_fwpol_package.fmgr_fwpol_package_folder(fmg_instance, fixture_data[3]['paramgram_used'])
assert output['raw_response']['status']['code'] == 0
| gpl-3.0 | -6,854,405,668,354,044,000 | 40.690722 | 110 | 0.715628 | false |
adngdb/socorro | webapp-django/crashstats/tokens/tests/test_middleware.py | 3 | 4138 | import datetime
import json
from nose.tools import eq_, ok_, assert_raises
from django.contrib.auth.models import User, Permission
from django.conf import settings
from django.test.client import RequestFactory
from django.contrib.auth.middleware import AuthenticationMiddleware
from django.contrib.sessions.middleware import SessionMiddleware
from django.core.exceptions import ImproperlyConfigured
from django.contrib.contenttypes.models import ContentType
from crashstats.base.tests.testbase import DjangoTestCase
from crashstats.tokens import models
from crashstats.tokens.middleware import APIAuthenticationMiddleware
class TestMiddleware(DjangoTestCase):
django_session_middleware = SessionMiddleware()
django_auth_middleware = AuthenticationMiddleware()
middleware = APIAuthenticationMiddleware()
    def test_improperly_configured(self):
request = RequestFactory().get('/')
assert_raises(
ImproperlyConfigured,
self.middleware.process_request,
request
)
def _get_request(self, **headers):
# boilerplate stuff
request = RequestFactory(**headers).get('/')
self.django_session_middleware.process_request(request)
self.django_auth_middleware.process_request(request)
assert request.user
return request
def test_no_token_key(self):
request = self._get_request()
eq_(self.middleware.process_request(request), None)
def test_non_existant_token_key(self):
request = self._get_request(HTTP_AUTH_TOKEN='xxx')
response = self.middleware.process_request(request)
eq_(response.status_code, 403)
# the response content will be JSON
result = json.loads(response.content)
eq_(result['error'], 'API Token not matched')
def test_expired_token(self):
user = User.objects.create(username='peterbe')
token = models.Token.objects.create(
user=user,
)
token.expires -= datetime.timedelta(
days=settings.TOKENS_DEFAULT_EXPIRATION_DAYS
)
token.save()
request = self._get_request(HTTP_AUTH_TOKEN=token.key)
response = self.middleware.process_request(request)
eq_(response.status_code, 403)
result = json.loads(response.content)
eq_(result['error'], 'API Token found but expired')
def test_token_valid(self):
user = User.objects.create(username='peterbe')
token = models.Token.objects.create(
user=user,
)
request = self._get_request(HTTP_AUTH_TOKEN=token.key)
response = self.middleware.process_request(request)
eq_(response, None)
eq_(request.user, user)
def test_token_permissions(self):
user = User.objects.create(username='peterbe')
token = models.Token.objects.create(
user=user,
)
ct, __ = ContentType.objects.get_or_create(
model='',
app_label='crashstats',
)
permission = Permission.objects.create(
codename='play',
content_type=ct
)
token.permissions.add(permission)
Permission.objects.create(
codename='fire',
content_type=ct
)
# deliberately not adding this second permission
request = self._get_request(HTTP_AUTH_TOKEN=token.key)
# do the magic to the request
self.middleware.process_request(request)
eq_(request.user, user)
ok_(request.user.has_perm('crashstats.play'))
ok_(not request.user.has_perm('crashstats.fire'))
def test_token_on_inactive_user(self):
user = User.objects.create(username='peterbe')
user.is_active = False
user.save()
token = models.Token.objects.create(
user=user,
)
request = self._get_request(HTTP_AUTH_TOKEN=token.key)
response = self.middleware.process_request(request)
eq_(response.status_code, 403)
result = json.loads(response.content)
eq_(result['error'], 'User of API token not active')
| mpl-2.0 | -4,301,736,498,665,749,500 | 33.483333 | 68 | 0.650797 | false |
svirusxxx/cjdns | node_build/dependencies/libuv/build/gyp/pylib/gyp/generator/dump_dependency_json.py | 899 | 2768 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import os
import gyp
import gyp.common
import gyp.msvs_emulation
import json
import sys
generator_supports_multiple_toolsets = True
generator_wants_static_library_dependencies_adjusted = False
generator_default_variables = {
}
for dirname in ['INTERMEDIATE_DIR', 'SHARED_INTERMEDIATE_DIR', 'PRODUCT_DIR',
'LIB_DIR', 'SHARED_LIB_DIR']:
# Some gyp steps fail if these are empty(!).
generator_default_variables[dirname] = 'dir'
for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME',
'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT',
'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX',
'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX',
'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX',
'CONFIGURATION_NAME']:
generator_default_variables[unused] = ''
def CalculateVariables(default_variables, params):
generator_flags = params.get('generator_flags', {})
for key, val in generator_flags.items():
default_variables.setdefault(key, val)
default_variables.setdefault('OS', gyp.common.GetFlavor(params))
flavor = gyp.common.GetFlavor(params)
  if flavor == 'win':
# Copy additional generator configuration data from VS, which is shared
# by the Windows Ninja generator.
import gyp.generator.msvs as msvs_generator
generator_additional_non_configuration_keys = getattr(msvs_generator,
'generator_additional_non_configuration_keys', [])
generator_additional_path_sections = getattr(msvs_generator,
'generator_additional_path_sections', [])
gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
def CalculateGeneratorInputInfo(params):
"""Calculate the generator specific info that gets fed to input (called by
gyp)."""
generator_flags = params.get('generator_flags', {})
if generator_flags.get('adjust_static_libraries', False):
global generator_wants_static_library_dependencies_adjusted
generator_wants_static_library_dependencies_adjusted = True
def GenerateOutput(target_list, target_dicts, data, params):
# Map of target -> list of targets it depends on.
edges = {}
# Queue of targets to visit.
targets_to_visit = target_list[:]
while len(targets_to_visit) > 0:
target = targets_to_visit.pop()
if target in edges:
continue
edges[target] = []
for dep in target_dicts[target].get('dependencies', []):
edges[target].append(dep)
targets_to_visit.append(dep)
filename = 'dump.json'
f = open(filename, 'w')
json.dump(edges, f)
f.close()
print 'Wrote json to %s.' % filename
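# Shape of the output (an assumption, for illustration only): dump.json maps
# each fully qualified target to the list of targets it depends on, e.g.
#     {"src/foo.gyp:mylib#target": ["src/bar.gyp:base#target"], ...}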
| gpl-3.0 | -2,395,787,231,005,847,600 | 33.17284 | 77 | 0.699061 | false |
coala-analyzer/coala-quickstart | coala_quickstart/coala_quickstart.py | 1 | 5542 | import argparse
import logging
import os
import sys
from pyprint.ConsolePrinter import ConsolePrinter
from coala_utils.FilePathCompleter import FilePathCompleter
from coala_utils.Question import ask_question
from coala_quickstart import __version__
from coala_quickstart.interaction.Logo import print_welcome_message
from coala_quickstart.generation.InfoCollector import collect_info
from coala_quickstart.generation.Project import (
ask_to_select_languages,
get_used_languages,
print_used_languages,
valid_path,
)
from coala_quickstart.generation.FileGlobs import get_project_files
from coala_quickstart.Strings import PROJECT_DIR_HELP
from coala_quickstart.generation.Bears import (
filter_relevant_bears,
print_relevant_bears,
get_non_optional_settings_bears,
remove_unusable_bears,
)
from coala_quickstart.generation.Settings import (
generate_settings, write_coafile)
from coala_quickstart.generation.SettingsClass import (
collect_bear_settings)
from coala_quickstart.green_mode.green_mode_core import green_mode
MAX_ARGS_GREEN_MODE = 5
MAX_VALUES_GREEN_MODE = 5
def _get_arg_parser():
description = """
coala-quickstart automatically creates a .coafile for use by coala.
"""
arg_parser = argparse.ArgumentParser(
prog='coala-quickstart',
description=description,
add_help=True
)
arg_parser.add_argument(
'-v', '--version', action='version', version=__version__)
arg_parser.add_argument(
'-C', '--non-interactive', const=True, action='store_const',
help='run coala-quickstart in non interactive mode')
arg_parser.add_argument(
'--ci', action='store_const', dest='non_interactive', const=True,
help='continuous integration run, alias for `--non-interactive`')
arg_parser.add_argument(
'--allow-incomplete-sections', action='store_const',
dest='incomplete_sections', const=True,
        help='generate coafile with only `bears` and `files` fields in sections')
arg_parser.add_argument(
'--no-filter-by-capabilities', action='store_const',
dest='no_filter_by_capabilities', const=True,
        help='disable filtering of bears by their capabilities.')
arg_parser.add_argument(
'-g', '--green-mode', const=True, action='store_const',
        help='Produce "green" config files for your project. Green config files'
             ' don\'t generate any errors in the project and match the coala'
' configuration as closely as possible to your project.')
arg_parser.add_argument(
'--max-args', nargs='?', type=int,
help='Maximum number of optional settings allowed to be checked'
' by green_mode for each bear.')
arg_parser.add_argument(
'--max-values', nargs='?', type=int,
help='Maximum number of values to optional settings allowed to be'
' checked by green_mode for each bear.')
return arg_parser
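# Illustrative invocations (an assumption, not part of the original source):
#     coala-quickstart                     # interactive run in the current dir
#     coala-quickstart --non-interactive   # CI-friendly run without prompts
#     coala-quickstart -g --max-args 3     # green mode, capping optional args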
def main():
global MAX_ARGS_GREEN_MODE, MAX_VALUES_GREEN_MODE
arg_parser = _get_arg_parser()
args = arg_parser.parse_args()
logging.basicConfig(stream=sys.stdout)
printer = ConsolePrinter()
logging.getLogger(__name__)
fpc = None
project_dir = os.getcwd()
if args.green_mode:
args.no_filter_by_capabilities = None
args.incomplete_sections = None
if args.max_args:
MAX_ARGS_GREEN_MODE = args.max_args
if args.max_values:
MAX_VALUES_GREEN_MODE = args.max_values
if not args.green_mode and (args.max_args or args.max_values):
logging.warning(' --max-args and --max-values can be used '
'only with --green-mode. The arguments will '
'be ignored.')
if not args.non_interactive and not args.green_mode:
fpc = FilePathCompleter()
fpc.activate()
print_welcome_message(printer)
printer.print(PROJECT_DIR_HELP)
project_dir = ask_question(
'What is your project directory?',
default=project_dir,
typecast=valid_path)
fpc.deactivate()
project_files, ignore_globs = get_project_files(
None,
printer,
project_dir,
fpc,
args.non_interactive)
used_languages = list(get_used_languages(project_files))
used_languages = ask_to_select_languages(used_languages, printer,
args.non_interactive)
extracted_information = collect_info(project_dir)
relevant_bears = filter_relevant_bears(
used_languages, printer, arg_parser, extracted_information)
if args.green_mode:
bear_settings_obj = collect_bear_settings(relevant_bears)
green_mode(
project_dir, ignore_globs, relevant_bears, bear_settings_obj,
MAX_ARGS_GREEN_MODE,
MAX_VALUES_GREEN_MODE,
project_files,
printer,
)
exit()
print_relevant_bears(printer, relevant_bears)
if args.non_interactive and not args.incomplete_sections:
unusable_bears = get_non_optional_settings_bears(relevant_bears)
remove_unusable_bears(relevant_bears, unusable_bears)
print_relevant_bears(printer, relevant_bears, 'usable')
settings = generate_settings(
project_dir,
project_files,
ignore_globs,
relevant_bears,
extracted_information,
args.incomplete_sections)
write_coafile(printer, project_dir, settings)
| agpl-3.0 | 2,764,760,061,642,943,000 | 32.385542 | 80 | 0.656803 | false |
mytliulei/DCNRobotInstallPackages | windows/win32/pygal-1.7.0/setup.py | 1 | 2698 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of pygal
#
# A python svg graph plotting library
# Copyright © 2012-2014 Kozea
#
# This library is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with pygal. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import re
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
# import here, cause outside the eggs aren't loaded
import pytest
errno = pytest.main(self.test_args)
sys.exit(errno)
ROOT = os.path.dirname(__file__)
# Explicitly specify the encoding of pygal/__init__.py if we're on py3.
kwargs = {}
if sys.version_info[0] == 3:
kwargs['encoding'] = 'utf-8'
with open(os.path.join(ROOT, 'pygal', '__init__.py'), **kwargs) as fd:
__version__ = re.search("__version__ = '([^']+)'", fd.read()).group(1)
setup(
name="pygal",
version=__version__,
description="A python svg graph plotting library",
author="Kozea",
url="http://pygal.org/",
author_email="[email protected]",
license="GNU LGPL v3+",
platforms="Any",
packages=find_packages(),
provides=['pygal'],
scripts=["pygal_gen.py"],
keywords=[
"svg", "chart", "graph", "diagram", "plot", "histogram", "kiviat"],
tests_require=["pytest", "pyquery", "flask", "cairosvg"],
cmdclass={'test': PyTest},
package_data={'pygal': ['css/*', 'graph/*.svg']},
extras_require={
'lxml': ['lxml'],
'png': ['cairosvg']
},
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Console",
"Intended Audience :: End Users/Desktop",
"License :: OSI Approved :: "
"GNU Lesser General Public License v3 or later (LGPLv3+)",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"Topic :: Multimedia :: Graphics :: Presentation"])
| apache-2.0 | 1,092,240,996,864,592,800 | 31.890244 | 79 | 0.642566 | false |
srajag/nova | nova/tests/virt/baremetal/test_volume_driver.py | 11 | 11573 | # Copyright (c) 2012 NTT DOCOMO, INC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for baremetal volume driver."""
from oslo.config import cfg
from nova import exception
from nova import test
from nova.virt.baremetal import volume_driver
from nova.virt import fake
from nova.virt.libvirt import volume as libvirt_volume
CONF = cfg.CONF
SHOW_OUTPUT = """Target 1: iqn.2010-10.org.openstack:volume-00000001
System information:
Driver: iscsi
State: ready
I_T nexus information:
I_T nexus: 8
Initiator: iqn.1993-08.org.debian:01:7780c6a16b4
Connection: 0
IP Address: 172.17.12.10
LUN information:
LUN: 0
Type: controller
SCSI ID: IET 00010000
SCSI SN: beaf10
Size: 0 MB, Block size: 1
Online: Yes
Removable media: No
Readonly: No
Backing store type: null
Backing store path: None
Backing store flags:
LUN: 1
Type: disk
SCSI ID: IET 00010001
SCSI SN: beaf11
Size: 1074 MB, Block size: 512
Online: Yes
Removable media: No
Readonly: No
Backing store type: rdwr
Backing store path: /dev/nova-volumes/volume-00000001
Backing store flags:
Account information:
ACL information:
ALL
Target 2: iqn.2010-10.org.openstack:volume-00000002
System information:
Driver: iscsi
State: ready
I_T nexus information:
LUN information:
LUN: 0
Type: controller
SCSI ID: IET 00020000
SCSI SN: beaf20
Size: 0 MB, Block size: 1
Online: Yes
Removable media: No
Readonly: No
Backing store type: null
Backing store path: None
Backing store flags:
LUN: 1
Type: disk
SCSI ID: IET 00020001
SCSI SN: beaf21
Size: 2147 MB, Block size: 512
Online: Yes
Removable media: No
Readonly: No
Backing store type: rdwr
Backing store path: /dev/nova-volumes/volume-00000002
Backing store flags:
Account information:
ACL information:
ALL
Target 1000001: iqn.2010-10.org.openstack.baremetal:1000001-dev.vdc
System information:
Driver: iscsi
State: ready
I_T nexus information:
LUN information:
LUN: 0
Type: controller
SCSI ID: IET f42410000
SCSI SN: beaf10000010
Size: 0 MB, Block size: 1
Online: Yes
Removable media: No
Readonly: No
Backing store type: null
Backing store path: None
Backing store flags:
LUN: 1
Type: disk
SCSI ID: IET f42410001
SCSI SN: beaf10000011
Size: 1074 MB, Block size: 512
Online: Yes
Removable media: No
Readonly: No
Backing store type: rdwr
Backing store path: /dev/disk/by-path/ip-172.17.12.10:3260-iscsi-\
iqn.2010-10.org.openstack:volume-00000001-lun-1
Backing store flags:
Account information:
ACL information:
ALL
"""
def fake_show_tgtadm():
return SHOW_OUTPUT
class BareMetalVolumeTestCase(test.NoDBTestCase):
def setUp(self):
super(BareMetalVolumeTestCase, self).setUp()
self.stubs.Set(volume_driver, '_show_tgtadm', fake_show_tgtadm)
def test_list_backingstore_path(self):
l = volume_driver._list_backingstore_path()
self.assertEqual(len(l), 3)
self.assertIn('/dev/nova-volumes/volume-00000001', l)
self.assertIn('/dev/nova-volumes/volume-00000002', l)
self.assertIn('/dev/disk/by-path/ip-172.17.12.10:3260-iscsi-'
'iqn.2010-10.org.openstack:volume-00000001-lun-1', l)
def test_get_next_tid(self):
tid = volume_driver._get_next_tid()
self.assertEqual(1000002, tid)
def test_find_tid_found(self):
tid = volume_driver._find_tid(
'iqn.2010-10.org.openstack.baremetal:1000001-dev.vdc')
self.assertEqual(1000001, tid)
def test_find_tid_not_found(self):
tid = volume_driver._find_tid(
'iqn.2010-10.org.openstack.baremetal:1000002-dev.vdc')
self.assertIsNone(tid)
def test_get_iqn(self):
self.flags(iscsi_iqn_prefix='iqn.2012-12.a.b', group='baremetal')
iqn = volume_driver._get_iqn('instname', '/dev/vdx')
self.assertEqual('iqn.2012-12.a.b:instname-dev-vdx', iqn)
class FakeConf(object):
def __init__(self, source_path):
self.source_path = source_path
class BareMetalLibVirtVolumeDriverTestCase(test.TestCase):
def setUp(self):
super(BareMetalLibVirtVolumeDriverTestCase, self).setUp()
self.flags(volume_drivers=[
'fake=nova.virt.libvirt.volume.LibvirtFakeVolumeDriver',
'fake2=nova.virt.libvirt.volume.LibvirtFakeVolumeDriver',
], group='libvirt')
self.driver = volume_driver.LibvirtVolumeDriver(fake.FakeVirtAPI())
self.disk_info = {
'dev': 'vdc',
'bus': 'baremetal',
'type': 'baremetal',
}
self.connection_info = {'driver_volume_type': 'fake'}
self.mount_point = '/dev/vdc'
self.mount_device = 'vdc'
self.source_path = '/dev/sdx'
self.instance = {'uuid': '12345678-1234-1234-1234-123467890123456',
'name': 'instance-00000001'}
self.fixed_ips = [{'address': '10.2.3.4'},
{'address': '172.16.17.18'},
]
self.iqn = 'iqn.fake:instance-00000001-dev-vdc'
self.tid = 100
def test_init_loads_volume_drivers(self):
self.assertIsInstance(self.driver.volume_drivers['fake'],
libvirt_volume.LibvirtFakeVolumeDriver)
self.assertIsInstance(self.driver.volume_drivers['fake2'],
libvirt_volume.LibvirtFakeVolumeDriver)
self.assertEqual(len(self.driver.volume_drivers), 2)
def test_fake_connect_volume(self):
"""Check connect_volume returns without exceptions."""
self.driver._connect_volume(self.connection_info,
self.disk_info)
def test_volume_driver_method_ok(self):
fake_driver = self.driver.volume_drivers['fake']
self.mox.StubOutWithMock(fake_driver, 'connect_volume')
fake_driver.connect_volume(self.connection_info, self.disk_info)
self.mox.ReplayAll()
self.driver._connect_volume(self.connection_info,
self.disk_info)
def test_volume_driver_method_driver_type_not_found(self):
self.connection_info['driver_volume_type'] = 'qwerty'
self.assertRaises(exception.VolumeDriverNotFound,
self.driver._connect_volume,
self.connection_info,
self.disk_info)
def test_publish_iscsi(self):
self.mox.StubOutWithMock(volume_driver, '_get_iqn')
self.mox.StubOutWithMock(volume_driver, '_get_next_tid')
self.mox.StubOutWithMock(volume_driver, '_create_iscsi_export_tgtadm')
self.mox.StubOutWithMock(volume_driver, '_allow_iscsi_tgtadm')
volume_driver._get_iqn(self.instance['name'], self.mount_point).\
AndReturn(self.iqn)
volume_driver._get_next_tid().AndReturn(self.tid)
volume_driver._create_iscsi_export_tgtadm(self.source_path,
self.tid,
self.iqn)
volume_driver._allow_iscsi_tgtadm(self.tid,
self.fixed_ips[0]['address'])
volume_driver._allow_iscsi_tgtadm(self.tid,
self.fixed_ips[1]['address'])
self.mox.ReplayAll()
self.driver._publish_iscsi(self.instance,
self.mount_point,
self.fixed_ips,
self.source_path)
def test_depublish_iscsi_ok(self):
self.mox.StubOutWithMock(volume_driver, '_get_iqn')
self.mox.StubOutWithMock(volume_driver, '_find_tid')
self.mox.StubOutWithMock(volume_driver, '_delete_iscsi_export_tgtadm')
volume_driver._get_iqn(self.instance['name'], self.mount_point).\
AndReturn(self.iqn)
volume_driver._find_tid(self.iqn).AndReturn(self.tid)
volume_driver._delete_iscsi_export_tgtadm(self.tid)
self.mox.ReplayAll()
self.driver._depublish_iscsi(self.instance, self.mount_point)
def test_depublish_iscsi_do_nothing_if_tid_is_not_found(self):
self.mox.StubOutWithMock(volume_driver, '_get_iqn')
self.mox.StubOutWithMock(volume_driver, '_find_tid')
volume_driver._get_iqn(self.instance['name'], self.mount_point).\
AndReturn(self.iqn)
volume_driver._find_tid(self.iqn).AndReturn(None)
self.mox.ReplayAll()
self.driver._depublish_iscsi(self.instance, self.mount_point)
def test_attach_volume(self):
self.mox.StubOutWithMock(volume_driver, '_get_fixed_ips')
self.mox.StubOutWithMock(self.driver, '_connect_volume')
self.mox.StubOutWithMock(self.driver, '_publish_iscsi')
volume_driver._get_fixed_ips(self.instance).AndReturn(self.fixed_ips)
self.driver._connect_volume(self.connection_info, self.disk_info).\
AndReturn(FakeConf(self.source_path))
self.driver._publish_iscsi(self.instance, self.mount_point,
self.fixed_ips, self.source_path)
self.mox.ReplayAll()
self.driver.attach_volume(self.connection_info,
self.instance,
self.mount_point)
def test_detach_volume(self):
self.mox.StubOutWithMock(volume_driver, '_get_iqn')
self.mox.StubOutWithMock(volume_driver, '_find_tid')
self.mox.StubOutWithMock(volume_driver, '_delete_iscsi_export_tgtadm')
self.mox.StubOutWithMock(self.driver, '_disconnect_volume')
volume_driver._get_iqn(self.instance['name'], self.mount_point).\
AndReturn(self.iqn)
volume_driver._find_tid(self.iqn).AndReturn(self.tid)
volume_driver._delete_iscsi_export_tgtadm(self.tid)
self.driver._disconnect_volume(self.connection_info,
self.mount_device)
self.mox.ReplayAll()
self.driver.detach_volume(self.connection_info,
self.instance,
self.mount_point)
| apache-2.0 | 7,891,827,835,375,948,000 | 38.363946 | 78 | 0.586883 | false |
simonwydooghe/ansible | lib/ansible/modules/cloud/podman/podman_image_info.py | 21 | 9173 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2018 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = """
module: podman_image_info
author:
- Sam Doran (@samdoran)
version_added: '2.8'
short_description: Gather info about images using podman
notes:
    - Podman may require elevated privileges in order to run properly.
description:
- Gather info about images using C(podman)
options:
executable:
description:
- Path to C(podman) executable if it is not in the C($PATH) on the machine running C(podman)
default: 'podman'
type: str
name:
description:
      - List of tags or UIDs to gather info about. If no name is given, return info about all images.
"""
EXAMPLES = """
- name: Gather info for all images
podman_image_info:
- name: Gather info on a specific image
podman_image_info:
name: nginx
- name: Gather info on several images
podman_image_info:
name:
- redis
- quay.io/bitnami/wildfly
"""
RETURN = """
images:
description: info from all or specified images
returned: always
type: dict
sample: [
{
"Annotations": {},
"Architecture": "amd64",
"Author": "",
"Comment": "from Bitnami with love",
"ContainerConfig": {
"Cmd": [
"nami",
"start",
"--foreground",
"wildfly"
],
"Entrypoint": [
"/app-entrypoint.sh"
],
"Env": [
"PATH=/opt/bitnami/java/bin:/opt/bitnami/wildfly/bin:/opt/bitnami/nami/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
"IMAGE_OS=debian-9",
"NAMI_VERSION=0.0.9-0",
"GPG_KEY_SERVERS_LIST=ha.pool.sks-keyservers.net \
hkp://p80.pool.sks-keyservers.net:80 keyserver.ubuntu.com hkp://keyserver.ubuntu.com:80 pgp.mit.edu",
"TINI_VERSION=v0.13.2",
"TINI_GPG_KEY=595E85A6B1B4779EA4DAAEC70B588DFF0527A9B7",
"GOSU_VERSION=1.10",
"GOSU_GPG_KEY=B42F6819007F00F88E364FD4036A9C25BF357DD4",
"BITNAMI_IMAGE_VERSION=14.0.1-debian-9-r12",
"BITNAMI_APP_NAME=wildfly",
"WILDFLY_JAVA_HOME=",
"WILDFLY_JAVA_OPTS=",
"WILDFLY_MANAGEMENT_HTTP_PORT_NUMBER=9990",
"WILDFLY_PASSWORD=bitnami",
"WILDFLY_PUBLIC_CONSOLE=true",
"WILDFLY_SERVER_AJP_PORT_NUMBER=8009",
"WILDFLY_SERVER_HTTP_PORT_NUMBER=8080",
"WILDFLY_SERVER_INTERFACE=0.0.0.0",
"WILDFLY_USERNAME=user",
"WILDFLY_WILDFLY_HOME=/home/wildfly",
"WILDFLY_WILDFLY_OPTS=-Dwildfly.as.deployment.ondemand=false"
],
"ExposedPorts": {
"8080/tcp": {},
"9990/tcp": {}
},
"Labels": {
"maintainer": "Bitnami <[email protected]>"
}
},
"Created": "2018-09-25T04:07:45.934395523Z",
"Digest": "sha256:5c7d8e2dd66dcf4a152a4032a1d3c5a33458c67e1c1335edd8d18d738892356b",
"GraphDriver": {
"Data": {
"LowerDir": "/var/lib/containers/storage/overlay/a9dbf5616cc16919a8ac0dfc60aff87a72b5be52994c4649fcc91a089a12931\
f/diff:/var/lib/containers/storage/overlay/67129bd46022122a7d8b7acb490092af6c7ce244ce4fbd7d9e2d2b7f5979e090/diff:/var/lib/containers/storage/overlay/7c51242c\
4c5db5c74afda76d7fdbeab6965d8b21804bb3fc597dee09c770b0ca/diff:/var/lib/containers/storage/overlay/f97315dc58a9c002ba0cabccb9933d4b0d2113733d204188c88d72f75569b57b/diff:/var/lib/containers/storage/overlay/1dbde2dd497ddde2b467727125b900958a051a72561e58d29abe3d660dcaa9a7/diff:/var/lib/containers/storage/overlay/4aad9d80f30c3f0608f58173558b7554d84dee4dc4479672926eca29f75e6e33/diff:/var/lib/containers/storage/overlay/6751fc9b6868254870c062d75a511543fc8cfda2ce6262f4945f107449219632/diff:/var/lib/containers/storage/overlay/a27034d79081347421dd24d7e9e776c18271cd9a6e51053cb39af4d3d9c400e8/diff:/var/lib/containers/storage/overlay/537cf0045ed9cd7989f7944e7393019c81b16c1799a2198d8348cd182665397f/diff:/var/lib/containers/storage/overlay/27578615c5ae352af4e8449862d61aaf5c11b105a7d5905af55bd01b0c656d6e/diff:/var/lib/containers/storage/overlay/566542742840fe3034b3596f7cb9e62a6274c95a69f368f9e713746f8712c0b6/diff",
"MergedDir": "/var/lib/containers/storage/overlay/72bb96d6\
c53ad57a0b1e44cab226a6251598accbead40b23fac89c19ad8c25ca/merged",
"UpperDir": "/var/lib/containers/storage/overlay/72bb96d6c53ad57a0b1e44cab226a6251598accbead40b23fac89c19ad8c25ca/diff",
"WorkDir": "/var/lib/containers/storage/overlay/72bb96d6c53ad57a0b1e44cab226a6251598accbead40b23fac89c19ad8c25ca/work"
},
"Name": "overlay"
},
"Id": "bcacbdf7a119c0fa934661ca8af839e625ce6540d9ceb6827cdd389f823d49e0",
"Labels": {
"maintainer": "Bitnami <[email protected]>"
},
"ManifestType": "application/vnd.docker.distribution.manifest.v1+prettyjws",
"Os": "linux",
"Parent": "",
"RepoDigests": [
"quay.io/bitnami/wildfly@sha256:5c7d8e2dd66dcf4a152a4032a1d3c5a33458c67e1c1335edd8d18d738892356b"
],
"RepoTags": [
"quay.io/bitnami/wildfly:latest"
],
"RootFS": {
"Layers": [
"sha256:75391df2c87e076b0c2f72d20c95c57dc8be7ee684cc07273416cce622b43367",
"sha256:7dd303f041039bfe8f0833092673ac35f93137d10e0fbc4302021ea65ad57731",
"sha256:720d9edf0cd2a9bb56b88b80be9070dbfaad359514c70094c65066963fed485d",
"sha256:6a567ecbf97725501a634fcb486271999aa4591b633b4ae9932a46b40f5aaf47",
"sha256:59e9a6db8f178f3da868614564faabb2820cdfb69be32e63a4405d6f7772f68c",
"sha256:310a82ccb092cd650215ab375da8943d235a263af9a029b8ac26a281446c04db",
"sha256:36cb91cf4513543a8f0953fed785747ea18b675bc2677f3839889cfca0aac79e"
],
"Type": "layers"
},
"Size": 569919342,
"User": "",
"Version": "17.06.0-ce",
"VirtualSize": 569919342
}
]
"""
import json
from ansible.module_utils.basic import AnsibleModule
def image_exists(module, executable, name):
command = [executable, 'image', 'exists', name]
rc, out, err = module.run_command(command)
if rc == 1:
return False
elif 'Command "exists" not found' in err:
# The 'exists' test is available in podman >= 0.12.1
command = [executable, 'image', 'ls', '-q', name]
rc2, out2, err2 = module.run_command(command)
if rc2 != 0:
return False
return True
def filter_invalid_names(module, executable, name):
valid_names = []
names = name
if not isinstance(name, list):
names = [name]
for name in names:
if image_exists(module, executable, name):
valid_names.append(name)
return valid_names
def get_image_info(module, executable, name):
names = name
if not isinstance(name, list):
names = [name]
if len(names) > 0:
command = [executable, 'image', 'inspect']
command.extend(names)
rc, out, err = module.run_command(command)
if rc != 0:
module.fail_json(msg="Unable to gather info for '{0}': {1}".format(', '.join(names), err))
return out
else:
return json.dumps([])
def get_all_image_info(module, executable):
command = [executable, 'image', 'ls', '-q']
rc, out, err = module.run_command(command)
name = out.strip().split('\n')
out = get_image_info(module, executable, name)
return out
def main():
module = AnsibleModule(
argument_spec=dict(
executable=dict(type='str', default='podman'),
name=dict(type='list')
),
supports_check_mode=True,
)
executable = module.params['executable']
name = module.params.get('name')
executable = module.get_bin_path(executable, required=True)
if name:
valid_names = filter_invalid_names(module, executable, name)
results = json.loads(get_image_info(module, executable, valid_names))
else:
results = json.loads(get_all_image_info(module, executable))
results = dict(
changed=False,
images=results
)
module.exit_json(**results)
if __name__ == '__main__':
main()
| gpl-3.0 | 5,724,656,344,407,487,000 | 37.380753 | 911 | 0.613976 | false |
dhoffman34/django | django/utils/lorem_ipsum.py | 81 | 4910 | """
Utility functions for generating "lorem ipsum" Latin text.
"""
from __future__ import unicode_literals
import random
COMMON_P = 'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.'
WORDS = ('exercitationem', 'perferendis', 'perspiciatis', 'laborum', 'eveniet',
'sunt', 'iure', 'nam', 'nobis', 'eum', 'cum', 'officiis', 'excepturi',
'odio', 'consectetur', 'quasi', 'aut', 'quisquam', 'vel', 'eligendi',
'itaque', 'non', 'odit', 'tempore', 'quaerat', 'dignissimos',
'facilis', 'neque', 'nihil', 'expedita', 'vitae', 'vero', 'ipsum',
'nisi', 'animi', 'cumque', 'pariatur', 'velit', 'modi', 'natus',
'iusto', 'eaque', 'sequi', 'illo', 'sed', 'ex', 'et', 'voluptatibus',
'tempora', 'veritatis', 'ratione', 'assumenda', 'incidunt', 'nostrum',
'placeat', 'aliquid', 'fuga', 'provident', 'praesentium', 'rem',
'necessitatibus', 'suscipit', 'adipisci', 'quidem', 'possimus',
'voluptas', 'debitis', 'sint', 'accusantium', 'unde', 'sapiente',
'voluptate', 'qui', 'aspernatur', 'laudantium', 'soluta', 'amet',
'quo', 'aliquam', 'saepe', 'culpa', 'libero', 'ipsa', 'dicta',
'reiciendis', 'nesciunt', 'doloribus', 'autem', 'impedit', 'minima',
'maiores', 'repudiandae', 'ipsam', 'obcaecati', 'ullam', 'enim',
'totam', 'delectus', 'ducimus', 'quis', 'voluptates', 'dolores',
'molestiae', 'harum', 'dolorem', 'quia', 'voluptatem', 'molestias',
'magni', 'distinctio', 'omnis', 'illum', 'dolorum', 'voluptatum', 'ea',
'quas', 'quam', 'corporis', 'quae', 'blanditiis', 'atque', 'deserunt',
'laboriosam', 'earum', 'consequuntur', 'hic', 'cupiditate',
'quibusdam', 'accusamus', 'ut', 'rerum', 'error', 'minus', 'eius',
'ab', 'ad', 'nemo', 'fugit', 'officia', 'at', 'in', 'id', 'quos',
'reprehenderit', 'numquam', 'iste', 'fugiat', 'sit', 'inventore',
'beatae', 'repellendus', 'magnam', 'recusandae', 'quod', 'explicabo',
'doloremque', 'aperiam', 'consequatur', 'asperiores', 'commodi',
'optio', 'dolor', 'labore', 'temporibus', 'repellat', 'veniam',
'architecto', 'est', 'esse', 'mollitia', 'nulla', 'a', 'similique',
'eos', 'alias', 'dolore', 'tenetur', 'deleniti', 'porro', 'facere',
'maxime', 'corrupti')
COMMON_WORDS = ('lorem', 'ipsum', 'dolor', 'sit', 'amet', 'consectetur',
'adipisicing', 'elit', 'sed', 'do', 'eiusmod', 'tempor', 'incididunt',
'ut', 'labore', 'et', 'dolore', 'magna', 'aliqua')
def sentence():
"""
Returns a randomly generated sentence of lorem ipsum text.
The first word is capitalized, and the sentence ends in either a period or
question mark. Commas are added at random.
"""
# Determine the number of comma-separated sections and number of words in
# each section for this sentence.
sections = [' '.join(random.sample(WORDS, random.randint(3, 12))) for i in range(random.randint(1, 5))]
s = ', '.join(sections)
# Convert to sentence case and add end punctuation.
return '%s%s%s' % (s[0].upper(), s[1:], random.choice('?.'))
def paragraph():
"""
Returns a randomly generated paragraph of lorem ipsum text.
The paragraph consists of between 1 and 4 sentences, inclusive.
"""
return ' '.join(sentence() for i in range(random.randint(1, 4)))
def paragraphs(count, common=True):
"""
Returns a list of paragraphs as returned by paragraph().
If `common` is True, then the first paragraph will be the standard
'lorem ipsum' paragraph. Otherwise, the first paragraph will be random
Latin text. Either way, subsequent paragraphs will be random Latin text.
"""
paras = []
for i in range(count):
if common and i == 0:
paras.append(COMMON_P)
else:
paras.append(paragraph())
return paras
def words(count, common=True):
"""
Returns a string of `count` lorem ipsum words separated by a single space.
If `common` is True, then the first 19 words will be the standard
'lorem ipsum' words. Otherwise, all words will be selected randomly.
"""
if common:
word_list = list(COMMON_WORDS)
else:
word_list = []
c = len(word_list)
if count > c:
count -= c
while count > 0:
c = min(count, len(WORDS))
count -= c
word_list += random.sample(WORDS, c)
else:
word_list = word_list[:count]
return ' '.join(word_list)
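# Illustrative usage sketch (not part of the original module); sentence() and
# paragraph() are random, while words() with common=True starts from the
# fixed COMMON_WORDS list:
#
#     >>> from django.utils.lorem_ipsum import words, paragraphs
#     >>> words(5)
#     'lorem ipsum dolor sit amet'
#     >>> len(paragraphs(3, common=False))
#     3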
| bsd-3-clause | 538,633,246,083,180,400 | 44.88785 | 459 | 0.60998 | false |
ESS-LLP/erpnext | erpnext/patches/v11_0/change_healthcare_desktop_icons.py | 4 | 2450 | import frappe
from frappe import _
change_icons_map = [
{
"module_name": "Patient",
"color": "#6BE273",
"icon": "fa fa-user",
"doctype": "Patient",
"type": "link",
"link": "List/Patient",
"label": _("Patient")
},
{
"module_name": "Patient Encounter",
"color": "#2ecc71",
"icon": "fa fa-stethoscope",
"doctype": "Patient Encounter",
"type": "link",
"link": "List/Patient Encounter",
"label": _("Patient Encounter"),
},
{
"module_name": "Healthcare Practitioner",
"color": "#2ecc71",
"icon": "fa fa-user-md",
"doctype": "Healthcare Practitioner",
"type": "link",
"link": "List/Healthcare Practitioner",
"label": _("Healthcare Practitioner")
},
{
"module_name": "Patient Appointment",
"color": "#934F92",
"icon": "fa fa-calendar-plus-o",
"doctype": "Patient Appointment",
"type": "link",
"link": "List/Patient Appointment",
"label": _("Patient Appointment")
},
{
"module_name": "Lab Test",
"color": "#7578f6",
"icon": "octicon octicon-beaker",
"doctype": "Lab Test",
"type": "link",
"link": "List/Lab Test",
"label": _("Lab Test")
}
]
def execute():
change_healthcare_desktop_icons()
def change_healthcare_desktop_icons():
doctypes = ["patient", "patient_encounter", "healthcare_practitioner",
"patient_appointment", "lab_test"]
for doctype in doctypes:
frappe.reload_doc("healthcare", "doctype", doctype)
for spec in change_icons_map:
frappe.db.sql("""
delete from `tabDesktop Icon`
where _doctype = '{0}'
""".format(spec['doctype']))
desktop_icon = frappe.new_doc("Desktop Icon")
desktop_icon.hidden = 1
desktop_icon.standard = 1
desktop_icon.icon = spec['icon']
desktop_icon.color = spec['color']
desktop_icon.module_name = spec['module_name']
desktop_icon.label = spec['label']
desktop_icon.app = "erpnext"
desktop_icon.type = spec['type']
desktop_icon._doctype = spec['doctype']
desktop_icon.link = spec['link']
desktop_icon.save(ignore_permissions=True)
frappe.db.sql("""
delete from `tabDesktop Icon`
where module_name = 'Healthcare' and type = 'module'
""")
desktop_icon = frappe.new_doc("Desktop Icon")
desktop_icon.hidden = 1
desktop_icon.standard = 1
desktop_icon.icon = "fa fa-heartbeat"
desktop_icon.color = "#FF888B"
desktop_icon.module_name = "Healthcare"
desktop_icon.label = _("Healthcare")
desktop_icon.app = "erpnext"
desktop_icon.type = 'module'
desktop_icon.save(ignore_permissions=True)
| gpl-3.0 | -147,861,678,315,254,530 | 25.344086 | 71 | 0.650204 | false |
HesselTjeerdsma/Cyber-Physical-Pacman-Game | Algor/flask/lib/python2.7/site-packages/urllib3/contrib/_securetransport/low_level.py | 136 | 12062 | """
Low-level helpers for the SecureTransport bindings.
These are Python functions that are not directly related to the high-level APIs
but are necessary to get them to work. They include a whole bunch of low-level
CoreFoundation messing about and memory management. The concerns in this module
are almost entirely about trying to avoid memory leaks and providing
appropriate and useful assistance to the higher-level code.
"""
import base64
import ctypes
import itertools
import re
import os
import ssl
import tempfile
from .bindings import Security, CoreFoundation, CFConst
# This regular expression is used to grab PEM data out of a PEM bundle.
_PEM_CERTS_RE = re.compile(
b"-----BEGIN CERTIFICATE-----\n(.*?)\n-----END CERTIFICATE-----", re.DOTALL
)
def _cf_data_from_bytes(bytestring):
"""
Given a bytestring, create a CFData object from it. This CFData object must
be CFReleased by the caller.
"""
return CoreFoundation.CFDataCreate(
CoreFoundation.kCFAllocatorDefault, bytestring, len(bytestring)
)
def _cf_dictionary_from_tuples(tuples):
"""
Given a list of Python tuples, create an associated CFDictionary.
"""
dictionary_size = len(tuples)
# We need to get the dictionary keys and values out in the same order.
keys = (t[0] for t in tuples)
values = (t[1] for t in tuples)
cf_keys = (CoreFoundation.CFTypeRef * dictionary_size)(*keys)
cf_values = (CoreFoundation.CFTypeRef * dictionary_size)(*values)
return CoreFoundation.CFDictionaryCreate(
CoreFoundation.kCFAllocatorDefault,
cf_keys,
cf_values,
dictionary_size,
CoreFoundation.kCFTypeDictionaryKeyCallBacks,
CoreFoundation.kCFTypeDictionaryValueCallBacks,
)
def _cf_string_to_unicode(value):
"""
Creates a Unicode string from a CFString object. Used entirely for error
reporting.
Yes, it annoys me quite a lot that this function is this complex.
"""
value_as_void_p = ctypes.cast(value, ctypes.POINTER(ctypes.c_void_p))
string = CoreFoundation.CFStringGetCStringPtr(
value_as_void_p,
CFConst.kCFStringEncodingUTF8
)
if string is None:
buffer = ctypes.create_string_buffer(1024)
result = CoreFoundation.CFStringGetCString(
value_as_void_p,
buffer,
1024,
CFConst.kCFStringEncodingUTF8
)
if not result:
raise OSError('Error copying C string from CFStringRef')
string = buffer.value
if string is not None:
string = string.decode('utf-8')
return string
def _assert_no_error(error, exception_class=None):
"""
Checks the return code and throws an exception if there is an error to
report
"""
if error == 0:
return
cf_error_string = Security.SecCopyErrorMessageString(error, None)
output = _cf_string_to_unicode(cf_error_string)
CoreFoundation.CFRelease(cf_error_string)
if output is None or output == u'':
output = u'OSStatus %s' % error
if exception_class is None:
exception_class = ssl.SSLError
raise exception_class(output)
def _cert_array_from_pem(pem_bundle):
"""
Given a bundle of certs in PEM format, turns them into a CFArray of certs
that can be used to validate a cert chain.
"""
der_certs = [
base64.b64decode(match.group(1))
for match in _PEM_CERTS_RE.finditer(pem_bundle)
]
if not der_certs:
raise ssl.SSLError("No root certificates specified")
cert_array = CoreFoundation.CFArrayCreateMutable(
CoreFoundation.kCFAllocatorDefault,
0,
ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks)
)
if not cert_array:
raise ssl.SSLError("Unable to allocate memory!")
try:
for der_bytes in der_certs:
certdata = _cf_data_from_bytes(der_bytes)
if not certdata:
raise ssl.SSLError("Unable to allocate memory!")
cert = Security.SecCertificateCreateWithData(
CoreFoundation.kCFAllocatorDefault, certdata
)
CoreFoundation.CFRelease(certdata)
if not cert:
raise ssl.SSLError("Unable to build cert object!")
CoreFoundation.CFArrayAppendValue(cert_array, cert)
CoreFoundation.CFRelease(cert)
    except Exception:
        # We need to free the array before the exception bubbles further.
        # We only want to do that if an error occurs: otherwise, the caller
        # should free.
        CoreFoundation.CFRelease(cert_array)
        raise
return cert_array
def _is_cert(item):
"""
Returns True if a given CFTypeRef is a certificate.
"""
expected = Security.SecCertificateGetTypeID()
return CoreFoundation.CFGetTypeID(item) == expected
def _is_identity(item):
"""
Returns True if a given CFTypeRef is an identity.
"""
expected = Security.SecIdentityGetTypeID()
return CoreFoundation.CFGetTypeID(item) == expected
def _temporary_keychain():
"""
This function creates a temporary Mac keychain that we can use to work with
credentials. This keychain uses a one-time password and a temporary file to
store the data. We expect to have one keychain per socket. The returned
SecKeychainRef must be freed by the caller, including calling
SecKeychainDelete.
Returns a tuple of the SecKeychainRef and the path to the temporary
directory that contains it.
"""
# Unfortunately, SecKeychainCreate requires a path to a keychain. This
# means we cannot use mkstemp to use a generic temporary file. Instead,
# we're going to create a temporary directory and a filename to use there.
# This filename will be 8 random bytes expanded into base64. We also need
# some random bytes to password-protect the keychain we're creating, so we
# ask for 40 random bytes.
random_bytes = os.urandom(40)
filename = base64.b64encode(random_bytes[:8]).decode('utf-8')
password = base64.b64encode(random_bytes[8:]) # Must be valid UTF-8
tempdirectory = tempfile.mkdtemp()
keychain_path = os.path.join(tempdirectory, filename).encode('utf-8')
# We now want to create the keychain itself.
keychain = Security.SecKeychainRef()
status = Security.SecKeychainCreate(
keychain_path,
len(password),
password,
False,
None,
ctypes.byref(keychain)
)
_assert_no_error(status)
# Having created the keychain, we want to pass it off to the caller.
return keychain, tempdirectory
def _load_items_from_file(keychain, path):
"""
Given a single file, loads all the trust objects from it into arrays and
the keychain.
Returns a tuple of lists: the first list is a list of identities, the
second a list of certs.
"""
certificates = []
identities = []
result_array = None
with open(path, 'rb') as f:
raw_filedata = f.read()
try:
filedata = CoreFoundation.CFDataCreate(
CoreFoundation.kCFAllocatorDefault,
raw_filedata,
len(raw_filedata)
)
result_array = CoreFoundation.CFArrayRef()
result = Security.SecItemImport(
filedata, # cert data
None, # Filename, leaving it out for now
None, # What the type of the file is, we don't care
None, # what's in the file, we don't care
0, # import flags
None, # key params, can include passphrase in the future
keychain, # The keychain to insert into
ctypes.byref(result_array) # Results
)
_assert_no_error(result)
# A CFArray is not very useful to us as an intermediary
# representation, so we are going to extract the objects we want
# and then free the array. We don't need to keep hold of keys: the
# keychain already has them!
result_count = CoreFoundation.CFArrayGetCount(result_array)
for index in range(result_count):
item = CoreFoundation.CFArrayGetValueAtIndex(
result_array, index
)
item = ctypes.cast(item, CoreFoundation.CFTypeRef)
if _is_cert(item):
CoreFoundation.CFRetain(item)
certificates.append(item)
elif _is_identity(item):
CoreFoundation.CFRetain(item)
identities.append(item)
finally:
if result_array:
CoreFoundation.CFRelease(result_array)
CoreFoundation.CFRelease(filedata)
return (identities, certificates)
def _load_client_cert_chain(keychain, *paths):
"""
Load certificates and maybe keys from a number of files. Has the end goal
of returning a CFArray containing one SecIdentityRef, and then zero or more
SecCertificateRef objects, suitable for use as a client certificate trust
chain.
"""
# Ok, the strategy.
#
# This relies on knowing that macOS will not give you a SecIdentityRef
# unless you have imported a key into a keychain. This is a somewhat
# artificial limitation of macOS (for example, it doesn't necessarily
# affect iOS), but there is nothing inside Security.framework that lets you
# get a SecIdentityRef without having a key in a keychain.
#
# So the policy here is we take all the files and iterate them in order.
# Each one will use SecItemImport to have one or more objects loaded from
# it. We will also point at a keychain that macOS can use to work with the
# private key.
#
# Once we have all the objects, we'll check what we actually have. If we
# already have a SecIdentityRef in hand, fab: we'll use that. Otherwise,
# we'll take the first certificate (which we assume to be our leaf) and
# ask the keychain to give us a SecIdentityRef with that cert's associated
# key.
#
# We'll then return a CFArray containing the trust chain: one
# SecIdentityRef and then zero-or-more SecCertificateRef objects. The
# responsibility for freeing this CFArray will be with the caller. This
# CFArray must remain alive for the entire connection, so in practice it
# will be stored with a single SSLSocket, along with the reference to the
# keychain.
certificates = []
identities = []
# Filter out bad paths.
paths = (path for path in paths if path)
try:
for file_path in paths:
new_identities, new_certs = _load_items_from_file(
keychain, file_path
)
identities.extend(new_identities)
certificates.extend(new_certs)
# Ok, we have everything. The question is: do we have an identity? If
# not, we want to grab one from the first cert we have.
if not identities:
new_identity = Security.SecIdentityRef()
status = Security.SecIdentityCreateWithCertificate(
keychain,
certificates[0],
ctypes.byref(new_identity)
)
_assert_no_error(status)
identities.append(new_identity)
# We now want to release the original certificate, as we no longer
# need it.
CoreFoundation.CFRelease(certificates.pop(0))
# We now need to build a new CFArray that holds the trust chain.
trust_chain = CoreFoundation.CFArrayCreateMutable(
CoreFoundation.kCFAllocatorDefault,
0,
ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks),
)
for item in itertools.chain(identities, certificates):
# ArrayAppendValue does a CFRetain on the item. That's fine,
# because the finally block will release our other refs to them.
CoreFoundation.CFArrayAppendValue(trust_chain, item)
return trust_chain
finally:
for obj in itertools.chain(identities, certificates):
CoreFoundation.CFRelease(obj)
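# Hedged usage sketch (not part of the original module): higher-level
# SecureTransport code is expected to combine the helpers above roughly as
# below. The certificate paths are placeholders, and `shutil` would need to
# be imported for the cleanup step.
#
#     keychain, keychain_dir = _temporary_keychain()
#     try:
#         trust_chain = _load_client_cert_chain(
#             keychain, "/path/to/client-cert.pem", "/path/to/client-key.pem")
#         ...  # hand trust_chain to SSLSetCertificate on the SSL context
#     finally:
#         Security.SecKeychainDelete(keychain)
#         CoreFoundation.CFRelease(keychain)
#         shutil.rmtree(keychain_dir, ignore_errors=True)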
| apache-2.0 | -2,842,825,234,003,177,500 | 34.166181 | 79 | 0.657685 | false |
wolfv/AutobahnPython | examples/twisted/websocket/streaming/frame_based_server.py | 18 | 2612 | ###############################################################################
##
## Copyright (C) 2011-2013 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
import hashlib
from twisted.internet import reactor
from autobahn.twisted.websocket import WebSocketServerFactory, \
WebSocketServerProtocol, \
listenWS
class FrameBasedHashServerProtocol(WebSocketServerProtocol):
"""
   Frame-based WebSocket server that computes a running SHA-256 over the
   message data it receives. After every frame received it responds with the
   digest computed up to that point. It can receive messages made up of an
   unlimited number of frames; the digest is reset for each new message.
"""
def onMessageBegin(self, isBinary):
WebSocketServerProtocol.onMessageBegin(self, isBinary)
self.sha256 = hashlib.sha256()
def onMessageFrame(self, payload):
l = 0
for data in payload:
l += len(data)
self.sha256.update(data)
digest = self.sha256.hexdigest()
print("Received frame with payload length {}, compute digest: {}".format(l, digest))
self.sendMessage(digest.encode('utf8'))
def onMessageEnd(self):
self.sha256 = None
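# Hedged sketch of the sending side (not part of this example file): a
# matching frame-based client would stream one message as several frames,
# and the server above answers each frame with the running digest. Roughly:
#
#     class FrameBasedHashClientProtocol(WebSocketClientProtocol):
#         def onOpen(self):
#             self.beginMessage(isBinary=True)
#             for chunk in payload_chunks:        # placeholder iterable
#                 self.sendMessageFrame(chunk)
#             self.endMessage()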
if __name__ == '__main__':
factory = WebSocketServerFactory("ws://localhost:9000")
factory.protocol = FrameBasedHashServerProtocol
enableCompression = False
if enableCompression:
from autobahn.websocket.compress import PerMessageDeflateOffer, \
PerMessageDeflateOfferAccept
## Function to accept offers from the client ..
def accept(offers):
for offer in offers:
if isinstance(offer, PerMessageDeflateOffer):
return PerMessageDeflateOfferAccept(offer)
factory.setProtocolOptions(perMessageCompressionAccept = accept)
listenWS(factory)
reactor.run()
| apache-2.0 | -4,212,204,309,070,818,300 | 35.314286 | 90 | 0.621746 | false |
klmitch/nova | nova/tests/functional/api_sample_tests/test_security_groups.py | 4 | 6316 | # Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.tests.functional.api_sample_tests import test_servers
import nova.tests.functional.api_samples_test_base as astb
def fake_get(*args, **kwargs):
nova_group = {}
nova_group['id'] = 1
nova_group['description'] = 'default'
nova_group['name'] = 'default'
nova_group['project_id'] = astb.PROJECT_ID
nova_group['rules'] = []
return nova_group
def fake_get_instances_security_groups_bindings(context, servers,
detailed=False):
result = {}
for s in servers:
result[s.get('id')] = [{'name': 'test'}]
return result
def fake_add_to_instance(context, instance, security_group_name):
pass
def fake_remove_from_instance(context, instance, security_group_name):
pass
def fake_list(context, names=None, ids=None, project=None, search_opts=None):
return [fake_get()]
def fake_get_instance_security_groups(context, instance_uuid,
detailed=False):
return [fake_get()]
def fake_create_security_group(context, name, description):
return fake_get()
def fake_create_security_group_rule(context, security_group, new_rule):
return {
'from_port': 22,
'to_port': 22,
'cidr': '10.0.0.0/24',
'id': '00000000-0000-0000-0000-000000000000',
'parent_group_id': '11111111-1111-1111-1111-111111111111',
'protocol': 'tcp',
'group_id': None
}
def fake_remove_rules(context, security_group, rule_ids):
pass
def fake_get_rule(context, id):
return {
'id': id,
'parent_group_id': '11111111-1111-1111-1111-111111111111'
}
class SecurityGroupsJsonTest(test_servers.ServersSampleBase):
sample_dir = 'os-security-groups'
def setUp(self):
super(SecurityGroupsJsonTest, self).setUp()
path = 'nova.network.security_group_api.'
self.stub_out(path + 'get', fake_get)
self.stub_out(path + 'get_instances_security_groups_bindings',
fake_get_instances_security_groups_bindings)
self.stub_out(path + 'add_to_instance', fake_add_to_instance)
self.stub_out(path + 'remove_from_instance', fake_remove_from_instance)
self.stub_out(path + 'list', fake_list)
self.stub_out(path + 'get_instance_security_groups',
fake_get_instance_security_groups)
self.stub_out(path + 'create_security_group',
fake_create_security_group)
self.stub_out(path + 'create_security_group_rule',
fake_create_security_group_rule)
self.stub_out(path + 'remove_rules',
fake_remove_rules)
self.stub_out(path + 'get_rule',
fake_get_rule)
def _get_create_subs(self):
return {
'group_name': 'default',
"description": "default",
}
def _create_security_group(self):
subs = self._get_create_subs()
return self._do_post('os-security-groups',
'security-group-post-req', subs)
def _add_group(self, uuid):
subs = {
'group_name': 'test'
}
return self._do_post('servers/%s/action' % uuid,
'security-group-add-post-req', subs)
def test_security_group_create(self):
response = self._create_security_group()
subs = self._get_create_subs()
self._verify_response('security-groups-create-resp', subs,
response, 200)
def test_security_groups_list(self):
# Get api sample of security groups get list request.
response = self._do_get('os-security-groups')
self._verify_response('security-groups-list-get-resp',
{}, response, 200)
def test_security_groups_get(self):
# Get api sample of security groups get request.
security_group_id = '11111111-1111-1111-1111-111111111111'
response = self._do_get('os-security-groups/%s' % security_group_id)
self._verify_response('security-groups-get-resp', {}, response, 200)
def test_security_groups_list_server(self):
# Get api sample of security groups for a specific server.
uuid = self._post_server()
response = self._do_get('servers/%s/os-security-groups' % uuid)
self._verify_response('server-security-groups-list-resp',
{}, response, 200)
def test_security_groups_add(self):
self._create_security_group()
uuid = self._post_server()
response = self._add_group(uuid)
self.assertEqual(202, response.status_code)
self.assertEqual('', response.text)
def test_security_groups_remove(self):
self._create_security_group()
uuid = self._post_server()
self._add_group(uuid)
subs = {
'group_name': 'test'
}
response = self._do_post('servers/%s/action' % uuid,
'security-group-remove-post-req', subs)
self.assertEqual(202, response.status_code)
self.assertEqual('', response.text)
def test_security_group_rules_create(self):
response = self._do_post('os-security-group-rules',
'security-group-rules-post-req', {})
self._verify_response('security-group-rules-post-resp', {}, response,
200)
def test_security_group_rules_remove(self):
response = self._do_delete(
'os-security-group-rules/00000000-0000-0000-0000-000000000000')
self.assertEqual(202, response.status_code)
| apache-2.0 | 2,508,124,101,927,034,000 | 34.886364 | 79 | 0.600697 | false |
OpringaoDoTurno/airflow | airflow/migrations/versions/8504051e801b_xcom_dag_task_indices.py | 46 | 1080 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""xcom dag task indices
Revision ID: 8504051e801b
Revises: 4addfa1236f1
Create Date: 2016-11-29 08:13:03.253312
"""
# revision identifiers, used by Alembic.
revision = '8504051e801b'
down_revision = '4addfa1236f1'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_index('idx_xcom_dag_task_date', 'xcom', ['dag_id', 'task_id', 'execution_date'], unique=False)
def downgrade():
op.drop_index('idx_xcom_dag_task_date', table_name='xcom')
| apache-2.0 | 4,510,521,712,680,377,300 | 27.421053 | 108 | 0.734259 | false |
mathwuyue/py-wireless-sys-sim | d2d/rrm.py | 1 | 8801 | import operator
import itertools
import numpy as np
import scipy.optimize
from core import cal_thermal_noise, cal_umi_nlos, cal_umi_exp_los
from functools import reduce
def _sum(func, *args):
return reduce(operator.add, map(func, *args), 0)
def cal_D2D_basic_tp(d2d_ues, g_d2d_bs, kappa, bw, alpha, freq):
"""
This function calculates the transmit power for D2D UEs (Spectrum Sharing Scheme Between Cellular Users and Ad-hoc Device-to-Device Users)
Args:
d2d_ues (numpy array): d2d_ues positions
g_d2d_cc (): channel gain between d2d and cc ues
kappa (float): scale param for cc
bw (float): bandwidth for d2d_ues
alpha (float): pathloss parameter
freq (float): frequency
Returns:
numpy array. The transmit power of D2D UEs.
"""
noise = cal_thermal_noise(bw, 273)
pathloss = cal_umi_nlos(np.abs(d2d_ues), alpha, freq)
return (kappa - 1) * pathloss * noise / g_d2d_bs
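# Illustrative call (not part of the original module); all numbers are made
# up and only indicate the expected shapes: complex UE positions relative to
# the BS, per-link channel gains, bandwidth and carrier frequency in Hz.
#
#     d2d_ues = np.array([30 + 40j, -25 + 60j])
#     g_d2d_bs = np.array([1e-9, 5e-10])
#     p_tx = cal_D2D_basic_tp(d2d_ues, g_d2d_bs, kappa=1.5,
#                             bw=180e3, alpha=3.5, freq=2e9)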
def cal_D2D_opt_tp(d2d_ues, cc_ues,
pmax_d, pmax_c,
g_d2d_bs, g_cc, g_d2d, g_cc_d2d,
sinr_d2d, sinr_cc,
bw, alpha, freq):
"""
This function calculates the RRM for D2D UEs (Device-to-Device Communications
Underlaying Cellular Networks)
Args:
    d2d_ues (numpy array): D2D UE positions
    cc_ues (numpy array): cellular UE positions
    pmax_d, pmax_c (float): maximum transmit power for D2D and cellular UEs
    g_d2d_bs, g_cc, g_d2d, g_cc_d2d: channel gains (D2D-to-BS, cellular-to-BS,
        D2D link, cellular-to-D2D)
    sinr_d2d, sinr_cc (float): target SINR for the D2D and cellular links
    bw (float): bandwidth for d2d_ues
    alpha (float): pathloss parameter
    freq (float): frequency
Returns:
list of numpy array. The transmit power of D2D UEs and CC UEs.
::TODO: only consider one D2D
"""
noise = cal_thermal_noise(bw, 273)
# set up reuse array
idx_avail = []
p_c = (g_d2d*sinr_cc+g_d2d_bs*sinr_cc*sinr_d2d)*noise / \
(g_d2d*g_cc-sinr_d2d*sinr_cc*g_cc_d2d*g_d2d_bs)
p_d2d = (g_cc_d2d*sinr_cc*sinr_d2d+g_cc*sinr_d2d)*noise / \
(g_d2d*g_cc-sinr_cc*sinr_d2d*g_cc_d2d*g_d2d_bs)
for i in range(cc_ues.size):
if (p_d2d > 0 and p_d2d <= pmax_c) and (p_c > 0 and p_c <= pmax_c):
idx_avail.append(i)
# calculate optimal transmit power
# FIXME: one D2D
def _argmax(tp_pairs):
f = 0
idx = 0
for i, (pc, pd) in enumerate(tp_pairs):
fc = np.log2(1+pc*g_cc/(pd*g_d2d_bs+noise))+np.log2(1+pd*g_d2d/(pc*g_cc_d2d+noise))
if fc > f:
f = fc
idx = i
return tp_pairs[idx]
p1 = (pmax_c*g_cc_d2d[idx_avail]+noise)*sinr_d2d/g_d2d
p2 = (pmax_c*g_cc[idx_avail]-sinr_cc*noise)/(sinr_cc*g_d2d_bs)
p3 = (pmax_d*g_d2d-sinr_d2d*noise)/(sinr_d2d*g_cc_d2d[idx_avail])
p4 = (pmax_d*g_d2d_bs+noise)*sinr_cc/g_cc[idx_avail]
opt_tp_pairs = []
for i, j in enumerate(idx_avail):
if (pmax_c*g_cc[i])/(noise+pmax_d*g_d2d_bs) <= sinr_cc:
opt_tp_pairs.append(_argmax([(pmax_c, p1[j]), (pmax_c, p2[j])]))
elif pmax_d*g_d2d/(noise+pmax_c*g_cc_d2d[i]) < sinr_d2d:
opt_tp_pairs.append(_argmax([(p3[j], pmax_d), (p4[j], pmax_d)]))
else:
opt_tp_pairs.append(_argmax([(pmax_c, p1[j]), (pmax_c, pmax_d), (p4[j], pmax_d)]))
# calculate channel allocation.
return _argmax(opt_tp_pairs)
def cal_D2D_ergodic_tp(d2d_tr, d2d_rc, cc_ue, rc, a_gain_c, a_gain_d,
k_los, k_nlos, alpha_los, alpha_nlos, l):
def _f(x):
return x*np.log2(x)/(np.log(2)*(x-1))
a_c = a_gain_c/a_gain_d # antenna gain from CC to D2D
a_d = 1 # antenna gain from D2D to BS
d1_d = np.abs(d2d_tr - d2d_rc)
d2_c = np.abs(d2d_tr)
d1_c = np.abs(cc_ue)
d2_d = np.abs(cc_ue - d2d_rc)
# M, N
def _m1(a, d1, d2):
return a*(d1/d2)**(-alpha_los)
def _m2(a, d1, d2):
return a*k_los*d1**(-alpha_los) / (k_nlos*d2**(-alpha_nlos))
def _m3(a, d1, d2):
return a*k_nlos*d1**(-alpha_nlos) / (k_los*d2**(-alpha_los))
def _m4(a, d1, d2):
return a*(d1/d2)**(-alpha_nlos)
def _n1(d1, d2):
return np.exp(-(d1**2+d2**2)/l**2)
def _n2(d1, d2):
return np.exp(-d1**2/l**2) * (1-np.exp(-d2**2/l**2))
def _n3(d1, d2):
return np.exp(-d2**2/l**2) * (1-np.exp(-d1**2/l**2))
def _n4(d1, d2):
return (1-np.exp(-d1**2/l**2)) * (1-np.exp(-d2**2/l**2))
# equation
def _f_beta_delta(beta):
delta = (rc - _sum(lambda x, y: y*_f(x/beta),
[_m1(a_c, d1_c, d2_c), _m2(a_c, d1_c, d2_c),
_m3(a_c, d1_c, d2_c), _m4(a_c, d1_c, d2_c)],
[_n1(d1_c, d2_c), _n2(d1_c, d2_c), _n3(d1_c, d2_c), _n4(d1_c, d2_c)])) / \
_sum(lambda x, y: y*_f(beta*x)-y*_f(x/beta),
[_m1(a_c, d1_c, d2_c), _m2(a_c, d1_c, d2_c),
_m3(a_c, d1_c, d2_c), _m4(a_c, d1_c, d2_c)],
[_n1(d1_c, d2_c), _n2(d1_c, d2_c), _n3(d1_c, d2_c), _n4(d1_c, d2_c)])
# lambda1 = _sum(lambda x, y: y*_f(beta*x)-y*_f(x/beta),
# [_m1(a_d, d1_d, d2_d), _m2(a_d, d1_d, d2_d),
# _m3(a_d, d1_d, d2_d), _m4(a_d, d1_d, d2_d)],
# [_n1(d1_d, d2_d), _n2(d1_d, d2_d), _n3(d1_d, d2_d), _n4(d1_d, d2_d)]) / \
# _sum(lambda x, y: y*_f(x/beta)+y*_f(beta*x),
# [_m1(a_c, d1_c, d2_c), _m2(a_c, d1_c, d2_c),
# _m3(a_c, d1_c, d2_c), _m4(a_c, d1_c, d2_c)],
# [_n1(d1_c, d2_c), _n2(d1_c, d2_c), _n3(d1_c, d2_c), _n4(d1_c, d2_c)])
lambda1 = 0
# h1, h2
def _h1(x, y):
return x*y*(1-x/beta)/np.log(2)+np.log2(x/beta)/(np.log(2)*(x-beta)**2)
def _h2(x, y):
return y*_f(beta*x)/beta + beta*x*y*((1-1.0/(beta*x))/np.log(2)-x*np.log2(beta*x)) / \
(np.log(2)*(beta*x-1)**2)
return _sum(lambda xc, yc, xd, yd: delta*_h1(xd, yd)+(1-delta)*_h2(xd, yd)-lambda1*((1-delta)*_h1(xc, yc)+delta*_h2(xc, yc)),
[_m1(a_c, d1_c, d2_c), _m2(a_c, d1_c, d2_c),
_m3(a_c, d1_c, d2_c), _m4(a_c, d1_c, d2_c)],
[_n1(d1_c, d2_c), _n2(d1_c, d2_c), _n3(d1_c, d2_c), _n4(d1_c, d2_c)],
[_m1(a_d, d1_d, d2_d), _m2(a_d, d1_d, d2_d),
_m3(a_d, d1_d, d2_d), _m4(a_d, d1_d, d2_d)],
[_n1(d1_d, d2_d), _n2(d1_d, d2_d), _n3(d1_d, d2_d), _n4(d1_d, d2_d)])
opt_beta = scipy.optimize.brentq(_f_beta_delta, 1e-5, 1-1e-5)
opt_delta = (rc - _sum(lambda x, y: y*_f(x/opt_beta),
[_m1(a_c, d1_c, d2_c), _m2(a_c, d1_c, d2_c),
_m3(a_c, d1_c, d2_c), _m4(a_c, d1_c, d2_c)],
[_n1(d1_c, d2_c), _n2(d1_c, d2_c), _n3(d1_c, d2_c), _n4(d1_c, d2_c)])) / \
_sum(lambda x, y: y*_f(opt_beta*x)-y*_f(x/opt_beta),
[_m1(a_c, d1_c, d2_c), _m2(a_c, d1_c, d2_c),
_m3(a_c, d1_c, d2_c), _m4(a_c, d1_c, d2_c)],
[_n1(d1_c, d2_c), _n2(d1_c, d2_c), _n3(d1_c, d2_c), _n4(d1_c, d2_c)])
return opt_beta, opt_delta
def cal_ergodic_subopt_tp(d2d_tr, d2d_rc, cc_ue, rc_ratio, a_gain_c, a_gain_d, beta,
k_los, k_nlos, alpha_los, alpha_nlos, l):
def _f(x):
return x*np.log2(x)/(np.log(2)*(x-1))
a_c = a_gain_c/a_gain_d # antenna gain from CC to D2D
d2_c = np.abs(d2d_tr)
d1_c = np.abs(cc_ue)
# M, N
def _m1(a, d1, d2):
return a*(d1/d2)**(-alpha_los)
def _m2(a, d1, d2):
return a*k_los*d1**(-alpha_los) / (k_nlos*d2**(-alpha_nlos))
def _m3(a, d1, d2):
return a*k_nlos*d1**(-alpha_nlos) / (k_los*d2**(-alpha_los))
def _m4(a, d1, d2):
return a*(d1/d2)**(-alpha_nlos)
def _n1(d1, d2):
return np.exp(-(d1**2+d2**2)/l**2)
def _n2(d1, d2):
return np.exp(-d1**2/l**2) * (1-np.exp(-d2**2/l**2))
def _n3(d1, d2):
return np.exp(-d2**2/l**2) * (1-np.exp(-d1**2/l**2))
def _n4(d1, d2):
return (1-np.exp(-d1**2/l**2)) * (1-np.exp(-d2**2/l**2))
# rc
max_rc = _sum(lambda x, y: y*_f(x/beta),
[_m1(a_c, d1_c, d2_c), _m2(a_c, d1_c, d2_c),
_m3(a_c, d1_c, d2_c), _m4(a_c, d1_c, d2_c)],
[_n1(d1_c, d2_c), _n2(d1_c, d2_c), _n3(d1_c, d2_c), _n4(d1_c, d2_c)])
min_rc = _sum(lambda x, y: y*_f(x*beta),
[_m1(a_c, d1_c, d2_c), _m2(a_c, d1_c, d2_c),
_m3(a_c, d1_c, d2_c), _m4(a_c, d1_c, d2_c)],
[_n1(d1_c, d2_c), _n2(d1_c, d2_c), _n3(d1_c, d2_c), _n4(d1_c, d2_c)])
rc = rc_ratio * (max_rc - min_rc) + min_rc
print(rc)
delta = (rc - max_rc) / (min_rc - max_rc)
return delta
| mit | -7,870,594,230,414,663,000 | 38.466368 | 142 | 0.476764 | false |
artefactual/archivematica-history | src/MCPClient/lib/clientScripts/checkForSubmissionDocumenation.py | 1 | 1358 | #!/usr/bin/python -OO
# This file is part of Archivematica.
#
# Copyright 2010-2012 Artefactual Systems Inc. <http://artefactual.com>
#
# Archivematica is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Archivematica is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Archivematica. If not, see <http://www.gnu.org/licenses/>.
# @package Archivematica
# @subpackage archivematicaClientScript
# @author Joseph Perry <[email protected]>
# @version svn: $Id$
import os
import sys
target = sys.argv[1]
if not os.path.isdir(target):
print >>sys.stderr, "Directory doesn't exist: ", target
os.mkdir(target)
if os.listdir(target) == []:
print >>sys.stderr, "Directory is empty: ", target
fileName = os.path.join(target, "submissionDocumentation.log")
f = open(fileName, 'a')
f.write("No submission documentation added")
f.close()
os.chmod(fileName, 488)
else:
exit(0)
| agpl-3.0 | 861,534,549,331,733,900 | 33.820513 | 77 | 0.731222 | false |
repotvsupertuga/tvsupertuga.repository | script.module.resolveurl/lib/resolveurl/plugins/speedwatch.py | 2 | 1350 | '''
SpeedWatch.io resolveurl plugin
Copyright (C) 2019 gujal
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re
from lib import helpers
from resolveurl import common
from resolveurl.resolver import ResolveUrl, ResolverError
class SpeedWatchResolver(ResolveUrl):
name = "speedwatch"
domains = ["speedwatch.io"]
pattern = r'(?://|\.)(speedwatch\.io)/(?:plyr|e)/([0-9a-zA-Z]+)'
def __init__(self):
self.net = common.Net()
def get_media_url(self, host, media_id):
return helpers.get_media_url(self.get_url(host, media_id), patterns=[r'''sources\s*:\s*\["(?P<url>[^"]+)'''], generic_patterns=False)
def get_url(self, host, media_id):
return self._default_get_url(host, media_id, template='https://www.{host}/e/{media_id}.html')
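# Hedged illustration (not part of the plugin): the class-level `pattern`
# above captures the host and media id from a supported link, e.g.
#
#     import re
#     m = re.search(r'(?://|\.)(speedwatch\.io)/(?:plyr|e)/([0-9a-zA-Z]+)',
#                   'https://speedwatch.io/e/abc123')
#     m.groups()   # ('speedwatch.io', 'abc123')
#
# get_url() then rebuilds the embed page URL
# ('https://www.speedwatch.io/e/abc123.html') before the stream URL is
# scraped from the player's `sources: ["..."]` block.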
| gpl-2.0 | -526,018,856,346,253,000 | 36.5 | 141 | 0.717037 | false |
vivekanand1101/anitya | tests/test_plugins.py | 2 | 2944 | # -*- coding: utf-8 -*-
#
# Copyright © 2014 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2, or (at your option) any later
# version. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details. You
# should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# Any Red Hat trademarks that are incorporated in the source
# code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission
# of Red Hat, Inc.
#
'''
anitya tests of the plugins.
'''
__requires__ = ['SQLAlchemy >= 0.7']
import pkg_resources
import datetime
import unittest
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(
os.path.abspath(__file__)), '..'))
import anitya.lib.plugins as plugins
from tests import Modeltests
class Pluginstests(Modeltests):
""" Plugins tests. """
def test_load_plugins(self):
""" Test the plugins.load_plugins function. """
plgns = [plg.name for plg in plugins.load_plugins(self.session)]
self.assertEqual(len(plgns), 23)
exp = [
'CPAN (perl)', 'Debian project', 'Drupal6', 'Drupal7', 'Freshmeat',
'GNOME', 'GNU project', 'GitHub', 'Google code', 'Hackage',
'Launchpad', 'Maven Central', 'PEAR', 'PECL', 'Packagist', 'PyPI',
'Rubygems', 'Sourceforge', 'Stackage', 'custom', 'folder', 'npmjs',
'pagure',
]
self.assertEqual(sorted(plgns), exp)
def test_plugins_get_plugin_names(self):
""" Test the plugins.get_plugin_names function. """
plgns = plugins.get_plugin_names()
self.assertEqual(len(plgns), 23)
exp = [
'CPAN (perl)', 'Debian project', 'Drupal6', 'Drupal7', 'Freshmeat',
'GNOME', 'GNU project', 'GitHub', 'Google code', 'Hackage',
'Launchpad', 'Maven Central', 'PEAR', 'PECL', 'Packagist', 'PyPI',
'Rubygems', 'Sourceforge', 'Stackage', 'custom', 'folder', 'npmjs',
'pagure',
]
self.assertEqual(sorted(plgns), exp)
def test_plugins_get_plugin(self):
""" Test the plugins.get_plugin function. """
plgns = plugins.get_plugin('PyPI')
self.assertEqual(
str(plgns), "<class 'anitya.lib.backends.pypi.PypiBackend'>")
if __name__ == '__main__':
SUITE = unittest.TestLoader().loadTestsFromTestCase(Pluginstests)
unittest.TextTestRunner(verbosity=2).run(SUITE)
| gpl-2.0 | -7,160,053,987,844,768,000 | 35.333333 | 79 | 0.648998 | false |
Br1an6/ACS_Netplumber_Implementation | hsa-python/net_plumbing/examples/load_stanford_backbone.py | 5 | 12031 | '''
<Loads Stanford backbone network into appropriate objects (e.g. emulated_tf)>
Copyright 2012, Stanford University. This file is licensed under GPL v2 plus
a special exception, as described in included LICENSE_EXCEPTION.txt.
Created on Aug 13, 2011
@author: Peyman Kazemian
'''
from headerspace.tf import *
from headerspace.hs import *
from headerspace.nu_smv_generator import *
from examples.emulated_tf import *
from utils.helper import dotted_ip_to_int
from config_parser.cisco_router_parser import cisco_router
from utils.helper import compose_standard_rules
rtr_names = ["bbra_rtr",
"bbrb_rtr",
"boza_rtr",
"bozb_rtr",
"coza_rtr",
"cozb_rtr",
"goza_rtr",
"gozb_rtr",
"poza_rtr",
"pozb_rtr",
"roza_rtr",
"rozb_rtr",
"soza_rtr",
"sozb_rtr",
"yoza_rtr",
"yozb_rtr",
]
def load_stanford_backbone_ntf():
'''
Loads Stanford backbone network transfer functions into an emulated_tf object with 3 layers.
'''
emul_tf = emulated_tf(3)
emul_tf.set_fwd_engine_stage(1)
for rtr_name in rtr_names:
f = TF(1)
f.load_object_from_file("tf_stanford_backbone/%s.tf"%rtr_name)
f.activate_hash_table([15,14])
emul_tf.append_tf(f)
emul_tf.length = f.length
return emul_tf
def load_stanford_backbone_ttf():
'''
Loads Stanford backbone topology transfer functions into a transfer function object
'''
f = TF(1)
f.load_object_from_file("tf_stanford_backbone/backbone_topology.tf")
return f
def load_stanford_ip_fwd_ntf():
'''
Loads IP forwarding part of Stanford backbone network transfer functions into an emulated_tf object with 1 layer.
'''
emul_tf = emulated_tf(1)
emul_tf.set_fwd_engine_stage(0)
emul_tf.output_port_const = 0
for rtr_name in rtr_names:
f = TF(1)
f.load_object_from_file("tf_simple_stanford_backbone/%s.tf"%rtr_name)
#f.activate_hash_table([3,2])
emul_tf.append_tf(f)
emul_tf.length = f.length
return emul_tf
def load_stanford_ip_fwd_ttf():
'''
Loads Stanford backbone topology transfer functions into a transfer function object.
this should be used together with load_stanford_ip_fwd_ntf.
'''
f = TF(1)
f.load_object_from_file("tf_simple_stanford_backbone/backbone_topology.tf")
return f
def load_port_to_id_map(path):
'''
    Load the mapping between box-port names and port IDs (both directions).
'''
f = open("%s/port_map.txt"%path,'r')
id_to_name = {}
map = {}
rtr = ""
cs = cisco_router(1)
for line in f:
if line.startswith("$"):
rtr = line[1:].strip()
map[rtr] = {}
elif line != "":
tokens = line.strip().split(":")
map[rtr][tokens[0]] = int(tokens[1])
id_to_name[tokens[1]] = "%s-%s"%(rtr,tokens[0])
out_port = int(tokens[1]) + cs.PORT_TYPE_MULTIPLIER * cs.OUTPUT_PORT_TYPE_CONST
id_to_name["%s"%out_port] = "%s-%s"%(rtr,tokens[0])
return (map,id_to_name)
def load_stanford_backbone_port_to_id_map():
return load_port_to_id_map("tf_stanford_backbone")
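# Hedged note (not part of the original file): load_port_to_id_map() expects
# port_map.txt to look roughly like the sketch below -- a '$router' line
# followed by 'port_name:port_id' lines. The names and ids here are invented
# for illustration only.
#
#     $bbra_rtr
#     te1/1:100001
#     te7/4:100002
#     $bbrb_rtr
#     te1/4:200001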
def load_replicated_stanford_network(replicate,path):
'''
Load the transfer functions created by generate_augmented_stanford_backbone_tf.py
'''
ttf = TF(1)
ttf.load_object_from_file("%s/backbone_topology.tf"%path)
(name_to_id,id_to_name) = load_port_to_id_map(path)
emul_tf = emulated_tf(3)
for i in range(replicate):
for rtr_name in rtr_names:
f = TF(1)
f.load_object_from_file("%s/%s%d.tf"%(path,rtr_name,i+1))
#f.activate_hash_table([15,14])
emul_tf.append_tf(f)
f = TF(1)
f.load_object_from_file("%s/root.tf"%(path))
f.activate_hash_table([15,14])
emul_tf.append_tf(f)
return (emul_tf,ttf,name_to_id,id_to_name)
def add_internet(ntf,ttf,port_map,cs,campus_ip_list):
'''
Campus IP list is a list of (ip address,subnet mask) for each IP subnet on campus
'''
s = TF(cs.HS_FORMAT()["length"]*2)
s.set_prefix_id("internet")
for entry in campus_ip_list:
match = byte_array_get_all_x(cs.HS_FORMAT()["length"]*2)
cs.set_field(match,'ip_dst',dotted_ip_to_int(entry[0]),32-entry[1])
rule = TF.create_standard_rule([0], match, [0], None, None, "", [])
s.add_fwd_rule(rule)
ntf.append_tf(s)
bbra_internet_port1 = port_map["bbra_rtr"]["te1/1"]
bbra_internet_port2 = port_map["bbra_rtr"]["te7/4"]
bbrb_internet_port1 = port_map["bbrb_rtr"]["te1/4"]
bbrb_internet_port2 = port_map["bbrb_rtr"]["te7/3"]
rule = TF.create_standard_rule([bbra_internet_port1], None,[0], None, None, "", [])
ttf.add_link_rule(rule)
rule = TF.create_standard_rule([bbra_internet_port2], None,[0], None, None, "", [])
ttf.add_link_rule(rule)
rule = TF.create_standard_rule([bbrb_internet_port1], None,[0], None, None, "", [])
ttf.add_link_rule(rule)
rule = TF.create_standard_rule([bbrb_internet_port2], None,[0], None, None, "", [])
ttf.add_link_rule(rule)
rule = TF.create_standard_rule([0], None,[bbra_internet_port1], None, None, "", [])
ttf.add_link_rule(rule)
rule = TF.create_standard_rule([0], None,[bbra_internet_port2], None, None, "", [])
ttf.add_link_rule(rule)
rule = TF.create_standard_rule([0], None,[bbrb_internet_port1], None, None, "", [])
ttf.add_link_rule(rule)
rule = TF.create_standard_rule([0], None,[bbrb_internet_port2], None, None, "", [])
ttf.add_link_rule(rule)
def get_end_ports(name_to_id,index):
linked_ports = [("bbra_rtr","te7/3"),
("bbra_rtr","te7/2"),
("bbra_rtr","te7/1"),
("bbra_rtr","te1/3"),
("bbra_rtr","te1/4"),
("bbra_rtr","te6/1"),
("bbra_rtr","te6/3"),
("bbrb_rtr","te7/1"),
("bbrb_rtr","te7/2"),
("bbrb_rtr","te7/4"),
("bbrb_rtr","te6/3"),
("bbrb_rtr","te6/1"),
("bbrb_rtr","te1/1"),
("bbrb_rtr","te1/3"),
("boza_rtr","te2/1"),
("boza_rtr","te3/1"),
("boza_rtr","te2/3"),
("bozb_rtr","te2/3"),
("bozb_rtr","te2/1"),
("bozb_rtr","te3/1"),
("coza_rtr","te3/1"),
("coza_rtr","te2/1"),
("coza_rtr","te2/3"),
("cozb_rtr","te2/3"),
("cozb_rtr","te2/1"),
("cozb_rtr","te3/1"),
("goza_rtr","te2/1"),
("goza_rtr","te3/1"),
("goza_rtr","te2/3"),
("gozb_rtr","te2/3"),
("gozb_rtr","te2/1"),
("gozb_rtr","te3/1"),
("poza_rtr","te2/1"),
("poza_rtr","te3/1"),
("poza_rtr","te2/3"),
("pozb_rtr","te2/3"),
("pozb_rtr","te2/1"),
("pozb_rtr","te3/1"),
("roza_rtr","te3/1"),
("roza_rtr","te2/1"),
("roza_rtr","te2/3"),
("rozb_rtr","te2/3"),
("rozb_rtr","te2/1"),
("rozb_rtr","te3/1"),
("soza_rtr","te2/1"),
("soza_rtr","te3/1"),
("soza_rtr","te2/3"),
("sozb_rtr","te2/3"),
("sozb_rtr","te3/1"),
("sozb_rtr","te2/1"),
("yoza_rtr","te7/1"),
("yoza_rtr","te1/3"),
("yoza_rtr","te1/1"),
("yoza_rtr","te1/2"),
("yozb_rtr","te1/2"),
("yozb_rtr","te1/3"),
("yozb_rtr","te2/1"),
("yozb_rtr","te1/1"),
]
end_ports = []
cs = cisco_router(1)
for rtr_name in rtr_names:
mod_rtr_name = "%s%s"%(rtr_name,index)
for rtr_port in name_to_id[mod_rtr_name]:
if (rtr_name,rtr_port) not in linked_ports:
end_ports.append(name_to_id[mod_rtr_name][rtr_port] + cs.PORT_TYPE_MULTIPLIER * cs.OUTPUT_PORT_TYPE_CONST)
return end_ports
def load_tf_to_nusmv():
'''
For Model Checking Project.
Creates NuSMV file from transfer function objects of Stanford network.
'''
nusmv = NuSMV()
cs = cisco_router(1)
nusmv.set_output_port_offset(cs.PORT_TYPE_MULTIPLIER * cs.OUTPUT_PORT_TYPE_CONST)
for rtr_name in rtr_names:
f = TF(1)
f.load_object_from_file("tf_stanford_backbone/%s.tf"%rtr_name)
nusmv.generate_nusmv_trans(f, [])
(port_map,port_reverse_map) = load_stanford_backbone_port_to_id_map()
end_ports = get_end_ports(port_map,"")
f = TF(1)
f.load_object_from_file("tf_stanford_backbone/backbone_topology.tf")
nusmv.generate_nusmv_trans(f,end_ports)
nusmv.generate_nusmv_input()
return nusmv
def load_augmented_tf_to_nusmv(replication_factor,dir_path):
'''
For Model Checking Project.
Creates NuSMV file from transfer function objects of replicated Stanford network.
'''
nusmv = NuSMV()
cs = cisco_router(1)
nusmv.set_output_port_offset(cs.PORT_TYPE_MULTIPLIER * cs.OUTPUT_PORT_TYPE_CONST)
(port_map,port_reverse_map) = load_port_to_id_map(dir_path)
end_ports = []
for replicate in range(1,replication_factor+1):
for rtr_name in rtr_names:
f = TF(1)
f.load_object_from_file("%s/%s%d.tf"%(dir_path,rtr_name,replicate))
nusmv.generate_nusmv_trans(f, [])
end_ports_subset = get_end_ports(port_map,"%d"%replicate)
end_ports.extend(end_ports_subset)
f = TF(1)
f.load_object_from_file("%s/root.tf"%(dir_path))
nusmv.generate_nusmv_trans(f, [])
f = TF(1)
f.load_object_from_file("%s/backbone_topology.tf"%dir_path)
nusmv.generate_nusmv_trans(f,end_ports)
nusmv.generate_nusmv_input()
return nusmv
def generate_stanford_backbne_one_layer_tf():
'''
Tries to merge the 3 layers of transfer function into one layer.
WARNING: the result will be huge.
'''
for rtr_name in rtr_names:
f = TF(1)
f.load_object_from_file("tf_stanford_backbone/%s.tf"%rtr_name)
stage1_rules = []
stage2_rules = []
stage3_rules = []
stage1_2_rules = []
for rule in f.rules:
if (rule["in_ports"][0] % 100000) == 0:
stage2_rules.append(rule)
elif ((rule["in_ports"][0] % 100000)/ 10000) == 0:
stage1_rules.append(rule)
elif ((rule["in_ports"][0] % 100000)/ 10000) == 1:
stage3_rules.append(rule)
for stage2_rule in stage2_rules:
for stage1_rule in stage1_rules:
r = compose_standard_rules(stage1_rule,stage2_rule)
if r != None:
stage1_2_rules.append(r)
for stage3_rule in stage3_rules:
for stage1_2_rule in stage1_2_rules:
r = compose_standard_rules(stage3_rule,stage1_2_rule)
if r != None:
if r["mask"] == None:
f.add_fwd_rule(r)
else:
f.add_rewrite_rule(r)
f.save_object_to_file("tf_stanford_backbone/%s_one_layer.tf"%rtr_name)
| gpl-2.0 | -693,699,386,573,911,400 | 35.460606 | 122 | 0.514504 | false |
PaloAltoNetworks-BD/ansible-pan | library/panos_email_server.py | 1 | 4152 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Copyright 2019 Palo Alto Networks, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: panos_email_server
short_description: Manage email servers in an email profile.
description:
- Manages email servers in an email server profile.
author: "Garfield Lee Freeman (@shinmog)"
version_added: "2.8"
requirements:
- pan-python
- pandevice >= 0.11.1
notes:
- Panorama is supported.
- Check mode is supported.
extends_documentation_fragment:
- panos.transitional_provider
- panos.vsys_shared
- panos.device_group
options:
email_profile:
description:
- Name of the email server profile.
required: True
name:
description:
- Server name.
required: True
display_name:
description:
- Display name
from_email:
description:
- From email address
to_email:
description:
- Destination email address.
also_to_email:
description:
- Additional destination email address
email_gateway:
description:
- IP address or FQDN of email gateway to use.
'''
EXAMPLES = '''
# Create a profile
- name: Create email server in an email profile
panos_email_server:
provider: '{{ provider }}'
email_profile: 'my-profile'
name: 'my-email-server'
from_email: '[email protected]'
to_email: '[email protected]'
email_gateway: 'smtp.example.com'
'''
RETURN = '''
# Default return values
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.panos.panos import get_connection
try:
from pandevice.device import EmailServerProfile
from pandevice.device import EmailServer
from pandevice.errors import PanDeviceError
except ImportError:
pass
def main():
helper = get_connection(
vsys_shared=True,
device_group=True,
with_state=True,
with_classic_provider_spec=True,
min_pandevice_version=(0, 11, 1),
min_panos_version=(7, 1, 0),
argument_spec=dict(
email_profile=dict(required=True),
name=dict(required=True),
display_name=dict(),
from_email=dict(),
to_email=dict(),
also_to_email=dict(),
email_gateway=dict(),
),
)
module = AnsibleModule(
argument_spec=helper.argument_spec,
supports_check_mode=True,
required_one_of=helper.required_one_of,
)
# Verify imports, build pandevice object tree.
parent = helper.get_pandevice_parent(module)
sp = EmailServerProfile(module.params['email_profile'])
parent.add(sp)
try:
sp.refresh()
except PanDeviceError as e:
module.fail_json(msg='Failed refresh: {0}'.format(e))
listing = sp.findall(EmailServer)
spec = {
'name': module.params['name'],
'display_name': module.params['display_name'],
'from': module.params['from_email'],
'to': module.params['to_email'],
'also_to': module.params['also_to_email'],
'email_gateway': module.params['email_gateway'],
}
obj = EmailServer(**spec)
sp.add(obj)
changed = helper.apply_state(obj, listing, module)
module.exit_json(changed=changed, msg='Done')
if __name__ == '__main__':
main()
| isc | 5,484,111,409,566,127,000 | 26.865772 | 75 | 0.633671 | false |
xHeliotrope/injustice_dropper | env/lib/python3.4/site-packages/django/db/models/query.py | 10 | 71207 | """
The main QuerySet implementation. This provides the public API for the ORM.
"""
import copy
import sys
import warnings
from collections import OrderedDict, deque
from django.conf import settings
from django.core import exceptions
from django.db import (
DJANGO_VERSION_PICKLE_KEY, IntegrityError, connections, router,
transaction,
)
from django.db.models import sql
from django.db.models.constants import LOOKUP_SEP
from django.db.models.deletion import Collector
from django.db.models.expressions import F, Date, DateTime
from django.db.models.fields import AutoField, Empty
from django.db.models.query_utils import (
Q, InvalidQuery, deferred_class_factory,
)
from django.db.models.sql.constants import CURSOR
from django.utils import six, timezone
from django.utils.functional import partition
from django.utils.version import get_version
# The maximum number of items to display in a QuerySet.__repr__
REPR_OUTPUT_SIZE = 20
# Pull into this namespace for backwards compatibility.
EmptyResultSet = sql.EmptyResultSet
def _pickle_queryset(class_bases, class_dict):
"""
Used by `__reduce__` to create the initial version of the `QuerySet` class
onto which the output of `__getstate__` will be applied.
See `__reduce__` for more details.
"""
new = Empty()
new.__class__ = type(class_bases[0].__name__, class_bases, class_dict)
return new
class QuerySet(object):
"""
Represents a lazy database lookup for a set of objects.
"""
def __init__(self, model=None, query=None, using=None, hints=None):
self.model = model
self._db = using
self._hints = hints or {}
self.query = query or sql.Query(self.model)
self._result_cache = None
self._sticky_filter = False
self._for_write = False
self._prefetch_related_lookups = []
self._prefetch_done = False
self._known_related_objects = {} # {rel_field, {pk: rel_obj}}
def as_manager(cls):
# Address the circular dependency between `Queryset` and `Manager`.
from django.db.models.manager import Manager
manager = Manager.from_queryset(cls)()
manager._built_with_as_manager = True
return manager
as_manager.queryset_only = True
as_manager = classmethod(as_manager)
########################
# PYTHON MAGIC METHODS #
########################
def __deepcopy__(self, memo):
"""
Deep copy of a QuerySet doesn't populate the cache
"""
obj = self.__class__()
for k, v in self.__dict__.items():
if k == '_result_cache':
obj.__dict__[k] = None
else:
obj.__dict__[k] = copy.deepcopy(v, memo)
return obj
def __getstate__(self):
"""
Allows the QuerySet to be pickled.
"""
# Force the cache to be fully populated.
self._fetch_all()
obj_dict = self.__dict__.copy()
obj_dict[DJANGO_VERSION_PICKLE_KEY] = get_version()
return obj_dict
def __setstate__(self, state):
msg = None
pickled_version = state.get(DJANGO_VERSION_PICKLE_KEY)
if pickled_version:
current_version = get_version()
if current_version != pickled_version:
msg = ("Pickled queryset instance's Django version %s does"
" not match the current version %s."
% (pickled_version, current_version))
else:
msg = "Pickled queryset instance's Django version is not specified."
if msg:
warnings.warn(msg, RuntimeWarning, stacklevel=2)
self.__dict__.update(state)
def __reduce__(self):
"""
Used by pickle to deal with the types that we create dynamically when
specialized queryset such as `ValuesQuerySet` are used in conjunction
with querysets that are *subclasses* of `QuerySet`.
See `_clone` implementation for more details.
"""
if hasattr(self, '_specialized_queryset_class'):
class_bases = (
self._specialized_queryset_class,
self._base_queryset_class,
)
class_dict = {
'_specialized_queryset_class': self._specialized_queryset_class,
'_base_queryset_class': self._base_queryset_class,
}
return _pickle_queryset, (class_bases, class_dict), self.__getstate__()
return super(QuerySet, self).__reduce__()
def __repr__(self):
data = list(self[:REPR_OUTPUT_SIZE + 1])
if len(data) > REPR_OUTPUT_SIZE:
data[-1] = "...(remaining elements truncated)..."
return repr(data)
def __len__(self):
self._fetch_all()
return len(self._result_cache)
def __iter__(self):
"""
The queryset iterator protocol uses three nested iterators in the
default case:
1. sql.compiler:execute_sql()
               - Returns 100 rows at a time (constants.GET_ITERATOR_CHUNK_SIZE)
using cursor.fetchmany(). This part is responsible for
doing some column masking, and returning the rows in chunks.
2. sql/compiler.results_iter()
               - Returns one row at a time. At this point the rows are still just
tuples. In some cases the return values are converted to
Python values at this location.
3. self.iterator()
- Responsible for turning the rows into model objects.
"""
self._fetch_all()
return iter(self._result_cache)
def __bool__(self):
self._fetch_all()
return bool(self._result_cache)
def __nonzero__(self): # Python 2 compatibility
return type(self).__bool__(self)
def __getitem__(self, k):
"""
Retrieves an item or slice from the set of results.
"""
if not isinstance(k, (slice,) + six.integer_types):
raise TypeError
assert ((not isinstance(k, slice) and (k >= 0)) or
(isinstance(k, slice) and (k.start is None or k.start >= 0) and
(k.stop is None or k.stop >= 0))), \
"Negative indexing is not supported."
if self._result_cache is not None:
return self._result_cache[k]
if isinstance(k, slice):
qs = self._clone()
if k.start is not None:
start = int(k.start)
else:
start = None
if k.stop is not None:
stop = int(k.stop)
else:
stop = None
qs.query.set_limits(start, stop)
return list(qs)[::k.step] if k.step else qs
qs = self._clone()
qs.query.set_limits(k, k + 1)
return list(qs)[0]
def __and__(self, other):
self._merge_sanity_check(other)
if isinstance(other, EmptyQuerySet):
return other
if isinstance(self, EmptyQuerySet):
return self
combined = self._clone()
combined._merge_known_related_objects(other)
combined.query.combine(other.query, sql.AND)
return combined
def __or__(self, other):
self._merge_sanity_check(other)
if isinstance(self, EmptyQuerySet):
return other
if isinstance(other, EmptyQuerySet):
return self
combined = self._clone()
combined._merge_known_related_objects(other)
combined.query.combine(other.query, sql.OR)
return combined
####################################
# METHODS THAT DO DATABASE QUERIES #
####################################
def iterator(self):
"""
An iterator over the results from applying this QuerySet to the
database.
"""
db = self.db
compiler = self.query.get_compiler(using=db)
# Execute the query. This will also fill compiler.select, klass_info,
# and annotations.
results = compiler.execute_sql()
select, klass_info, annotation_col_map = (compiler.select, compiler.klass_info,
compiler.annotation_col_map)
if klass_info is None:
return
model_cls = klass_info['model']
select_fields = klass_info['select_fields']
model_fields_start, model_fields_end = select_fields[0], select_fields[-1] + 1
init_list = [f[0].target.attname
for f in select[model_fields_start:model_fields_end]]
if len(init_list) != len(model_cls._meta.concrete_fields):
init_set = set(init_list)
skip = [f.attname for f in model_cls._meta.concrete_fields
if f.attname not in init_set]
model_cls = deferred_class_factory(model_cls, skip)
related_populators = get_related_populators(klass_info, select, db)
for row in compiler.results_iter(results):
obj = model_cls.from_db(db, init_list, row[model_fields_start:model_fields_end])
if related_populators:
for rel_populator in related_populators:
rel_populator.populate(row, obj)
if annotation_col_map:
for attr_name, col_pos in annotation_col_map.items():
setattr(obj, attr_name, row[col_pos])
# Add the known related objects to the model, if there are any
if self._known_related_objects:
for field, rel_objs in self._known_related_objects.items():
# Avoid overwriting objects loaded e.g. by select_related
if hasattr(obj, field.get_cache_name()):
continue
pk = getattr(obj, field.get_attname())
try:
rel_obj = rel_objs[pk]
except KeyError:
pass # may happen in qs1 | qs2 scenarios
else:
setattr(obj, field.name, rel_obj)
yield obj
def aggregate(self, *args, **kwargs):
"""
Returns a dictionary containing the calculations (aggregation)
        over the current queryset.
        If args is present, the expression is passed as a kwarg using
the Aggregate object's default alias.
"""
if self.query.distinct_fields:
raise NotImplementedError("aggregate() + distinct(fields) not implemented.")
for arg in args:
# The default_alias property may raise a TypeError, so we use
# a try/except construct rather than hasattr in order to remain
# consistent between PY2 and PY3 (hasattr would swallow
# the TypeError on PY2).
try:
arg.default_alias
except (AttributeError, TypeError):
raise TypeError("Complex aggregates require an alias")
kwargs[arg.default_alias] = arg
query = self.query.clone()
for (alias, aggregate_expr) in kwargs.items():
query.add_annotation(aggregate_expr, alias, is_summary=True)
if not query.annotations[alias].contains_aggregate:
raise TypeError("%s is not an aggregate expression" % alias)
return query.get_aggregation(self.db, kwargs.keys())
def count(self):
"""
Performs a SELECT COUNT() and returns the number of records as an
integer.
If the QuerySet is already fully cached this simply returns the length
of the cached results set to avoid multiple SELECT COUNT(*) calls.
"""
if self._result_cache is not None:
return len(self._result_cache)
return self.query.get_count(using=self.db)
def get(self, *args, **kwargs):
"""
Performs the query and returns a single object matching the given
keyword arguments.
"""
clone = self.filter(*args, **kwargs)
if self.query.can_filter():
clone = clone.order_by()
num = len(clone)
if num == 1:
return clone._result_cache[0]
if not num:
raise self.model.DoesNotExist(
"%s matching query does not exist." %
self.model._meta.object_name
)
raise self.model.MultipleObjectsReturned(
"get() returned more than one %s -- it returned %s!" %
(self.model._meta.object_name, num)
)
def create(self, **kwargs):
"""
Creates a new object with the given kwargs, saving it to the database
and returning the created object.
"""
obj = self.model(**kwargs)
self._for_write = True
obj.save(force_insert=True, using=self.db)
return obj
def _populate_pk_values(self, objs):
for obj in objs:
if obj.pk is None:
obj.pk = obj._meta.pk.get_pk_value_on_save(obj)
def bulk_create(self, objs, batch_size=None):
"""
Inserts each of the instances into the database. This does *not* call
save() on each of the instances, does not send any pre/post save
signals, and does not set the primary key attribute if it is an
autoincrement field.
"""
# So this case is fun. When you bulk insert you don't get the primary
# keys back (if it's an autoincrement), so you can't insert into the
# child tables which references this. There are two workarounds, 1)
# this could be implemented if you didn't have an autoincrement pk,
# and 2) you could do it by doing O(n) normal inserts into the parent
# tables to get the primary keys back, and then doing a single bulk
# insert into the childmost table. Some databases might allow doing
# this by using RETURNING clause for the insert query. We're punting
# on these for now because they are relatively rare cases.
assert batch_size is None or batch_size > 0
if self.model._meta.parents:
raise ValueError("Can't bulk create an inherited model")
if not objs:
return objs
self._for_write = True
connection = connections[self.db]
fields = self.model._meta.local_concrete_fields
objs = list(objs)
self._populate_pk_values(objs)
with transaction.atomic(using=self.db, savepoint=False):
if (connection.features.can_combine_inserts_with_and_without_auto_increment_pk
and self.model._meta.has_auto_field):
self._batched_insert(objs, fields, batch_size)
else:
objs_with_pk, objs_without_pk = partition(lambda o: o.pk is None, objs)
if objs_with_pk:
self._batched_insert(objs_with_pk, fields, batch_size)
if objs_without_pk:
fields = [f for f in fields if not isinstance(f, AutoField)]
self._batched_insert(objs_without_pk, fields, batch_size)
return objs
def get_or_create(self, defaults=None, **kwargs):
"""
Looks up an object with the given kwargs, creating one if necessary.
Returns a tuple of (object, created), where created is a boolean
specifying whether an object was created.
"""
lookup, params = self._extract_model_params(defaults, **kwargs)
self._for_write = True
try:
return self.get(**lookup), False
except self.model.DoesNotExist:
return self._create_object_from_params(lookup, params)
def update_or_create(self, defaults=None, **kwargs):
"""
Looks up an object with the given kwargs, updating one with defaults
if it exists, otherwise creates a new one.
Returns a tuple (object, created), where created is a boolean
specifying whether an object was created.
"""
defaults = defaults or {}
lookup, params = self._extract_model_params(defaults, **kwargs)
self._for_write = True
try:
obj = self.get(**lookup)
except self.model.DoesNotExist:
obj, created = self._create_object_from_params(lookup, params)
if created:
return obj, created
for k, v in six.iteritems(defaults):
setattr(obj, k, v)
with transaction.atomic(using=self.db, savepoint=False):
obj.save(using=self.db)
return obj, False
def _create_object_from_params(self, lookup, params):
"""
Tries to create an object using passed params.
Used by get_or_create and update_or_create
"""
try:
with transaction.atomic(using=self.db):
obj = self.create(**params)
return obj, True
except IntegrityError:
exc_info = sys.exc_info()
try:
return self.get(**lookup), False
except self.model.DoesNotExist:
pass
six.reraise(*exc_info)
def _extract_model_params(self, defaults, **kwargs):
"""
Prepares `lookup` (kwargs that are valid model attributes), `params`
(for creating a model instance) based on given kwargs; for use by
get_or_create and update_or_create.
"""
defaults = defaults or {}
lookup = kwargs.copy()
for f in self.model._meta.fields:
if f.attname in lookup:
lookup[f.name] = lookup.pop(f.attname)
params = {k: v for k, v in kwargs.items() if LOOKUP_SEP not in k}
params.update(defaults)
return lookup, params
def _earliest_or_latest(self, field_name=None, direction="-"):
"""
Returns the latest object, according to the model's
'get_latest_by' option or optional given field_name.
"""
order_by = field_name or getattr(self.model._meta, 'get_latest_by')
assert bool(order_by), "earliest() and latest() require either a "\
"field_name parameter or 'get_latest_by' in the model"
assert self.query.can_filter(), \
"Cannot change a query once a slice has been taken."
obj = self._clone()
obj.query.set_limits(high=1)
obj.query.clear_ordering(force_empty=True)
obj.query.add_ordering('%s%s' % (direction, order_by))
return obj.get()
def earliest(self, field_name=None):
return self._earliest_or_latest(field_name=field_name, direction="")
def latest(self, field_name=None):
return self._earliest_or_latest(field_name=field_name, direction="-")
def first(self):
"""
        Returns the first object of a query, or None if no match is found.
"""
objects = list((self if self.ordered else self.order_by('pk'))[:1])
if objects:
return objects[0]
return None
def last(self):
"""
        Returns the last object of a query, or None if no match is found.
"""
objects = list((self.reverse() if self.ordered else self.order_by('-pk'))[:1])
if objects:
return objects[0]
return None
def in_bulk(self, id_list):
"""
Returns a dictionary mapping each of the given IDs to the object with
that ID.
"""
assert self.query.can_filter(), \
"Cannot use 'limit' or 'offset' with in_bulk"
if not id_list:
return {}
qs = self.filter(pk__in=id_list).order_by()
return {obj._get_pk_val(): obj for obj in qs}
def delete(self):
"""
Deletes the records in the current QuerySet.
"""
assert self.query.can_filter(), \
"Cannot use 'limit' or 'offset' with delete."
del_query = self._clone()
# The delete is actually 2 queries - one to find related objects,
# and one to delete. Make sure that the discovery of related
# objects is performed on the same database as the deletion.
del_query._for_write = True
# Disable non-supported fields.
del_query.query.select_for_update = False
del_query.query.select_related = False
del_query.query.clear_ordering(force_empty=True)
collector = Collector(using=del_query.db)
collector.collect(del_query)
collector.delete()
# Clear the result cache, in case this QuerySet gets reused.
self._result_cache = None
delete.alters_data = True
delete.queryset_only = True
def _raw_delete(self, using):
"""
Deletes objects found from the given queryset in single direct SQL
query. No signals are sent, and there is no protection for cascades.
"""
sql.DeleteQuery(self.model).delete_qs(self, using)
_raw_delete.alters_data = True
def update(self, **kwargs):
"""
Updates all elements in the current QuerySet, setting all the given
fields to the appropriate values.
"""
assert self.query.can_filter(), \
"Cannot update a query once a slice has been taken."
self._for_write = True
query = self.query.clone(sql.UpdateQuery)
query.add_update_values(kwargs)
with transaction.atomic(using=self.db, savepoint=False):
rows = query.get_compiler(self.db).execute_sql(CURSOR)
self._result_cache = None
return rows
update.alters_data = True
def _update(self, values):
"""
A version of update that accepts field objects instead of field names.
Used primarily for model saving and not intended for use by general
code (it requires too much poking around at model internals to be
useful at that level).
"""
assert self.query.can_filter(), \
"Cannot update a query once a slice has been taken."
query = self.query.clone(sql.UpdateQuery)
query.add_update_fields(values)
self._result_cache = None
return query.get_compiler(self.db).execute_sql(CURSOR)
_update.alters_data = True
_update.queryset_only = False
def exists(self):
if self._result_cache is None:
return self.query.has_results(using=self.db)
return bool(self._result_cache)
def _prefetch_related_objects(self):
# This method can only be called once the result cache has been filled.
prefetch_related_objects(self._result_cache, self._prefetch_related_lookups)
self._prefetch_done = True
##################################################
# PUBLIC METHODS THAT RETURN A QUERYSET SUBCLASS #
##################################################
def raw(self, raw_query, params=None, translations=None, using=None):
if using is None:
using = self.db
return RawQuerySet(raw_query, model=self.model,
params=params, translations=translations,
using=using)
def values(self, *fields):
return self._clone(klass=ValuesQuerySet, setup=True, _fields=fields)
def values_list(self, *fields, **kwargs):
flat = kwargs.pop('flat', False)
if kwargs:
raise TypeError('Unexpected keyword arguments to values_list: %s'
% (list(kwargs),))
if flat and len(fields) > 1:
raise TypeError("'flat' is not valid when values_list is called with more than one field.")
return self._clone(klass=ValuesListQuerySet, setup=True, flat=flat,
_fields=fields)
def dates(self, field_name, kind, order='ASC'):
"""
Returns a list of date objects representing all available dates for
the given field_name, scoped to 'kind'.
"""
assert kind in ("year", "month", "day"), \
"'kind' must be one of 'year', 'month' or 'day'."
assert order in ('ASC', 'DESC'), \
"'order' must be either 'ASC' or 'DESC'."
return self.annotate(
datefield=Date(field_name, kind),
plain_field=F(field_name)
).values_list(
'datefield', flat=True
).distinct().filter(plain_field__isnull=False).order_by(('-' if order == 'DESC' else '') + 'datefield')
def datetimes(self, field_name, kind, order='ASC', tzinfo=None):
"""
Returns a list of datetime objects representing all available
datetimes for the given field_name, scoped to 'kind'.
"""
assert kind in ("year", "month", "day", "hour", "minute", "second"), \
"'kind' must be one of 'year', 'month', 'day', 'hour', 'minute' or 'second'."
assert order in ('ASC', 'DESC'), \
"'order' must be either 'ASC' or 'DESC'."
if settings.USE_TZ:
if tzinfo is None:
tzinfo = timezone.get_current_timezone()
else:
tzinfo = None
return self.annotate(
datetimefield=DateTime(field_name, kind, tzinfo),
plain_field=F(field_name)
).values_list(
'datetimefield', flat=True
).distinct().filter(plain_field__isnull=False).order_by(('-' if order == 'DESC' else '') + 'datetimefield')
def none(self):
"""
Returns an empty QuerySet.
"""
clone = self._clone()
clone.query.set_empty()
return clone
##################################################################
# PUBLIC METHODS THAT ALTER ATTRIBUTES AND RETURN A NEW QUERYSET #
##################################################################
def all(self):
"""
Returns a new QuerySet that is a copy of the current one. This allows a
QuerySet to proxy for a model manager in some cases.
"""
return self._clone()
def filter(self, *args, **kwargs):
"""
Returns a new QuerySet instance with the args ANDed to the existing
set.
"""
return self._filter_or_exclude(False, *args, **kwargs)
def exclude(self, *args, **kwargs):
"""
Returns a new QuerySet instance with NOT (args) ANDed to the existing
set.
"""
return self._filter_or_exclude(True, *args, **kwargs)
def _filter_or_exclude(self, negate, *args, **kwargs):
if args or kwargs:
assert self.query.can_filter(), \
"Cannot filter a query once a slice has been taken."
clone = self._clone()
if negate:
clone.query.add_q(~Q(*args, **kwargs))
else:
clone.query.add_q(Q(*args, **kwargs))
return clone
def complex_filter(self, filter_obj):
"""
Returns a new QuerySet instance with filter_obj added to the filters.
filter_obj can be a Q object (or anything with an add_to_query()
method) or a dictionary of keyword lookup arguments.
This exists to support framework features such as 'limit_choices_to',
and usually it will be more natural to use other methods.
"""
if isinstance(filter_obj, Q) or hasattr(filter_obj, 'add_to_query'):
clone = self._clone()
clone.query.add_q(filter_obj)
return clone
else:
return self._filter_or_exclude(None, **filter_obj)
def select_for_update(self, nowait=False):
"""
Returns a new QuerySet instance that will select objects with a
FOR UPDATE lock.
"""
obj = self._clone()
obj._for_write = True
obj.query.select_for_update = True
obj.query.select_for_update_nowait = nowait
return obj
def select_related(self, *fields):
"""
Returns a new QuerySet instance that will select related objects.
If fields are specified, they must be ForeignKey fields and only those
related objects are included in the selection.
If select_related(None) is called, the list is cleared.
"""
obj = self._clone()
if fields == (None,):
obj.query.select_related = False
elif fields:
obj.query.add_select_related(fields)
else:
obj.query.select_related = True
return obj
def prefetch_related(self, *lookups):
"""
Returns a new QuerySet instance that will prefetch the specified
Many-To-One and Many-To-Many related objects when the QuerySet is
evaluated.
When prefetch_related() is called more than once, the list of lookups to
prefetch is appended to. If prefetch_related(None) is called, the list
is cleared.
"""
clone = self._clone()
if lookups == (None,):
clone._prefetch_related_lookups = []
else:
clone._prefetch_related_lookups.extend(lookups)
return clone
def annotate(self, *args, **kwargs):
"""
Return a query set in which the returned objects have been annotated
with extra data or aggregations.
"""
annotations = OrderedDict() # To preserve ordering of args
for arg in args:
# The default_alias property may raise a TypeError, so we use
# a try/except construct rather than hasattr in order to remain
# consistent between PY2 and PY3 (hasattr would swallow
# the TypeError on PY2).
try:
if arg.default_alias in kwargs:
raise ValueError("The named annotation '%s' conflicts with the "
"default name for another annotation."
% arg.default_alias)
except (AttributeError, TypeError):
raise TypeError("Complex annotations require an alias")
annotations[arg.default_alias] = arg
annotations.update(kwargs)
obj = self._clone()
names = getattr(self, '_fields', None)
if names is None:
names = {f.name for f in self.model._meta.get_fields()}
# Add the annotations to the query
for alias, annotation in annotations.items():
if alias in names:
raise ValueError("The annotation '%s' conflicts with a field on "
"the model." % alias)
obj.query.add_annotation(annotation, alias, is_summary=False)
# expressions need to be added to the query before we know if they contain aggregates
added_aggregates = []
for alias, annotation in obj.query.annotations.items():
if alias in annotations and annotation.contains_aggregate:
added_aggregates.append(alias)
if added_aggregates:
obj._setup_aggregate_query(list(added_aggregates))
return obj
def order_by(self, *field_names):
"""
Returns a new QuerySet instance with the ordering changed.
"""
assert self.query.can_filter(), \
"Cannot reorder a query once a slice has been taken."
obj = self._clone()
obj.query.clear_ordering(force_empty=False)
obj.query.add_ordering(*field_names)
return obj
def distinct(self, *field_names):
"""
Returns a new QuerySet instance that will select only distinct results.
"""
assert self.query.can_filter(), \
"Cannot create distinct fields once a slice has been taken."
obj = self._clone()
obj.query.add_distinct_fields(*field_names)
return obj
def extra(self, select=None, where=None, params=None, tables=None,
order_by=None, select_params=None):
"""
Adds extra SQL fragments to the query.
"""
assert self.query.can_filter(), \
"Cannot change a query once a slice has been taken"
clone = self._clone()
clone.query.add_extra(select, select_params, where, params, tables, order_by)
return clone
def reverse(self):
"""
Reverses the ordering of the QuerySet.
"""
clone = self._clone()
clone.query.standard_ordering = not clone.query.standard_ordering
return clone
def defer(self, *fields):
"""
Defers the loading of data for certain fields until they are accessed.
The set of fields to defer is added to any existing set of deferred
fields. The only exception to this is if None is passed in as the only
parameter, in which case all deferrals are removed (None acts as a
reset option).
"""
clone = self._clone()
if fields == (None,):
clone.query.clear_deferred_loading()
else:
clone.query.add_deferred_loading(fields)
return clone
def only(self, *fields):
"""
Essentially, the opposite of defer. Only the fields passed into this
method and that are not already specified as deferred are loaded
immediately when the queryset is evaluated.
"""
if fields == (None,):
# Can only pass None to defer(), not only(), as the rest option.
# That won't stop people trying to do this, so let's be explicit.
raise TypeError("Cannot pass None as an argument to only().")
clone = self._clone()
clone.query.add_immediate_loading(fields)
return clone
def using(self, alias):
"""
Selects which database this QuerySet should execute its query against.
"""
clone = self._clone()
clone._db = alias
return clone
###################################
# PUBLIC INTROSPECTION ATTRIBUTES #
###################################
def ordered(self):
"""
Returns True if the QuerySet is ordered -- i.e. has an order_by()
clause or a default ordering on the model.
"""
if self.query.extra_order_by or self.query.order_by:
return True
elif self.query.default_ordering and self.query.get_meta().ordering:
return True
else:
return False
ordered = property(ordered)
@property
def db(self):
"Return the database that will be used if this query is executed now"
if self._for_write:
return self._db or router.db_for_write(self.model, **self._hints)
return self._db or router.db_for_read(self.model, **self._hints)
###################
# PRIVATE METHODS #
###################
def _insert(self, objs, fields, return_id=False, raw=False, using=None):
"""
Inserts a new record for the given model. This provides an interface to
the InsertQuery class and is how Model.save() is implemented.
"""
self._for_write = True
if using is None:
using = self.db
query = sql.InsertQuery(self.model)
query.insert_values(fields, objs, raw=raw)
return query.get_compiler(using=using).execute_sql(return_id)
_insert.alters_data = True
_insert.queryset_only = False
def _batched_insert(self, objs, fields, batch_size):
"""
        A helper method for bulk_create() that inserts the given objects one
        batch at a time, slicing successive batches from the front of the
        list until all objects have been inserted.
"""
if not objs:
return
ops = connections[self.db].ops
batch_size = (batch_size or max(ops.bulk_batch_size(fields, objs), 1))
for batch in [objs[i:i + batch_size]
for i in range(0, len(objs), batch_size)]:
self.model._base_manager._insert(batch, fields=fields,
using=self.db)
def _clone(self, klass=None, setup=False, **kwargs):
if klass is None:
klass = self.__class__
elif not issubclass(self.__class__, klass):
base_queryset_class = getattr(self, '_base_queryset_class', self.__class__)
class_bases = (klass, base_queryset_class)
class_dict = {
'_base_queryset_class': base_queryset_class,
'_specialized_queryset_class': klass,
}
klass = type(klass.__name__, class_bases, class_dict)
query = self.query.clone()
if self._sticky_filter:
query.filter_is_sticky = True
c = klass(model=self.model, query=query, using=self._db, hints=self._hints)
c._for_write = self._for_write
c._prefetch_related_lookups = self._prefetch_related_lookups[:]
c._known_related_objects = self._known_related_objects
c.__dict__.update(kwargs)
if setup and hasattr(c, '_setup_query'):
c._setup_query()
return c
def _fetch_all(self):
if self._result_cache is None:
self._result_cache = list(self.iterator())
if self._prefetch_related_lookups and not self._prefetch_done:
self._prefetch_related_objects()
def _next_is_sticky(self):
"""
Indicates that the next filter call and the one following that should
be treated as a single filter. This is only important when it comes to
determining when to reuse tables for many-to-many filters. Required so
that we can filter naturally on the results of related managers.
This doesn't return a clone of the current QuerySet (it returns
"self"). The method is only used internally and should be immediately
followed by a filter() that does create a clone.
"""
self._sticky_filter = True
return self
def _merge_sanity_check(self, other):
"""
Checks that we are merging two comparable QuerySet classes. By default
this does nothing, but see the ValuesQuerySet for an example of where
it's useful.
"""
pass
def _merge_known_related_objects(self, other):
"""
Keep track of all known related objects from either QuerySet instance.
"""
for field, objects in other._known_related_objects.items():
self._known_related_objects.setdefault(field, {}).update(objects)
def _setup_aggregate_query(self, aggregates):
"""
Prepare the query for computing a result that contains aggregate annotations.
"""
if self.query.group_by is None:
self.query.group_by = True
def _prepare(self):
return self
def _as_sql(self, connection):
"""
Returns the internal query's SQL and parameters (as a tuple).
"""
obj = self.values("pk")
if obj._db is None or connection == connections[obj._db]:
return obj.query.get_compiler(connection=connection).as_nested_sql()
raise ValueError("Can't do subqueries with queries on different DBs.")
# When used as part of a nested query, a queryset will never be an "always
# empty" result.
value_annotation = True
def _add_hints(self, **hints):
"""
Update hinting information for later use by Routers
"""
# If there is any hinting information, add it to what we already know.
# If we have a new hint for an existing key, overwrite with the new value.
self._hints.update(hints)
def _has_filters(self):
"""
Checks if this QuerySet has any filtering going on. Note that this
isn't equivalent for checking if all objects are present in results,
for example qs[1:]._has_filters() -> False.
"""
return self.query.has_filters()
def is_compatible_query_object_type(self, opts):
model = self.model
return (
model == opts.concrete_model or
opts.concrete_model in model._meta.get_parent_list() or
model in opts.get_parent_list()
)
is_compatible_query_object_type.queryset_only = True
class InstanceCheckMeta(type):
def __instancecheck__(self, instance):
return instance.query.is_empty()
class EmptyQuerySet(six.with_metaclass(InstanceCheckMeta)):
"""
Marker class usable for checking if a queryset is empty by .none():
isinstance(qs.none(), EmptyQuerySet) -> True
"""
def __init__(self, *args, **kwargs):
raise TypeError("EmptyQuerySet can't be instantiated")
class ValuesQuerySet(QuerySet):
def __init__(self, *args, **kwargs):
super(ValuesQuerySet, self).__init__(*args, **kwargs)
# select_related isn't supported in values(). (FIXME -#3358)
self.query.select_related = False
# QuerySet.clone() will also set up the _fields attribute with the
# names of the model fields to select.
def only(self, *fields):
raise NotImplementedError("ValuesQuerySet does not implement only()")
def defer(self, *fields):
raise NotImplementedError("ValuesQuerySet does not implement defer()")
def iterator(self):
# Purge any extra columns that haven't been explicitly asked for
extra_names = list(self.query.extra_select)
field_names = self.field_names
annotation_names = list(self.query.annotation_select)
names = extra_names + field_names + annotation_names
for row in self.query.get_compiler(self.db).results_iter():
yield dict(zip(names, row))
def delete(self):
        # values().delete() doesn't work currently - make sure it raises a
        # user-friendly error.
raise TypeError("Queries with .values() or .values_list() applied "
"can't be deleted")
def _setup_query(self):
"""
Constructs the field_names list that the values query will be
retrieving.
Called by the _clone() method after initializing the rest of the
instance.
"""
if self.query.group_by is True:
self.query.add_fields([f.attname for f in self.model._meta.concrete_fields], False)
self.query.set_group_by()
self.query.clear_deferred_loading()
self.query.clear_select_fields()
if self._fields:
self.extra_names = []
self.annotation_names = []
if not self.query._extra and not self.query._annotations:
# Short cut - if there are no extra or annotations, then
# the values() clause must be just field names.
self.field_names = list(self._fields)
else:
self.query.default_cols = False
self.field_names = []
for f in self._fields:
# we inspect the full extra_select list since we might
# be adding back an extra select item that we hadn't
# had selected previously.
if self.query._extra and f in self.query._extra:
self.extra_names.append(f)
elif f in self.query.annotation_select:
self.annotation_names.append(f)
else:
self.field_names.append(f)
else:
# Default to all fields.
self.extra_names = None
self.field_names = [f.attname for f in self.model._meta.concrete_fields]
self.annotation_names = None
self.query.select = []
if self.extra_names is not None:
self.query.set_extra_mask(self.extra_names)
self.query.add_fields(self.field_names, True)
if self.annotation_names is not None:
self.query.set_annotation_mask(self.annotation_names)
def _clone(self, klass=None, setup=False, **kwargs):
"""
Cloning a ValuesQuerySet preserves the current fields.
"""
c = super(ValuesQuerySet, self)._clone(klass, **kwargs)
if not hasattr(c, '_fields'):
# Only clone self._fields if _fields wasn't passed into the cloning
# call directly.
c._fields = self._fields[:]
c.field_names = self.field_names
c.extra_names = self.extra_names
c.annotation_names = self.annotation_names
if setup and hasattr(c, '_setup_query'):
c._setup_query()
return c
def _merge_sanity_check(self, other):
super(ValuesQuerySet, self)._merge_sanity_check(other)
if (set(self.extra_names) != set(other.extra_names) or
set(self.field_names) != set(other.field_names) or
self.annotation_names != other.annotation_names):
raise TypeError("Merging '%s' classes must involve the same values in each case."
% self.__class__.__name__)
def _setup_aggregate_query(self, aggregates):
"""
Prepare the query for computing a result that contains aggregate annotations.
"""
self.query.set_group_by()
if self.annotation_names is not None:
self.annotation_names.extend(aggregates)
self.query.set_annotation_mask(self.annotation_names)
super(ValuesQuerySet, self)._setup_aggregate_query(aggregates)
def _as_sql(self, connection):
"""
For ValuesQuerySet (and subclasses like ValuesListQuerySet), they can
only be used as nested queries if they're already set up to select only
a single field (in which case, that is the field column that is
returned). This differs from QuerySet.as_sql(), where the column to
select is set up by Django.
"""
if ((self._fields and len(self._fields) > 1) or
(not self._fields and len(self.model._meta.fields) > 1)):
raise TypeError('Cannot use a multi-field %s as a filter value.'
% self.__class__.__name__)
obj = self._clone()
if obj._db is None or connection == connections[obj._db]:
return obj.query.get_compiler(connection=connection).as_nested_sql()
raise ValueError("Can't do subqueries with queries on different DBs.")
def _prepare(self):
"""
Validates that we aren't trying to do a query like
value__in=qs.values('value1', 'value2'), which isn't valid.
"""
if ((self._fields and len(self._fields) > 1) or
(not self._fields and len(self.model._meta.fields) > 1)):
raise TypeError('Cannot use a multi-field %s as a filter value.'
% self.__class__.__name__)
return self
def is_compatible_query_object_type(self, opts):
"""
ValueQuerySets do not need to be checked for compatibility.
We trust that users of ValueQuerySets know what they are doing.
"""
return True
class ValuesListQuerySet(ValuesQuerySet):
def iterator(self):
compiler = self.query.get_compiler(self.db)
if self.flat and len(self._fields) == 1:
for row in compiler.results_iter():
yield row[0]
elif not self.query.extra_select and not self.query.annotation_select:
for row in compiler.results_iter():
yield tuple(row)
else:
# When extra(select=...) or an annotation is involved, the extra
# cols are always at the start of the row, and we need to reorder
# the fields to match the order in self._fields.
extra_names = list(self.query.extra_select)
field_names = self.field_names
annotation_names = list(self.query.annotation_select)
names = extra_names + field_names + annotation_names
# If a field list has been specified, use it. Otherwise, use the
# full list of fields, including extras and annotations.
if self._fields:
fields = list(self._fields) + [f for f in annotation_names if f not in self._fields]
else:
fields = names
for row in compiler.results_iter():
data = dict(zip(names, row))
yield tuple(data[f] for f in fields)
def _clone(self, *args, **kwargs):
clone = super(ValuesListQuerySet, self)._clone(*args, **kwargs)
if not hasattr(clone, "flat"):
# Only assign flat if the clone didn't already get it from kwargs
clone.flat = self.flat
return clone
class RawQuerySet(object):
"""
Provides an iterator which converts the results of raw SQL queries into
annotated model instances.
"""
def __init__(self, raw_query, model=None, query=None, params=None,
translations=None, using=None, hints=None):
self.raw_query = raw_query
self.model = model
self._db = using
self._hints = hints or {}
self.query = query or sql.RawQuery(sql=raw_query, using=self.db, params=params)
self.params = params or ()
self.translations = translations or {}
def resolve_model_init_order(self):
"""
Resolve the init field names and value positions
"""
model_init_fields = [f for f in self.model._meta.fields if f.column in self.columns]
annotation_fields = [(column, pos) for pos, column in enumerate(self.columns)
if column not in self.model_fields]
model_init_order = [self.columns.index(f.column) for f in model_init_fields]
model_init_names = [f.attname for f in model_init_fields]
return model_init_names, model_init_order, annotation_fields
def __iter__(self):
# Cache some things for performance reasons outside the loop.
db = self.db
compiler = connections[db].ops.compiler('SQLCompiler')(
self.query, connections[db], db
)
query = iter(self.query)
try:
model_init_names, model_init_pos, annotation_fields = self.resolve_model_init_order()
# Find out which model's fields are not present in the query.
skip = set()
for field in self.model._meta.fields:
if field.attname not in model_init_names:
skip.add(field.attname)
if skip:
if self.model._meta.pk.attname in skip:
raise InvalidQuery('Raw query must include the primary key')
model_cls = deferred_class_factory(self.model, skip)
else:
model_cls = self.model
fields = [self.model_fields.get(c, None) for c in self.columns]
converters = compiler.get_converters([
f.get_col(f.model._meta.db_table) if f else None for f in fields
])
for values in query:
if converters:
values = compiler.apply_converters(values, converters)
# Associate fields to values
model_init_values = [values[pos] for pos in model_init_pos]
instance = model_cls.from_db(db, model_init_names, model_init_values)
if annotation_fields:
for column, pos in annotation_fields:
setattr(instance, column, values[pos])
yield instance
finally:
# Done iterating the Query. If it has its own cursor, close it.
if hasattr(self.query, 'cursor') and self.query.cursor:
self.query.cursor.close()
def __repr__(self):
return "<RawQuerySet: %s>" % self.query
def __getitem__(self, k):
return list(self)[k]
@property
def db(self):
"Return the database that will be used if this query is executed now"
return self._db or router.db_for_read(self.model, **self._hints)
def using(self, alias):
"""
Selects which database this Raw QuerySet should execute its query against.
"""
return RawQuerySet(self.raw_query, model=self.model,
query=self.query.clone(using=alias),
params=self.params, translations=self.translations,
using=alias)
@property
def columns(self):
"""
A list of model field names in the order they'll appear in the
query results.
"""
if not hasattr(self, '_columns'):
self._columns = self.query.get_columns()
# Adjust any column names which don't match field names
for (query_name, model_name) in self.translations.items():
try:
index = self._columns.index(query_name)
self._columns[index] = model_name
except ValueError:
# Ignore translations for non-existent column names
pass
return self._columns
@property
def model_fields(self):
"""
A dict mapping column names to model field names.
"""
if not hasattr(self, '_model_fields'):
converter = connections[self.db].introspection.table_name_converter
self._model_fields = {}
for field in self.model._meta.fields:
name, column = field.get_attname_column()
self._model_fields[converter(column)] = field
return self._model_fields
class Prefetch(object):
def __init__(self, lookup, queryset=None, to_attr=None):
# `prefetch_through` is the path we traverse to perform the prefetch.
self.prefetch_through = lookup
# `prefetch_to` is the path to the attribute that stores the result.
self.prefetch_to = lookup
if to_attr:
self.prefetch_to = LOOKUP_SEP.join(lookup.split(LOOKUP_SEP)[:-1] + [to_attr])
self.queryset = queryset
self.to_attr = to_attr
def add_prefix(self, prefix):
self.prefetch_through = LOOKUP_SEP.join([prefix, self.prefetch_through])
self.prefetch_to = LOOKUP_SEP.join([prefix, self.prefetch_to])
def get_current_prefetch_through(self, level):
return LOOKUP_SEP.join(self.prefetch_through.split(LOOKUP_SEP)[:level + 1])
def get_current_prefetch_to(self, level):
return LOOKUP_SEP.join(self.prefetch_to.split(LOOKUP_SEP)[:level + 1])
def get_current_to_attr(self, level):
parts = self.prefetch_to.split(LOOKUP_SEP)
to_attr = parts[level]
as_attr = self.to_attr and level == len(parts) - 1
return to_attr, as_attr
def get_current_queryset(self, level):
if self.get_current_prefetch_to(level) == self.prefetch_to:
return self.queryset
return None
def __eq__(self, other):
if isinstance(other, Prefetch):
return self.prefetch_to == other.prefetch_to
return False
def __hash__(self):
return hash(self.__class__) ^ hash(self.prefetch_to)
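    # Illustrative usage sketch (hypothetical Author/Book models):
    #   Author.objects.prefetch_related(
    #       Prefetch('books', queryset=Book.objects.order_by('title'),
    #                to_attr='sorted_books'))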
def normalize_prefetch_lookups(lookups, prefix=None):
"""
    Helper function that normalizes lookups into Prefetch objects.
"""
ret = []
for lookup in lookups:
if not isinstance(lookup, Prefetch):
lookup = Prefetch(lookup)
if prefix:
lookup.add_prefix(prefix)
ret.append(lookup)
return ret
def prefetch_related_objects(result_cache, related_lookups):
"""
    Helper function for prefetch_related functionality.
    Populates prefetched object caches for a list of results
    from a QuerySet.
"""
if len(result_cache) == 0:
return # nothing to do
related_lookups = normalize_prefetch_lookups(related_lookups)
# We need to be able to dynamically add to the list of prefetch_related
    # lookups that we look up (see below). So we need some bookkeeping to
# ensure we don't do duplicate work.
done_queries = {} # dictionary of things like 'foo__bar': [results]
auto_lookups = set() # we add to this as we go through.
followed_descriptors = set() # recursion protection
all_lookups = deque(related_lookups)
while all_lookups:
lookup = all_lookups.popleft()
if lookup.prefetch_to in done_queries:
if lookup.queryset:
raise ValueError("'%s' lookup was already seen with a different queryset. "
"You may need to adjust the ordering of your lookups." % lookup.prefetch_to)
continue
# Top level, the list of objects to decorate is the result cache
# from the primary QuerySet. It won't be for deeper levels.
obj_list = result_cache
through_attrs = lookup.prefetch_through.split(LOOKUP_SEP)
for level, through_attr in enumerate(through_attrs):
# Prepare main instances
if len(obj_list) == 0:
break
prefetch_to = lookup.get_current_prefetch_to(level)
if prefetch_to in done_queries:
# Skip any prefetching, and any object preparation
obj_list = done_queries[prefetch_to]
continue
# Prepare objects:
good_objects = True
for obj in obj_list:
# Since prefetching can re-use instances, it is possible to have
# the same instance multiple times in obj_list, so obj might
# already be prepared.
if not hasattr(obj, '_prefetched_objects_cache'):
try:
obj._prefetched_objects_cache = {}
except AttributeError:
# Must be in a QuerySet subclass that is not returning
# Model instances, either in Django or 3rd
# party. prefetch_related() doesn't make sense, so quit
# now.
good_objects = False
break
if not good_objects:
break
# Descend down tree
# We assume that objects retrieved are homogeneous (which is the premise
# of prefetch_related), so what applies to first object applies to all.
first_obj = obj_list[0]
prefetcher, descriptor, attr_found, is_fetched = get_prefetcher(first_obj, through_attr)
if not attr_found:
raise AttributeError("Cannot find '%s' on %s object, '%s' is an invalid "
"parameter to prefetch_related()" %
(through_attr, first_obj.__class__.__name__, lookup.prefetch_through))
if level == len(through_attrs) - 1 and prefetcher is None:
# Last one, this *must* resolve to something that supports
# prefetching, otherwise there is no point adding it and the
# developer asking for it has made a mistake.
raise ValueError("'%s' does not resolve to an item that supports "
"prefetching - this is an invalid parameter to "
"prefetch_related()." % lookup.prefetch_through)
if prefetcher is not None and not is_fetched:
obj_list, additional_lookups = prefetch_one_level(obj_list, prefetcher, lookup, level)
# We need to ensure we don't keep adding lookups from the
# same relationships to stop infinite recursion. So, if we
# are already on an automatically added lookup, don't add
# the new lookups from relationships we've seen already.
if not (lookup in auto_lookups and descriptor in followed_descriptors):
done_queries[prefetch_to] = obj_list
new_lookups = normalize_prefetch_lookups(additional_lookups, prefetch_to)
auto_lookups.update(new_lookups)
all_lookups.extendleft(new_lookups)
followed_descriptors.add(descriptor)
else:
# Either a singly related object that has already been fetched
# (e.g. via select_related), or hopefully some other property
# that doesn't support prefetching but needs to be traversed.
# We replace the current list of parent objects with the list
# of related objects, filtering out empty or missing values so
# that we can continue with nullable or reverse relations.
new_obj_list = []
for obj in obj_list:
try:
new_obj = getattr(obj, through_attr)
except exceptions.ObjectDoesNotExist:
continue
if new_obj is None:
continue
# We special-case `list` rather than something more generic
# like `Iterable` because we don't want to accidentally match
# user models that define __iter__.
if isinstance(new_obj, list):
new_obj_list.extend(new_obj)
else:
new_obj_list.append(new_obj)
obj_list = new_obj_list
def get_prefetcher(instance, attr):
"""
For the attribute 'attr' on the given instance, finds
an object that has a get_prefetch_queryset().
Returns a 4 tuple containing:
(the object with get_prefetch_queryset (or None),
the descriptor object representing this relationship (or None),
a boolean that is False if the attribute was not found at all,
a boolean that is True if the attribute has already been fetched)
"""
prefetcher = None
is_fetched = False
# For singly related objects, we have to avoid getting the attribute
# from the object, as this will trigger the query. So we first try
# on the class, in order to get the descriptor object.
rel_obj_descriptor = getattr(instance.__class__, attr, None)
if rel_obj_descriptor is None:
attr_found = hasattr(instance, attr)
else:
attr_found = True
if rel_obj_descriptor:
# singly related object, descriptor object has the
# get_prefetch_queryset() method.
if hasattr(rel_obj_descriptor, 'get_prefetch_queryset'):
prefetcher = rel_obj_descriptor
if rel_obj_descriptor.is_cached(instance):
is_fetched = True
else:
# descriptor doesn't support prefetching, so we go ahead and get
# the attribute on the instance rather than the class to
# support many related managers
rel_obj = getattr(instance, attr)
if hasattr(rel_obj, 'get_prefetch_queryset'):
prefetcher = rel_obj
return prefetcher, rel_obj_descriptor, attr_found, is_fetched
def prefetch_one_level(instances, prefetcher, lookup, level):
"""
Helper function for prefetch_related_objects
Runs prefetches on all instances using the prefetcher object,
assigning results to relevant caches in instance.
The prefetched objects are returned, along with any additional
prefetches that must be done due to prefetch_related lookups
found from default managers.
"""
# prefetcher must have a method get_prefetch_queryset() which takes a list
# of instances, and returns a tuple:
# (queryset of instances of self.model that are related to passed in instances,
# callable that gets value to be matched for returned instances,
# callable that gets value to be matched for passed in instances,
# boolean that is True for singly related objects,
# cache name to assign to).
# The 'values to be matched' must be hashable as they will be used
# in a dictionary.
rel_qs, rel_obj_attr, instance_attr, single, cache_name = (
prefetcher.get_prefetch_queryset(instances, lookup.get_current_queryset(level)))
# We have to handle the possibility that the QuerySet we just got back
# contains some prefetch_related lookups. We don't want to trigger the
# prefetch_related functionality by evaluating the query. Rather, we need
# to merge in the prefetch_related lookups.
additional_lookups = getattr(rel_qs, '_prefetch_related_lookups', [])
if additional_lookups:
# Don't need to clone because the manager should have given us a fresh
# instance, so we access an internal instead of using public interface
# for performance reasons.
rel_qs._prefetch_related_lookups = []
all_related_objects = list(rel_qs)
rel_obj_cache = {}
for rel_obj in all_related_objects:
rel_attr_val = rel_obj_attr(rel_obj)
rel_obj_cache.setdefault(rel_attr_val, []).append(rel_obj)
for obj in instances:
instance_attr_val = instance_attr(obj)
vals = rel_obj_cache.get(instance_attr_val, [])
to_attr, as_attr = lookup.get_current_to_attr(level)
if single:
val = vals[0] if vals else None
to_attr = to_attr if as_attr else cache_name
setattr(obj, to_attr, val)
else:
if as_attr:
setattr(obj, to_attr, vals)
else:
# Cache in the QuerySet.all().
qs = getattr(obj, to_attr).all()
qs._result_cache = vals
# We don't want the individual qs doing prefetch_related now,
# since we have merged this into the current work.
qs._prefetch_done = True
obj._prefetched_objects_cache[cache_name] = qs
return all_related_objects, additional_lookups
class RelatedPopulator(object):
"""
RelatedPopulator is used for select_related() object instantiation.
The idea is that each select_related() model will be populated by a
different RelatedPopulator instance. The RelatedPopulator instances get
klass_info and select (computed in SQLCompiler) plus the used db as
input for initialization. That data is used to compute which columns
to use, how to instantiate the model, and how to populate the links
between the objects.
The actual creation of the objects is done in populate() method. This
method gets row and from_obj as input and populates the select_related()
model instance.
"""
def __init__(self, klass_info, select, db):
self.db = db
# Pre-compute needed attributes. The attributes are:
# - model_cls: the possibly deferred model class to instantiate
# - either:
# - cols_start, cols_end: usually the columns in the row are
# in the same order model_cls.__init__ expects them, so we
# can instantiate by model_cls(*row[cols_start:cols_end])
# - reorder_for_init: When select_related descends to a child
# class, then we want to reuse the already selected parent
# data. However, in this case the parent data isn't necessarily
# in the same order that Model.__init__ expects it to be, so
# we have to reorder the parent data. The reorder_for_init
# attribute contains a function used to reorder the field data
# in the order __init__ expects it.
# - pk_idx: the index of the primary key field in the reordered
# model data. Used to check if a related object exists at all.
# - init_list: the field attnames fetched from the database. For
# deferred models this isn't the same as all attnames of the
# model's fields.
# - related_populators: a list of RelatedPopulator instances if
# select_related() descends to related models from this model.
# - cache_name, reverse_cache_name: the names to use for setattr
# when assigning the fetched object to the from_obj. If the
# reverse_cache_name is set, then we also set the reverse link.
select_fields = klass_info['select_fields']
from_parent = klass_info['from_parent']
if not from_parent:
self.cols_start = select_fields[0]
self.cols_end = select_fields[-1] + 1
self.init_list = [
f[0].target.attname for f in select[self.cols_start:self.cols_end]
]
self.reorder_for_init = None
else:
model_init_attnames = [
f.attname for f in klass_info['model']._meta.concrete_fields
]
reorder_map = []
for idx in select_fields:
field = select[idx][0].target
init_pos = model_init_attnames.index(field.attname)
reorder_map.append((init_pos, field.attname, idx))
reorder_map.sort()
self.init_list = [v[1] for v in reorder_map]
pos_list = [row_pos for _, _, row_pos in reorder_map]
def reorder_for_init(row):
return [row[row_pos] for row_pos in pos_list]
self.reorder_for_init = reorder_for_init
self.model_cls = self.get_deferred_cls(klass_info, self.init_list)
self.pk_idx = self.init_list.index(self.model_cls._meta.pk.attname)
self.related_populators = get_related_populators(klass_info, select, self.db)
field = klass_info['field']
reverse = klass_info['reverse']
self.reverse_cache_name = None
if reverse:
self.cache_name = field.rel.get_cache_name()
self.reverse_cache_name = field.get_cache_name()
else:
self.cache_name = field.get_cache_name()
if field.unique:
self.reverse_cache_name = field.rel.get_cache_name()
def get_deferred_cls(self, klass_info, init_list):
model_cls = klass_info['model']
if len(init_list) != len(model_cls._meta.concrete_fields):
init_set = set(init_list)
skip = [
f.attname for f in model_cls._meta.concrete_fields
if f.attname not in init_set
]
model_cls = deferred_class_factory(model_cls, skip)
return model_cls
def populate(self, row, from_obj):
if self.reorder_for_init:
obj_data = self.reorder_for_init(row)
else:
obj_data = row[self.cols_start:self.cols_end]
if obj_data[self.pk_idx] is None:
obj = None
else:
obj = self.model_cls.from_db(self.db, self.init_list, obj_data)
if obj and self.related_populators:
for rel_iter in self.related_populators:
rel_iter.populate(row, obj)
setattr(from_obj, self.cache_name, obj)
if obj and self.reverse_cache_name:
setattr(obj, self.reverse_cache_name, from_obj)
def get_related_populators(klass_info, select, db):
iterators = []
related_klass_infos = klass_info.get('related_klass_infos', [])
for rel_klass_info in related_klass_infos:
rel_cls = RelatedPopulator(rel_klass_info, select, db)
iterators.append(rel_cls)
return iterators
| mit | 5,433,555,596,517,188,000 | 39.229944 | 115 | 0.587456 | false |
FireWRT/OpenWrt-Firefly-Libraries | staging_dir/host/lib/python3.4/test/test_heapq.py | 111 | 14475 | """Unittests for heapq."""
import sys
import random
import unittest
from itertools import chain
from test import support
from unittest import TestCase, skipUnless
py_heapq = support.import_fresh_module('heapq', blocked=['_heapq'])
c_heapq = support.import_fresh_module('heapq', fresh=['_heapq'])
# _heapq.nlargest/nsmallest are saved in heapq._nlargest/_smallest when
# _heapq is imported, so check them there
func_names = ['heapify', 'heappop', 'heappush', 'heappushpop',
'heapreplace', '_nlargest', '_nsmallest']
class TestModules(TestCase):
def test_py_functions(self):
for fname in func_names:
self.assertEqual(getattr(py_heapq, fname).__module__, 'heapq')
@skipUnless(c_heapq, 'requires _heapq')
def test_c_functions(self):
for fname in func_names:
self.assertEqual(getattr(c_heapq, fname).__module__, '_heapq')
class TestHeap:
def test_push_pop(self):
# 1) Push 256 random numbers and pop them off, verifying all's OK.
heap = []
data = []
self.check_invariant(heap)
for i in range(256):
item = random.random()
data.append(item)
self.module.heappush(heap, item)
self.check_invariant(heap)
results = []
while heap:
item = self.module.heappop(heap)
self.check_invariant(heap)
results.append(item)
data_sorted = data[:]
data_sorted.sort()
self.assertEqual(data_sorted, results)
# 2) Check that the invariant holds for a sorted array
self.check_invariant(results)
self.assertRaises(TypeError, self.module.heappush, [])
try:
self.assertRaises(TypeError, self.module.heappush, None, None)
self.assertRaises(TypeError, self.module.heappop, None)
except AttributeError:
pass
def check_invariant(self, heap):
# Check the heap invariant.
for pos, item in enumerate(heap):
if pos: # pos 0 has no parent
parentpos = (pos-1) >> 1
self.assertTrue(heap[parentpos] <= item)
def test_heapify(self):
for size in range(30):
heap = [random.random() for dummy in range(size)]
self.module.heapify(heap)
self.check_invariant(heap)
self.assertRaises(TypeError, self.module.heapify, None)
def test_naive_nbest(self):
data = [random.randrange(2000) for i in range(1000)]
heap = []
for item in data:
self.module.heappush(heap, item)
if len(heap) > 10:
self.module.heappop(heap)
heap.sort()
self.assertEqual(heap, sorted(data)[-10:])
def heapiter(self, heap):
# An iterator returning a heap's elements, smallest-first.
try:
while 1:
yield self.module.heappop(heap)
except IndexError:
pass
def test_nbest(self):
# Less-naive "N-best" algorithm, much faster (if len(data) is big
# enough <wink>) than sorting all of data. However, if we had a max
# heap instead of a min heap, it could go faster still via
# heapify'ing all of data (linear time), then doing 10 heappops
# (10 log-time steps).
data = [random.randrange(2000) for i in range(1000)]
heap = data[:10]
self.module.heapify(heap)
for item in data[10:]:
if item > heap[0]: # this gets rarer the longer we run
self.module.heapreplace(heap, item)
self.assertEqual(list(self.heapiter(heap)), sorted(data)[-10:])
self.assertRaises(TypeError, self.module.heapreplace, None)
self.assertRaises(TypeError, self.module.heapreplace, None, None)
self.assertRaises(IndexError, self.module.heapreplace, [], None)
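    # A minimal sketch (illustrative only, not exercised by this suite), assuming only the
    # public heapq API used above: the same "N best" result is what nlargest computes.
    #
    #     import heapq, random
    #     data = [random.randrange(2000) for i in range(1000)]
    #     assert heapq.nlargest(10, data) == sorted(data, reverse=True)[:10]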
def test_nbest_with_pushpop(self):
data = [random.randrange(2000) for i in range(1000)]
heap = data[:10]
self.module.heapify(heap)
for item in data[10:]:
self.module.heappushpop(heap, item)
self.assertEqual(list(self.heapiter(heap)), sorted(data)[-10:])
self.assertEqual(self.module.heappushpop([], 'x'), 'x')
def test_heappushpop(self):
h = []
x = self.module.heappushpop(h, 10)
self.assertEqual((h, x), ([], 10))
h = [10]
x = self.module.heappushpop(h, 10.0)
self.assertEqual((h, x), ([10], 10.0))
self.assertEqual(type(h[0]), int)
self.assertEqual(type(x), float)
h = [10];
x = self.module.heappushpop(h, 9)
self.assertEqual((h, x), ([10], 9))
h = [10];
x = self.module.heappushpop(h, 11)
self.assertEqual((h, x), ([11], 10))
def test_heapsort(self):
# Exercise everything with repeated heapsort checks
for trial in range(100):
size = random.randrange(50)
data = [random.randrange(25) for i in range(size)]
if trial & 1: # Half of the time, use heapify
heap = data[:]
self.module.heapify(heap)
else: # The rest of the time, use heappush
heap = []
for item in data:
self.module.heappush(heap, item)
heap_sorted = [self.module.heappop(heap) for i in range(size)]
self.assertEqual(heap_sorted, sorted(data))
def test_merge(self):
inputs = []
for i in range(random.randrange(5)):
row = sorted(random.randrange(1000) for j in range(random.randrange(10)))
inputs.append(row)
self.assertEqual(sorted(chain(*inputs)), list(self.module.merge(*inputs)))
self.assertEqual(list(self.module.merge()), [])
def test_merge_does_not_suppress_index_error(self):
# Issue 19018: Heapq.merge suppresses IndexError from user generator
def iterable():
s = list(range(10))
for i in range(20):
yield s[i] # IndexError when i > 10
with self.assertRaises(IndexError):
list(self.module.merge(iterable(), iterable()))
def test_merge_stability(self):
class Int(int):
pass
inputs = [[], [], [], []]
for i in range(20000):
stream = random.randrange(4)
x = random.randrange(500)
obj = Int(x)
obj.pair = (x, stream)
inputs[stream].append(obj)
for stream in inputs:
stream.sort()
result = [i.pair for i in self.module.merge(*inputs)]
self.assertEqual(result, sorted(result))
def test_nsmallest(self):
data = [(random.randrange(2000), i) for i in range(1000)]
for f in (None, lambda x: x[0] * 547 % 2000):
for n in (0, 1, 2, 10, 100, 400, 999, 1000, 1100):
self.assertEqual(list(self.module.nsmallest(n, data)),
sorted(data)[:n])
self.assertEqual(list(self.module.nsmallest(n, data, key=f)),
sorted(data, key=f)[:n])
def test_nlargest(self):
data = [(random.randrange(2000), i) for i in range(1000)]
for f in (None, lambda x: x[0] * 547 % 2000):
for n in (0, 1, 2, 10, 100, 400, 999, 1000, 1100):
self.assertEqual(list(self.module.nlargest(n, data)),
sorted(data, reverse=True)[:n])
self.assertEqual(list(self.module.nlargest(n, data, key=f)),
sorted(data, key=f, reverse=True)[:n])
def test_comparison_operator(self):
# Issue 3051: Make sure heapq works with both __lt__
# For python 3.0, __le__ alone is not enough
def hsort(data, comp):
data = [comp(x) for x in data]
self.module.heapify(data)
return [self.module.heappop(data).x for i in range(len(data))]
class LT:
def __init__(self, x):
self.x = x
def __lt__(self, other):
return self.x > other.x
class LE:
def __init__(self, x):
self.x = x
def __le__(self, other):
return self.x >= other.x
data = [random.random() for i in range(100)]
target = sorted(data, reverse=True)
self.assertEqual(hsort(data, LT), target)
        self.assertRaises(TypeError, hsort, data, LE)
class TestHeapPython(TestHeap, TestCase):
module = py_heapq
@skipUnless(c_heapq, 'requires _heapq')
class TestHeapC(TestHeap, TestCase):
module = c_heapq
#==============================================================================
class LenOnly:
"Dummy sequence class defining __len__ but not __getitem__."
def __len__(self):
return 10
class GetOnly:
"Dummy sequence class defining __getitem__ but not __len__."
def __getitem__(self, ndx):
return 10
class CmpErr:
"Dummy element that always raises an error during comparison"
def __eq__(self, other):
raise ZeroDivisionError
__ne__ = __lt__ = __le__ = __gt__ = __ge__ = __eq__
def R(seqn):
'Regular generator'
for i in seqn:
yield i
class G:
'Sequence using __getitem__'
def __init__(self, seqn):
self.seqn = seqn
def __getitem__(self, i):
return self.seqn[i]
class I:
'Sequence using iterator protocol'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
def __next__(self):
if self.i >= len(self.seqn): raise StopIteration
v = self.seqn[self.i]
self.i += 1
return v
class Ig:
'Sequence using iterator protocol defined with a generator'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
for val in self.seqn:
yield val
class X:
'Missing __getitem__ and __iter__'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __next__(self):
if self.i >= len(self.seqn): raise StopIteration
v = self.seqn[self.i]
self.i += 1
return v
class N:
'Iterator missing __next__()'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
class E:
'Test propagation of exceptions'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
def __next__(self):
3 // 0
class S:
'Test immediate stop'
def __init__(self, seqn):
pass
def __iter__(self):
return self
def __next__(self):
raise StopIteration
from itertools import chain
def L(seqn):
'Test multiple tiers of iterators'
return chain(map(lambda x:x, R(Ig(G(seqn)))))
class SideEffectLT:
def __init__(self, value, heap):
self.value = value
self.heap = heap
def __lt__(self, other):
self.heap[:] = []
return self.value < other.value
class TestErrorHandling:
def test_non_sequence(self):
for f in (self.module.heapify, self.module.heappop):
self.assertRaises((TypeError, AttributeError), f, 10)
for f in (self.module.heappush, self.module.heapreplace,
self.module.nlargest, self.module.nsmallest):
self.assertRaises((TypeError, AttributeError), f, 10, 10)
def test_len_only(self):
for f in (self.module.heapify, self.module.heappop):
self.assertRaises((TypeError, AttributeError), f, LenOnly())
for f in (self.module.heappush, self.module.heapreplace):
self.assertRaises((TypeError, AttributeError), f, LenOnly(), 10)
for f in (self.module.nlargest, self.module.nsmallest):
self.assertRaises(TypeError, f, 2, LenOnly())
def test_get_only(self):
for f in (self.module.heapify, self.module.heappop):
self.assertRaises(TypeError, f, GetOnly())
for f in (self.module.heappush, self.module.heapreplace):
self.assertRaises(TypeError, f, GetOnly(), 10)
for f in (self.module.nlargest, self.module.nsmallest):
self.assertRaises(TypeError, f, 2, GetOnly())
    def test_cmp_err(self):
seq = [CmpErr(), CmpErr(), CmpErr()]
for f in (self.module.heapify, self.module.heappop):
self.assertRaises(ZeroDivisionError, f, seq)
for f in (self.module.heappush, self.module.heapreplace):
self.assertRaises(ZeroDivisionError, f, seq, 10)
for f in (self.module.nlargest, self.module.nsmallest):
self.assertRaises(ZeroDivisionError, f, 2, seq)
def test_arg_parsing(self):
for f in (self.module.heapify, self.module.heappop,
self.module.heappush, self.module.heapreplace,
self.module.nlargest, self.module.nsmallest):
self.assertRaises((TypeError, AttributeError), f, 10)
def test_iterable_args(self):
for f in (self.module.nlargest, self.module.nsmallest):
for s in ("123", "", range(1000), (1, 1.2), range(2000,2200,5)):
for g in (G, I, Ig, L, R):
self.assertEqual(list(f(2, g(s))), list(f(2,s)))
self.assertEqual(list(f(2, S(s))), [])
self.assertRaises(TypeError, f, 2, X(s))
self.assertRaises(TypeError, f, 2, N(s))
self.assertRaises(ZeroDivisionError, f, 2, E(s))
# Issue #17278: the heap may change size while it's being walked.
def test_heappush_mutating_heap(self):
heap = []
heap.extend(SideEffectLT(i, heap) for i in range(200))
# Python version raises IndexError, C version RuntimeError
with self.assertRaises((IndexError, RuntimeError)):
self.module.heappush(heap, SideEffectLT(5, heap))
def test_heappop_mutating_heap(self):
heap = []
heap.extend(SideEffectLT(i, heap) for i in range(200))
# Python version raises IndexError, C version RuntimeError
with self.assertRaises((IndexError, RuntimeError)):
self.module.heappop(heap)
class TestErrorHandlingPython(TestErrorHandling, TestCase):
module = py_heapq
@skipUnless(c_heapq, 'requires _heapq')
class TestErrorHandlingC(TestErrorHandling, TestCase):
module = c_heapq
if __name__ == "__main__":
unittest.main()
| gpl-2.0 | 5,418,000,759,083,349,000 | 33.71223 | 85 | 0.570086 | false |
spvkgn/youtube-dl | youtube_dl/extractor/lecture2go.py | 87 | 2402 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
determine_ext,
determine_protocol,
parse_duration,
int_or_none,
)
class Lecture2GoIE(InfoExtractor):
_VALID_URL = r'https?://lecture2go\.uni-hamburg\.de/veranstaltungen/-/v/(?P<id>\d+)'
_TEST = {
'url': 'https://lecture2go.uni-hamburg.de/veranstaltungen/-/v/17473',
'md5': 'ac02b570883020d208d405d5a3fd2f7f',
'info_dict': {
'id': '17473',
'ext': 'mp4',
'title': '2 - Endliche Automaten und reguläre Sprachen',
'creator': 'Frank Heitmann',
'duration': 5220,
},
'params': {
# m3u8 download
'skip_download': True,
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
title = self._html_search_regex(r'<em[^>]+class="title">(.+)</em>', webpage, 'title')
formats = []
for url in set(re.findall(r'var\s+playerUri\d+\s*=\s*"([^"]+)"', webpage)):
ext = determine_ext(url)
protocol = determine_protocol({'url': url})
if ext == 'f4m':
formats.extend(self._extract_f4m_formats(url, video_id, f4m_id='hds'))
elif ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(url, video_id, ext='mp4', m3u8_id='hls'))
else:
if protocol == 'rtmp':
continue # XXX: currently broken
formats.append({
'format_id': protocol,
'url': url,
})
self._sort_formats(formats)
creator = self._html_search_regex(
r'<div[^>]+id="description">([^<]+)</div>', webpage, 'creator', fatal=False)
duration = parse_duration(self._html_search_regex(
r'Duration:\s*</em>\s*<em[^>]*>([^<]+)</em>', webpage, 'duration', fatal=False))
view_count = int_or_none(self._html_search_regex(
r'Views:\s*</em>\s*<em[^>]+>(\d+)</em>', webpage, 'view count', fatal=False))
return {
'id': video_id,
'title': title,
'formats': formats,
'creator': creator,
'duration': duration,
'view_count': view_count,
}
| unlicense | 8,057,718,562,335,027,000 | 32.816901 | 99 | 0.511037 | false |
sjerdo/letsencrypt | acme/acme/fields.py | 53 | 1742 | """ACME JSON fields."""
import logging
import pyrfc3339
from acme import jose
logger = logging.getLogger(__name__)
class Fixed(jose.Field):
"""Fixed field."""
def __init__(self, json_name, value):
self.value = value
super(Fixed, self).__init__(
json_name=json_name, default=value, omitempty=False)
def decode(self, value):
if value != self.value:
raise jose.DeserializationError('Expected {0!r}'.format(self.value))
return self.value
def encode(self, value):
if value != self.value:
logger.warn(
'Overriding fixed field (%s) with %r', self.json_name, value)
return value
class RFC3339Field(jose.Field):
"""RFC3339 field encoder/decoder.
Handles decoding/encoding between RFC3339 strings and aware (not
naive) `datetime.datetime` objects
(e.g. ``datetime.datetime.now(pytz.utc)``).
"""
@classmethod
def default_encoder(cls, value):
return pyrfc3339.generate(value)
@classmethod
def default_decoder(cls, value):
try:
return pyrfc3339.parse(value)
except ValueError as error:
raise jose.DeserializationError(error)
class Resource(jose.Field):
"""Resource MITM field."""
def __init__(self, resource_type, *args, **kwargs):
self.resource_type = resource_type
super(Resource, self).__init__(
'resource', default=resource_type, *args, **kwargs)
def decode(self, value):
if value != self.resource_type:
raise jose.DeserializationError(
'Wrong resource type: {0} instead of {1}'.format(
value, self.resource_type))
return value
| apache-2.0 | 1,574,681,558,885,888,500 | 25.393939 | 80 | 0.6062 | false |
karan1276/servo | tests/wpt/web-platform-tests/tools/py/bench/localpath.py | 215 | 1883 |
import py
import timeit
class Listdir:
numiter = 100000
numentries = 100
def setup(self):
tmpdir = py.path.local.make_numbered_dir(self.__class__.__name__)
for i in range(self.numentries):
tmpdir.join(str(i))
self.tmpdir = tmpdir
def run(self):
return self.tmpdir.listdir()
class Listdir_arg(Listdir):
numiter = 100000
numentries = 100
def run(self):
return self.tmpdir.listdir("47")
class Join_onearg(Listdir):
def run(self):
self.tmpdir.join("17")
self.tmpdir.join("18")
self.tmpdir.join("19")
class Join_multi(Listdir):
def run(self):
self.tmpdir.join("a", "b")
self.tmpdir.join("a", "b", "c")
self.tmpdir.join("a", "b", "c", "d")
class Check(Listdir):
def run(self):
self.tmpdir.check()
self.tmpdir.check()
self.tmpdir.check()
class CheckDir(Listdir):
def run(self):
self.tmpdir.check(dir=1)
self.tmpdir.check(dir=1)
assert not self.tmpdir.check(dir=0)
class CheckDir2(Listdir):
def run(self):
self.tmpdir.stat().isdir()
self.tmpdir.stat().isdir()
assert self.tmpdir.stat().isdir()
class CheckFile(Listdir):
def run(self):
self.tmpdir.check(file=1)
assert not self.tmpdir.check(file=1)
assert self.tmpdir.check(file=0)
if __name__ == "__main__":
import time
for cls in [Listdir, Listdir_arg,
Join_onearg, Join_multi,
Check, CheckDir, CheckDir2, CheckFile,]:
inst = cls()
inst.setup()
now = time.time()
for i in xrange(cls.numiter):
inst.run()
elapsed = time.time() - now
print "%s: %d loops took %.2f seconds, per call %.6f" %(
cls.__name__,
cls.numiter, elapsed, elapsed / cls.numiter)
| mpl-2.0 | 71,073,297,045,612,080 | 24.106667 | 73 | 0.562931 | false |
trondhindenes/ansible | lib/ansible/modules/network/avi/avi_controllerproperties.py | 20 | 16486 | #!/usr/bin/python
#
# @author: Gaurav Rastogi ([email protected])
# Eric Anderson ([email protected])
# module_check: supported
# Avi Version: 17.1.2
#
# Copyright: (c) 2017 Gaurav Rastogi, <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_controllerproperties
author: Gaurav Rastogi ([email protected])
short_description: Module for setup of ControllerProperties Avi RESTful Object
description:
- This module is used to configure ControllerProperties object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
allow_ip_forwarding:
description:
- Field introduced in 17.1.1.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
allow_unauthenticated_apis:
description:
- Allow unauthenticated access for special apis.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
allow_unauthenticated_nodes:
description:
- Boolean flag to set allow_unauthenticated_nodes.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
api_idle_timeout:
description:
- Allowed values are 0-1440.
- Default value when not specified in API or module is interpreted by Avi Controller as 15.
- Units(MIN).
appviewx_compat_mode:
description:
- Export configuration in appviewx compatibility mode.
- Field introduced in 17.1.1.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
attach_ip_retry_interval:
description:
- Number of attach_ip_retry_interval.
- Default value when not specified in API or module is interpreted by Avi Controller as 360.
- Units(SEC).
attach_ip_retry_limit:
description:
- Number of attach_ip_retry_limit.
- Default value when not specified in API or module is interpreted by Avi Controller as 4.
bm_use_ansible:
description:
- Use ansible for se creation in baremetal.
- Field introduced in 17.2.2.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
version_added: "2.5"
type: bool
cluster_ip_gratuitous_arp_period:
description:
- Number of cluster_ip_gratuitous_arp_period.
- Default value when not specified in API or module is interpreted by Avi Controller as 60.
- Units(MIN).
crashed_se_reboot:
description:
- Number of crashed_se_reboot.
- Default value when not specified in API or module is interpreted by Avi Controller as 900.
- Units(SEC).
dead_se_detection_timer:
description:
- Number of dead_se_detection_timer.
- Default value when not specified in API or module is interpreted by Avi Controller as 360.
- Units(SEC).
dns_refresh_period:
description:
- Number of dns_refresh_period.
- Default value when not specified in API or module is interpreted by Avi Controller as 60.
- Units(MIN).
dummy:
description:
- Number of dummy.
enable_memory_balancer:
description:
- Enable/disable memory balancer.
- Field introduced in 17.2.8.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
version_added: "2.6"
type: bool
fatal_error_lease_time:
description:
- Number of fatal_error_lease_time.
- Default value when not specified in API or module is interpreted by Avi Controller as 120.
- Units(SEC).
max_dead_se_in_grp:
description:
- Number of max_dead_se_in_grp.
- Default value when not specified in API or module is interpreted by Avi Controller as 1.
max_pcap_per_tenant:
description:
- Maximum number of pcap files stored per tenant.
- Default value when not specified in API or module is interpreted by Avi Controller as 4.
max_seq_attach_ip_failures:
description:
- Maximum number of consecutive attach ip failures that halts vs placement.
- Field introduced in 17.2.2.
- Default value when not specified in API or module is interpreted by Avi Controller as 3.
version_added: "2.5"
max_seq_vnic_failures:
description:
- Number of max_seq_vnic_failures.
- Default value when not specified in API or module is interpreted by Avi Controller as 3.
persistence_key_rotate_period:
description:
- Allowed values are 1-1051200.
- Special values are 0 - 'disabled'.
- Default value when not specified in API or module is interpreted by Avi Controller as 60.
- Units(MIN).
portal_token:
description:
- Token used for uploading tech-support to portal.
- Field introduced in 16.4.6,17.1.2.
version_added: "2.4"
query_host_fail:
description:
- Number of query_host_fail.
- Default value when not specified in API or module is interpreted by Avi Controller as 180.
- Units(SEC).
safenet_hsm_version:
description:
- Version of the safenet package installed on the controller.
- Field introduced in 16.5.2,17.2.3.
version_added: "2.5"
se_create_timeout:
description:
- Number of se_create_timeout.
- Default value when not specified in API or module is interpreted by Avi Controller as 900.
- Units(SEC).
se_failover_attempt_interval:
description:
- Interval between attempting failovers to an se.
- Default value when not specified in API or module is interpreted by Avi Controller as 300.
- Units(SEC).
se_offline_del:
description:
- Number of se_offline_del.
- Default value when not specified in API or module is interpreted by Avi Controller as 172000.
- Units(SEC).
se_vnic_cooldown:
description:
- Number of se_vnic_cooldown.
- Default value when not specified in API or module is interpreted by Avi Controller as 120.
- Units(SEC).
secure_channel_cleanup_timeout:
description:
- Number of secure_channel_cleanup_timeout.
- Default value when not specified in API or module is interpreted by Avi Controller as 60.
- Units(MIN).
secure_channel_controller_token_timeout:
description:
- Number of secure_channel_controller_token_timeout.
- Default value when not specified in API or module is interpreted by Avi Controller as 60.
- Units(MIN).
secure_channel_se_token_timeout:
description:
- Number of secure_channel_se_token_timeout.
- Default value when not specified in API or module is interpreted by Avi Controller as 60.
- Units(MIN).
seupgrade_fabric_pool_size:
description:
- Pool size used for all fabric commands during se upgrade.
- Default value when not specified in API or module is interpreted by Avi Controller as 20.
seupgrade_segroup_min_dead_timeout:
description:
- Time to wait before marking segroup upgrade as stuck.
- Default value when not specified in API or module is interpreted by Avi Controller as 360.
- Units(SEC).
ssl_certificate_expiry_warning_days:
description:
- Number of days for ssl certificate expiry warning.
- Units(DAYS).
unresponsive_se_reboot:
description:
- Number of unresponsive_se_reboot.
- Default value when not specified in API or module is interpreted by Avi Controller as 300.
- Units(SEC).
upgrade_dns_ttl:
description:
- Time to account for dns ttl during upgrade.
- This is in addition to vs_scalein_timeout_for_upgrade in se_group.
- Field introduced in 17.1.1.
- Default value when not specified in API or module is interpreted by Avi Controller as 5.
- Units(SEC).
upgrade_lease_time:
description:
- Number of upgrade_lease_time.
- Default value when not specified in API or module is interpreted by Avi Controller as 360.
- Units(SEC).
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
vnic_op_fail_time:
description:
- Number of vnic_op_fail_time.
- Default value when not specified in API or module is interpreted by Avi Controller as 180.
- Units(SEC).
vs_apic_scaleout_timeout:
description:
- Time to wait for the scaled out se to become ready before marking the scaleout done, applies to apic configuration only.
- Default value when not specified in API or module is interpreted by Avi Controller as 360.
- Units(SEC).
vs_awaiting_se_timeout:
description:
- Number of vs_awaiting_se_timeout.
- Default value when not specified in API or module is interpreted by Avi Controller as 60.
- Units(SEC).
vs_key_rotate_period:
description:
- Allowed values are 1-1051200.
- Special values are 0 - 'disabled'.
- Default value when not specified in API or module is interpreted by Avi Controller as 60.
- Units(MIN).
vs_se_attach_ip_fail:
description:
- Time to wait before marking attach ip operation on an se as failed.
- Field introduced in 17.2.2.
- Default value when not specified in API or module is interpreted by Avi Controller as 3600.
- Units(SEC).
version_added: "2.5"
vs_se_bootup_fail:
description:
- Number of vs_se_bootup_fail.
- Default value when not specified in API or module is interpreted by Avi Controller as 480.
- Units(SEC).
vs_se_create_fail:
description:
- Number of vs_se_create_fail.
- Default value when not specified in API or module is interpreted by Avi Controller as 1500.
- Units(SEC).
vs_se_ping_fail:
description:
- Number of vs_se_ping_fail.
- Default value when not specified in API or module is interpreted by Avi Controller as 60.
- Units(SEC).
vs_se_vnic_fail:
description:
- Number of vs_se_vnic_fail.
- Default value when not specified in API or module is interpreted by Avi Controller as 300.
- Units(SEC).
vs_se_vnic_ip_fail:
description:
- Number of vs_se_vnic_ip_fail.
- Default value when not specified in API or module is interpreted by Avi Controller as 120.
- Units(SEC).
warmstart_se_reconnect_wait_time:
description:
- Number of warmstart_se_reconnect_wait_time.
- Default value when not specified in API or module is interpreted by Avi Controller as 300.
- Units(SEC).
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create ControllerProperties object
avi_controllerproperties:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_controllerproperties
"""
RETURN = '''
obj:
description: ControllerProperties (api/controllerproperties) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
avi_api_update_method=dict(default='put',
choices=['put', 'patch']),
avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
allow_ip_forwarding=dict(type='bool',),
allow_unauthenticated_apis=dict(type='bool',),
allow_unauthenticated_nodes=dict(type='bool',),
api_idle_timeout=dict(type='int',),
appviewx_compat_mode=dict(type='bool',),
attach_ip_retry_interval=dict(type='int',),
attach_ip_retry_limit=dict(type='int',),
bm_use_ansible=dict(type='bool',),
cluster_ip_gratuitous_arp_period=dict(type='int',),
crashed_se_reboot=dict(type='int',),
dead_se_detection_timer=dict(type='int',),
dns_refresh_period=dict(type='int',),
dummy=dict(type='int',),
enable_memory_balancer=dict(type='bool',),
fatal_error_lease_time=dict(type='int',),
max_dead_se_in_grp=dict(type='int',),
max_pcap_per_tenant=dict(type='int',),
max_seq_attach_ip_failures=dict(type='int',),
max_seq_vnic_failures=dict(type='int',),
persistence_key_rotate_period=dict(type='int',),
portal_token=dict(type='str', no_log=True,),
query_host_fail=dict(type='int',),
safenet_hsm_version=dict(type='str',),
se_create_timeout=dict(type='int',),
se_failover_attempt_interval=dict(type='int',),
se_offline_del=dict(type='int',),
se_vnic_cooldown=dict(type='int',),
secure_channel_cleanup_timeout=dict(type='int',),
secure_channel_controller_token_timeout=dict(type='int',),
secure_channel_se_token_timeout=dict(type='int',),
seupgrade_fabric_pool_size=dict(type='int',),
seupgrade_segroup_min_dead_timeout=dict(type='int',),
ssl_certificate_expiry_warning_days=dict(type='list',),
unresponsive_se_reboot=dict(type='int',),
upgrade_dns_ttl=dict(type='int',),
upgrade_lease_time=dict(type='int',),
url=dict(type='str',),
uuid=dict(type='str',),
vnic_op_fail_time=dict(type='int',),
vs_apic_scaleout_timeout=dict(type='int',),
vs_awaiting_se_timeout=dict(type='int',),
vs_key_rotate_period=dict(type='int',),
vs_se_attach_ip_fail=dict(type='int',),
vs_se_bootup_fail=dict(type='int',),
vs_se_create_fail=dict(type='int',),
vs_se_ping_fail=dict(type='int',),
vs_se_vnic_fail=dict(type='int',),
vs_se_vnic_ip_fail=dict(type='int',),
warmstart_se_reconnect_wait_time=dict(type='int',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'controllerproperties',
set(['portal_token']))
if __name__ == '__main__':
main()
| gpl-3.0 | 8,953,998,339,727,534,000 | 41.489691 | 134 | 0.61622 | false |
costadorione/purestream | core/tmdb.py | 1 | 65766 | # -*- coding: utf-8 -*-
# ------------------------------------------------------------
# streamondemand 5
# Copyright 2015 [email protected]
# http://www.mimediacenter.info/foro/viewforum.php?f=36
#
# Distributed under the terms of GNU General Public License v3 (GPLv3)
# http://www.gnu.org/licenses/gpl-3.0.html
# ------------------------------------------------------------
# This file is part of streamondemand 5.
#
# streamondemand 5 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# streamondemand 5 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with streamondemand 5. If not, see <http://www.gnu.org/licenses/>.
# --------------------------------------------------------------------------------
import copy
import re
import time
from core import jsontools
from core import logger
from core import scrapertools
from platformcode import platformtools
# -----------------------------------------------------------------------------------------------------------
# Set of functions related to the infoLabels.
#   version 1.0:
#       Initial version
#
#   Includes:
#       set_infoLabels(source, seekTmdb, idioma_busqueda): Gets and sets (item.infoLabels) the extra data of one or
#           several TV shows, episodes or movies.
#       set_infoLabels_item(item, seekTmdb, idioma_busqueda): Gets and sets (item.infoLabels) the extra data of a
#           TV show, episode or movie.
#       set_infoLabels_itemlist(item_list, seekTmdb, idioma_busqueda): Gets and sets (item.infoLabels) the extra
#           data of a list of TV shows, episodes or movies.
#       infoLabels_tostring(item): Returns a str with the sorted list of the item's infoLabels
#
#   Usage:
#       tmdb.set_infoLabels(item, seekTmdb = True)
#
#       Getting the basic data of a movie:
#           Before calling the set_infoLabels method the title to search for must be in item.fulltitle
#           or in item.contentTitle and the year in item.infoLabels['year'].
#
#       Getting the basic data of a TV show:
#           Before calling the set_infoLabels method the title to search for must be in item.show or in
#           item.contentSerieName.
#
#       Getting more data of a movie or TV show:
#           After getting the basic data, item.infoLabels['tmdb'] holds the id of the show or movie.
#           This id can also be set directly, if known, or the equivalent ids can be used from:
#           IMDB (in item.infoLabels['IMDBNumber'] or item.infoLabels['code'] or item.infoLabels['imdb_id']), TVDB
#           (TV shows only, in item.infoLabels['tvdb_id']),
#           Freebase (TV shows only, in item.infoLabels['freebase_mid']), TVRage (TV shows only, in
#           item.infoLabels['tvrage_id'])
#
#       Getting the data of a season:
#           Before calling the set_infoLabels method the show title must be in item.show or in
#           item.contentSerieName,
#           the TMDB id of the show must be in item.infoLabels['tmdb'] (it can be set automatically through
#           the basic data lookup)
#           and the season number must be in item.infoLabels['season'].
#
#       Getting the data of an episode:
#           Before calling the set_infoLabels method the show title must be in item.show or in
#           item.contentSerieName,
#           the TMDB id of the show must be in item.infoLabels['tmdb'] (it can be set automatically through the
#           basic data lookup),
#           the season number must be in item.infoLabels['season'] and the episode number must be in
#           item.infoLabels['episode'].
#
#
# --------------------------------------------------------------------------------------------------------------
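# A minimal usage sketch (illustrative only; 'item' is assumed to be an Item object built by a
# channel, it is not defined in this module, and the titles are made-up examples):
#
#     # movie: title and year are enough for the basic lookup
#     item.contentTitle = "Interstellar"
#     item.infoLabels['year'] = '2014'
#     set_infoLabels(item, seekTmdb=True)
#
#     # TV episode: show title plus season/episode numbers in item.infoLabels
#     item.contentSerieName = "Breaking Bad"
#     item.infoLabels['season'] = 1
#     item.infoLabels['episode'] = 2
#     set_infoLabels(item, seekTmdb=True)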
otmdb_global = None
def cb_select_from_tmdb(item, tmdb_result):
if tmdb_result is None:
logger.debug("he pulsado 'cancelar' en la ventana de info de la serie/pelicula")
return None
else:
return tmdb_result
def find_and_set_infoLabels_tmdb(item, ask_video=True):
global otmdb_global
contentType = item.contentType if item.contentType else ("movie" if not item.contentSerieName else "tvshow")
title = item.contentSerieName if contentType == "tvshow" else item.contentTitle
season = int(item.contentSeason) if item.contentSeason else ""
episode = int(item.contentEpisodeNumber) if item.contentEpisodeNumber else ""
contentType = "episode" if contentType == "tvshow" and item.contentSeason and item.contentEpisodeNumber else \
contentType
year = item.infoLabels.get('year', '')
video_type = "tv" if contentType in ["tvshow", "episode"] else "movie"
tmdb_result = None
while not tmdb_result:
if not item.infoLabels.get("tmdb_id"):
otmdb_global = Tmdb(texto_buscado=title, tipo=video_type, year=year)
elif not otmdb_global or otmdb_global.result.get("id") != item.infoLabels['tmdb_id']:
otmdb_global = Tmdb(id_Tmdb=item.infoLabels['tmdb_id'], tipo=video_type, idioma_busqueda="es")
results = otmdb_global.get_list_resultados()
if len(results) > 1 and ask_video:
tmdb_result = platformtools.show_video_info(results, caption="[{0}]: Selecciona la {1} correcta"
.format(title, "serie" if video_type == "tv" else "pelicula"),
callback='cb_select_from_tmdb', item=item)
elif len(results) > 0:
tmdb_result = results[0]
if tmdb_result is None:
if platformtools.dialog_yesno("{0} no encontrada".
format("Serie" if video_type == "tv" else "Pelicula") ,
"No se ha encontrado la {0}:".
format("serie" if video_type == "tv" else "pelicula"),
title,
'¿Desea introducir otro nombre?'):
                # Ask for the title
it = platformtools.dialog_input(title, "Introduzca el nombre de la {0} a buscar".
format("serie" if video_type == "tv" else "pelicula"))
if it is not None:
title = it
else:
logger.debug("he pulsado 'cancelar' en la ventana 'introduzca el nombre correcto'")
break
else:
break
infoLabels = item.infoLabels if type(item.infoLabels) == dict else {}
if not tmdb_result:
item.infoLabels = infoLabels
return False
infoLabels = otmdb_global.get_infoLabels(infoLabels, tmdb_result)
infoLabels["mediatype"] = contentType
if infoLabels["mediatype"] == "episode":
try:
episodio = otmdb_global.get_episodio(season, episode)
except:
pass
            # The lookup could not be performed
        else:
            if episodio:
                # Update the data
infoLabels['title'] = episodio['episodio_titulo']
infoLabels['season'] = season
infoLabels['episode'] = episode
if episodio['episodio_sinopsis']:
infoLabels['plot'] = episodio['episodio_sinopsis']
if episodio['episodio_imagen']:
infoLabels['thumbnail'] = episodio['episodio_imagen']
item.infoLabels = infoLabels
return True
def set_infoLabels_item(item, seekTmdb=True, idioma_busqueda='it', lock=None):
# -----------------------------------------------------------------------------------------------------------
    # Gets and sets (item.infoLabels) the extra data of a TV show, episode or movie.
    #
    # Parameters:
    #     item: (Item) Item object representing a movie, TV show or episode. The item.infoLabels dictionary will be
    #         modified to include the extra data found.
    #     (optional) seekTmdb: (bool) If True a search is done on www.themoviedb.org to get the data,
    #         otherwise the data is taken from the Item itself if present.
    #     (optional) idioma_busqueda: (str) Language code according to ISO 639-1, for searches on
    #         www.themoviedb.org.
    # Returns:
    #     A number whose absolute value represents the amount of elements included in the
    #     item.infoLabels dictionary.
    #     This number will be positive if the data was obtained from www.themoviedb.org and negative otherwise.
# ---------------------------------------------------------------------------------------------------------
global otmdb_global
def __inicializar():
        # Initialize with default values
if 'year' not in item.infoLabels:
item.infoLabels['year'] = ''
if 'IMDBNumber' not in item.infoLabels:
item.infoLabels['IMDBNumber'] = ''
if 'code' not in item.infoLabels:
item.infoLabels['code'] = ''
if 'imdb_id' not in item.infoLabels:
item.infoLabels['imdb_id'] = ''
if 'plot' not in item.infoLabels:
item.infoLabels['plot'] = item.plot if item.plot != '' else item.contentPlot
if 'genre' not in item.infoLabels:
item.infoLabels['genre'] = item.category
item.infoLabels['duration'] = item.duration
item.infoLabels['AudioLanguage'] = item.language
titulo = item.fulltitle if item.fulltitle != '' else \
(item.contentTitle if item.contentTitle != '' else item.title)
if 'title' not in item.infoLabels:
item.infoLabels['title'] = titulo
item.infoLabels['tvshowtitle'] = item.show if item.show != '' else item.contentSerieName
if 'mediatype' not in item.infoLabels:
item.infoLabels['mediatype'] = 'movie' if item.infoLabels['tvshowtitle'] == '' else 'tvshow'
def obtener_datos_item():
if item.contentSeason != '':
item.infoLabels['mediatype'] = 'season'
if item.contentEpisodeNumber != '' or item.contentEpisodeTitle != '':
item.infoLabels['mediatype'] = 'episode'
if item.contentTitle == '':
item.contentTitle = item.title
return -1 * len(item.infoLabels)
def __leer_datos(otmdb_aux):
item.infoLabels = otmdb_aux.get_infoLabels(item.infoLabels)
if 'thumbnail' in item.infoLabels:
item.thumbnail = item.infoLabels['thumbnail']
if 'fanart' in item.infoLabels:
item.fanart = item.infoLabels['fanart']
if seekTmdb:
        # Check what type of content it is...
if 'mediatype' not in item.infoLabels:
item.infoLabels['tvshowtitle'] = item.show if item.show != '' else item.contentSerieName
item.infoLabels['mediatype'] = 'movie' if item.infoLabels['tvshowtitle'] == '' else 'tvshow'
tipo = 'movie' if item.infoLabels['mediatype'] == 'movie' else 'tv'
if 'season' in item.infoLabels and 'tmdb_id' in item.infoLabels:
try:
numtemporada = int(item.infoLabels['season'])
except ValueError:
logger.debug("El numero de temporada no es valido")
return obtener_datos_item()
if lock:
lock.acquire()
if not otmdb_global:
otmdb_global = Tmdb(id_Tmdb=item.infoLabels['tmdb_id'], tipo=tipo, idioma_busqueda=idioma_busqueda)
__leer_datos(otmdb_global)
temporada = otmdb_global.get_temporada(numtemporada)
if lock:
lock.release()
if 'episode' in item.infoLabels:
try:
episode = int(item.infoLabels['episode'])
except ValueError:
logger.debug("El número de episodio (%s) no es valido" % repr(item.infoLabels['episode']))
return obtener_datos_item()
                # We have valid season and episode numbers...
                # ... look up the episode data
item.infoLabels['mediatype'] = 'episode'
episodio = otmdb_global.get_episodio(numtemporada, episode)
if episodio:
                    # Update the data
__leer_datos(otmdb_global)
item.infoLabels['title'] = episodio['episodio_titulo']
if episodio['episodio_sinopsis']:
item.infoLabels['plot'] = episodio['episodio_sinopsis']
if episodio['episodio_imagen']:
item.infoLabels['poster_path'] = episodio['episodio_imagen']
item.thumbnail = item.infoLabels['poster_path']
if episodio['episodio_air_date']:
item.infoLabels['aired'] = episodio['episodio_air_date']
if episodio['episodio_vote_average']:
item.infoLabels['rating'] = episodio['episodio_vote_average']
item.infoLabels['votes'] = episodio['episodio_vote_count']
return len(item.infoLabels)
else:
                # We have a valid season number but no episode number...
                # ... look up the season data
item.infoLabels['mediatype'] = 'season'
temporada = otmdb_global.get_temporada(numtemporada)
if temporada:
                    # Update the data
__leer_datos(otmdb_global)
logger.debug(str(item.infoLabels))
logger.debug(str(temporada))
item.infoLabels['title'] = temporada['name']
if temporada['overview']:
item.infoLabels['plot'] = temporada['overview']
if temporada['air_date']:
item.infoLabels['aired'] = temporada['air_date']
if temporada['poster_path']:
item.infoLabels['poster_path'] = 'http://image.tmdb.org/t/p/original' + temporada['poster_path']
item.thumbnail = item.infoLabels['poster_path']
return len(item.infoLabels)
        # Search...
else:
__inicializar()
otmdb = copy.copy(otmdb_global)
            # Searches by ID...
            if 'tmdb_id' in item.infoLabels and item.infoLabels['tmdb_id']:
                # ...search by tmdb_id
otmdb = Tmdb(id_Tmdb=item.infoLabels['tmdb_id'], tipo=tipo, idioma_busqueda=idioma_busqueda)
elif item.infoLabels['IMDBNumber'] or item.infoLabels['code'] or item.infoLabels['imdb_id']:
if item.infoLabels['IMDBNumber']:
item.infoLabels['code'] = item.infoLabels['IMDBNumber']
item.infoLabels['imdb_id'] = item.infoLabels['IMDBNumber']
elif item.infoLabels['code']:
item.infoLabels['IMDBNumber'] = item.infoLabels['code']
item.infoLabels['imdb_id'] = item.infoLabels['code']
else:
item.infoLabels['code'] = item.infoLabels['imdb_id']
item.infoLabels['IMDBNumber'] = item.infoLabels['imdb_id']
                # ...search by imdb code
otmdb = Tmdb(external_id=item.infoLabels['imdb_id'], external_source="imdb_id", tipo=tipo,
idioma_busqueda=idioma_busqueda)
            elif tipo == 'tv':  # search with other ids
                if 'tvdb_id' in item.infoLabels and item.infoLabels['tvdb_id']:
                    # ...search by tvdb_id
otmdb = Tmdb(external_id=item.infoLabels['tvdb_id'], external_source="tvdb_id", tipo=tipo,
idioma_busqueda=idioma_busqueda)
elif 'freebase_mid' in item.infoLabels and item.infoLabels['freebase_mid']:
                    # ...search by freebase_mid
otmdb = Tmdb(external_id=item.infoLabels['freebase_mid'], external_source="freebase_mid",
tipo=tipo, idioma_busqueda=idioma_busqueda)
elif 'freebase_id' in item.infoLabels and item.infoLabels['freebase_id']:
                    # ...search by freebase_id
otmdb = Tmdb(external_id=item.infoLabels['freebase_id'], external_source="freebase_id",
tipo=tipo, idioma_busqueda=idioma_busqueda)
elif 'tvrage_id' in item.infoLabels and item.infoLabels['tvrage_id']:
                    # ...search by tvrage_id
otmdb = Tmdb(external_id=item.infoLabels['tvrage_id'], external_source="tvrage_id",
tipo=tipo, idioma_busqueda=idioma_busqueda)
if otmdb is None:
                # The search by ID could not be done...
                # do it by title instead
                if item.infoLabels['title'] != '':
                    if tipo == 'tv':
                        # Search for the show by title, filtering its results if needed
otmdb = Tmdb(texto_buscado=item.infoLabels['tvshowtitle'], tipo=tipo,
idioma_busqueda=idioma_busqueda, filtro=item.infoLabels.get('filtro', {}),
year=str(item.infoLabels.get('year', '')))
else:
                        # Search for the movie by title...
                        if item.infoLabels['year'] or 'filtro' in item.infoLabels:
                            # ...and year or filter
titulo_buscado = item.fulltitle if item.fulltitle != '' else item.contentTitle
otmdb = Tmdb(texto_buscado=titulo_buscado, tipo=tipo,
idioma_busqueda=idioma_busqueda,
filtro=item.infoLabels.get('filtro', {}),
year=str(item.infoLabels.get('year', '')))
if otmdb is None or not otmdb.get_id():
                # The search returned no results
                return obtener_datos_item()
            else:
                # The search found a valid result
__leer_datos(otmdb)
return len(item.infoLabels)
else:
__inicializar()
return obtener_datos_item()
def set_infoLabels_itemlist(item_list, seekTmdb=False, idioma_busqueda='it'):
"""
    Concurrently gets the data of the items included in the item_list list.
    The API has a limit of 40 requests per IP every 10'' and because of that the list should not have more than
    30 items to make sure this function works properly.
    :param item_list: list of Item objects representing movies, TV shows or episodes. The item.infoLabels
        dictionary of each Item object will be modified to include the extra data found.
    :type item_list: list
    :param seekTmdb: If True a search is done on www.themoviedb.org to get the data, otherwise the data is taken
        from the Item itself if present.
    :type seekTmdb: bool
    :param idioma_busqueda: Language code according to ISO 639-1, for searches on www.themoviedb.org.
    :type idioma_busqueda: str
    :return: A list of numbers whose absolute value represents the amount of elements included in the
        item.infoLabels dictionary of each Item. This number will be positive if the data was obtained from
        www.themoviedb.org and negative otherwise.
    :rtype: list
"""
import threading
semaforo = threading.Semaphore(20)
lock = threading.Lock()
r_list = list()
i = 0
l_hilo = list()
def sub_get(item, _i, _seekTmdb):
semaforo.acquire()
ret = set_infoLabels_item(item, _seekTmdb, idioma_busqueda, lock)
# logger.debug(str(ret) + "item: " + item.tostring())
semaforo.release()
r_list.append((_i, item, ret))
for item in item_list:
t = threading.Thread(target=sub_get, args=(item, i, seekTmdb))
t.start()
i += 1
l_hilo.append(t)
    # wait for all the threads to finish
for x in l_hilo:
x.join()
    # Sort the result list by call order to keep the same order as item_list
r_list.sort(key=lambda i: i[0])
    # Rebuild and return the list with only the results of the individual calls
return [ii[2] for ii in r_list]
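# A minimal sketch (illustrative only, not called anywhere in this module) of how a caller could
# split a long listing into blocks of at most 30 items before using set_infoLabels_itemlist, as
# recommended in the docstring above:
#
#     def set_infoLabels_in_blocks(itemlist, block_size=30):
#         results = []
#         for i in range(0, len(itemlist), block_size):
#             results.extend(set_infoLabels_itemlist(itemlist[i:i + block_size], seekTmdb=True))
#         return results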
def set_infoLabels(source, seekTmdb=False, idioma_busqueda='it'):
"""
    Depending on the data type of source, gets and sets (item.infoLabels) the extra data of one or several TV shows,
    episodes or movies.
    @param source: variable holding the information used to set the infoLabels
    @type source: list, item
    @param seekTmdb: if True a search is done on www.themoviedb.org to get the data, otherwise the data is taken
        from the Item itself.
    @type seekTmdb: bool
    @param idioma_busqueda: sets the language value for searches on www.themoviedb.org
    @type idioma_busqueda: str
    @return: a number or list of numbers with the result of the calls to set_infoLabels_item
    @rtype: int, list
"""
start_time = time.time()
if type(source) == list:
ret = set_infoLabels_itemlist(source, seekTmdb, idioma_busqueda)
logger.debug("Se han obtenido los datos de %i enlaces en %f segundos" % (len(source), time.time() - start_time))
else:
ret = set_infoLabels_item(source, seekTmdb, idioma_busqueda)
logger.debug("Se han obtenido los datos del enlace en %f segundos" % (time.time() - start_time))
return ret
def infoLabels_tostring(item, separador="\n"):
"""
    Returns a str with the sorted list of the item's infoLabels
    @param item: item
    @type item: item
    @param separador: separator used between the fields
    @type separador: str
    @return: the sorted list of the item's infoLabels
    @rtype: str
"""
return separador.join([var + "= " + str(item.infoLabels[var]) for var in sorted(item.infoLabels)])
# ---------------------------------------------------------------------------------------------------------------
# class Tmdb:
#    Scraper for streamondemand based on the API of https://www.themoviedb.org/
#    version 1.4:
#        - Documented the API usage limits (see below).
#        - Added get_temporada() method
#    version 1.3:
#        - Fixed error when path_poster and backdrop_path returned None
#        - Fixed error that made the genre list keep accumulating from one call to the next
#        - Added get_generos() method
#        - Added optional parameter idioma_alternativo to the get_sinopsis() method
#
#
#    Usage:
#        Constructor methods:
#            Tmdb(texto_buscado, tipo)
#                Parameters:
#                    texto_buscado:(str) Text or part of the text to search for
#                    tipo: ("movie" or "tv") Type of result searched for, movies or TV shows. Default "movie"
#                    (optional) idioma_busqueda: (str) language code according to ISO 639-1
#                    (optional) include_adult: (bool) Whether adult content is included in the search or not. Default
#                        'False'
#                    (optional) year: (str) Release year.
#                    (optional) page: (int) When a search returns many results they are organized in pages.
#                        Any page can be loaded, although by default it is always the first one.
#                Return:
#                    This call returns a Tmdb object holding the first page of the result of searching 'texto_buscado'
#                    on the themoviedb.org website. The more optional parameters are included the more precise the
#                    search will be. The object is also initialized with the first result of the first page of results.
#            Tmdb(id_Tmdb,tipo)
#                Parameters:
#                    id_Tmdb: (str) Identifier of a given movie or TV show on themoviedb.org
#                    tipo: ("movie" or "tv") Type of result searched for, movies or TV shows. Default "movie"
#                    (optional) idioma_busqueda: (str) language code according to ISO 639-1
#                Return:
#                    This call returns a Tmdb object holding the result of searching for a movie or TV show with the
#                    identifier id_Tmdb
#                    on the themoviedb.org website.
#            Tmdb(external_id, external_source, tipo)
#                Parameters:
#                    external_id: (str) Identifier of a given movie or TV show on the website referenced by
#                        'external_source'.
#                    external_source: (For TV shows:"imdb_id","freebase_mid","freebase_id","tvdb_id","tvrage_id"; For
#                        movies:"imdb_id")
#                    tipo: ("movie" or "tv") Type of result searched for, movies or TV shows. Default "movie"
#                    (optional) idioma_busqueda: (str) language code according to ISO 639-1
#                Return:
#                    This call returns a Tmdb object holding the result of searching for a movie or TV show with the
#                    identifier 'external_id' of
#                    the website referenced by 'external_source' on the themoviedb.org website.
#
#    Main methods:
#        get_id(): Returns a str with the Tmdb identifier of the loaded movie or TV show, or an empty string if
#            nothing is loaded.
#        get_sinopsis(idioma_alternativo): Returns a str with the synopsis of the loaded TV show or movie.
#        get_poster (tipo_respuesta,size): Gets the poster or a list of posters.
#        get_backdrop (tipo_respuesta,size): Gets a backdrop image or a list of backdrop images.
#        get_fanart (tipo,idioma,temporada): Gets a list of images of the given type from the Fanart.tv website
#        get_temporada(temporada): Gets a dictionary with season-specific data.
#        get_episodio (temporada, capitulo): Gets a dictionary with episode-specific data.
#        get_generos(): Returns a str with the list of genres the movie or TV show belongs to.
#
#
#    Other methods:
#        load_resultado(resultado, page): When a search returns several results, the exact result and the page to
#            load the data from can be selected.
#
#    Limitations:
#        Use of the API imposes a limit of 20 simultaneous connections (concurrency) or 30 requests in 10 seconds per IP
#    Information about the api: http://docs.themoviedb.apiary.io
# -------------------------------------------------------------------------------------------------------------------
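# A minimal usage sketch (illustrative only; the search text and the IMDB id are made-up examples):
#
#     otmdb = Tmdb(texto_buscado="breaking bad", tipo="tv", idioma_busqueda="es")
#     sinopsis = otmdb.get_sinopsis()
#     otmdb = Tmdb(external_id="tt0903747", external_source="imdb_id", tipo="tv")
#     episodio = otmdb.get_episodio(1, 2)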
class Tmdb(object):
    # Class attribute
dic_generos = {}
'''
dic_generos={"id_idioma1": {"tv": {"id1": "name1",
"id2": "name2"
},
"movie": {"id1": "name1",
"id2": "name2"
}
}
}
'''
def __search(self, index_resultado=0, page=1):
# http://api.themoviedb.org/3/search/movie?api_key=f7f51775877e0bb6703520952b3c7840&query=superman&language=es
# &include_adult=false&page=1
url = ('http://api.themoviedb.org/3/search/%s?api_key=f7f51775877e0bb6703520952b3c7840&query=%s&language=%s'
'&include_adult=%s&page=%s' % (self.busqueda["tipo"], self.busqueda["texto"].replace(' ', '%20'),
self.busqueda["idioma"], self.busqueda["include_adult"], str(page)))
if self.busqueda["year"] != '':
url += '&year=' + str(self.busqueda["year"])
buscando = self.busqueda["texto"].capitalize()
logger.info("[Tmdb.py] Buscando %s en pagina %s:\n%s" % (buscando, page, url))
response_dic = {}
try:
response_dic = jsontools.load_json(scrapertools.downloadpageWithoutCookies(url))
self.total_results = response_dic["total_results"]
self.total_pages = response_dic["total_pages"]
except:
self.total_results = 0
if self.total_results > 0:
self.results = response_dic["results"]
if len(self.results) > 0:
if self.busqueda['filtro']:
                    # TODO: document this part
for key, value in dict(self.busqueda['filtro']).items():
for r in self.results[:]:
                            ''' # More permissive option
if r.has_key(k) and r[k] != v:
self.results.remove(r)
self.total_results -= 1
'''
                            # More precise option
if key not in r or r[key] != value:
self.results.remove(r)
self.total_results -= 1
if index_resultado < len(self.results):
self.__leer_resultado(self.results[index_resultado])
else:
logger.error("La busqueda de '{0}' no dio {1} resultados para la pagina {2}"
.format(buscando, index_resultado + 1, page))
else:
            # The search returned no results
logger.error("La busqueda de '%s' no dio resultados para la pagina %s" % (buscando, page))
def __by_id(self, source="tmdb"):
if source == "tmdb":
# http://api.themoviedb.org/3/movie/1924?api_key=f7f51775877e0bb6703520952b3c7840&language=es
# &append_to_response=images,videos,external_ids,credits&include_image_language=es,null
# http://api.themoviedb.org/3/tv/1407?api_key=f7f51775877e0bb6703520952b3c7840&language=es
# &append_to_response=images,videos,external_ids,credits&include_image_language=es,null
url = ('http://api.themoviedb.org/3/%s/%s?api_key=f7f51775877e0bb6703520952b3c7840&language=%s'
'&append_to_response=images,videos,external_ids,credits&include_image_language=%s,null' %
(self.busqueda["tipo"], self.busqueda["id"], self.busqueda["idioma"], self.busqueda["idioma"]))
buscando = "id_Tmdb: " + self.busqueda["id"]
else:
# http://api.themoviedb.org/3/find/%s?external_source=imdb_id&api_key=f7f51775877e0bb6703520952b3c7840
url = ('http://api.themoviedb.org/3/find/%s?external_source=%s&api_key=f7f51775877e0bb6703520952b3c7840'
'&language=%s' % (self.busqueda["id"], source, self.busqueda["idioma"]))
buscando = source.capitalize() + ": " + self.busqueda["id"]
logger.info("[Tmdb.py] Buscando %s:\n%s" % (buscando, url))
try:
resultado = jsontools.load_json(scrapertools.downloadpageWithoutCookies(url))
if source != "tmdb":
if self.busqueda["tipo"] == "movie":
resultado = resultado["movie_results"]
else:
resultado = resultado["tv_results"]
if len(resultado) > 0:
resultado = resultado[0]
except:
resultado = {}
if len(resultado) > 0:
self.result = resultado
if self.total_results == 0:
self.results.append(resultado)
self.total_results = 1
self.total_pages = 1
self.__leer_resultado(resultado)
        else:  # The search returned no results
logger.debug("La busqueda de %s no dio resultados." % buscando)
def __inicializar(self):
        # Initialize the result, fanart and season collections
for i in (self.result, self.fanart, self.temporada):
for k in i.keys():
if type(i[k]) == str:
i[k] = ""
elif type(i[k]) == list:
i[k] = []
elif type(i[k]) == dict:
i[k] = {}
def __init__(self, **kwargs):
self.page = kwargs.get('page', 1)
self.results = []
self.total_pages = 0
self.total_results = 0
self.fanart = {}
self.temporada = {}
self.busqueda = {'id': "",
'texto': "",
'tipo': kwargs.get('tipo', 'movie'),
'idioma': kwargs.get('idioma_busqueda', 'es'),
'include_adult': str(kwargs.get('include_adult', 'false')),
'year': kwargs.get('year', ''),
'filtro': kwargs.get('filtro', {})
}
self.result = {'adult': "",
                       'backdrop_path': "",  # path of the highest rated backdrop image
                       # belongs_to_collection
                       'budget': "",  # budget
                       'genres': [],  # list of genres
'homepage': "",
'id': "", 'imdb_id': "", 'freebase_mid': "", 'freebase_id': "", 'tvdb_id': "", 'tvrage_id': "",
                       # equivalent IDs
'original_language': "",
'original_title': "",
                       'overview': "",  # synopsis
# popularity
'poster_path': "",
'production_companies': [],
'production_countries': [],
'origin_country': [],
'release_date': "",
'first_air_date': "",
                       'revenue': "",  # revenue
                       'runtime': "",  # runtime (duration)
# spoken_languages
'status': "",
'tagline': "",
'title': "",
                       'video': "",  # ("true" or "false") tells whether the movies/id/videos lookup will return anything or not
'vote_average': "",
'vote_count': "",
                       'name': "",  # name in the case of people or TV shows (tv)
                       'profile_path': "",  # image path in the case of people
                       'known_for': {},  # dictionary of movies in the case of people (movie_id:title)
'images_backdrops': [],
'images_posters': [],
'images_profiles': [],
'videos': []
}
def rellenar_dic_generos():
            # Fill the genre dictionary for the selected type and language
if self.busqueda["idioma"] not in Tmdb.dic_generos:
Tmdb.dic_generos[self.busqueda["idioma"]] = {}
if self.busqueda["tipo"] not in Tmdb.dic_generos[self.busqueda["idioma"]]:
Tmdb.dic_generos[self.busqueda["idioma"]][self.busqueda["tipo"]] = {}
url = ('http://api.themoviedb.org/3/genre/%s/list?api_key=f7f51775877e0bb6703520952b3c7840&language=%s'
% (self.busqueda["tipo"], self.busqueda["idioma"]))
try:
lista_generos = jsontools.load_json(scrapertools.downloadpageWithoutCookies(url))["genres"]
            except:
                lista_generos = []
for i in lista_generos:
Tmdb.dic_generos[self.busqueda["idioma"]][self.busqueda["tipo"]][str(i["id"])] = i["name"]
if self.busqueda["tipo"] == 'movie' or self.busqueda["tipo"] == "tv":
if self.busqueda["idioma"] not in Tmdb.dic_generos:
rellenar_dic_generos()
elif self.busqueda["tipo"] not in Tmdb.dic_generos[self.busqueda["idioma"]]:
rellenar_dic_generos()
else:
            # Person search is not supported in this version.
raise Exception("Parametros no validos al crear el objeto Tmdb.\nConsulte los modos de uso.")
if 'id_Tmdb' in kwargs:
self.busqueda["id"] = kwargs.get('id_Tmdb')
self.__by_id()
elif 'texto_buscado' in kwargs:
self.busqueda["texto"] = kwargs.get('texto_buscado')
self.__search(page=self.page)
elif 'external_source' in kwargs and 'external_id' in kwargs:
# TV Series: imdb_id, freebase_mid, freebase_id, tvdb_id, tvrage_id
# Movies: imdb_id
if (self.busqueda["tipo"] == 'movie' and kwargs.get('external_source') == "imdb_id") or \
(self.busqueda["tipo"] == 'tv' and kwargs.get('external_source') in (
"imdb_id", "freebase_mid", "freebase_id", "tvdb_id", "tvrage_id")):
self.busqueda["id"] = kwargs.get('external_id')
self.__by_id(source=kwargs.get('external_source'))
else:
raise Exception("Parametros no validos al crear el objeto Tmdb.\nConsulte los modos de uso.")
def __leer_resultado(self, data):
for k, v in data.items():
if k == "genre_ids": # Lista de generos (lista con los id de los generos)
self.result["genres"] = []
for i in v:
try:
self.result["genres"].append(
self.dic_generos[self.busqueda["idioma"]][self.busqueda["tipo"]][str(i)])
except:
pass
elif k == "genre" or k == "genres": # Lista de generos (lista de objetos {id,nombre})
self.result["genres"] = []
for i in v:
self.result["genres"].append(i['name'])
elif k == "known_for": # Lista de peliculas de un actor
for i in v:
self.result["known_for"][i['id']] = i['title']
elif k == "images": # Se incluyen los datos de las imagenes
if "backdrops" in v:
self.result["images_backdrops"] = v["backdrops"]
if "posters" in v:
self.result["images_posters"] = v["posters"]
if "profiles" in v:
self.result["images_profiles"] = v["profiles"]
elif k == "credits": # Se incluyen los creditos
if "cast" in v:
self.result["credits_cast"] = v["cast"]
if "crew" in v:
self.result["credits_crew"] = v["crew"]
elif k == "videos": # Se incluyen los datos de los videos
self.result["videos"] = v["results"]
elif k == "external_ids": # Listado de IDs externos
for kj, _id in v.items():
# print kj + ":" + str(id)
if kj in self.result:
self.result[kj] = str(_id)
elif k in self.result: # el resto
if type(v) == list or type(v) == dict:
self.result[k] = v
elif v is None:
self.result[k] = ""
else:
self.result[k] = str(v)
def load_resultado(self, index_resultado=0, page=1):
        # If there is only one result (or none) there is nothing to switch to
if self.total_results <= 1:
return None
if page < 1 or page > self.total_pages:
page = 1
if index_resultado < 0:
index_resultado = 0
self.__inicializar()
if page != self.page:
self.__search(index_resultado=index_resultado, page=page)
self.page = page
else:
# print self.result["genres"]
self.__leer_resultado(self.results[index_resultado])
def get_list_resultados(self, numResult=20):
        # TODO: document this method
res = []
numResult = numResult if numResult > 0 else self.total_results
numResult = min([numResult, self.total_results])
cr = 0
for p in range(1, self.total_pages + 1):
for r in range(0, len(self.results)):
try:
self.load_resultado(r, p)
self.result['type'] = self.busqueda.get("tipo", "movie")
self.result['thumbnail'] = self.get_poster(size="w300")
self.result['fanart'] = self.get_backdrop()
res.append(self.result.copy())
cr += 1
if cr >= numResult:
return res
except:
continue
return res
def get_generos(self):
        # --------------------------------------------------------------------------------------------------------------------------------------------
        # Parameters:
        #    none
        # Return: (str)
        #    Returns the list of genres the movie or TV show belongs to.
        # --------------------------------------------------------------------------------------------------------------------------------------------
return ', '.join(self.result["genres"])
def get_id(self):
"""
:return: Devuelve el identificador Tmdb de la pelicula o serie cargada o una cadena vacia en caso de que no
hubiese nada cargado. Se puede utilizar este metodo para saber si una busqueda ha dado resultado o no.
:rtype: str
"""
return str(self.result['id'])
def get_sinopsis(self, idioma_alternativo=""):
"""
:param idioma_alternativo: codigo del idioma, segun ISO 639-1, en el caso de que en el idioma fijado para la
busqueda no exista sinopsis.
Por defecto, se utiliza el idioma original. Si se utiliza None como idioma_alternativo, solo se buscara en
el idioma fijado.
:type idioma_alternativo: str
:return: Devuelve la sinopsis de una pelicula o serie
:rtype: str
"""
ret = ""
if self.result['id']:
ret = self.result['overview']
if self.result['overview'] == "" and str(idioma_alternativo).lower() != 'none':
                # Launch a search by id and re-read the synopsis
self.busqueda["id"] = str(self.result["id"])
if idioma_alternativo:
self.busqueda["idioma"] = idioma_alternativo
else:
self.busqueda["idioma"] = self.result['original_language']
url = ('http://api.themoviedb.org/3/%s/%s?api_key=f7f51775877e0bb6703520952b3c7840&language=%s' %
(self.busqueda["tipo"], self.busqueda["id"], self.busqueda["idioma"]))
try:
resultado = jsontools.load_json(scrapertools.downloadpageWithoutCookies(url))
                except:
                    resultado = {}
if resultado:
if 'overview' in resultado:
self.result['overview'] = resultado['overview']
ret = self.result['overview']
return ret
def get_poster(self, tipo_respuesta="str", size="original"):
"""
@param tipo_respuesta: Tipo de dato devuelto por este metodo. Por defecto "str"
@type tipo_respuesta: list, str
@param size: ("w45", "w92", "w154", "w185", "w300", "w342", "w500", "w600", "h632", "w780", "w1280", "original")
Indica la anchura(w) o altura(h) de la imagen a descargar. Por defecto "original"
@return: Si el tipo_respuesta es "list" devuelve un listado con todas las urls de las imagenes tipo poster del
tamaño especificado.
Si el tipo_respuesta es "str" devuelve la url de la imagen tipo poster, mas valorada, del tamaño
especificado.
Si el tamaño especificado no existe se retornan las imagenes al tamaño original.
@rtype: list, str
"""
ret = []
if size not in ("w45", "w92", "w154", "w185", "w300", "w342", "w500", "w600", "h632", "w780", "w1280",
"original"):
size = "original"
if self.result["poster_path"] is None or self.result["poster_path"] == "":
poster_path = ""
else:
poster_path = 'http://image.tmdb.org/t/p/' + size + self.result["poster_path"]
if tipo_respuesta == 'str':
return poster_path
elif self.result["id"] == "":
return []
if len(self.result['images_posters']) == 0:
            # Launch a search by id and re-read everything
self.busqueda["id"] = str(self.result["id"])
self.__by_id()
if len(self.result['images_posters']) > 0:
for i in self.result['images_posters']:
imagen_path = i['file_path']
if size != "original":
                    # We cannot request sizes larger than the original
                    if size[0] == 'w' and int(i['width']) < int(size[1:]):
                        size = "original"
                    elif size[0] == 'h' and int(i['height']) < int(size[1:]):
                        size = "original"
ret.append('http://image.tmdb.org/t/p/' + size + imagen_path)
else:
ret.append(poster_path)
return ret
def get_backdrop(self, tipo_respuesta="str", size="original"):
"""
Devuelve las imagenes de tipo backdrop
@param tipo_respuesta: Tipo de dato devuelto por este metodo. Por defecto "str"
@type tipo_respuesta: list, str
@param size: ("w45", "w92", "w154", "w185", "w300", "w342", "w500", "w600", "h632", "w780", "w1280", "original")
Indica la anchura(w) o altura(h) de la imagen a descargar. Por defecto "original"
@type size: str
@return: Si el tipo_respuesta es "list" devuelve un listado con todas las urls de las imagenes tipo backdrop del
tamaño especificado.
Si el tipo_respuesta es "str" devuelve la url de la imagen tipo backdrop, mas valorada, del tamaño especificado.
Si el tamaño especificado no existe se retornan las imagenes al tamaño original.
@rtype: list, str
"""
ret = []
if size not in ("w45", "w92", "w154", "w185", "w300", "w342", "w500", "w600", "h632", "w780", "w1280",
"original"):
size = "original"
if self.result["backdrop_path"] is None or self.result["backdrop_path"] == "":
backdrop_path = ""
else:
backdrop_path = 'http://image.tmdb.org/t/p/' + size + self.result["backdrop_path"]
if tipo_respuesta == 'str':
return backdrop_path
elif self.result["id"] == "":
return []
if len(self.result['images_backdrops']) == 0:
            # Launch a search by id and re-read everything
self.busqueda["id"] = str(self.result["id"])
self.__by_id()
if len(self.result['images_backdrops']) > 0:
for i in self.result['images_backdrops']:
imagen_path = i['file_path']
if size != "original":
                    # We cannot request sizes larger than the original
                    if size[0] == 'w' and int(i['width']) < int(size[1:]):
                        size = "original"
                    elif size[0] == 'h' and int(i['height']) < int(size[1:]):
                        size = "original"
ret.append('http://image.tmdb.org/t/p/' + size + imagen_path)
else:
ret.append(backdrop_path)
return ret
def get_fanart(self, tipo="hdclearart", idioma=None, temporada="all"):
"""
@param tipo: ("hdclearlogo", "poster", "banner", "thumbs", "hdclearart", "clearart", "background", "clearlogo",
"characterart", "seasonthumb", "seasonposter", "seasonbanner", "moviedisc")
Indica el tipo de Art que se desea obtener, segun la web Fanart.tv. Alguno de estos tipos pueden estar solo
disponibles para peliculas o series segun el caso. Por defecto "hdclearart"
@type tipo: str
@param idioma: (opcional) Codigos del idioma segun ISO 639-1, "all" (por defecto) para todos los idiomas o "00"
para ninguno. Por ejemplo: idioma=["es","00","en"] Incluiria los resultados en español, sin idioma definido
y en ingles, en este orden.
@type idioma: list
@param temporada: (opcional solo para series) Un numero entero que representa el numero de temporada, el numero
cero para especiales o "all" para imagenes validas para cualquier temporada. Por defecto "all".
@type: temporada: str
@return: Retorna una lista con las url de las imagenes segun los parametros de entrada y ordenadas segun las
votaciones de Fanart.tv
@rtype: list
"""
if idioma is None:
idioma = ["all"]
if self.result["id"] == "":
return []
        if len(self.fanart) == 0:  # if empty, query Fanart.tv and load the result
if self.busqueda['tipo'] == 'movie':
# http://assets.fanart.tv/v3/movies/1924?api_key=dffe90fba4d02c199ae7a9e71330c987
url = "http://assets.fanart.tv/v3/movies/" + str(
self.result["id"]) + "?api_key=dffe90fba4d02c199ae7a9e71330c987"
temporada = ""
elif self.busqueda['tipo'] == 'tv':
                # In this case we need the tvdb_id
if self.result["tvdb_id"] == '':
                    # Launch a search by id and re-read everything
self.busqueda["id"] = str(self.result["id"])
self.__by_id()
# http://assets.fanart.tv/v3/tv/153021?api_key=dffe90fba4d02c199ae7a9e71330c987
url = "http://assets.fanart.tv/v3/tv/" + str(
self.result["tvdb_id"]) + "?api_key=dffe90fba4d02c199ae7a9e71330c987"
else:
                # 'person' is not supported
return None
fanarttv = jsontools.load_json(scrapertools.downloadpageWithoutCookies(url))
            if fanarttv is None:  # if the searched item is not on Fanart.tv, return an empty list
return []
for k, v in fanarttv.items():
if k in ("hdtvlogo", "hdmovielogo"):
self.fanart["hdclearlogo"] = v
elif k in ("tvposter", "movieposter"):
self.fanart["poster"] = v
elif k in ("tvbanner", "moviebanner"):
self.fanart["banner"] = v
elif k in ("tvthumb", "moviethumb"):
self.fanart["thumbs"] = v
elif k in ("hdclearart", "hdmovieclearart"):
self.fanart["hdclearart"] = v
elif k in ("clearart", "movieart"):
self.fanart["clearart"] = v
elif k in ("showbackground", "moviebackground"):
self.fanart["background"] = v
elif k in ("clearlogo", "movielogo"):
self.fanart["clearlogo"] = v
elif k in ("characterart", "seasonthumb", "seasonposter", "seasonbanner", "moviedisc"):
self.fanart[k] = v
        # initialise the dictionary with the requested languages
ret_dic = {}
for i in idioma:
ret_dic[i] = []
for i in self.fanart[tipo]:
if i["lang"] in idioma:
if "season" not in i:
ret_dic[i["lang"]].append(i["url"])
elif temporada == "" or (temporada == 'all' and i["season"] == 'all'):
ret_dic[i["lang"]].append(i["url"])
else:
if i["season"] == "":
i["season"] = 0
else:
i["season"] = int(i["season"])
if i["season"] == int(temporada):
ret_dic[i["lang"]].append(i["url"])
elif "all" in idioma:
ret_dic["all"].append(i["url"])
ret_list = []
for i in idioma:
ret_list.extend(ret_dic[i])
# print ret_list
return ret_list
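    # Illustrative call (a sketch, not a definitive reference: it assumes a Tmdb
    # instance "otmdb" with a TV show already loaded and present on Fanart.tv):
    #     urls = otmdb.get_fanart(tipo="hdclearart", idioma=["es", "en"], temporada="all")
    #     # -> list of HD clearart urls, Spanish first, then English, sorted by Fanart.tv votes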
def get_episodio(self, numtemporada=1, capitulo=1):
        # --------------------------------------------------------------------------------------------------------------------------------------------
        # Parameters:
        #    numtemporada (optional): (int) Season number. Default 1.
        #    capitulo: (int) Episode number. Default 1.
        # Return: (dic)
        #    Returns a dictionary with the following keys:
        #       "temporada_nombre", "temporada_sinopsis", "temporada_poster", "temporada_num_episodios"(int),
        #       "episodio_titulo", "episodio_sinopsis", "episodio_imagen", "episodio_air_date",
        #       "episodio_crew", "episodio_guest_stars", "episodio_vote_count" and "episodio_vote_average"
        # --------------------------------------------------------------------------------------------------------------------------------------------
if self.result["id"] == "" or self.busqueda["tipo"] != "tv":
return {}
capitulo = int(capitulo)
if capitulo < 1:
capitulo = 1
temporada = self.get_temporada(numtemporada)
if not temporada:
            # An error occurred
return {}
if len(temporada["episodes"]) < capitulo:
            # An error occurred
logger.error("Episodio %d de la temporada %d no encontrado." % (capitulo, numtemporada))
return {}
ret_dic = dict()
ret_dic["temporada_nombre"] = temporada["name"]
ret_dic["temporada_sinopsis"] = temporada["overview"]
ret_dic["temporada_poster"] = ('http://image.tmdb.org/t/p/original' + temporada["poster_path"]) if temporada[
"poster_path"] else ""
ret_dic["temporada_num_episodios"] = len(temporada["episodes"])
episodio = temporada["episodes"][capitulo - 1]
ret_dic["episodio_titulo"] = episodio["name"]
ret_dic["episodio_sinopsis"] = episodio["overview"]
ret_dic["episodio_imagen"] = ('http://image.tmdb.org/t/p/original' + episodio["still_path"]) if episodio[
"still_path"] else ""
ret_dic["episodio_air_date"] = episodio["air_date"]
ret_dic["episodio_crew"] = episodio["crew"]
ret_dic["episodio_guest_stars"] = episodio["guest_stars"]
ret_dic["episodio_vote_count"] = episodio["vote_count"]
ret_dic["episodio_vote_average"] = episodio["vote_average"]
return ret_dic
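    # Illustrative call (a sketch; it assumes a Tmdb instance "otmdb" loaded with
    # a TV show that has at least three episodes in season 1):
    #     ep = otmdb.get_episodio(numtemporada=1, capitulo=3)
    #     if ep:
    #         print ep["episodio_titulo"], ep["episodio_air_date"]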
def get_temporada(self, numtemporada=1):
        # --------------------------------------------------------------------------------------------------------------------------------------------
        # Parameters:
        #    numtemporada: (int) Season number. Default 1.
        # Return: (dic)
        #    Returns a dictionary with data about the season.
        #    More information about the returned data at:
        #       http://docs.themoviedb.apiary.io/#reference/tv-seasons/tvidseasonseasonnumber/get
        #       http://docs.themoviedb.apiary.io/#reference/tv-seasons/tvidseasonseasonnumbercredits/get
        # --------------------------------------------------------------------------------------------------------------------------------------------
if self.result["id"] == "" or self.busqueda["tipo"] != "tv":
return {}
numtemporada = int(numtemporada)
if numtemporada < 0:
numtemporada = 1
# if not self.temporada.has_key("season_number") or self.temporada["season_number"] != numtemporada:
# if numtemporada > len(self.temporada) or self.temporada[numtemporada] is None:
if not self.temporada.has_key(numtemporada) or not self.temporada[numtemporada]:
            # If there is no data about the requested season, query the website
# http://api.themoviedb.org/3/tv/1407/season/1?api_key=f7f51775877e0bb6703520952b3c7840&language=es&
# append_to_response=credits
url = "http://api.themoviedb.org/3/tv/%s/season/%s?api_key=f7f51775877e0bb6703520952b3c7840&language=%s" \
"&append_to_response=credits" % (self.result["id"], numtemporada, self.busqueda["idioma"])
buscando = "id_Tmdb: " + str(self.result["id"]) + " temporada: " + str(numtemporada) + "\nURL: " + url
logger.info("[Tmdb.py] Buscando " + buscando)
try:
self.temporada[numtemporada] = jsontools.load_json(scrapertools.downloadpageWithoutCookies(url))
except:
self.temporada[numtemporada] = ["status_code"]
if "status_code" in self.temporada[numtemporada]:
            # An error occurred
self.temporada[numtemporada] = {}
logger.error("La busqueda de " + buscando + " no dio resultados.")
return {}
return self.temporada[numtemporada]
def get_videos(self):
"""
:return: Devuelve una lista ordenada (idioma/resolucion/tipo) de objetos Dict en la que cada uno de
sus elementos corresponde con un trailer, teaser o clip de youtube.
:rtype: list of Dict
"""
ret = []
if self.result['id']:
if not self.result['videos']:
                # First search for videos, in the search language
url = "http://api.themoviedb.org/3/%s/%s/videos?api_key=f7f51775877e0bb6703520952b3c7840&language=%s" \
% (self.busqueda['tipo'], self.result['id'], self.busqueda["idioma"])
try:
dict_videos = jsontools.load_json(scrapertools.downloadpageWithoutCookies(url))
                except:
                    dict_videos = {}
                if dict_videos.get('results'):
dict_videos['results'] = sorted(dict_videos['results'], key=lambda x: (x['type'], x['size']))
self.result["videos"] = dict_videos['results']
                # If the search language is not English, run a second video search in English
if self.busqueda["idioma"] != 'en':
url = "http://api.themoviedb.org/3/%s/%s/videos?api_key=f7f51775877e0bb6703520952b3c7840" \
% (self.busqueda['tipo'], self.result['id'])
try:
dict_videos = jsontools.load_json(scrapertools.downloadpageWithoutCookies(url))
                    except:
                        dict_videos = {}
                    if dict_videos.get('results'):
dict_videos['results'] = sorted(dict_videos['results'], key=lambda x: (x['type'], x['size']))
self.result["videos"].extend(dict_videos['results'])
            # If the searches returned results, build a list of objects
for i in self.result['videos']:
if i['site'] == "YouTube":
ret.append({'name': i['name'],
'url': "https://www.youtube.com/watch?v=%s" % i['key'],
'size': str(i['size']),
'type': i['type'],
'language': i['iso_639_1']})
return ret
def get_infoLabels(self, infoLabels=None, origen=None):
"""
:param infoLabels: Informacion extra de la pelicula, serie, temporada o capitulo.
:type infoLabels: Dict
:return: Devuelve la informacion extra obtenida del objeto actual. Si se paso el parametro infoLables, el valor
devuelto sera el leido como parametro debidamente actualizado.
:rtype: Dict
"""
ret_infoLabels = copy.copy(infoLabels) if infoLabels else {}
items = self.result.items() if not origen else origen.items()
for k, v in items:
if v == '':
continue
elif type(v) == str:
v = re.sub(r"\n|\r|\t", "", v)
if k == 'overview':
ret_infoLabels['plot'] = self.get_sinopsis()
elif k == 'runtime':
ret_infoLabels['duration'] = v
elif k == 'release_date':
ret_infoLabels['year'] = int(v[:4])
elif k == 'first_air_date':
ret_infoLabels['year'] = int(v[:4])
ret_infoLabels['aired'] = v
ret_infoLabels['premiered'] = v
elif k == 'original_title':
ret_infoLabels['originaltitle'] = v
elif k == 'vote_average':
ret_infoLabels['rating'] = float(v)
elif k == 'vote_count':
ret_infoLabels['votes'] = v
elif k == 'poster_path':
ret_infoLabels['thumbnail'] = 'http://image.tmdb.org/t/p/original' + v
elif k == 'backdrop_path':
ret_infoLabels['fanart'] = 'http://image.tmdb.org/t/p/original' + v
elif k == 'id':
ret_infoLabels['tmdb_id'] = v
elif k == 'imdb_id':
ret_infoLabels['imdb_id'] = v
ret_infoLabels['IMDBNumber'] = v
ret_infoLabels['code'] = v
elif k == 'genres':
ret_infoLabels['genre'] = self.get_generos()
elif k == 'name':
ret_infoLabels['title'] = v
elif k == 'production_companies':
ret_infoLabels['studio'] = ", ".join(i['name'] for i in v)
elif k == 'production_countries' or k == 'origin_country':
if 'country' not in ret_infoLabels:
ret_infoLabels['country'] = ", ".join(i if type(i) == str else i['name'] for i in v)
                else:
                    ret_infoLabels['country'] += ", " + ", ".join(i if type(i) == str else i['name'] for i in v)
elif k == 'credits_cast':
ret_infoLabels['castandrole'] = []
for c in sorted(v, key=lambda c: c.get("order")):
ret_infoLabels['castandrole'].append((c['name'], c['character']))
elif k == 'credits_crew':
l_director = []
l_writer = []
for crew in v:
if crew['job'].lower() == 'director':
l_director.append(crew['name'])
elif crew['job'].lower() in ('screenplay', 'writer'):
l_writer.append(crew['name'])
if l_director:
ret_infoLabels['director'] = ", ".join(l_director)
if l_writer:
if 'writer' not in ret_infoLabels:
ret_infoLabels['writer'] = ", ".join(l_writer)
else:
ret_infoLabels['writer'] += "," + (",".join(l_writer))
elif k == 'created_by':
l_writer = []
for cr in v:
l_writer.append(cr['name'])
if 'writer' not in ret_infoLabels:
ret_infoLabels['writer'] = ",".join(l_writer)
else:
ret_infoLabels['writer'] += "," + (",".join(l_writer))
elif k == 'videos' and len(v) > 0:
if v[0]["site"] == "YouTube":
ret_infoLabels['trailer'] = "https://www.youtube.com/watch?v=" + v[0]["key"]
elif type(v) == str:
ret_infoLabels[k] = v
# logger.debug(k +'= '+ v)
return ret_infoLabels
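    # Illustrative call (a sketch; it assumes a Tmdb instance "otmdb" with a movie
    # or TV show already loaded):
    #     labels = otmdb.get_infoLabels()
    #     # -> Kodi-style dict with keys such as 'title', 'year', 'genre', 'rating', 'tmdb_id', 'trailer'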
####################################################################################################
# for StreamOnDemand by costaplus
# ====================================================================================================
def infoSod(item, tipo="movie"):
    '''
    :param item: item
    :return: returns the item with its extra info filled in, free of code errors
    '''
logger.info("streamondemand.core.tmdb infoSod")
logger.info("channel=[" + item.channel + "], action=[" + item.action + "], title[" + item.title + "], url=[" + item.url + "], thumbnail=[" + item.thumbnail + "], tipo=[" + tipo + "]")
try:
tmdbtitle = item.fulltitle.split("|")[0].split("{")[0].split("[")[0].split("(")[0].split("Sub-ITA")[0].split("Sub ITA")[0].split("20")[0].split("19")[0].split("S0")[0].split("Serie")[0].split("HD ")[0]
year = scrapertools.find_single_match(item.fulltitle, '\((\d{4})\)')
otmdb = Tmdb(texto_buscado=tmdbtitle,
tipo=tipo,
idioma_busqueda='it',
year=year)
item.infoLabels = otmdb.get_infoLabels()
if 'thumbnail' in item.infoLabels:
item.thumbnail = item.infoLabels['thumbnail']
if 'fanart' in item.infoLabels:
item.fanart = item.infoLabels['fanart']
except:
pass
return item
# ===================================================================================================
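# ---------------------------------------------------------------------------
# Minimal usage sketch: it assumes network access to api.themoviedb.org and the
# jsontools/scrapertools helpers imported by this module; the instance name and
# search text below are purely illustrative.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    otmdb = Tmdb(texto_buscado="Breaking Bad", tipo="tv", idioma_busqueda="es")
    print "id: " + otmdb.get_id()  # empty string if the search found nothing
    print "sinopsis: " + otmdb.get_sinopsis()
    print "poster: " + otmdb.get_poster(size="w300")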
| gpl-3.0 | -5,761,422,043,173,886,000 | 47.589061 | 209 | 0.537062 | false |
ygol/odoo | addons/project_issue_sheet/project_issue_sheet.py | 381 | 2875 | #-*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields,osv,orm
from openerp.tools.translate import _
class project_issue(osv.osv):
_inherit = 'project.issue'
_description = 'project issue'
_columns = {
'timesheet_ids': fields.one2many('hr.analytic.timesheet', 'issue_id', 'Timesheets'),
'analytic_account_id': fields.many2one('account.analytic.account', 'Analytic Account'),
}
def on_change_project(self, cr, uid, ids, project_id, context=None):
if not project_id:
return {}
result = super(project_issue, self).on_change_project(cr, uid, ids, project_id, context=context)
project = self.pool.get('project.project').browse(cr, uid, project_id, context=context)
if 'value' not in result:
result['value'] = {}
account = project.analytic_account_id
if account:
result['value']['analytic_account_id'] = account.id
return result
def on_change_account_id(self, cr, uid, ids, account_id, context=None):
if not account_id:
return {}
account = self.pool.get('account.analytic.account').browse(cr, uid, account_id, context=context)
result = {}
if account and account.state == 'pending':
result = {'warning' : {'title' : _('Analytic Account'), 'message' : _('The Analytic Account is pending !')}}
return result
class account_analytic_line(osv.osv):
_inherit = 'account.analytic.line'
_description = 'account analytic line'
_columns = {
'create_date' : fields.datetime('Create Date', readonly=True),
}
class hr_analytic_issue(osv.osv):
_inherit = 'hr.analytic.timesheet'
_description = 'hr analytic timesheet'
_columns = {
'issue_id' : fields.many2one('project.issue', 'Issue'),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 7,794,433,713,497,821,000 | 35.392405 | 120 | 0.611826 | false |
mattpap/sympy-polys | sympy/physics/paulialgebra.py | 10 | 1595 | """
This module implements Pauli algebra by subclassing Symbol. Only algebraic
properties of Pauli matrices are used (we don't use the Matrix class).
See the documentation to the class Pauli for examples.
See also:
http://en.wikipedia.org/wiki/Pauli_matrices
"""
from sympy import Symbol, I
def delta(i,j):
if i==j:
return 1
else:
return 0
def epsilon(i,j,k):
if (i,j,k) in [(1,2,3), (2,3,1), (3,1,2)]:
return 1
elif (i,j,k) in [(1,3,2), (3,2,1), (2,1,3)]:
return -1
else:
return 0
class Pauli(Symbol):
"""
>>> from sympy.physics.paulialgebra import Pauli
>>> Pauli(1)
sigma1
>>> Pauli(1)*Pauli(2)
I*sigma3
>>> Pauli(1)*Pauli(1)
1
>>> Pauli(3)**4
1
>>> Pauli(1)*Pauli(2)*Pauli(3)
I
"""
__slots__ = ["i"]
def __new__(cls, i):
if not i in [1,2,3]:
raise IndexError("Invalid Pauli index")
obj = Symbol.__new__(cls, "sigma%d"%i, commutative=False)
obj.i=i
return obj
def __getnewargs__(self):
return (self.i,)
    # FIXME doesn't work for -I*Pauli(2)*Pauli(3)
def __mul__(self, other):
if isinstance(other, Pauli):
j=self.i
k=other.i
return delta(j,k) \
+I*epsilon(j,k,1)*Pauli(1) \
+I*epsilon(j,k,2)*Pauli(2) \
+I*epsilon(j,k,3)*Pauli(3)
return super(Pauli, self).__mul__(other)
def _eval_power(b, e):
if e.is_Integer and e.is_positive:
return super(Pauli, b).__pow__(int(e) % 2)
| bsd-3-clause | 7,608,750,713,065,738,000 | 22.455882 | 74 | 0.522257 | false |
openstack/nova | nova/api/validation/extra_specs/pci_passthrough.py | 3 | 1271 | # Copyright 2020 Red Hat, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Validators for ``pci_passthrough`` namespaced extra specs."""
from nova.api.validation.extra_specs import base
EXTRA_SPEC_VALIDATORS = [
base.ExtraSpecValidator(
name='pci_passthrough:alias',
description=(
'Specify the number of ``$alias`` PCI device(s) to attach to the '
'instance. Must be of format ``$alias:$number``. Use commas to '
'specify multiple values.'
),
value={
'type': str,
# one or more comma-separated '$alias:$num' values
'pattern': r'[^:]+:\d+(?:\s*,\s*[^:]+:\d+)*',
},
),
]
def register():
return EXTRA_SPEC_VALIDATORS
| apache-2.0 | -8,583,038,482,373,635,000 | 32.447368 | 78 | 0.649882 | false |
bright-sparks/chromium-spacewalk | chrome/common/extensions/docs/server2/caching_file_system.py | 4 | 5389 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import posixpath
import sys
from file_system import FileSystem, StatInfo, FileNotFoundError
from future import Future
from path_util import IsDirectory, ToDirectory
from third_party.json_schema_compiler.memoize import memoize
class CachingFileSystem(FileSystem):
'''FileSystem which implements a caching layer on top of |file_system|. It's
  smart, using Stat() to decide whether to skip Read()ing from |file_system|,
and only Stat()ing directories never files.
'''
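  # Minimal wiring sketch (an illustration only; it assumes |real_fs| is any
  # FileSystem implementation and |object_store_creator| comes from this docs
  # server's ObjectStoreCreator):
  #     caching_fs = CachingFileSystem(real_fs, object_store_creator)
  #     data = caching_fs.Read(['path/to/file.html']).Get()
  #     version = caching_fs.Stat('path/to/').version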
def __init__(self, file_system, object_store_creator):
self._file_system = file_system
def create_object_store(category, **optargs):
return object_store_creator.Create(
CachingFileSystem,
category='%s/%s' % (file_system.GetIdentity(), category),
**optargs)
self._stat_object_store = create_object_store('stat')
# The read caches can start populated (start_empty=False) because file
# updates are picked up by the stat, so it doesn't need the force-refresh
# which starting empty is designed for. Without this optimisation, cron
# runs are extra slow.
self._read_object_store = create_object_store('read', start_empty=False)
def Refresh(self):
return self._file_system.Refresh()
def Stat(self, path):
return self.StatAsync(path).Get()
def StatAsync(self, path):
'''Stats the directory given, or if a file is given, stats the file's parent
directory to get info about the file.
'''
# Always stat the parent directory, since it will have the stat of the child
# anyway, and this gives us an entire directory's stat info at once.
dir_path, file_path = posixpath.split(path)
dir_path = ToDirectory(dir_path)
def make_stat_info(dir_stat):
'''Converts a dir stat into the correct resulting StatInfo; if the Stat
was for a file, the StatInfo should just contain that file.
'''
if path == dir_path:
return dir_stat
# Was a file stat. Extract that file.
file_version = dir_stat.child_versions.get(file_path)
if file_version is None:
raise FileNotFoundError('No stat found for %s in %s (found %s)' %
(path, dir_path, dir_stat.child_versions))
return StatInfo(file_version)
dir_stat = self._stat_object_store.Get(dir_path).Get()
if dir_stat is not None:
return Future(value=make_stat_info(dir_stat))
def next(dir_stat):
assert dir_stat is not None # should have raised a FileNotFoundError
# We only ever need to cache the dir stat.
self._stat_object_store.Set(dir_path, dir_stat)
return make_stat_info(dir_stat)
return self._MemoizedStatAsyncFromFileSystem(dir_path).Then(next)
@memoize
def _MemoizedStatAsyncFromFileSystem(self, dir_path):
'''This is a simple wrapper to memoize Futures to directory stats, since
StatAsync makes heavy use of it. Only cache directories so that the
memoized cache doesn't blow up.
'''
assert IsDirectory(dir_path)
return self._file_system.StatAsync(dir_path)
def Read(self, paths, skip_not_found=False):
'''Reads a list of files. If a file is in memcache and it is not out of
date, it is returned. Otherwise, the file is retrieved from the file system.
'''
cached_read_values = self._read_object_store.GetMulti(paths).Get()
cached_stat_values = self._stat_object_store.GetMulti(paths).Get()
# Populate a map of paths to Futures to their stat. They may have already
# been cached in which case their Future will already have been constructed
# with a value.
stat_futures = {}
def handle(error):
if isinstance(error, FileNotFoundError):
return None
raise error
for path in paths:
stat_value = cached_stat_values.get(path)
if stat_value is None:
stat_future = self.StatAsync(path)
if skip_not_found:
stat_future = stat_future.Then(lambda x: x, handle)
else:
stat_future = Future(value=stat_value)
stat_futures[path] = stat_future
# Filter only the cached data which is fresh by comparing to the latest
# stat. The cached read data includes the cached version. Remove it for
# the result returned to callers.
fresh_data = dict(
(path, data) for path, (data, version) in cached_read_values.iteritems()
if stat_futures[path].Get().version == version)
if len(fresh_data) == len(paths):
# Everything was cached and up-to-date.
return Future(value=fresh_data)
def next(new_results):
# Update the cache. This is a path -> (data, version) mapping.
self._read_object_store.SetMulti(
dict((path, (new_result, stat_futures[path].Get().version))
for path, new_result in new_results.iteritems()))
new_results.update(fresh_data)
return new_results
# Read in the values that were uncached or old.
return self._file_system.Read(set(paths) - set(fresh_data.iterkeys()),
skip_not_found=skip_not_found).Then(next)
def GetIdentity(self):
return self._file_system.GetIdentity()
def __repr__(self):
return '%s of <%s>' % (type(self).__name__, repr(self._file_system))
| bsd-3-clause | -3,837,480,362,151,991,000 | 39.216418 | 80 | 0.673965 | false |
ktosiek/spacewalk | client/solaris/smartpm/smart/commands/update.py | 5 | 3102 | #
# Copyright (c) 2004 Conectiva, Inc.
#
# Written by Gustavo Niemeyer <[email protected]>
#
# This file is part of Smart Package Manager.
#
# Smart Package Manager is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# Smart Package Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Smart Package Manager; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
from smart.option import OptionParser
from smart.const import NEVER
from smart import *
import string
import time
import re
USAGE=_("smart update [options] [channelalias] ...")
DESCRIPTION=_("""
This command will update the known information about the
given channels. If no channels are given, all channels
which are not disabled or set up for manual updates will
be updated.
""")
EXAMPLES=_("""
smart update
smart update mychannel
smart update mychannel1 mychannel2
""")
def parse_options(argv):
parser = OptionParser(usage=USAGE,
description=DESCRIPTION,
examples=EXAMPLES)
parser.add_option("--after", metavar="MIN", type="int",
help=_("only update if the last successful update "
"happened before the given delay"))
opts, args = parser.parse_args(argv)
opts.args = args
return opts
def main(ctrl, opts):
sysconf.assertWritable()
if opts.after is not None:
lastupdate = sysconf.get("last-update", 0)
if lastupdate >= time.time()-(opts.after*60):
return 1
ctrl.rebuildSysConfChannels()
if opts.args:
channels = []
for arg in opts.args:
for channel in ctrl.getChannels():
if channel.getAlias() == arg:
channels.append(channel)
break
else:
raise Error, _("Argument '%s' is not a channel alias.") % arg
else:
channels = None
# First, load current cache to keep track of new packages.
ctrl.reloadChannels()
failed = not ctrl.reloadChannels(channels, caching=NEVER)
cache = ctrl.getCache()
newpackages = pkgconf.filterByFlag("new", cache.getPackages())
if not newpackages:
iface.showStatus(_("Channels have no new packages."))
else:
if len(newpackages) <= 10:
newpackages.sort()
info = ":\n"
for pkg in newpackages:
info += " %s\n" % pkg
else:
info = "."
iface.showStatus(_("Channels have %d new packages%s")
% (len(newpackages), info))
return int(failed)
# vim:ts=4:sw=4:et
| gpl-2.0 | -388,631,972,715,077,440 | 31.652632 | 77 | 0.638943 | false |
evensonbryan/yocto-autobuilder | lib/python2.7/site-packages/buildbot-0.8.8-py2.7.egg/buildbot/test/unit/test_changes_pb.py | 4 | 9572 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
"""
Test the PB change source.
"""
import mock
from twisted.trial import unittest
from twisted.internet import defer
from buildbot.changes import pb
from buildbot.test.util import changesource, pbmanager
from buildbot.util import epoch2datetime
class TestPBChangeSource(
changesource.ChangeSourceMixin,
pbmanager.PBManagerMixin,
unittest.TestCase):
def setUp(self):
self.setUpPBChangeSource()
d = self.setUpChangeSource()
@d.addCallback
def setup(_):
self.master.pbmanager = self.pbmanager
return d
def test_registration_no_slaveport(self):
return self._test_registration(None,
user='alice', passwd='sekrit')
def test_registration_global_slaveport(self):
return self._test_registration(('9999', 'alice', 'sekrit'),
slavePort='9999', user='alice', passwd='sekrit')
def test_registration_custom_port(self):
return self._test_registration(('8888', 'alice', 'sekrit'),
user='alice', passwd='sekrit', port='8888')
def test_registration_no_userpass(self):
return self._test_registration(('9939', 'change', 'changepw'),
slavePort='9939')
def test_registration_no_userpass_no_global(self):
return self._test_registration(None)
@defer.inlineCallbacks
def _test_registration(self, exp_registration, slavePort=None,
**constr_kwargs):
config = mock.Mock()
config.slavePortnum = slavePort
self.attachChangeSource(pb.PBChangeSource(**constr_kwargs))
self.startChangeSource()
yield self.changesource.reconfigService(config)
if exp_registration:
self.assertRegistered(*exp_registration)
else:
self.assertNotRegistered()
yield self.stopChangeSource()
if exp_registration:
self.assertUnregistered(*exp_registration)
self.assertEqual(self.changesource.registration, None)
def test_perspective(self):
self.attachChangeSource(pb.PBChangeSource('alice', 'sekrit', port='8888'))
persp = self.changesource.getPerspective(mock.Mock(), 'alice')
self.assertIsInstance(persp, pb.ChangePerspective)
def test_describe(self):
cs = pb.PBChangeSource()
self.assertSubstring("PBChangeSource", cs.describe())
def test_describe_prefix(self):
cs = pb.PBChangeSource(prefix="xyz")
self.assertSubstring("PBChangeSource", cs.describe())
self.assertSubstring("xyz", cs.describe())
def test_describe_int(self):
cs = pb.PBChangeSource(port=9989)
self.assertSubstring("PBChangeSource", cs.describe())
@defer.inlineCallbacks
def test_reconfigService_no_change(self):
config = mock.Mock()
self.attachChangeSource(pb.PBChangeSource(port='9876'))
self.startChangeSource()
yield self.changesource.reconfigService(config)
self.assertRegistered('9876', 'change', 'changepw')
yield self.stopChangeSource()
self.assertUnregistered('9876', 'change', 'changepw')
@defer.inlineCallbacks
def test_reconfigService_default_changed(self):
config = mock.Mock()
config.slavePortnum = '9876'
self.attachChangeSource(pb.PBChangeSource())
self.startChangeSource()
yield self.changesource.reconfigService(config)
self.assertRegistered('9876', 'change', 'changepw')
config.slavePortnum = '1234'
yield self.changesource.reconfigService(config)
self.assertUnregistered('9876', 'change', 'changepw')
self.assertRegistered('1234', 'change', 'changepw')
yield self.stopChangeSource()
self.assertUnregistered('1234', 'change', 'changepw')
class TestChangePerspective(unittest.TestCase):
def setUp(self):
self.added_changes = []
self.master = mock.Mock()
def addChange(**chdict):
self.added_changes.append(chdict)
return defer.succeed(mock.Mock())
self.master.addChange = addChange
def test_addChange_noprefix(self):
cp = pb.ChangePerspective(self.master, None)
d = cp.perspective_addChange(dict(who="bar", files=['a']))
def check(_):
self.assertEqual(self.added_changes,
[ dict(author="bar", files=['a']) ])
d.addCallback(check)
return d
def test_addChange_codebase(self):
cp = pb.ChangePerspective(self.master, None)
d = cp.perspective_addChange(dict(who="bar", files=[], codebase='cb'))
def check(_):
self.assertEqual(self.added_changes,
[ dict(author="bar", files=[], codebase='cb') ])
d.addCallback(check)
return d
def test_addChange_prefix(self):
cp = pb.ChangePerspective(self.master, 'xx/')
d = cp.perspective_addChange(
dict(who="bar", files=['xx/a', 'yy/b']))
def check(_):
self.assertEqual(self.added_changes,
[ dict(author="bar", files=['a']) ])
d.addCallback(check)
return d
def test_addChange_sanitize_None(self):
cp = pb.ChangePerspective(self.master, None)
d = cp.perspective_addChange(
dict(project=None, revlink=None, repository=None)
)
def check(_):
self.assertEqual(self.added_changes,
[ dict(project="", revlink="", repository="",
files=[]) ])
d.addCallback(check)
return d
def test_addChange_when_None(self):
cp = pb.ChangePerspective(self.master, None)
d = cp.perspective_addChange(
dict(when=None)
)
def check(_):
self.assertEqual(self.added_changes,
[ dict(when_timestamp=None, files=[]) ])
d.addCallback(check)
return d
def test_addChange_files_tuple(self):
cp = pb.ChangePerspective(self.master, None)
d = cp.perspective_addChange(
dict(files=('a', 'b'))
)
def check(_):
self.assertEqual(self.added_changes,
[ dict(files=['a', 'b']) ])
d.addCallback(check)
return d
def test_addChange_unicode(self):
cp = pb.ChangePerspective(self.master, None)
d = cp.perspective_addChange(dict(author=u"\N{SNOWMAN}",
comments=u"\N{SNOWMAN}",
files=[u'\N{VERY MUCH GREATER-THAN}']))
def check(_):
self.assertEqual(self.added_changes,
[ dict(author=u"\N{SNOWMAN}",
comments=u"\N{SNOWMAN}",
files=[u'\N{VERY MUCH GREATER-THAN}']) ])
d.addCallback(check)
return d
def test_addChange_unicode_as_bytestring(self):
cp = pb.ChangePerspective(self.master, None)
d = cp.perspective_addChange(dict(author=u"\N{SNOWMAN}".encode('utf8'),
comments=u"\N{SNOWMAN}".encode('utf8'),
files=[u'\N{VERY MUCH GREATER-THAN}'.encode('utf8')]))
def check(_):
self.assertEqual(self.added_changes,
[ dict(author=u"\N{SNOWMAN}",
comments=u"\N{SNOWMAN}",
files=[u'\N{VERY MUCH GREATER-THAN}']) ])
d.addCallback(check)
return d
def test_addChange_non_utf8_bytestring(self):
cp = pb.ChangePerspective(self.master, None)
bogus_utf8 = '\xff\xff\xff\xff'
replacement = bogus_utf8.decode('utf8', 'replace')
d = cp.perspective_addChange(dict(author=bogus_utf8, files=['a']))
def check(_):
self.assertEqual(self.added_changes,
[ dict(author=replacement, files=['a']) ])
d.addCallback(check)
return d
def test_addChange_old_param_names(self):
cp = pb.ChangePerspective(self.master, None)
d = cp.perspective_addChange(dict(isdir=1, who='me', when=1234,
files=[]))
def check(_):
self.assertEqual(self.added_changes,
[ dict(is_dir=1, author='me', files=[],
when_timestamp=epoch2datetime(1234)) ])
d.addCallback(check)
return d
def test_createUserObject_git_src(self):
cp = pb.ChangePerspective(self.master, None)
d = cp.perspective_addChange(dict(who="c <h@c>", src='git'))
def check_change(_):
self.assertEqual(self.added_changes, [ dict(author="c <h@c>",
files=[],
src='git') ])
d.addCallback(check_change)
return d
| gpl-2.0 | -5,653,281,487,827,793,000 | 35.120755 | 82 | 0.594338 | false |
strk/mapnik | scons/scons-local-2.2.0/SCons/Variables/PathVariable.py | 14 | 5703 | """SCons.Variables.PathVariable
This file defines an option type for SCons implementing path settings.
To be used whenever a a user-specified path override should be allowed.
Arguments to PathVariable are:
option-name = name of this option on the command line (e.g. "prefix")
option-help = help string for option
option-dflt = default value for this option
validator = [optional] validator for option value. Predefined
validators are:
PathAccept -- accepts any path setting; no validation
PathIsDir -- path must be an existing directory
PathIsDirCreate -- path must be a dir; will create
PathIsFile -- path must be a file
PathExists -- path must exist (any type) [default]
The validator is a function that is called and which
should return True or False to indicate if the path
is valid. The arguments to the validator function
are: (key, val, env). The key is the name of the
option, the val is the path specified for the option,
      and the env is the env to which the Options have been
added.
Usage example:
Examples:
prefix=/usr/local
opts = Variables()
opts = Variables()
opts.Add(PathVariable('qtdir',
'where the root of Qt is installed',
qtdir, PathIsDir))
opts.Add(PathVariable('qt_includes',
'where the Qt includes are installed',
'$qtdir/includes', PathIsDirCreate))
opts.Add(PathVariable('qt_libraries',
'where the Qt library is installed',
'$qtdir/lib'))
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Variables/PathVariable.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
__all__ = ['PathVariable',]
import os
import os.path
import SCons.Errors
class _PathVariableClass(object):
def PathAccept(self, key, val, env):
"""Accepts any path, no checking done."""
pass
def PathIsDir(self, key, val, env):
"""Validator to check if Path is a directory."""
if not os.path.isdir(val):
if os.path.isfile(val):
m = 'Directory path for option %s is a file: %s'
else:
m = 'Directory path for option %s does not exist: %s'
raise SCons.Errors.UserError(m % (key, val))
def PathIsDirCreate(self, key, val, env):
"""Validator to check if Path is a directory,
creating it if it does not exist."""
if os.path.isfile(val):
m = 'Path for option %s is a file, not a directory: %s'
raise SCons.Errors.UserError(m % (key, val))
if not os.path.isdir(val):
os.makedirs(val)
def PathIsFile(self, key, val, env):
"""validator to check if Path is a file"""
if not os.path.isfile(val):
if os.path.isdir(val):
m = 'File path for option %s is a directory: %s'
else:
m = 'File path for option %s does not exist: %s'
raise SCons.Errors.UserError(m % (key, val))
def PathExists(self, key, val, env):
"""validator to check if Path exists"""
if not os.path.exists(val):
m = 'Path for option %s does not exist: %s'
raise SCons.Errors.UserError(m % (key, val))
def __call__(self, key, help, default, validator=None):
        # NB: searchfunc is currently undocumented and unsupported
"""
The input parameters describe a 'path list' option, thus they
are returned with the correct converter and validator appended. The
result is usable for input to opts.Add() .
The 'default' option specifies the default path to use if the
user does not specify an override with this option.
validator is a validator, see this file for examples
"""
if validator is None:
validator = self.PathExists
if SCons.Util.is_List(key) or SCons.Util.is_Tuple(key):
return (key, '%s ( /path/to/%s )' % (help, key[0]), default,
validator, None)
else:
return (key, '%s ( /path/to/%s )' % (help, key), default,
validator, None)
PathVariable = _PathVariableClass()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| lgpl-2.1 | 4,087,795,417,573,088,000 | 37.795918 | 114 | 0.622655 | false |
rlouf/patterns-of-segregation | bin/plot_scaling_classes.py | 1 | 3443 | """plot_income_scaling.py
Plot the number of households from a given class as a function of the total
number of households per city
"""
import csv
import math
from matplotlib import pylab as plt
from scipy.stats import linregress
colours = {'Lower':'#4F8F6B',
'Higher':'#C1A62E',
'Middle':'#4B453C'}
# Puerto-rican cities are excluded from the analysis
PR_cities = ['7442','0060','6360','4840']
#
# Read data
#
## List of MSA
msa = {}
with open('data/names/msa.csv', 'r') as source:
reader = csv.reader(source, delimiter='\t')
reader.next()
for rows in reader:
if rows[0] not in PR_cities:
msa[rows[0]] = rows[1]
## Classes
classes = {}
with open('extr/classes/msa_average/classes.csv', 'r') as source:
reader = csv.reader(source, delimiter='\t')
reader.next()
for rows in reader:
classes[rows[0]] =[int(r) for r in rows[1:]]
## Number of households per class, and total
households_class = {cl:[] for cl in classes}
households = []
for i, city in enumerate(msa):
print "Compute number of households for %s (%s/%s)"%(msa[city],
i+1,
len(msa))
## Import households data
incomes = {}
with open('data/income/msa/%s/income.csv'%city, 'r') as source:
reader = csv.reader(source, delimiter='\t')
reader.next()
for rows in reader:
num_cat = len(rows[1:])
incomes[rows[0]] = {cl: sum([int(rows[1+c]) for c in classes[cl]])
for cl in classes}
incomes_cl = {cl: sum([incomes[au][cl] for au in incomes])
for cl in classes}
for cl in classes:
households_class[cl].append(incomes_cl[cl])
households.append(sum(incomes_cl.values()))
#
# Fit
#
slopes = {}
r_values = {}
intercepts = {}
for cl in classes:
print "Power-law fit for %s income class"%cl
slope, intercept, r_value, p_value, std_err = linregress([math.log(p) for
p in households],[math.log(d) for d in households_class[cl]])
slopes[cl] = slope
r_values[cl] = r_value
intercepts[cl] = intercept
print "alpha = %s (R^2=%s)"%(slope, r_value)
#
# Plot
#
fig = plt.figure(figsize=(24,8))
for i,cl in enumerate(classes):
ax = fig.add_subplot(1, len(classes), i+1)
ax.plot(households, households_class[cl], 'o', color=colours[cl],
mec=colours[cl], label=r'$%s$'%cl)
ax.plot(sorted(households),
[math.exp(intercepts[cl])*h**slopes[cl] for h in sorted(households)],
label=r'$H_{%s} \sim H^{\,%.2f}$'%(cl, slopes[cl]),
linestyle='--',
color='black')
ax.set_xlabel(r'$H$', fontsize=20)
ax.set_ylabel(r'$H_{%s}$'%cl, fontsize=20)
ax.set_xscale('log')
ax.set_yscale('log')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_position(('outward', 10)) # outward by 10 points
ax.spines['bottom'].set_position(('outward', 10)) # outward by 10 points
ax.spines['left'].set_smart_bounds(True)
ax.spines['bottom'].set_smart_bounds(True)
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
ax.legend(loc='upper left', numpoints=1, frameon=False)
plt.savefig('figures/paper/si/scaling_class.pdf', bbox_inches='tight')
plt.show()
| bsd-3-clause | -5,429,180,976,737,945,000 | 30.587156 | 81 | 0.585536 | false |
rsteca/python-social-auth | social/backends/persona.py | 70 | 1845 | """
Mozilla Persona authentication backend, docs at:
http://psa.matiasaguirre.net/docs/backends/persona.html
"""
from social.utils import handle_http_errors
from social.backends.base import BaseAuth
from social.exceptions import AuthFailed, AuthMissingParameter
class PersonaAuth(BaseAuth):
"""BrowserID authentication backend"""
name = 'persona'
def get_user_id(self, details, response):
"""Use BrowserID email as ID"""
return details['email']
def get_user_details(self, response):
"""Return user details, BrowserID only provides Email."""
# {'status': 'okay',
# 'audience': 'localhost:8000',
# 'expires': 1328983575529,
# 'email': '[email protected]',
# 'issuer': 'browserid.org'}
email = response['email']
return {'username': email.split('@', 1)[0],
'email': email,
'fullname': '',
'first_name': '',
'last_name': ''}
def extra_data(self, user, uid, response, details=None, *args, **kwargs):
"""Return users extra data"""
return {'audience': response['audience'],
'issuer': response['issuer']}
@handle_http_errors
def auth_complete(self, *args, **kwargs):
"""Completes loging process, must return user instance"""
if 'assertion' not in self.data:
raise AuthMissingParameter(self, 'assertion')
response = self.get_json('https://browserid.org/verify', data={
'assertion': self.data['assertion'],
'audience': self.strategy.request_host()
}, method='POST')
if response.get('status') == 'failure':
raise AuthFailed(self)
kwargs.update({'response': response, 'backend': self})
return self.strategy.authenticate(*args, **kwargs)
| bsd-3-clause | -716,346,246,308,106,500 | 35.9 | 77 | 0.595664 | false |
MarcosCommunity/odoo | comunity_modules/hr_payroll_cancel/__openerp__.py | 3 | 1902 | # -*- encoding: utf-8 -*-
###########################################################################
# Module Writen to OpenERP, Open Source Management Solution
#
# Copyright (c) 2014 Vauxoo - http://www.vauxoo.com/
# All Rights Reserved.
# info Vauxoo ([email protected])
############################################################################
# Coded by: Luis Torres ([email protected])
############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name": "Hr Payroll Cancel",
"version": "1.0",
"author": "Vauxoo",
"category": "Localization/Mexico",
"description": """
    This module changes the hr.payslip workflow so that a payslip can be cancelled after it has been confirmed.
""",
"website": "http://www.vauxoo.com/",
"license": "AGPL-3",
"depends": [
"hr_payroll"
],
"demo": [],
"data": [
"hr_payslip_view.xml",
"hr_payslip_workflow.xml",
"test/update_payroll_workflow.yml"
],
"test": [],
"js": [],
"css": [],
"qweb": [],
"installable": True,
"auto_install": False,
"active": False
} | agpl-3.0 | -7,471,669,429,368,059,000 | 35.596154 | 87 | 0.523659 | false |