| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
| stringlengths 3-1.05M | stringlengths 5-104 | stringlengths 4-251 | stringclasses 1 value | stringclasses 15 values | int64 3-1.05M |
# -*- coding: utf-8 -*-
# Copyright (c) 2010 Mark Sandstrom
# Copyright (c) 2011-2015 Raphaël Barrois
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import collections
import logging
from . import containers
from . import declarations
from . import errors
from . import utils
logger = logging.getLogger('factory.generate')
# Strategies
BUILD_STRATEGY = 'build'
CREATE_STRATEGY = 'create'
STUB_STRATEGY = 'stub'
# Factory metaclasses
def get_factory_bases(bases):
"""Retrieve all FactoryMetaClass-derived bases from a list."""
return [b for b in bases if issubclass(b, BaseFactory)]
def resolve_attribute(name, bases, default=None):
"""Find the first definition of an attribute according to MRO order."""
for base in bases:
if hasattr(base, name):
return getattr(base, name)
return default
class FactoryMetaClass(type):
"""Factory metaclass for handling ordered declarations."""
def __call__(cls, **kwargs):
"""Override the default Factory() syntax to call the default strategy.
Returns an instance of the associated class.
"""
if cls._meta.strategy == BUILD_STRATEGY:
return cls.build(**kwargs)
elif cls._meta.strategy == CREATE_STRATEGY:
return cls.create(**kwargs)
elif cls._meta.strategy == STUB_STRATEGY:
return cls.stub(**kwargs)
else:
raise errors.UnknownStrategy('Unknown Meta.strategy: {0}'.format(
cls._meta.strategy))
def __new__(mcs, class_name, bases, attrs):
"""Record attributes as a pattern for later instance construction.
This is called when a new Factory subclass is defined; it will collect
        attribute declarations from the class definition.
Args:
class_name (str): the name of the class being created
bases (list of class): the parents of the class being created
attrs (str => obj dict): the attributes as defined in the class
definition
Returns:
A new class
"""
parent_factories = get_factory_bases(bases)
if parent_factories:
base_factory = parent_factories[0]
else:
base_factory = None
attrs_meta = attrs.pop('Meta', None)
attrs_params = attrs.pop('Params', None)
base_meta = resolve_attribute('_meta', bases)
options_class = resolve_attribute('_options_class', bases, FactoryOptions)
meta = options_class()
attrs['_meta'] = meta
new_class = super(FactoryMetaClass, mcs).__new__(
mcs, class_name, bases, attrs)
meta.contribute_to_class(new_class,
meta=attrs_meta,
base_meta=base_meta,
base_factory=base_factory,
params=attrs_params,
)
return new_class
def __str__(cls):
if cls._meta.abstract:
return '<%s (abstract)>' % cls.__name__
else:
return '<%s for %s>' % (cls.__name__, cls._meta.model)
class BaseMeta:
abstract = True
strategy = CREATE_STRATEGY
class OptionDefault(object):
def __init__(self, name, value, inherit=False):
self.name = name
self.value = value
self.inherit = inherit
def apply(self, meta, base_meta):
value = self.value
if self.inherit and base_meta is not None:
value = getattr(base_meta, self.name, value)
if meta is not None:
value = getattr(meta, self.name, value)
return value
def __str__(self):
return '%s(%r, %r, inherit=%r)' % (
self.__class__.__name__,
self.name, self.value, self.inherit)
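# A brief sketch, not part of the original module: how OptionDefault.apply()
# resolves a Meta option. The Meta classes below are hypothetical stand-ins
# for what FactoryMetaClass extracts from a factory definition.
def _option_default_example():
    class ParentMeta:
        strategy = BUILD_STRATEGY

    class ChildMeta:
        pass  # does not override 'strategy'

    option = OptionDefault('strategy', CREATE_STRATEGY, inherit=True)
    # The inherited value from ParentMeta wins over the declared default.
    return option.apply(ChildMeta, ParentMeta)  # == BUILD_STRATEGY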
class FactoryOptions(object):
def __init__(self):
self.factory = None
self.base_factory = None
self.declarations = {}
self.postgen_declarations = {}
self.parameters = {}
self.parameters_dependencies = {}
def _build_default_options(self):
""""Provide the default value for all allowed fields.
Custom FactoryOptions classes should override this method
to update() its return value.
"""
return [
OptionDefault('model', None, inherit=True),
OptionDefault('abstract', False, inherit=False),
OptionDefault('strategy', CREATE_STRATEGY, inherit=True),
OptionDefault('inline_args', (), inherit=True),
OptionDefault('exclude', (), inherit=True),
OptionDefault('rename', {}, inherit=True),
]
def _fill_from_meta(self, meta, base_meta):
# Exclude private/protected fields from the meta
if meta is None:
meta_attrs = {}
else:
meta_attrs = dict((k, v)
for (k, v) in vars(meta).items()
if not k.startswith('_')
)
for option in self._build_default_options():
assert not hasattr(self, option.name), "Can't override field %s." % option.name
value = option.apply(meta, base_meta)
meta_attrs.pop(option.name, None)
setattr(self, option.name, value)
if meta_attrs:
# Some attributes in the Meta aren't allowed here
raise TypeError("'class Meta' for %r got unknown attribute(s) %s"
% (self.factory, ','.join(sorted(meta_attrs.keys()))))
def contribute_to_class(self, factory,
meta=None, base_meta=None, base_factory=None, params=None):
self.factory = factory
self.base_factory = base_factory
self._fill_from_meta(meta=meta, base_meta=base_meta)
self.model = self.factory._load_model_class(self.model)
if self.model is None:
self.abstract = True
self.counter_reference = self._get_counter_reference()
for parent in reversed(self.factory.__mro__[1:]):
if not hasattr(parent, '_meta'):
continue
self.declarations.update(parent._meta.declarations)
self.postgen_declarations.update(parent._meta.postgen_declarations)
self.parameters.update(parent._meta.parameters)
for k, v in vars(self.factory).items():
if self._is_declaration(k, v):
self.declarations[k] = v
if self._is_postgen_declaration(k, v):
self.postgen_declarations[k] = v
if params is not None:
for k, v in vars(params).items():
if not k.startswith('_'):
self.parameters[k] = v
self.parameters_dependencies = self._compute_parameter_dependencies(self.parameters)
def _get_counter_reference(self):
"""Identify which factory should be used for a shared counter."""
if (self.model is not None
and self.base_factory is not None
and self.base_factory._meta.model is not None
and issubclass(self.model, self.base_factory._meta.model)):
return self.base_factory
else:
return self.factory
def _is_declaration(self, name, value):
"""Determines if a class attribute is a field value declaration.
Based on the name and value of the class attribute, return ``True`` if
it looks like a declaration of a default field value, ``False`` if it
is private (name starts with '_') or a classmethod or staticmethod.
"""
if isinstance(value, (classmethod, staticmethod)):
return False
elif isinstance(value, declarations.OrderedDeclaration):
return True
elif isinstance(value, declarations.PostGenerationDeclaration):
return False
return not name.startswith("_")
def _is_postgen_declaration(self, name, value):
"""Captures instances of PostGenerationDeclaration."""
return isinstance(value, declarations.PostGenerationDeclaration)
def _compute_parameter_dependencies(self, parameters):
"""Find out in what order parameters should be called."""
# Warning: parameters only provide reverse dependencies; we reverse them into standard dependencies.
        # deep_revdeps: set of fields a field depends on indirectly
deep_revdeps = collections.defaultdict(set)
# Actual, direct dependencies
deps = collections.defaultdict(set)
for name, parameter in parameters.items():
if isinstance(parameter, declarations.ComplexParameter):
field_revdeps = parameter.get_revdeps(parameters)
if not field_revdeps:
continue
deep_revdeps[name] = set.union(*(deep_revdeps[dep] for dep in field_revdeps))
deep_revdeps[name] |= set(field_revdeps)
for dep in field_revdeps:
deps[dep].add(name)
# Check for cyclical dependencies
cyclic = [name for name, field_deps in deep_revdeps.items() if name in field_deps]
if cyclic:
raise errors.CyclicDefinitionError(
"Cyclic definition detected on %s' Params around %s"
% (self.factory, ', '.join(cyclic)))
return deps
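    # Illustration with hypothetical parameters: if parameter 'p' reports
    # get_revdeps() == ['a'], then 'a' is influenced by 'p', and the returned
    # mapping is {'a': {'p'}}, i.e. 'p' must be evaluated before 'a'.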
def __str__(self):
return "<%s for %s>" % (self.__class__.__name__, self.factory.__class__.__name__)
def __repr__(self):
return str(self)
# Factory base classes
class _Counter(object):
"""Simple, naive counter.
Attributes:
        for_class (obj): the class this counter relates to
seq (int): the next value
"""
def __init__(self, seq, for_class):
self.seq = seq
self.for_class = for_class
def next(self):
value = self.seq
self.seq += 1
return value
def reset(self, next_value=0):
self.seq = next_value
def __repr__(self):
return '<_Counter for %s.%s, next=%d>' % (
self.for_class.__module__, self.for_class.__name__, self.seq)
class BaseFactory(object):
"""Factory base support for sequences, attributes and stubs."""
# Backwards compatibility
UnknownStrategy = errors.UnknownStrategy
UnsupportedStrategy = errors.UnsupportedStrategy
def __new__(cls, *args, **kwargs):
"""Would be called if trying to instantiate the class."""
raise errors.FactoryError('You cannot instantiate BaseFactory')
_meta = FactoryOptions()
# ID to use for the next 'declarations.Sequence' attribute.
_counter = None
@classmethod
def reset_sequence(cls, value=None, force=False):
"""Reset the sequence counter.
Args:
value (int or None): the new 'next' sequence value; if None,
recompute the next value from _setup_next_sequence().
force (bool): whether to force-reset parent sequence counters
in a factory inheritance chain.
"""
if cls._meta.counter_reference is not cls:
if force:
cls._meta.base_factory.reset_sequence(value=value)
else:
raise ValueError(
"Cannot reset the sequence of a factory subclass. "
"Please call reset_sequence() on the root factory, "
"or call reset_sequence(force=True)."
)
else:
cls._setup_counter()
if value is None:
value = cls._setup_next_sequence()
cls._counter.reset(value)
@classmethod
def _setup_next_sequence(cls):
"""Set up an initial sequence value for Sequence attributes.
Returns:
int: the first available ID to use for instances of this factory.
"""
return 0
@classmethod
def _setup_counter(cls):
"""Ensures cls._counter is set for this class.
Due to the way inheritance works in Python, we need to ensure that the
``_counter`` attribute has been initialized for *this* Factory subclass,
not one of its parents.
"""
if cls._counter is None or cls._counter.for_class != cls:
first_seq = cls._setup_next_sequence()
cls._counter = _Counter(for_class=cls, seq=first_seq)
logger.debug("%r: Setting up next sequence (%d)", cls, first_seq)
@classmethod
def _generate_next_sequence(cls):
"""Retrieve a new sequence ID.
This will call, in order:
- _generate_next_sequence from the base factory, if provided
- _setup_next_sequence, if this is the 'toplevel' factory and the
sequence counter wasn't initialized yet; then increase it.
"""
# Rely upon our parents
if cls._meta.counter_reference is not cls:
logger.debug("%r: reusing sequence from %r", cls, cls._meta.base_factory)
return cls._meta.base_factory._generate_next_sequence()
# Make sure _counter is initialized
cls._setup_counter()
# Pick current value, then increase class counter for the next call.
return cls._counter.next()
@classmethod
def attributes(cls, create=False, extra=None):
"""Build a dict of attribute values, respecting declaration order.
The process is:
- Handle 'orderless' attributes, overriding defaults with provided
kwargs when applicable
- Handle ordered attributes, overriding them with provided kwargs when
applicable; the current list of computed attributes is available
to the currently processed object.
"""
force_sequence = None
if extra:
force_sequence = extra.pop('__sequence', None)
log_ctx = '%s.%s' % (cls.__module__, cls.__name__)
logger.debug('BaseFactory: Preparing %s.%s(extra=%r)',
cls.__module__,
cls.__name__,
extra,
)
return containers.AttributeBuilder(cls, extra, log_ctx=log_ctx).build(
create=create,
force_sequence=force_sequence,
)
@classmethod
def declarations(cls, extra_defs=None):
"""Retrieve a copy of the declared attributes.
Args:
extra_defs (dict): additional definitions to insert into the
retrieved DeclarationDict.
"""
decls = cls._meta.declarations.copy()
decls.update(extra_defs or {})
return decls
@classmethod
def _rename_fields(cls, **kwargs):
for old_name, new_name in cls._meta.rename.items():
kwargs[new_name] = kwargs.pop(old_name)
return kwargs
@classmethod
def _adjust_kwargs(cls, **kwargs):
"""Extension point for custom kwargs adjustment."""
return kwargs
@classmethod
def _load_model_class(cls, class_definition):
"""Extension point for loading model classes.
This can be overridden in framework-specific subclasses to hook into
existing model repositories, for instance.
"""
return class_definition
@classmethod
def _get_model_class(cls):
"""Retrieve the actual, associated model class."""
definition = cls._meta.model
return cls._load_model_class(definition)
@classmethod
def _prepare(cls, create, **kwargs):
"""Prepare an object for this factory.
Args:
create: bool, whether to create or to build the object
**kwargs: arguments to pass to the creation function
"""
model_class = cls._get_model_class()
kwargs = cls._rename_fields(**kwargs)
kwargs = cls._adjust_kwargs(**kwargs)
# Remove 'hidden' arguments.
for arg in cls._meta.exclude:
del kwargs[arg]
# Remove parameters, if defined
for arg in cls._meta.parameters:
kwargs.pop(arg, None)
# Extract *args from **kwargs
args = tuple(kwargs.pop(key) for key in cls._meta.inline_args)
logger.debug('BaseFactory: Generating %s.%s(%s)',
cls.__module__,
cls.__name__,
utils.log_pprint(args, kwargs),
)
if create:
return cls._create(model_class, *args, **kwargs)
else:
return cls._build(model_class, *args, **kwargs)
@classmethod
def _generate(cls, create, attrs):
"""generate the object.
Args:
create (bool): whether to 'build' or 'create' the object
attrs (dict): attributes to use for generating the object
"""
if cls._meta.abstract:
raise errors.FactoryError(
"Cannot generate instances of abstract factory %(f)s; "
"Ensure %(f)s.Meta.model is set and %(f)s.Meta.abstract "
"is either not set or False." % dict(f=cls.__name__))
# Extract declarations used for post-generation
postgen_declarations = cls._meta.postgen_declarations
postgen_attributes = {}
for name, decl in sorted(postgen_declarations.items()):
postgen_attributes[name] = decl.extract(name, attrs)
# Generate the object
obj = cls._prepare(create, **attrs)
# Handle post-generation attributes
results = {}
for name, decl in sorted(postgen_declarations.items()):
extraction_context = postgen_attributes[name]
results[name] = decl.call(obj, create, extraction_context)
cls._after_postgeneration(obj, create, results)
return obj
@classmethod
def _after_postgeneration(cls, obj, create, results=None):
"""Hook called after post-generation declarations have been handled.
Args:
obj (object): the generated object
create (bool): whether the strategy was 'build' or 'create'
results (dict or None): result of post-generation declarations
"""
pass
@classmethod
def _build(cls, model_class, *args, **kwargs):
"""Actually build an instance of the model_class.
Customization point, will be called once the full set of args and kwargs
has been computed.
Args:
model_class (type): the class for which an instance should be
built
args (tuple): arguments to use when building the class
kwargs (dict): keyword arguments to use when building the class
"""
return model_class(*args, **kwargs)
@classmethod
def _create(cls, model_class, *args, **kwargs):
"""Actually create an instance of the model_class.
Customization point, will be called once the full set of args and kwargs
has been computed.
Args:
model_class (type): the class for which an instance should be
created
args (tuple): arguments to use when creating the class
kwargs (dict): keyword arguments to use when creating the class
"""
return model_class(*args, **kwargs)
@classmethod
def build(cls, **kwargs):
"""Build an instance of the associated class, with overriden attrs."""
attrs = cls.attributes(create=False, extra=kwargs)
return cls._generate(False, attrs)
@classmethod
def build_batch(cls, size, **kwargs):
"""Build a batch of instances of the given class, with overriden attrs.
Args:
size (int): the number of instances to build
Returns:
object list: the built instances
"""
return [cls.build(**kwargs) for _ in range(size)]
@classmethod
def create(cls, **kwargs):
"""Create an instance of the associated class, with overriden attrs."""
attrs = cls.attributes(create=True, extra=kwargs)
return cls._generate(True, attrs)
@classmethod
def create_batch(cls, size, **kwargs):
"""Create a batch of instances of the given class, with overriden attrs.
Args:
size (int): the number of instances to create
Returns:
object list: the created instances
"""
return [cls.create(**kwargs) for _ in range(size)]
@classmethod
def stub(cls, **kwargs):
"""Retrieve a stub of the associated class, with overriden attrs.
This will return an object whose attributes are those defined in this
factory's declarations or in the extra kwargs.
"""
stub_object = containers.StubObject()
for name, value in cls.attributes(create=False, extra=kwargs).items():
setattr(stub_object, name, value)
return stub_object
@classmethod
def stub_batch(cls, size, **kwargs):
"""Stub a batch of instances of the given class, with overriden attrs.
Args:
size (int): the number of instances to stub
Returns:
object list: the stubbed instances
"""
return [cls.stub(**kwargs) for _ in range(size)]
@classmethod
def generate(cls, strategy, **kwargs):
"""Generate a new instance.
The instance will be created with the given strategy (one of
BUILD_STRATEGY, CREATE_STRATEGY, STUB_STRATEGY).
Args:
strategy (str): the strategy to use for generating the instance.
Returns:
object: the generated instance
"""
assert strategy in (STUB_STRATEGY, BUILD_STRATEGY, CREATE_STRATEGY)
action = getattr(cls, strategy)
return action(**kwargs)
@classmethod
def generate_batch(cls, strategy, size, **kwargs):
"""Generate a batch of instances.
The instances will be created with the given strategy (one of
BUILD_STRATEGY, CREATE_STRATEGY, STUB_STRATEGY).
Args:
strategy (str): the strategy to use for generating the instance.
size (int): the number of instances to generate
Returns:
object list: the generated instances
"""
assert strategy in (STUB_STRATEGY, BUILD_STRATEGY, CREATE_STRATEGY)
batch_action = getattr(cls, '%s_batch' % strategy)
return batch_action(size, **kwargs)
@classmethod
def simple_generate(cls, create, **kwargs):
"""Generate a new instance.
The instance will be either 'built' or 'created'.
Args:
create (bool): whether to 'build' or 'create' the instance.
Returns:
object: the generated instance
"""
strategy = CREATE_STRATEGY if create else BUILD_STRATEGY
return cls.generate(strategy, **kwargs)
@classmethod
def simple_generate_batch(cls, create, size, **kwargs):
"""Generate a batch of instances.
These instances will be either 'built' or 'created'.
Args:
size (int): the number of instances to generate
create (bool): whether to 'build' or 'create' the instances.
Returns:
object list: the generated instances
"""
strategy = CREATE_STRATEGY if create else BUILD_STRATEGY
return cls.generate_batch(strategy, size, **kwargs)
Factory = FactoryMetaClass('Factory', (BaseFactory,), {
'Meta': BaseMeta,
'__doc__': """Factory base with build and create support.
This class has the ability to support multiple ORMs by using custom creation
functions.
""",
})
# Backwards compatibility
Factory.AssociatedClassError = errors.AssociatedClassError # pylint: disable=W0201
class StubFactory(Factory):
class Meta:
strategy = STUB_STRATEGY
model = containers.StubObject
@classmethod
def build(cls, **kwargs):
return cls.stub(**kwargs)
@classmethod
def create(cls, **kwargs):
raise errors.UnsupportedStrategy()
class BaseDictFactory(Factory):
"""Factory for dictionary-like classes."""
class Meta:
abstract = True
@classmethod
def _build(cls, model_class, *args, **kwargs):
        if args:
            raise ValueError(
                "DictFactory %r does not support Meta.inline_args." % cls)
return model_class(**kwargs)
@classmethod
def _create(cls, model_class, *args, **kwargs):
return cls._build(model_class, *args, **kwargs)
class DictFactory(BaseDictFactory):
class Meta:
model = dict
class BaseListFactory(Factory):
"""Factory for list-like classes."""
class Meta:
abstract = True
@classmethod
def _build(cls, model_class, *args, **kwargs):
        if args:
            raise ValueError(
                "ListFactory %r does not support Meta.inline_args." % cls)
values = [v for k, v in sorted(kwargs.items())]
return model_class(values)
@classmethod
def _create(cls, model_class, *args, **kwargs):
return cls._build(model_class, *args, **kwargs)
class ListFactory(BaseListFactory):
class Meta:
model = list
def use_strategy(new_strategy):
"""Force the use of a different strategy.
    This is an alternative to setting ``strategy`` in the factory's Meta.
"""
def wrapped_class(klass):
klass._meta.strategy = new_strategy
return klass
return wrapped_class
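# A minimal usage sketch, not part of the original module. `User` and
# `UserFactory` are hypothetical; `declarations.Sequence` is the ordered
# declaration shipped with this package.
def _factory_usage_example():
    class User(object):
        def __init__(self, name, email):
            self.name = name
            self.email = email

    class UserFactory(Factory):
        class Meta:
            model = User
            strategy = BUILD_STRATEGY  # calling UserFactory() maps to build()

        name = declarations.Sequence(lambda n: 'user%d' % n)
        email = declarations.Sequence(lambda n: 'user%d@example.com' % n)

    user = UserFactory()                # dispatched through Meta.strategy
    batch = UserFactory.build_batch(3)  # three independent instances
    UserFactory.reset_sequence()        # restart the shared counter
    return user, batch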
| z1gm4/desarrollo_web_udp | env/lib/python2.7/site-packages/factory/base.py | Python | gpl-3.0 | 26,259 |
# -*- coding:utf-8 -*-
from ...errors.httpinternalservererrorexception import HttpInternalServerErrorException
import saklient
# module saklient.cloud.errors.unknownexception
class UnknownException(HttpInternalServerErrorException):
    ## An unexpected error occurred. If this error occurs repeatedly, please check the support site and maintenance information.
## @param {int} status
# @param {str} code=None
# @param {str} message=""
def __init__(self, status, code=None, message=""):
super(UnknownException, self).__init__(status, code, "予期しないエラーが発生しました。このエラーが繰り返し発生する場合は、サポートサイトやメンテナンス情報をご確認ください。" if message is None or message == "" else message)
| hnakamur/saklient.python | saklient/cloud/errors/unknownexception.py | Python | mit | 863 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('posts', '0009_auto_20150320_2009'),
]
operations = [
migrations.AlterField(
model_name='post',
name='content_type',
field=models.CharField(max_length=32, choices=[(b'text/plain', b'Plain text'), (b'text/x-markdown', b'Markdown'), (b'text/html', b'HTML')]),
preserve_default=True,
),
]
| CMPUT410W15T02/CMPUT410W15-project | social_distribution/posts/migrations/0010_auto_20150320_2041.py | Python | gpl-2.0 | 541 |
import sqlite3
import sys
from tkinter import *
db=sqlite3.connect('animal.db')
ans=''
def init_database():
try:
db.execute('CREATE TABLE objects(id integer primary key AUTOINCREMENT,name text);')
db.execute('CREATE TABLE questions(id integer primary key AUTOINCREMENT,q text);')
db.execute('CREATE TABLE data(object_id integer,question_id integer,value text);')
db.execute('insert into objects(name) values(?)',('elephant',))
db.execute('insert into objects(name) values(?)',('zebra',))
db.execute('insert into objects(name) values(?)',('cheetah',))
db.execute('insert into objects(name) values(?)',('cat',))
db.execute('insert into objects(name) values(?)',('dog',))
db.execute('insert into questions(q) values(?)',('Is it a carnivore',))
db.execute('insert into questions(q) values(?)',('Does the animal has specific colour/pattern',))
db.execute('insert into questions(q) values(?)',('Is it domesticable',))
db.execute('insert into questions(q) values(?)',('Is it Bigger than buffallo',))
db.execute('insert into questions(q) values(?)',('Is it a stronger than human',))
db.execute('insert into questions(q) values(?)',('Is it called the mans best friend',))
db.execute('insert into data values(?,?,?)',(1,1,'no'))
db.execute('insert into data values(?,?,?)',(2,1,'no'))
db.execute('insert into data values(?,?,?)',(3,1,'yes'))
db.execute('insert into data values(?,?,?)',(4,1,'yes'))
db.execute('insert into data values(?,?,?)',(5,1,'yes'))
db.execute('insert into data values(?,?,?)',(1,2,'yes'))
db.execute('insert into data values(?,?,?)',(2,2,'yes'))
db.execute('insert into data values(?,?,?)',(3,2,'yes'))
db.execute('insert into data values(?,?,?)',(4,2,'no'))
db.execute('insert into data values(?,?,?)',(5,2,'no'))
db.execute('insert into data values(?,?,?)',(1,3,'no'))
db.execute('insert into data values(?,?,?)',(2,3,'no'))
db.execute('insert into data values(?,?,?)',(3,3,'no'))
db.execute('insert into data values(?,?,?)',(4,3,'yes'))
db.execute('insert into data values(?,?,?)',(5,3,'yes'))
db.execute('insert into data values(?,?,?)',(1,4,'yes'))
db.execute('insert into data values(?,?,?)',(2,4,'no'))
db.execute('insert into data values(?,?,?)',(3,4,'no'))
db.execute('insert into data values(?,?,?)',(4,4,'no'))
db.execute('insert into data values(?,?,?)',(5,4,'no'))
db.execute('insert into data values(?,?,?)',(1,5,'yes'))
db.execute('insert into data values(?,?,?)',(2,5,'no'))
db.execute('insert into data values(?,?,?)',(3,5,'yes'))
db.execute('insert into data values(?,?,?)',(4,5,'no'))
db.execute('insert into data values(?,?,?)',(5,5,'no'))
db.execute('insert into data values(?,?,?)',(1,6,'no'))
db.execute('insert into data values(?,?,?)',(2,6,'no'))
db.execute('insert into data values(?,?,?)',(3,6,'no'))
db.execute('insert into data values(?,?,?)',(4,6,'no'))
db.execute('insert into data values(?,?,?)',(5,6,'yes'))
db.commit()
    except sqlite3.OperationalError:
        # Tables already exist; the database was seeded on a previous run.
        pass
def entropy(objects, question):
    '''Entropy is low if, for a given question, the numbers of yes and no
    answers are approximately equal.'''
    yeses = get_num_positives(objects, question)
    nos = get_num_negatives(objects, question)
    # The closer the yes/no split is to even, the lower (better) the score.
    return abs(yeses - nos)
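# Worked illustration: if two of three candidate objects answer 'yes' and one
# answers 'no' to a question, entropy() returns abs(2 - 1) == 1; an even
# split scores 0 and makes the most informative next question.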
def get_num_positives(object_tuple, question_id):
'''Returns the number of objects in the object_tuple where the value for the
given question_id is positive.'''
where = "object_id IN %s AND question_id=%d AND value='yes'" %(object_tuple, question_id)
rows = db.execute('select count(*) from data where '+where)
return int((tuple(rows))[0][0])
def get_num_negatives(object_tuple, question_id):
'''Returns the number of objects in the object_tuple where the value for the
given question_id is negative.'''
where = "object_id in %s AND question_id=%d AND value='no'" %(object_tuple, question_id)
rows = db.execute('select count(*) from data where '+where)
return int((tuple(rows))[0][0])
def get_objects():
'''Returns all the objects in database'''
return db.execute('select * from objects')
def get_data(ans, question_id,objects):
'''Returns the object ids which satisfy condition in the database.'''
l=[]
where = "value='"+ans+"' AND question_id="+str(question_id)
for i in db.execute("select object_id from data where "+where):
if i[0] in objects:
l.append(i[0])
return l
def get_questions():
'''Returns all the questions in the database'''
return db.execute('select * from questions')
def yes_count(q_id):
'''counts the number of yes for a particular question in the data table '''
return tuple(db.execute("select count(*) from data where question_id=%d AND value='yes'" %(q_id)))[0][0]
class diag:
def __init__(self, parent,quest,yesno):
self.parent=parent
self.yesno=yesno
self.parent.bind("<Return>", self.ok)
self.parent.bind("<Escape>", self.quit)
self.l1=Label(self.parent, text=(quest+self.yesno))
self.l1.pack()
self.e1=Entry(self.parent, bd =5)
self.e1.pack()
self.e1.focus_set()
self.b1=Button(self.parent, borderwidth=2, text="OK", bd=5)
self.b1.pack(side=LEFT)
self.b1.bind("<ButtonPress-1>", self.ok)
self.b2=Button(self.parent, borderwidth=2, text="Quit", bd=5)
self.b2.pack(side = RIGHT)
self.b2.bind("<ButtonPress-1>", self.quit)
def ok(self, event=None):
global ans
ans=self.e1.get()
self.parent.destroy()
def quit(self, event=None):
sys.exit(0)
def main():
global ans
init_database()
objects = get_objects()
objects = [obj[0] for obj in objects]
questions =list(get_questions())
while len(objects)!=1 and questions:
index=0
q_id=questions[0][0]
minimum=entropy(tuple(objects), questions[0][0])
for i,question in enumerate(questions):
if minimum>entropy(tuple(objects), question[0]):
q_id=question[0]
index=i
minimum=entropy(tuple(objects), question[0])
root = Tk()
diag(root,questions[index][1]," (yes/no)")
root.mainloop()
objects=get_data(ans,q_id,objects)
questions.pop(index)
i=objects[0]
objects = tuple(get_objects())
root = Tk()
diag(root,"our guess is "+objects[i-1][1]+' is it right '," (yes/no)")
root.mainloop()
if ans=='no':
root = Tk()
diag(root,"could you please provide some details so that we can enter it in our database"," (yes/no)")
root.mainloop()
if ans=='yes':
root = Tk()
diag(root,"enter the name of the animal"," ")
root.mainloop()
db.execute('insert into objects(name) values(?)',(ans,))
root = Tk()
diag(root,"enter a question which is specific to it(which has an answer yes to it)",'')
root.mainloop()
db.execute('insert into questions(q) values(?)',(ans,))
objects = tuple(get_objects())
questions =tuple(get_questions())
obj=objects[-1][0]
question=questions[-1][0]
db.execute('insert into data values(?,?,?)',(obj,question,'yes'))
for i in objects:
if i[0]!=objects[-1][0]:
db.execute('insert into data values(?,?,?)',(i[0],question,'no'))
root = Tk()
diag(root,"please answer a few more questions","")
root.mainloop()
for i in questions :
if i[0] !=question:
if yes_count(i[0]) != 1 :
root = Tk()
diag(root,i[1]," (yes/no)")
root.mainloop()
db.execute('insert into data values(?,?,?)',(obj,i[0],ans))
else:
db.execute('insert into data values(?,?,?)',(obj,i[0],'no'))
db.commit()
main() | sharadboni/Telepathist | Telepathist.py | Python | mit | 8,700 |
from .base import BaseHandler
class IndexHandler(BaseHandler):
def get(self):
self.template('front/index.html')
| jsannemo/programming-battle | battle/battle/frontend/controllers/front.py | Python | bsd-2-clause | 127 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model configurations for nasnet.
Paper: https://arxiv.org/abs/1707.07012
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from models import model
from models.tf1_only import nasnet_utils
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib import layers as contrib_layers
from tensorflow.contrib import slim
from tensorflow.contrib import training as contrib_training
arg_scope = contrib_framework.arg_scope
# Notes for training NASNet Cifar Model
# -------------------------------------
# batch_size: 32
# learning rate: 0.025
# cosine (single period) learning rate decay
# auxiliary head loss weighting: 0.4
# clip global norm of all gradients by 5
def _cifar_config(is_training=True, data_format=None, total_steps=None):
drop_path_keep_prob = 1.0 if not is_training else 0.6
return contrib_training.HParams(
stem_multiplier=3.0,
drop_path_keep_prob=drop_path_keep_prob,
num_cells=18,
use_aux_head=1,
num_conv_filters=32,
dense_dropout_keep_prob=1.0,
filter_scaling_rate=2.0,
num_reduction_layers=2,
skip_reduction_layer_input=0,
data_format=data_format or 'NHWC',
# 600 epochs with a batch size of 32
# This is used for the drop path probabilities since it needs to increase
# the drop out probability over the course of training.
total_training_steps=total_steps or 937500,
)
# Notes for training large NASNet model on ImageNet
# -------------------------------------
# batch size (per replica): 16
# learning rate: 0.015 * 100
# learning rate decay factor: 0.97
# num epochs per decay: 2.4
# sync sgd with 100 replicas
# auxiliary head loss weighting: 0.4
# label smoothing: 0.1
# clip global norm of all gradients by 10
def _large_imagenet_config(is_training=True, data_format=None,
total_steps=None):
drop_path_keep_prob = 1.0 if not is_training else 0.7
return contrib_training.HParams(
stem_multiplier=3.0,
dense_dropout_keep_prob=0.5,
num_cells=18,
filter_scaling_rate=2.0,
num_conv_filters=168,
drop_path_keep_prob=drop_path_keep_prob,
use_aux_head=1,
num_reduction_layers=2,
skip_reduction_layer_input=1,
data_format=data_format or 'NHWC',
total_training_steps=total_steps or 250000,
)
# Notes for training the mobile NASNet ImageNet model
# -------------------------------------
# batch size (per replica): 32
# learning rate: 0.04 * 50
# learning rate scaling factor: 0.97
# num epochs per decay: 2.4
# sync sgd with 50 replicas
# auxiliary head weighting: 0.4
# label smoothing: 0.1
# clip global norm of all gradients by 10
def _mobile_imagenet_config(data_format=None, total_steps=None):
return contrib_training.HParams(
stem_multiplier=1.0,
dense_dropout_keep_prob=0.5,
num_cells=12,
filter_scaling_rate=2.0,
drop_path_keep_prob=1.0,
num_conv_filters=44,
use_aux_head=1,
num_reduction_layers=2,
skip_reduction_layer_input=0,
data_format=data_format or 'NHWC',
total_training_steps=total_steps or 250000,
)
def nasnet_cifar_arg_scope(weight_decay=5e-4,
batch_norm_decay=0.9,
batch_norm_epsilon=1e-5):
"""Defines the default arg scope for the NASNet-A Cifar model.
Args:
weight_decay: The weight decay to use for regularizing the model.
batch_norm_decay: Decay for batch norm moving average.
batch_norm_epsilon: Small float added to variance to avoid dividing by zero
in batch norm.
Returns:
An `arg_scope` to use for the NASNet Cifar Model.
"""
batch_norm_params = {
# Decay for the moving averages.
'decay': batch_norm_decay,
# epsilon to prevent 0s in variance.
'epsilon': batch_norm_epsilon,
'scale': True,
'fused': True,
}
weights_regularizer = contrib_layers.l2_regularizer(weight_decay)
weights_initializer = contrib_layers.variance_scaling_initializer(
mode='FAN_OUT')
with arg_scope(
[slim.fully_connected, slim.conv2d, slim.separable_conv2d],
weights_regularizer=weights_regularizer,
weights_initializer=weights_initializer):
with arg_scope([slim.fully_connected], activation_fn=None, scope='FC'):
with arg_scope(
[slim.conv2d, slim.separable_conv2d],
activation_fn=None,
biases_initializer=None):
with arg_scope([slim.batch_norm], **batch_norm_params) as sc:
return sc
def nasnet_mobile_arg_scope(weight_decay=4e-5,
batch_norm_decay=0.9997,
batch_norm_epsilon=1e-3):
"""Defines the default arg scope for the NASNet-A Mobile ImageNet model.
Args:
weight_decay: The weight decay to use for regularizing the model.
batch_norm_decay: Decay for batch norm moving average.
batch_norm_epsilon: Small float added to variance to avoid dividing by zero
in batch norm.
Returns:
An `arg_scope` to use for the NASNet Mobile Model.
"""
batch_norm_params = {
# Decay for the moving averages.
'decay': batch_norm_decay,
# epsilon to prevent 0s in variance.
'epsilon': batch_norm_epsilon,
'scale': True,
'fused': True,
}
weights_regularizer = contrib_layers.l2_regularizer(weight_decay)
weights_initializer = contrib_layers.variance_scaling_initializer(
mode='FAN_OUT')
with arg_scope(
[slim.fully_connected, slim.conv2d, slim.separable_conv2d],
weights_regularizer=weights_regularizer,
weights_initializer=weights_initializer):
with arg_scope([slim.fully_connected], activation_fn=None, scope='FC'):
with arg_scope(
[slim.conv2d, slim.separable_conv2d],
activation_fn=None,
biases_initializer=None):
with arg_scope([slim.batch_norm], **batch_norm_params) as sc:
return sc
def nasnet_large_arg_scope(weight_decay=5e-5,
batch_norm_decay=0.9997,
batch_norm_epsilon=1e-3):
"""Defines the default arg scope for the NASNet-A Large ImageNet model.
Args:
weight_decay: The weight decay to use for regularizing the model.
batch_norm_decay: Decay for batch norm moving average.
batch_norm_epsilon: Small float added to variance to avoid dividing by zero
in batch norm.
Returns:
An `arg_scope` to use for the NASNet Large Model.
"""
batch_norm_params = {
# Decay for the moving averages.
'decay': batch_norm_decay,
# epsilon to prevent 0s in variance.
'epsilon': batch_norm_epsilon,
'scale': True,
'fused': True,
}
weights_regularizer = contrib_layers.l2_regularizer(weight_decay)
weights_initializer = contrib_layers.variance_scaling_initializer(
mode='FAN_OUT')
with arg_scope(
[slim.fully_connected, slim.conv2d, slim.separable_conv2d],
weights_regularizer=weights_regularizer,
weights_initializer=weights_initializer):
with arg_scope([slim.fully_connected], activation_fn=None, scope='FC'):
with arg_scope(
[slim.conv2d, slim.separable_conv2d],
activation_fn=None,
biases_initializer=None):
with arg_scope([slim.batch_norm], **batch_norm_params) as sc:
return sc
def _build_aux_head(net, end_points, num_classes, hparams, scope):
"""Auxiliary head used for all models across all datasets."""
with tf.variable_scope(scope):
aux_logits = tf.identity(net)
with tf.variable_scope('aux_logits'):
aux_logits = slim.avg_pool2d(
aux_logits, [5, 5], stride=3, padding='VALID')
aux_logits = slim.conv2d(aux_logits, 128, [1, 1], scope='proj')
aux_logits = slim.batch_norm(aux_logits, scope='aux_bn0')
aux_logits = tf.nn.relu(aux_logits)
# Shape of feature map before the final layer.
shape = aux_logits.shape
if hparams.data_format == 'NHWC':
shape = shape[1:3]
else:
shape = shape[2:4]
aux_logits = slim.conv2d(aux_logits, 768, shape, padding='VALID')
aux_logits = slim.batch_norm(aux_logits, scope='aux_bn1')
aux_logits = tf.nn.relu(aux_logits)
aux_logits = contrib_layers.flatten(aux_logits)
aux_logits = slim.fully_connected(aux_logits, num_classes)
end_points['AuxLogits'] = aux_logits
def _imagenet_stem(inputs, hparams, stem_cell):
"""Stem used for models trained on ImageNet."""
num_stem_cells = 2
# 149 x 149 x 32
num_stem_filters = int(32 * hparams.stem_multiplier)
net = slim.conv2d(
inputs,
num_stem_filters, [3, 3],
stride=2,
scope='conv0',
padding='VALID')
net = slim.batch_norm(net, scope='conv0_bn')
# Run the reduction cells
cell_outputs = [None, net]
filter_scaling = 1.0 / (hparams.filter_scaling_rate**num_stem_cells)
for cell_num in range(num_stem_cells):
net = stem_cell(
net,
scope='cell_stem_{}'.format(cell_num),
filter_scaling=filter_scaling,
stride=2,
prev_layer=cell_outputs[-2],
cell_num=cell_num)
cell_outputs.append(net)
filter_scaling *= hparams.filter_scaling_rate
return net, cell_outputs
def _cifar_stem(inputs, hparams):
"""Stem used for models trained on Cifar."""
num_stem_filters = int(hparams.num_conv_filters * hparams.stem_multiplier)
net = slim.conv2d(inputs, num_stem_filters, 3, scope='l1_stem_3x3')
net = slim.batch_norm(net, scope='l1_stem_bn')
return net, [None, net]
def build_nasnet_cifar(images,
num_classes=None,
is_training=True,
data_format=None,
total_steps=None):
"""Build NASNet model for the Cifar Dataset."""
hparams = _cifar_config(
is_training=is_training, data_format=data_format, total_steps=total_steps)
if tf.test.is_gpu_available() and hparams.data_format == 'NHWC':
tf.logging.info('A GPU is available on the machine, consider using NCHW '
'data format for increased speed on GPU.')
# Calculate the total number of cells in the network
# Add 2 for the reduction cells
total_num_cells = hparams.num_cells + 2
normal_cell = nasnet_utils.NasNetANormalCell(
hparams.num_conv_filters, hparams.drop_path_keep_prob, total_num_cells,
hparams.total_training_steps)
reduction_cell = nasnet_utils.NasNetAReductionCell(
hparams.num_conv_filters, hparams.drop_path_keep_prob, total_num_cells,
hparams.total_training_steps)
with arg_scope(
[slim.dropout, nasnet_utils.drop_path, slim.batch_norm],
is_training=is_training):
with arg_scope(
[
slim.avg_pool2d, slim.max_pool2d, slim.conv2d, slim.batch_norm,
slim.separable_conv2d, nasnet_utils.factorized_reduction,
nasnet_utils.global_avg_pool, nasnet_utils.get_channel_index,
nasnet_utils.get_channel_dim
],
data_format=hparams.data_format):
return _build_nasnet_base(
images,
normal_cell=normal_cell,
reduction_cell=reduction_cell,
num_classes=num_classes,
hparams=hparams,
is_training=is_training,
stem_type='cifar')
build_nasnet_cifar.default_image_size = 32
def build_nasnet_mobile(images,
num_classes=None,
is_training=True,
data_format=None,
total_steps=None,
final_endpoint=None):
"""Build NASNet Mobile model for the ImageNet Dataset."""
hparams = _mobile_imagenet_config(
data_format=data_format, total_steps=total_steps)
if tf.test.is_gpu_available() and hparams.data_format == 'NHWC':
tf.logging.info('A GPU is available on the machine, consider using NCHW '
'data format for increased speed on GPU.')
# Calculate the total number of cells in the network
# Add 2 for the reduction cells
total_num_cells = hparams.num_cells + 2
# If ImageNet, then add an additional two for the stem cells
total_num_cells += 2
normal_cell = nasnet_utils.NasNetANormalCell(
hparams.num_conv_filters, hparams.drop_path_keep_prob, total_num_cells,
hparams.total_training_steps)
reduction_cell = nasnet_utils.NasNetAReductionCell(
hparams.num_conv_filters, hparams.drop_path_keep_prob, total_num_cells,
hparams.total_training_steps)
with arg_scope(
[slim.dropout, nasnet_utils.drop_path, slim.batch_norm],
is_training=is_training):
with arg_scope(
[
slim.avg_pool2d, slim.max_pool2d, slim.conv2d, slim.batch_norm,
slim.separable_conv2d, nasnet_utils.factorized_reduction,
nasnet_utils.global_avg_pool, nasnet_utils.get_channel_index,
nasnet_utils.get_channel_dim
],
data_format=hparams.data_format):
return _build_nasnet_base(
images,
normal_cell=normal_cell,
reduction_cell=reduction_cell,
num_classes=num_classes,
hparams=hparams,
is_training=is_training,
stem_type='imagenet',
final_endpoint=final_endpoint)
build_nasnet_mobile.default_image_size = 224
def build_nasnet_large(images,
num_classes=None,
is_training=True,
data_format=None,
total_steps=None,
final_endpoint=None):
"""Build NASNet Large model for the ImageNet Dataset."""
hparams = _large_imagenet_config(
is_training=is_training, data_format=data_format, total_steps=total_steps)
if tf.test.is_gpu_available() and hparams.data_format == 'NHWC':
tf.logging.info('A GPU is available on the machine, consider using NCHW '
'data format for increased speed on GPU.')
# Calculate the total number of cells in the network
# Add 2 for the reduction cells
total_num_cells = hparams.num_cells + 2
# If ImageNet, then add an additional two for the stem cells
total_num_cells += 2
normal_cell = nasnet_utils.NasNetANormalCell(
hparams.num_conv_filters, hparams.drop_path_keep_prob, total_num_cells,
hparams.total_training_steps)
reduction_cell = nasnet_utils.NasNetAReductionCell(
hparams.num_conv_filters, hparams.drop_path_keep_prob, total_num_cells,
hparams.total_training_steps)
with arg_scope(
[slim.dropout, nasnet_utils.drop_path, slim.batch_norm],
is_training=is_training):
with arg_scope(
[
slim.avg_pool2d, slim.max_pool2d, slim.conv2d, slim.batch_norm,
slim.separable_conv2d, nasnet_utils.factorized_reduction,
nasnet_utils.global_avg_pool, nasnet_utils.get_channel_index,
nasnet_utils.get_channel_dim
],
data_format=hparams.data_format):
return _build_nasnet_base(
images,
normal_cell=normal_cell,
reduction_cell=reduction_cell,
num_classes=num_classes,
hparams=hparams,
is_training=is_training,
stem_type='imagenet',
final_endpoint=final_endpoint)
build_nasnet_large.default_image_size = 331
def _build_nasnet_base(images,
normal_cell,
reduction_cell,
num_classes,
hparams,
is_training,
stem_type,
final_endpoint=None):
"""Constructs a NASNet image model."""
end_points = {}
def add_and_check_endpoint(endpoint_name, net):
end_points[endpoint_name] = net
return final_endpoint and (endpoint_name == final_endpoint)
# Find where to place the reduction cells or stride normal cells
reduction_indices = nasnet_utils.calc_reduction_layers(
hparams.num_cells, hparams.num_reduction_layers)
stem_cell = reduction_cell
if stem_type == 'imagenet':
stem = lambda: _imagenet_stem(images, hparams, stem_cell)
elif stem_type == 'cifar':
stem = lambda: _cifar_stem(images, hparams)
else:
raise ValueError('Unknown stem_type: ', stem_type)
net, cell_outputs = stem()
if add_and_check_endpoint('Stem', net):
return net, end_points
# Setup for building in the auxiliary head.
aux_head_cell_idxes = []
if len(reduction_indices) >= 2:
aux_head_cell_idxes.append(reduction_indices[1] - 1)
# Run the cells
filter_scaling = 1.0
# true_cell_num accounts for the stem cells
true_cell_num = 2 if stem_type == 'imagenet' else 0
for cell_num in range(hparams.num_cells):
stride = 1
if hparams.skip_reduction_layer_input:
prev_layer = cell_outputs[-2]
if cell_num in reduction_indices:
filter_scaling *= hparams.filter_scaling_rate
net = reduction_cell(
net,
scope='reduction_cell_{}'.format(reduction_indices.index(cell_num)),
filter_scaling=filter_scaling,
stride=2,
prev_layer=cell_outputs[-2],
cell_num=true_cell_num)
if add_and_check_endpoint(
'Reduction_Cell_{}'.format(reduction_indices.index(cell_num)), net):
return net, end_points
true_cell_num += 1
cell_outputs.append(net)
if not hparams.skip_reduction_layer_input:
prev_layer = cell_outputs[-2]
net = normal_cell(
net,
scope='cell_{}'.format(cell_num),
filter_scaling=filter_scaling,
stride=stride,
prev_layer=prev_layer,
cell_num=true_cell_num)
if add_and_check_endpoint('Cell_{}'.format(cell_num), net):
return net, end_points
true_cell_num += 1
if (hparams.use_aux_head and cell_num in aux_head_cell_idxes and
num_classes and is_training):
aux_net = tf.nn.relu(net)
_build_aux_head(
aux_net,
end_points,
num_classes,
hparams,
scope='aux_{}'.format(cell_num))
cell_outputs.append(net)
# Final softmax layer
with tf.variable_scope('final_layer'):
net = tf.nn.relu(net)
net = nasnet_utils.global_avg_pool(net)
if add_and_check_endpoint('global_pool', net) or num_classes is None:
return net, end_points
net = slim.dropout(net, hparams.dense_dropout_keep_prob, scope='dropout')
logits = slim.fully_connected(net, num_classes)
if add_and_check_endpoint('Logits', logits):
return net, end_points
predictions = tf.nn.softmax(logits, name='predictions')
if add_and_check_endpoint('Predictions', predictions):
return net, end_points
return logits, end_points
class NasnetModel(model.CNNModel):
"""Nasnet model configuration."""
def __init__(self, params=None):
super(NasnetModel, self).__init__('nasnet', 224, 32, 0.005, params=params)
def add_inference(self, cnn):
tf.logging.info('input_image_shape: {}'.format(cnn.top_layer.shape))
cnn.top_layer, _ = build_nasnet_mobile(
images=cnn.top_layer,
is_training=cnn.phase_train,
data_format=cnn.data_format)
cnn.top_size = cnn.top_layer.shape[-1].value
class NasnetLargeModel(model.CNNModel):
"""Nasnet model configuration."""
def __init__(self, params=None):
super(NasnetLargeModel, self).__init__(
'nasnet', 331, 16, 0.005, params=params)
def add_inference(self, cnn):
tf.logging.info('input_image_shape: {}'.format(cnn.top_layer.shape))
cnn.top_layer, _ = build_nasnet_large(
images=cnn.top_layer,
is_training=cnn.phase_train,
data_format=cnn.data_format)
cnn.top_size = cnn.top_layer.shape[-1].value
class NasnetCifarModel(model.CNNModel):
"""Nasnet cifar model configuration."""
def __init__(self, params=None):
super(NasnetCifarModel, self).__init__(
'nasnet', 32, 32, 0.025, params=params)
def add_inference(self, cnn):
tf.logging.info('input_image_shape: {}'.format(cnn.top_layer.shape))
cnn.top_layer, _ = build_nasnet_cifar(
images=cnn.top_layer,
is_training=cnn.phase_train,
data_format=cnn.data_format)
cnn.top_size = cnn.top_layer.shape[-1].value
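# A minimal inference sketch, not part of the benchmark harness; it assumes a
# TF1 graph/session context and NHWC float input. 1001 classes matches the
# usual slim ImageNet label space (1000 classes plus background).
def _nasnet_mobile_example():
  images = tf.placeholder(tf.float32, [None, 224, 224, 3])
  logits, end_points = build_nasnet_mobile(
      images, num_classes=1001, is_training=False)
  return logits, end_points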
| tensorflow/benchmarks | scripts/tf_cnn_benchmarks/models/tf1_only/nasnet_model.py | Python | apache-2.0 | 20,912 |
from django.core.exceptions import ValidationError
from django.forms.fields import MultipleChoiceField
from django.utils.translation import ugettext_lazy as _
__title__ = 'fobi.contrib.plugins.form_elements.fields.' \
'select_multiple_with_max.fields'
__author__ = 'Artur Barseghyan <[email protected]>'
__copyright__ = '2014-2017 Artur Barseghyan'
__license__ = 'GPL 2.0/LGPL 2.1'
__all__ = ('MultipleChoiceWithMaxField',)
class MultipleChoiceWithMaxField(MultipleChoiceField):
"""Multiple choice with max field."""
def __init__(self, max_choices=None, choices=(), required=True,
widget=None, label=None, initial=None, help_text='', *args,
**kwargs):
"""Constructor."""
super(MultipleChoiceWithMaxField, self).__init__(
choices=choices, required=required, widget=widget, label=label,
initial=initial, help_text=help_text, *args, **kwargs
)
self.max_choices = max_choices
def validate(self, value):
"""Validate."""
super(MultipleChoiceWithMaxField, self).validate(value)
if self.max_choices:
if len(value) > self.max_choices:
raise ValidationError(_("You must choose no more than {0} "
"values.".format(self.max_choices)))
| mansonul/events | events/contrib/plugins/form_elements/fields/select_multiple_with_max/fields.py | Python | mit | 1,343 |
from os import system
for q in range(1, 112):
    q2 = str(q).zfill(3)  # zero-pad to three digits, e.g. 7 -> '007'
    system("./scfeatures none house/houses/house" + str(q) + " house/house" + q2 + ".scf")
| val-iisc/sketch-parse | retrieval-src/fgm-master/data/cmum/house/scfs.py | Python | mit | 182 |
#!/usr/bin/env python
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
GPSTest is a simple example using the SerialPort transport and the NMEA 0183
and Rockwell Zodiac GPS protocols to display fix data as it is received from
the device.
"""
from twisted.python import log, usage
import sys
if sys.platform == 'win32':
from twisted.internet import win32eventreactor
win32eventreactor.install()
class GPSFixLogger:
def handle_fix(self, *args):
"""
handle_fix gets called whenever either rockwell.Zodiac or nmea.NMEAReceiver
receives and decodes fix data. Generally, GPS receivers will report a
fix at 1hz. Implementing only this method is sufficient for most purposes
unless tracking of ground speed, course, utc date, or detailed satellite
information is necessary.
For example, plotting a map from MapQuest or a similar service only
requires longitude and latitude.
"""
log.msg('fix:\n' +
'\n'.join(map(lambda n: ' %s = %s' % tuple(n), zip(('utc', 'lon', 'lat', 'fix', 'sat', 'hdp', 'alt', 'geo', 'dgp'), map(repr, args)))))
class GPSOptions(usage.Options):
optFlags = [
['zodiac', 'z', 'Use Rockwell Zodiac (DeLorme Earthmate) [default: NMEA 0183]'],
]
optParameters = [
['outfile', 'o', None, 'Logfile [default: sys.stdout]'],
['baudrate', 'b', None, 'Serial baudrate [default: 4800 for NMEA, 9600 for Zodiac]'],
['port', 'p', '/dev/ttyS0', 'Serial Port device'],
]
if __name__ == '__main__':
from twisted.internet import reactor
from twisted.internet.serialport import SerialPort
o = GPSOptions()
try:
o.parseOptions()
except usage.UsageError, errortext:
print '%s: %s' % (sys.argv[0], errortext)
print '%s: Try --help for usage details.' % (sys.argv[0])
raise SystemExit, 1
logFile = o.opts['outfile']
if logFile is None:
logFile = sys.stdout
log.startLogging(logFile)
if o.opts['zodiac']:
from twisted.protocols.gps.rockwell import Zodiac as GPSProtocolBase
baudrate = 9600
else:
from twisted.protocols.gps.nmea import NMEAReceiver as GPSProtocolBase
baudrate = 4800
class GPSTest(GPSProtocolBase, GPSFixLogger):
pass
if o.opts['baudrate']:
baudrate = int(o.opts['baudrate'])
port = o.opts['port']
log.msg('Attempting to open %s at %dbps as a %s device' % (port, baudrate, GPSProtocolBase.__name__))
s = SerialPort(GPSTest(), o.opts['port'], reactor, baudrate=baudrate)
reactor.run()
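# Example invocation, assuming an NMEA receiver on the default serial port
# (the flags map to GPSOptions above):
#     python gpsfix.py --port /dev/ttyS0 --baudrate 4800 --outfile gps.log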
| waseem18/oh-mainline | vendor/packages/twisted/doc/core/examples/gpsfix.py | Python | agpl-3.0 | 2,616 |
# This file is part of Tryton. The COPYRIGHT file at the top level of
# this repository contains the full copyright notices and license terms.
import uuid
import vobject
from trytond.model import fields, Unique
from trytond.report import Report
from trytond import backend
from trytond.transaction import Transaction
from trytond.pool import Pool, PoolMeta
__all__ = ['Party', 'Address', 'ActionReport', 'VCard']
__metaclass__ = PoolMeta
class Party:
__name__ = 'party.party'
uuid = fields.Char('UUID', required=True,
help='Universally Unique Identifier')
vcard = fields.Binary('VCard')
@classmethod
def __setup__(cls):
super(Party, cls).__setup__()
t = cls.__table__()
cls._sql_constraints += [
('uuid_uniq', Unique(t, t.uuid),
'The UUID of the party must be unique.'),
]
@classmethod
def __register__(cls, module_name):
TableHandler = backend.get('TableHandler')
cursor = Transaction().connection.cursor()
table = TableHandler(cls, module_name)
sql_table = cls.__table__()
if not table.column_exist('uuid'):
table.add_column('uuid', cls.uuid._sql_type)
cursor.execute(*sql_table.select(sql_table.id))
for id, in cursor.fetchall():
cursor.execute(*sql_table.update(
columns=[sql_table.uuid],
values=[cls.default_uuid()],
where=sql_table.id == id))
super(Party, cls).__register__(module_name)
@staticmethod
def default_uuid():
return str(uuid.uuid4())
@classmethod
def create(cls, vlist):
Collection = Pool().get('webdav.collection')
parties = super(Party, cls).create(vlist)
# Restart the cache for vcard
Collection._vcard_cache.clear()
return parties
@classmethod
def copy(cls, parties, default=None):
if default is None:
default = {}
new_parties = []
for party in parties:
current_default = default.copy()
current_default['uuid'] = cls.default_uuid()
new_party, = super(Party, cls).copy([party],
default=current_default)
new_parties.append(new_party)
return new_parties
@classmethod
def write(cls, parties, values, *args):
Collection = Pool().get('webdav.collection')
super(Party, cls).write(parties, values, *args)
# Restart the cache for vcard
Collection._vcard_cache.clear()
@classmethod
def delete(cls, parties):
Collection = Pool().get('webdav.collection')
super(Party, cls).delete(parties)
# Restart the cache for vcard
Collection._vcard_cache.clear()
def vcard2values(self, vcard):
'''
Convert vcard to values for create or write
'''
Address = Pool().get('party.address')
res = {}
res['name'] = vcard.fn.value
if not hasattr(vcard, 'n'):
vcard.add('n')
vcard.n.value = vobject.vcard.Name(vcard.fn.value)
res['vcard'] = vcard.serialize()
if not self.id:
if hasattr(vcard, 'uid'):
res['uuid'] = vcard.uid.value
res['addresses'] = []
to_create = []
for adr in vcard.contents.get('adr', []):
vals = Address.vcard2values(adr)
to_create.append(vals)
if to_create:
res['addresses'].append(('create', to_create))
res['contact_mechanisms'] = []
to_create = []
for email in vcard.contents.get('email', []):
vals = {}
vals['type'] = 'email'
vals['value'] = email.value
to_create.append(vals)
if to_create:
res['contact_mechanisms'].append(('create', to_create))
to_create = []
for tel in vcard.contents.get('tel', []):
vals = {}
vals['type'] = 'phone'
if hasattr(tel, 'type_param') \
and 'cell' in tel.type_param.lower():
vals['type'] = 'mobile'
vals['value'] = tel.value
to_create.append(vals)
if to_create:
res['contact_mechanisms'].append(('create', to_create))
else:
i = 0
res['addresses'] = []
addresses_todelete = []
for address in self.addresses:
try:
adr = vcard.contents.get('adr', [])[i]
except IndexError:
addresses_todelete.append(address.id)
i += 1
continue
if not hasattr(adr, 'value'):
addresses_todelete.append(address.id)
i += 1
continue
vals = Address.vcard2values(adr)
res['addresses'].append(('write', [address.id], vals))
i += 1
if addresses_todelete:
res['addresses'].append(('delete', addresses_todelete))
try:
new_addresses = vcard.contents.get('adr', [])[i:]
except IndexError:
new_addresses = []
to_create = []
for adr in new_addresses:
if not hasattr(adr, 'value'):
continue
vals = Address.vcard2values(adr)
to_create.append(vals)
if to_create:
res['addresses'].append(('create', to_create))
i = 0
res['contact_mechanisms'] = []
contact_mechanisms_todelete = []
for cm in self.contact_mechanisms:
if cm.type != 'email':
continue
try:
email = vcard.contents.get('email', [])[i]
except IndexError:
contact_mechanisms_todelete.append(cm.id)
i += 1
continue
vals = {}
vals['value'] = email.value
res['contact_mechanisms'].append(('write', [cm.id], vals))
i += 1
try:
new_emails = vcard.contents.get('email', [])[i:]
except IndexError:
new_emails = []
to_create = []
for email in new_emails:
if not hasattr(email, 'value'):
continue
vals = {}
vals['type'] = 'email'
vals['value'] = email.value
to_create.append(vals)
if to_create:
res['contact_mechanisms'].append(('create', to_create))
i = 0
for cm in self.contact_mechanisms:
if cm.type not in ('phone', 'mobile'):
continue
try:
tel = vcard.contents.get('tel', [])[i]
except IndexError:
contact_mechanisms_todelete.append(cm.id)
i += 1
continue
vals = {}
vals['value'] = tel.value
res['contact_mechanisms'].append(('write', [cm.id], vals))
i += 1
try:
new_tels = vcard.contents.get('tel', [])[i:]
except IndexError:
new_tels = []
to_create = []
for tel in new_tels:
if not hasattr(tel, 'value'):
continue
vals = {}
vals['type'] = 'phone'
if hasattr(tel, 'type_param') \
and 'cell' in tel.type_param.lower():
vals['type'] = 'mobile'
vals['value'] = tel.value
to_create.append(vals)
if to_create:
res['contact_mechanisms'].append(('create', to_create))
if contact_mechanisms_todelete:
res['contact_mechanisms'].append(('delete',
contact_mechanisms_todelete))
return res
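# Editor's note (illustrative, not part of the upstream module): for a new
# party, vcard2values() returns a Tryton values dict shaped roughly like
#     {'name': 'John Doe', 'vcard': '...', 'uuid': '...',
#      'addresses': [('create', [{...}])],
#      'contact_mechanisms': [('create', [{'type': 'email', 'value': '...'}])]}
# where the one2many fields carry ('create'/'write'/'delete', ...) action tuples.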
class Address:
__name__ = 'party.address'
@classmethod
def vcard2values(cls, adr):
'''
Convert adr from vcard to values for create or write
'''
pool = Pool()
Country = pool.get('country.country')
Subdivision = pool.get('country.subdivision')
vals = {}
vals['street'] = adr.value.street or ''
vals['city'] = adr.value.city or ''
vals['zip'] = adr.value.code or ''
if adr.value.country:
countries = Country.search([
('rec_name', '=', adr.value.country),
], limit=1)
if countries:
country, = countries
vals['country'] = country.id
if adr.value.region:
subdivisions = Subdivision.search([
('rec_name', '=', adr.value.region),
('country', '=', country.id),
], limit=1)
if subdivisions:
subdivision, = subdivisions
vals['subdivision'] = subdivision.id
return vals
class ActionReport:
__name__ = 'ir.action.report'
@classmethod
def __setup__(cls):
super(ActionReport, cls).__setup__()
new_ext = ('vcf', 'VCard file')
if new_ext not in cls.extension.selection:
cls.extension.selection.append(new_ext)
class VCard(Report):
__name__ = 'party_vcarddav.party.vcard'
@classmethod
def render(cls, report, report_context):
return ''.join(cls.create_vcard(party).serialize()
for party in report_context['records'])
@classmethod
def convert(cls, report, data):
return 'vcf', data
@classmethod
def create_vcard(cls, party):
'''
        Return a vobject vCard instance for the party
'''
if party.vcard:
vcard = vobject.readOne(str(party.vcard))
else:
vcard = vobject.vCard()
if not hasattr(vcard, 'n'):
vcard.add('n')
if not vcard.n.value:
vcard.n.value = vobject.vcard.Name(party.name)
if not hasattr(vcard, 'fn'):
vcard.add('fn')
vcard.fn.value = party.full_name
if not hasattr(vcard, 'uid'):
vcard.add('uid')
vcard.uid.value = party.uuid
i = 0
for address in party.addresses:
try:
adr = vcard.contents.get('adr', [])[i]
except IndexError:
adr = None
if not adr:
adr = vcard.add('adr')
if not hasattr(adr, 'value'):
adr.value = vobject.vcard.Address()
adr.value.street = address.street or ''
adr.value.city = address.city or ''
if address.subdivision:
adr.value.region = address.subdivision.name or ''
adr.value.code = address.zip or ''
if address.country:
adr.value.country = address.country.name or ''
i += 1
try:
older_addresses = vcard.contents.get('adr', [])[i:]
except IndexError:
older_addresses = []
for adr in older_addresses:
vcard.contents['adr'].remove(adr)
email_count = 0
tel_count = 0
for cm in party.contact_mechanisms:
if cm.type == 'email':
try:
email = vcard.contents.get('email', [])[email_count]
except IndexError:
email = None
if not email:
email = vcard.add('email')
email.value = cm.value
if not hasattr(email, 'type_param'):
email.type_param = 'internet'
elif 'internet' not in email.type_param.lower():
email.type_param += ',internet'
email_count += 1
elif cm.type in ('phone', 'mobile'):
try:
tel = vcard.contents.get('tel', [])[tel_count]
except IndexError:
tel = None
if not tel:
tel = vcard.add('tel')
tel.value = cm.value
if cm.type == 'mobile':
if not hasattr(tel, 'type_param'):
tel.type_param = 'cell'
elif 'cell' not in tel.type_param.lower():
tel.type_param += ',cell'
else:
if not hasattr(tel, 'type_param'):
tel.type_param = 'voice'
tel_count += 1
try:
older_emails = vcard.contents.get('email', [])[email_count:]
except IndexError:
older_emails = []
for email in older_emails:
vcard.contents['email'].remove(email)
try:
older_tels = vcard.contents.get('tel', [])[tel_count:]
except IndexError:
older_tels = []
for tel in older_tels:
vcard.contents['tel'].remove(tel)
return vcard
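# Editor's sketch (illustrative, not part of the upstream module): create_vcard()
# above drives the vobject API. A minimal, self-contained taste of that API,
# assuming python-vobject is installed; the name and address are made-up data.
def _example_vcard_text():
    card = vobject.vCard()
    card.add('fn').value = 'John Doe'  # formatted name, as set from party.full_name
    card.add('n').value = vobject.vcard.Name(given='John', family='Doe')
    card.add('email').value = '[email protected]'
    card.email.type_param = 'internet'
    return card.serialize()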
| tryton/party_vcarddav | party.py | Python | gpl-3.0 | 13,404 |
#!/usr/bin/env python3
#pylint: disable=missing-docstring
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import chigger
reader = chigger.exodus.ExodusReader('../input/mug_blocks_out.e', timestep=0)
mug = chigger.exodus.ExodusResult(reader, variable='diffused', min=0.5, max=1.8)
cbar = chigger.exodus.ExodusColorBar(mug, primary={'precision':2, 'num_ticks':3, 'notation':'fixed'})
window = chigger.RenderWindow(mug, cbar, size=[300,300], test=True)
for i in range(2):
reader.setOptions(timestep=i)
window.write('minmax_' + str(i) + '.png')
window.start()
| nuclear-wizard/moose | python/chigger/tests/range/minmax.py | Python | lgpl-2.1 | 826 |
'''
Created on 05/09/2017
@author: chernomirdinmacuvele
'''
from ui_compAmostra import Ui_frmDistComprimento
from PyQt5.Qt import QSpinBox, QDialog, QDoubleSpinBox, QSqlQuery
import FuncSQL
import rscForm
import QT_msg
class dialog_CompAmost(QDialog, Ui_frmDistComprimento):
def __init__(self, parent=None, TblName=None, dbcon=None, Id=None, lstValToEdit=None, dictRules = None):
super(dialog_CompAmost, self).__init__(parent)
self.setupUi(self)
        self.rowCount = 100  # read from the configuration file
self.dbcon =dbcon
self.tblName = TblName
self.lstToEdit = lstValToEdit
self.dictRules = dictRules
self.Id = Id
self.isEdit = False
self.setLines()
self.bOK= (False, None)
self.PBSalvar.clicked.connect(self.toSave)
self.PBCancelar.clicked.connect(self.close)
def getAmostSexo(self):
quer= """
SELECT ref_table.nome, round ( cast(id_intervalo_class as numeric), 2)as int_class,
comp_minimo, peso, n_indiv
FROM public.t_amost_comp_sexo
left join public.ref_table on t_amost_comp_sexo.id_medida_comp = ref_table.id
where t_amost_comp_sexo.id= {Id}
""".format(Id= self.Id)
bOK, lstOut= FuncSQL.anySelectScript(scpt=quer)
if bOK:
medida, intClass, compMinimo, peso, n_indiv = lstOut[0], lstOut[1], lstOut[2], lstOut[3], lstOut[4]
else:
medida, intClass, compMinimo, peso, n_indiv = None, None, None, None, None
return medida, intClass, compMinimo, peso, n_indiv
def toEdit(self):
quer = "SELECT class_comp, n_indiv from public.t_comp_amost where id_amost_comp_sexo= {id_amost_comp_sexo} ".format(id_amost_comp_sexo=self.Id)
bOK, lstOut= FuncSQL.multLineSelect(scpt=quer)
if bOK:
if lstOut != []:
self.isEdit = True
for val in lstOut:
for row in range(self.rowCount):
tstDBSpin = QDoubleSpinBox()
tstDBSpin.setValue(float(val[0]))
newVal = rscForm.getText(widg= tstDBSpin)
item_0 = rscForm.getText(widg= self.TWComprimentos.cellWidget(row, 0))
if item_0 == newVal:
wdg1= self.TWComprimentos.cellWidget(row, 1)
rscForm.setTxtToWidget(widget=wdg1, val=val[1])
def setLines(self):
_, intClass, compMini, _, _ = self.getAmostSexo()
if intClass is not None and compMini is not None:
mini = float(compMini)
intClass = float(intClass)
self.TWComprimentos.setRowCount(self.rowCount)
for row in range(self.rowCount):
                spinBox = QSpinBox()  # create the spin box and double spin box objects
doubleSpin = QDoubleSpinBox()
if row == 0:
idx = mini
else:
                    idx = mini + (row * intClass)  # formula for this row's initial value
doubleSpin.setMinimum(idx)
doubleSpin.setReadOnly(True)
                self.TWComprimentos.setCellWidget(row, 0, doubleSpin)  # place the widgets in their cells
self.TWComprimentos.setCellWidget(row, 1, spinBox)
self.toEdit()
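    # Worked example (editor's note): with comp_minimo = 10.0 and
    # id_intervalo_class = 0.5, the read-only column shows 10.0, 10.5, 11.0, ...
    # because row r displays mini + r * intClass.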
def getVal(self):
lstOut=[]
for val in range(self.rowCount):
item_0 = rscForm.getText(widg= self.TWComprimentos.cellWidget(val, 0))
item_1 = self.TWComprimentos.cellWidget(val, 1).text()
if int(item_1) > 0:
lstOut.append((item_0, item_1))
return lstOut
def toSave(self):
lstIn = self.getVal()
lstInserted= []
for val in lstIn:
            bOK, idx = FuncSQL.getLast(tblName=self.tblName)
            if bOK:
                newLast = int(idx) + 1
classeCom, n_indiv = val
dictVal = {'id': newLast,
'id_amost_comp_sexo':self.Id,
'class_comp':classeCom,
'n_indiv':n_indiv
}
quer = '''INSERT INTO public.t_comp_amost
(id, id_amost_comp_sexo, class_comp, n_indiv)
VALUES ({id}, {id_amost_comp_sexo}, '{class_comp}', {n_indiv});'''.format(**dictVal)
toSave = QSqlQuery()
bOK = toSave.exec_(quer)
                if not bOK:
                    # the insert failed (e.g. the class already exists); verify/update it below
                    lstInserted.append(dictVal)
bOK, msgOut = self.vefSaved(lstdictIN= lstInserted)
self.bOK = bOK, msgOut
self.close()
def vefSaved(self, lstdictIN=None):
toSave = QSqlQuery()
bOK=True
msgOut= "Operacao Realizada Com sucesso"
for dictIN in lstdictIN:
quer = "SELECT id_amost_comp_sexo, class_comp, n_indiv from public.t_comp_amost where class_comp= '{class_comp}' and id_amost_comp_sexo= {id_amost_comp_sexo} ".format(**dictIN)
_, lstOut = FuncSQL.anySelectScript(scpt= quer)
if lstOut[0] == dictIN['id_amost_comp_sexo'] and lstOut[1] == dictIN['class_comp'] and lstOut[2] == dictIN['n_indiv']:
bOK =True
elif lstOut[0] == dictIN['id_amost_comp_sexo'] and lstOut[1] != dictIN['class_comp'] or lstOut[2] != dictIN['n_indiv']:
quer= ''' UPDATE public.t_comp_amost SET n_indiv= {n_indiv}
WHERE class_comp= '{class_comp}' and id_amost_comp_sexo= {id_amost_comp_sexo};'''.format(**dictIN)
bOK = toSave.exec_(quer)
else:
bOK =False
vTxt= toSave.lastError().text()
QT_msg.error(txt='Error Nao sera possivel realizar a opercao', verbTxt=vTxt)
msgOut=None
break
return bOK, msgOut
def setDict(self):
self.dictFields= {
'fldName': ["id", "id_comp_sexo", "class_comp", "n_indiv"],
'fldWidget':[None, None, self.DSBClass_comp, self.DSBN_indiv_Medidos],
'toDefault':[False, False, False, False],
'objName': ['id', 'id_comp_sexo', 'DSBClass_comp', 'DSBN_indiv_Medidos'],
'isRel':[False, False, False, False],
'toCheck': [False, False, True, True],
"toQuote":[False, False, True, True]
}
| InUrSys/PescArt2.0 | src/Fichas_src/dialog_CompAmost.py | Python | gpl-3.0 | 6,726 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-05 02:38
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('registration', '0002_auto_20161130_0157'),
('bike', '0001_bike_with_fsm'),
]
operations = [
migrations.AddField(
model_name='bike',
name='purchased_by',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='purchased_bike', to='registration.Member'),
),
migrations.AlterField(
model_name='bike',
name='claimed_by',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='claimed_bike', to='registration.Member'),
),
]
| BridgeCityBicycleCoop/workstand | bike/migrations/0002_auto_20170105_0238.py | Python | mit | 884 |
from .auth import (
INDEX_NAME,
INDEX_URL,
encode_header,
load_config,
resolve_authconfig,
resolve_repository_name,
) # flake8: noqa | Sorsly/subtle | google-cloud-sdk/lib/third_party/docker/docker/auth/__init__.py | Python | mit | 157 |
"""
Collection of helper classes to layout the gui using Widgets and Panels.
A widget is a any gui entity. A panel is a widget which can have children widgets.
The gui automatically does layout in a fashion sillier to wxPython.
A Widget is a gui entity that displays something. IE; button, text label.
A panel is a gui widget which can hold other widgets.
"""
from .flags import *
from .widget import *
from .text import *
from .button import *
from .panels import *
| chrisrossx/DotStar_Emulator | DotStar_Emulator/emulator/gui/__init__.py | Python | mit | 470 |
from setuptools import setup, find_packages
setup(
name='segments',
version="2.1.4.dev0",
description='',
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
author='Steven Moran and Robert Forkel',
author_email='[email protected]',
url='https://github.com/cldf/segments',
install_requires=[
'regex',
'csvw>=1.5.6',
'clldutils>=1.7.3',
],
extras_require={
'dev': ['flake8', 'wheel', 'twine'],
'test': [
'pytest>=3.6',
'pytest-mock',
'mock',
'pytest-cov',
],
},
license='Apache 2.0',
zip_safe=False,
keywords='tokenizer',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy'
],
packages=find_packages(where="src"),
package_dir={"": "src"},
include_package_data=True,
entry_points={
'console_scripts': [
"segments = segments.__main__:main",
]
},
)
| bambooforest/segments | setup.py | Python | apache-2.0 | 1,528 |
#! /usr/bin/env python
# -*- coding: iso-8859-1 -*-
import urllib, re
class AppURLopener(urllib.FancyURLopener):
version = "Mozilla/5.0"
urllib._urlopener = AppURLopener()
def suche( source , country, code ):
for line in source:
result = re.match ( '(.*)(upload.wikimedia)(.*)'+ country +'(.*)', line )
        if result is not None:
svgurl = line[ line.find("http") : line.find(".svg") + 4 ].strip()
if svgurl.find('thumb') == -1 and len(svgurl) > 3 :
svgsource = urllib.urlopen(svgurl)
data = svgsource.read()
svgsource.close()
out_file = open('flag_' + code.strip().lower() + '.svg','w')
out_file.write(data)
out_file.close()
print svgurl
break
isourlstring = 'http://www.iso.org/' + 'iso/en/prods-services/iso3166ma/02iso-3166-code-lists/list-en1-semic.txt'
isosource = urllib.urlopen(isourlstring).readlines()
for line in isosource:
if len(line) < 80 and len(line) > 5 :
print line
linelist = line.split(';')
linelist[0] = linelist[0].replace("KOREA, DEMOCRATIC PEOPLE'S REPUBLIC OF","NORTH KOREA")
linelist[0] = linelist[0].replace("KOREA, REPUBLIC OF","SOUTH KOREA")
linelist[0] = linelist[0].replace("CONGO","REPUBLIC OF THE CONGO")
linelist[0] = linelist[0].replace("REPUBLIC OF THE CONGO, THE DEMOCRATIC REPUBLIC OF THE","DEMOCRATIC REPUBLIC OF THE CONGO")
linelist[0] = linelist[0].replace("KOREA, REPUBLIC OF","SOUTH KOREA")
linelist[0] = linelist[0].replace('VIRGIN ISLANDS, BRITISH','BRITISH VIRGIN ISLANDS')
linelist[0] = linelist[0].replace('VIRGIN ISLANDS, U.S.','UNITED STATES VIRGIN ISLANDS')
linelist[0] = linelist[0].split(',')[0].rstrip()
linelist[0] = linelist[0].split('(')[0].rstrip()
namelist = linelist[0].split(' ')
fullname = ""
for word in namelist:
if fullname != "" :
fullname = fullname + "_"
if word == 'AND' or word == 'THE' or word == 'OF' or word.find('D\'') > -1:
word = word.lower()
else :
word = word.capitalize()
if word.find('\'') > -1 :
word = word.split('\'')[0] + '%27' + word.split('\'')[1].capitalize()
if word.find('-') > -1 :
word = word.split('-')[0] + '-' + word.split('-')[1].capitalize()
fullname = fullname + word
        fullname = fullname.strip()
if fullname.find('Islands') > -1 or fullname.find('United') > -1 or fullname.find('Antilles') > -1 or fullname.find('Seychelles') > -1 or fullname.find('Philippines') > -1 or fullname.find('Republic') > -1 or fullname.find('Bahamas') > -1 or fullname.find('Territory') > -1 or fullname.find('Comoros') > -1 or fullname.find('Netherlands') > -1 or fullname.find('Isle') > -1:
fullname = 'the_' + fullname
if fullname.find("land_Islands") > -1 :
fullname ='Aaland'
fullname = fullname.replace('Timor-Leste','East_Timor')
fullname = fullname.replace('the_Syrian_Arab_Republic','Syria')
fullname = fullname.replace('Svalbard_and_Jan_Mayen','Norway')
fullname = fullname.replace('Saint_Pierre','Saint-Pierre')
fullname = fullname.replace('Russian_Federation','Russia')
fullname = fullname.replace('Libyan_Arab_Jamahiriya','Libya')
fullname = fullname.replace('the_Lao_People\'S_Democratic_Republic','Laos')
fullname = fullname.replace('Holy_See','')
fullname = fullname.replace('the_Heard_Island_and_Mcdonald_Islands','Australia')
fullname = fullname.replace('French_Southern_Territories','France')
fullname = fullname.replace('Mayotte','France')
fullname = fullname.replace('Guadeloupe','France')
fullname = fullname.replace('Reunion','France')
fullname = fullname.replace('Gambia','The_Gambia')
fullname = fullname.replace('Tokelau','New_Zealand')
fullname = fullname.replace('Taiwan','the_Republic_of_China')
fullname = fullname.replace('Viet_Nam','Vietnam')
fullname = fullname.replace('French_Guiana','France')
fullname = fullname.replace('Brunei_Darussalam','Brunei')
fullname = fullname.replace('Pitcairn','the_Pitcairn_Islands')
fullname = fullname.replace('Macao','Macau')
fullname = fullname.replace('Bouvet_Island','Norway')
fullname = fullname.replace('the_Palestinian_Territory','Palestine')
fullname = fullname.replace('the_United_States_Minor_Outlying_Islands','the_United_States')
fullname = fullname.replace('the_South_Georgia_and_the_South_Sandwich_Islands','South_Georgia_and_the_South_Sandwich_Islands')
fullname = fullname.replace('Cocos','the_Cocos_%28Keeling%29_Islands')
wpurlstring = 'http://de.wikipedia.org/wiki/Bild:Flag_of_'.strip() + fullname.strip() + '.svg'.strip()
wpsource = urllib.urlopen(wpurlstring).readlines()
if fullname !='' :
print wpurlstring
suche( wpsource, fullname, linelist[1] )
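# Editor's sketch (illustrative only, never called by this script): the word
# loop above title-cases an ISO country name while lower-casing the connector
# words, before the special-case replacements. A compact, hedged restatement:
def _example_normalize(iso_name):
    words = []
    for word in iso_name.split(' '):
        if word in ('AND', 'THE', 'OF'):
            words.append(word.lower())
        else:
            words.append(word.capitalize())
    return '_'.join(words) # e.g. 'BURKINA FASO' -> 'Burkina_Faso'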
| tzapzoor/marble | data/flags/downloadflags.py | Python | lgpl-2.1 | 4,626 |
# Copyright 2016 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
from nova.policies import base
POLICY_ROOT = 'os_compute_api:servers:migrations:%s'
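# e.g. POLICY_ROOT % 'show' expands to 'os_compute_api:servers:migrations:show'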
servers_migrations_policies = [
base.create_rule_default(
POLICY_ROOT % 'show',
base.RULE_ADMIN_API,
"Show details for an in-progress live migration for a given server",
[
{
'method': 'GET',
'path': '/servers/{server_id}/migrations/{migration_id}'
}
]),
base.create_rule_default(
POLICY_ROOT % 'force_complete',
base.RULE_ADMIN_API,
"Force an in-progress live migration for a given server to complete",
[
{
'method': 'POST',
'path': '/servers/{server_id}/migrations/{migration_id}'
'/action (force_complete)'
}
]),
base.create_rule_default(
POLICY_ROOT % 'delete',
base.RULE_ADMIN_API,
"Delete(Abort) an in-progress live migration",
[
{
'method': 'DELETE',
'path': '/servers/{server_id}/migrations/{migration_id}'
}
]),
base.create_rule_default(
POLICY_ROOT % 'index',
base.RULE_ADMIN_API,
"Lists in-progress live migrations for a given server",
[
{
'method': 'GET',
'path': '/servers/{server_id}/migrations'
}
]),
policy.RuleDefault(
name='os_compute_api:server-migrations:discoverable',
check_str=base.RULE_ANY),
]
def list_rules():
return servers_migrations_policies
| vmturbo/nova | nova/policies/servers_migrations.py | Python | apache-2.0 | 2,279 |
#!/usr/bin/env python
'''
A Scratch 2 parser written in Python
Copyright 2013 Joseph Lewis <[email protected]>
'''
import zipfile
import json
# an enumeration of all the hat blocks available
HAT_BLOCKS = frozenset([u"whenKeyPressed", u"whenClicked", u"whenSceneStarts",
u"whenSensorGreaterThan", u"whenIReceive", u"whenCloned", u"procDef",
u"whenGreenFlag"])
MOTION_BLOCKS = frozenset([u'bounceOffEdge', u'changeXposBy:', u'changeYposBy:',
u'forward:', u'glideSecs:toX:y:elapsed:from:', u'gotoSpriteOrMouse:',
u'gotoX:y:', u'heading', u'heading:', u'pointTowards:', u'setRotationStyle',
u'turnLeft:', u'turnRight:', u'xpos', u'xpos:', u'ypos', u'ypos:'])
LOOKS_BLOCKS = frozenset([u'backgroundIndex', u'changeGraphicEffect:by:',
u'changeSizeBy:', u'comeToFront', u'costumeIndex', u'filterReset',
u'goBackByLayers:', u'hide', u'lookLike:', u'nextCostume', u'nextScene',
u'say:', u'say:duration:elapsed:from:', u'scale', u'sceneName',
u'setGraphicEffect:to:', u'setSizeTo:', u'show', u'startScene',
u'startSceneAndWait', u'think:', u'think:duration:elapsed:from:'])
SOUND_BLOCKS = frozenset([u'changeTempoBy:', u'changeVolumeBy:',
u'doPlaySoundAndWait', u'instrument:', u'noteOn:duration:elapsed:from:',
u'playDrum', u'playSound:', u'rest:elapsed:from:', u'setTempoTo:',
u'setVolumeTo:', u'stopAllSounds', u'tempo', u'volume'])
PEN_BLOCKS = frozenset([u'changePenHueBy:', u'changePenShadeBy:',
u'changePenSizeBy:', u'clearPenTrails', u'penColor:', u'penSize:',
u'putPenDown', u'putPenUp', u'setPenHueTo:', u'setPenShadeTo:',
u'stampCostume'])
LIST_BLOCKS = frozenset([u'append:toList:', u'contentsOfList:',
u'deleteLine:ofList:', u'getLine:ofList:', u'hideList:',
u'insert:at:ofList:', u'lineCountOfList:', u'list:contains:',
u'setLine:ofList:to:', u'showList:'])
VARIABLE_BLOCKS = frozenset([u'changeVar:by:', u'hideVariable:',
u'readVariable', u'setVar:to:', u'showVariable:'])
EVENTS_BLOCKS = frozenset([u'broadcast:', u'doBroadcastAndWait',u'whenClicked',
u'whenGreenFlag', u'whenIReceive', u'whenKeyPressed', u'whenSceneStarts',
u'whenSensorGreaterThan'])
CONTROL_BLOCKS = frozenset([u'createCloneOf', u'deleteClone', u'doForever',
u'doIf', u'doIfElse', u'doRepeat', u'doUntil', u'doWaitUntil',
u'stopScripts', u'wait:elapsed:from:', u'whenCloned'])
SENSING_BLOCKS = frozenset([u'answer', u'color:sees:', u'distanceTo:',
u'doAsk', u'getAttribute:of:', u'getUserName', u'keyPressed:',
u'mousePressed', u'mouseX', u'mouseY', u'senseVideoMotion',
u'setVideoState', u'setVideoTransparency', u'soundLevel', u'timeAndDate',
u'timer', u'timerReset', u'timestamp', u'touching:', u'touchingColor:'])
OPERATORS_BLOCKS = frozenset([u'%', u'&', u'*', u'+', u'-', u'/', u'<', u'=',
u'>', u'computeFunction:of:', u'concatenate:with:', u'letter:of:',
u'not', u'randomFrom:to:', u'rounded', u'stringLength:', u'|'])
CUSTOM_BLOCKS = frozenset([u'call', u'procDef'])
# Meta categories
USER_INTERACTION_BLOCKS = frozenset([u'whenClicked', u'whenKeyPressed',
u'keyPressed:', u'mousePressed', u'mouseX', u'mouseY'])
class Scratch2Project:
__jsondoc = None
def __init__(self, filepath):
'''Takes a filepath and parses the file.'''
with zipfile.ZipFile(filepath) as fp:
with fp.open("project.json") as js_doc:
self.__jsondoc = json.load(js_doc)
def sprites(self):
'''Returns an array containing all sprites for the project'''
return self.__jsondoc["children"]
def stage(self):
'''Returns the stage for the project.'''
return self.__jsondoc
def _from_stage_and_sprites(self, property_name):
tmp = []
tmp += self.stage().get(property_name, [])
for sprite in self.sprites():
tmp += sprite.get(property_name, [])
return tmp
def scripts(self, return_non_attached=False):
'''Returns an array containing all of the scripts in the project.
'''
tmp = self._from_stage_and_sprites('scripts')
# the third param is the actual script array
scripts = [t[2] for t in tmp]
if return_non_attached:
return scripts
return [s for s in scripts if s[0][0] in HAT_BLOCKS]
def blocks(self, return_non_attached=False):
        '''Returns a list containing all the blocks that are reachable in the
        project
        '''
blocks = []
for script in self.scripts(return_non_attached):
for block in script:
blocks.append(block)
# fetch hidden inner blocks like operators
extrablocks = []
def lookforlists(block, extrablocks):
for item in block:
if type(item) != list:
continue
if len(item) == 0: # ignore empty lists
continue
if type(item[0]) in [type(u''), type('')]:
extrablocks += [item]
lookforlists(item, extrablocks)
if type(item[0]) == list:
lookforlists(item, extrablocks)
lookforlists(blocks, extrablocks)
return extrablocks
def costumes(self):
'''Returns a list containing all the costumes and backgrounds
for the project.
'''
return self._from_stage_and_sprites('costumes')
def sounds(self):
'''Returns a list containing all the sounds in the project.
'''
return self._from_stage_and_sprites('sounds')
def variables(self):
'''Returns a list containing all of the variables in the project.
'''
return self._from_stage_and_sprites('variables')
def lists(self):
'''Returns a list containing all of the lists in the project.
'''
return self._from_stage_and_sprites('lists')
def info(self):
'''Returns information about the scratch project.
'''
return self.__jsondoc["info"]
def count_blocks_of_type(self, block_types):
        '''Returns the number of blocks that belong to the given set of block types.
        '''
return sum([1 for b in self.blocks() if b[0] in block_types])
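if __name__ == '__main__':
    # Editor's usage sketch (hedged): 'example.sb2' is a placeholder path to
    # any Scratch 2 project archive; substitute a real file to try it.
    demo = Scratch2Project('example.sb2')
    print('%d scripts, %d reachable blocks' % (len(demo.scripts()), len(demo.blocks())))
    print('%d motion blocks' % demo.count_blocks_of_type(MOTION_BLOCKS))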
| josephlewis42/magpie | magpie/plugins/scratch2/decompiler.py | Python | bsd-3-clause | 5,722 |
import feedparser
import time
# Create display instance on default I2C address (0x70) and bus number.
from Adafruit_LED_Backpack import AlphaNum4
display = AlphaNum4.AlphaNum4()
# Initialize the display. Must be called once before using the display.
display.begin()
# RSS feed address; add more strings here for multiple feeds
RssAddress = "http://feeds.reuters.com/Reuters/domesticNews"
# Create the feed object called Rss
Rss = feedparser.parse(RssAddress)
# Iterate through all the titles in the feed, scrolling each one across the display
display.clear()
display.write_display()
#Loop through each title of feed
for i in Rss.entries:
#prints title to console
print (i.title)
    # reset position to the beginning
pos = 0
    # Uppercase the string for readability and add ---* buffers to the beginning and end to mark title boundaries
CapString = "---*" + i.title.upper() + "*---"
# Dashed line in console for aesthetics
print("----------------------------------------------------------------")
#Loop for scrolling through title
for x in range(0,len(CapString)-4):
# Print a 4 character string to the display buffer.
display.print_str(CapString[pos:pos+4])
# Write the display buffer to the hardware. This must be called to
# update the actual display LEDs.
display.write_display()
# Increment position. Wrap back to 0 when the end is reached.
pos += 1
if pos > len(CapString)-4:
pos = 0
        # Delay for 0.15 seconds. This can be changed to speed up or slow down the scroll.
time.sleep(0.15)
# Clear out display
display.print_str(" ")
display.write_display()
| Epikarsios/RssLEDBackpack | RssLED.py | Python | gpl-3.0 | 1,628 |
"""Wrapper service for kraken api."""
class ApiService:
"""Serivce for kraken api call."""
def __init__(self):
"""Create service object."""
pass
| saukymo/kraken | src/services/api.py | Python | mit | 172 |
import os
from SystemInfo import SystemInfo
from Tools.HardwareInfo import HardwareInfo
from Tools.BoundFunction import boundFunction
from config import config, ConfigSubsection, ConfigSelection, ConfigFloat, ConfigSatlist, ConfigYesNo, ConfigInteger, ConfigSubList, ConfigNothing, ConfigSubDict, ConfigOnOff, ConfigDateTime, ConfigText
from enigma import eDVBFrontendParametersSatellite, eDVBSatelliteEquipmentControl as secClass, eDVBSatelliteDiseqcParameters as diseqcParam, eDVBSatelliteSwitchParameters as switchParam, eDVBSatelliteRotorParameters as rotorParam, eDVBResourceManager, eDVBDB, eEnv
from time import localtime, mktime
from datetime import datetime
import xml.etree.cElementTree
from Components.About import about
config.unicable = ConfigSubsection()
def orbStr(pos):
return pos > 3600 and "N/A" or "%d.%d\xc2\xb0%s" % (pos > 1800 and ((3600 - pos) / 10, (3600 - pos) % 10, "W") or (pos / 10, pos % 10, "E"))
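# Worked example (editor's note): orbStr(192) -> "19.2\xc2\xb0E" and
# orbStr(3530) -> "7.0\xc2\xb0W"; positions are tenths of a degree, values
# above 1800 count westward from 360.0 and anything above 3600 yields "N/A".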
def getConfigSatlist(orbpos, satlist):
default_orbpos = None
for x in satlist:
if x[0] == orbpos:
default_orbpos = orbpos
break
return ConfigSatlist(satlist, default_orbpos)
class SecConfigure:
def __init__(self, nimmgr):
self.NimManager = nimmgr
self.configuredSatellites = set()
self.update()
def getConfiguredSats(self):
return self.configuredSatellites
def addSatellite(self, sec, orbpos):
sec.addSatellite(orbpos)
self.configuredSatellites.add(orbpos)
def addLNBSimple(self, sec, slotid, diseqcmode, toneburstmode=diseqcParam.NO, diseqcpos=diseqcParam.SENDNO, orbpos=0, longitude=0, latitude=0, loDirection=0, laDirection=0, turningSpeed=rotorParam.FAST, useInputPower=True, inputPowerDelta=50, fastDiSEqC=False, setVoltageTone=True, diseqc13V=False, CircularLNB=False):
if orbpos is None or orbpos == 3600 or orbpos == 3601:
return
#simple defaults
sec.addLNB()
tunermask = 1 << slotid
if slotid in self.equal:
for slot in self.equal[slotid]:
tunermask |= (1 << slot)
if slotid in self.linked:
for slot in self.linked[slotid]:
tunermask |= (1 << slot)
sec.setLNBSatCRformat(0)
sec.setLNBNum(1)
sec.setLNBLOFL(CircularLNB and 10750000 or 9750000)
sec.setLNBLOFH(CircularLNB and 10750000 or 10600000)
sec.setLNBThreshold(CircularLNB and 10750000 or 11700000)
sec.setLNBIncreasedVoltage(False)
sec.setRepeats(0)
sec.setFastDiSEqC(fastDiSEqC)
sec.setSeqRepeat(False)
sec.setCommandOrder(0)
#user values
sec.setDiSEqCMode(3 if diseqcmode == 4 else diseqcmode)
sec.setToneburst(toneburstmode)
sec.setCommittedCommand(diseqcpos)
sec.setUncommittedCommand(0) # SENDNO
if 0 <= diseqcmode < 3:
self.addSatellite(sec, orbpos)
if setVoltageTone:
sec.setVoltageMode(switchParam.HV_13 if diseqc13V else switchParam.HV)
sec.setToneMode(switchParam.HILO)
else:
sec.setVoltageMode(switchParam._14V)
sec.setToneMode(switchParam.OFF)
elif 3 <= diseqcmode < 5: # diseqc 1.2
if slotid in self.satposdepends:
for slot in self.satposdepends[slotid]:
tunermask |= (1 << slot)
sec.setLatitude(latitude)
sec.setLaDirection(laDirection)
sec.setLongitude(longitude)
sec.setLoDirection(loDirection)
sec.setUseInputpower(useInputPower)
sec.setInputpowerDelta(inputPowerDelta)
sec.setRotorTurningSpeed(turningSpeed)
user_satList = self.NimManager.satList
if diseqcmode == 4:
user_satList = []
if orbpos and isinstance(orbpos, str):
orbpos = orbpos.replace("]", "").replace("[", "")
for user_sat in self.NimManager.satList:
sat_str = str(user_sat[0])
if ("," not in orbpos and sat_str == orbpos) or ((', ' + sat_str + ',' in orbpos) or (orbpos.startswith(sat_str + ',')) or (orbpos.endswith(', ' + sat_str))):
user_satList.append(user_sat)
for x in user_satList:
print "[SecConfigure] Add sat " + str(x[0])
self.addSatellite(sec, int(x[0]))
sec.setVoltageMode(switchParam.HV_13 if diseqc13V else switchParam.HV)
sec.setToneMode(switchParam.HILO)
sec.setRotorPosNum(0) # USALS
sec.setLNBSlotMask(tunermask)
def setSatposDepends(self, sec, nim1, nim2):
print "[SecConfigure] tuner", nim1, "depends on satpos of", nim2
sec.setTunerDepends(nim1, nim2)
def linkInternally(self, slotid):
nim = self.NimManager.getNim(slotid)
if nim.internallyConnectableTo is not None:
nim.setInternalLink()
def linkNIMs(self, sec, nim1, nim2):
print "[SecConfigure] link tuner", nim1, "to tuner", nim2
		# used to internally connect tuner A to B
if '7356' not in about.getChipSetString() and nim2 == (nim1 - 1):
self.linkInternally(nim1)
elif '7356' in about.getChipSetString():
self.linkInternally(nim1)
sec.setTunerLinked(nim1, nim2)
def getRoot(self, slotid, connto):
visited = []
while self.NimManager.getNimConfig(connto).configMode.value in ("satposdepends", "equal", "loopthrough"):
connto = int(self.NimManager.getNimConfig(connto).connectedTo.value)
if connto in visited: # prevent endless loop
return slotid
visited.append(connto)
return connto
def update(self):
sec = secClass.getInstance()
self.configuredSatellites = set()
self.maxLnbNum = sec.getMaxLnbNum()
for slotid in self.NimManager.getNimListOfType("DVB-S"):
if self.NimManager.nimInternallyConnectableTo(slotid) is not None:
self.NimManager.nimRemoveInternalLink(slotid)
		sec.clear()  # this also unlinks the NIMs!
print "[SecConfigure] sec config cleared"
self.linked = {}
self.satposdepends = {}
self.equal = {}
nim_slots = self.NimManager.nim_slots
used_nim_slots = []
for slot in nim_slots:
if slot.type is not None:
used_nim_slots.append((slot.slot, slot.description, slot.config.configMode.value != "nothing" and True or False, slot.isCompatible("DVB-S2"), slot.frontend_id is None and -1 or slot.frontend_id))
eDVBResourceManager.getInstance().setFrontendSlotInformations(used_nim_slots)
for slot in nim_slots:
x = slot.slot
nim = slot.config
if slot.isCompatible("DVB-S"):
# save what nim we link to/are equal to/satposdepends to.
# this is stored in the *value* (not index!) of the config list
if nim.configMode.value == "equal":
connto = self.getRoot(x, int(nim.connectedTo.value))
if connto not in self.equal:
self.equal[connto] = []
self.equal[connto].append(x)
elif nim.configMode.value == "loopthrough":
self.linkNIMs(sec, x, int(nim.connectedTo.value))
connto = self.getRoot(x, int(nim.connectedTo.value))
if connto not in self.linked:
self.linked[connto] = []
self.linked[connto].append(x)
elif nim.configMode.value == "satposdepends":
self.setSatposDepends(sec, x, int(nim.connectedTo.value))
connto = self.getRoot(x, int(nim.connectedTo.value))
if connto not in self.satposdepends:
self.satposdepends[connto] = []
self.satposdepends[connto].append(x)
for slot in nim_slots:
x = slot.slot
nim = slot.config
if slot.isCompatible("DVB-S"):
clear_lastsatrotorposition = True
print "[SecConfigure] slot: " + str(x) + " configmode: " + str(nim.configMode.value)
if nim.configMode.value in ("loopthrough", "satposdepends", "nothing"):
pass
else:
sec.setSlotNotLinked(x)
if nim.configMode.value == "equal":
clear_lastsatrotorposition = False
elif nim.configMode.value == "simple": #simple config
print "[SecConfigure] diseqcmode: ", nim.diseqcMode.value
if nim.diseqcMode.value == "single": #single
currentCircular = False
if nim.diseqcA.value in ("360", "560"):
currentCircular = nim.simpleDiSEqCSetCircularLNB.value
if nim.simpleSingleSendDiSEqC.value:
self.addLNBSimple(sec, slotid=x, orbpos=nim.diseqcA.orbital_position, toneburstmode=diseqcParam.NO, diseqcmode=diseqcParam.V1_0, diseqcpos=diseqcParam.AA, diseqc13V=nim.diseqc13V.value, CircularLNB=currentCircular)
else:
self.addLNBSimple(sec, slotid=x, orbpos=nim.diseqcA.orbital_position, toneburstmode=diseqcParam.NO, diseqcmode=diseqcParam.NONE, diseqcpos=diseqcParam.SENDNO, diseqc13V=nim.diseqc13V.value, CircularLNB=currentCircular)
elif nim.diseqcMode.value == "toneburst_a_b": #Toneburst A/B
self.addLNBSimple(sec, slotid=x, orbpos=nim.diseqcA.orbital_position, toneburstmode=diseqcParam.A, diseqcmode=diseqcParam.V1_0, diseqcpos=diseqcParam.SENDNO, diseqc13V=nim.diseqc13V.value)
self.addLNBSimple(sec, slotid=x, orbpos=nim.diseqcB.orbital_position, toneburstmode=diseqcParam.B, diseqcmode=diseqcParam.V1_0, diseqcpos=diseqcParam.SENDNO, diseqc13V=nim.diseqc13V.value)
elif nim.diseqcMode.value == "diseqc_a_b": #DiSEqC A/B
fastDiSEqC = nim.simpleDiSEqCOnlyOnSatChange.value
setVoltageTone = nim.simpleDiSEqCSetVoltageTone.value
self.addLNBSimple(sec, slotid=x, orbpos=nim.diseqcA.orbital_position, toneburstmode=diseqcParam.NO, diseqcmode=diseqcParam.V1_0, diseqcpos=diseqcParam.AA, fastDiSEqC=fastDiSEqC, setVoltageTone=setVoltageTone, diseqc13V=nim.diseqc13V.value)
self.addLNBSimple(sec, slotid=x, orbpos=nim.diseqcB.orbital_position, toneburstmode=diseqcParam.NO, diseqcmode=diseqcParam.V1_0, diseqcpos=diseqcParam.AB, fastDiSEqC=fastDiSEqC, setVoltageTone=setVoltageTone, diseqc13V=nim.diseqc13V.value)
elif nim.diseqcMode.value == "diseqc_a_b_c_d": #DiSEqC A/B/C/D
fastDiSEqC = nim.simpleDiSEqCOnlyOnSatChange.value
setVoltageTone = nim.simpleDiSEqCSetVoltageTone.value
self.addLNBSimple(sec, slotid=x, orbpos=nim.diseqcA.orbital_position, toneburstmode=diseqcParam.NO, diseqcmode=diseqcParam.V1_0, diseqcpos=diseqcParam.AA, fastDiSEqC=fastDiSEqC, setVoltageTone=setVoltageTone, diseqc13V=nim.diseqc13V.value)
self.addLNBSimple(sec, slotid=x, orbpos=nim.diseqcB.orbital_position, toneburstmode=diseqcParam.NO, diseqcmode=diseqcParam.V1_0, diseqcpos=diseqcParam.AB, fastDiSEqC=fastDiSEqC, setVoltageTone=setVoltageTone, diseqc13V=nim.diseqc13V.value)
self.addLNBSimple(sec, slotid=x, orbpos=nim.diseqcC.orbital_position, toneburstmode=diseqcParam.NO, diseqcmode=diseqcParam.V1_0, diseqcpos=diseqcParam.BA, fastDiSEqC=fastDiSEqC, setVoltageTone=setVoltageTone, diseqc13V=nim.diseqc13V.value)
self.addLNBSimple(sec, slotid=x, orbpos=nim.diseqcD.orbital_position, toneburstmode=diseqcParam.NO, diseqcmode=diseqcParam.V1_0, diseqcpos=diseqcParam.BB, fastDiSEqC=fastDiSEqC, setVoltageTone=setVoltageTone, diseqc13V=nim.diseqc13V.value)
elif nim.diseqcMode.value in ("positioner", "positioner_select"): #Positioner
clear_lastsatrotorposition = False
current_mode = 3
sat = 0
if nim.diseqcMode.value == "positioner_select":
current_mode = 4
sat = nim.userSatellitesList.value
if nim.latitudeOrientation.value == "north":
laValue = rotorParam.NORTH
else:
laValue = rotorParam.SOUTH
if nim.longitudeOrientation.value == "east":
loValue = rotorParam.EAST
else:
loValue = rotorParam.WEST
inputPowerDelta = nim.powerThreshold.value
useInputPower = False
turning_speed = 0
if nim.powerMeasurement.value:
useInputPower = True
turn_speed_dict = {"fast": rotorParam.FAST, "slow": rotorParam.SLOW}
if nim.turningSpeed.value in turn_speed_dict:
turning_speed = turn_speed_dict[nim.turningSpeed.value]
else:
beg_time = localtime(nim.fastTurningBegin.value)
end_time = localtime(nim.fastTurningEnd.value)
turning_speed = ((beg_time.tm_hour + 1) * 60 + beg_time.tm_min + 1) << 16
turning_speed |= (end_time.tm_hour + 1) * 60 + end_time.tm_min + 1
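								# Worked example (editor's note): begin 06:00 / end 22:00 packs as
								# ((6+1)*60 + 0 + 1) << 16 | ((22+1)*60 + 0 + 1) = 421 << 16 | 1381,
								# i.e. one-based begin/end minute counts in the high/low 16 bits.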
self.addLNBSimple(sec, slotid=x, diseqcmode=current_mode,
orbpos=sat,
longitude=nim.longitude.float,
loDirection=loValue,
latitude=nim.latitude.float,
laDirection=laValue,
turningSpeed=turning_speed,
useInputPower=useInputPower,
inputPowerDelta=inputPowerDelta,
diseqc13V=nim.diseqc13V.value)
elif nim.configMode.value == "advanced": #advanced config
clear_lastsatrotorposition = not self.NimManager.getRotorSatListForNim(x, only_first=True)
self.updateAdvanced(sec, x)
if clear_lastsatrotorposition and nim.lastsatrotorposition.value:
nim.lastsatrotorposition.value = ""
nim.lastsatrotorposition.save()
print "[SecConfigure] sec config completed"
def updateAdvanced(self, sec, slotid):
try:
if config.Nims[slotid].advanced.unicableconnected is not None:
if config.Nims[slotid].advanced.unicableconnected.value:
config.Nims[slotid].advanced.unicableconnectedTo.save_forced = True
self.linkNIMs(sec, slotid, int(config.Nims[slotid].advanced.unicableconnectedTo.value))
connto = self.getRoot(slotid, int(config.Nims[slotid].advanced.unicableconnectedTo.value))
if connto not in self.linked:
self.linked[connto] = []
self.linked[connto].append(slotid)
else:
config.Nims[slotid].advanced.unicableconnectedTo.save_forced = False
except:
pass
lnbSat = {}
for x in range(1, 72):
lnbSat[x] = []
#wildcard for all satellites ( for rotor )
for x in range(3601, 3605):
lnb = int(config.Nims[slotid].advanced.sat[x].lnb.value)
if lnb != 0:
for x in self.NimManager.satList:
print "[SecConfigure] add", x[0], "to", lnb
lnbSat[lnb].append(x[0])
#wildcard for user satellites ( for rotor )
for x in range(3605, 3607):
lnb = int(config.Nims[slotid].advanced.sat[x].lnb.value)
if lnb != 0:
userSatlist = config.Nims[slotid].advanced.sat[x].userSatellitesList.value
userSatlist = userSatlist.replace("]", "").replace("[", "")
for user_sat in self.NimManager.satList:
sat_str = str(user_sat[0])
if userSatlist and ("," not in userSatlist and sat_str == userSatlist) or ((', ' + sat_str + ',' in userSatlist) or (userSatlist.startswith(sat_str + ',')) or (userSatlist.endswith(', ' + sat_str))):
print "[SecConfigure] add", user_sat[0], "to", lnb
lnbSat[lnb].append(user_sat[0])
for x in self.NimManager.satList:
lnb = int(config.Nims[slotid].advanced.sat[x[0]].lnb.value)
if lnb != 0:
print "[SecConfigure] add", x[0], "to", lnb
lnbSat[lnb].append(x[0])
lnb = int(config.Nims[slotid].advanced.sat[3607].lnb.value)
if lnb != 0:
root_id = int(config.Nims[slotid].connectedTo.value)
rotor_sat_list = self.NimManager.getRotorSatListForNim(root_id)
if rotor_sat_list:
for x in rotor_sat_list:
print "[SecConfigure] add", x[0], "to", lnb
lnbSat[lnb].append(x[0])
else:
config.Nims[slotid].advanced.sat[3607].lnb.value = "0"
config.Nims[slotid].advanced.sat[3607].lnb.save()
if len(self.NimManager.getSatListForNim(slotid)) < 1:
config.Nims[slotid].configMode.value = "nothing"
config.Nims[slotid].configMode.save()
for x in range(1, 72):
if len(lnbSat[x]) > 0:
currLnb = config.Nims[slotid].advanced.lnb[x]
sec.addLNB()
if x == 71:
root_id = int(config.Nims[slotid].connectedTo.value)
sec.setLNBsatposdepends(root_id)
if x <= self.maxLnbNum:
sec.setLNBNum(x)
tunermask = 1 << slotid
if slotid in self.equal:
for slot in self.equal[slotid]:
tunermask |= (1 << slot)
if slotid in self.linked:
for slot in self.linked[slotid]:
tunermask |= (1 << slot)
if currLnb.lof.value != "unicable":
sec.setLNBSatCRformat(0) # Unicable / JESS disabled, 0 = SatCR_format_none
if currLnb.lof.value == "universal_lnb":
sec.setLNBLOFL(9750000)
sec.setLNBLOFH(10600000)
sec.setLNBThreshold(11700000)
elif currLnb.lof.value == "unicable":
sec.setLNBLOFL(currLnb.lofl.value * 1000)
sec.setLNBLOFH(currLnb.lofh.value * 1000)
sec.setLNBThreshold(currLnb.threshold.value * 1000)
sec.setLNBSatCR(currLnb.scrList.index)
sec.setLNBSatCRvco(currLnb.scrfrequency.value * 1000)
sec.setLNBSatCRPositionNumber(int(currLnb.positionNumber.value) + int(currLnb.positionsOffset.value))
sec.setLNBSatCRformat(currLnb.format.value == "jess" and 2 or 1)
sec.setLNBBootupTime(currLnb.bootuptime.value)
elif currLnb.lof.value == "c_band":
sec.setLNBLOFL(5150000)
sec.setLNBLOFH(5150000)
sec.setLNBThreshold(5150000)
elif currLnb.lof.value == "user_defined":
sec.setLNBLOFL(currLnb.lofl.value * 1000)
sec.setLNBLOFH(currLnb.lofh.value * 1000)
sec.setLNBThreshold(currLnb.threshold.value * 1000)
elif currLnb.lof.value == "circular_lnb":
sec.setLNBLOFL(10750000)
sec.setLNBLOFH(10750000)
sec.setLNBThreshold(10750000)
elif currLnb.lof.value == "ka_sat":
sec.setLNBLOFL(21200000)
sec.setLNBLOFH(21200000)
sec.setLNBThreshold(21200000)
if currLnb.increased_voltage.value:
sec.setLNBIncreasedVoltage(True)
else:
sec.setLNBIncreasedVoltage(False)
dm = currLnb.diseqcMode.value
if dm == "none":
sec.setDiSEqCMode(diseqcParam.NONE)
elif dm == "1_0":
sec.setDiSEqCMode(diseqcParam.V1_0)
elif dm == "1_1":
sec.setDiSEqCMode(diseqcParam.V1_1)
elif dm == "1_2":
sec.setDiSEqCMode(diseqcParam.V1_2)
if slotid in self.satposdepends:
for slot in self.satposdepends[slotid]:
tunermask |= (1 << slot)
if dm != "none":
if currLnb.toneburst.value == "none":
sec.setToneburst(diseqcParam.NO)
elif currLnb.toneburst.value == "A":
sec.setToneburst(diseqcParam.A)
elif currLnb.toneburst.value == "B":
sec.setToneburst(diseqcParam.B)
# Committed Diseqc Command
cdc = currLnb.commitedDiseqcCommand.value
c = {"none": diseqcParam.SENDNO,
"AA": diseqcParam.AA,
"AB": diseqcParam.AB,
"BA": diseqcParam.BA,
"BB": diseqcParam.BB}
if cdc in c:
sec.setCommittedCommand(c[cdc])
else:
sec.setCommittedCommand(long(cdc))
sec.setFastDiSEqC(currLnb.fastDiseqc.value)
sec.setSeqRepeat(currLnb.sequenceRepeat.value)
if currLnb.diseqcMode.value == "1_0":
currCO = currLnb.commandOrder1_0.value
sec.setRepeats(0)
else:
currCO = currLnb.commandOrder.value
udc = int(currLnb.uncommittedDiseqcCommand.value)
if udc > 0:
sec.setUncommittedCommand(0xF0 | (udc - 1))
else:
sec.setUncommittedCommand(0) # SENDNO
sec.setRepeats({"none": 0, "one": 1, "two": 2, "three": 3}[currLnb.diseqcRepeats.value])
setCommandOrder = False
# 0 "committed, toneburst",
# 1 "toneburst, committed",
# 2 "committed, uncommitted, toneburst",
# 3 "toneburst, committed, uncommitted",
# 4 "uncommitted, committed, toneburst"
# 5 "toneburst, uncommitted, commmitted"
order_map = {"ct": 0, "tc": 1, "cut": 2, "tcu": 3, "uct": 4, "tuc": 5}
sec.setCommandOrder(order_map[currCO])
if dm == "1_2":
latitude = currLnb.latitude.float
sec.setLatitude(latitude)
longitude = currLnb.longitude.float
sec.setLongitude(longitude)
if currLnb.latitudeOrientation.value == "north":
sec.setLaDirection(rotorParam.NORTH)
else:
sec.setLaDirection(rotorParam.SOUTH)
if currLnb.longitudeOrientation.value == "east":
sec.setLoDirection(rotorParam.EAST)
else:
sec.setLoDirection(rotorParam.WEST)
if currLnb.powerMeasurement.value:
sec.setUseInputpower(True)
sec.setInputpowerDelta(currLnb.powerThreshold.value)
turn_speed_dict = {"fast": rotorParam.FAST, "slow": rotorParam.SLOW}
if currLnb.turningSpeed.value in turn_speed_dict:
turning_speed = turn_speed_dict[currLnb.turningSpeed.value]
else:
beg_time = localtime(currLnb.fastTurningBegin.value)
end_time = localtime(currLnb.fastTurningEnd.value)
turning_speed = ((beg_time.tm_hour + 1) * 60 + beg_time.tm_min + 1) << 16
turning_speed |= (end_time.tm_hour + 1) * 60 + end_time.tm_min + 1
sec.setRotorTurningSpeed(turning_speed)
else:
sec.setUseInputpower(False)
sec.setLNBSlotMask(tunermask)
sec.setLNBPrio(int(currLnb.prio.value))
# finally add the orbital positions
for y in lnbSat[x]:
self.addSatellite(sec, y)
if x > 64:
satpos = x > 64 and (3606 - (70 - x)) or y
else:
satpos = y
currSat = config.Nims[slotid].advanced.sat[satpos]
if currSat.voltage.value == "polarization":
if config.Nims[slotid].diseqc13V.value:
sec.setVoltageMode(switchParam.HV_13)
else:
sec.setVoltageMode(switchParam.HV)
elif currSat.voltage.value == "13V":
sec.setVoltageMode(switchParam._14V)
elif currSat.voltage.value == "18V":
sec.setVoltageMode(switchParam._18V)
if currSat.tonemode.value == "band":
sec.setToneMode(switchParam.HILO)
elif currSat.tonemode.value == "on":
sec.setToneMode(switchParam.ON)
elif currSat.tonemode.value == "off":
sec.setToneMode(switchParam.OFF)
if not currSat.usals.value and x < 65:
sec.setRotorPosNum(currSat.rotorposition.value)
else:
sec.setRotorPosNum(0) #USALS
class NIM(object):
def __init__(self, slot, type, description, has_outputs=True, internally_connectable=None, multi_type={}, frontend_id=None, i2c=None, is_empty=False, supports_blind_scan=False, number_of_slots=0):
nim_types = ["DVB-S", "DVB-S2", "DVB-S2X", "DVB-C", "DVB-T", "DVB-T2", "ATSC"]
if type and type not in nim_types:
print "[NIM] warning: unknown NIM type %s, not using." % type
type = None
self.slot = slot
self.type = type
self.description = description
self.number_of_slots = number_of_slots
self.has_outputs = has_outputs
self.internally_connectable = internally_connectable
self.multi_type = multi_type
self.supports_blind_scan = supports_blind_scan
self.i2c = i2c
self.frontend_id = frontend_id
self.__is_empty = is_empty
self.compatible = {
None: (None,),
"DVB-S": ("DVB-S", None),
"DVB-C": ("DVB-C", None),
"DVB-T": ("DVB-T", None),
"DVB-S2": ("DVB-S", "DVB-S2", None),
"DVB-S2X": ("DVB-S", "DVB-S2", "DVB-S2X", None),
"DVB-C2": ("DVB-C", "DVB-C2", None),
"DVB-T2": ("DVB-T", "DVB-T2", None),
"ATSC": ("ATSC", None),
}
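		# editor's note: the table above means e.g. that a "DVB-S2" slot can
		# also tune plain "DVB-S" services, while a plain "DVB-T" slot cannot
		# handle "DVB-T2"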
# get multi type using delsys information
self.combined = False
if self.frontend_id is not None:
types = [type for type in nim_types if eDVBResourceManager.getInstance().frontendIsCompatible(self.frontend_id, type)]
if "DVB-T2" in types:
# DVB-T2 implies DVB-T support
types.remove("DVB-T")
if "DVB-S2" in types:
# DVB-S2 implies DVB-S support
types.remove("DVB-S")
if len(types) > 1:
self.multi_type = {}
self.combined = not(os.path.exists("/proc/stb/frontend/%d/mode" % self.frontend_id) or self.isFBCTuner())
for type in types:
self.multi_type[str(types.index(type))] = type
elif len(self.multi_type) > 1:
print "[NIM] DVB API not reporting tuner %d as multitype" % self.frontend_id
def getTunerTypesEnabled(self):
try:
if self.combined:
return [x for x in self.multi_type.values() if
self.config.configModeDVBS.value and x.startswith("DVB-S") or
self.config.configModeDVBC.value and x.startswith("DVB-C") or
self.config.configModeDVBT.value and x.startswith("DVB-T") or
self.config.configModeATSC.value and x.startswith("ATSC")]
except:
pass
return [self.getType()]
def isCompatible(self, what):
return self.isSupported() and bool([x for x in self.getTunerTypesEnabled() if what in self.compatible[x]])
def canBeCompatible(self, what):
return self.isSupported() and bool([x for x in self.multi_type.values() if what in self.compatible[x]] if self.multi_type else self.isCompatible(what))
def getType(self):
try:
if self.isCombined():
return [x for x in self.multi_type.values() if x.startswith("DVB-S")][0]
if self.isMultiType():
return self.multi_type[self.config.multiType.value]
except:
pass
return self.type
def connectableTo(self):
connectable = {
"DVB-S": ("DVB-S", "DVB-S2", "DVB-S2X"),
"DVB-C": ("DVB-C", "DVB-C2"),
"DVB-T": ("DVB-T", "DVB-T2"),
"DVB-S2": ("DVB-S", "DVB-S2", "DVB-S2X"),
"DVB-S2X": ("DVB-S", "DVB-S2", "DVB-S2X"),
"DVB-C2": ("DVB-C", "DVB-C2"),
"DVB-T2": ("DVB-T", "DVB-T2"),
"ATSC": ("ATSC"),
}
return connectable[self.getType()]
def getSlotID(self, slot=None):
return chr(ord('A') + (slot if slot is not None else self.slot))
def getSlotName(self, slot=None):
# get a friendly description for a slot name.
# we name them "Tuner A/B/C/...", because that's what's usually written on the back
# of the device.
return "%s %s" % (_("Tuner"), self.getSlotID(slot))
def getI2C(self):
return self.i2c
def hasOutputs(self):
return self.has_outputs
def internallyConnectableTo(self):
return self.internally_connectable
def setInternalLink(self):
if self.internally_connectable is not None:
print "[NimManager] setting internal link on frontend id", self.frontend_id
f = open("/proc/stb/frontend/%d/rf_switch" % self.frontend_id, "w")
f.write("internal")
f.close()
def removeInternalLink(self):
if self.internally_connectable is not None:
print "[NimManager] removing internal link on frontend id", self.frontend_id
f = open("/proc/stb/frontend/%d/rf_switch" % self.frontend_id, "w")
f.write("external")
f.close()
def isMultiType(self):
return not self.isCombined() and bool(len(self.multi_type))
def isCombined(self):
return self.combined
def isEmpty(self):
return self.__is_empty
# empty tuners are supported!
def isSupported(self):
return (self.frontend_id is not None) or self.__is_empty
def isMultistream(self):
multistream = self.frontend_id is not None and eDVBResourceManager.getInstance().frontendIsMultistream(self.frontend_id) or False
# HACK due to poor support for VTUNER_SET_FE_INFO
# When vtuner does not accept fe_info we have to fallback to detection using tuner name
# More tuner names will be added when confirmed as multistream (FE_CAN_MULTISTREAM)
if not multistream and "TBS" in self.description:
multistream = True
return multistream
def isT2MI(self):
# Check if t2mi feature exists using procfs due to missing FE_CAN_T2MI in DVB API
# TODO: Ask manufactures to add /proc/stb/frontend/%d/t2mi entry on supported frontends
return os.path.exists("/proc/stb/frontend/%d/t2mi" % self.frontend_id)
def supportsBlindScan(self):
return self.supports_blind_scan
# returns dict {<slotid>: <type>}
def getMultiTypeList(self):
return self.multi_type
def isFBCTuner(self):
return self.frontend_id is not None and (self.frontend_id / 8 + 1) * 8 <= self.number_of_slots and os.access("/proc/stb/frontend/%d/fbc_id" % self.frontend_id, os.F_OK)
def isFBCRoot(self):
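		# editor's note: on FBC hardware the first two slots of each group of
		# eight (only the first one for DVB-C) are physical roots, the rest links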
return self.isFBCTuner() and (self.slot % 8 < (self.getType() == "DVB-C" and 1 or 2))
def isFBCLink(self):
return self.isFBCTuner() and not (self.slot % 8 < (self.getType() == "DVB-C" and 1 or 2))
def isNotFirstFBCTuner(self):
return self.isFBCTuner() and self.slot % 8 and True
def getFriendlyType(self):
if self.multi_type.values():
returnValue = "/".join([x[1].replace("DVB-", "") for x in sorted([({"DVB-S": 1, "DVB-C": 2, "DVB-T": 3, "ATSC": 4}[x[:5]], x) for x in self.multi_type.values()])])
return "%s %s" % (_("Combined") if self.combined else _("MultiType"), returnValue if returnValue == 'ATSC' else "DVB-%s" % returnValue)
return self.getType() or _("empty")
def getFullDescription(self):
return self.empty and _("(empty)") or "%s (%s)" % (self.description, self.isSupported() and self.friendly_type or _("not supported"))
def getFriendlyFullDescription(self):
return "%s: %s" % (self.slot_name, self.getFullDescription())
def getFriendlyFullDescriptionCompressed(self):
if self.isFBCTuner():
return "%s-%s: %s" % (self.getSlotName(self.slot & ~7), self.getSlotID((self.slot & ~7) + 7), self.getFullDescription())
		# compress by combining dual tuners, checking whether the next tuner has an RF switch
elif self.frontend_id is not None and self.number_of_slots > self.frontend_id + 1 and os.access("/proc/stb/frontend/%d/rf_switch" % (self.frontend_id + 1), os.F_OK):
return "%s-%s: %s" % (self.slot_name, self.getSlotID(self.slot + 1), self.getFullDescription())
return self.getFriendlyFullDescription()
def isFBCLinkEnabled(self):
return self.isFBCLink() and (config.Nims[(self.slot >> 3 << 3)].configMode.value != "nothing" or self.getType() != "DVB-C" and config.Nims[(self.slot >> 3 << 3) + 1].configMode.value != "nothing")
def isEnabled(self):
return self.config_mode != "nothing" or self.isFBCLinkEnabled() or self.internally_connectable is not None and config.Nims[self.internally_connectable].configMode.value != "nothing"
slot_id = property(getSlotID)
slot_name = property(getSlotName)
friendly_full_description = property(getFriendlyFullDescription)
friendly_full_description_compressed = property(getFriendlyFullDescriptionCompressed)
friendly_type = property(getFriendlyType)
config_mode = property(lambda self: config.Nims[self.slot].configMode.value)
config = property(lambda self: config.Nims[self.slot])
empty = property(lambda self: self.getType() is None)
enabled = property(isEnabled)
class NimManager:
def __init__(self):
self.satList = []
self.cablesList = []
self.terrestrialsList = []
self.atscList = []
self.enumerateNIMs()
self.readTransponders()
InitNimManager(self) #init config stuff
def getConfiguredSats(self):
return self.sec.getConfiguredSats()
def getTransponders(self, pos, feid=None):
if pos in self.transponders:
if feid is None or self.nim_slots[feid].isMultistream():
return self.transponders[pos]
else: # remove multistream transponders
def isMultistreamTP(tp):
# since we are using Gold sequences there is no need to check the PLS Mode
return tp[5] == eDVBFrontendParametersSatellite.System_DVB_S2 and (tp[10] > eDVBFrontendParametersSatellite.No_Stream_Id_Filter or tp[12] > eDVBFrontendParametersSatellite.PLS_Default_Gold_Code)
return [tp for tp in self.transponders[pos] if not isMultistreamTP(tp)]
else:
return []
def getTranspondersCable(self, nim):
nimConfig = config.Nims[nim]
if nimConfig.configMode.value != "nothing" and nimConfig.cable.scan_type.value == "provider":
return self.transponderscable[self.cablesList[nimConfig.cable.scan_provider.index][0]]
return []
def getTranspondersTerrestrial(self, region):
return self.transpondersterrestrial[region]
def getTranspondersATSC(self, nim):
nimConfig = config.Nims[nim]
if nimConfig.configMode.value != "nothing":
return self.transpondersatsc[self.atscList[nimConfig.atsc.index][0]]
return []
def getCablesList(self):
return self.cablesList
def getCablesCountrycodeList(self):
countrycodes = []
for x in self.cablesList:
if x[2] and x[2] not in countrycodes:
countrycodes.append(x[2])
return countrycodes
def getCablesByCountrycode(self, countrycode):
if countrycode:
return [x for x in self.cablesList if x[2] == countrycode]
return []
def getCableDescription(self, nim):
return self.cablesList[config.Nims[nim].cable.scan_provider.index][0]
def getCableFlags(self, nim):
return self.cablesList[config.Nims[nim].cable.scan_provider.index][1]
def getCableCountrycode(self, nim):
return self.cablesList and self.cablesList[config.Nims[nim].cable.scan_provider.index][2] or None
def getTerrestrialsList(self):
return self.terrestrialsList
def getTerrestrialsCountrycodeList(self):
countrycodes = []
for x in self.terrestrialsList:
if x[2] and x[2] not in countrycodes:
countrycodes.append(x[2])
return countrycodes
def getTerrestrialsByCountrycode(self, countrycode):
if countrycode:
return [x for x in self.terrestrialsList if x[2] == countrycode]
return []
def getTerrestrialDescription(self, nim):
return self.terrestrialsList[config.Nims[nim].terrestrial.index][0]
def getTerrestrialFlags(self, nim):
return self.terrestrialsList[config.Nims[nim].terrestrial.index][1]
def getTerrestrialCountrycode(self, nim):
return self.terrestrialsList and self.terrestrialsList[config.Nims[nim].terrestrial.index][2] or None
def getSatDescription(self, pos):
return self.satellites[pos]
def sortFunc(self, x):
orbpos = x[0]
if orbpos > 1800:
return orbpos - 3600
else:
return orbpos + 1800
def readTransponders(self):
self.satellites = {}
self.transponders = {}
self.transponderscable = {}
self.transpondersterrestrial = {}
self.transpondersatsc = {}
db = eDVBDB.getInstance()
if self.hasNimType("DVB-S"):
print "[NimManager] Reading satellites.xml"
db.readSatellites(self.satList, self.satellites, self.transponders)
self.satList.sort() # sort by orbpos
if self.hasNimType("DVB-C") or self.hasNimType("DVB-T"):
print "[NimManager] Reading cables.xml"
db.readCables(self.cablesList, self.transponderscable)
print "[NimManager] Reading terrestrial.xml"
db.readTerrestrials(self.terrestrialsList, self.transpondersterrestrial)
if self.hasNimType("ATSC"):
print "[NimManager] Reading atsc.xml"
db.readATSC(self.atscList, self.transpondersatsc)
def enumerateNIMs(self):
# Enumerate available NIMs. This is currently very dreambox-centric and uses the /proc/bus/nim_sockets interface.
# The result will be stored in nim_slots.
# the content of /proc/bus/nim_sockets looks like:
# NIM Socket 0:
# Type: DVB-S
# Name: BCM4501 DVB-S2 NIM (internal)
# NIM Socket 1:
# Type: DVB-S
# Name: BCM4501 DVB-S2 NIM (internal)
# NIM Socket 2:
# Type: DVB-T
# Name: Philips TU1216
# NIM Socket 3:
# Type: DVB-S
# Name: Alps BSBE1 702A
#
# Type will be either "DVB-S", "DVB-S2", "DVB-T", "DVB-C" or None.
# nim_slots is an array which has exactly one entry for each slot, even for empty ones.
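# For the sample contents above, the parsing loop below would produce e.g.:
# entries[2] == {"type": "DVB-T", "name": "Philips TU1216", "isempty": False}
# (illustrative; real sockets may also report Frontend_Device, I2C_Device, etc.)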
self.nim_slots = []
if config.clientmode.enabled.value:
print "[NimManager][enumerateNIMs] Receiver in client mode. Local NIMs will be ignored."
return
try:
nimfile = open("/proc/bus/nim_sockets")
except IOError:
return
current_slot = None
entries = {}
for line in nimfile:
if not line:
break
line = line.strip()
if line.startswith("NIM Socket"):
parts = line.split(" ")
current_slot = int(parts[2][:-1])
entries[current_slot] = {}
elif line.startswith("Type:"):
entries[current_slot]["type"] = str(line[6:])
entries[current_slot]["isempty"] = False
elif line.startswith("Name:"):
entries[current_slot]["name"] = str(line[6:])
entries[current_slot]["isempty"] = False
elif line.startswith("Has_Outputs:"):
input = str(line[len("Has_Outputs:") + 1:])
entries[current_slot]["has_outputs"] = (input == "yes")
elif line.startswith("Internally_Connectable:"):
input = int(line[len("Internally_Connectable:") + 1:])
entries[current_slot]["internally_connectable"] = input
elif line.startswith("Supports_Blind_Scan:"):
input = str(line[len("Supports_Blind_Scan:") + 1:])
entries[current_slot]["supports_blind_scan"] = (input == "yes")
elif line.startswith("Frontend_Device:"):
input = int(line[len("Frontend_Device:") + 1:])
entries[current_slot]["frontend_device"] = input
elif line.startswith("Mode"):
# "Mode 0: DVB-T" -> ["Mode 0", "DVB-T"]
split = line.split(": ")
if len(split) > 1 and split[1]:
# "Mode 0" -> ["Mode", "0"]
split2 = split[0].split(" ")
modes = entries[current_slot].get("multi_type", {})
modes[split2[1]] = split[1].strip()
entries[current_slot]["multi_type"] = modes
elif line.startswith("I2C_Device:"):
input = int(line[len("I2C_Device:") + 1:])
entries[current_slot]["i2c"] = input
elif line.startswith("empty"):
entries[current_slot]["type"] = None
entries[current_slot]["name"] = _("N/A")
entries[current_slot]["isempty"] = True
nimfile.close()
self.number_of_slots = len(entries.keys())
for id, entry in entries.items():
if not ("name" in entry and "type" in entry):
entry["name"] = _("N/A")
entry["type"] = None
if "i2c" not in entry:
entry["i2c"] = None
if "has_outputs" not in entry:
entry["has_outputs"] = entry["name"] in SystemInfo["HasPhysicalLoopthrough"] # "Has_Outputs: yes" not in /proc/bus/nim_sockets NIM, but the physical loopthrough exist
if "frontend_device" in entry: # check if internally connectable
if os.path.exists("/proc/stb/frontend/%d/rf_switch" % entry["frontend_device"]) and (not id or entries[id]["name"] == entries[id - 1]["name"]):
entry["internally_connectable"] = entry["frontend_device"] - 1
else:
entry["internally_connectable"] = None
else:
entry["frontend_device"] = entry["internally_connectable"] = None
if "multi_type" not in entry:
entry["multi_type"] = {}
if "supports_blind_scan" not in entry:
entry["supports_blind_scan"] = False
self.nim_slots.append(NIM(slot=id, description=entry["name"], type=entry["type"], has_outputs=entry["has_outputs"], internally_connectable=entry["internally_connectable"], multi_type=entry["multi_type"], frontend_id=entry["frontend_device"], i2c=entry["i2c"], is_empty=entry["isempty"], supports_blind_scan=entry["supports_blind_scan"], number_of_slots=self.number_of_slots))
def hasNimType(self, chktype):
return any(slot.canBeCompatible(chktype) for slot in self.nim_slots)
def getNimType(self, slotid):
return self.nim_slots[slotid].type
def getNimDescription(self, slotid):
return self.nim_slots[slotid].friendly_full_description
def getNimName(self, slotid):
return self.nim_slots[slotid].description
def getNim(self, slotid):
return self.nim_slots[slotid]
def getI2CDevice(self, slotid):
return self.nim_slots[slotid].getI2C()
def getNimListOfType(self, type, exception=-1):
# returns a list of indexes for NIMs compatible to the given type, except for 'exception'
return [x.slot for x in self.nim_slots if x.isCompatible(type) and x.slot != exception]
def getEnabledNimListOfType(self, type, exception=-1):
def enabled(n):
if n.isCompatible(type) and n.slot != exception and n.config_mode != "nothing":
if type.startswith("DVB-S") and n.config_mode in ("loopthrough", "satposdepends"):
root_id = nimmanager.sec.getRoot(n.slot_id, int(n.config.connectedTo.value))
if n.type == nimmanager.nim_slots[root_id].type: # check if connected from a DVB-S to DVB-S2 Nim or vice versa
return False
return True
return [x.slot for x in self.nim_slots if x.slot != exception and enabled(x)]
# get a list with the friendly full description
def nimList(self):
return [slot.friendly_full_description for slot in self.nim_slots]
def nimListCompressed(self):
return [slot.friendly_full_description_compressed for slot in self.nim_slots if not(slot.isNotFirstFBCTuner() or slot.internally_connectable >= 0)]
def getSlotCount(self):
return len(self.nim_slots)
def hasOutputs(self, slotid):
return self.nim_slots[slotid].hasOutputs()
def nimInternallyConnectableTo(self, slotid):
return self.nim_slots[slotid].internallyConnectableTo()
def nimRemoveInternalLink(self, slotid):
self.nim_slots[slotid].removeInternalLink()
def canConnectTo(self, slotid):
slots = []
if self.nim_slots[slotid].internallyConnectableTo() is not None:
slots.append(self.nim_slots[slotid].internallyConnectableTo())
for type in self.nim_slots[slotid].connectableTo():
for slot in self.getNimListOfType(type, exception=slotid):
if slot not in slots and (self.hasOutputs(slot) or self.nim_slots[slotid].isFBCRoot()):
slots.append(slot)
# remove NIMs that already have a connectedTo (loopthrough) reference pointing at them
# (the ">> 3" below compares FBC groups, i.e. blocks of eight slots)
for testnim in slots[:]:
for nim in self.getNimListOfType("DVB-S", slotid):
nimConfig = self.getNimConfig(nim)
if not(self.nim_slots[testnim].isFBCRoot() and slotid >> 3 == testnim >> 3) and (self.nim_slots[nim].isFBCLink() or "configMode" in nimConfig.content.items and nimConfig.configMode.value == "loopthrough" and int(nimConfig.connectedTo.value) == testnim):
slots.remove(testnim)
break
return slots
def canEqualTo(self, slotid):
type = self.getNimType(slotid)
type = type[:5] # DVB-S2X --> DVB-S, DVB-S2 --> DVB-S, DVB-T2 --> DVB-T, DVB-C2 --> DVB-C
nimList = self.getNimListOfType(type, slotid)
for nim in nimList[:]:
mode = self.getNimConfig(nim)
if self.nim_slots[nim].isFBCLink() or mode.configMode.value in ("loopthrough", "satposdepends", "equal"):
nimList.remove(nim)
return nimList
def canDependOn(self, slotid, advanced_satposdepends=""):
type = self.getNimType(slotid)
type = type[:5] # DVB-S2X --> DVB-S, DVB-S2 --> DVB-S, DVB-T2 --> DVB-T, DVB-C2 --> DVB-C
nimList = self.getNimListOfType(type, slotid)
positionerList = []
for nim in nimList[:]:
mode = self.getNimConfig(nim)
nimHaveRotor = mode.configMode.value == "simple" and mode.diseqcMode.value in ("positioner", "positioner_select")
if not nimHaveRotor and mode.configMode.value == "advanced":
for x in range(3601, 3607):
lnb = int(mode.advanced.sat[x].lnb.value)
if lnb != 0:
nimHaveRotor = True
break
if not nimHaveRotor:
for sat in mode.advanced.sat.values():
lnb_num = int(sat.lnb.value)
diseqcmode = lnb_num and mode.advanced.lnb[lnb_num].diseqcMode.value or ""
if diseqcmode == "1_2":
nimHaveRotor = True
break
if nimHaveRotor:
if advanced_satposdepends:
if advanced_satposdepends == "all" or self.nim_slots[nim].isFBCRoot():
positionerList.append(nim)
else:
alreadyConnected = False
for testnim in nimList:
testmode = self.getNimConfig(testnim)
if testmode.configMode.value == "satposdepends" and int(testmode.connectedTo.value) == int(nim):
alreadyConnected = True
break
if not alreadyConnected:
positionerList.append(nim)
return positionerList
def getNimConfig(self, slotid):
return config.Nims[slotid]
def getSatName(self, pos):
for sat in self.satList:
if sat[0] == pos:
return sat[1]
return _("N/A")
def getSatList(self):
return self.satList
# returns True if something is configured to be connected to this nim
# if slotid == -1, returns if something is connected to ANY nim
def somethingConnected(self, slotid=-1):
if slotid == -1:
for id in range(self.getSlotCount()):
if self.somethingConnected(id) and not (self.nim_slots[id].isFBCLink() or self.getNimConfig(id).configMode.value == "loopthrough"):
return True
return False
else:
nim = config.Nims[slotid]
configMode = nim.configMode.value
if any(self.nim_slots[slotid].isCompatible(x) for x in ("DVB-S", "DVB-T", "DVB-C", "ATSC")):
return not (configMode == "nothing")
def getSatListForNim(self, slotid):
list = []
if self.nim_slots[slotid].isCompatible("DVB-S"):
nim = config.Nims[slotid]
#print "slotid:", slotid
#print "self.satellites:", self.satList[config.Nims[slotid].diseqcA.index]
#print "diseqcA:", config.Nims[slotid].diseqcA.value
configMode = nim.configMode.value
if configMode == "equal":
slotid = int(nim.connectedTo.value)
nim = config.Nims[slotid]
configMode = nim.configMode.value
elif configMode == "loopthrough":
slotid = self.sec.getRoot(slotid, int(nim.connectedTo.value))
nim = config.Nims[slotid]
configMode = nim.configMode.value
if configMode == "simple":
dm = nim.diseqcMode.value
if dm in ("single", "toneburst_a_b", "diseqc_a_b", "diseqc_a_b_c_d"):
if nim.diseqcA.orbital_position < 3600:
list.append(self.satList[nim.diseqcA.index - 2])
if dm in ("toneburst_a_b", "diseqc_a_b", "diseqc_a_b_c_d"):
if nim.diseqcB.orbital_position < 3600:
list.append(self.satList[nim.diseqcB.index - 2])
if dm == "diseqc_a_b_c_d":
if nim.diseqcC.orbital_position < 3600:
list.append(self.satList[nim.diseqcC.index - 2])
if nim.diseqcD.orbital_position < 3600:
list.append(self.satList[nim.diseqcD.index - 2])
if dm == "positioner":
for x in self.satList:
list.append(x)
if dm == "positioner_select":
userSatlist = nim.userSatellitesList.value
userSatlist = userSatlist.replace("]", "").replace("[", "")
for x in self.satList:
sat_str = str(x[0])
if userSatlist and ("," not in userSatlist and sat_str == userSatlist) or ((', ' + sat_str + ',' in userSatlist) or (userSatlist.startswith(sat_str + ',')) or (userSatlist.endswith(', ' + sat_str))):
list.append(x)
elif configMode == "advanced":
for x in range(3601, 3605):
if int(nim.advanced.sat[x].lnb.value) != 0:
for x in self.satList:
list.append(x)
if not list:
for x in self.satList:
if int(nim.advanced.sat[x[0]].lnb.value) != 0:
list.append(x)
for x in range(3605, 3607):
if int(nim.advanced.sat[x].lnb.value) != 0:
userSatlist = nim.advanced.sat[x].userSatellitesList.value
userSatlist = userSatlist.replace("]", "").replace("[", "")
for user_sat in self.satList:
sat_str = str(user_sat[0])
if userSatlist and ("," not in userSatlist and sat_str == userSatlist) or ((', ' + sat_str + ',' in userSatlist) or (userSatlist.startswith(sat_str + ',')) or (userSatlist.endswith(', ' + sat_str))) and user_sat not in list:
list.append(user_sat)
return list
def getNimListForSat(self, orb_pos):
return [nim.slot for nim in self.nim_slots if nim.isCompatible("DVB-S") and not nim.isFBCLink() and orb_pos in [sat[0] for sat in self.getSatListForNim(nim.slot)]]
def rotorLastPositionForNim(self, slotid, number=True):
available_slot = False
for slot in self.nim_slots:
if slot.slot == slotid:
available_slot = True
break
if available_slot:
if self.getRotorSatListForNim(slotid, only_first=True):
lastrotorposition = secClass.getInstance().frontendLastRotorOrbitalPosition(slotid)
if lastrotorposition == -1:
return number and -1 or _("undefined")
else:
return number and lastrotorposition or orbStr(lastrotorposition)
else:
return number and 9999 or _("not valid frontend")
return number and 9998 or _("rotor is not used")
def getRotorSatListForNim(self, slotid, only_first=False):
list = []
if self.nim_slots[slotid].isCompatible("DVB-S"):
nim = config.Nims[slotid]
configMode = nim.configMode.value
if configMode == "simple":
if nim.diseqcMode.value == "positioner":
for x in self.satList:
if only_first:
return True
list.append(x)
elif nim.diseqcMode.value == "positioner_select":
userSatlist = nim.userSatellitesList.value
userSatlist = userSatlist.replace("]", "").replace("[", "")
for x in self.satList:
sat_str = str(x[0])
if userSatlist and ("," not in userSatlist and sat_str == userSatlist) or ((', ' + sat_str + ',' in userSatlist) or (userSatlist.startswith(sat_str + ',')) or (userSatlist.endswith(', ' + sat_str))):
if only_first:
return True
list.append(x)
elif configMode == "advanced":
for x in range(3601, 3605):
if int(nim.advanced.sat[x].lnb.value) != 0:
for x in self.satList:
if only_first:
return True
list.append(x)
if not list:
for x in self.satList:
lnbnum = int(nim.advanced.sat[x[0]].lnb.value)
if lnbnum != 0:
lnb = nim.advanced.lnb[lnbnum]
if lnb.diseqcMode.value == "1_2":
if only_first:
return True
list.append(x)
for x in range(3605, 3607):
if int(nim.advanced.sat[x].lnb.value) != 0:
userSatlist = nim.advanced.sat[x].userSatellitesList.value
userSatlist = userSatlist.replace("]", "").replace("[", "")
for user_sat in self.satList:
sat_str = str(user_sat[0])
if userSatlist and ("," not in userSatlist and sat_str == userSatlist) or ((', ' + sat_str + ',' in userSatlist) or (userSatlist.startswith(sat_str + ',')) or (userSatlist.endswith(', ' + sat_str))) and user_sat not in list:
if only_first:
return True
list.append(user_sat)
return list
def InitSecParams():
config.sec = ConfigSubsection()
config.sec.delay_after_continuous_tone_disable_before_diseqc = ConfigInteger(default=25, limits=(0, 9999))
config.sec.delay_after_final_continuous_tone_change = ConfigInteger(default=10, limits=(0, 9999))
config.sec.delay_after_final_voltage_change = ConfigInteger(default=10, limits=(0, 9999))
config.sec.delay_between_diseqc_repeats = ConfigInteger(default=120, limits=(0, 9999))
config.sec.delay_after_last_diseqc_command = ConfigInteger(default=100, limits=(0, 9999))
config.sec.delay_after_toneburst = ConfigInteger(default=50, limits=(0, 9999))
config.sec.delay_after_change_voltage_before_switch_command = ConfigInteger(default=75, limits=(0, 9999))
config.sec.delay_after_enable_voltage_before_switch_command = ConfigInteger(default=200, limits=(0, 9999))
config.sec.delay_between_switch_and_motor_command = ConfigInteger(default=700, limits=(0, 9999))
config.sec.delay_after_voltage_change_before_measure_idle_inputpower = ConfigInteger(default=500, limits=(0, 9999))
config.sec.delay_after_enable_voltage_before_motor_command = ConfigInteger(default=900, limits=(0, 9999))
config.sec.delay_after_motor_stop_command = ConfigInteger(default=500, limits=(0, 9999))
config.sec.delay_after_voltage_change_before_motor_command = ConfigInteger(default=500, limits=(0, 9999))
config.sec.delay_before_sequence_repeat = ConfigInteger(default=70, limits=(0, 9999))
config.sec.motor_running_timeout = ConfigInteger(default=360, limits=(0, 9999))
config.sec.motor_command_retries = ConfigInteger(default=1, limits=(0, 5))
config.sec.delay_after_diseqc_reset_cmd = ConfigInteger(default=50, limits=(0, 9999))
config.sec.delay_after_diseqc_peripherial_poweron_cmd = ConfigInteger(default=150, limits=(0, 9999))
config.sec.unicable_delay_after_enable_voltage_before_switch_command = ConfigInteger(default=200, limits=(0, 9999))
config.sec.unicable_delay_after_change_voltage_before_switch_command = ConfigInteger(default=75, limits=(0, 9999))
config.sec.unicable_delay_after_last_diseqc_command = ConfigInteger(default=150, limits=(0, 9999))
config.sec.delay_before_sequence_repeat.addNotifier(lambda configElement: secClass.setParam(secClass.DELAY_BEFORE_SEQUENCE_REPEAT, configElement.value))
config.sec.motor_running_timeout.addNotifier(lambda configElement: secClass.setParam(secClass.MOTOR_RUNNING_TIMEOUT, configElement.value))
config.sec.motor_command_retries.addNotifier(lambda configElement: secClass.setParam(secClass.MOTOR_COMMAND_RETRIES, configElement.value))
config.sec.delay_after_diseqc_reset_cmd.addNotifier(lambda configElement: secClass.setParam(secClass.DELAY_AFTER_DISEQC_RESET_CMD, configElement.value))
config.sec.delay_after_diseqc_peripherial_poweron_cmd.addNotifier(lambda configElement: secClass.setParam(secClass.DELAY_AFTER_DISEQC_PERIPHERIAL_POWERON_CMD, configElement.value))
config.sec.delay_after_voltage_change_before_motor_command.addNotifier(lambda configElement: secClass.setParam(secClass.DELAY_AFTER_VOLTAGE_CHANGE_BEFORE_MOTOR_CMD, configElement.value))
config.sec.delay_after_motor_stop_command.addNotifier(lambda configElement: secClass.setParam(secClass.DELAY_AFTER_MOTOR_STOP_CMD, configElement.value))
config.sec.delay_after_enable_voltage_before_motor_command.addNotifier(lambda configElement: secClass.setParam(secClass.DELAY_AFTER_ENABLE_VOLTAGE_BEFORE_MOTOR_CMD, configElement.value))
config.sec.delay_after_voltage_change_before_measure_idle_inputpower.addNotifier(lambda configElement: secClass.setParam(secClass.DELAY_AFTER_VOLTAGE_CHANGE_BEFORE_MEASURE_IDLE_INPUTPOWER, configElement.value))
config.sec.delay_between_switch_and_motor_command.addNotifier(lambda configElement: secClass.setParam(secClass.DELAY_BETWEEN_SWITCH_AND_MOTOR_CMD, configElement.value))
config.sec.delay_after_enable_voltage_before_switch_command.addNotifier(lambda configElement: secClass.setParam(secClass.DELAY_AFTER_ENABLE_VOLTAGE_BEFORE_SWITCH_CMDS, configElement.value))
config.sec.delay_after_change_voltage_before_switch_command.addNotifier(lambda configElement: secClass.setParam(secClass.DELAY_AFTER_VOLTAGE_CHANGE_BEFORE_SWITCH_CMDS, configElement.value))
config.sec.delay_after_toneburst.addNotifier(lambda configElement: secClass.setParam(secClass.DELAY_AFTER_TONEBURST, configElement.value))
config.sec.delay_after_last_diseqc_command.addNotifier(lambda configElement: secClass.setParam(secClass.DELAY_AFTER_LAST_DISEQC_CMD, configElement.value))
config.sec.delay_between_diseqc_repeats.addNotifier(lambda configElement: secClass.setParam(secClass.DELAY_BETWEEN_DISEQC_REPEATS, configElement.value))
config.sec.delay_after_final_voltage_change.addNotifier(lambda configElement: secClass.setParam(secClass.DELAY_AFTER_FINAL_VOLTAGE_CHANGE, configElement.value))
config.sec.delay_after_final_continuous_tone_change.addNotifier(lambda configElement: secClass.setParam(secClass.DELAY_AFTER_FINAL_CONT_TONE_CHANGE, configElement.value))
config.sec.delay_after_continuous_tone_disable_before_diseqc.addNotifier(lambda configElement: secClass.setParam(secClass.DELAY_AFTER_CONT_TONE_DISABLE_BEFORE_DISEQC, configElement.value))
config.sec.unicable_delay_after_enable_voltage_before_switch_command.addNotifier(lambda configElement: secClass.setParam(secClass.UNICABLE_DELAY_AFTER_ENABLE_VOLTAGE_BEFORE_SWITCH_CMDS, configElement.value))
config.sec.unicable_delay_after_change_voltage_before_switch_command.addNotifier(lambda configElement: secClass.setParam(secClass.UNICABLE_DELAY_AFTER_VOLTAGE_CHANGE_BEFORE_SWITCH_CMDS, configElement.value))
config.sec.unicable_delay_after_last_diseqc_command.addNotifier(lambda configElement: secClass.setParam(secClass.UNICABLE_DELAY_AFTER_LAST_DISEQC_CMD, configElement.value))
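# Every notifier registered above pushes its configured delay straight into
# the SEC driver via secClass.setParam() whenever the value changes, so no
# separate "apply" step is needed for these parameters.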
# TODO: add support for satpos-depending NIMs to the advanced NIM configuration,
# so a second/third/fourth cable from a motorized LNB can be used behind a
# DiSEqC 1.0 / DiSEqC 1.1 / toneburst switch.
# The C(++) part should already be able to handle this;
# the configElement should only be visible when DiSEqC 1.2 is disabled.
def InitNimManager(nimmgr, update_slots=[]):
hw = HardwareInfo()
if not hasattr(config, "Nims"):
InitSecParams()
config.Nims = ConfigSubList()
for x in range(len(nimmgr.nim_slots)):
config.Nims.append(ConfigSubsection())
lnb_choices = {
"universal_lnb": _("Universal LNB"),
"unicable": _("SCR (Unicable/JESS)"),
"c_band": _("C-Band"),
"circular_lnb": _("Circular LNB"),
"ka_sat": _("KA-SAT"),
"user_defined": _("User defined")}
lnb_choices_default = "universal_lnb"
prio_list = [("-1", _("Auto"))]
for prio in range(65) + range(14000, 14065) + range(19000, 19065):
description = ""
if prio == 0:
description = _(" (disabled)")
elif 0 < prio < 65:
description = _(" (lower than any auto)")
elif 13999 < prio < 14066:
description = _(" (higher than rotor any auto)")
elif 18999 < prio < 19066:
description = _(" (higher than any auto)")
prio_list.append((str(prio), str(prio) + description))
advanced_lnb_csw_choices = [("none", _("None")), ("AA", _("Port A")), ("AB", _("Port B")), ("BA", _("Port C")), ("BB", _("Port D"))]
advanced_lnb_ucsw_choices = [("0", _("None"))] + [(str(y), _("Input ") + str(y)) for y in range(1, 17)]
diseqc_mode_choices = [
("single", _("Single")), ("toneburst_a_b", _("Toneburst A/B")),
("diseqc_a_b", "DiSEqC A/B"), ("diseqc_a_b_c_d", "DiSEqC A/B/C/D"),
("positioner", _("Positioner")), ("positioner_select", _("Positioner (selecting satellites)"))]
positioner_mode_choices = [("usals", _("USALS")), ("manual", _("manual"))]
diseqc_satlist_choices = [(3600, _('automatic'), 1), (3601, _('nothing connected'), 1)] + nimmgr.satList
longitude_orientation_choices = [("east", _("East")), ("west", _("West"))]
latitude_orientation_choices = [("north", _("North")), ("south", _("South"))]
turning_speed_choices = [("fast", _("Fast")), ("slow", _("Slow")), ("fast epoch", _("Fast epoch"))]
advanced_satlist_choices = nimmgr.satList + [
(3601, _('All satellites 1 (USALS)'), 1), (3602, _('All satellites 2 (USALS)'), 1),
(3603, _('All satellites 3 (USALS)'), 1), (3604, _('All satellites 4 (USALS)'), 1), (3605, _('Selecting satellites 1 (USALS)'), 1), (3606, _('Selecting satellites 2 (USALS)'), 1)]
advanced_lnb_choices = [("0", _("not configured"))] + [(str(y), "LNB " + str(y)) for y in range(1, 65)]
advanced_voltage_choices = [("polarization", _("Polarization")), ("13V", _("13 V")), ("18V", _("18 V"))]
advanced_tonemode_choices = [("band", _("Band")), ("on", _("On")), ("off", _("Off"))]
advanced_lnb_toneburst_choices = [("none", _("None")), ("A", _("A")), ("B", _("B"))]
advanced_lnb_allsat_diseqcmode_choices = [("1_2", _("1.2"))]
advanced_lnb_satposdepends_diseqcmode_choices = [("none", _("None")), ("1_0", _("1.0")), ("1_1", _("1.1"))]
advanced_lnb_diseqcmode_choices = [("none", _("None")), ("1_0", _("1.0")), ("1_1", _("1.1")), ("1_2", _("1.2"))]
advanced_lnb_commandOrder1_0_choices = [("ct", "DiSEqC 1.0, toneburst"), ("tc", "toneburst, DiSEqC 1.0")]
advanced_lnb_commandOrder_choices = [
("ct", "DiSEqC 1.0, toneburst"), ("tc", "toneburst, DiSEqC 1.0"),
("cut", "DiSEqC 1.0, DiSEqC 1.1, toneburst"), ("tcu", "toneburst, DiSEqC 1.0, DiSEqC 1.1"),
("uct", "DiSEqC 1.1, DiSEqC 1.0, toneburst"), ("tuc", "toneburst, DiSEqC 1.1, DiSEqC 1.0")]
advanced_lnb_diseqc_repeat_choices = [("none", _("None")), ("one", _("One")), ("two", _("Two")), ("three", _("Three"))]
advanced_lnb_fast_turning_btime = mktime(datetime(1970, 1, 1, 7, 0).timetuple())
advanced_lnb_fast_turning_etime = mktime(datetime(1970, 1, 1, 19, 0).timetuple())
def configLOFChanged(configElement):
if configElement.value == "unicable":
x = configElement.slot_id
lnb = configElement.lnb_id
nim = config.Nims[x]
lnbs = nim.advanced.lnb
section = lnbs[lnb]
if isinstance(section.unicable, ConfigNothing):
def setPowerInserter(configEntry):
section.bootuptime.value = 0 if configEntry.value else section.bootuptime.default
def getformat(value, index):
return ("jess" if index >= int(value.split(",")[1] if "," in value else 4) else "unicable") if value.startswith("dSCR") else value
def positionsChanged(configEntry):
section.positionNumber = ConfigSelection(["%d" % (x + 1) for x in range(configEntry.value)], default="%d" % min(lnb, configEntry.value))
def scrListChanged(productparameters, srcfrequencylist, configEntry):
section.format = ConfigSelection([("unicable", _("SCR Unicable")), ("jess", _("SCR JESS"))], default=getformat(productparameters.get("format", "unicable"), configEntry.index))
section.scrfrequency = ConfigInteger(default=int(srcfrequencylist[configEntry.index]))
section.positions = ConfigInteger(default=int(productparameters.get("positions", 1)))
section.positions.addNotifier(positionsChanged)
section.positionsOffset = ConfigInteger(default=int(productparameters.get("positionsoffset", 0)))
section.lofl = ConfigInteger(default=int(productparameters.get("lofl", 9750)), limits=(0, 99999))
section.lofh = ConfigInteger(default=int(productparameters.get("lofh", 10600)), limits=(0, 99999))
section.threshold = ConfigInteger(default=int(productparameters.get("threshold", 11700)), limits=(0, 99999))
def unicableProductChanged(manufacturer, lnb_or_matrix, configEntry):
config.unicable.unicableProduct.value = configEntry.value
config.unicable.unicableProduct.save()
productparameters = [p for p in [m.getchildren() for m in unicable_xml.find(lnb_or_matrix) if m.get("name") == manufacturer][0] if p.get("name") == configEntry.value][0]
section.bootuptime = ConfigInteger(default=int(productparameters.get("bootuptime", 1000)), limits=(0, 9999))
section.bootuptime.save_forced = True
section.powerinserter = ConfigYesNo(default=SystemInfo["FbcTunerPowerAlwaysOn"])
section.powerinserter.save_forced = True
section.powerinserter.addNotifier(setPowerInserter)
srcfrequencylist = productparameters.get("scrs").split(",")
section.scrList = ConfigSelection([("%d" % (x + 1), "User Band %d (%s)" % ((x + 1), srcfrequencylist[x])) for x in range(len(srcfrequencylist))])
section.scrList.save_forced = True
section.scrList.addNotifier(boundFunction(scrListChanged, productparameters, srcfrequencylist))
def unicableManufacturerChanged(lnb_or_matrix, configEntry):
config.unicable.unicableManufacturer.value = configEntry.value
config.unicable.unicableManufacturer.save()
productslist = [p.get("name") for p in [m.getchildren() for m in unicable_xml.find(lnb_or_matrix) if m.get("name") == configEntry.value][0]]
if not config.unicable.content.items.get("unicableProduct", False) or config.unicable.unicableProduct.value not in productslist:
config.unicable.unicableProduct = ConfigSelection(productslist)
config.unicable.unicableProduct.save_forced = True
section.unicableProduct = ConfigSelection(productslist, default=config.unicable.unicableProduct.value)
section.unicableProduct.save_forced = True
section.unicableProduct.addNotifier(boundFunction(unicableProductChanged, configEntry.value, lnb_or_matrix))
def userScrListChanged(srcfrequencyList, configEntry):
section.scrfrequency = ConfigInteger(default=int(srcfrequencyList[configEntry.index]), limits=(0, 99999))
section.lofl = ConfigInteger(default=9750, limits=(0, 99999))
section.lofh = ConfigInteger(default=10600, limits=(0, 99999))
section.threshold = ConfigInteger(default=11700, limits=(0, 99999))
def formatChanged(configEntry):
section.positions = ConfigInteger(default=configEntry.value == "jess" and 64 or 2)
section.positions.addNotifier(positionsChanged)
section.positionsOffset = ConfigInteger(default=0)
section.scrList = ConfigSelection([("%d" % (x + 1), "User Band %d" % (x + 1)) for x in range(configEntry.value == "jess" and 32 or 8)])
section.scrList.save_forced = True
srcfrequencyList = configEntry.value == "jess" and (1210, 1420, 1680, 2040, 984, 1020, 1056, 1092, 1128, 1164, 1256, 1292, 1328, 1364, 1458, 1494, 1530, 1566, 1602,
1638, 1716, 1752, 1788, 1824, 1860, 1896, 1932, 1968, 2004, 2076, 2112, 2148) or (1284, 1400, 1516, 1632, 1748, 1864, 1980, 2096)
section.scrList.addNotifier(boundFunction(userScrListChanged, srcfrequencyList))
section.bootuptime = ConfigInteger(default=1000, limits=(0, 9999))
section.bootuptime.save_forced = True
section.powerinserter = ConfigYesNo(default=SystemInfo["FbcTunerPowerAlwaysOn"])
section.powerinserter.save_forced = True
section.powerinserter.addNotifier(setPowerInserter)
def unicableChanged(configEntry):
config.unicable.unicable.value = configEntry.value
config.unicable.unicable.save()
if configEntry.value == "unicable_matrix":
manufacturerlist = [m.get("name") for m in unicable_xml.find("matrix")]
if not config.unicable.content.items.get("unicableManufacturer", False) or config.unicable.unicableManufacturer.value not in manufacturerlist:
config.unicable.unicableManufacturer = ConfigSelection(manufacturerlist)
section.unicableManufacturer = ConfigSelection(manufacturerlist, default=config.unicable.unicableManufacturer.value)
section.unicableManufacturer.save_forced = True
config.unicable.unicableManufacturer.save_forced = True
section.unicableManufacturer.addNotifier(boundFunction(unicableManufacturerChanged, "matrix"))
elif configEntry.value == "unicable_lnb":
manufacturerlist = [m.get("name") for m in unicable_xml.find("lnb")]
if not config.unicable.content.items.get("unicableManufacturer", False) or config.unicable.unicableManufacturer.value not in manufacturerlist:
config.unicable.unicableManufacturer = ConfigSelection(manufacturerlist)
section.unicableManufacturer = ConfigSelection(manufacturerlist, default=config.unicable.unicableManufacturer.value)
section.unicableManufacturer.save_forced = True
config.unicable.unicableManufacturer.save_forced = True
section.unicableManufacturer.addNotifier(boundFunction(unicableManufacturerChanged, "lnb"))
else:
section.format = ConfigSelection([("unicable", _("SCR Unicable")), ("jess", _("SCR JESS"))])
section.format.addNotifier(formatChanged)
unicable_xml = xml.etree.cElementTree.parse(eEnv.resolve("${datadir}/enigma2/unicable.xml")).getroot()
unicableList = [("unicable_lnb", _("SCR (Unicable/JESS)") + " " + _("LNB")), ("unicable_matrix", _("SCR (Unicable/JESS)") + " " + _("Switch")), ("unicable_user", _("SCR (Unicable/JESS)") + " " + _("User defined"))]
if not config.unicable.content.items.get("unicable", False):
config.unicable.unicable = ConfigSelection(unicableList)
section.unicable = ConfigSelection(unicableList, default=config.unicable.unicable.value)
section.unicable.addNotifier(unicableChanged)
nim.advanced.unicableconnected = ConfigYesNo(default=False)
nim.advanced.unicableconnectedTo = ConfigSelection([(str(id), nimmgr.getNimDescription(id)) for id in nimmgr.getNimListOfType("DVB-S") if id != x])
def configDiSEqCModeChanged(configElement):
section = configElement.section
if configElement.value == "1_2" and isinstance(section.longitude, ConfigNothing):
section.longitude = ConfigFloat(default=[5, 100], limits=[(0, 359), (0, 999)])
section.longitudeOrientation = ConfigSelection(longitude_orientation_choices, "east")
section.latitude = ConfigFloat(default=[50, 767], limits=[(0, 359), (0, 999)])
section.latitudeOrientation = ConfigSelection(latitude_orientation_choices, "north")
section.tuningstepsize = ConfigFloat(default=[0, 360], limits=[(0, 9), (0, 999)])
section.rotorPositions = ConfigInteger(default=99, limits=[1, 999])
section.turningspeedH = ConfigFloat(default=[2, 3], limits=[(0, 9), (0, 9)])
section.turningspeedV = ConfigFloat(default=[1, 7], limits=[(0, 9), (0, 9)])
section.powerMeasurement = ConfigYesNo(default=True)
section.powerThreshold = ConfigInteger(default=15, limits=(0, 100))
section.turningSpeed = ConfigSelection(turning_speed_choices, "fast")
section.fastTurningBegin = ConfigDateTime(default=advanced_lnb_fast_turning_btime, formatstring=_("%H:%M"), increment=600)
section.fastTurningEnd = ConfigDateTime(default=advanced_lnb_fast_turning_etime, formatstring=_("%H:%M"), increment=600)
def configLNBChanged(configElement):
x = configElement.slot_id
nim = config.Nims[x]
if isinstance(configElement.value, tuple):
lnb = int(configElement.value[0])
else:
lnb = int(configElement.value)
lnbs = nim.advanced.lnb
if lnb and lnb not in lnbs:
section = lnbs[lnb] = ConfigSubsection()
section.lofl = ConfigInteger(default=9750, limits=(0, 99999))
section.lofh = ConfigInteger(default=10600, limits=(0, 99999))
section.threshold = ConfigInteger(default=11700, limits=(0, 99999))
section.increased_voltage = ConfigYesNo(False)
section.toneburst = ConfigSelection(advanced_lnb_toneburst_choices, "none")
section.longitude = ConfigNothing()
if 64 < lnb < 71:
tmp = ConfigSelection(advanced_lnb_allsat_diseqcmode_choices, "1_2")
tmp.section = section
configDiSEqCModeChanged(tmp)
else:
tmp = ConfigSelection(lnb == 71 and advanced_lnb_satposdepends_diseqcmode_choices or advanced_lnb_diseqcmode_choices, "none")
tmp.section = section
tmp.addNotifier(configDiSEqCModeChanged)
section.diseqcMode = tmp
section.commitedDiseqcCommand = ConfigSelection(advanced_lnb_csw_choices)
section.fastDiseqc = ConfigYesNo(False)
section.sequenceRepeat = ConfigYesNo(False)
section.commandOrder1_0 = ConfigSelection(advanced_lnb_commandOrder1_0_choices, "ct")
section.commandOrder = ConfigSelection(advanced_lnb_commandOrder_choices, "ct")
section.uncommittedDiseqcCommand = ConfigSelection(advanced_lnb_ucsw_choices)
section.diseqcRepeats = ConfigSelection(advanced_lnb_diseqc_repeat_choices, "none")
section.prio = ConfigSelection(prio_list, "-1")
section.unicable = ConfigNothing()
tmp = ConfigSelection(lnb_choices, lnb_choices_default)
tmp.slot_id = x
tmp.lnb_id = lnb
tmp.addNotifier(configLOFChanged)
section.lof = tmp
def scpcSearchRangeChanged(configElement):
fe_id = configElement.fe_id
slot_id = configElement.slot_id
if os.path.exists("/proc/stb/frontend/%d/use_scpc_optimized_search_range" % fe_id):
f = open("/proc/stb/frontend/%d/use_scpc_optimized_search_range" % (fe_id), "w")
f.write(configElement.value)
f.close()
def toneAmplitudeChanged(configElement):
fe_id = configElement.fe_id
slot_id = configElement.slot_id
if os.path.exists("/proc/stb/frontend/%d/tone_amplitude" % fe_id):
f = open("/proc/stb/frontend/%d/tone_amplitude" % fe_id, "w")
f.write(configElement.value)
f.close()
def t2miRawModeChanged(configElement):
slot = configElement.slot
if os.path.exists("/proc/stb/frontend/%d/t2mirawmode" % slot):
open("/proc/stb/frontend/%d/t2mirawmode" % slot, "w").write(configElement.value)
def createSatConfig(nim, slot_id):
nim.toneAmplitude = ConfigSelection([("11", "340mV"), ("10", "360mV"), ("9", "600mV"), ("8", "700mV"), ("7", "800mV"), ("6", "900mV"), ("5", "1100mV")], "7")
nim.toneAmplitude.fe_id = slot_id
nim.toneAmplitude.slot_id = slot_id
nim.toneAmplitude.addNotifier(toneAmplitudeChanged)
nim.scpcSearchRange = ConfigSelection([("0", _("no")), ("1", _("yes"))], "0")
nim.scpcSearchRange.fe_id = slot_id
nim.scpcSearchRange.slot_id = slot_id
nim.scpcSearchRange.addNotifier(scpcSearchRangeChanged)
nim.t2miRawMode = ConfigSelection([("disable", _("disabled")), ("enable", _("enabled"))], "disable")
nim.t2miRawMode.slot = slot_id
nim.t2miRawMode.addNotifier(t2miRawModeChanged)
nim.diseqc13V = ConfigYesNo(False)
nim.diseqcMode = ConfigSelection(diseqc_mode_choices, "single")
nim.connectedTo = ConfigSelection([(str(id), nimmgr.getNimDescription(id)) for id in nimmgr.getNimListOfType("DVB-S") if id != slot_id])
nim.simpleSingleSendDiSEqC = ConfigYesNo(False)
nim.simpleDiSEqCSetVoltageTone = ConfigYesNo(True)
nim.simpleDiSEqCOnlyOnSatChange = ConfigYesNo(False)
nim.simpleDiSEqCSetCircularLNB = ConfigYesNo(True)
nim.diseqcA = ConfigSatlist(list=diseqc_satlist_choices)
nim.diseqcB = ConfigSatlist(list=diseqc_satlist_choices)
nim.diseqcC = ConfigSatlist(list=diseqc_satlist_choices)
nim.diseqcD = ConfigSatlist(list=diseqc_satlist_choices)
nim.positionerMode = ConfigSelection(positioner_mode_choices, "usals")
nim.userSatellitesList = ConfigText('[]')
nim.pressOKtoList = ConfigNothing()
nim.longitude = ConfigFloat(default=[5, 100], limits=[(0, 359), (0, 999)])
nim.longitudeOrientation = ConfigSelection(longitude_orientation_choices, "east")
nim.latitude = ConfigFloat(default=[50, 767], limits=[(0, 359), (0, 999)])
nim.latitudeOrientation = ConfigSelection(latitude_orientation_choices, "north")
nim.tuningstepsize = ConfigFloat(default=[0, 360], limits=[(0, 9), (0, 999)])
nim.rotorPositions = ConfigInteger(default=99, limits=[1, 999])
nim.lastsatrotorposition = ConfigText()
nim.turningspeedH = ConfigFloat(default=[2, 3], limits=[(0, 9), (0, 9)])
nim.turningspeedV = ConfigFloat(default=[1, 7], limits=[(0, 9), (0, 9)])
nim.powerMeasurement = ConfigYesNo(True)
nim.powerThreshold = ConfigInteger(default=hw.get_device_name() == "dm8000" and 15 or 50, limits=(0, 100))
nim.turningSpeed = ConfigSelection(turning_speed_choices, "fast")
btime = datetime(1970, 1, 1, 7, 0)
nim.fastTurningBegin = ConfigDateTime(default=mktime(btime.timetuple()), formatstring=_("%H:%M"), increment=900)
etime = datetime(1970, 1, 1, 19, 0)
nim.fastTurningEnd = ConfigDateTime(default=mktime(etime.timetuple()), formatstring=_("%H:%M"), increment=900)
nim.advanced = ConfigSubsection()
nim.advanced.sat = ConfigSubDict()
nim.advanced.sats = getConfigSatlist(192, advanced_satlist_choices)
nim.advanced.lnb = ConfigSubDict()
nim.advanced.lnb[0] = ConfigNothing()
for x in nimmgr.satList:
tmp = ConfigSubsection()
tmp.voltage = ConfigSelection(advanced_voltage_choices, "polarization")
tmp.tonemode = ConfigSelection(advanced_tonemode_choices, "band")
tmp.usals = ConfigYesNo(True)
tmp.rotorposition = ConfigInteger(default=1, limits=(1, 255))
lnb = ConfigSelection(advanced_lnb_choices, "0")
lnb.slot_id = slot_id
lnb.addNotifier(configLNBChanged)
tmp.lnb = lnb
nim.advanced.sat[x[0]] = tmp
for x in range(3601, 3608):
tmp = ConfigSubsection()
tmp.voltage = ConfigSelection(advanced_voltage_choices, "polarization")
tmp.tonemode = ConfigSelection(advanced_tonemode_choices, "band")
tmp.usals = ConfigYesNo(True)
tmp.userSatellitesList = ConfigText('[]')
tmp.rotorposition = ConfigInteger(default=1, limits=(1, 255))
lnbnum = 65 + x - 3601
lnb = ConfigSelection([("0", _("not configured")), (str(lnbnum), "LNB %d" % (lnbnum))], "0")
lnb.slot_id = slot_id
lnb.addNotifier(configLNBChanged)
tmp.lnb = lnb
nim.advanced.sat[x] = tmp
def createCableConfig(nim, x):
list = [(x[0], x[0]) for x in nimmgr.cablesList]
nim.cable = ConfigSubsection()
nim.cable.scan_networkid = ConfigInteger(default=0, limits=(0, 99999))
possible_scan_types = [("bands", _("Frequency bands")), ("steps", _("Frequency steps"))]
if list:
possible_scan_types.append(("provider", _("Provider")))
nim.cable.scan_provider = ConfigSelection(default="0", choices=list)
nim.cable.config_scan_details = ConfigYesNo(default=False)
nim.cable.scan_type = ConfigSelection(default="bands", choices=possible_scan_types)
nim.cable.scan_band_EU_VHF_I = ConfigYesNo(default=True)
nim.cable.scan_band_EU_MID = ConfigYesNo(default=True)
nim.cable.scan_band_EU_VHF_III = ConfigYesNo(default=True)
nim.cable.scan_band_EU_UHF_IV = ConfigYesNo(default=True)
nim.cable.scan_band_EU_UHF_V = ConfigYesNo(default=True)
nim.cable.scan_band_EU_SUPER = ConfigYesNo(default=True)
nim.cable.scan_band_EU_HYPER = ConfigYesNo(default=True)
nim.cable.scan_band_US_LOW = ConfigYesNo(default=False)
nim.cable.scan_band_US_MID = ConfigYesNo(default=False)
nim.cable.scan_band_US_HIGH = ConfigYesNo(default=False)
nim.cable.scan_band_US_SUPER = ConfigYesNo(default=False)
nim.cable.scan_band_US_HYPER = ConfigYesNo(default=False)
nim.cable.scan_frequency_steps = ConfigInteger(default=1000, limits=(1000, 10000))
nim.cable.scan_mod_qam16 = ConfigYesNo(default=False)
nim.cable.scan_mod_qam32 = ConfigYesNo(default=False)
nim.cable.scan_mod_qam64 = ConfigYesNo(default=True)
nim.cable.scan_mod_qam128 = ConfigYesNo(default=False)
nim.cable.scan_mod_qam256 = ConfigYesNo(default=True)
nim.cable.scan_sr_6900 = ConfigYesNo(default=True)
nim.cable.scan_sr_6875 = ConfigYesNo(default=True)
nim.cable.scan_sr_ext1 = ConfigInteger(default=0, limits=(0, 7230))
nim.cable.scan_sr_ext2 = ConfigInteger(default=0, limits=(0, 7230))
def createTerrestrialConfig(nim, x):
list = [(x[0], x[0]) for x in nimmgr.terrestrialsList]
nim.terrestrial = ConfigSelection(choices=list)
nim.terrestrial_5V = ConfigOnOff()
def createATSCConfig(nim, x):
list = [(x[0], x[0]) for x in nimmgr.atscList]
nim.atsc = ConfigSelection(choices=list)
def tunerTypeChanged(nimmgr, configElement, initial=False):
fe_id = configElement.fe_id
if configElement.value == 'nothing':
"[InitNimManager] disable multitype tuner %s" % fe_id
eDVBResourceManager.getInstance().setFrontendType(nimmgr.nim_slots[fe_id].frontend_id, "UNDEFINED")
else:
print "[InitNimManager] tunerTypeChanged: setFrontendType %s" % nimmgr.nim_slots[fe_id].getType()
eDVBResourceManager.getInstance().setFrontendType(nimmgr.nim_slots[fe_id].frontend_id, nimmgr.nim_slots[fe_id].getType())
try:
raw_channel = eDVBResourceManager.getInstance().allocateRawChannel(fe_id)
if raw_channel is None:
import NavigationInstance
if NavigationInstance.instance:
NavigationInstance.instance.stopService()
raw_channel = eDVBResourceManager.getInstance().allocateRawChannel(fe_id)
if raw_channel is None:
print "[InitNimManager] %d: tunerTypeChanged to '%s' failed (BUSY)" % (fe_id, configElement.getText())
return
frontend = raw_channel.getFrontend()
is_changed_mode = os.path.exists("/proc/stb/frontend/%d/mode" % fe_id)
if not is_changed_mode and frontend.setDeliverySystem(nimmgr.nim_slots[fe_id].getType()):
print "[InitNimManager] tunerTypeChanged feid %d to mode %d" % (fe_id, int(configElement.value))
InitNimManager(nimmgr, [fe_id])
if not hasattr(config.misc, 'firstrun') or not config.misc.firstrun.value:
configElement.save()
elif is_changed_mode:
cur_type = int(open("/proc/stb/frontend/%d/mode" % (fe_id), "r").read())
if cur_type != int(configElement.value):
print "[InitNimManager] tunerTypeChanged feid %d from %d to mode %d" % (fe_id, cur_type, int(configElement.value))
is_dvb_shutdown_timeout = os.path.exists("/sys/module/dvb_core/parameters/dvb_shutdown_timeout")
if is_dvb_shutdown_timeout:
try:
oldvalue = open("/sys/module/dvb_core/parameters/dvb_shutdown_timeout", "r").readline()
open("/sys/module/dvb_core/parameters/dvb_shutdown_timeout", "w").write("0")
except:
print "[InitNimManager] tunerTypeChanged read /sys/module/dvb_core/parameters/dvb_shutdown_timeout failed"
frontend.closeFrontend()
open("/proc/stb/frontend/%d/mode" % (fe_id), "w").write(configElement.value)
frontend.reopenFrontend()
if is_dvb_shutdown_timeout:
try:
open("/sys/module/dvb_core/parameters/dvb_shutdown_timeout", "w").write(oldvalue)
except:
print "[InitNimManager] tunerTypeChanged write to /sys/module/dvb_core/parameters/dvb_shutdown_timeout failed"
nimmgr.enumerateNIMs()
if initial:
print "[InitNimManager] tunerTypeChanged force update setting"
nimmgr.sec.update()
if not hasattr(config.misc, 'firstrun') or not config.misc.firstrun.value:
configElement.save()
else:
print "[InitNimManager] tunerTypeChanged tuner type is already %d" % cur_type
except Exception as e:
print "[InitNimManager] tunerTypeChanged error: ", e
def combinedConfigChanged(nim, slot, slot_id, configElement=None):
tunersEnabled = slot.getTunerTypesEnabled()
if tunersEnabled:
tunersEnabled = ",".join(tunersEnabled)
print "[InitNimManager] enable combined tuner type(s) %s" % tunersEnabled
eDVBResourceManager.getInstance().setFrontendType(nimmgr.nim_slots[slot_id].frontend_id, tunersEnabled)
if nim.configMode.value == 'nothing':
nim.configMode.value = nim.configMode.default = "simple" if slot.canBeCompatible("DVB-S") else "enabled"
else:
print "[InitNimManager] disable combined tuner"
eDVBResourceManager.getInstance().setFrontendType(nimmgr.nim_slots[slot_id].frontend_id, "UNDEFINED")
nim.configMode.choices.choices.update({"nothing": _("Disabled")})
nim.configMode.value = nim.configMode.default = "nothing"
def createConfig(nim, slot):
slot_id = slot.slot
if slot.isCombined() and slot.canBeCompatible("DVB-S") or slot.isCompatible("DVB-S"):
if slot.isFBCLink():
config_mode_choices = {"nothing": _("FBC automatic"), "advanced": _("FBC SCR (Unicable/JESS)")}
else:
# Define all choices here and redefine them in Satconfig.py, since not all tuners are defined yet at this point
config_mode_choices = {"nothing": _("Disabled"), "simple": _("Simple"), "advanced": _("Advanced"), "equal": _("Equal to"), "satposdepends": _("Second cable of motorized LNB"), "loopthrough": _("Loop through from")}
nim.configMode = ConfigSelection(config_mode_choices, "nothing" if slot.isFBCLink() else "simple")
nim.configMode.slot_id = slot_id
elif slot.canBeCompatible("DVB-C") or slot.canBeCompatible("DVB-T") or slot.canBeCompatible("ATSC"):
nim.configMode = ConfigSelection(choices={"enabled": _("Enabled"), "nothing": _("Disabled")}, default="enabled")
else:
nim.configMode = ConfigSelection(choices={"nothing": _("Disabled")}, default="nothing")
if not slot.canBeCompatible("DVB-S"):
if slot.type is not None:
print "[InitNimManager] pls add support for this frontend type!", slot.type
for slot in nimmgr.nim_slots:
slot_id = slot.slot
if update_slots and (slot_id not in update_slots):
continue
nim = config.Nims[slot_id]
nim.force_legacy_signal_stats = ConfigYesNo(default=False)
if slot.isCombined():
nim.configModeDVBS = ConfigYesNo()
nim.configModeDVBC = ConfigYesNo()
nim.configModeDVBT = ConfigYesNo()
nim.configModeATSC = ConfigYesNo()
createConfig(nim, slot)
if slot.canBeCompatible("DVB-S"):
createSatConfig(nim, slot_id)
if slot.canBeCompatible("DVB-C"):
createCableConfig(nim, slot_id)
if slot.canBeCompatible("DVB-T"):
createTerrestrialConfig(nim, slot_id)
if slot.canBeCompatible("ATSC"):
createATSCConfig(nim, slot_id)
if slot.isMultiType() and not hasattr(nim, "multiType"):
nim.multiType = ConfigSelection([(id, slot.getMultiTypeList()[id]) for id in slot.getMultiTypeList().keys()] + [("nothing", _("disabled"))], "0")
nim.multiType.fe_id = slot_id
nim.multiType.addNotifier(boundFunction(tunerTypeChanged, nimmgr))
if nim.multiType.value == "nothing":
nim.configMode.value = "nothing"
if slot.isCombined():
# During an upgrade, convert a legacy multiType setting to the combined config when needed
if nim.configMode.value != "nothing" and not slot.getTunerTypesEnabled():
nim.multiType = ConfigText(default="")
if nim.multiType.value:
type = slot.multi_type.get(nim.multiType.value[:5], "")
nim.configModeDVBS.value = type == "DVB-S"
nim.configModeDVBC.value = type == "DVB-C"
nim.configModeDVBT.value = type == "DVB-T"
nim.configModeATSC.value = type == "ATSC"
nim.multiType.value = ""
nim.save()
nim.configModeDVBS.addNotifier(boundFunction(combinedConfigChanged, nim, slot, slot_id), initial_call=False)
nim.configModeDVBC.addNotifier(boundFunction(combinedConfigChanged, nim, slot, slot_id), initial_call=False)
nim.configModeDVBT.addNotifier(boundFunction(combinedConfigChanged, nim, slot, slot_id), initial_call=False)
nim.configModeATSC.addNotifier(boundFunction(combinedConfigChanged, nim, slot, slot_id))
nimmgr.sec = SecConfigure(nimmgr)
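# Module-level singleton: importing this module constructs the single
# NimManager instance that the rest of the code is expected to share.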
nimmanager = NimManager()
| BlackHole/enigma2-obh10 | lib/python/Components/NimManager.py | Python | gpl-2.0 | 82,663 |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
data_path = 'Bike-Sharing-Dataset/hour.csv'
rides = pd.read_csv(data_path)
rides.head()
rides[:24*10].plot(x='dteday', y='cnt')
# Dummy Variables
dummy_fields = ['season', 'weathersit', 'mnth', 'hr', 'weekday']
for each in dummy_fields:
dummies = pd.get_dummies(rides[each], prefix=each, drop_first=False)
rides = pd.concat([rides, dummies], axis=1)
fields_to_drop = ['instant', 'dteday', 'season', 'weathersit',
'weekday', 'atemp', 'mnth', 'workingday', 'hr']
data = rides.drop(fields_to_drop, axis=1)
data.head()
# Scaling continuous variables (both features and targets)
quant_features = ['casual', 'registered', 'cnt', 'temp', 'hum', 'windspeed']
# Store scalings in a dictionary so we can convert back later
scaled_features = {}
for each in quant_features:
mean, std = data[each].mean(), data[each].std()
scaled_features[each] = [mean, std]
data.loc[:, each] = (data[each] - mean)/std
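# The stored (mean, std) pairs allow undoing the scaling later, e.g.:
# unscaled_cnt = data['cnt'] * std + mean (this is done for the plots below)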
# Splitting the data into training, testing and validation sets
# Save data for approximately the last 21 days
test_data = data[-21*24:]
# Now remove the test data from the data set
data = data[:-21*24]
# Separate the data into features and targets
target_fields = ['cnt', 'casual', 'registered']
features, targets = data.drop(target_fields, axis=1), data[target_fields]
test_features, test_targets = test_data.drop(target_fields, axis=1), test_data[target_fields]
# Hold out the last 60 days or so of the remaining data as a validation set
train_features, train_targets = features[:-60*24], targets[:-60*24]
val_features, val_targets = features[-60*24:], targets[-60*24:]
# Build the Network
class NeuralNetwork(object):
def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
# Set number of nodes in input, hidden and output layers.
self.input_nodes = input_nodes
self.hidden_nodes = hidden_nodes
self.output_nodes = output_nodes
# Initialize weights
self.weights_input_to_hidden = np.random.normal(0.0, self.input_nodes ** -0.5,
(self.input_nodes, self.hidden_nodes))
self.weights_hidden_to_output = np.random.normal(0.0, self.hidden_nodes ** -0.5,
(self.hidden_nodes, self.output_nodes))
self.lr = learning_rate
#### Activation function: sigmoid ####
# Note: in Python, you can define a function with a lambda expression,
# as shown below.
self.activation_function = lambda x: 1.0 / (1 + np.exp(-x)) # sigmoid
def train(self, features, targets):
''' Train the network on batch of features and targets.
Arguments
---------
features: 2D array, each row is one data record, each column is a feature
targets: 1D array of target values
'''
n_records = features.shape[0]
delta_weights_i_h = np.zeros(self.weights_input_to_hidden.shape)
delta_weights_h_o = np.zeros(self.weights_hidden_to_output.shape)
for X, y in zip(features, targets):
### Forward pass ###
# Hidden layer
hidden_inputs = np.dot(X, self.weights_input_to_hidden) # signals into hidden layer
hidden_outputs = self.activation_function(hidden_inputs) # signals from hidden layer - f is sigmoid
# Output layer
final_inputs = np.dot(hidden_outputs, self.weights_hidden_to_output) # signals into final output layer
final_outputs = final_inputs # signals from final output layer - f is the identity function
### Backward pass ###
# Output layer error is the difference between the desired target and the actual output.
error = y - final_outputs
# Hidden layer's contribution to the error
hidden_error = np.dot(error, self.weights_hidden_to_output.T)
# Backpropagated error terms
output_error_term = error # the output activation is the identity, so f'(h) = 1
hidden_error_term = hidden_error * hidden_outputs * (1 - hidden_outputs)
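# Shape note: for one record X is (n_inputs,) and hidden_outputs is (n_hidden,),
# so the [:, None] broadcasting below forms outer products matching the shapes
# of the corresponding weight matrices.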
# Weight step (input to hidden)
delta_weights_i_h += hidden_error_term * X[:,None]
# Weight step (hidden to output)
delta_weights_h_o += output_error_term * hidden_outputs[:,None]
# Update the weights with the averaged gradient descent step
self.weights_hidden_to_output += self.lr*delta_weights_h_o/n_records # update hidden-to-output weights with gradient descent step
self.weights_input_to_hidden += self.lr*delta_weights_i_h/n_records # update input-to-hidden weights with gradient descent step
def run(self, features):
''' Run a forward pass through the network with input features
Arguments
---------
features: 1D array of feature values
'''
#### Forward pass ####
# Hidden layer
hidden_inputs = np.dot(features, self.weights_input_to_hidden) # signals into hidden layer
hidden_outputs = self.activation_function(hidden_inputs) # signals from hidden layer
# Output layer
final_inputs = np.dot(hidden_outputs, self.weights_hidden_to_output) # signals into final output layer
final_outputs = final_inputs # signals from final output layer (identity activation)
return final_outputs
def MSE(y, Y):
return np.mean((y-Y)**2)
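# Quick sanity check of MSE (exact in floating point):
assert MSE(np.array([1.0, 2.0]), np.array([0.0, 0.0])) == 2.5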
##############################################################################################
# unit tests
import unittest
inputs = np.array([[0.5, -0.2, 0.1]])
targets = np.array([[0.4]])
test_w_i_h = np.array([[0.1, -0.2],
[0.4, 0.5],
[-0.3, 0.2]])
test_w_h_o = np.array([[0.3],
[-0.1]])
class TestMethods(unittest.TestCase):
##########
# Unit tests for data loading
##########
def test_data_path(self):
# Test that file path to dataset has been unaltered
self.assertTrue(data_path.lower() == 'bike-sharing-dataset/hour.csv')
def test_data_loaded(self):
# Test that data frame loaded
self.assertTrue(isinstance(rides, pd.DataFrame))
##########
# Unit tests for network functionality
##########
def test_activation(self):
network = NeuralNetwork(3, 2, 1, 0.5)
# Test that the activation function is a sigmoid
self.assertTrue(np.all(network.activation_function(0.5) == 1 / (1 + np.exp(-0.5))))
def test_train(self):
# Test that weights are updated correctly on training
network = NeuralNetwork(3, 2, 1, 0.5)
network.weights_input_to_hidden = test_w_i_h.copy()
network.weights_hidden_to_output = test_w_h_o.copy()
network.train(inputs, targets)
self.assertTrue(np.allclose(network.weights_hidden_to_output,
np.array([[0.37275328],
[-0.03172939]])))
self.assertTrue(np.allclose(network.weights_input_to_hidden,
np.array([[0.10562014, -0.20185996],
[0.39775194, 0.50074398],
[-0.29887597, 0.19962801]])))
def test_run(self):
# Test correctness of run method
network = NeuralNetwork(3, 2, 1, 0.5)
network.weights_input_to_hidden = test_w_i_h.copy()
network.weights_hidden_to_output = test_w_h_o.copy()
self.assertTrue(np.allclose(network.run(inputs), 0.09998924))
suite = unittest.TestLoader().loadTestsFromModule(TestMethods())
unittest.TextTestRunner().run(suite)
#######################################################################################################
# Training the network
import sys
### Set the hyperparameters here ###
#iterations = 2000
#learning_rate = 0.8
#hidden_nodes = 8
#output_nodes = 1
iterations = 1500
learning_rate = 0.8
hidden_nodes = 10
output_nodes = 1
N_i = train_features.shape[1]
network = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate)
losses = {'train': [], 'validation': []}
for ii in range(iterations):
# Go through a random batch of 128 records from the training data set
batch = np.random.choice(train_features.index, size=128)
X, y = train_features.loc[batch].values, train_targets.loc[batch]['cnt']
network.train(X, y)
# Printing out the training progress
train_loss = MSE(network.run(train_features).T, train_targets['cnt'].values)
val_loss = MSE(network.run(val_features).T, val_targets['cnt'].values)
sys.stdout.write("\rProgress: {:2.1f}".format(100 * ii / float(iterations)) \
+ "% ... Training loss: " + str(train_loss)[:5] \
+ " ... Validation loss: " + str(val_loss)[:5])
sys.stdout.flush()
losses['train'].append(train_loss)
losses['validation'].append(val_loss)
plt.plot(losses['train'], label='Training loss')
plt.plot(losses['validation'], label='Validation loss')
plt.legend()
_ = plt.ylim()
##########################################################################################################
# check out predictions
fig, ax = plt.subplots(figsize=(8,4))
mean, std = scaled_features['cnt']
predictions = network.run(test_features).T*std + mean
ax.plot(predictions[0], label='Prediction')
ax.plot((test_targets['cnt']*std + mean).values, label='Data')
ax.set_xlim(right=len(predictions[0]))
ax.legend()
dates = pd.to_datetime(rides.loc[test_data.index]['dteday'])
dates = dates.apply(lambda d: d.strftime('%b %d'))
ax.set_xticks(np.arange(len(dates))[12::24])
_ = ax.set_xticklabels(dates[12::24], rotation=45)
fig, ax = plt.subplots(figsize=(8,4))
mean, std = scaled_features['cnt']
train_predictions = network.run(train_features).T*std + mean
ax.plot(train_predictions[0], label='Prediction')
ax.plot((train_targets['cnt']*std + mean).values, label='Data')
ax.set_xlim(right=len(train_predictions[0]))
ax.legend()
dates = pd.to_datetime(rides.loc[data.index]['dteday'])
dates = dates.apply(lambda d: d.strftime('%b %d'))
ax.set_xticks(np.arange(len(dates))[12::24])
_ = ax.set_xticklabels(dates[12::24], rotation=45) | guyk1971/deep-learning | first-neural-network/firstNN_nb.py | Python | mit | 11,102 |
from multiexplorer.models import IPTracker
from django.http import HttpResponse
from django.conf import settings
class IPLimiterMiddleware(object):
interval = "%s %s" % (
list(settings.IP_FILTER_INTERVAL.values())[0],
list(settings.IP_FILTER_INTERVAL.keys())[0],
)
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
ip = request.META['REMOTE_ADDR']
if not request.path.startswith("/api/") or IPTracker.allow(ip):
response = self.get_response(request)
return response
return HttpResponse(
'{"error": "Too many requests. The limit is %s requests per %s"}' % (
settings.IP_FILTER_HITS, self.interval
),
content_type="application/json",
status=429
)
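# A minimal sketch of the settings this middleware expects (the names come from
# the code above; the values are illustrative assumptions, not from the project):
#
#     IP_FILTER_HITS = 100              # requests allowed per interval
#     IP_FILTER_INTERVAL = {'hour': 1}  # rendered as "1 hour" in the 429 body
#
# Non-/api/ paths bypass the limiter entirely; everything else is gated by
# IPTracker.allow(ip).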
| priestc/MultiExplorer | multiexplorer/multiexplorer/middleware.py | Python | mit | 856 |
import yt
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import pylab
from yt.analysis_modules.halo_finding.api import HaloFinder
import os
#current files are from 20170706_1407
#previous files are from 20170705_0400
#finds the file
def find(name, path):
for root, dirs, files in os.walk(path):
if name in files:
return os.path.join(root, name)
#plotting function
def plot(x, y, diff, fileEndName, colorname, plotTitle):
plt.figure()
plt.scatter(x, y,c=diff, marker= 'o',edgecolor='none')
plt.title(plotTitle)
cb = plt.colorbar()
cb.set_label(colorname)
plt.savefig(fileEndName+".png")
# calculate the Euclidean (L2) norm of the difference between two velocity arrays
def norm(ycoords, vely, vely1):
sumofy = 0
for i in range(len(ycoords)):
sumofy=sumofy+ ((vely[i]-vely1[i])**2)
sumofy=sumofy**(.5)
return sumofy
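# The loop above is equivalent to this vectorized NumPy call (a sketch,
# assuming vely and vely1 are equal-length array-likes):
#
#     np.linalg.norm(np.asarray(vely) - np.asarray(vely1))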
#get 1st and last files and retrieve info
def get_files(file1current, file1previous, num, title):
ds = yt.load(file1current)
ds1 = yt.load(file1previous)
ad=ds.all_data()
xcoords=ad["x"]
ycoords=ad["y"]
velx=ad["velx"]
vely=ad["vely"]
dd=ds1.all_data()
xcoords1=dd["x"]
ycoords1=dd["y"]
velx1=dd["velx"]
vely1=dd["vely"]
plot(xcoords,ycoords,velx - velx1, "chk_00" + num + "x" , "Velocity X Diff", "Velocity X Analysis" + title)
plot(xcoords1,ycoords1,vely - vely1, "chk_00" + num, "Velocity Y Diff", "Velocity Y Analysis" + title)
print(norm(ycoords, velx, velx1))
print(norm(ycoords, vely, vely1))
def main():
'''when using server
get_files(find("sedov_2d_4lev_hdf5_chk_0000", "~/yspKatherine/ysp/ProteusTest/results/20170706_1407/comparison/suite/sedov/sedov_2d_4lev"),
find("sedov_2d_4lev_hdf5_chk_0000", "~/yspKatherine/ysp/ProteusTest/results/20170706_1007/comparison/suite/sedov/sedov_2d_4lev"),
"00", "for 1st Checkpoint")
get_files(find("sedov_2d_4lev_hdf5_chk_0056", "~/yspKatherine/ysp/ProteusTest/results/20170705_0400/comparison/suite/sedov/sedov_2d_4lev"),
find("sedov_2d_4lev_hdf5_chk_0056", "~/yspKatherine/ysp/ProteusTest/results/20170705_0400/comparison/suite/sedov/sedov_2d_4lev"),
"56", "for 2nd Checkpoint")
'''
get_files("sedov_2d_4lev_hdf5_chk_0000", "sedov1_2d_4lev_hdf5_chk_0000","00", " for 0000 Checkpoint")
get_files("sedov_2d_4lev_hdf5_chk_0056", "sedov1_2d_4lev_hdf5_chk_0056","56", " for 0056 Checkpoint")
main()
| rjl09c/ysp2017 | katieproteus.py | Python | gpl-3.0 | 2,304 |
import os
import mimetypes
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from django.conf import settings
from django.core.files.base import File
from django.core.files.storage import Storage
from django.core.exceptions import ImproperlyConfigured, SuspiciousOperation
from django.utils.encoding import force_unicode, smart_str
try:
from boto.s3.connection import S3Connection
from boto.exception import S3ResponseError
from boto.s3.key import Key
except ImportError:
raise ImproperlyConfigured, "Could not load Boto's S3 bindings.\
\nSee http://code.google.com/p/boto/"
ACCESS_KEY_NAME = getattr(settings, 'AWS_ACCESS_KEY_ID', None)
SECRET_KEY_NAME = getattr(settings, 'AWS_SECRET_ACCESS_KEY', None)
HEADERS = getattr(settings, 'AWS_HEADERS', {})
STORAGE_BUCKET_NAME = getattr(settings, 'AWS_STORAGE_BUCKET_NAME', None)
AUTO_CREATE_BUCKET = getattr(settings, 'AWS_AUTO_CREATE_BUCKET', True)
DEFAULT_ACL = getattr(settings, 'AWS_DEFAULT_ACL', 'public-read')
BUCKET_ACL = getattr(settings, 'AWS_BUCKET_ACL', DEFAULT_ACL)
QUERYSTRING_AUTH = getattr(settings, 'AWS_QUERYSTRING_AUTH', True)
QUERYSTRING_EXPIRE = getattr(settings, 'AWS_QUERYSTRING_EXPIRE', 3600)
REDUCED_REDUNDANCY = getattr(settings, 'AWS_REDUCED_REDUNDANCY', False)
LOCATION = getattr(settings, 'AWS_LOCATION', '')
CUSTOM_DOMAIN = getattr(settings, 'AWS_S3_CUSTOM_DOMAIN', None)
SECURE_URLS = getattr(settings, 'AWS_S3_SECURE_URLS', True)
FILE_NAME_CHARSET = getattr(settings, 'AWS_S3_FILE_NAME_CHARSET', 'utf-8')
IS_GZIPPED = getattr(settings, 'AWS_IS_GZIPPED', False)
GZIP_CONTENT_TYPES = getattr(settings, 'GZIP_CONTENT_TYPES', (
'text/css',
'application/javascript',
'application/x-javascript'
))
if IS_GZIPPED:
from gzip import GzipFile
def safe_join(base, *paths):
"""
A version of django.utils._os.safe_join for S3 paths.
Joins one or more path components to the base path component intelligently.
Returns a normalized version of the final path.
The final path must be located inside of the base path component (otherwise
a ValueError is raised).
Paths outside the base path indicate a possible security sensitive operation.
"""
from urlparse import urljoin
base_path = force_unicode(base)
paths = map(lambda p: force_unicode(p), paths)
final_path = urljoin(base_path + ("/" if not base_path.endswith("/") else ""), *paths)
# Ensure final_path starts with base_path and that the next character after
# the final path is '/' (or nothing, in which case final_path must be
# equal to base_path).
base_path_len = len(base_path)
if not final_path.startswith(base_path) \
or final_path[base_path_len:base_path_len+1] not in ('', '/'):
raise ValueError('the joined path is located outside of the base path'
' component')
return final_path
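# Example behaviour (an illustrative sketch, not from the original module):
#
#     safe_join("media", "photos/cat.jpg")  -> u"media/photos/cat.jpg"
#     safe_join("media", "../secret.txt")   -> raises ValueError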
class S3BotoStorage(Storage):
"""Amazon Simple Storage Service using Boto"""
def __init__(self, bucket=STORAGE_BUCKET_NAME, access_key=None,
secret_key=None, bucket_acl=BUCKET_ACL, acl=DEFAULT_ACL, headers=HEADERS,
gzip=IS_GZIPPED, gzip_content_types=GZIP_CONTENT_TYPES,
querystring_auth=QUERYSTRING_AUTH, querystring_expire=QUERYSTRING_EXPIRE,
reduced_redundancy=REDUCED_REDUNDANCY,
custom_domain=CUSTOM_DOMAIN, secure_urls=SECURE_URLS,
location=LOCATION, file_name_charset=FILE_NAME_CHARSET):
self.bucket_acl = bucket_acl
self.bucket_name = bucket
self.acl = acl
self.headers = headers
self.gzip = gzip
self.gzip_content_types = gzip_content_types
self.querystring_auth = querystring_auth
self.querystring_expire = querystring_expire
self.reduced_redundancy = reduced_redundancy
self.custom_domain = custom_domain
self.secure_urls = secure_urls
self.location = location or ''
self.location = self.location.lstrip('/')
self.file_name_charset = file_name_charset
if not access_key and not secret_key:
access_key, secret_key = self._get_access_keys()
self.connection = S3Connection(access_key, secret_key)
@property
def bucket(self):
if not hasattr(self, '_bucket'):
self._bucket = self._get_or_create_bucket(self.bucket_name)
return self._bucket
def _get_access_keys(self):
access_key = ACCESS_KEY_NAME
secret_key = SECRET_KEY_NAME
if (access_key or secret_key) and (not access_key or not secret_key):
access_key = os.environ.get(ACCESS_KEY_NAME)
secret_key = os.environ.get(SECRET_KEY_NAME)
if access_key and secret_key:
# Both were provided, so use them
return access_key, secret_key
return None, None
def _get_or_create_bucket(self, name):
"""Retrieves a bucket if it exists, otherwise creates it."""
try:
return self.connection.get_bucket(name)
except S3ResponseError, e:
if AUTO_CREATE_BUCKET:
bucket = self.connection.create_bucket(name)
bucket.set_acl(self.bucket_acl)
return bucket
raise ImproperlyConfigured, ("Bucket specified by "
"AWS_STORAGE_BUCKET_NAME does not exist. Buckets can be "
"automatically created by setting AWS_AUTO_CREATE_BUCKET=True")
def _clean_name(self, name):
# Useful for windows' paths
return os.path.normpath(name).replace('\\', '/')
def _normalize_name(self, name):
try:
return safe_join(self.location, name).lstrip('/')
except ValueError:
raise SuspiciousOperation("Attempted access to '%s' denied." % name)
def _encode_name(self, name):
return smart_str(name, encoding=self.file_name_charset)
def _compress_content(self, content):
"""Gzip a given string."""
zbuf = StringIO()
zfile = GzipFile(mode='wb', compresslevel=6, fileobj=zbuf)
zfile.write(content.read())
zfile.close()
content.file = zbuf
return content
def _open(self, name, mode='rb'):
name = self._normalize_name(self._clean_name(name))
f = S3BotoStorageFile(name, mode, self)
if not f.key:
raise IOError('File does not exist: %s' % name)
return f
def _save(self, name, content):
cleaned_name = self._clean_name(name)
name = self._normalize_name(cleaned_name)
headers = self.headers
content_type = getattr(content,'content_type', mimetypes.guess_type(name)[0] or Key.DefaultContentType)
if self.gzip and content_type in self.gzip_content_types:
content = self._compress_content(content)
headers.update({'Content-Encoding': 'gzip'})
content.name = cleaned_name
k = self.bucket.get_key(self._encode_name(name))
if not k:
k = self.bucket.new_key(self._encode_name(name))
k.set_metadata('Content-Type',content_type)
k.set_contents_from_file(content, headers=headers, policy=self.acl,
reduced_redundancy=self.reduced_redundancy)
return cleaned_name
def delete(self, name):
name = self._normalize_name(self._clean_name(name))
self.bucket.delete_key(self._encode_name(name))
def exists(self, name):
name = self._normalize_name(self._clean_name(name))
k = self.bucket.new_key(self._encode_name(name))
return k.exists()
def listdir(self, name):
name = self._normalize_name(self._clean_name(name))
dirlist = self.bucket.list(self._encode_name(name))
files = []
dirs = set()
base_parts = name.split("/") if name else []
for item in dirlist:
parts = item.name.split("/")
parts = parts[len(base_parts):]
if len(parts) == 1:
# File
files.append(parts[0])
elif len(parts) > 1:
# Directory
dirs.add(parts[0])
return list(dirs),files
def size(self, name):
name = self._normalize_name(self._clean_name(name))
return self.bucket.get_key(self._encode_name(name)).size
def url(self, name):
name = self._normalize_name(self._clean_name(name))
if self.custom_domain:
return "%s://%s/%s" % ('https' if self.secure_urls else 'http', self.custom_domain, name)
else:
return self.connection.generate_url(self.querystring_expire, method='GET', \
bucket=self.bucket.name, key=self._encode_name(name), query_auth=self.querystring_auth, \
force_http=not self.secure_urls)
def get_available_name(self, name):
""" Overwrite existing file with the same name. """
name = self._clean_name(name)
return name
class S3BotoStorageFile(File):
def __init__(self, name, mode, storage):
self._storage = storage
self.name = name[len(self._storage.location):].lstrip('/')
self._mode = mode
self.key = storage.bucket.get_key(self._storage._encode_name(name))
self._is_dirty = False
self._file = None
@property
def size(self):
return self.key.size
@property
def file(self):
if self._file is None:
self._file = StringIO()
if 'r' in self._mode:
self._is_dirty = False
self.key.get_contents_to_file(self._file)
self._file.seek(0)
return self._file
def read(self, *args, **kwargs):
if 'r' not in self._mode:
raise AttributeError("File was not opened in read mode.")
return super(S3BotoStorageFile, self).read(*args, **kwargs)
def write(self, *args, **kwargs):
if 'w' not in self._mode:
raise AttributeError("File was opened for read-only access.")
self._is_dirty = True
return super(S3BotoStorageFile, self).write(*args, **kwargs)
def close(self):
if self._is_dirty:
self.key.set_contents_from_file(self._file, headers=self._storage.headers, policy=self._storage.acl)
self.key.close()
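# A minimal Django settings sketch for this backend (the AWS_* names are the
# ones read at the top of this module; the values are placeholders):
#
#     DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
#     AWS_ACCESS_KEY_ID = '...'
#     AWS_SECRET_ACCESS_KEY = '...'
#     AWS_STORAGE_BUCKET_NAME = 'my-bucket'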
| e-loue/django-storages | storages/backends/s3boto.py | Python | bsd-3-clause | 10,557 |
# -*- coding: utf-8 -*-
# **********************************************************************
#
# Copyright (c) 2003-2017 ZeroC, Inc. All rights reserved.
#
# This copy of Ice is licensed to you under the terms described in the
# ICE_LICENSE file included in this distribution.
#
# **********************************************************************
#
# Make sure IceStorm and the subscriber use the same buffer size for
# sending/receiving datagrams. This ensures the test works with bogus
# OS configurations where the receiver buffer size is smaller than the
# send buffer size (causing the received messages to be
# truncated). See also bug #6070.
#
props = {
"IceStorm.Election.MasterTimeout" : 2,
"IceStorm.Election.ElectionTimeout" : 2,
"IceStorm.Election.ResponseTimeout" : 2
}
icestorm = [ IceStorm(replica=i, nreplicas=3, props = props) for i in range(0,3) ]
class IceStormRep1TestCase(IceStormTestCase):
def runClientSide(self, current):
def checkExpect(output, expect):
if not expect:
return
if type(expect) == str:
expect = [expect]
for e in expect:
if output.find(e) >= 0:
return
            else:
                raise RuntimeError("unexpected output `{0}' (expected `{1}')".format(output, expect))
def adminForReplica(replica, cmd, expect):
checkExpect(self.runadmin(current, cmd, instance=self.icestorm[replica], quiet=True, exitstatus=1),
expect)
def stopReplica(num):
self.icestorm[num].shutdown(current)
self.icestorm[num].stop(current, True)
def startReplica(num):
self.icestorm[num].start(current)
def runtest(s="", p=""):
ClientServerTestCase(client=Publisher(args=p.split(" ")),
server=Subscriber(args=s.split(" "))).run(current)
def runsub2(replica=None, expect=None):
subscriber = Subscriber(exe="sub",
instance=None if replica is None else self.icestorm[replica],
args=["--id", "foo"],
readyCount=0,
quiet=True)
subscriber.run(current, exitstatus=1 if expect else 0)
checkExpect(subscriber.getOutput(current), expect)
def rununsub2(replica=None, expect=None):
sub = Subscriber(exe="sub",
instance=None if replica is None else self.icestorm[replica],
args=["--id", "foo"],
readyCount=0,
quiet=True)
if replica is None:
sub.run(current, args=["--unsub"])
# Else we first subscribe to this replica, then unsub. We
# shouldn't get an AlreadySubscribedException.
sub.run(current, exitstatus=1 if expect else 0)
if expect:
checkExpect(sub.getOutput(current), expect)
return
sub.run(current, args=["--unsub"])
current.write("testing topic creation across replicas... ")
self.runadmin(current, "create single")
for replica in range(0, 3):
adminForReplica(replica, "create single", "error: topic `single' exists")
current.writeln("ok")
current.write("testing topic destruction across replicas... ")
sys.stdout.flush()
self.runadmin(current, "destroy single")
for replica in range(0, 3):
adminForReplica(replica, "destroy single", "error: couldn't find topic `single'")
current.writeln("ok")
current.write("testing topic creation without replica... ")
stopReplica(0)
self.runadmin(current, "create single")
for replica in range(1, 3):
adminForReplica(replica, "create single", "error: topic `single' exists")
adminForReplica(0, "create single", ["ConnectionRefused", "ConnectFailed"])
startReplica(0)
adminForReplica(0, "create single", "error: topic `single' exists")
current.writeln("ok")
self.runadmin(current, "destroy single")
current.write("testing topic creation without master... ")
sys.stdout.flush()
stopReplica(2)
self.runadmin(current, "create single")
for replica in range(0, 2):
adminForReplica(replica, "create single", "error: topic `single' exists")
adminForReplica(2, "create single", ["ConnectionRefused", "ConnectFailed"])
startReplica(2)
adminForReplica(2, "create single", "error: topic `single' exists")
current.writeln("ok")
# All replicas are running
current.write("testing topic destruction without replica... ")
stopReplica(0)
self.runadmin(current, "destroy single")
for replica in range(1, 3):
adminForReplica(replica, "destroy single", "error: couldn't find topic `single'")
adminForReplica(0, "destroy single", ["ConnectionRefused", "ConnectFailed"])
startReplica(0)
adminForReplica(0, "destroy single", "error: couldn't find topic `single'")
current.writeln("ok")
current.write("testing topic destruction without master... ")
sys.stdout.flush()
self.runadmin(current, "create single")
stopReplica(2)
self.runadmin(current, "destroy single")
for replica in range(0, 2):
adminForReplica(replica, "destroy single", "error: couldn't find topic `single'")
adminForReplica(2, "destroy single", ["ConnectionRefused", "ConnectFailed"])
startReplica(2)
adminForReplica(2, "destroy single", "error: couldn't find topic `single'")
current.writeln("ok")
# Now test subscription/unsubscription on all replicas.
self.runadmin(current, "create single")
current.write("testing subscription across replicas... ")
sys.stdout.flush()
runsub2()
for replica in range(0, 3):
runsub2(replica, "IceStorm::AlreadySubscribed")
current.writeln("ok")
current.write("testing unsubscription across replicas... ")
sys.stdout.flush()
rununsub2()
for replica in range(0, 3):
rununsub2(replica)
current.writeln("ok")
current.write("testing subscription without master... ")
sys.stdout.flush()
stopReplica(2)
runsub2()
for replica in range(0, 2):
runsub2(replica, "IceStorm::AlreadySubscribed")
runsub2(2, ["ConnectionRefused", "ConnectFailed"])
startReplica(2)
runsub2(2, "IceStorm::AlreadySubscribed")
current.writeln("ok")
current.write("testing unsubscription without master... ")
sys.stdout.flush()
stopReplica(2)
rununsub2()
for replica in range(0, 2):
rununsub2(replica)
rununsub2(2, ["ConnectionRefused", "ConnectFailed"])
startReplica(2)
rununsub2(2)
current.writeln("ok")
current.write("testing subscription without replica... ")
sys.stdout.flush()
stopReplica(0)
runsub2()
for replica in range(1, 3):
runsub2(replica, "IceStorm::AlreadySubscribed")
runsub2(0, ["ConnectionRefused", "ConnectFailed"])
startReplica(0)
runsub2(0, "IceStorm::AlreadySubscribed")
current.writeln("ok")
current.write("testing unsubscription without replica... ")
stopReplica(0)
rununsub2()
for replica in range(1, 3):
rununsub2(replica)
rununsub2(0, ["ConnectionRefused", "ConnectFailed"])
startReplica(0)
rununsub2(0)
current.writeln("ok")
# All replicas are running
current.write("running twoway subscription test... ")
runtest("--twoway")
current.writeln("ok")
current.write("running ordered subscription test... ")
runtest("--ordered")
current.writeln("ok")
stopReplica(2)
current.write("running twoway, ordered subscription test without master... ")
runtest("--twoway")
runtest("--ordered")
current.writeln("ok")
startReplica(2)
stopReplica(0)
current.write("running twoway, ordered subscription test without replica... ")
runtest("--twoway")
runtest("--ordered")
current.writeln("ok")
startReplica(0)
current.write("running cycle publishing test... ")
sys.stdout.flush()
runtest("--twoway", "--cycle")
current.writeln("ok")
current.write("stopping replicas... ")
sys.stdout.flush()
self.stopIceStorm(current)
current.writeln("ok")
TestSuite(__file__, [ IceStormRep1TestCase("replicated", icestorm=icestorm) ], multihost=False)
| ljx0305/ice | cpp/test/IceStorm/rep1/test.py | Python | gpl-2.0 | 9,054 |
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from glanceclient.v1.images import Image, ImageManager
from .utils import TestDataContainer
def data(TEST):
TEST.images = TestDataContainer()
TEST.snapshots = TestDataContainer()
# Snapshots
snapshot_dict = {'name': u'snapshot',
'container_format': u'ami',
'id': 3,
'properties': {'image_type': u'snapshot'}}
snapshot = Image(ImageManager(None), snapshot_dict)
TEST.snapshots.add(snapshot)
# Images
image_dict = {'id': '1',
'name': 'public_image',
'container_format': 'novaImage',
'properties': {'image_type': u'image'}}
public_image = Image(ImageManager(None), image_dict)
image_dict = {'id': '2',
'name': 'private_image',
'container_format': 'aki'}
private_image = Image(ImageManager(None), image_dict)
TEST.images.add(public_image, private_image)
| gyang/horizon | horizon/tests/test_data/glance_data.py | Python | apache-2.0 | 1,563 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import
from testutil.dott import feature, sh, testtmp # noqa: F401
sh % "hg init"
(
sh % "hg debugdrawdag"
<< r"""
C
|
B
|
A
"""
)
sh % "setconfig 'devel.legacy.revnum=warn'"
# use revnum directly
sh % "hg log -r 0 -T '.\\n'" == r"""
.
hint[revnum-deprecate]: Local revision numbers (ex. 0) are being deprecated and will stop working in the future. Please use commit hashes instead.
hint[hint-ack]: use 'hg hint --ack revnum-deprecate' to silence these hints"""
# negative revnum
sh % "hg update -r -2" == r"""
2 files updated, 0 files merged, 0 files removed, 0 files unresolved
hint[revnum-deprecate]: Local revision numbers (ex. -2) are being deprecated and will stop working in the future. Please use commit hashes instead.
hint[hint-ack]: use 'hg hint --ack revnum-deprecate' to silence these hints"""
# revset operators
sh % "hg log -r 1+2 -T '.\\n'" == r"""
.
.
hint[revnum-deprecate]: Local revision numbers (ex. 1) are being deprecated and will stop working in the future. Please use commit hashes instead.
hint[hint-ack]: use 'hg hint --ack revnum-deprecate' to silence these hints"""
sh % "hg log -r '::2' -T '.\\n'" == r"""
.
.
.
hint[revnum-deprecate]: Local revision numbers (ex. 2) are being deprecated and will stop working in the future. Please use commit hashes instead.
hint[hint-ack]: use 'hg hint --ack revnum-deprecate' to silence these hints"""
sh % "hg log -r 2-1 -T '.\\n'" == r"""
.
hint[revnum-deprecate]: Local revision numbers (ex. 2) are being deprecated and will stop working in the future. Please use commit hashes instead.
hint[hint-ack]: use 'hg hint --ack revnum-deprecate' to silence these hints"""
# revset functions
sh % "hg log -r 'parents(2)' -T '.\\n'" == r"""
.
hint[revnum-deprecate]: Local revision numbers (ex. 2) are being deprecated and will stop working in the future. Please use commit hashes instead.
hint[hint-ack]: use 'hg hint --ack revnum-deprecate' to silence these hints"""
sh % "hg log -r 'sort(2+0)' -T '.\\n'" == r"""
.
.
hint[revnum-deprecate]: Local revision numbers (ex. 2) are being deprecated and will stop working in the future. Please use commit hashes instead.
hint[hint-ack]: use 'hg hint --ack revnum-deprecate' to silence these hints"""
# abort
sh % "setconfig 'devel.legacy.revnum=abort'"
sh % "hg up 0" == r"""
abort: local revision number is disabled in this repo
[255]"""
# smartlog revset
sh % "enable smartlog"
sh % "hg log -r 'smartlog()' -T." == "..."
sh % "hg log -r 'smartlog(1)' -T." == r"""
abort: local revision number is disabled in this repo
[255]"""
# phase
sh % "hg phase" == "112478962961147124edd43549aedd1a335e44bf: draft"
| facebookexperimental/eden | eden/hg-server/tests/test-revnum-deprecate-t.py | Python | gpl-2.0 | 2,980 |
import pytest
from ray.serve.kv_store import RayInternalKVStore
def test_ray_internal_kv(serve_instance):
with pytest.raises(TypeError):
RayInternalKVStore(namespace=1)
RayInternalKVStore(namespace=b"")
kv = RayInternalKVStore()
with pytest.raises(TypeError):
kv.put(1, b"1")
with pytest.raises(TypeError):
kv.put("1", 1)
with pytest.raises(TypeError):
kv.put("1", "1")
kv.put("1", b"2")
assert kv.get("1") == b"2"
kv.put("2", b"4")
assert kv.get("2") == b"4"
kv.put("1", b"3")
assert kv.get("1") == b"3"
assert kv.get("2") == b"4"
def test_ray_internal_kv_collisions(serve_instance):
kv1 = RayInternalKVStore()
kv1.put("1", b"1")
assert kv1.get("1") == b"1"
kv2 = RayInternalKVStore("namespace")
assert kv2.get("1") is None
kv2.put("1", b"-1")
assert kv2.get("1") == b"-1"
assert kv1.get("1") == b"1"
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", "-s", __file__]))
| richardliaw/ray | python/ray/serve/tests/test_kv_store.py | Python | apache-2.0 | 1,027 |
from MathFunc import *
from copy import copy, deepcopy
from model.Game import Game
from model.World import World
from model.Move import Move
from CommonFunctions import *
def get_puck_owner(world: World):
h = None
for h in world.hockeyists:
if world.puck.owner_hockeyist_id == h.id:
return h
class WorldConstants:
def __init__(self, game: (None, Game), world: (None, World), move: (None, Move)):
self.go_rest_pct = 0.7
self.main_period_len = 6000
self.game = game
self.move = move
self.world = world
self.alpha_player = 0.98
self.alpha_puck = 0.999
self.tick = world.tick
self.rink_mid_x = (game.rink_right + game.rink_left)/2
self.rink_mid_y = (game.rink_bottom + game.rink_top)/2
self.rink_len = game.rink_right - game.rink_left
self.rink_width = game.rink_bottom - game.rink_top
self.puck_radius = world.puck.radius
self.opponent_player = world.get_opponent_player()
self.player = world.get_my_player()
self.net_width = self.player.net_bottom-self.player.net_top
self.net_top = self.player.net_top
self.net_bot = self.player.net_bottom
self.goalie_radius = 30
self.player_radius = 30
self.turn_speed = game.hockeyist_turn_angle_factor
self.sgn_x = copysign(1,self.opponent_player.net_front - self.player.net_front)
self.puck_radius = world.puck.radius
self.puck_x = world.puck.x
self.puck_y = world.puck.y
self.puck_x_range_me = abs(self.player.net_front - self.puck_x)
self.puck_x_range_opp = abs(self.opponent_player.net_front - self.puck_x)
self.puck_state = None
self.puck_state_next = None
self.def_x = self.player.net_front + copysign(self.player_radius,self.sgn_x)*2.5
self.def_y = (self.player.net_top + self.player.net_bottom) / 2
self.def_x_front = self.def_x + self.rink_len/4 * self.sgn_x
self.def_y_top = (self.player.net_top + self.player.net_bottom) / 2 - self.rink_width/4
self.def_y_bot = (self.player.net_top + self.player.net_bottom) / 2 + self.rink_width/4
self.puck_owner = get_puck_owner(world)
| pkosukhin/codehockey | Constants.py | Python | mit | 2,234 |
# -*- coding: utf-8 -*-
import logging
import random
import sys
import numpy as np
from copy import deepcopy
from . import SYMBOLS
from .ai import negamax
class BasePlayer(object):
def __init__(self, player):
self.log = logging.getLogger(self.__class__.__name__)
self.player = player
def get_move(self, gs):
raise NotImplementedError
def __repr__(self):
return SYMBOLS[self.player]
@property
def opponent(self):
return -1*self.player
class RandomPlayer(BasePlayer):
def get_move(self, gs):
"""
:gs : gameState object """
return gs.get_random_move()
class ProbRandomPlayer(BasePlayer):
def __init__(self, player, good_moves):
BasePlayer.__init__(self, player)
self.good_moves = good_moves
def get_move(self, gs):
return gs.get_best_random_move(self.good_moves)
class SmartPlayer(BasePlayer):
def get_move(self, gs):
if np.absolute(gs.gameState).sum() == 0:
return (0, 0)
score, move = negamax(gs, self.player)
return move
class RandomConnect4Player(BasePlayer):
def get_move(self, gs):
return random.choice(gs.get_available_moves())
class Connect4SmartPlayer(BasePlayer):
def __init__(self, player, depth=4, use_pruning=True):
BasePlayer.__init__(self, player)
self.depth = depth
self.use_pruning = use_pruning
if not self.use_pruning:
            self.log.warning('Pruning is not used')
def get_move(self, gs):
if np.absolute(gs.gameState).sum() == 0:
return 3
if self.use_pruning:
inf = float('inf')
args = (gs, self.player, 100, 100-self.depth, True, -inf, inf, False)
else:
args = (gs, self.player, 100, 100-self.depth, True, None, None, False)
score, move = negamax(*args)
return move
| gosom/tic-tac-toe | tictac/lib/player.py | Python | gpl-3.0 | 1,916 |
# -*- coding: utf-8 -*-
# @Author: Ram Krishna Sharma
# @Author Email: [email protected]
# @Date: 2021-07-26
# @Last Modified by: Ram Krishna Sharma
# @Last Modified time: 2021-08-03
import os
def ExecuteCommand(Command_):
print("===============================================================")
print("Command: {}".format(Command_))
print("===============================================================")
os.system(Command_)
print("Command completed: {}".format(Command_))
print("===============================================================")
def GetTitle(dirr,InputDirPath,ListRemoveStringFromDirName,DictReplaceString):
# Title = dirr
Title = dirr.replace(InputDirPath,"")
for RemoveString in ListRemoveStringFromDirName:
Title = Title.replace(RemoveString,"")
for key in DictReplaceString:
Title = Title.replace(key,DictReplaceString[key])
Title = Title.replace("_","\_")
return Title
def GetDirList(InputDirPath,DirListShouldStartsWith,DirNameShouldContain,DirNameShouldContain2=""):
count=0
GetAllDirNames = []
for (dirpath, dirnames, filenames) in os.walk(InputDirPath):
# print ("dirpath: ",dirpath)
for name in dirnames:
for DirListStartsWith in DirListShouldStartsWith:
# if name.startswith(DirListStartsWith) and (DirNameShouldContain in DirListStartsWith):
if name.startswith(DirListStartsWith):
if DirNameShouldContain2 != "":
if (DirNameShouldContain in name) and (DirNameShouldContain2 in name):
# print ("name: ",name)
count += 1
# if count>10: break
                            GetAllDirNames.append(os.path.join(dirpath, name))
else:
if (DirNameShouldContain in name):
# print ("name: ",name)
count += 1
# if count>10: break
                            GetAllDirNames.append(os.path.join(dirpath, name))
GetAllDirNames.sort()
for count_, dir_ in enumerate(GetAllDirNames):
print("{0:3}: {1}".format(count_,dir_))
return GetAllDirNames
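# Example usage (a sketch; the path and patterns are illustrative):
#
#     dirs = GetDirList("results/", ["20210726_"], "comparison")
#
# walks results/, keeps directories whose names start with "20210726_" and
# contain "comparison", prints them sorted, and returns their full paths.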
| ram1123/PPT_script | templates/utils.py | Python | unlicense | 2,257 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import unittest
from odoo.addons.stock_landed_costs.tests.common import TestStockLandedCostsCommon
from odoo.addons.stock_landed_costs.tests.test_stockvaluationlayer import TestStockValuationLCCommon
from odoo.addons.stock_account.tests.test_stockvaluation import _create_accounting_data
from odoo.tests import tagged, Form
@tagged('post_install', '-at_install')
class TestLandedCosts(TestStockLandedCostsCommon):
def setUp(self):
super(TestLandedCosts, self).setUp()
# Create picking incoming shipment
self.picking_in = self.Picking.create({
'partner_id': self.supplier_id,
'picking_type_id': self.warehouse.in_type_id.id,
'location_id': self.supplier_location_id,
'location_dest_id': self.warehouse.lot_stock_id.id})
self.Move.create({
'name': self.product_refrigerator.name,
'product_id': self.product_refrigerator.id,
'product_uom_qty': 5,
'product_uom': self.product_refrigerator.uom_id.id,
'picking_id': self.picking_in.id,
'location_id': self.supplier_location_id,
'location_dest_id': self.warehouse.lot_stock_id.id})
self.Move.create({
'name': self.product_oven.name,
'product_id': self.product_oven.id,
'product_uom_qty': 10,
'product_uom': self.product_oven.uom_id.id,
'picking_id': self.picking_in.id,
'location_id': self.supplier_location_id,
'location_dest_id': self.warehouse.lot_stock_id.id})
# Create picking outgoing shipment
self.picking_out = self.Picking.create({
'partner_id': self.customer_id,
'picking_type_id': self.warehouse.out_type_id.id,
'location_id': self.warehouse.lot_stock_id.id,
'location_dest_id': self.customer_location_id})
self.Move.create({
'name': self.product_refrigerator.name,
'product_id': self.product_refrigerator.id,
'product_uom_qty': 2,
'product_uom': self.product_refrigerator.uom_id.id,
'picking_id': self.picking_out.id,
'location_id': self.warehouse.lot_stock_id.id,
'location_dest_id': self.customer_location_id})
def test_00_landed_costs_on_incoming_shipment(self):
""" Test landed cost on incoming shipment """
#
# (A) Purchase product
# Services Quantity Weight Volume
# -----------------------------------------------------
# 1. Refrigerator 5 10 1
# 2. Oven 10 20 1.5
# (B) Add some costs on purchase
# Services Amount Split Method
# -------------------------------------------
# 1.labour 10 By Equal
# 2.brokerage 150 By Quantity
# 3.transportation 250 By Weight
# 4.packaging 20 By Volume
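        # Worked split (my illustration; the figures match valid_vals below):
        # labour 10 by equal -> 5 per product; brokerage 150 by quantity over
        # 5 + 10 = 15 units -> 50 / 100; transportation 250 by weight over
        # 5*10 + 10*20 = 250 -> 50 / 200; packaging 20 by volume over
        # 5*1 + 10*1.5 = 20 -> 5 / 15. Total landed cost: 430.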
# Process incoming shipment
income_ship = self._process_incoming_shipment()
# Create landed costs
stock_landed_cost = self._create_landed_costs({
'equal_price_unit': 10,
'quantity_price_unit': 150,
'weight_price_unit': 250,
'volume_price_unit': 20}, income_ship)
# Compute landed costs
stock_landed_cost.compute_landed_cost()
valid_vals = {
'equal': 5.0,
'by_quantity_refrigerator': 50.0,
'by_quantity_oven': 100.0,
'by_weight_refrigerator': 50.0,
'by_weight_oven': 200,
'by_volume_refrigerator': 5.0,
'by_volume_oven': 15.0}
# Check valuation adjustment line recognized or not
self._validate_additional_landed_cost_lines(stock_landed_cost, valid_vals)
# Validate the landed cost.
stock_landed_cost.button_validate()
self.assertTrue(stock_landed_cost.account_move_id, 'Landed costs should be available account move lines')
account_entry = self.env['account.move.line'].read_group(
[('move_id', '=', stock_landed_cost.account_move_id.id)], ['debit', 'credit', 'move_id'], ['move_id'])[0]
self.assertEqual(account_entry['debit'], account_entry['credit'], 'Debit and credit are not equal')
self.assertEqual(account_entry['debit'], 430.0, 'Wrong Account Entry')
def test_01_negative_landed_costs_on_incoming_shipment(self):
""" Test negative landed cost on incoming shipment """
#
# (A) Purchase Product
# Services Quantity Weight Volume
# -----------------------------------------------------
# 1. Refrigerator 5 10 1
# 2. Oven 10 20 1.5
        # (B) Sell part of the refrigerator quantity
# (C) Add some costs on purchase
# Services Amount Split Method
# -------------------------------------------
# 1.labour 10 By Equal
# 2.brokerage 150 By Quantity
# 3.transportation 250 By Weight
# 4.packaging 20 By Volume
# (D) Decrease cost that already added on purchase
# (apply negative entry)
# Services Amount Split Method
# -------------------------------------------
# 1.labour -5 By Equal
# 2.brokerage -50 By Quantity
# 3.transportation -50 By Weight
# 4.packaging -5 By Volume
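        # Worked split of the negative entry (my illustration; the figures
        # match valid_vals below): -5 by equal -> -2.5 each; -50 by quantity ->
        # -16.67 / -33.33; -50 by weight -> -10 / -40; -5 by volume ->
        # -1.25 / -3.75.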
# Process incoming shipment
income_ship = self._process_incoming_shipment()
# Refrigerator outgoing shipment.
self._process_outgoing_shipment()
# Apply landed cost for incoming shipment.
stock_landed_cost = self._create_landed_costs({
'equal_price_unit': 10,
'quantity_price_unit': 150,
'weight_price_unit': 250,
'volume_price_unit': 20}, income_ship)
# Compute landed costs
stock_landed_cost.compute_landed_cost()
valid_vals = {
'equal': 5.0,
'by_quantity_refrigerator': 50.0,
'by_quantity_oven': 100.0,
'by_weight_refrigerator': 50.0,
'by_weight_oven': 200.0,
'by_volume_refrigerator': 5.0,
'by_volume_oven': 15.0}
# Check valuation adjustment line recognized or not
self._validate_additional_landed_cost_lines(stock_landed_cost, valid_vals)
# Validate the landed cost.
stock_landed_cost.button_validate()
self.assertTrue(stock_landed_cost.account_move_id, 'Landed costs should be available account move lines')
# Create negative landed cost for previously incoming shipment.
stock_negative_landed_cost = self._create_landed_costs({
'equal_price_unit': -5,
'quantity_price_unit': -50,
'weight_price_unit': -50,
'volume_price_unit': -5}, income_ship)
# Compute negative landed costs
stock_negative_landed_cost.compute_landed_cost()
valid_vals = {
'equal': -2.5,
'by_quantity_refrigerator': -16.67,
'by_quantity_oven': -33.33,
'by_weight_refrigerator': -10.00,
'by_weight_oven': -40.00,
'by_volume_refrigerator': -1.25,
'by_volume_oven': -3.75}
# Check valuation adjustment line recognized or not
self._validate_additional_landed_cost_lines(stock_negative_landed_cost, valid_vals)
# Validate the landed cost.
stock_negative_landed_cost.button_validate()
self.assertEqual(stock_negative_landed_cost.state, 'done', 'Negative landed costs should be in done state')
self.assertTrue(stock_negative_landed_cost.account_move_id, 'Landed costs should be available account move lines')
account_entry = self.env['account.move.line'].read_group(
[('move_id', '=', stock_negative_landed_cost.account_move_id.id)], ['debit', 'credit', 'move_id'], ['move_id'])[0]
self.assertEqual(account_entry['debit'], account_entry['credit'], 'Debit and credit are not equal')
move_lines = [
{'name': 'split by volume - Microwave Oven', 'debit': 3.75, 'credit': 0.0},
{'name': 'split by volume - Microwave Oven', 'debit': 0.0, 'credit': 3.75},
{'name': 'split by weight - Microwave Oven', 'debit': 40.0, 'credit': 0.0},
{'name': 'split by weight - Microwave Oven', 'debit': 0.0, 'credit': 40.0},
{'name': 'split by quantity - Microwave Oven', 'debit': 33.33, 'credit': 0.0},
{'name': 'split by quantity - Microwave Oven', 'debit': 0.0, 'credit': 33.33},
{'name': 'equal split - Microwave Oven', 'debit': 2.5, 'credit': 0.0},
{'name': 'equal split - Microwave Oven', 'debit': 0.0, 'credit': 2.5},
{'name': 'split by volume - Refrigerator: 2.0 already out', 'debit': 0.5, 'credit': 0.0},
{'name': 'split by volume - Refrigerator: 2.0 already out', 'debit': 0.0, 'credit': 0.5},
{'name': 'split by weight - Refrigerator: 2.0 already out', 'debit': 4.0, 'credit': 0.0},
{'name': 'split by weight - Refrigerator: 2.0 already out', 'debit': 0.0, 'credit': 4.0},
{'name': 'split by weight - Refrigerator', 'debit': 0.0, 'credit': 10.0},
{'name': 'split by weight - Refrigerator', 'debit': 10.0, 'credit': 0.0},
{'name': 'split by volume - Refrigerator', 'debit': 0.0, 'credit': 1.25},
{'name': 'split by volume - Refrigerator', 'debit': 1.25, 'credit': 0.0},
{'name': 'split by quantity - Refrigerator: 2.0 already out', 'debit': 6.67, 'credit': 0.0},
{'name': 'split by quantity - Refrigerator: 2.0 already out', 'debit': 0.0, 'credit': 6.67},
{'name': 'split by quantity - Refrigerator', 'debit': 16.67, 'credit': 0.0},
{'name': 'split by quantity - Refrigerator', 'debit': 0.0, 'credit': 16.67},
{'name': 'equal split - Refrigerator: 2.0 already out', 'debit': 1.0, 'credit': 0.0},
{'name': 'equal split - Refrigerator: 2.0 already out', 'debit': 0.0, 'credit': 1.0},
{'name': 'equal split - Refrigerator', 'debit': 2.5, 'credit': 0.0},
{'name': 'equal split - Refrigerator', 'debit': 0.0, 'credit': 2.5}
]
if stock_negative_landed_cost.account_move_id.company_id.anglo_saxon_accounting:
move_lines += [
{'name': 'split by volume - Refrigerator: 2.0 already out', 'debit': 0.5, 'credit': 0.0},
{'name': 'split by volume - Refrigerator: 2.0 already out', 'debit': 0.0, 'credit': 0.5},
{'name': 'split by weight - Refrigerator: 2.0 already out', 'debit': 4.0, 'credit': 0.0},
{'name': 'split by weight - Refrigerator: 2.0 already out', 'debit': 0.0, 'credit': 4.0},
{'name': 'split by quantity - Refrigerator: 2.0 already out', 'debit': 6.67, 'credit': 0.0},
{'name': 'split by quantity - Refrigerator: 2.0 already out', 'debit': 0.0, 'credit': 6.67},
{'name': 'equal split - Refrigerator: 2.0 already out', 'debit': 1.0, 'credit': 0.0},
{'name': 'equal split - Refrigerator: 2.0 already out', 'debit': 0.0, 'credit': 1.0},
]
self.assertRecordValues(
sorted(stock_negative_landed_cost.account_move_id.line_ids, key=lambda d: (d['name'], d['debit'])),
sorted(move_lines, key=lambda d: (d['name'], d['debit'])),
)
def _process_incoming_shipment(self):
""" Two product incoming shipment. """
# Confirm incoming shipment.
self.picking_in.action_confirm()
# Transfer incoming shipment
res_dict = self.picking_in.button_validate()
wizard = Form(self.env[(res_dict.get('res_model'))].with_context(res_dict.get('context'))).save()
wizard.process()
return self.picking_in
def _process_outgoing_shipment(self):
""" One product Outgoing shipment. """
# Confirm outgoing shipment.
self.picking_out.action_confirm()
# Product assign to outgoing shipments
self.picking_out.action_assign()
# Transfer picking.
res_dict = self.picking_out.button_validate()
wizard = Form(self.env[(res_dict.get('res_model'))].with_context(res_dict['context'])).save()
wizard.process()
def _create_landed_costs(self, value, picking_in):
return self.LandedCost.create(dict(
picking_ids=[(6, 0, [picking_in.id])],
account_journal_id=self.expenses_journal.id,
cost_lines=[
(0, 0, {
'name': 'equal split',
'split_method': 'equal',
'price_unit': value['equal_price_unit'],
'product_id': self.landed_cost.id}),
(0, 0, {
'name': 'split by quantity',
'split_method': 'by_quantity',
'price_unit': value['quantity_price_unit'],
'product_id': self.brokerage_quantity.id}),
(0, 0, {
'name': 'split by weight',
'split_method': 'by_weight',
'price_unit': value['weight_price_unit'],
'product_id': self.transportation_weight.id}),
(0, 0, {
'name': 'split by volume',
'split_method': 'by_volume',
'price_unit': value['volume_price_unit'],
'product_id': self.packaging_volume.id})
],
))
def _validate_additional_landed_cost_lines(self, stock_landed_cost, valid_vals):
for valuation in stock_landed_cost.valuation_adjustment_lines:
add_cost = valuation.additional_landed_cost
split_method = valuation.cost_line_id.split_method
product = valuation.move_id.product_id
if split_method == 'equal':
self.assertEqual(add_cost, valid_vals['equal'], self._error_message(valid_vals['equal'], add_cost))
elif split_method == 'by_quantity' and product == self.product_refrigerator:
self.assertEqual(add_cost, valid_vals['by_quantity_refrigerator'], self._error_message(valid_vals['by_quantity_refrigerator'], add_cost))
elif split_method == 'by_quantity' and product == self.product_oven:
self.assertEqual(add_cost, valid_vals['by_quantity_oven'], self._error_message(valid_vals['by_quantity_oven'], add_cost))
elif split_method == 'by_weight' and product == self.product_refrigerator:
self.assertEqual(add_cost, valid_vals['by_weight_refrigerator'], self._error_message(valid_vals['by_weight_refrigerator'], add_cost))
elif split_method == 'by_weight' and product == self.product_oven:
self.assertEqual(add_cost, valid_vals['by_weight_oven'], self._error_message(valid_vals['by_weight_oven'], add_cost))
elif split_method == 'by_volume' and product == self.product_refrigerator:
self.assertEqual(add_cost, valid_vals['by_volume_refrigerator'], self._error_message(valid_vals['by_volume_refrigerator'], add_cost))
elif split_method == 'by_volume' and product == self.product_oven:
self.assertEqual(add_cost, valid_vals['by_volume_oven'], self._error_message(valid_vals['by_volume_oven'], add_cost))
    def _error_message(self, actual_cost, computed_cost):
        return 'Additional Landed Cost should be %s instead of %s' % (actual_cost, computed_cost)
@tagged('post_install', '-at_install')
class TestLandedCostsWithPurchaseAndInv(TestStockValuationLCCommon):
def test_invoice_after_lc(self):
self.env.company.anglo_saxon_accounting = True
self.product1.product_tmpl_id.categ_id.property_cost_method = 'fifo'
self.product1.product_tmpl_id.categ_id.property_valuation = 'real_time'
self.price_diff_account = self.env['account.account'].create({
'name': 'price diff account',
'code': 'price diff account',
'user_type_id': self.env.ref('account.data_account_type_current_assets').id,
})
self.product1.property_account_creditor_price_difference = self.price_diff_account
# Create PO
po_form = Form(self.env['purchase.order'])
po_form.partner_id = self.env['res.partner'].create({'name': 'vendor'})
with po_form.order_line.new() as po_line:
po_line.product_id = self.product1
po_line.product_qty = 1
po_line.price_unit = 455.0
order = po_form.save()
order.button_confirm()
# Receive the goods
receipt = order.picking_ids[0]
receipt.move_lines.quantity_done = 1
receipt.button_validate()
# Check SVL and AML
svl = self.env['stock.valuation.layer'].search([('stock_move_id', '=', receipt.move_lines.id)])
self.assertAlmostEqual(svl.value, 455)
aml = self.env['account.move.line'].search([('account_id', '=', self.company_data['default_account_stock_valuation'].id)])
self.assertAlmostEqual(aml.debit, 455)
# Create and validate LC
lc = self.env['stock.landed.cost'].create(dict(
picking_ids=[(6, 0, [receipt.id])],
account_journal_id=self.stock_journal.id,
cost_lines=[
(0, 0, {
'name': 'equal split',
'split_method': 'equal',
'price_unit': 99,
'product_id': self.productlc1.id,
}),
],
))
lc.compute_landed_cost()
lc.button_validate()
# Check LC, SVL and AML
self.assertAlmostEqual(lc.valuation_adjustment_lines.final_cost, 554)
svl = self.env['stock.valuation.layer'].search([('stock_move_id', '=', receipt.move_lines.id)], order='id desc', limit=1)
self.assertAlmostEqual(svl.value, 99)
aml = self.env['account.move.line'].search([('account_id', '=', self.company_data['default_account_stock_valuation'].id)], order='id desc', limit=1)
self.assertAlmostEqual(aml.debit, 99)
# Create an invoice with the same price
move_form = Form(self.env['account.move'].with_context(default_move_type='in_invoice'))
move_form.invoice_date = move_form.date
move_form.partner_id = order.partner_id
move_form.purchase_id = order
move = move_form.save()
move.action_post()
# Check nothing was posted in the price difference account
price_diff_aml = self.env['account.move.line'].search([('account_id','=', self.price_diff_account.id), ('move_id', '=', move.id)])
self.assertEqual(len(price_diff_aml), 0, "No line should have been generated in the price difference account.")
| rven/odoo | addons/stock_landed_costs/tests/test_stock_landed_costs_purchase.py | Python | agpl-3.0 | 19,955 |
import os
import sys
import numpy as np
# suppress C++-level log output from TensorFlow (must be set before the import)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
# Read in data from file
data = np.genfromtxt(sys.argv[1], delimiter=',')
data = np.reshape(data,(2,2))
# Generate graph of gpu computations
graph = tf.Graph()
with graph.as_default():
with tf.device('/gpu:0'):
X = tf.placeholder("float64",[2,2])
npmatrix = np.array([[10.0, 2.0], [-5.0, 8.78]])
matrix = tf.Variable(npmatrix)
y = tf.matmul(X, matrix)
# Run the computations on the input (inside the graph's context, so the session
# and the initializer both refer to the graph built above rather than the
# default graph)
with graph.as_default():
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        output = sess.run(y, {X: data})
# print out results
print(str(output.flatten()))
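# Note: the graph/session code above is the TensorFlow 1.x API. A rough TF2
# sketch of the same computation (eager mode, not part of the original script):
#
#     with tf.device('/gpu:0'):
#         output = tf.matmul(tf.constant(data), tf.constant(npmatrix)).numpy()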
| CyberReboot/vent-plugins | gpu_example/example.py | Python | apache-2.0 | 744 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
from python cookbook 2nd edition.
"""
import sys
import inspect
class SuperMixin(object):
"""A way to use super.
Example:
>>> class Base(list, SuperMixin):
... pass
...
>>> class DerivedA(Base):
... def dmethod(self):
... print('in DerivedA')
... DerivedA.super()
...
>>> class DerivedB(Base):
... def dmethod(self):
... print('in DerivedB')
... DerivedB.super()
...
>>> class DDerived(DerivedA, DerivedB):
... def dmethod(self):
... print('in DDerived')
... DDerived.super()
...
>>> DDerived().dmethod()
in DDerived
in DerivedA
in DerivedB
"""
def super(cls, *args, **kwargs):
# frame = inspect.currentframe(1)
frame = sys._getframe(1)
self = frame.f_locals['self']
method_name = frame.f_code.co_name
method = getattr(super(cls, self), method_name, None)
if inspect.ismethod(method):
return method(*args, **kwargs)
super = classmethod(super)
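# How it works: sys._getframe(1) is the calling method's frame; its locals hold
# the instance ('self') and its code object carries the method's name, which
# lets the mixin dispatch to the next class in the MRO without naming it.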
| ptrsxu/snippetpy | builtinim/supermixin.py | Python | mit | 1,129 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-03-15 20:34
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('crowdcop_web', '0002_campaign_num_tips'),
]
operations = [
migrations.AddField(
model_name='campaign',
name='campaign_image_url',
field=models.TextField(default='https://commons.wikimedia.org/wiki/File:Pup2.JPG'),
preserve_default=False,
),
]
| bocaaust/CrowdCop | CrowdCop_test/crowdcop/crowdcop_web/migrations/0003_campaign_campaign_image_url.py | Python | apache-2.0 | 547 |
#!/usr/bin/python -tt
# -*- coding: utf-8 -*-
"""Classes for tabular data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import MutableSequence
from copy import copy, deepcopy
from types import NoneType
from pyselection import core
from pyselection.core import int_types
from pyselection.core import str_types
from pyselection.core import is_sized_iterable
from pyselection.core import range
class BaseList(MutableSequence):
@classmethod
def validate_data_types(this, data_types):
if not isinstance(data_types, tuple):
raise TypeError("%s data types must be specified as a tuple" %
this.__name__)
data_types = set(data_types)
if any( x not in core.table_data_types for x in data_types ):
raise TypeError("%s data types must be one or more of %s" %
(this.__name__, str( tuple(x.__name__
for x in core.table_data_types) ) ) )
data_types.add(NoneType)
return tuple( sorted( list(data_types) ) )
@property
def data_types(self):
return self._dtypes
@property
def nom(self):
return repr(self.__class__.__name__)
def __init__(self, contents, data_types=None):
if data_types is not None:
self._dtypes = self.__class__.validate_data_types(data_types)
else:
self._dtypes = core.table_data_types
self.validate_list(contents)
self._list = [ x for x in contents ]
def __add__(self, other):
try:
if isinstance(other, BaseList):
self._verify_combinable(other)
else:
other = self.__class__(other, data_types=self._dtypes)
except TypeError:
raise TypeError("cannot combine objects of type %s and %s" %
(self.nom, repr(type(other).__name__) ) )
if type(other) == type(self):
return self.__class__(self._list + other._list, data_types=self._dtypes)
else:
return other.__radd__(self)
def __bool__(self):
return len(self._list) != 0
def __contains__(self, value):
return value in self._list
def __copy__(self):
return self.__class__(copy(self._list), data_types=self._dtypes)
def __deepcopy__(self, memo=dict() ):
return self.__class__(deepcopy(self._list, memo), data_types=self._dtypes)
def __delitem__(self, key):
if isinstance(key, int_types):
index = self._adapt_index(key)
del self._list[index]
elif isinstance(key, slice):
slc = self._adapt_slice(key)
del self._list[slc]
else:
raise TypeError("invalid %s key (%s)" % (self.nom, repr(key)))
def __eq__(self, other):
try:
if not isinstance(other, BaseList):
other = self.__class__(other, data_types=self._dtypes)
except TypeError:
return False
if type(other) != type(self) and issubclass(type(other), BaseList):
return other.__eq__(self)
else:
return self._dtypes == other._dtypes and self._list == other._list
def __getitem__(self, key):
if isinstance(key, int_types):
item = self.get_element(key)
elif isinstance(key, slice):
item = self.get_slice(key)
else:
raise TypeError("invalid %s key (%s)" % (self.nom, repr(key)))
return item
def __iadd__(self, other):
try:
if not isinstance(other, BaseList):
other = self.__class__(other, data_types=self._dtypes)
elif type(other) == type(self):
self._verify_combinable(other)
else:
raise TypeError
except TypeError:
raise TypeError("cannot append %s to %s" %
(repr(type(other).__name__) ), self.nom)
self = self.__class__(self._list + other._list, data_types=self._dtypes)
return self
def __iter__(self):
for x in self._list:
yield x
def __len__(self):
return len(self._list)
def __ne__(self, other):
return not self == other
def __nonzero__(self):
return type(self).__bool__(self)
def __radd__(self, other):
try:
if isinstance(other, BaseList):
self._verify_combinable(other)
else:
other = self.__class__(other, data_types=self._dtypes)
except TypeError:
raise TypeError("cannot combine objects of type %s and %s" %
(repr(type(other).__name__), self.nom) )
if type(other) == type(self):
return self.__class__(other._list + self._list, data_types=self._dtypes)
else:
return other.__add__(self)
def __reversed__(self):
for x in reversed(self._list):
yield x
def __setitem__(self, key, value):
if isinstance(key, int_types):
self.set_element(key, value)
elif isinstance(key, slice):
self.set_slice(key, value)
else:
raise TypeError("invalid %s key (%s)" % (self.nom, repr(key) ) )
def __str__(self):
contents = "(%s)" % ", ".join( repr(x) for x in self._list )
return "%s%s" % (self.nom, contents)
def _adapt_index(self, index):
if not isinstance(index, int_types):
raise IndexError("%s index (%s) must be an integer" % (self.nom, repr(index) ) )
length = len(self._list)
if index < -length or index >= length:
raise IndexError("%s index (%d) out of range" % (self.nom, index) )
if index < 0:
index += length
return index
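    # Example: for a list of length 5, _adapt_index(-2) returns 3, while
    # _adapt_index(5) or _adapt_index(-6) raises IndexError.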
def _adapt_slice(self, slc, properties=None):
try:
if not all( isinstance(x, tuple([int_types] + [NoneType]) )
for x in (slc.start, slc.stop, slc.step) ):
raise TypeError("%s slice indices must be integer or None" % self.nom)
except AttributeError:
raise TypeError("%s _adapt_slice() takes a slice object" % self.nom)
length = len(self._list)
if slc.step is None:
step = 1
elif slc.step != 0:
step = slc.step
else:
raise ValueError("%s slice step cannot be zero" % self.nom)
if slc.start is not None:
start = slc.start
            # allow start == length so empty end slices (e.g. append/insert) work
            if start < -length or start > length:
raise IndexError("%s slice start (%d) out of range" % (self.nom, start) )
if start < 0:
start += length
else:
start = 0 if step > 0 else length - 1
if slc.stop is not None:
stop = slc.stop
if stop < -length or stop > length:
raise IndexError("%s slice stop (%d) out of range" % (self.nom, stop) )
if stop < 0:
stop += length
if step == 1:
if start > stop:
stop = start
elif (step > 1 and start >= stop) or (step < 0 and start <= stop):
raise ValueError("%s extended slice is of size 0" % self.nom)
else:
stop = length if step > 0 else -1
if properties is not None:
step_quotient, step_remainder = divmod( abs(stop - start), abs(step) )
if step_remainder:
if step > 0:
last = stop - step_remainder
else:
last = stop + step_remainder
size = step_quotient + 1
else:
last = stop - step
size = step_quotient
try:
properties['last'] = last
properties['size'] = size
properties['min'], properties['max'] = sorted([start, last])
properties['span'] = properties['max'] - properties['min'] + 1
except TypeError:
raise TypeError("%s _adapt_slice() properties object must be a dict" % self.nom)
return slice(start, stop, step)
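    # Worked example: for a list of length 6, slice(None, None, -2) is adapted
    # to slice(5, -1, -2), covering indices 5, 3, 1 when stepped through, and
    # the optional properties dict is filled with size=3, last=1, min=1, max=5,
    # span=5.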
def _verify_combinable(self, other):
try:
if self._dtypes != other._dtypes:
raise ValueError("cannot combine %s and %s (data type mismatch)" %
(self.nom, other.nom) )
if type(other) != type(self):
for x in other:
self.validate_element(x)
except (AttributeError, TypeError):
raise TypeError("cannot combine objects of type %s and %s" %
(self.nom, repr(type(other).__name__) ) )
def append(self, value):
i = len(self._list)
self[i:i] = [ value ]
def count(self, value, start=None, stop=None):
indices = self.iter_indices(start=start, stop=stop)
return sum( 1 if self._list[i] == value else 0 for i in indices )
def extend(self, values):
i = len(self._list)
self[i:i] = values
def findall(self, value, start=None, stop=None):
indices = self.iter_indices(start=start, stop=stop)
return tuple( i for i in indices if self._list[i] == value )
def get_element(self, index):
index = self._adapt_index(index)
return self._list[index]
def get_slice(self, key):
slc = self._adapt_slice(key)
return self.__class__(self._list[slc], data_types=self._dtypes)
def index(self, value, start=None, stop=None):
indices = self.iter_indices(start=start, stop=stop)
for i in indices:
if self._list[i] == value:
return i
raise ValueError("value (%s) not found" % repr(value) )
def insert(self, index, value):
self[index:index] = [ value ]
def iter_indices(self, start=None, stop=None, reverse=False):
length = len(self._list)
if start is not None:
if not isinstance(start, int_types):
raise IndexError("%s start index (%s) must be an integer" %
(self.nom, repr(start) ) )
if start < -length or start >= length:
raise IndexError("%s start index (%d) out of range" %
(self.nom, start) )
if start < 0:
start += length
else:
start = 0
if stop is not None:
if not isinstance(stop, int_types):
raise IndexError("%s stop index (%s) must be an integer" %
(self.nom, repr(stop) ) )
if stop < -length or stop > length:
raise IndexError("%s stop index (%d) out of range" %
(self.nom, stop) )
if stop < 0:
stop += length
else:
stop = length
if start >= stop:
raise IndexError("%s iteration range has length zero" % self.nom)
if not reverse:
for i in range(start, stop):
yield i
else:
for i in reversed( range(start, stop) ):
yield i
def pop(self):
return self._list.pop()
def reverse(self):
self._list.reverse()
def rindex(self, value, start=None, stop=None):
indices = self.iter_indices(start=start, stop=stop, reverse=True)
for i in indices:
if self._list[i] == value:
return i
raise ValueError("value (%s) not found" % repr(value) )
def set_element(self, index, value):
        index = self._adapt_index(index)
self.validate_element(value)
self._list[index] = value
def set_slice(self, key, value):
slc_info = dict()
slc = self._adapt_slice(key, slc_info)
try:
value_length = len(value)
except TypeError:
raise TypeError("%s slice value must be a sized iterable" % self.nom)
if slc.step != 1:
if value_length != slc_info['size']:
raise ValueError("cannot assign %d values to extended slice "
"of size %d" % (value_length, slc_info['size']) )
self.validate_list(value)
self._list[slc] = value
def tolist(self):
return [ x for x in self._list ]
def validate_element(self, value):
if not isinstance(value, self._dtypes):
dtype_names = str( tuple(x.__name__ for x in self._dtypes) )
raise TypeError("%s element data type must be one or more of %s" %
(self.nom, dtype_names) )
def validate_list(self, values):
if isinstance(values, BaseList):
if all( x in self._dtypes for x in values._dtypes ):
return
elif not is_sized_iterable(values) or isinstance(values, str_types):
raise TypeError("%s list must be a sized non-string iterable" % self.nom)
if not all( isinstance(value, self._dtypes) for value in values ):
dtype_names = str( tuple(x.__name__ for x in self._dtypes) )
raise TypeError("%s element data types must be one or more of %s" %
(self.nom, dtype_names) )
################################################################################
class BaseTable(BaseList):
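    """A mutable 2-D sequence whose rows are BaseList instances.

    Rows may be jagged (of differing lengths) and may carry optional
    TableLabels.  Items are addressed row-wise (t[r], t[a:b]) or
    cell-wise (t[r, c], t[r1:r2, c1:c2]).

    Illustrative sketch -- assumes int is among core.table_data_types:

        t = BaseTable([[1, 2, 3], [4, 5]])
        t[0, 1]        # -> 2
        t[1, 0] = 9    # cell assignment
        t.findall(9)   # -> ((1, 0),)
    """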
@classmethod
def validate_row_type(this, row_type):
if not issubclass(row_type, BaseList) or issubclass(row_type, BaseTable):
raise TypeError("BaseTable row type must be a BaseList but not BaseTable")
@property
def data_types(self):
return self._dtypes
@property
def max_row_length(self):
return self._max_row_length
@property
def min_row_length(self):
return self._min_row_length
@property
def row_lengths(self):
return self._row_lengths
def __init__(self, contents, data_types=None, row_type=None, row_labels=None):
if data_types is not None:
self._dtypes = self.__class__.validate_data_types(data_types)
else:
self._dtypes = core.table_data_types
if row_type is not None:
self.__class__.validate_row_type(row_type)
self._rtype = row_type
else:
self._rtype = BaseList
self._list = [ self._rtype(row, data_types=self._dtypes)
for row in contents ]
if row_labels is not None:
self.row_labels = row_labels
def __add__(self, other):
try:
if isinstance(other, BaseTable):
self._verify_combinable(other)
else:
other = self.__class__(other, data_types=self._dtypes,
row_type=self._rtype)
except TypeError:
raise TypeError("cannot catenate objects of type %s and %s" %
(self.nom, repr(type(other).__name__) ) )
if type(other) == type(self):
item = deepcopy(self)
item.extend(other)
return item
else:
return other.__radd__(self)
def __contains__(self, value):
if isinstance(value, self._dtypes):
return any( value in row for row in self._list )
else:
return value in self._list
def __copy__(self):
if hasattr(self, "row_labels"):
row_labels = copy(self.row_labels)
else:
row_labels = None
return self.__class__(copy(self._list), data_types=self._dtypes,
row_type=self._rtype, row_labels=row_labels)
    def __deepcopy__(self, memo=None):
        if memo is None:
            memo = dict()
if hasattr(self, "row_labels"):
row_labels = deepcopy(self.row_labels)
else:
row_labels = None
return self.__class__([ deepcopy(x, memo) for x in self._list ],
data_types=deepcopy(self._dtypes, memo),
row_type=deepcopy(self._rtype, memo),
row_labels=row_labels )
def __delitem__(self, key):
if isinstance(key, int_types):
index = self._adapt_index(key)
del self._list[index]
elif isinstance(key, slice):
slc = self._adapt_slice(key)
del self._list[slc]
elif isinstance(key, tuple):
try:
row_key, col_key = key
except ValueError:
raise TypeError("too many %s indices/keys" % self.nom)
slicer = TableSlicer(self, row_key, col_key)
for r in slicer.iter_rows_decreasing():
del self._list[r][ slicer.col_slice ]
else:
raise TypeError("invalid %s index/key (%s)" % (self.nom, repr(key) ) )
# Clear row lengths so they will be recalculated.
self._clear_row_lengths()
def __eq__(self, other):
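        # Coerce plain iterables to a BaseTable, defer to a more-derived
        # subclass for symmetric comparison, then require matching data
        # types, row type, row contents and row labels.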
try:
if not isinstance(other, BaseTable):
other = BaseTable(other, data_types=self._dtypes,
row_type=self._rtype)
except TypeError:
return False
if type(other) != type(self) and issubclass(type(other), BaseTable):
return other.__eq__(self)
else:
if self._dtypes != other._dtypes:
return False
if self._rtype != other._rtype:
return False
if self._list != other._list:
return False
self_labels = self.row_labels if hasattr(self, "row_labels") else None
other_labels = other.row_labels if hasattr(other, "row_labels") else None
if self_labels != other_labels:
return False
return True
def __getattr__(self, attr):
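        # Lazily (re)build attributes: row-length statistics are computed on
        # first access after _clear_row_lengths(), and row_labels defaults to
        # a blank TableLabels sized to the current number of rows.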
if attr in ("_row_lengths", "_min_row_length", "_max_row_length"):
self._update_row_lengths()
return getattr(self, attr)
elif attr == "row_labels":
self.row_labels = TableLabels.from_length( len(self._list) )
return self.row_labels
else:
raise AttributeError("%s object has no attribute called %s" %
(self.nom, attr) )
def __getitem__(self, key):
item = None
if isinstance(key, int_types):
item = self.get_element(key)
elif isinstance(key, slice):
item = self.get_slice(key)
elif isinstance(key, tuple):
try:
row_key, col_key = key
except ValueError:
raise TypeError("too many %s indices/keys" % self.nom)
if all( isinstance(x, int_types) for x in key ):
item = self.get_table_element(row_key, col_key)
else:
item = self.get_table_slice(row_key, col_key)
else:
raise TypeError("invalid %s index/key (%s)" % (self.nom, repr(key) ) )
return item
def __iadd__(self, other):
try:
if not isinstance(other, BaseTable):
other = self.__class__(other, data_types=self._dtypes,
row_type=self._rtype)
elif type(other) == type(self):
self._verify_combinable(other)
else:
raise TypeError
except TypeError:
raise TypeError("cannot append %s to %s" %
                            (repr(type(other).__name__), self.nom) )
self.extend(other)
return self
def __ne__(self, other):
return not self == other
def __radd__(self, other):
try:
if isinstance(other, BaseTable):
self._verify_combinable(other)
else:
other = self.__class__(other, data_types=self._dtypes,
row_type=self._rtype)
except TypeError:
raise TypeError("cannot catenate objects of type %s and %s" %
                            (repr(type(other).__name__), self.nom) )
if type(other) == type(self):
item = deepcopy(other)
item.extend(self)
return item
else:
return other.__add__(self)
def __setattr__(self, attr, value):
if attr == "row_labels":
if value is not None:
if not isinstance(value, TableLabels):
raise TypeError("%s row labels must be of type TableLabels" % self.nom)
elif len(value) != len(self._list):
raise ValueError("number of %s row labels must match number of rows" % self.nom)
self.__dict__[attr] = value
def __setitem__(self, key, value):
if isinstance(key, int_types):
self.set_element(key, value)
elif isinstance(key, slice):
self.set_slice(key, value)
elif isinstance(key, tuple):
try:
row_key, col_key = key
except ValueError:
raise TypeError("too many %s indices/keys" % self.nom)
if all( isinstance(x, int_types) for x in key ):
self.set_table_element(row_key, col_key, value)
else:
self.set_table_slice(row_key, col_key, value)
else:
raise TypeError("invalid %s index/key (%s)" % (self.nom, repr(key) ) )
def __str__(self):
contents = "(\n %s\n)" % ",\n ".join( str(x) for x in self._list)
return "%s(\n %s\n)" % (self.nom, contents)
def _adapt_index2(self, index):
if not isinstance(index, int_types):
raise IndexError("%s index (%s) must be an integer" % (self.nom, repr(index) ) )
length = self.max_row_length
diff_lengths = self.min_row_length != length
if index < -length or index >= length:
raise IndexError("%s index (%d) out of range" % (self.nom, index ) )
if index < 0:
            if diff_lengths:
raise ValueError("cannot use negative index (%d) in jagged %s" % (index, self.nom) )
index += length
return index
def _adapt_slice2(self, slc, properties=None):
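        # Normalise a *column* slice against the widest row.  On jagged
        # tables, negative indices and far-end defaults are ambiguous
        # (rows disagree about their length), so they are rejected.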
try:
if not all( isinstance(x, tuple([int_types] + [NoneType]) )
for x in (slc.start, slc.stop, slc.step) ):
raise TypeError("%s slice indices must be integer or None" % basetable.nom)
except AttributeError:
raise TypeError("%s _adapt_slice2() takes a slice object" % self.nom)
length = self.max_row_length
diff_lengths = self.min_row_length != length
if slc.step is None:
step = 1
elif slc.step != 0:
step = slc.step
else:
raise ValueError("%s slice step cannot be zero" % self.nom)
if slc.start is not None:
start = slc.start
if start < -length or start >= length:
raise IndexError("%s slice start (%d) out of range" % (self.nom, start) )
if start < 0:
if diff_lengths:
raise ValueError("cannot use negative start index (%d) in jagged %s" % (start, self.nom) )
start += length
else:
if step > 0:
start = 0
elif not diff_lengths:
start = length - 1
else:
raise ValueError("cannot set default start index of negative stepping slice in jagged %s" % self.nom)
if slc.stop is not None:
stop = slc.stop
if stop < -length or stop > length:
raise IndexError("%s slice stop (%d) out of range" % (self.nom, stop) )
if stop < 0:
if diff_lengths:
raise ValueError("cannot use negative stop index (%d) in jagged %s" % (stop, self.nom) )
stop += length
if step == 1:
if start > stop:
stop = start
elif (step > 1 and start >= stop) or (step < 0 and start <= stop):
raise ValueError("%s extended slice is of size 0" % self.nom)
else:
if step < 0:
stop = -1
elif not diff_lengths:
stop = length
else:
raise ValueError("cannot set default stop index in jagged %s" % self.nom)
if properties is not None:
step_quotient, step_remainder = divmod( abs(stop - start), abs(step) )
if step_remainder:
if step > 0:
last = stop - step_remainder
else:
last = stop + step_remainder
size = step_quotient + 1
else:
last = stop - step
size = step_quotient
try:
properties['last'] = last
properties['size'] = size
properties['min'], properties['max'] = sorted([start, last])
properties['span'] = properties['max'] - properties['min'] + 1
except TypeError:
raise TypeError("%s _adapt_slice2() properties object must be a dict" % self.nom)
return slice(start, stop, step)
def _clear_row_lengths(self):
try:
del self._row_lengths
del self._min_row_length
del self._max_row_length
except AttributeError:
pass
def _update_row_lengths(self):
self._row_lengths = tuple( len(x) for x in self._list )
try:
self._min_row_length = min(self._row_lengths)
self._max_row_length = max(self._row_lengths)
except ValueError:
self._min_row_length, self._max_row_length = None, None
def _verify_combinable(self, other):
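        # Tables combine only when data types and row type agree; any other
        # iterable is accepted after validating each of its rows.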
try:
if self._dtypes != other._dtypes:
raise ValueError("cannot combine %s and %s (data type mismatch)" %
(self.nom, other.nom) )
if self._rtype != other._rtype:
raise ValueError("cannot combine %s and %s (row type mismatch)" %
(self.nom, other.nom) )
if type(other) != type(self):
for x in other:
self.validate_list(x)
except (AttributeError, TypeError):
raise TypeError("cannot combine objects of type %s and %s" %
(self.nom, repr(type(other).__name__) ) )
def append(self, value):
i = len(self._list)
self[i:i] = [ self._rtype(value, data_types=self._dtypes) ]
def count(self, value, start=None, stop=None):
if isinstance(value, self._dtypes):
indices = self.iter_indices(start=start, stop=stop)
return sum( 1 if self._list[r][c] == value else 0
for r, c in indices )
else:
return super(BaseTable, self).count(value, start=start, stop=stop)
def extend(self, values):
i = len(self._list)
self[i:i] = self.__class__(values, data_types=self._dtypes)
def findall(self, value, start=None, stop=None):
if isinstance(value, self._dtypes):
indices = self.iter_indices(start=start, stop=stop)
return tuple( (r, c) for r, c in indices
if self._list[r][c] == value )
else:
return super(BaseTable, self).findall(value, start=start, stop=stop)
def get_element(self, row_index):
r = self._adapt_index(row_index)
return self._rtype(self._list[r], data_types=self._dtypes)
def get_slice(self, row_key):
slc = self._adapt_slice(row_key)
if hasattr(self, "row_labels") and any(x for x in self.row_labels[slc]):
row_labels = self.row_labels[slc]
else:
row_labels = None
        item = self.__class__(self._list[slc], data_types=self._dtypes,
                              row_type=self._rtype, row_labels=row_labels)
        return item
def get_table_element(self, row_index, col_index):
r = self._adapt_index(row_index)
c = self._adapt_index2(col_index)
try:
item = self._list[r][c]
        except (IndexError, TypeError):
raise IndexError("%s index (%d, %d) out of range" % (self.nom, r, c) )
return item
def get_table_slice(self, row_key, col_key):
slicer = TableSlicer(self, row_key, col_key)
rows = list()
for r in slicer.iter_rows():
row = list()
for c in slicer.iter_cols():
try:
x = self._list[r][c]
                except (IndexError, TypeError):
x = None
row.append(x)
rows.append(row)
if slicer.size[0] > 1:
item = self.__class__(rows, data_types=self._dtypes, row_type=self._rtype)
else:
item = self._rtype(rows[0], data_types=self._dtypes)
return item
def index(self, value, start=None, stop=None):
if isinstance(value, self._dtypes):
indices = self.iter_indices(start=start, stop=stop)
for r, c in indices:
if self._list[r][c] == value:
return (r, c)
else:
return super(BaseTable, self).index(value, start=start, stop=stop)
raise ValueError("value not in %s" % self.nom)
def insert(self, index, value):
        self[index:index] = [ self._rtype(value, data_types=self._dtypes) ]
def iter_indices(self, start=None, stop=None, reverse=False):
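        # Yield (row, col) pairs in row-major order; the column counter wraps
        # at each row's own length, so jagged rows are traversed correctly.
        # start/stop are inclusive/exclusive (row, col) tuples, and reverse
        # walks the same range backwards.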
table_length = len(self._list)
row_lengths = self._row_lengths
if start is not None:
try:
start_row, start_col = start
assert all( isinstance(x, int_types) for x in start )
except (AssertionError, ValueError):
raise TypeError("%s start argument must be a tuple of two integers" % self.nom)
if start_row < -table_length or start_row >= table_length:
raise IndexError("%s start row index (%d) out of range" % (self.nom, start_row) )
if start_row < 0:
start_row += table_length
start_row_length = row_lengths[start_row]
if start_col < -start_row_length or start_col >= start_row_length:
raise IndexError("%s start column index (%d) out of range" % (self.nom, start_col) )
if start_col < 0:
start_col += start_row_length
else:
start_row, start_col = (0, 0)
if stop is not None:
try:
stop_row, stop_col = stop
assert all( isinstance(x, int_types) for x in stop )
except (AssertionError, ValueError):
raise TypeError("%s stop argument must be a tuple of two integers" % self.nom)
if stop_row < -table_length or stop_row > table_length:
raise IndexError("%s stop row index (%d) out of range" % (self.nom, stop_row) )
if stop_row < 0:
stop_row += table_length
last_row_length = row_lengths[stop_row - 1]
if stop_col < -last_row_length or stop_col > last_row_length:
raise IndexError("%s stop column index (%d) out of range" % (self.nom, stop_col) )
if stop_col < 0:
stop_col += last_row_length
else:
stop_row, stop_col = (table_length, row_lengths[-1])
last_row = stop_row - 1
if stop_col == 0:
stop_row, last_row = stop_row - 1, last_row - 1
stop_col = row_lengths[last_row]
if start_row > last_row or (start_row == last_row and
start_col >= stop_col):
raise ValueError("%s has range zero" % self.nom)
if not reverse:
i, j = start_row, start_col
while i < last_row or ( i == last_row and j < stop_col ):
if j < row_lengths[i]:
yield (i, j)
j += 1
else:
i, j = (i+1, 0)
else:
i, j = last_row, stop_col - 1
while i > start_row or ( i == start_row and j >= start_col ):
if j >= 0:
yield (i, j)
j -= 1
else:
i, j = (i-1, row_lengths[i-1] - 1)
def pop(self):
if hasattr(self, "row_labels"):
self.row_labels = TableLabels( self.row_labels[:-1] )
self._clear_row_lengths()
return self._list.pop()
def reverse(self):
if hasattr(self, "row_labels"):
self.row_labels = TableLabels([x for x in reversed(self.row_labels)])
self._clear_row_lengths()
self._list.reverse()
def rindex(self, value, start=None, stop=None):
if isinstance(value, self._dtypes):
rindices = self.iter_indices(start=start, stop=stop, reverse=True)
for r, c in rindices:
if self._list[r][c] == value:
                    return (r, c)
else:
return super(BaseTable, self).rindex(value, start=start, stop=stop)
raise ValueError("value not in %s" % self.nom)
def set_element(self, row_index, value):
r = self._adapt_index(row_index)
self._list[r] = self._rtype(value, data_types=self._dtypes)
self._clear_row_lengths()
def set_slice(self, row_key, value):
slc_info = dict()
slc = self._adapt_slice(row_key, slc_info)
try:
value_length = len(value)
except TypeError:
raise TypeError("%s slice value must be a sized iterable" % self.nom)
if slc.step != 1:
if value_length != slc_info['size']:
raise ValueError("cannot assign %d rows to extended slice "
"of size %d" % (value_length, slc_info['size']) )
self._list[slc] = [ self._rtype(x, data_types=self._dtypes)
for x in value ]
if any( hasattr(x, "row_labels") for x in (self, value) ):
tlabels = list(self.row_labels)
try:
vlabels = list(value.row_labels)
except AttributeError:
vlabels = [''] * value_length
tlabels[slc] = vlabels
self.row_labels = TableLabels(tlabels)
self._clear_row_lengths()
def set_table_element(self, row_index, col_index, value):
r = self._adapt_index(row_index)
c = self._adapt_index2(col_index)
self.validate_element(value)
try:
self._list[r][c] = value
        except (IndexError, TypeError):  # column index out of range for this row
raise IndexError("%s index (%d, %d) out of range" % (self.nom, r, c) )
def set_table_slice(self, row_key, col_key, value):
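        # Cell-block assignment rules: the value must supply exactly one row
        # per selected row (double indexing never creates or deletes rows);
        # a single-column target needs single-column rows; extended column
        # slices need exact-width rows.  A contiguous slice ending past a
        # short row pads that row with None before assigning.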
slicer = TableSlicer(self, row_key, col_key)
if slicer.size[0] == 1:
value = [ value ]
value = self.__class__(value, data_types=self._dtypes)
table_row_lengths = self._row_lengths
value_row_lengths = value.row_lengths
if len(value) != slicer.size[0]:
raise ValueError("cannot use double-indexing to create/delete rows")
if isinstance(col_key, int_types):
if any( l != 1 for l in value_row_lengths ):
raise ValueError("cannot assign multi-column value to "
"single %s column" % self.nom)
elif slicer.step[1] != 1:
if any( l != slicer.size[1] for l in value_row_lengths ):
raise ValueError("cannot assign jagged rows to extended slice")
for i, r in enumerate( slicer.iter_rows() ):
row_length = table_row_lengths[r]
if slicer.max[1] >= row_length:
if abs(slicer.step[1]) != 1:
if slicer.max[1] > row_length:
raise ValueError("cannot assign to disjoint extended slice")
elif slicer.min[1] > row_length:
raise ValueError("cannot assign to disjoint slice")
self._list[r].extend([None] * (slicer.max[1] + 1 - row_length) )
self._list[r][ slicer.col_slice ] = value[i]
self._clear_row_lengths()
def tolist(self, flatten=False):
if flatten:
return [ x for row in self._list for x in row ]
else:
return [ list(x) for x in self._list ]
def validate_table(self, table):
if isinstance(table, BaseTable):
if all( x in self._dtypes for row in table for x in row._dtypes ):
return
elif not is_sized_iterable(table) or isinstance(table, str_types):
raise TypeError("%s table must be a sized non-string iterable" % self.nom)
for row in table:
if isinstance(row, BaseList):
if all( x in self._dtypes for x in row._dtypes ):
continue
elif not is_sized_iterable(row) or isinstance(row, str_types):
raise TypeError("%s row must be a sized non-string iterable" % self.nom)
if not all( isinstance(value, self._dtypes) for value in row ):
dtype_names = str( tuple(x.__name__ for x in self._dtypes) )
raise TypeError("%s element data types must be one or more of %s" %
(self.nom, dtype_names) )
class TableLabels(MutableSequence):
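    """Fixed-size sequence of row labels for a BaseTable.

    Labels are strings, unique when non-empty, with '' meaning "no label".
    A reverse label -> index map supports lookup by name, and every
    resizing operation (append, insert, pop, ...) raises TypeError.

    Illustrative sketch with hypothetical labels:

        labels = TableLabels(['top', '', 'bottom'])
        labels['bottom']   # -> 2
        labels[1] = 'mid'  # relabel in place
    """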
@classmethod
def from_length(this, length):
try:
return this([''] * length)
except TypeError:
raise TypeError("%s length (%s) must be an integer" % (self.nom, length) )
@property
def nom(self):
return self.__class__.__name__
def __init__(self, labels):
try:
assert len(labels) > 0
assert not isinstance(labels, str_types)
assert all( isinstance(x, str_types) for x in labels )
except (AssertionError, TypeError):
raise TypeError("%s() takes a sized iterable of strings" % self.nom)
self._labels = labels
self._label2index = dict()
for index, label in enumerate(labels):
if label != '':
if label in self._label2index:
raise ValueError("%s cannot contain duplicate labels" % self.nom)
self._label2index[label] = index
def __bool__(self):
return len(self._labels) != 0
def __contains__(self, value):
return value in self._labels
def __copy__(self):
return self.__class__( copy(self._labels) )
    def __deepcopy__(self, memo=None):
        if memo is None:
            memo = dict()
return self.__class__( deepcopy(self._labels, memo) )
    def __delitem__(self, key):
raise TypeError("%s cannot be resized" % self.nom)
def __eq__(self, other):
if isinstance(other, TableLabels):
return self._labels == other._labels
try:
return self._labels == TableLabels(other)._labels
except (TypeError, ValueError):
return False
    def __getitem__(self, key):
if isinstance(key, int_types):
if key < -len(self._labels) or key >= len(self._labels):
raise IndexError("%s index (%d) out of range" % (self.nom, key) )
item = self._labels[key]
elif isinstance(key, str_types):
try:
item = self._label2index[key]
except KeyError:
raise KeyError("%s label (%s) not found" % (self.nom, repr(key) ) )
elif isinstance(key, slice):
slc = self._adapt_slice(key)
item = tuple( self._labels[slc] )
else:
raise TypeError("invalid %s key (%s)" % (self.nom, repr(key)) )
return item
def __iadd__(self, other):
raise TypeError("%s cannot be resized" % self.nom)
def __add__(self, other):
if not issubclass(type(other), TableLabels):
other = self.__class__(other)
return self.__class__(self._labels + other._labels)
def __iter__(self):
for x in self._labels:
yield x
def __radd__(self, other):
if not issubclass(type(other), TableLabels):
other = self.__class__(other)
return self.__class__(other._labels + self._labels)
def __len__(self):
return len(self._labels)
def __ne__(self, other):
if isinstance(other, TableLabels):
return self._labels != other._labels
try:
return self._labels != TableLabels(other)._labels
except (TypeError, ValueError):
return True
def __nonzero__(self):
return type(self).__bool__(self)
def __reversed__(self):
for x in reversed(self._labels):
yield x
def __setitem__(self, key, value):
if isinstance(key, int_types):
self.set_label(key, value)
elif isinstance(key, str_types):
self.set_label(value, key)
elif isinstance(key, slice):
            if not is_sized_iterable(value) or isinstance(value, str_types):
raise TypeError("%s method takes a sized iterable of strings" % self.nom)
slc = self._adapt_slice(key)
slice_indices = [ i for i in range(slc.start, slc.stop, slc.step) ]
if len(value) != len(slice_indices):
raise TypeError("%s cannot be resized" % self.nom)
for index, label in zip(slice_indices, value):
self.set_label(index, label)
else:
raise TypeError("invalid %s key (%s)" % (self.nom, repr(key) ) )
def __str__(self):
contents = "(%s)" % ", ".join( repr(x) for x in self._labels )
return "%s%s" % (self.nom, contents)
def _adapt_slice(self, slc):
try:
if not all( isinstance(x, tuple([int_types] + [NoneType]) )
for x in (slc.start, slc.stop, slc.step) ):
raise TypeError("%s slice indices must be integer or None" % self.nom)
except AttributeError:
raise TypeError("%s _adapt_slice() takes a slice object" % self.nom)
length = len(self._labels)
if slc.step is None:
step = 1
elif slc.step != 0:
step = slc.step
else:
raise ValueError("%s slice step cannot be zero" % self.nom)
if slc.start is not None:
start = slc.start
if start < -length or start >= length:
raise IndexError("%s slice start (%d) out of range" % (self.nom, start) )
if start < 0:
start += length
else:
start = 0 if step > 0 else length - 1
if slc.stop is not None:
stop = slc.stop
            if stop < -length or stop > length:
raise IndexError("%s slice stop (%d) out of range" % (self.nom, stop) )
if stop < 0:
stop += length
if step == 1:
if start > stop:
stop = start
elif (step > 1 and start >= stop) or (step < 0 and start <= stop):
raise ValueError("%s extended slice is of size 0" % self.nom)
else:
stop = length if step > 0 else -1
return slice(start, stop, step)
def append(self, value):
raise TypeError("%s cannot be resized" % self.nom)
def clear(self, key):
if isinstance(key, int_types):
if key < -len(self._labels) or key >= len(self._labels):
raise IndexError("%s index (%d) out of range" % (self.nom, key) )
            if self._labels[key] in self._label2index:
                del self._label2index[ self._labels[key] ]
            self._labels[key] = ''
elif isinstance(key, str_types):
if key not in self._label2index:
raise KeyError("%s label (%s) not found" % (self.nom, repr(key)) )
self._labels[ self._label2index[key] ] = ''
del self._label2index[key]
else:
raise TypeError("invalid %s key (%s)" % (self.nom, repr(key) ) )
def count(self, label):
return 1 if label in self._label2index else 0
def extend(self, values):
raise TypeError("%s cannot be resized" % self.nom)
def index(self, label):
if label in self._label2index:
return self._label2index[label]
raise ValueError("%s label (%s) not found" % (self.nom, repr(key) ) )
def insert(self, index, value):
raise TypeError("%s cannot be resized" % self.nom)
def pop(self):
raise TypeError("%s cannot be resized" % self.nom)
def remove(self, label):
raise TypeError("%s cannot be resized" % self.nom)
def reverse(self):
raise TypeError("%s cannot be reordered" % self.nom)
def set_label(self, index, label):
if not isinstance(index, int_types):
raise TypeError("%s index (%s) must be an integer" % (self.nom, repr(index) ) )
if index < -len(self._labels) or index >= len(self._labels):
raise IndexError("%s index (%d) out of range" % (self.nom, index) )
if index < 0:
index += len(self._labels)
if not isinstance(label, str_types) or label == '':
raise TypeError("invalid %s label (%s)" % (self.nom, repr(label) ) )
if label in self._label2index and index != self._label2index[label]:
raise ValueError("%s cannot contain duplicate labels" % self.nom)
if self._labels[index] in self._label2index:
del self._label2index[ self._labels[index] ]
self._labels[index] = label
self._label2index[label] = index
def tolist(self):
return [ x for x in self._labels ]
class ListSlicer(object):
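    """Precomputed geometry for a BaseList index or slice key.

    The key is normalised once, exposing start/stop/step, last (the final
    index visited), min/max (bounds of the visited indices), size (how many
    indices) and span (max - min + 1), plus cached tuples of the indices in
    increasing and decreasing order.
    """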
@property
def last(self):
return self._last
@property
def max(self):
return self._max
@property
def min(self):
return self._min
@property
def nom(self):
return self.__class__.__name__
@property
def size(self):
return self._size
@property
def slice(self):
return slice(self._start, self._stop, self._step)
@property
def span(self):
return self._span
@property
def start(self):
return self._start
@property
def step(self):
return self._step
@property
def stop(self):
return self._stop
def __init__(self, obj, key):
if not isinstance(obj, BaseList):
raise TypeError("%s() takes a BaseList object" % self.nom)
if isinstance(key, int_types):
index = obj._adapt_index(key)
self._start = self._last = self._max = self._min = index
self._stop = index + 1
            self._step = self._size = self._span = 1
            self._rng = dict()
elif isinstance(key, slice):
slc_info = dict()
slc = obj._adapt_slice(key, slc_info)
self._start = slc.start
self._stop = slc.stop
self._step = slc.step
self._last = slc_info['last']
self._max = slc_info['max']
self._min = slc_info['min']
self._size = slc_info['size']
self._span = slc_info['span']
self._rng = dict()
else:
raise TypeError("invalid %s index/key (%s)" % (obj.nom, repr(key) ) )
def iter_decreasing(self):
self._rng.setdefault('decreasing', tuple(
x for x in range(self._max, self._min - 1, -abs(self._step) ) ) )
return self._rng['decreasing']
def iter_increasing(self):
self._rng.setdefault('increasing', tuple(
x for x in range(self._min, self._max + 1, abs(self._step) ) ) )
return self._rng['increasing']
def iter_indices(self):
self._rng.setdefault('iter', tuple(
x for x in range(self._start, self._stop, self._step) ) )
return self._rng['iter']
class TableSlicer(ListSlicer):
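    """Two-axis analogue of ListSlicer for BaseTable keys.

    Each geometric property becomes a (row, col) tuple.  _row_lengths
    records the rows that are too short to supply the full column range,
    letting iteration substitute None for the missing cells.
    """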
@property
def col_slice(self):
return slice(self._start[1], self._stop[1], self._step[1])
@property
def row_slice(self):
return slice(self._start[0], self._stop[0], self._step[0])
@property
def slice(self):
raise NotImplementedError("%s has no property 'slice'" % self.nom)
def __init__(self, table, row_key, col_key):
if not isinstance(table, BaseTable):
raise TypeError("%s() takes a BaseTable object" % self.nom)
adapt_index = (table._adapt_index, table._adapt_index2)
adapt_slice = (table._adapt_slice, table._adapt_slice2)
start, step, stop, last, xmax, xmin, size, span, = (
[None] * 2 for _ in range(8) )
for i, key in enumerate([row_key, col_key]):
if isinstance(key, int_types):
index = adapt_index[i](key)
start[i] = last[i] = xmax[i] = xmin[i] = index
stop[i] = index + 1
step[i] = size[i] = span[i] = 1
elif isinstance(key, slice):
slc_info = dict()
slc = adapt_slice[i](key, slc_info)
start[i] = slc.start
stop[i] = slc.stop
step[i] = slc.step
last[i] = slc_info['last']
xmax[i] = slc_info['max']
xmin[i] = slc_info['min']
size[i] = slc_info['size']
span[i] = slc_info['span']
else:
raise TypeError("invalid %s index/key (%s)" % (table.nom, repr(key) ) )
self._last = tuple(last)
self._min = tuple(xmin)
self._max = tuple(xmax)
self._size = tuple(size)
self._span = tuple(span)
self._start = tuple(start)
self._step = tuple(step)
self._stop = tuple(stop)
self._rng = ({}, {})
self._row_lengths = dict()
row_lengths = table.row_lengths
        if xmax[1] >= table.min_row_length:
for r in range(self._start[0], self._stop[0], self._step[0]):
if row_lengths[r] <= self._max[1]:
self._row_lengths[r] = row_lengths[r]
def iter_cols(self):
self._rng[1].setdefault('iter', tuple( x for x in
range(self._start[1], self._stop[1], self._step[1]) ) )
return self._rng[1]['iter']
def iter_cols_decreasing(self):
self._rng[1].setdefault('decreasing', tuple( x for x in
range(self._max[1], self._min[1] - 1, -abs(self._step[1]) ) ) )
return self._rng[1]['decreasing']
def iter_cols_increasing(self):
self._rng[1].setdefault('increasing', tuple( x for x in
range(self._min[1], self._max[1] + 1, abs(self._step[1]) ) ) )
return self._rng[1]['increasing']
def iter_decreasing(self):
for r in self.iter_rows_decreasing():
for c in self.iter_cols_decreasing():
if r in self._row_lengths and c >= self._row_lengths[r]:
yield None
else:
yield (r, c)
def iter_increasing(self):
for r in self.iter_rows_increasing():
for c in self.iter_cols_increasing():
if r in self._row_lengths and c >= self._row_lengths[r]:
yield None
else:
yield (r, c)
def iter_indices(self):
for r in self.iter_rows():
for c in self.iter_cols():
if r in self._row_lengths and c >= self._row_lengths[r]:
yield None
else:
yield (r, c)
def iter_rows(self):
self._rng[0].setdefault('iter', tuple( x for x in
range(self._start[0], self._stop[0], self._step[0]) ) )
return self._rng[0]['iter']
def iter_rows_decreasing(self):
self._rng[0].setdefault('decreasing', tuple( x for x in
range(self._max[0], self._min[0] - 1, -abs(self._step[0]) ) ) )
return self._rng[0]['decreasing']
def iter_rows_increasing(self):
self._rng[0].setdefault('increasing', tuple( x for x in
range(self._min[0], self._max[0] + 1, abs(self._step[0]) ) ) )
return self._rng[0]['increasing']
################################################################################ | walsht26/PySelection | pyselection/table.py | Python | gpl-3.0 | 55084 |
# pylint: disable=C0301
import decimal
from django.test import TestCase
from mock import patch, Mock
from ..models import Customer, Charge
from ..signals import card_changed
from ..utils import get_user_model
class TestCustomer(TestCase):
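    """Customer model tests.

    All Stripe traffic is intercepted by patching the stripe.* classes, so
    the assertions inspect the recorded call_args of the mocks instead of
    hitting the live API.
    """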
def setUp(self):
self.User = get_user_model()
self.user = self.User.objects.create_user(
username="patrick",
email="[email protected]"
)
self.customer = Customer.objects.create(
user=self.user,
stripe_id="cus_xxxxxxxxxxxxxxx",
card_fingerprint="YYYYYYYY",
card_last_4="2342",
card_kind="Visa"
)
@patch("stripe.Customer.retrieve")
@patch("stripe.Customer.create")
def test_customer_create_user_only(self, CreateMock, RetrieveMock):
self.customer.delete()
stripe_customer = CreateMock()
stripe_customer.active_card = None
stripe_customer.subscription = None
stripe_customer.id = "cus_YYYYYYYYYYYYY"
customer = Customer.create(self.user)
self.assertEqual(customer.user, self.user)
self.assertEqual(customer.stripe_id, "cus_YYYYYYYYYYYYY")
_, kwargs = CreateMock.call_args
self.assertEqual(kwargs["email"], self.user.email)
self.assertIsNone(kwargs["card"])
self.assertIsNone(kwargs["plan"])
self.assertIsNone(kwargs["trial_end"])
@patch("stripe.Invoice.create")
@patch("stripe.Customer.retrieve")
@patch("stripe.Customer.create")
def test_customer_create_user_with_plan(self, CreateMock, RetrieveMock, PayMock):
self.customer.delete()
stripe_customer = CreateMock()
stripe_customer.active_card = None
stripe_customer.subscription.plan.id = "pro-monthly"
stripe_customer.subscription.current_period_start = 1348876800
stripe_customer.subscription.current_period_end = 1349876800
stripe_customer.subscription.plan.amount = 9999
stripe_customer.subscription.status = "active"
stripe_customer.subscription.cancel_at_period_end = False
stripe_customer.subscription.start = 1348876800
stripe_customer.subscription.quantity = 1
stripe_customer.subscription.trial_start = 1348876800
stripe_customer.subscription.trial_end = 1349876800
stripe_customer.id = "cus_YYYYYYYYYYYYY"
customer = Customer.create(self.user, card="token232323", plan="pro")
self.assertEqual(customer.user, self.user)
self.assertEqual(customer.stripe_id, "cus_YYYYYYYYYYYYY")
_, kwargs = CreateMock.call_args
self.assertEqual(kwargs["email"], self.user.email)
self.assertEqual(kwargs["card"], "token232323")
self.assertEqual(kwargs["plan"], "pro-monthly")
self.assertIsNotNone(kwargs["trial_end"])
self.assertTrue(PayMock.called)
        self.assertEqual(customer.current_subscription.plan, "pro")
# @@@ Need to figure out a way to temporarily set DEFAULT_PLAN to "entry" for this test
# @patch("stripe.Invoice.create")
# @patch("stripe.Customer.retrieve")
# @patch("stripe.Customer.create")
# def test_customer_create_user_with_card_default_plan(self, CreateMock, RetrieveMock, PayMock):
# self.customer.delete()
# stripe_customer = CreateMock()
# stripe_customer.active_card = None
# stripe_customer.subscription.plan.id = "entry-monthly"
# stripe_customer.subscription.current_period_start = 1348876800
# stripe_customer.subscription.current_period_end = 1349876800
# stripe_customer.subscription.plan.amount = 9999
# stripe_customer.subscription.status = "active"
# stripe_customer.subscription.cancel_at_period_end = False
# stripe_customer.subscription.start = 1348876800
# stripe_customer.subscription.quantity = 1
# stripe_customer.subscription.trial_start = 1348876800
# stripe_customer.subscription.trial_end = 1349876800
# stripe_customer.id = "cus_YYYYYYYYYYYYY"
# customer = Customer.create(self.user, card="token232323")
# self.assertEqual(customer.user, self.user)
# self.assertEqual(customer.stripe_id, "cus_YYYYYYYYYYYYY")
# _, kwargs = CreateMock.call_args
# self.assertEqual(kwargs["email"], self.user.email)
# self.assertEqual(kwargs["card"], "token232323")
# self.assertEqual(kwargs["plan"], "entry-monthly")
# self.assertIsNotNone(kwargs["trial_end"])
# self.assertTrue(PayMock.called)
# self.assertTrue(customer.current_subscription.plan, "entry")
@patch("stripe.Customer.retrieve")
def test_customer_subscribe_with_specified_quantity(self, CustomerRetrieveMock):
customer = CustomerRetrieveMock()
customer.subscription.plan.id = "entry-monthly"
customer.subscription.current_period_start = 1348360173
customer.subscription.current_period_end = 1375603198
customer.subscription.plan.amount = decimal.Decimal("9.57")
customer.subscription.status = "active"
customer.subscription.cancel_at_period_end = True
customer.subscription.start = 1348360173
customer.subscription.quantity = 1
customer.subscription.trial_start = None
customer.subscription.trial_end = None
self.customer.subscribe("entry", quantity=3, charge_immediately=False)
_, kwargs = customer.update_subscription.call_args
self.assertEqual(kwargs["quantity"], 3)
@patch("stripe.Customer.retrieve")
def test_customer_subscribe_with_callback_quantity(self, CustomerRetrieveMock):
customer = CustomerRetrieveMock()
customer.subscription.plan.id = "entry-monthly"
customer.subscription.current_period_start = 1348360173
customer.subscription.current_period_end = 1375603198
customer.subscription.plan.amount = decimal.Decimal("9.57")
customer.subscription.status = "active"
customer.subscription.cancel_at_period_end = True
customer.subscription.start = 1348360173
customer.subscription.quantity = 1
customer.subscription.trial_start = None
customer.subscription.trial_end = None
self.customer.subscribe("entry", charge_immediately=False)
_, kwargs = customer.update_subscription.call_args
self.assertEqual(kwargs["quantity"], 4)
@patch("stripe.Customer.retrieve")
def test_customer_purge_leaves_customer_record(self, CustomerRetrieveMock):
self.customer.purge()
customer = Customer.objects.get(stripe_id=self.customer.stripe_id)
self.assertTrue(customer.user is None)
self.assertTrue(customer.card_fingerprint == "")
self.assertTrue(customer.card_last_4 == "")
self.assertTrue(customer.card_kind == "")
self.assertTrue(self.User.objects.filter(pk=self.user.pk).exists())
@patch("stripe.Customer.retrieve")
def test_customer_delete_same_as_purge(self, CustomerRetrieveMock):
self.customer.delete()
customer = Customer.objects.get(stripe_id=self.customer.stripe_id)
self.assertTrue(customer.user is None)
self.assertTrue(customer.card_fingerprint == "")
self.assertTrue(customer.card_last_4 == "")
self.assertTrue(customer.card_kind == "")
self.assertTrue(self.User.objects.filter(pk=self.user.pk).exists())
@patch("stripe.Customer.retrieve")
def test_customer_sync_updates_credit_card(self, StripeCustomerRetrieveMock):
"""
Test to make sure Customer.sync will update a credit card when there is a new card
"""
StripeCustomerRetrieveMock.return_value.active_card.fingerprint = "FINGERPRINT"
StripeCustomerRetrieveMock.return_value.active_card.type = "DINERS"
StripeCustomerRetrieveMock.return_value.active_card.last4 = "BEEF"
customer = Customer.objects.get(stripe_id=self.customer.stripe_id)
self.assertNotEqual(customer.card_fingerprint, customer.stripe_customer.active_card.fingerprint)
self.assertNotEqual(customer.card_last_4, customer.stripe_customer.active_card.last4)
self.assertNotEqual(customer.card_kind, customer.stripe_customer.active_card.type)
customer.sync()
# Reload saved customer
customer = Customer.objects.get(stripe_id=self.customer.stripe_id)
self.assertEqual(customer.card_fingerprint, customer.stripe_customer.active_card.fingerprint)
self.assertEqual(customer.card_last_4, customer.stripe_customer.active_card.last4)
self.assertEqual(customer.card_kind, customer.stripe_customer.active_card.type)
@patch("stripe.Customer.retrieve")
def test_customer_sync_does_not_update_credit_card(self, StripeCustomerRetrieveMock):
"""
Test to make sure Customer.sync will not update a credit card when there are no changes
"""
customer = Customer.objects.get(stripe_id=self.customer.stripe_id)
StripeCustomerRetrieveMock.return_value.active_card.fingerprint = customer.card_fingerprint
StripeCustomerRetrieveMock.return_value.active_card.type = customer.card_kind
StripeCustomerRetrieveMock.return_value.active_card.last4 = customer.card_last_4
self.assertEqual(customer.card_fingerprint, customer.stripe_customer.active_card.fingerprint)
self.assertEqual(customer.card_last_4, customer.stripe_customer.active_card.last4)
self.assertEqual(customer.card_kind, customer.stripe_customer.active_card.type)
customer.sync()
self.assertEqual(customer.card_fingerprint, customer.stripe_customer.active_card.fingerprint)
self.assertEqual(customer.card_last_4, customer.stripe_customer.active_card.last4)
self.assertEqual(customer.card_kind, customer.stripe_customer.active_card.type)
@patch("stripe.Customer.retrieve")
def test_customer_sync_removes_credit_card(self, StripeCustomerRetrieveMock):
"""
Test to make sure Customer.sync removes credit card when there is no active card
"""
def _perform_test(kitchen):
kitchen.sync()
# Reload saved customer
cus = Customer.objects.get(stripe_id=self.customer.stripe_id)
# Test to make sure card details were removed
self.assertEqual(cus.card_fingerprint, "")
self.assertEqual(cus.card_last_4, "")
self.assertEqual(cus.card_kind, "")
StripeCustomerRetrieveMock.return_value.active_card = None
customer = Customer.objects.get(stripe_id=self.customer.stripe_id)
_perform_test(customer)
# Test removal of attribute for active_card so hasattr will fail
# Add back credit card to the customer
self.test_customer_sync_updates_credit_card()
# Reload saved customer
customer = Customer.objects.get(stripe_id=self.customer.stripe_id)
# Remove the credit card from the mocked object
del customer.stripe_customer.active_card
_perform_test(customer)
def test_customer_sync_sends_credit_card_updated_signal(self):
"""
Test to make sure the card_changed signal gets sent when there is an updated credit card during sync
"""
mocked_func = Mock()
card_changed.connect(mocked_func, weak=False)
mocked_func.reset_mock()
self.test_customer_sync_updates_credit_card()
# Make sure the signal was called
self.assertTrue(mocked_func.called)
mocked_func.reset_mock()
self.test_customer_sync_removes_credit_card()
# Make sure the signal was called
self.assertTrue(mocked_func.called)
card_changed.disconnect(mocked_func, weak=False)
def test_customer_sync_does_not_send_credit_card_updated_signal(self):
"""
Test to make sure the card_changed signal does not get sent when there is no change to the credit card during sync
"""
mocked_func = Mock()
card_changed.connect(mocked_func, weak=False)
mocked_func.reset_mock()
self.test_customer_sync_does_not_update_credit_card()
# Make sure the signal was not called
self.assertFalse(mocked_func.called)
card_changed.disconnect(mocked_func, weak=False)
def test_change_charge(self):
self.assertTrue(self.customer.can_charge())
@patch("stripe.Customer.retrieve")
def test_cannot_charge(self, CustomerRetrieveMock):
self.customer.delete()
self.assertFalse(self.customer.can_charge())
def test_charge_accepts_only_decimals(self):
with self.assertRaises(ValueError):
self.customer.charge(10)
@patch("stripe.Charge.retrieve")
def test_record_charge(self, RetrieveMock):
RetrieveMock.return_value = {
"id": "ch_XXXXXX",
"card": {
"last4": "4323",
"type": "Visa"
},
"amount": 1000,
"paid": True,
"refunded": False,
"fee": 499,
"dispute": None,
"created": 1363911708,
"customer": "cus_xxxxxxxxxxxxxxx"
}
obj = self.customer.record_charge("ch_XXXXXX")
self.assertEquals(Charge.objects.get(stripe_id="ch_XXXXXX").pk, obj.pk)
self.assertEquals(obj.paid, True)
self.assertEquals(obj.disputed, False)
self.assertEquals(obj.refunded, False)
self.assertEquals(obj.amount_refunded, None)
@patch("stripe.Charge.retrieve")
def test_refund_charge(self, RetrieveMock):
charge = Charge.objects.create(
stripe_id="ch_XXXXXX",
customer=self.customer,
card_last_4="4323",
card_kind="Visa",
amount=decimal.Decimal("10.00"),
paid=True,
refunded=False,
fee=decimal.Decimal("4.99"),
disputed=False
)
RetrieveMock.return_value.refund.return_value = {
"id": "ch_XXXXXX",
"card": {
"last4": "4323",
"type": "Visa"
},
"amount": 1000,
"paid": True,
"refunded": True,
"amount_refunded": 1000,
"fee": 499,
"dispute": None,
"created": 1363911708,
"customer": "cus_xxxxxxxxxxxxxxx"
}
charge.refund()
charge2 = Charge.objects.get(stripe_id="ch_XXXXXX")
self.assertEquals(charge2.refunded, True)
self.assertEquals(charge2.amount_refunded, decimal.Decimal("10.00"))
def test_calculate_refund_amount_full_refund(self):
charge = Charge(
stripe_id="ch_111111",
customer=self.customer,
amount=decimal.Decimal("500.00")
)
self.assertEquals(
charge.calculate_refund_amount(),
50000
)
def test_calculate_refund_amount_partial_refund(self):
charge = Charge(
stripe_id="ch_111111",
customer=self.customer,
amount=decimal.Decimal("500.00")
)
self.assertEquals(
charge.calculate_refund_amount(amount=decimal.Decimal("300.00")),
30000
)
def test_calculate_refund_above_max_refund(self):
charge = Charge(
stripe_id="ch_111111",
customer=self.customer,
amount=decimal.Decimal("500.00")
)
self.assertEquals(
charge.calculate_refund_amount(amount=decimal.Decimal("600.00")),
50000
)
@patch("stripe.Charge.retrieve")
@patch("stripe.Charge.create")
def test_charge_converts_dollars_into_cents(self, ChargeMock, RetrieveMock):
ChargeMock.return_value.id = "ch_XXXXX"
RetrieveMock.return_value = {
"id": "ch_XXXXXX",
"card": {
"last4": "4323",
"type": "Visa"
},
"amount": 1000,
"paid": True,
"refunded": False,
"fee": 499,
"dispute": None,
"created": 1363911708,
"customer": "cus_xxxxxxxxxxxxxxx"
}
self.customer.charge(
amount=decimal.Decimal("10.00")
)
_, kwargs = ChargeMock.call_args
self.assertEquals(kwargs["amount"], 1000)
| wahuneke/django-stripe-payments | payments/tests/test_customer.py | Python | bsd-3-clause | 16404 |
# (C) British Crown Copyright 2010 - 2013, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Test iris.util
"""
# import iris tests first so that some things can be initialised before
# importing anything else
import iris.tests as tests
import inspect
import os
import StringIO
import unittest
import numpy as np
import iris.analysis
import iris.coords
import iris.tests.stock as stock
import iris.util
class TestMonotonic(unittest.TestCase):
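    # Thin wrappers around iris.util.monotonic that fail with a readable
    # message and can additionally check the detected direction (+1/-1).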
def assertMonotonic(self, array, direction=None, **kwargs):
if direction is not None:
mono, dir = iris.util.monotonic(array, return_direction=True, **kwargs)
if not mono:
                self.fail('Array was not monotonic:\n %r' % array)
if dir != np.sign(direction):
                self.fail('Array was monotonic but not in the direction expected:'
                          '\n + requested direction: %s\n + resultant direction: %s' % (direction, dir))
else:
mono = iris.util.monotonic(array, **kwargs)
if not mono:
                self.fail('Array was not monotonic:\n %r' % array)
def assertNotMonotonic(self, array, **kwargs):
mono = iris.util.monotonic(array, **kwargs)
if mono:
self.fail("Array was monotonic when it shouldn't be:/n %r" % array)
def test_monotonic_pve(self):
a = np.array([3, 4, 5.3])
self.assertMonotonic(a)
self.assertMonotonic(a, direction=1)
# test the reverse for negative monotonic.
a = a[::-1]
self.assertMonotonic(a)
self.assertMonotonic(a, direction=-1)
def test_not_monotonic(self):
b = np.array([3, 5.3, 4])
self.assertNotMonotonic(b)
def test_monotonic_strict(self):
b = np.array([3, 5.3, 4])
self.assertNotMonotonic(b, strict=True)
self.assertNotMonotonic(b)
b = np.array([3, 5.3, 5.3])
self.assertNotMonotonic(b, strict=True)
self.assertMonotonic(b, direction=1)
b = b[::-1]
self.assertNotMonotonic(b, strict=True)
self.assertMonotonic(b, direction=-1)
b = np.array([0.0])
self.assertRaises(ValueError, iris.util.monotonic, b)
self.assertRaises(ValueError, iris.util.monotonic, b, strict=True)
b = np.array([0.0, 0.0])
self.assertNotMonotonic(b, strict=True)
self.assertMonotonic(b)
class TestReverse(unittest.TestCase):
def test_simple(self):
a = np.arange(12).reshape(3, 4)
np.testing.assert_array_equal(a[::-1], iris.util.reverse(a, 0))
np.testing.assert_array_equal(a[::-1, ::-1], iris.util.reverse(a, [0, 1]))
np.testing.assert_array_equal(a[:, ::-1], iris.util.reverse(a, 1))
np.testing.assert_array_equal(a[:, ::-1], iris.util.reverse(a, [1]))
self.assertRaises(ValueError, iris.util.reverse, a, [])
self.assertRaises(ValueError, iris.util.reverse, a, -1)
self.assertRaises(ValueError, iris.util.reverse, a, 10)
self.assertRaises(ValueError, iris.util.reverse, a, [-1])
self.assertRaises(ValueError, iris.util.reverse, a, [0, -1])
def test_single(self):
a = np.arange(36).reshape(3, 4, 3)
np.testing.assert_array_equal(a[::-1], iris.util.reverse(a, 0))
np.testing.assert_array_equal(a[::-1, ::-1], iris.util.reverse(a, [0, 1]))
np.testing.assert_array_equal(a[:, ::-1, ::-1], iris.util.reverse(a, [1, 2]))
np.testing.assert_array_equal(a[..., ::-1], iris.util.reverse(a, 2))
self.assertRaises(ValueError, iris.util.reverse, a, -1)
self.assertRaises(ValueError, iris.util.reverse, a, 10)
self.assertRaises(ValueError, iris.util.reverse, a, [-1])
self.assertRaises(ValueError, iris.util.reverse, a, [0, -1])
class TestClipString(unittest.TestCase):
def setUp(self):
self.test_string = "Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum."
self.rider = "**^^**$$..--__" # A good chance at being unique and not in the string to be tested!
def test_oversize_string(self):
# Test with a clip length that means the string will be clipped
clip_length = 109
result = iris.util.clip_string(self.test_string, clip_length, self.rider)
# Check the length is between what we requested ( + rider length) and the length of the original string
self.assertTrue(clip_length + len(self.rider) <= len(result) < len(self.test_string), "String was not clipped.")
# Also test the rider was added
self.assertTrue(self.rider in result, "Rider was not added to the string when it should have been.")
def test_undersize_string(self):
# Test with a clip length that is longer than the string
clip_length = 10999
result = iris.util.clip_string(self.test_string, clip_length, self.rider)
self.assertEqual(len(result), len(self.test_string), "String was clipped when it should not have been.")
# Also test that no rider was added on the end if the string was not clipped
self.assertFalse(self.rider in result, "Rider was adding to the string when it should not have been.")
def test_invalid_clip_lengths(self):
# Clip values less than or equal to zero are not valid
for clip_length in [0, -100]:
result = iris.util.clip_string(self.test_string, clip_length, self.rider)
self.assertEqual(len(result), len(self.test_string), "String was clipped when it should not have been.")
def test_default_values(self):
# Get the default values specified in the function
argspec = inspect.getargspec(iris.util.clip_string)
arg_dict = dict(zip(argspec.args[-2:], argspec.defaults))
result = iris.util.clip_string(self.test_string, arg_dict["clip_length"], arg_dict["rider"])
self.assertLess(len(result), len(self.test_string), "String was not clipped.")
rider_returned = result[-len(arg_dict["rider"]):]
self.assertEquals(rider_returned, arg_dict["rider"], "Default rider was not applied.")
def test_trim_string_with_no_spaces(self):
clip_length = 200
no_space_string = "a" * 500
# Since this string has no spaces, clip_string will not be able to gracefully clip it
# but will instead clip it exactly where the user specified
result = iris.util.clip_string(no_space_string, clip_length, self.rider)
expected_length = clip_length + len(self.rider)
# Check the length of the returned string is equal to clip length + length of rider
self.assertEquals(len(result), expected_length, "Mismatch in expected length of clipped string. Length was %s, expected value is %s" % (len(result), expected_length))
class TestDescribeDiff(iris.tests.IrisTest):
def test_identical(self):
test_cube_a = stock.realistic_4d()
test_cube_b = stock.realistic_4d()
return_str_IO = StringIO.StringIO()
iris.util.describe_diff(test_cube_a, test_cube_b, output_file=return_str_IO)
return_str = return_str_IO.getvalue()
self.assertString(return_str, 'compatible_cubes.str.txt')
def test_different(self):
return_str_IO = StringIO.StringIO()
# test incompatible attributes
test_cube_a = stock.realistic_4d()
test_cube_b = stock.realistic_4d()
test_cube_a.attributes['Conventions'] = 'CF-1.5'
test_cube_b.attributes['Conventions'] = 'CF-1.6'
iris.util.describe_diff(test_cube_a, test_cube_b, output_file=return_str_IO)
return_str = return_str_IO.getvalue()
self.assertString(return_str, 'incompatible_attr.str.txt')
# test incompatible names
test_cube_a = stock.realistic_4d()
test_cube_b = stock.realistic_4d()
test_cube_a.standard_name = "relative_humidity"
return_str_IO.truncate(0)
iris.util.describe_diff(test_cube_a, test_cube_b, output_file=return_str_IO)
return_str = return_str_IO.getvalue()
self.assertString(return_str, 'incompatible_name.str.txt')
# test incompatible unit
test_cube_a = stock.realistic_4d()
test_cube_b = stock.realistic_4d()
test_cube_a.units = iris.unit.Unit('m')
return_str_IO.truncate(0)
iris.util.describe_diff(test_cube_a, test_cube_b, output_file=return_str_IO)
return_str = return_str_IO.getvalue()
self.assertString(return_str, 'incompatible_unit.str.txt')
# test incompatible methods
test_cube_a = stock.realistic_4d()
test_cube_b = stock.realistic_4d().collapsed('model_level_number', iris.analysis.MEAN)
return_str_IO.truncate(0)
iris.util.describe_diff(test_cube_a, test_cube_b, output_file=return_str_IO)
return_str = return_str_IO.getvalue()
self.assertString(return_str, 'incompatible_meth.str.txt')
def test_output_file(self):
# test incompatible attributes
test_cube_a = stock.realistic_4d()
test_cube_b = stock.realistic_4d().collapsed('model_level_number', iris.analysis.MEAN)
test_cube_a.attributes['Conventions'] = 'CF-1.5'
test_cube_b.attributes['Conventions'] = 'CF-1.6'
test_cube_a.standard_name = "relative_humidity"
test_cube_a.units = iris.unit.Unit('m')
with self.temp_filename() as filename:
with open(filename, 'w') as f:
iris.util.describe_diff(test_cube_a, test_cube_b, output_file=f)
f.close()
self.assertFilesEqual(filename,
'incompatible_cubes.str.txt')
class TestAsCompatibleShape(tests.IrisTest):
def test_slice(self):
cube = tests.stock.realistic_4d()
sliced = cube[1, :, 2, :-2]
expected = cube[1:2, :, 2:3, :-2]
res = iris.util.as_compatible_shape(sliced, cube)
self.assertEqual(res, expected)
def test_transpose(self):
cube = tests.stock.realistic_4d()
transposed = cube.copy()
transposed.transpose()
expected = cube
res = iris.util.as_compatible_shape(transposed, cube)
self.assertEqual(res, expected)
def test_slice_and_transpose(self):
cube = tests.stock.realistic_4d()
sliced_and_transposed = cube[1, :, 2, :-2]
sliced_and_transposed.transpose()
expected = cube[1:2, :, 2:3, :-2]
res = iris.util.as_compatible_shape(sliced_and_transposed, cube)
self.assertEqual(res, expected)
def test_collapsed(self):
cube = tests.stock.realistic_4d()
collapsed = cube.collapsed('model_level_number', iris.analysis.MEAN)
expected_shape = list(cube.shape)
expected_shape[1] = 1
expected_data = collapsed.data.reshape(expected_shape)
res = iris.util.as_compatible_shape(collapsed, cube)
self.assertCML(res, ('util', 'as_compatible_shape_collapsed.cml'),
checksum=False)
self.assertArrayEqual(expected_data, res.data)
self.assertArrayEqual(expected_data.mask, res.data.mask)
def test_reduce_dimensionality(self):
# Test that as_compatible_shape() can demote
# length one dimensions to scalars.
cube = tests.stock.realistic_4d()
src = cube[:, 2:3]
expected = reduced = cube[:, 2]
res = iris.util.as_compatible_shape(src, reduced)
self.assertEqual(res, expected)
def test_anonymous_dims(self):
cube = tests.stock.realistic_4d()
# Move all coords from dim_coords to aux_coords.
for coord in cube.dim_coords:
dim = cube.coord_dims(coord)
cube.remove_coord(coord)
cube.add_aux_coord(coord, dim)
sliced = cube[1, :, 2, :-2]
expected = cube[1:2, :, 2:3, :-2]
res = iris.util.as_compatible_shape(sliced, cube)
self.assertEqual(res, expected)
def test_scalar_auxcoord(self):
def dim_to_aux(cube, coord_name):
"""Convert coordinate on cube from DimCoord to AuxCoord."""
coord = cube.coord(coord_name)
coord = iris.coords.AuxCoord.from_coord(coord)
cube.replace_coord(coord)
cube = tests.stock.realistic_4d()
src = cube[:, :, 3]
dim_to_aux(src, 'grid_latitude')
expected = cube[:, :, 3:4]
dim_to_aux(expected, 'grid_latitude')
res = iris.util.as_compatible_shape(src, cube)
self.assertEqual(res, expected)
if __name__ == '__main__':
unittest.main()
| bblay/iris | lib/iris/tests/test_util.py | Python | gpl-3.0 | 13652 |
"""The Minecraft Server binary sensor platform."""
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_CONNECTIVITY,
BinarySensorEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.helpers.typing import HomeAssistantType
from . import MinecraftServer, MinecraftServerEntity
from .const import DOMAIN, ICON_STATUS, NAME_STATUS
async def async_setup_entry(
hass: HomeAssistantType, config_entry: ConfigEntry, async_add_entities
) -> None:
"""Set up the Minecraft Server binary sensor platform."""
server = hass.data[DOMAIN][config_entry.unique_id]
# Create entities list.
entities = [MinecraftServerStatusBinarySensor(server)]
# Add binary sensor entities.
async_add_entities(entities, True)
class MinecraftServerStatusBinarySensor(MinecraftServerEntity, BinarySensorEntity):
"""Representation of a Minecraft Server status binary sensor."""
def __init__(self, server: MinecraftServer) -> None:
"""Initialize status binary sensor."""
super().__init__(
server=server,
type_name=NAME_STATUS,
icon=ICON_STATUS,
device_class=DEVICE_CLASS_CONNECTIVITY,
)
self._is_on = False
@property
def is_on(self) -> bool:
"""Return binary state."""
return self._is_on
async def async_update(self) -> None:
"""Update status."""
self._is_on = self._server.online
| tchellomello/home-assistant | homeassistant/components/minecraft_server/binary_sensor.py | Python | apache-2.0 | 1464 |
from test_health_icd10 import suite
| kret0s/gnuhealth-live | tryton/server/trytond-3.8.3/trytond/modules/health_icd10/tests/__init__.py | Python | gpl-3.0 | 36 |
"""
Django settings for content_edit project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '*idvi++k)bkpecw)57uro8&5hqo0+6_2k7(k8mp8xkso1=gk3n'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.sites',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'content_edit',
)
SITE_ID = 1
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'content_edit_proj.urls'
WSGI_APPLICATION = 'content_edit_proj.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
# Insert your TEMPLATE_CONTEXT_PROCESSORS here or use this
# list if you haven't customized them:
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
],
},
},
]
| burke-software/django-content-edit | content_edit_proj/settings.py | Python | bsd-3-clause | 2,898 |
#!/usr/bin/python
#import lxml.etree
#import lxml.builder
from lxml import etree
#E = lxml.builder.ElementMaker()
#KINBODY=E.KinBody
#BODY=E.Body
#GEOM=E.Geom
#EXTENTS=E.Extents
#TRANSLATION=E.Translation
#DIFUSSECOLOR=E.diffuseColor
# User variables
boxHeight = 1.0
inFileStr = '/home/yo/repos/2018-ptmr/map1/map1.csv'
resolution = 2.0  # Just to make it similar to MATLAB [pixel/meter]
meterPerPixel = 1 / resolution # [meter/pixel]
# Program
from numpy import genfromtxt
inFile = genfromtxt(inFileStr, delimiter=',')
print inFile
print inFile.shape
nX = inFile.shape[0]
nY = inFile.shape[1]
Ez = boxHeight / 2.0 # Box size is actually double the extent
Ex = meterPerPixel / 2.0
Ey = meterPerPixel / 2.0
KinBody = etree.Element("KinBody", name="map")
for iY in range(nY):
#print "iY:",iY
for iX in range(nX):
#print "* iX:",iX
#-- Skip box if map indicates a 0
if inFile[iX][iY] == 0:
continue
#-- Add E___ to each to force begin at 0,0,0 (centered by default)
x = Ex + iX*meterPerPixel
y = Ey + iY*meterPerPixel
z = Ez # Add this to raise to floor level (centered by default)
#-- Create box
Body = etree.SubElement(KinBody, "Body", name="box_"+str(iX)+"_"+str(iY), type="static")
Geom = etree.SubElement(Body, "Geom", type="box")
Extents = etree.SubElement(Geom, "Extents").text= str(Ex)+" "+ str(Ey)+" "+str(Ez)
Translation = etree.SubElement(Geom, "Translation").text= str(x)+" "+str(y)+" "+str(z)
DifusseColor = etree.SubElement(Geom, "diffuseColor").text= ".5 .5 .5"
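        # Resulting element per occupied cell (illustrative; shown for
        # iX=0, iY=0 with the default sizes above, i.e. Ex=Ey=0.25, Ez=0.5):
        #   <Body name="box_0_0" type="static">
        #     <Geom type="box">
        #       <Extents>0.25 0.25 0.5</Extents>
        #       <Translation>0.25 0.25 0.5</Translation>
        #       <diffuseColor>.5 .5 .5</diffuseColor>
        #     </Geom>
        #   </Body>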
'''
the_doc = KINBODY(
BODY(
GEOM(
EXTENTS("0.001 0.115 0.065"),
TRANSLATION("0.6 "+ "-0.8 0.32"),
DIFUSSECOLOR(".5 .5 .5"),
type="box",
),
name="square"+str(i), type="static"
),
name="wall",
)
'''
myStr = etree.tostring(KinBody, pretty_print=True)
outFile = open('map.kinbody.xml', 'w')
outFile.write(myStr)
outFile.close()
| roboticslab-uc3m/xgnitive | programs/kinbody-creator/openraveMapGeneratorFile.py | Python | lgpl-2.1 | 2,113 |
# Copyright Yopi Angi
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import api, fields, models
from odoo.tools import safe_eval
class GoogleMapDrawingShapeMixin(models.AbstractModel):
_name = 'google.map.drawing.shape.mixin'
_description = 'Google Maps Shape Mixin'
_rec_name = 'shape_name'
shape_name = fields.Char(string='Name')
shape_area = fields.Float(string='Area')
shape_radius = fields.Float(string='Radius')
shape_description = fields.Text(string='Description')
shape_type = fields.Selection([
('circle', 'Circle'),
('polygon', 'Polygon'),
('rectangle', 'Rectangle')],
string='Type', default='polygon', required=True)
shape_paths = fields.Text(string='Paths')
@api.multi
def decode_shape_paths(self):
self.ensure_one()
return safe_eval(self.shape_paths)
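    # Illustrative round-trip (the stored text is a Python literal evaluated
    # with safe_eval; the exact path format is an assumption):
    #   rec.shape_paths = "[(45.0, 4.8), (45.1, 4.9)]"
    #   rec.decode_shape_paths()  # -> [(45.0, 4.8), (45.1, 4.9)]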
| OCA/geospatial | web_widget_google_map_drawing/models/drawing_mixin.py | Python | agpl-3.0 | 887 |
import sys
import numpy as np
import numpy.linalg as npl
from eric.molecule import Molecule
from eric.utils import print_mat
def getargs():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--stub', default="h2o_sto3g")
parser.add_argument('--nbasis', type=int, default=7)
parser.add_argument('--nelec', type=int, default=10)
parser.add_argument('--thresh-e', type=int, default=15)
parser.add_argument('--thresh-d', type=int, default=10)
parser.add_argument('--guess',
choices=('hcore', 'gwh'),
default='hcore',
help="""How should the guess for the initial MO
coefficients be obtained?""")
return parser.parse_args()
def parse_file_1(filename):
with open(filename) as fh:
val = float(fh.readline())
return val
def parse_int_file_2(filename, dim):
mat = np.zeros(shape=(dim, dim))
with open(filename) as fh:
contents = fh.readlines()
for line in contents:
mu, nu, intval = line.split()
mu, nu, intval = int(mu), int(nu), float(intval)
mat[mu-1, nu-1] = mat[nu-1, mu-1] = intval
return mat
def parse_int_file_4(filename, dim):
# be very inefficient with how we store these for now -- use all 4
# indices
mat = np.zeros(shape=(dim, dim, dim, dim))
with open(filename) as fh:
contents = fh.readlines()
for line in contents:
mu, nu, lm, sg, intval = line.split()
mu, nu, lm, sg = int(mu) - 1, int(nu) - 1, int(lm) - 1, int(sg) - 1
mat[mu, nu, lm, sg] = \
mat[mu, nu, sg, lm] = \
mat[nu, mu, lm, sg] = \
mat[nu, mu, sg, lm] = \
mat[lm, sg, mu, nu] = \
mat[lm, sg, nu, mu] = \
mat[sg, lm, mu, nu] = \
mat[sg, lm, nu, mu] = float(intval)
return mat
## This doesn't work like it does for the Fock build.
# def build_density(P, C, nbasis, nocc):
# for mu in range(nbasis):
# for nu in range(nbasis):
# for m in range(nocc):
# P[mu, nu] += C[mu, m] * C[nu, m]
# return
def build_density(C, nocc):
"""Form the density matrix from contraction over the occupied columns
of the MO coefficient matrix.
"""
return np.dot(C[:, :nocc], C[:, :nocc].T)
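# Equivalent vectorized form of the contraction above (illustrative,
# not used below): P[m, n] = sum_i C[m, i] * C[n, i] over occupied i, i.e.
#   np.einsum('mi,ni->mn', C[:, :nocc], C[:, :nocc])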
def calc_elec_energy(P, H, F):
"""Calculate the electronic energy from contracting the density matrix
with the one- (core Hamiltonian) and two- (Fock matrix) electron
components of the Hamiltonian.
"""
return np.sum(P * (H + F))
def build_fock(F, P, H, ERI, nbasis):
"""Build the Fock matrix in-place."""
for mu in range(H.shape[0]):
for nu in range(H.shape[1]):
F[mu, nu] = H[mu, nu]
for lm in range(P.shape[0]):
for sg in range(P.shape[1]):
F[mu, nu] += (P[lm, sg] * (2*ERI[mu, nu, lm, sg] -
ERI[mu, lm, nu, sg]))
return
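# The quadruple loop above is equivalent to this einsum sketch
# (illustrative, not used here):
#   F[:] = H + 2.0 * np.einsum('ls,mnls->mn', P, ERI) \
#            - np.einsum('ls,mlns->mn', P, ERI)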
def rmsd_density(D_new, D_old):
"""Calculate the root mean square deviation between two density
matrices.
"""
return np.sqrt(np.sum((D_new - D_old)**2))
def population_analysis(mol, pop_mat, basis_function_indices):
"""Perform population analysis..."""
charges = []
for i in range(mol.size):
# The basis function indices for each atom.
bfi = basis_function_indices[i]
# Take the trace of the "population" matrix block
# corresponding to each individual atom. Assuming that the
# indices are in order, and the block is bounded by the first
# and last elements of bfi. Is there a better way to do fancy
# indexing here?
tr = np.trace(pop_mat[bfi[0]:bfi[-1]+1,
bfi[0]:bfi[-1]+1])
# Definition of the final charge.
charge = mol.charges[i] - 2 * tr
charges.append(charge)
return np.asarray(charges)
def guess_gwh(mat_h, mat_s, cx=1.75):
"""From the core Hamiltonian and overlap matrices, form the matrix for
the generalized Wolfsberg-Helmholz approximation (DOI:
10.1063/1.1700580)
The default value of 1.75 is from the Q-Chem 4.3 manual.
"""
assert mat_h.shape == mat_s.shape
nr, nc = mat_h.shape
assert nr == nc
mat_gwh = np.empty_like(mat_h)
for mu in range(nr):
for nu in range(nc):
mat_gwh[mu, nu] = mat_s[mu, nu] * (mat_h[mu, mu] + mat_h[nu, nu])
mat_gwh *= (cx / 2)
return mat_gwh
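# Vectorized sketch of the same approximation (illustrative, not used below):
#   diag = np.diag(mat_h)
#   mat_gwh = (cx / 2.0) * mat_s * (diag[:, None] + diag[None, :])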
if __name__ == "__main__":
args = getargs()
nelec = args.nelec
nocc = nelec // 2
dim = nbasis = args.nbasis
stub = args.stub + "_"
mol = Molecule(stub + "geom.dat")
filename_enuc = stub + "enuc.dat"
filename_s = stub + "s.dat"
filename_t = stub + "t.dat"
filename_v = stub + "v.dat"
filename_eri = stub + "eri.dat"
e_nuc = parse_file_1(filename_enuc)
mat_s = parse_int_file_2(filename_s, dim)
mat_t = parse_int_file_2(filename_t, dim)
mat_v = parse_int_file_2(filename_v, dim)
mat_eri = parse_int_file_4(filename_eri, dim)
print("Nuclear repulsion energy = {}\n".format(e_nuc))
print("Overlap Integrals:")
print_mat(mat_s)
print("Kinetic-Energy Integrals:")
print_mat(mat_t)
print("Nuclear Attraction Integrals:")
print_mat(mat_v)
mat_h = mat_t + mat_v
print("Core Hamiltonian:")
print_mat(mat_h)
lam_s, l_s = npl.eigh(mat_s)
lam_s = lam_s * np.eye(len(lam_s))
lam_sqrt_inv = np.sqrt(npl.inv(lam_s))
symm_orthog = np.dot(l_s, np.dot(lam_sqrt_inv, l_s.T))
print("S^-1/2 Matrix:")
print_mat(symm_orthog)
if args.guess == "hcore":
f_prime = np.dot(symm_orthog.T, np.dot(mat_h, symm_orthog))
elif args.guess == "gwh":
mat_gwh = guess_gwh(mat_h, mat_s, cx=1.75)
f_prime = np.dot(symm_orthog.T, np.dot(mat_gwh, symm_orthog))
else:
print("Invalid guess.", file=sys.stderr)
sys.exit(1)
print("Initial F' Matrix:")
print_mat(f_prime)
eps, c_prime = npl.eigh(f_prime)
eps = eps * np.eye(len(eps))
c = np.dot(symm_orthog, c_prime)
print("Initial C Matrix:")
print_mat(c)
d = build_density(c, nocc)
print("Initial Density Matrix:")
print_mat(d)
e_elec_new = calc_elec_energy(d, mat_h, mat_h)
e_total = e_elec_new + e_nuc
delta_e = e_total
print(" Iter E(elec) E(tot) Delta(E) RMS(D)")
print(" {:4d} {:20.12f} {:20.12f}".format(0, e_elec_new, e_total))
t = " {:4d} {:20.12f} {:20.12f} {:20.12f} {:20.12f}".format
f = np.empty(shape=(nbasis, nbasis))
thresh_e = 10**(-args.thresh_e)
thresh_d = 10**(-args.thresh_d)
iteration = 1
max_iterations = 1024
rmsd_d = 99999.9
while iteration < max_iterations:
build_fock(f, d, mat_h, mat_eri, nbasis)
f_prime = np.dot(symm_orthog.T, np.dot(f, symm_orthog))
eps, c_prime = npl.eigh(f_prime)
eps = eps * np.eye(len(eps))
c = np.dot(symm_orthog, c_prime)
d_old = d
d = build_density(c, nocc)
e_elec_old = e_elec_new
e_elec_new = calc_elec_energy(d, mat_h, f)
e_tot = e_elec_new + e_nuc
if iteration == 1:
print("Fock Matrix:")
print_mat(f)
print(t(iteration, e_elec_new, e_tot, 0.0, 0.0))
else:
print(t(iteration, e_elec_new, e_tot, delta_e, rmsd_d))
delta_e = e_elec_new - e_elec_old
rmsd_d = rmsd_density(d, d_old)
        # Use |Delta E| so a large energy *drop* (negative delta_e) does
        # not spuriously satisfy the convergence criterion.
        if (abs(delta_e) < thresh_e) and (rmsd_d < thresh_d):
print("Convergence achieved.")
break
f = f_prime
iteration += 1
# At convergence, the Fock matrix should be diagonal in the MO
# basis.
f_mo = np.dot(c.T, np.dot(f, c))
print_mat(f_mo)
# Save things to disk for use in other routines.
np.savez_compressed("H.npz", mat_h)
np.savez_compressed("TEI_AO.npz", mat_eri)
np.savez_compressed("C.npz", c)
np.savez_compressed("F_MO.npz", f_mo)
mat_dipole_x = parse_int_file_2(stub + "mux.dat", dim)
mat_dipole_y = parse_int_file_2(stub + "muy.dat", dim)
mat_dipole_z = parse_int_file_2(stub + "muz.dat", dim)
dipole_elec = 2 * np.array([np.sum(d * mat_dipole_x),
np.sum(d * mat_dipole_y),
np.sum(d * mat_dipole_z)])
dipole_moment_elec = npl.norm(dipole_elec)
dipole_nuc = mol.calc_dipole_nuc()
dipole_moment_nuc = npl.norm(dipole_nuc)
dipole_total = dipole_elec + dipole_nuc
dipole_moment_total = npl.norm(dipole_total)
print("Dipole components (electronic, a.u.):")
print("X: {:20.12f}".format(dipole_elec[0]))
print("Y: {:20.12f}".format(dipole_elec[1]))
print("Z: {:20.12f}".format(dipole_elec[2]))
print("Dipole components (nuclear, a.u.):")
print("X: {:20.12f}".format(dipole_nuc[0]))
print("Y: {:20.12f}".format(dipole_nuc[1]))
print("Z: {:20.12f}".format(dipole_nuc[2]))
print("Dipole components (total, a.u.):")
print("X: {:20.12f}".format(dipole_total[0]))
print("Y: {:20.12f}".format(dipole_total[1]))
print("Z: {:20.12f}".format(dipole_total[2]))
print("Dipole moment (a.u.):")
print("electronic: {:20.12f}".format(dipole_moment_elec))
print("nuclear : {:20.12f}".format(dipole_moment_nuc))
print("total : {:20.12f}".format(dipole_moment_total))
# This is cheating. How to determine this automatically without
# any a priori knowledge of the basis set?
basis_function_indices = [
[0, 1, 2, 3, 4,],
[5,],
[6,],
]
# Mulliken population analysis.
mat_mulliken = np.dot(d, mat_s)
charges_mulliken = population_analysis(mol, mat_mulliken, basis_function_indices)
print("Population analysis (Mulliken):")
print(" Charges:")
for i in range(mol.size):
print(" {:3d} {:3d} {:20.12f}".format(i + 1, mol.charges[i], charges_mulliken[i]))
print(sum(charges_mulliken))
print(" trace: {}".format(np.trace(mat_mulliken)))
# Loewdin population analysis.
mat_loewdin = np.dot(npl.inv(symm_orthog), np.dot(d, npl.inv(symm_orthog)))
charges_loewdin = population_analysis(mol, mat_loewdin, basis_function_indices)
print("Population analysis (Loewdin):")
print(" Charges:")
for i in range(mol.size):
print(" {:3d} {:3d} {:20.12f}".format(i + 1, mol.charges[i], charges_loewdin[i]))
print(sum(charges_loewdin))
print(" trace: {}".format(np.trace(mat_loewdin)))
| berquist/programming_party | eric/project8/project8.py | Python | mpl-2.0 | 10,633 |
"""
SUMMON - Multiple Window Management
"""
import time
from summon.core import *
from summon import util
import summon
class WindowEnsemble:
"""This class coordinates the position, size, translation, and zoom of
multiple SUMMON Windows.
"""
def __init__(self, windows, stackx=False, stacky=False,
samew=False, sameh=False,
tiex=False, tiey=False, pinx=False, piny=False,
coordsx=None, coordsy=None,
master=None,
close_with_master=None):
"""windows -- windows to coordinate
stackx -- (bool) windows should stack with same x-coordinate
stacky -- (bool) windows should stack with same y-coordinate
samew -- (bool) windows should have same width
sameh -- (bool) windows should have same height
tiex -- (bool) translation along x-axis should be coordinated
tiey -- (bool) translation along y-axis should be coordinated
pinx -- (bool) translation along x-axis should be offset by window position
        piny -- (bool) translation along y-axis should be offset by window position
coordsx -- a list of x-offsets for translation
coordsy -- a list of y-offsets for translation
master -- master window
close_with_master -- (bool) if true, all windows close with master
"""
self.windows = windows[:]
self.pos = {}
self.sizes = {}
self.stackx = stackx
self.stacky = stacky
self.samew = samew
self.sameh = sameh
self.listeners = {}
self.ties = {}
self.lock = False
self.recentPos = util.Dict(default=[])
self.recentSize = util.Dict(default=[])
self.tiex = tiex
self.tiey = tiey
self.pinx = pinx
self.piny = piny
self.coordsx = coordsx
self.coordsy = coordsy
# setup master window
if master != None:
self.master = master
# close_with_master defaults to True if master is given
if close_with_master == None:
self.close_with_master = True
else:
self.close_with_master = close_with_master
else:
self.master = windows[0]
# close_with_master defaults to False if master is not given
if close_with_master == None:
self.close_with_master = False
else:
self.close_with_master = close_with_master
# record window positions and sizes
for win in windows:
self.pos[win] = win.get_position()
self.sizes[win] = win.get_size()
# setup window listeners
for win in windows:
self.init_listeners(win)
# setup window stacking
if stackx or stacky:
self.stack(self.master)
# setup scrolling ties
if tiex or tiey:
self.tie(windows, tiex=tiex, tiey=tiey, pinx=pinx, piny=piny,
coordsx=coordsx, coordsy=coordsy, master=master)
def add_window(self, win, index=-1, coordx=0, coordy=0):
"""add a window to the existing ensemble"""
if self.tiex or self.tiey:
self.untie()
if index == -1:
index = len(self.windows)
self.windows.insert(index, win)
self.pos[win] = win.get_position()
self.sizes[win] = win.get_size()
self.init_listeners(win)
self.recentPos.clear()
self.recentSize.clear()
# setup window stacking
if self.stackx or self.stacky:
self.stack(self.master)
if self.coordsx != None:
self.coordsx.insert(index, coordx)
if self.coordsy != None:
self.coordsy.insert(index, coordy)
# setup scrolling ties
if self.tiex or self.tiey:
self.tie(self.windows, tiex=self.tiex, tiey=self.tiey,
pinx=self.pinx, piny=self.piny,
coordsx=self.coordsx, coordsy=self.coordsy,
master=self.master)
def init_listeners(self, win):
"""initialize listeners for a window managed by the ensemble"""
self.listeners[win] = util.Bundle(
close=lambda: self._on_window_close(win),
resize=lambda w, h: self._on_window_resize(win, w, h),
move=lambda x, y: self._on_window_move(win, x, y))
win.add_close_listener(self.listeners[win].close)
win.add_resize_listener(self.listeners[win].resize)
win.add_move_listener(self.listeners[win].move)
def stop(self):
"""stop the window ensemble from coordinating window movements"""
# pretend all the windows have closed
for win in list(self.windows):
self._on_window_close(win)
def _on_window_close(self, win):
"""callback for when a window in the ensemble closes"""
self.remove_window(win)
# close all windows if master closes
if self.close_with_master and win == self.master:
for win2 in self.windows:
win2.close()
def remove_window(self, win):
"""removes a window from the ensemble"""
# do nothing if window is not in ensemble
if win not in self.windows:
return
self.windows.remove(win)
# remove all callbacks
win.remove_close_listener(self.listeners[win].close)
win.remove_resize_listener(self.listeners[win].resize)
win.remove_move_listener(self.listeners[win].move)
del self.listeners[win]
self.untie(win)
def _on_window_resize(self, win, width, height):
"""callback for when a window resizes"""
# ignore windows that have been changed by the ensemble
size = (width, height)
if size in self.recentSize[win]:
ind = self.recentSize[win].index(size)
self.recentSize[win] = self.recentSize[win][ind+1:]
# process windows that have been changed by outside forces
elif self.sizes[win] != (width, height):
if self.stackx or self.stacky:
self.stack(win)
else:
self.align(win)
self.raise_windows(win)
def _on_window_move(self, win, x, y):
"""callback for when a window moves"""
# ignore windows that have been changed by the ensemble
pos = (x, y)
if pos in self.recentPos[win]:
ind = self.recentPos[win].index(pos)
self.recentPos[win] = self.recentPos[win][ind+1:]
# process windows that have been changed by outside forces
elif self.pos[win] != (x, y):
if self.stackx or self.stacky:
self.stack(win)
else:
self.align(win)
self.raise_windows(win)
def stack(self, win):
"""restack windows together"""
target_pos = win.get_position()
target_size = win.get_size()
self.pos[win] = target_pos
self.sizes[win] = target_size
# get window sizes
widths = []
heights = []
x = []
y = []
totalx = 0
totaly = 0
target = []
for win2 in self.windows:
# update size
if win2 == win:
w, h = target_size
# determine destination positions
target = [totalx, totaly]
else:
w2, h2 = win2.get_size()
if self.samew:
w = target_size[0]
else:
w = w2
if self.sameh:
h = target_size[1]
else:
h = h2
if (w,h) != (w2, h2):
self.recentSize[win2].append((w,h))
self.sizes[win2] = (w, h)
win2.set_size(w, h)
widths.append(w)
heights.append(h)
x.append(totalx)
y.append(totaly)
deco = win2.get_decoration()
totalx += w + deco[0]
totaly += h + deco[1]
# set window positions
for i, win2 in enumerate(self.windows):
if win == win2:
continue
if self.stackx:
newx = target_pos[0]
newy = target_pos[1] + y[i] - target[1]
elif self.stacky:
newx = target_pos[0] + x[i] - target[0]
newy = target_pos[1]
oldpos = self.pos[win2] #win2.get_position()
self.pos[win2] = (newx, newy)
if (newx, newy) != oldpos:
win2.set_position(newx, newy)
self.recentPos[win2].append((newx, newy))
def align(self, win):
"""move all windows the same amount window 'win' has moved"""
now = win.get_position()
now = [now[0], now[1]]
pos1 = self.pos[win]
# move all other windows to match moved window
for win2 in self.windows:
if win2 != win:
pos2 = self.pos[win2]
pos3 = [now[0] + pos2[0] - pos1[0],
now[1] + pos2[1] - pos1[1]]
win2.set_position(*pos3)
self.recentPos[win2].append(tuple(pos3))
self.pos[win2] = pos3
# record new position for main window
self.pos[win] = now
def tie(self, windows, tiex=False, tiey=False, pinx=False, piny=False,
coordsx=None, coordsy=None, master=None):
"""ties the scrolling and zooming of multiple windows together"""
if len(windows) < 2:
return
self.tiex = tiex
self.tiey = tiey
self.pinx = pinx
self.piny = piny
self.coordsx = coordsx
self.coordsy = coordsy
if master == None:
master = windows[0]
if coordsx == None:
coordsx = [0] * len(windows)
if coordsy == None:
coordsy = [0] * len(windows)
# make coordinate lookup
self.coords = {}
for win, x, y in zip(windows, coordsx, coordsy):
self.coords[win] = util.Bundle(x=x, y=y)
# set callbacks for each window
for win in windows:
others = util.remove(windows, win)
tie = WindowTie(win, others, self)
self.ties[win] = tie
win.add_view_change_listener(tie.update_scroll)
win.add_focus_change_listener(tie.update_focus)
if master == win:
master_trans = tie.update_scroll
master_focus = tie.update_focus
master_focus()
master_trans()
def untie(self, win=None):
"""remove a window from any ties"""
if win == None:
# untie all windows
for win2 in self.windows:
self.untie(win2)
else:
if win not in self.ties:
return
win.remove_view_change_listener(self.ties[win].update_scroll)
win.remove_focus_change_listener(self.ties[win].update_focus)
del self.ties[win]
# make sure window ties remove their callbacks
for tie in self.ties.itervalues():
tie.remove_window(win)
def raise_windows(self, top=None):
"""raises all windows in ensemble above other windows on the desktop"""
for win in self.windows:
win.raise_window(True)
if top != None:
top.raise_window(True)
class WindowTie:
"""This class coordinates the translation and zoom of multiple SUMMON Windows.
"""
def __init__(self, win, others, ensemble):
self.win = win
self.others = others
self.ensemble = ensemble
def remove_window(self, win):
"""removes a window from the list of tied windows"""
if win in self.others:
self.others.remove(win)
def update_scroll(self):
"""call back that sets translation and zoom"""
# prevent infinite loops
if self.ensemble.lock:
return
self.ensemble.lock = True
w1 = self.win
others = self.others
coords = self.ensemble.coords
needpin = self.ensemble.pinx or self.ensemble.piny
if needpin:
pos1 = w1.get_position()
trans1 = w1.get_trans()
zoom1 = w1.get_zoom()
for w2 in others:
if needpin:
pos2 = w2.get_position()
oldtrans2 = list(w2.get_trans())
oldzoom2 = list(w2.get_zoom())
trans2 = oldtrans2[:]
zoom2 = oldzoom2[:]
if self.ensemble.tiex:
trans2[0] = trans1[0] - coords[w2].x + coords[w1].x
zoom2[0] = zoom1[0]
if self.ensemble.pinx:
trans2[0] += pos1[0] - pos2[0]
if self.ensemble.tiey:
trans2[1] = trans1[1] - coords[w2].y + coords[w1].y
zoom2[1] = zoom1[1]
if self.ensemble.piny:
trans2[1] -= pos1[1] - pos2[1]
# check to see if there is a change (prevents infinite loops)
if trans2 != oldtrans2:
w2.set_trans(*trans2)
if zoom2 != oldzoom2:
w2.set_zoom(*zoom2)
self.ensemble.lock = False
def update_focus(self):
"""callback that sets focus"""
# prevent infinite loops
if self.ensemble.lock:
return
self.ensemble.lock = True
coords = self.ensemble.coords
fx1, fy1 = self.win.get_focus()
fx1 -= coords[self.win].x
fy1 -= coords[self.win].y
for w2 in self.others:
newpos = (fx1 + coords[w2].x, fy1 + coords[w2].y)
oldpos = w2.get_focus()
if newpos != oldpos:
w2.set_focus(* newpos)
self.ensemble.lock = False
| mdrasmus/summon | lib/summon/multiwindow.py | Python | gpl-2.0 | 14,930 |
# -*- coding: utf-8 -*-
#
# Copyright 2013 Simone Campagna
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""\
Implementation of the CheckMin, CheckMax, CheckMinLen, CheckMaxLen check classes.
"""
from ..errors import (
MinValueError,
MaxValueError,
MinLengthError,
MaxLengthError,
)
from .check import Check
from .option import Option
__author__ = "Simone Campagna"
__copyright__ = 'Copyright (c) 2015 Simone Campagna'
__license__ = 'Apache License Version 2.0'
__all__ = [
'CheckMin',
'CheckMax',
'CheckMinLen',
'CheckMaxLen',
]
class CheckRange(Check): # pylint: disable=abstract-method
"""Base class for CheckMin, CheckMax, CheckMinLen, CheckMaxLen."""
ATTRIBUTE_NAME = None
def self_validate(self, validator):
value = getattr(self, self.ATTRIBUTE_NAME)
if (value is not None) and self.has_actual_value(value):
name = "<{}>".format(self.ATTRIBUTE_NAME)
option = Option(name=name, value=value, defined=True)
validator.validate_option(option, section=None)
class CheckMin(CheckRange):
"""Checks if value is >= min.
Parameters
----------
min: |any|, optional
the min value
"""
ATTRIBUTE_NAME = 'min'
def __init__(self, min=None): # pylint: disable=redefined-builtin
self.min = min
super().__init__()
def check(self, option, section):
min_value = self.get_value(self.min, section)
if min_value is not None:
value = option.value
if value < min_value:
raise MinValueError.build(
option,
"value is lower than min {!r}".format(min_value))
class CheckMax(CheckRange):
"""Checks if value is <= max.
Parameters
----------
max: |any|, optional
the max value
Attributes
----------
max: |any|, optional
the max value
"""
ATTRIBUTE_NAME = 'max'
def __init__(self, max=None): # pylint: disable=redefined-builtin
self.max = max
super().__init__()
def check(self, option, section):
max_value = self.get_value(self.max, section)
if max_value is not None:
value = option.value
if value > max_value:
raise MaxValueError.build(
option,
"value is greater than max {!r}".format(max_value))
class CheckMinLen(Check):
"""Checks if value length is >= min_len.
Parameters
----------
min_len: |any|, optional
the min length
Attributes
----------
min_len: |any|, optional
the min length
"""
def __init__(self, min_len=None):
self.min_len = min_len
super().__init__()
def check(self, option, section):
min_len_value = self.get_value(self.min_len, section)
if min_len_value is not None:
value = option.value
if len(value) < min_len_value:
raise MinLengthError.build(
option,
"length {} is lower than min_len {!r}".format(
len(value),
min_len_value))
class CheckMaxLen(Check):
"""Checks if value length is >= max_len.
Parameters
----------
max_len: |any|, optional
the max length
Attributes
----------
max_len: |any|, optional
the max length
"""
def __init__(self, max_len=None):
self.max_len = max_len
super().__init__()
def check(self, option, section):
max_len_value = self.get_value(self.max_len, section)
if max_len_value is not None:
value = option.value
if len(value) > max_len_value:
raise MaxLengthError.build(
option,
"length {} is greater than max_len {!r}".format(
len(value),
max_len_value))
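# Illustrative behaviour of the four checks (option values are hypothetical):
#   CheckMin(min=0)        raises MinValueError  when option.value < 0
#   CheckMax(max=10)       raises MaxValueError  when option.value > 10
#   CheckMinLen(min_len=1) raises MinLengthError when len(option.value) < 1
#   CheckMaxLen(max_len=8) raises MaxLengthError when len(option.value) > 8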
| simone-campagna/daikon | zirkon/validator/check_range.py | Python | apache-2.0 | 4,546 |
#!/usr/bin/env python3
import stringcase
from generator.Field import Field
class OneOfField:
def __init__(self, oneof_name, oneof_fields, context):
self.name = oneof_name
        # Flags and metadata describing the generated type
self.one_of = True
self.type = 'OneOf{}'.format(stringcase.pascalcase(self.name))
self.fqn = '{}.{}'.format(context.fqn, self.type)
self.default_value = '{}()'.format(self.type)
self.map_type = False
self.repeated = False
self.pointer = False
self.array_size = 0
self.bytes_type = False
self.basic = False
self.oneof_fields = [Field(f, context) for f in oneof_fields]
# Since our cpp_type is used a lot, precalculate it
self.cpp_type = self.fqn.replace('.', '::')
self.special_cpp_type = False
def generate_cpp_header(self):
return '{} {};'.format(self.cpp_type, self.name)
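    # Example (illustrative): a oneof named "shape" defined in a context
    # whose fqn is "message.Msg" yields the header line:
    #   message::Msg::OneOfShape shape;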
| Fastcode/NUClearExample | nuclear/message/generator/OneOfField.py | Python | mit | 936 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2013 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
import os
import socket
import unittest
from StringIO import StringIO
import trac.tests.compat
from trac.util.text import empty, expandtabs, fix_eol, javascript_quote, \
levenshtein_distance, normalize_whitespace, \
print_table, quote_query_string, shorten_line, \
strip_line_ws, stripws, text_width, \
to_js_string, to_unicode, unicode_from_base64, \
unicode_quote, unicode_quote_plus, \
unicode_to_base64, unicode_unquote, \
unicode_urlencode, wrap
class ToUnicodeTestCase(unittest.TestCase):
def test_explicit_charset(self):
uc = to_unicode('\xc3\xa7', 'utf-8')
self.assertIsInstance(uc, unicode)
self.assertEquals(u'\xe7', uc)
def test_explicit_charset_with_replace(self):
uc = to_unicode('\xc3', 'utf-8')
self.assertIsInstance(uc, unicode)
self.assertEquals(u'\xc3', uc)
def test_implicit_charset(self):
uc = to_unicode('\xc3\xa7')
self.assertIsInstance(uc, unicode)
self.assertEquals(u'\xe7', uc)
def test_from_exception_using_unicode_args(self):
u = u'\uB144'
try:
raise ValueError, '%s is not a number.' % u
except ValueError, e:
self.assertEqual(u'\uB144 is not a number.', to_unicode(e))
def test_from_exception_using_str_args(self):
u = u'Das Ger\xe4t oder die Ressource ist belegt'
try:
raise ValueError, u.encode('utf-8')
except ValueError, e:
self.assertEqual(u, to_unicode(e))
def test_from_windows_error(self):
try:
os.stat('non/existent/file.txt')
except OSError, e:
uc = to_unicode(e)
self.assertIsInstance(uc, unicode, uc)
self.assertTrue(uc.startswith('[Error '), uc)
self.assertIn(e.strerror.decode('mbcs'), uc)
def test_from_socket_error(self):
for res in socket.getaddrinfo('127.0.0.1', 65536, 0,
socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
s = socket.socket(af, socktype, proto)
try:
s.connect(sa)
except socket.error, e:
uc = to_unicode(e)
self.assertIsInstance(uc, unicode, uc)
if hasattr(e, 'strerror'):
self.assertIn(e.strerror.decode('mbcs'), uc)
if os.name != 'nt':
del test_from_windows_error
del test_from_socket_error
class ExpandtabsTestCase(unittest.TestCase):
def test_empty(self):
x = expandtabs('', ignoring='\0')
self.assertEqual('', x)
    def test_ignoring(self):
x = expandtabs('\0\t', ignoring='\0')
self.assertEqual('\0 ', x)
def test_tabstops(self):
self.assertEqual(' ', expandtabs(' \t'))
self.assertEqual(' ', expandtabs('\t\t'))
class JavascriptQuoteTestCase(unittest.TestCase):
def test_quoting(self):
self.assertEqual(r'Quote \" in text',
javascript_quote('Quote " in text'))
self.assertEqual(r'\\\"\b\f\n\r\t\'',
javascript_quote('\\"\b\f\n\r\t\''))
self.assertEqual(r'\u0002\u001e',
javascript_quote('\x02\x1e'))
self.assertEqual(r'\u0026\u003c\u003e',
javascript_quote('&<>'))
self.assertEqual(r'\u2028\u2029',
javascript_quote(u'\u2028\u2029'))
class ToJsStringTestCase(unittest.TestCase):
def test_(self):
self.assertEqual(r'"Quote \" in text"',
to_js_string('Quote " in text'))
self.assertEqual(r'''"\\\"\b\f\n\r\t'"''',
to_js_string('\\"\b\f\n\r\t\''))
self.assertEqual(r'"\u0002\u001e"',
to_js_string('\x02\x1e'))
self.assertEqual(r'"\u0026\u003c\u003e"',
to_js_string('&<>'))
self.assertEqual('""',
to_js_string(''))
self.assertEqual('""',
to_js_string(None))
self.assertEqual(r'"\u2028\u2029"',
to_js_string(u'\u2028\u2029'))
class UnicodeQuoteTestCase(unittest.TestCase):
def test_unicode_quote(self):
self.assertEqual(u'the%20%C3%9C%20thing',
unicode_quote(u'the Ü thing'))
self.assertEqual(u'%2520%C3%9C%20%2520',
unicode_quote(u'%20Ü %20'))
def test_unicode_quote_plus(self):
self.assertEqual(u'the+%C3%9C+thing',
unicode_quote_plus(u'the Ü thing'))
self.assertEqual(u'%2520%C3%9C+%2520',
unicode_quote_plus(u'%20Ü %20'))
def test_unicode_unquote(self):
u = u'the Ü thing'
up = u'%20Ü %20'
self.assertEqual(u, unicode_unquote(unicode_quote(u)))
self.assertEqual(up, unicode_unquote(unicode_quote(up)))
def test_unicode_urlencode(self):
self.assertEqual('thing=%C3%9C&%C3%9C=thing&%C3%9Cthing',
unicode_urlencode({u'Ü': 'thing',
'thing': u'Ü',
u'Üthing': empty}))
class QuoteQueryStringTestCase(unittest.TestCase):
def test_quote(self):
text = u'type=the Ü thing&component=comp\x7fonent'
self.assertEqual('type=the+%C3%9C+thing&component=comp%7Fonent',
quote_query_string(text))
class WhitespaceTestCase(unittest.TestCase):
def test_default(self):
self.assertEqual(u'This is text ',
normalize_whitespace(u'Th\u200bis\u00a0is te\u200bxt\u00a0'))
self.assertEqual(u'Some other text',
normalize_whitespace(u'Some\tother\ntext\r', to_space='\t\n',
remove='\r'))
class TextWidthTestCase(unittest.TestCase):
def test_single(self):
def tw1(text):
return text_width(text, ambiwidth=1)
self.assertEqual(8, tw1(u'Alphabet'))
self.assertEqual(16, tw1('east asian width'))
self.assertEqual(16, tw1(u'ひらがなカタカナ'))
self.assertEqual(21, tw1(u'色は匂えど…酔ひもせず'))
def test_double(self):
def tw2(text):
return text_width(text, ambiwidth=2)
self.assertEqual(8, tw2(u'Alphabet'))
self.assertEqual(16, tw2('east asian width'))
self.assertEqual(16, tw2(u'ひらがなカタカナ'))
self.assertEqual(22, tw2(u'色は匂えど…酔ひもせず'))
class PrintTableTestCase(unittest.TestCase):
def test_single_bytes(self):
data = (
('Trac 0.12', '2010-06-13', 'Babel'),
('Trac 0.11', '2008-06-22', 'Genshi'),
('Trac 0.10', '2006-09-28', 'Zengia'),
('Trac 0.9', '2005-10-31', 'Vodun'),
('Trac 0.8', '2004-11-15', 'Qualia'),
('Trac 0.7', '2004-05-18', 'Fulci'),
('Trac 0.6', '2004-03-23', 'Solanum'),
('Trac 0.5', '2004-02-23', 'Incognito'),
)
headers = ('Version', 'Date', 'Name')
expected = """\
Version Date Name
----------------------------------
Trac 0.12 | 2010-06-13 | Babel
Trac 0.11 | 2008-06-22 | Genshi
Trac 0.10 | 2006-09-28 | Zengia
Trac 0.9 | 2005-10-31 | Vodun
Trac 0.8 | 2004-11-15 | Qualia
Trac 0.7 | 2004-05-18 | Fulci
Trac 0.6 | 2004-03-23 | Solanum
Trac 0.5 | 2004-02-23 | Incognito
"""
self._validate_print_table(expected, data, headers=headers, sep=' | ',
ambiwidth=1)
def test_various_types(self):
data = (
('NoneType', 'None', None),
('bool', 'True', True),
('bool', 'False', False),
('int', '0', 0),
('float', '0.0', 0.0),
)
expected = u"""\
NoneType | None |
bool | True | True
bool | False | False
int | 0 | 0
float | 0.0 | 0.0
"""
self._validate_print_table(expected, data, sep=' | ', ambiwidth=1)
def test_ambiwidth_1(self):
data = (
('foo@localhost', 'foo@localhost'),
(u'bar@….com', '[email protected]'),
)
headers = ('Obfuscated', 'Email')
expected = u"""\
Obfuscated Email
-------------------------------
foo@localhost | foo@localhost
bar@….com | [email protected]
"""
self._validate_print_table(expected, data, headers=headers, sep=' | ',
ambiwidth=1)
def test_ambiwidth_2(self):
data = (
('foo@localhost', 'foo@localhost'),
(u'bar@….com', '[email protected]'),
)
headers = ('Obfuscated', 'Email')
expected = u"""\
Obfuscated Email
-------------------------------
foo@localhost | foo@localhost
bar@….com | [email protected]
"""
self._validate_print_table(expected, data, headers=headers, sep=' | ',
ambiwidth=2)
def _validate_print_table(self, expected, data, **kwargs):
out = StringIO()
kwargs['out'] = out
print_table(data, **kwargs)
self.assertEqual(expected.encode('utf-8'),
strip_line_ws(out.getvalue(), leading=False))
class WrapTestCase(unittest.TestCase):
def test_wrap_ambiwidth_single(self):
text = u'Lorem ipsum dolor sit amet, consectetur adipisicing ' + \
u'elit, sed do eiusmod tempor incididunt ut labore et ' + \
u'dolore magna aliqua. Ut enim ad minim veniam, quis ' + \
u'nostrud exercitation ullamco laboris nisi ut aliquip ex ' + \
u'ea commodo consequat. Duis aute irure dolor in ' + \
u'reprehenderit in voluptate velit esse cillum dolore eu ' + \
u'fugiat nulla pariatur. Excepteur sint occaecat ' + \
u'cupidatat non proident, sunt in culpa qui officia ' + \
u'deserunt mollit anim id est laborum.'
wrapped = u"""\
> Lorem ipsum dolor sit amet, consectetur adipisicing elit,
| sed do eiusmod tempor incididunt ut labore et dolore
| magna aliqua. Ut enim ad minim veniam, quis nostrud
| exercitation ullamco laboris nisi ut aliquip ex ea
| commodo consequat. Duis aute irure dolor in reprehenderit
| in voluptate velit esse cillum dolore eu fugiat nulla
| pariatur. Excepteur sint occaecat cupidatat non proident,
| sunt in culpa qui officia deserunt mollit anim id est
| laborum."""
self.assertEqual(wrapped, wrap(text, 59, '> ', '| ', '\n'))
def test_wrap_ambiwidth_double(self):
text = u'Trac は BSD ライセンスのもとで配布されて' + \
u'います。[1:]このライセンスの全文は、𠀋' + \
u'配布ファイルに含まれている[3:CОPYING]ファ' + \
u'イルと同じものが[2:オンライン]で参照でき' \
u'ます。'
wrapped = u"""\
> Trac は BSD ライセンスのもとで配布されています。[1:]この
| ライセンスの全文は、𠀋配布ファイルに含まれている
| [3:CОPYING]ファイルと同じものが[2:オンライン]で参照でき
| ます。"""
self.assertEqual(wrapped, wrap(text, 59, '> ', '| ', '\n',
ambiwidth=2))
class FixEolTestCase(unittest.TestCase):
def test_mixed_eol(self):
text = u'\nLine 2\rLine 3\r\nLine 4\n\r'
self.assertEqual(u'\nLine 2\nLine 3\nLine 4\n\n',
fix_eol(text, '\n'))
self.assertEqual(u'\rLine 2\rLine 3\rLine 4\r\r',
fix_eol(text, '\r'))
self.assertEqual(u'\r\nLine 2\r\nLine 3\r\nLine 4\r\n\r\n',
fix_eol(text, '\r\n'))
class UnicodeBase64TestCase(unittest.TestCase):
def test_to_and_from_base64_unicode(self):
text = u'Trac は ØÆÅ'
text_base64 = unicode_to_base64(text)
self.assertEqual('VHJhYyDjga8gw5jDhsOF', text_base64)
self.assertEqual(text, unicode_from_base64(text_base64))
def test_to_and_from_base64_whitespace(self):
# test that removing whitespace does not affect conversion
text = 'a space: '
text_base64 = unicode_to_base64(text)
self.assertEqual('YSBzcGFjZTog', text_base64)
self.assertEqual(text, unicode_from_base64(text_base64))
text = 'two newlines: \n\n'
text_base64 = unicode_to_base64(text)
self.assertEqual('dHdvIG5ld2xpbmVzOiAKCg==', text_base64)
self.assertEqual(text, unicode_from_base64(text_base64))
text = 'a test string ' * 10000
text_base64_strip = unicode_to_base64(text)
text_base64_no_strip = unicode_to_base64(text, strip_newlines=False)
self.assertNotEqual(text_base64_strip, text_base64_no_strip)
self.assertEqual(text, unicode_from_base64(text_base64_strip))
self.assertEqual(text, unicode_from_base64(text_base64_no_strip))
class StripwsTestCase(unittest.TestCase):
def test_stripws(self):
self.assertEqual(u'stripws',
stripws(u' \u200b\t\u3000stripws \u200b\t\u2008'))
self.assertEqual(u'stripws \u3000\t',
stripws(u'\u200b\t\u2008 stripws \u3000\t',
trailing=False))
self.assertEqual(u' \t\u3000stripws',
stripws(u' \t\u3000stripws \u200b\t\u2008',
leading=False))
self.assertEqual(u' \t\u3000stripws \u200b\t\u2008',
stripws(u' \t\u3000stripws \u200b\t\u2008',
leading=False, trailing=False))
class LevenshteinDistanceTestCase(unittest.TestCase):
def test_distance(self):
self.assertEqual(5, levenshtein_distance('kitten', 'sitting'))
self.assertEqual(1, levenshtein_distance('wii', 'wiki'))
self.assertEqual(2, levenshtein_distance('comfig', 'config'))
self.assertEqual(5, levenshtein_distance('update', 'upgrade'))
self.assertEqual(0, levenshtein_distance('milestone', 'milestone'))
class ShortenLineTestCase(unittest.TestCase):
def test_less_than_maxlen(self):
text = '123456789'
self.assertEqual(text, shorten_line(text, 10))
def test_equalto_maxlen(self):
text = '1234567890'
self.assertEqual(text, shorten_line(text, 10))
def test_greater_than_maxlen(self):
text = 'word word word word'
self.assertEqual('word word ...', shorten_line(text, 15))
text = 'abcdefghij'
self.assertEqual('abcde ...', shorten_line(text, 9))
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(ToUnicodeTestCase))
suite.addTest(unittest.makeSuite(ExpandtabsTestCase))
suite.addTest(unittest.makeSuite(UnicodeQuoteTestCase))
suite.addTest(unittest.makeSuite(JavascriptQuoteTestCase))
suite.addTest(unittest.makeSuite(ToJsStringTestCase))
suite.addTest(unittest.makeSuite(QuoteQueryStringTestCase))
suite.addTest(unittest.makeSuite(WhitespaceTestCase))
suite.addTest(unittest.makeSuite(TextWidthTestCase))
suite.addTest(unittest.makeSuite(PrintTableTestCase))
suite.addTest(unittest.makeSuite(WrapTestCase))
suite.addTest(unittest.makeSuite(FixEolTestCase))
suite.addTest(unittest.makeSuite(UnicodeBase64TestCase))
suite.addTest(unittest.makeSuite(StripwsTestCase))
suite.addTest(unittest.makeSuite(LevenshteinDistanceTestCase))
suite.addTest(unittest.makeSuite(ShortenLineTestCase))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| jun66j5/trac-ja | trac/util/tests/text.py | Python | bsd-3-clause | 16,422 |
from django.test import TestCase, Client
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django.core import mail
from notifications.forms import MessageForm
class LoginTestCase(TestCase):
    # login helper methods shared by the test cases below
def login_user(self):
return self.client.login(username=self.user_username, password=self.user_password)
def login_superuser(self):
return self.client.login(username=self.superuser_username, password=self.superuser_password)
class NoficationsTestCase(LoginTestCase):
def setUp(self):
self.client = Client()
self.user_username = 'ganetitest'
self.user_password = 'ganetitest'
self.superuser_username = 'ganetitestadmin'
self.superuser_password = 'ganetitestadmin'
self.user = User.objects.create_user(self.user_username, '[email protected]', self.user_password)
self.superuser = User.objects.create_user(self.superuser_username, '[email protected]', self.superuser_password)
self.superuser.is_staff = True
self.superuser.is_superuser = True
self.superuser.save()
def test_usergroups(self):
# should get a redirect to the login page
res = self.client.get(reverse('usergroups'))
self.assertEqual(res.status_code, 302)
        # plain (non-staff) users should get a 403
self.login_user()
res = self.client.get(reverse('usergroups'))
self.assertEqual(res.status_code, 403)
self.login_superuser()
res = self.client.get(reverse('usergroups'))
self.assertEqual(res.status_code, 400)
res = self.client.get(reverse('usergroups'), {'q': 'test', 'type': 'test'})
self.assertEqual(res.status_code, 200)
def test_notify(self):
# should get a redirect to the login page
res = self.client.get(reverse('notify'))
self.assertEqual(res.status_code, 302)
# should get 403
self.login_user()
res = self.client.get(reverse('notify'))
self.assertEqual(res.status_code, 403)
self.login_superuser()
res = self.client.get(reverse('notify'))
self.assertEqual(res.status_code, 200)
res = self.client.get(reverse('notify'), {'instance': 'test.test.test'})
self.assertEqual(res.status_code, 200)
# post form validate
data = {
'search_for': 'users',
'subject': 'test',
'message': 'This is a test',
'recipient_list': 'ganetitestadmin,ganetitest'
}
form = MessageForm(data)
self.assertEqual(form.is_valid(), True)
def test_send_notification(self):
self.login_superuser()
res = self.client.post(reverse('notify'))
self.assertEqual(res.status_code, 200)
body = 'this is a test'
res = self.client.post(reverse('notify'), {
'recipient_list': 'u_%s' % (self.user.pk),
'subject': 'test',
'message': body,
'search_for': 'users',
})
self.assertEqual(mail.outbox[0].recipients()[0], self.user.email)
self.assertEqual(mail.outbox[0].body, body)
self.assertEqual(res.status_code, 302)
| irregulator/ganetimgr | notifications/tests.py | Python | gpl-3.0 | 3,209 |
import sys
import httplib
url = "/MELA/REST_WS"
HOST_IP = "localhost:8180"
filename = "./serviceDescription.xml"
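# Usage (the description-file argument is optional; defaults to the path above):
#   python submitServiceDescription.py [serviceDescription.xml]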
if __name__ == '__main__':
    args = sys.argv
    if len(args) > 1:
        filename = str(args[1])
connection = httplib.HTTPConnection(HOST_IP)
    with open(filename, "r") as description_file:
        body_content = description_file.read()
    headers = {
        'Content-Type': 'application/xml; charset=utf-8',
        'Accept': 'application/json, multipart/related'
    }
    connection.request('PUT', url + '/service', body=body_content, headers=headers)
result = connection.getresponse()
print result.read()
| tuwiendsg/MELA | MELA-Extensions/MELA-ComplexCostEvaluationService/tests/spark/submitServiceDescription.py | Python | apache-2.0 | 674 |
# -*- coding: utf-8 -*-
"""
Base backend specific settings module.
"""
from __future__ import unicode_literals
from django_event import settings
HOST = settings.BACKEND_OPTIONS.get('HOST', 'localhost')
PORT = settings.BACKEND_OPTIONS.get('PORT')
PASSWORD = settings.BACKEND_OPTIONS.get('PASSWORD', '') | ailove-dev/django-event | django_event/backends/base/settings.py | Python | mit | 307 |
"""
Tests for dit.math.misc.
"""
import pytest
from dit.math.misc import combinations, is_integer, is_number, factorial
@pytest.mark.parametrize('n', range(-10, 10))
def test_number1(n):
assert is_number(n)
@pytest.mark.parametrize('n', range(-10, 10))
def test_number2(n):
assert is_number(n / 10)
@pytest.mark.parametrize('n', ['a', int, []])
def test_number3(n):
assert not is_number(n)
@pytest.mark.parametrize('n', range(-10, 10))
def test_integer1(n):
assert is_integer(n)
@pytest.mark.parametrize('n', range(-10, 10))
def test_integer2(n):
assert not is_integer(n / 10)
@pytest.mark.parametrize('n', ['a', int, []])
def test_integer3(n):
assert not is_integer(n)
@pytest.mark.parametrize(('n', 'expected'), [
(0, 1),
(1, 1),
(2, 2),
(3, 6),
(4, 24),
(5, 120),
])
def test_factorial1(n, expected):
assert factorial(n) == expected
@pytest.mark.parametrize('n', [-1, 0.5, 1 + 2j])
def test_factorial2(n):
with pytest.raises(ValueError, match="is not a positive integer."):
factorial(n)
@pytest.mark.parametrize('n', ['a', int, []])
def test_factorial3(n):
with pytest.raises(TypeError, match="(is not a number.|unhashable type)"):
factorial(n)
@pytest.mark.parametrize(('k', 'c'), [
(0, 1),
(1, 3),
(2, 3),
(3, 1),
])
def test_combinations1(k, c):
n = 3
assert combinations(n, k) == c
def test_combinations2():
with pytest.raises(ValueError, match="is larger than"):
combinations(5, 7)
| dit/dit | tests/math/test_misc.py | Python | bsd-3-clause | 1,530 |
# -*- coding: utf-8 -*-
import sys
import os
import json
import traceback
import warnings
import webob
from webob.exc import HTTPNotFound
import mock
from webtest import TestApp
import six
from six import b as b_
from six import u as u_
from six.moves import cStringIO as StringIO
from pecan import (
Pecan, Request, Response, expose, request, response, redirect,
abort, make_app, override_template, render, route
)
from pecan.templating import (
_builtin_renderers as builtin_renderers, error_formatters
)
from pecan.decorators import accept_noncanonical
from pecan.tests import PecanTestCase
if sys.version_info < (2, 7):
import unittest2 as unittest # pragma: nocover
else:
import unittest # pragma: nocover
class SampleRootController(object):
pass
class TestAppRoot(PecanTestCase):
def test_controller_lookup_by_string_path(self):
app = Pecan('pecan.tests.test_base.SampleRootController')
assert app.root and isinstance(app.root, SampleRootController)
class TestEmptyContent(PecanTestCase):
@property
def app_(self):
class RootController(object):
@expose()
def index(self):
pass
@expose()
def explicit_body(self):
response.body = b_('Hello, World!')
@expose()
def empty_body(self):
response.body = b_('')
@expose()
def explicit_text(self):
response.text = six.text_type('Hello, World!')
@expose()
def empty_text(self):
response.text = six.text_type('')
@expose()
def explicit_json(self):
response.json = {'foo': 'bar'}
@expose()
def explicit_json_body(self):
response.json_body = {'foo': 'bar'}
@expose()
def non_unicode(self):
return chr(0xc0)
return TestApp(Pecan(RootController()))
def test_empty_index(self):
r = self.app_.get('/')
self.assertEqual(r.status_int, 204)
self.assertNotIn('Content-Type', r.headers)
self.assertEqual(r.headers['Content-Length'], '0')
self.assertEqual(len(r.body), 0)
def test_index_with_non_unicode(self):
r = self.app_.get('/non_unicode/')
self.assertEqual(r.status_int, 200)
def test_explicit_body(self):
r = self.app_.get('/explicit_body/')
self.assertEqual(r.status_int, 200)
self.assertEqual(r.body, b_('Hello, World!'))
def test_empty_body(self):
r = self.app_.get('/empty_body/')
self.assertEqual(r.status_int, 204)
self.assertEqual(r.body, b_(''))
def test_explicit_text(self):
r = self.app_.get('/explicit_text/')
self.assertEqual(r.status_int, 200)
self.assertEqual(r.body, b_('Hello, World!'))
def test_empty_text(self):
r = self.app_.get('/empty_text/')
self.assertEqual(r.status_int, 204)
self.assertEqual(r.body, b_(''))
def test_explicit_json(self):
r = self.app_.get('/explicit_json/')
self.assertEqual(r.status_int, 200)
json_resp = json.loads(r.body.decode())
assert json_resp == {'foo': 'bar'}
def test_explicit_json_body(self):
r = self.app_.get('/explicit_json_body/')
self.assertEqual(r.status_int, 200)
json_resp = json.loads(r.body.decode())
assert json_resp == {'foo': 'bar'}
class TestAppIterFile(PecanTestCase):
@property
def app_(self):
class RootController(object):
@expose()
def index(self):
body = six.BytesIO(b_('Hello, World!'))
response.body_file = body
@expose()
def empty(self):
body = six.BytesIO(b_(''))
response.body_file = body
return TestApp(Pecan(RootController()))
def test_body_generator(self):
r = self.app_.get('/')
self.assertEqual(r.status_int, 200)
assert r.body == b_('Hello, World!')
def test_empty_body_generator(self):
r = self.app_.get('/empty')
self.assertEqual(r.status_int, 204)
assert len(r.body) == 0
class TestInvalidURLEncoding(PecanTestCase):
@property
def app_(self):
class RootController(object):
@expose()
def _route(self, args, request):
assert request.path
return TestApp(Pecan(RootController()))
def test_rest_with_non_utf_8_body(self):
r = self.app_.get('/%aa/', expect_errors=True)
assert r.status_int == 400
class TestIndexRouting(PecanTestCase):
@property
def app_(self):
class RootController(object):
@expose()
def index(self):
return 'Hello, World!'
return TestApp(Pecan(RootController()))
def test_empty_root(self):
r = self.app_.get('/')
assert r.status_int == 200
assert r.body == b_('Hello, World!')
def test_index(self):
r = self.app_.get('/index')
assert r.status_int == 200
assert r.body == b_('Hello, World!')
def test_index_html(self):
r = self.app_.get('/index.html')
assert r.status_int == 200
assert r.body == b_('Hello, World!')
class TestObjectDispatch(PecanTestCase):
@property
def app_(self):
class SubSubController(object):
@expose()
def index(self):
return '/sub/sub/'
@expose()
def deeper(self):
return '/sub/sub/deeper'
class SubController(object):
@expose()
def index(self):
return '/sub/'
@expose()
def deeper(self):
return '/sub/deeper'
sub = SubSubController()
class RootController(object):
@expose()
def index(self):
return '/'
@expose()
def deeper(self):
return '/deeper'
sub = SubController()
return TestApp(Pecan(RootController()))
def test_index(self):
r = self.app_.get('/')
assert r.status_int == 200
assert r.body == b_('/')
def test_one_level(self):
r = self.app_.get('/deeper')
assert r.status_int == 200
assert r.body == b_('/deeper')
def test_one_level_with_trailing(self):
r = self.app_.get('/sub/')
assert r.status_int == 200
assert r.body == b_('/sub/')
def test_two_levels(self):
r = self.app_.get('/sub/deeper')
assert r.status_int == 200
assert r.body == b_('/sub/deeper')
def test_two_levels_with_trailing(self):
r = self.app_.get('/sub/sub/')
assert r.status_int == 200
def test_three_levels(self):
r = self.app_.get('/sub/sub/deeper')
assert r.status_int == 200
assert r.body == b_('/sub/sub/deeper')
@unittest.skipIf(not six.PY3, "tests are Python3 specific")
class TestUnicodePathSegments(PecanTestCase):
def test_unicode_methods(self):
class RootController(object):
pass
setattr(RootController, '🌰', expose()(lambda self: 'Hello, World!'))
app = TestApp(Pecan(RootController()))
resp = app.get('/%F0%9F%8C%B0/')
assert resp.status_int == 200
assert resp.body == b_('Hello, World!')
def test_unicode_child(self):
class ChildController(object):
@expose()
def index(self):
return 'Hello, World!'
class RootController(object):
pass
setattr(RootController, '🌰', ChildController())
app = TestApp(Pecan(RootController()))
resp = app.get('/%F0%9F%8C%B0/')
assert resp.status_int == 200
assert resp.body == b_('Hello, World!')
class TestLookups(PecanTestCase):
@property
def app_(self):
class LookupController(object):
def __init__(self, someID):
self.someID = someID
@expose()
def index(self):
return '/%s' % self.someID
@expose()
def name(self):
return '/%s/name' % self.someID
class RootController(object):
@expose()
def index(self):
return '/'
@expose()
def _lookup(self, someID, *remainder):
return LookupController(someID), remainder
return TestApp(Pecan(RootController()))
def test_index(self):
r = self.app_.get('/')
assert r.status_int == 200
assert r.body == b_('/')
def test_lookup(self):
r = self.app_.get('/100/')
assert r.status_int == 200
assert r.body == b_('/100')
def test_lookup_with_method(self):
r = self.app_.get('/100/name')
assert r.status_int == 200
assert r.body == b_('/100/name')
def test_lookup_with_wrong_argspec(self):
class RootController(object):
@expose()
def _lookup(self, someID):
return 'Bad arg spec' # pragma: nocover
with warnings.catch_warnings():
warnings.simplefilter("ignore")
app = TestApp(Pecan(RootController()))
r = app.get('/foo/bar', expect_errors=True)
assert r.status_int == 404
class TestCanonicalLookups(PecanTestCase):
@property
def app_(self):
class LookupController(object):
def __init__(self, someID):
self.someID = someID
@expose()
def index(self):
return self.someID
class UserController(object):
@expose()
def _lookup(self, someID, *remainder):
return LookupController(someID), remainder
class RootController(object):
users = UserController()
return TestApp(Pecan(RootController()))
def test_canonical_lookup(self):
assert self.app_.get('/users', expect_errors=404).status_int == 404
assert self.app_.get('/users/', expect_errors=404).status_int == 404
assert self.app_.get('/users/100').status_int == 302
assert self.app_.get('/users/100/').body == b_('100')
class TestControllerArguments(PecanTestCase):
@property
def app_(self):
class RootController(object):
@expose()
def index(self, id):
return 'index: %s' % id
@expose()
def multiple(self, one, two):
return 'multiple: %s, %s' % (one, two)
@expose()
def optional(self, id=None):
return 'optional: %s' % str(id)
@expose()
def multiple_optional(self, one=None, two=None, three=None):
return 'multiple_optional: %s, %s, %s' % (one, two, three)
@expose()
def variable_args(self, *args):
return 'variable_args: %s' % ', '.join(args)
@expose()
def variable_kwargs(self, **kwargs):
data = [
'%s=%s' % (key, kwargs[key])
for key in sorted(kwargs.keys())
]
return 'variable_kwargs: %s' % ', '.join(data)
@expose()
def variable_all(self, *args, **kwargs):
data = [
'%s=%s' % (key, kwargs[key])
for key in sorted(kwargs.keys())
]
return 'variable_all: %s' % ', '.join(list(args) + data)
@expose()
def eater(self, id, dummy=None, *args, **kwargs):
data = [
'%s=%s' % (key, kwargs[key])
for key in sorted(kwargs.keys())
]
return 'eater: %s, %s, %s' % (
id,
dummy,
', '.join(list(args) + data)
)
@staticmethod
@expose()
def static(id):
return "id is %s" % id
@expose()
def _route(self, args, request):
if hasattr(self, args[0]):
return getattr(self, args[0]), args[1:]
else:
return self.index, args
return TestApp(Pecan(RootController()))
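    # The custom _route above dispatches on the first path segment when it
    # names an attribute and otherwise falls back to index; e.g. (sketch)
    # GET /multiple/one/two resolves to (self.multiple, ['one', 'two'])
    # while GET /1 resolves to (self.index, ['1']).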
def test_required_argument(self):
try:
r = self.app_.get('/')
assert r.status_int != 200 # pragma: nocover
except Exception as ex:
assert type(ex) == TypeError
assert ex.args[0] in (
"index() takes exactly 2 arguments (1 given)",
"index() missing 1 required positional argument: 'id'"
) # this messaging changed in Python 3.3
def test_single_argument(self):
r = self.app_.get('/1')
assert r.status_int == 200
assert r.body == b_('index: 1')
def test_single_argument_with_encoded_url(self):
r = self.app_.get('/This%20is%20a%20test%21')
assert r.status_int == 200
assert r.body == b_('index: This is a test!')
def test_single_argument_with_plus(self):
r = self.app_.get('/foo+bar')
assert r.status_int == 200
assert r.body == b_('index: foo+bar')
def test_single_argument_with_encoded_plus(self):
r = self.app_.get('/foo%2Bbar')
assert r.status_int == 200
assert r.body == b_('index: foo+bar')
def test_two_arguments(self):
r = self.app_.get('/1/dummy', status=404)
assert r.status_int == 404
def test_keyword_argument(self):
r = self.app_.get('/?id=2')
assert r.status_int == 200
assert r.body == b_('index: 2')
def test_keyword_argument_with_encoded_url(self):
r = self.app_.get('/?id=This%20is%20a%20test%21')
assert r.status_int == 200
assert r.body == b_('index: This is a test!')
def test_keyword_argument_with_plus(self):
r = self.app_.get('/?id=foo+bar')
assert r.status_int == 200
assert r.body == b_('index: foo bar')
def test_keyword_argument_with_encoded_plus(self):
r = self.app_.get('/?id=foo%2Bbar')
assert r.status_int == 200
assert r.body == b_('index: foo+bar')
def test_argument_and_keyword_argument(self):
r = self.app_.get('/3?id=three')
assert r.status_int == 200
assert r.body == b_('index: 3')
def test_encoded_argument_and_keyword_argument(self):
r = self.app_.get('/This%20is%20a%20test%21?id=three')
assert r.status_int == 200
assert r.body == b_('index: This is a test!')
def test_explicit_kwargs(self):
r = self.app_.post('/', {'id': '4'})
assert r.status_int == 200
assert r.body == b_('index: 4')
def test_path_with_explicit_kwargs(self):
r = self.app_.post('/4', {'id': 'four'})
assert r.status_int == 200
assert r.body == b_('index: 4')
def test_explicit_json_kwargs(self):
r = self.app_.post_json('/', {'id': '4'})
assert r.status_int == 200
assert r.body == b_('index: 4')
def test_path_with_explicit_json_kwargs(self):
r = self.app_.post_json('/4', {'id': 'four'})
assert r.status_int == 200
assert r.body == b_('index: 4')
def test_multiple_kwargs(self):
r = self.app_.get('/?id=5&dummy=dummy')
assert r.status_int == 200
assert r.body == b_('index: 5')
def test_kwargs_from_root(self):
r = self.app_.post('/', {'id': '6', 'dummy': 'dummy'})
assert r.status_int == 200
assert r.body == b_('index: 6')
def test_json_kwargs_from_root(self):
r = self.app_.post_json('/', {'id': '6', 'dummy': 'dummy'})
assert r.status_int == 200
assert r.body == b_('index: 6')
# multiple args
def test_multiple_positional_arguments(self):
r = self.app_.get('/multiple/one/two')
assert r.status_int == 200
assert r.body == b_('multiple: one, two')
def test_multiple_positional_arguments_with_url_encode(self):
r = self.app_.get('/multiple/One%20/Two%21')
assert r.status_int == 200
assert r.body == b_('multiple: One , Two!')
def test_multiple_positional_arguments_with_kwargs(self):
r = self.app_.get('/multiple?one=three&two=four')
assert r.status_int == 200
assert r.body == b_('multiple: three, four')
def test_multiple_positional_arguments_with_url_encoded_kwargs(self):
r = self.app_.get('/multiple?one=Three%20&two=Four%20%21')
assert r.status_int == 200
assert r.body == b_('multiple: Three , Four !')
def test_positional_args_with_dictionary_kwargs(self):
r = self.app_.post('/multiple', {'one': 'five', 'two': 'six'})
assert r.status_int == 200
assert r.body == b_('multiple: five, six')
def test_positional_args_with_json_kwargs(self):
r = self.app_.post_json('/multiple', {'one': 'five', 'two': 'six'})
assert r.status_int == 200
assert r.body == b_('multiple: five, six')
def test_positional_args_with_url_encoded_dictionary_kwargs(self):
r = self.app_.post('/multiple', {'one': 'Five%20', 'two': 'Six%20%21'})
assert r.status_int == 200
assert r.body == b_('multiple: Five%20, Six%20%21')
# optional arg
def test_optional_arg(self):
r = self.app_.get('/optional')
assert r.status_int == 200
assert r.body == b_('optional: None')
def test_multiple_optional(self):
r = self.app_.get('/optional/1')
assert r.status_int == 200
assert r.body == b_('optional: 1')
def test_multiple_optional_url_encoded(self):
r = self.app_.get('/optional/Some%20Number')
assert r.status_int == 200
assert r.body == b_('optional: Some Number')
def test_multiple_optional_missing(self):
r = self.app_.get('/optional/2/dummy', status=404)
assert r.status_int == 404
def test_multiple_with_kwargs(self):
r = self.app_.get('/optional?id=2')
assert r.status_int == 200
assert r.body == b_('optional: 2')
def test_multiple_with_url_encoded_kwargs(self):
r = self.app_.get('/optional?id=Some%20Number')
assert r.status_int == 200
assert r.body == b_('optional: Some Number')
def test_multiple_args_with_url_encoded_kwargs(self):
r = self.app_.get('/optional/3?id=three')
assert r.status_int == 200
assert r.body == b_('optional: 3')
def test_url_encoded_positional_args(self):
r = self.app_.get('/optional/Some%20Number?id=three')
assert r.status_int == 200
assert r.body == b_('optional: Some Number')
def test_optional_arg_with_kwargs(self):
r = self.app_.post('/optional', {'id': '4'})
assert r.status_int == 200
assert r.body == b_('optional: 4')
def test_optional_arg_with_json_kwargs(self):
r = self.app_.post_json('/optional', {'id': '4'})
assert r.status_int == 200
assert r.body == b_('optional: 4')
def test_optional_arg_with_url_encoded_kwargs(self):
r = self.app_.post('/optional', {'id': 'Some%20Number'})
assert r.status_int == 200
assert r.body == b_('optional: Some%20Number')
def test_multiple_positional_arguments_with_dictionary_kwargs(self):
r = self.app_.post('/optional/5', {'id': 'five'})
assert r.status_int == 200
assert r.body == b_('optional: 5')
def test_multiple_positional_arguments_with_json_kwargs(self):
r = self.app_.post_json('/optional/5', {'id': 'five'})
assert r.status_int == 200
assert r.body == b_('optional: 5')
def test_multiple_positional_url_encoded_arguments_with_kwargs(self):
r = self.app_.post('/optional/Some%20Number', {'id': 'five'})
assert r.status_int == 200
assert r.body == b_('optional: Some Number')
def test_optional_arg_with_multiple_kwargs(self):
r = self.app_.get('/optional?id=6&dummy=dummy')
assert r.status_int == 200
assert r.body == b_('optional: 6')
def test_optional_arg_with_multiple_url_encoded_kwargs(self):
r = self.app_.get('/optional?id=Some%20Number&dummy=dummy')
assert r.status_int == 200
assert r.body == b_('optional: Some Number')
def test_optional_arg_with_multiple_dictionary_kwargs(self):
r = self.app_.post('/optional', {'id': '7', 'dummy': 'dummy'})
assert r.status_int == 200
assert r.body == b_('optional: 7')
def test_optional_arg_with_multiple_json_kwargs(self):
r = self.app_.post_json('/optional', {'id': '7', 'dummy': 'dummy'})
assert r.status_int == 200
assert r.body == b_('optional: 7')
def test_optional_arg_with_multiple_url_encoded_dictionary_kwargs(self):
r = self.app_.post('/optional', {
'id': 'Some%20Number',
'dummy': 'dummy'
})
assert r.status_int == 200
assert r.body == b_('optional: Some%20Number')
# multiple optional args
def test_multiple_optional_positional_args(self):
r = self.app_.get('/multiple_optional')
assert r.status_int == 200
assert r.body == b_('multiple_optional: None, None, None')
def test_multiple_optional_positional_args_one_arg(self):
r = self.app_.get('/multiple_optional/1')
assert r.status_int == 200
assert r.body == b_('multiple_optional: 1, None, None')
def test_multiple_optional_positional_args_one_url_encoded_arg(self):
r = self.app_.get('/multiple_optional/One%21')
assert r.status_int == 200
assert r.body == b_('multiple_optional: One!, None, None')
def test_multiple_optional_positional_args_all_args(self):
r = self.app_.get('/multiple_optional/1/2/3')
assert r.status_int == 200
assert r.body == b_('multiple_optional: 1, 2, 3')
def test_multiple_optional_positional_args_all_url_encoded_args(self):
r = self.app_.get('/multiple_optional/One%21/Two%21/Three%21')
assert r.status_int == 200
assert r.body == b_('multiple_optional: One!, Two!, Three!')
def test_multiple_optional_positional_args_too_many_args(self):
r = self.app_.get('/multiple_optional/1/2/3/dummy', status=404)
assert r.status_int == 404
def test_multiple_optional_positional_args_with_kwargs(self):
r = self.app_.get('/multiple_optional?one=1')
assert r.status_int == 200
assert r.body == b_('multiple_optional: 1, None, None')
def test_multiple_optional_positional_args_with_url_encoded_kwargs(self):
r = self.app_.get('/multiple_optional?one=One%21')
assert r.status_int == 200
assert r.body == b_('multiple_optional: One!, None, None')
def test_multiple_optional_positional_args_with_string_kwargs(self):
r = self.app_.get('/multiple_optional/1?one=one')
assert r.status_int == 200
assert r.body == b_('multiple_optional: 1, None, None')
def test_multiple_optional_positional_args_with_encoded_str_kwargs(self):
r = self.app_.get('/multiple_optional/One%21?one=one')
assert r.status_int == 200
assert r.body == b_('multiple_optional: One!, None, None')
def test_multiple_optional_positional_args_with_dict_kwargs(self):
r = self.app_.post('/multiple_optional', {'one': '1'})
assert r.status_int == 200
assert r.body == b_('multiple_optional: 1, None, None')
def test_multiple_optional_positional_args_with_json_kwargs(self):
r = self.app_.post_json('/multiple_optional', {'one': '1'})
assert r.status_int == 200
assert r.body == b_('multiple_optional: 1, None, None')
def test_multiple_optional_positional_args_with_encoded_dict_kwargs(self):
r = self.app_.post('/multiple_optional', {'one': 'One%21'})
assert r.status_int == 200
assert r.body == b_('multiple_optional: One%21, None, None')
def test_multiple_optional_positional_args_and_dict_kwargs(self):
r = self.app_.post('/multiple_optional/1', {'one': 'one'})
assert r.status_int == 200
assert r.body == b_('multiple_optional: 1, None, None')
def test_multiple_optional_positional_args_and_json_kwargs(self):
r = self.app_.post_json('/multiple_optional/1', {'one': 'one'})
assert r.status_int == 200
assert r.body == b_('multiple_optional: 1, None, None')
def test_multiple_optional_encoded_positional_args_and_dict_kwargs(self):
r = self.app_.post('/multiple_optional/One%21', {'one': 'one'})
assert r.status_int == 200
assert r.body == b_('multiple_optional: One!, None, None')
def test_multiple_optional_args_with_multiple_kwargs(self):
r = self.app_.get('/multiple_optional?one=1&two=2&three=3&four=4')
assert r.status_int == 200
assert r.body == b_('multiple_optional: 1, 2, 3')
def test_multiple_optional_args_with_multiple_encoded_kwargs(self):
r = self.app_.get(
'/multiple_optional?one=One%21&two=Two%21&three=Three%21&four=4'
)
assert r.status_int == 200
assert r.body == b_('multiple_optional: One!, Two!, Three!')
def test_multiple_optional_args_with_multiple_dict_kwargs(self):
r = self.app_.post(
'/multiple_optional',
{'one': '1', 'two': '2', 'three': '3', 'four': '4'}
)
assert r.status_int == 200
assert r.body == b_('multiple_optional: 1, 2, 3')
def test_multiple_optional_args_with_multiple_json_kwargs(self):
r = self.app_.post_json(
'/multiple_optional',
{'one': '1', 'two': '2', 'three': '3', 'four': '4'}
)
assert r.status_int == 200
assert r.body == b_('multiple_optional: 1, 2, 3')
def test_multiple_optional_args_with_multiple_encoded_dict_kwargs(self):
r = self.app_.post(
'/multiple_optional',
{
'one': 'One%21',
'two': 'Two%21',
'three': 'Three%21',
'four': '4'
}
)
assert r.status_int == 200
assert r.body == b_('multiple_optional: One%21, Two%21, Three%21')
def test_multiple_optional_args_with_last_kwarg(self):
r = self.app_.get('/multiple_optional?three=3')
assert r.status_int == 200
assert r.body == b_('multiple_optional: None, None, 3')
def test_multiple_optional_args_with_last_encoded_kwarg(self):
r = self.app_.get('/multiple_optional?three=Three%21')
assert r.status_int == 200
assert r.body == b_('multiple_optional: None, None, Three!')
def test_multiple_optional_args_with_middle_arg(self):
r = self.app_.get('/multiple_optional', {'two': '2'})
assert r.status_int == 200
assert r.body == b_('multiple_optional: None, 2, None')
def test_variable_args(self):
r = self.app_.get('/variable_args')
assert r.status_int == 200
assert r.body == b_('variable_args: ')
def test_multiple_variable_args(self):
r = self.app_.get('/variable_args/1/dummy')
assert r.status_int == 200
assert r.body == b_('variable_args: 1, dummy')
def test_multiple_encoded_variable_args(self):
r = self.app_.get('/variable_args/Testing%20One%20Two/Three%21')
assert r.status_int == 200
assert r.body == b_('variable_args: Testing One Two, Three!')
def test_variable_args_with_kwargs(self):
r = self.app_.get('/variable_args?id=2&dummy=dummy')
assert r.status_int == 200
assert r.body == b_('variable_args: ')
def test_variable_args_with_dict_kwargs(self):
r = self.app_.post('/variable_args', {'id': '3', 'dummy': 'dummy'})
assert r.status_int == 200
assert r.body == b_('variable_args: ')
def test_variable_args_with_json_kwargs(self):
r = self.app_.post_json(
'/variable_args',
{'id': '3', 'dummy': 'dummy'}
)
assert r.status_int == 200
assert r.body == b_('variable_args: ')
def test_variable_kwargs(self):
r = self.app_.get('/variable_kwargs')
assert r.status_int == 200
assert r.body == b_('variable_kwargs: ')
def test_multiple_variable_kwargs(self):
r = self.app_.get('/variable_kwargs/1/dummy', status=404)
assert r.status_int == 404
def test_multiple_variable_kwargs_with_explicit_kwargs(self):
r = self.app_.get('/variable_kwargs?id=2&dummy=dummy')
assert r.status_int == 200
assert r.body == b_('variable_kwargs: dummy=dummy, id=2')
def test_multiple_variable_kwargs_with_explicit_encoded_kwargs(self):
r = self.app_.get(
'/variable_kwargs?id=Two%21&dummy=This%20is%20a%20test'
)
assert r.status_int == 200
assert r.body == b_('variable_kwargs: dummy=This is a test, id=Two!')
def test_multiple_variable_kwargs_with_dict_kwargs(self):
r = self.app_.post('/variable_kwargs', {'id': '3', 'dummy': 'dummy'})
assert r.status_int == 200
assert r.body == b_('variable_kwargs: dummy=dummy, id=3')
def test_multiple_variable_kwargs_with_json_kwargs(self):
r = self.app_.post_json(
'/variable_kwargs',
{'id': '3', 'dummy': 'dummy'}
)
assert r.status_int == 200
assert r.body == b_('variable_kwargs: dummy=dummy, id=3')
def test_multiple_variable_kwargs_with_encoded_dict_kwargs(self):
r = self.app_.post(
'/variable_kwargs',
{'id': 'Three%21', 'dummy': 'This%20is%20a%20test'}
)
assert r.status_int == 200
result = 'variable_kwargs: dummy=This%20is%20a%20test, id=Three%21'
assert r.body == b_(result)
def test_variable_all(self):
r = self.app_.get('/variable_all')
assert r.status_int == 200
assert r.body == b_('variable_all: ')
def test_variable_all_with_one_extra(self):
r = self.app_.get('/variable_all/1')
assert r.status_int == 200
assert r.body == b_('variable_all: 1')
def test_variable_all_with_two_extras(self):
r = self.app_.get('/variable_all/2/dummy')
assert r.status_int == 200
assert r.body == b_('variable_all: 2, dummy')
def test_variable_mixed(self):
r = self.app_.get('/variable_all/3?month=1&day=12')
assert r.status_int == 200
assert r.body == b_('variable_all: 3, day=12, month=1')
def test_variable_mixed_explicit(self):
r = self.app_.get('/variable_all/4?id=four&month=1&day=12')
assert r.status_int == 200
assert r.body == b_('variable_all: 4, day=12, id=four, month=1')
def test_variable_post(self):
r = self.app_.post('/variable_all/5/dummy')
assert r.status_int == 200
assert r.body == b_('variable_all: 5, dummy')
def test_variable_post_with_kwargs(self):
r = self.app_.post('/variable_all/6', {'month': '1', 'day': '12'})
assert r.status_int == 200
assert r.body == b_('variable_all: 6, day=12, month=1')
def test_variable_post_with_json_kwargs(self):
r = self.app_.post_json(
'/variable_all/6',
{'month': '1', 'day': '12'}
)
assert r.status_int == 200
assert r.body == b_('variable_all: 6, day=12, month=1')
def test_variable_post_mixed(self):
r = self.app_.post(
'/variable_all/7',
{'id': 'seven', 'month': '1', 'day': '12'}
)
assert r.status_int == 200
assert r.body == b_('variable_all: 7, day=12, id=seven, month=1')
def test_variable_post_mixed_with_json(self):
r = self.app_.post_json(
'/variable_all/7',
{'id': 'seven', 'month': '1', 'day': '12'}
)
assert r.status_int == 200
assert r.body == b_('variable_all: 7, day=12, id=seven, month=1')
def test_duplicate_query_parameters_GET(self):
r = self.app_.get('/variable_kwargs?list=1&list=2')
l = [u_('1'), u_('2')]
assert r.status_int == 200
assert r.body == b_('variable_kwargs: list=%s' % l)
def test_duplicate_query_parameters_POST(self):
r = self.app_.post('/variable_kwargs',
{'list': ['1', '2']})
l = [u_('1'), u_('2')]
assert r.status_int == 200
assert r.body == b_('variable_kwargs: list=%s' % l)
def test_duplicate_query_parameters_POST_mixed(self):
r = self.app_.post('/variable_kwargs?list=1&list=2',
{'list': ['3', '4']})
l = [u_('1'), u_('2'), u_('3'), u_('4')]
assert r.status_int == 200
assert r.body == b_('variable_kwargs: list=%s' % l)
def test_duplicate_query_parameters_POST_mixed_json(self):
r = self.app_.post('/variable_kwargs?list=1&list=2',
{'list': 3})
l = [u_('1'), u_('2'), u_('3')]
assert r.status_int == 200
assert r.body == b_('variable_kwargs: list=%s' % l)
def test_staticmethod(self):
r = self.app_.get('/static/foobar')
assert r.status_int == 200
assert r.body == b_('id is foobar')
def test_no_remainder(self):
try:
r = self.app_.get('/eater')
assert r.status_int != 200 # pragma: nocover
except Exception as ex:
assert type(ex) == TypeError
assert ex.args[0] in (
"eater() takes at least 2 arguments (1 given)",
"eater() missing 1 required positional argument: 'id'"
) # this messaging changed in Python 3.3
def test_one_remainder(self):
r = self.app_.get('/eater/1')
assert r.status_int == 200
assert r.body == b_('eater: 1, None, ')
def test_two_remainders(self):
r = self.app_.get('/eater/2/dummy')
assert r.status_int == 200
assert r.body == b_('eater: 2, dummy, ')
def test_many_remainders(self):
r = self.app_.get('/eater/3/dummy/foo/bar')
assert r.status_int == 200
assert r.body == b_('eater: 3, dummy, foo, bar')
def test_remainder_with_kwargs(self):
r = self.app_.get('/eater/4?month=1&day=12')
assert r.status_int == 200
assert r.body == b_('eater: 4, None, day=12, month=1')
def test_remainder_with_many_kwargs(self):
r = self.app_.get('/eater/5?id=five&month=1&day=12&dummy=dummy')
assert r.status_int == 200
assert r.body == b_('eater: 5, dummy, day=12, month=1')
def test_post_remainder(self):
r = self.app_.post('/eater/6')
assert r.status_int == 200
assert r.body == b_('eater: 6, None, ')
    def test_post_two_remainders(self):
r = self.app_.post('/eater/7/dummy')
assert r.status_int == 200
assert r.body == b_('eater: 7, dummy, ')
def test_post_many_remainders(self):
r = self.app_.post('/eater/8/dummy/foo/bar')
assert r.status_int == 200
assert r.body == b_('eater: 8, dummy, foo, bar')
def test_post_remainder_with_kwargs(self):
r = self.app_.post('/eater/9', {'month': '1', 'day': '12'})
assert r.status_int == 200
assert r.body == b_('eater: 9, None, day=12, month=1')
def test_post_empty_remainder_with_json_kwargs(self):
r = self.app_.post_json('/eater/9/', {'month': '1', 'day': '12'})
assert r.status_int == 200
assert r.body == b_('eater: 9, None, day=12, month=1')
def test_post_remainder_with_json_kwargs(self):
r = self.app_.post_json('/eater/9', {'month': '1', 'day': '12'})
assert r.status_int == 200
assert r.body == b_('eater: 9, None, day=12, month=1')
def test_post_many_remainders_with_many_kwargs(self):
r = self.app_.post(
'/eater/10',
{'id': 'ten', 'month': '1', 'day': '12', 'dummy': 'dummy'}
)
assert r.status_int == 200
assert r.body == b_('eater: 10, dummy, day=12, month=1')
def test_post_many_remainders_with_many_json_kwargs(self):
r = self.app_.post_json(
'/eater/10',
{'id': 'ten', 'month': '1', 'day': '12', 'dummy': 'dummy'}
)
assert r.status_int == 200
assert r.body == b_('eater: 10, dummy, day=12, month=1')
class TestDefaultErrorRendering(PecanTestCase):
def test_plain_error(self):
class RootController(object):
pass
app = TestApp(Pecan(RootController()))
r = app.get('/', status=404)
assert r.status_int == 404
assert r.content_type == 'text/plain'
assert r.body == b_(HTTPNotFound().plain_body({}))
def test_html_error(self):
class RootController(object):
pass
app = TestApp(Pecan(RootController()))
r = app.get('/', headers={'Accept': 'text/html'}, status=404)
assert r.status_int == 404
assert r.content_type == 'text/html'
assert r.body == b_(HTTPNotFound().html_body({}))
def test_json_error(self):
class RootController(object):
pass
app = TestApp(Pecan(RootController()))
r = app.get('/', headers={'Accept': 'application/json'}, status=404)
assert r.status_int == 404
json_resp = json.loads(r.body.decode())
assert json_resp['code'] == 404
assert json_resp['description'] is None
assert json_resp['title'] == 'Not Found'
assert r.content_type == 'application/json'
class TestAbort(PecanTestCase):
def test_abort(self):
class RootController(object):
@expose()
def index(self):
abort(404)
app = TestApp(Pecan(RootController()))
r = app.get('/', status=404)
assert r.status_int == 404
def test_abort_with_detail(self):
class RootController(object):
@expose()
def index(self):
abort(status_code=401, detail='Not Authorized')
app = TestApp(Pecan(RootController()))
r = app.get('/', status=401)
assert r.status_int == 401
def test_abort_keeps_traceback(self):
last_exc, last_traceback = None, None
try:
try:
raise Exception('Bottom Exception')
except:
abort(404)
except Exception:
last_exc, _, last_traceback = sys.exc_info()
assert last_exc is HTTPNotFound
assert 'Bottom Exception' in traceback.format_tb(last_traceback)[-1]
class TestScriptName(PecanTestCase):
def setUp(self):
super(TestScriptName, self).setUp()
self.environ = {'SCRIPT_NAME': '/foo'}
def test_handle_script_name(self):
class RootController(object):
@expose()
def index(self):
return 'Root Index'
app = TestApp(Pecan(RootController()), extra_environ=self.environ)
r = app.get('/foo/')
assert r.status_int == 200
class TestRedirect(PecanTestCase):
@property
def app_(self):
class RootController(object):
@expose()
def index(self):
redirect('/testing')
@expose()
def internal(self):
redirect('/testing', internal=True)
@expose()
def bad_internal(self):
redirect('/testing', internal=True, code=301)
@expose()
def permanent(self):
redirect('/testing', code=301)
@expose()
def testing(self):
return 'it worked!'
return TestApp(make_app(RootController(), debug=False))
def test_index(self):
r = self.app_.get('/')
assert r.status_int == 302
r = r.follow()
assert r.status_int == 200
assert r.body == b_('it worked!')
def test_internal(self):
r = self.app_.get('/internal')
assert r.status_int == 200
assert r.body == b_('it worked!')
def test_internal_with_301(self):
self.assertRaises(ValueError, self.app_.get, '/bad_internal')
def test_permanent_redirect(self):
r = self.app_.get('/permanent')
assert r.status_int == 301
r = r.follow()
assert r.status_int == 200
assert r.body == b_('it worked!')
def test_x_forward_proto(self):
class ChildController(object):
@expose()
def index(self):
redirect('/testing') # pragma: nocover
class RootController(object):
@expose()
def index(self):
redirect('/testing') # pragma: nocover
@expose()
def testing(self):
return 'it worked!' # pragma: nocover
child = ChildController()
app = TestApp(make_app(RootController(), debug=True))
res = app.get(
'/child', extra_environ=dict(HTTP_X_FORWARDED_PROTO='https')
)
# non-canonical url will redirect, so we won't get a 301
assert res.status_int == 302
        # should add a trailing / and change the location to https
assert res.location == 'https://localhost/child/'
assert res.request.environ['HTTP_X_FORWARDED_PROTO'] == 'https'
class TestInternalRedirectContext(PecanTestCase):
@property
def app_(self):
class RootController(object):
@expose()
def redirect_with_context(self):
request.context['foo'] = 'bar'
redirect('/testing')
@expose()
def internal_with_context(self):
request.context['foo'] = 'bar'
redirect('/testing', internal=True)
@expose('json')
def testing(self):
return request.context
return TestApp(make_app(RootController(), debug=False))
def test_internal_with_request_context(self):
r = self.app_.get('/internal_with_context')
assert r.status_int == 200
assert json.loads(r.body.decode()) == {'foo': 'bar'}
def test_context_does_not_bleed(self):
r = self.app_.get('/redirect_with_context').follow()
assert r.status_int == 200
assert json.loads(r.body.decode()) == {}
class TestStreamedResponse(PecanTestCase):
def test_streaming_response(self):
class RootController(object):
@expose(content_type='text/plain')
def test(self, foo):
if foo == 'stream':
# mimic large file
contents = six.BytesIO(b_('stream'))
response.content_type = 'application/octet-stream'
contents.seek(0, os.SEEK_END)
response.content_length = contents.tell()
contents.seek(0, os.SEEK_SET)
response.app_iter = contents
return response
else:
return 'plain text'
app = TestApp(Pecan(RootController()))
r = app.get('/test/stream')
assert r.content_type == 'application/octet-stream'
assert r.body == b_('stream')
r = app.get('/test/plain')
assert r.content_type == 'text/plain'
assert r.body == b_('plain text')
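# The streaming pattern above -- pointing response.app_iter at a file-like
# object and deriving content_length via seek()/tell() -- avoids buffering
# the whole payload in memory; rough sketch (hypothetical file path):
#
#   fh = open('/tmp/big.bin', 'rb')
#   fh.seek(0, os.SEEK_END)
#   response.content_length = fh.tell()
#   fh.seek(0, os.SEEK_SET)
#   response.app_iter = fh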
class TestManualResponse(PecanTestCase):
def test_manual_response(self):
class RootController(object):
@expose()
def index(self):
resp = webob.Response(response.environ)
resp.body = b_('Hello, World!')
return resp
app = TestApp(Pecan(RootController()))
r = app.get('/')
assert r.body == b_('Hello, World!')
class TestCustomResponseandRequest(PecanTestCase):
def test_custom_objects(self):
class CustomRequest(Request):
@property
def headers(self):
headers = super(CustomRequest, self).headers
headers['X-Custom-Request'] = 'ABC'
return headers
class CustomResponse(Response):
@property
def headers(self):
headers = super(CustomResponse, self).headers
headers['X-Custom-Response'] = 'XYZ'
return headers
class RootController(object):
@expose()
def index(self):
return request.headers.get('X-Custom-Request')
app = TestApp(Pecan(
RootController(),
request_cls=CustomRequest,
response_cls=CustomResponse
))
r = app.get('/')
assert r.body == b_('ABC')
assert r.headers.get('X-Custom-Response') == 'XYZ'
class TestThreadLocalState(PecanTestCase):
def test_thread_local_dir(self):
"""
Threadlocal proxies for request and response should properly
proxy ``dir()`` calls to the underlying webob class.
"""
class RootController(object):
@expose()
def index(self):
assert 'method' in dir(request)
assert 'status' in dir(response)
return '/'
app = TestApp(Pecan(RootController()))
r = app.get('/')
assert r.status_int == 200
assert r.body == b_('/')
def test_request_state_cleanup(self):
"""
After a request, the state local() should be totally clean
except for state.app (so that objects don't leak between requests)
"""
from pecan.core import state
class RootController(object):
@expose()
def index(self):
return '/'
app = TestApp(Pecan(RootController()))
r = app.get('/')
assert r.status_int == 200
assert r.body == b_('/')
assert state.__dict__ == {}
class TestFileTypeExtensions(PecanTestCase):
@property
def app_(self):
"""
Test extension splits
"""
class RootController(object):
@expose(content_type=None)
def _default(self, *args):
ext = request.pecan['extension']
assert len(args) == 1
if ext:
assert ext not in args[0]
return ext or ''
return TestApp(Pecan(RootController()))
def test_html_extension(self):
for path in ('/index.html', '/index.html/'):
r = self.app_.get(path)
assert r.status_int == 200
assert r.body == b_('.html')
def test_image_extension(self):
for path in ('/index.png', '/index.png/'):
r = self.app_.get(path)
assert r.status_int == 200
assert r.body == b_('.png')
def test_hidden_file(self):
for path in ('/.vimrc', '/.vimrc/'):
r = self.app_.get(path)
assert r.status_int == 204
assert r.body == b_('')
def test_multi_dot_extension(self):
for path in ('/gradient.min.js', '/gradient.min.js/'):
r = self.app_.get(path)
assert r.status_int == 200
assert r.body == b_('.js')
def test_bad_content_type(self):
class RootController(object):
@expose()
def index(self):
return '/'
app = TestApp(Pecan(RootController()))
r = app.get('/')
assert r.status_int == 200
assert r.body == b_('/')
r = app.get('/index.html', expect_errors=True)
assert r.status_int == 200
assert r.body == b_('/')
with warnings.catch_warnings():
warnings.simplefilter("ignore")
r = app.get('/index.txt', expect_errors=True)
assert r.status_int == 404
def test_unknown_file_extension(self):
class RootController(object):
@expose(content_type=None)
def _default(self, *args):
assert 'example:x.tiny' in args
assert request.pecan['extension'] is None
return 'SOME VALUE'
app = TestApp(Pecan(RootController()))
r = app.get('/example:x.tiny')
assert r.status_int == 200
assert r.body == b_('SOME VALUE')
def test_guessing_disabled(self):
class RootController(object):
@expose(content_type=None)
def _default(self, *args):
assert 'index.html' in args
assert request.pecan['extension'] is None
return 'SOME VALUE'
app = TestApp(Pecan(RootController(),
guess_content_type_from_ext=False))
r = app.get('/index.html')
assert r.status_int == 200
assert r.body == b_('SOME VALUE')
def test_content_type_guessing_disabled(self):
class ResourceController(object):
def __init__(self, name):
self.name = name
assert self.name == 'file.html'
@expose('json')
def index(self):
return dict(name=self.name)
class RootController(object):
@expose()
def _lookup(self, name, *remainder):
return ResourceController(name), remainder
app = TestApp(
Pecan(RootController(), guess_content_type_from_ext=False)
)
r = app.get('/file.html/')
assert r.status_int == 200
result = dict(json.loads(r.body.decode()))
assert result == {'name': 'file.html'}
r = app.get('/file.html')
assert r.status_int == 302
r = r.follow()
result = dict(json.loads(r.body.decode()))
assert result == {'name': 'file.html'}
class TestContentTypeByAcceptHeaders(PecanTestCase):
@property
def app_(self):
"""
Test that content type is set appropriately based on Accept headers.
"""
class RootController(object):
@expose(content_type='text/html')
@expose(content_type='application/json')
def index(self, *args):
return 'Foo'
return TestApp(Pecan(RootController()))
def test_quality(self):
r = self.app_.get('/', headers={
'Accept': 'text/html,application/json;q=0.9,*/*;q=0.8'
})
assert r.status_int == 200
assert r.content_type == 'text/html'
r = self.app_.get('/', headers={
'Accept': 'application/json,text/html;q=0.9,*/*;q=0.8'
})
assert r.status_int == 200
assert r.content_type == 'application/json'
def test_file_extension_has_higher_precedence(self):
r = self.app_.get('/index.html', headers={
'Accept': 'application/json,text/html;q=0.9,*/*;q=0.8'
})
assert r.status_int == 200
assert r.content_type == 'text/html'
def test_not_acceptable(self):
r = self.app_.get('/', headers={
'Accept': 'application/xml',
}, status=406)
assert r.status_int == 406
def test_accept_header_missing(self):
r = self.app_.get('/')
assert r.status_int == 200
assert r.content_type == 'text/html'
class TestCanonicalRouting(PecanTestCase):
@property
def app_(self):
class ArgSubController(object):
@expose()
def index(self, arg):
return arg
class AcceptController(object):
@accept_noncanonical
@expose()
def index(self):
return 'accept'
class SubController(object):
@expose()
def index(self, **kw):
return 'subindex'
class RootController(object):
@expose()
def index(self):
return 'index'
sub = SubController()
arg = ArgSubController()
accept = AcceptController()
return TestApp(Pecan(RootController()))
def test_root(self):
r = self.app_.get('/')
assert r.status_int == 200
assert b_('index') in r.body
def test_index(self):
r = self.app_.get('/index')
assert r.status_int == 200
assert b_('index') in r.body
def test_broken_clients(self):
# for broken clients
r = self.app_.get('', status=302)
assert r.status_int == 302
assert r.location == 'http://localhost/'
def test_sub_controller_with_trailing(self):
r = self.app_.get('/sub/')
assert r.status_int == 200
assert b_('subindex') in r.body
def test_sub_controller_redirect(self):
r = self.app_.get('/sub', status=302)
assert r.status_int == 302
assert r.location == 'http://localhost/sub/'
def test_with_query_string(self):
# try with query string
r = self.app_.get('/sub?foo=bar', status=302)
assert r.status_int == 302
assert r.location == 'http://localhost/sub/?foo=bar'
def test_posts_fail(self):
try:
self.app_.post('/sub', dict(foo=1))
raise Exception("Post should fail") # pragma: nocover
except Exception as e:
assert isinstance(e, RuntimeError)
def test_with_args(self):
r = self.app_.get('/arg/index/foo')
assert r.status_int == 200
assert r.body == b_('foo')
def test_accept_noncanonical(self):
r = self.app_.get('/accept/')
assert r.status_int == 200
assert r.body == b_('accept')
def test_accept_noncanonical_no_trailing_slash(self):
r = self.app_.get('/accept')
assert r.status_int == 200
assert r.body == b_('accept')
class TestNonCanonical(PecanTestCase):
@property
def app_(self):
class ArgSubController(object):
@expose()
def index(self, arg):
return arg # pragma: nocover
class AcceptController(object):
@accept_noncanonical
@expose()
def index(self):
return 'accept' # pragma: nocover
class SubController(object):
@expose()
def index(self, **kw):
return 'subindex'
class RootController(object):
@expose()
def index(self):
return 'index'
sub = SubController()
arg = ArgSubController()
accept = AcceptController()
return TestApp(Pecan(RootController(), force_canonical=False))
def test_index(self):
r = self.app_.get('/')
assert r.status_int == 200
assert b_('index') in r.body
def test_subcontroller(self):
r = self.app_.get('/sub')
assert r.status_int == 200
assert b_('subindex') in r.body
def test_subcontroller_with_kwargs(self):
r = self.app_.post('/sub', dict(foo=1))
assert r.status_int == 200
assert b_('subindex') in r.body
def test_sub_controller_with_trailing(self):
r = self.app_.get('/sub/')
assert r.status_int == 200
assert b_('subindex') in r.body
def test_proxy(self):
class RootController(object):
@expose()
def index(self):
request.testing = True
assert request.testing is True
del request.testing
assert hasattr(request, 'testing') is False
return '/'
app = TestApp(make_app(RootController(), debug=True))
r = app.get('/')
assert r.status_int == 200
def test_app_wrap(self):
class RootController(object):
pass
wrapped_apps = []
def wrap(app):
wrapped_apps.append(app)
return app
make_app(RootController(), wrap_app=wrap, debug=True)
assert len(wrapped_apps) == 1
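# wrap_app is the hook for installing WSGI middleware: any callable that
# takes and returns a WSGI app works, e.g. (SomeMiddleware is hypothetical):
#
#   make_app(RootController(), wrap_app=lambda app: SomeMiddleware(app))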
class TestLogging(PecanTestCase):
def test_logging_setup(self):
class RootController(object):
@expose()
def index(self):
import logging
logging.getLogger('pecantesting').info('HELLO WORLD')
return "HELLO WORLD"
f = StringIO()
app = TestApp(make_app(RootController(), logging={
'loggers': {
'pecantesting': {
'level': 'INFO', 'handlers': ['memory']
}
},
'handlers': {
'memory': {
'level': 'INFO',
'class': 'logging.StreamHandler',
'stream': f
}
}
}))
app.get('/')
assert f.getvalue() == 'HELLO WORLD\n'
def test_logging_setup_with_config_obj(self):
class RootController(object):
@expose()
def index(self):
import logging
logging.getLogger('pecantesting').info('HELLO WORLD')
return "HELLO WORLD"
f = StringIO()
from pecan.configuration import conf_from_dict
app = TestApp(make_app(RootController(), logging=conf_from_dict({
'loggers': {
'pecantesting': {
'level': 'INFO', 'handlers': ['memory']
}
},
'handlers': {
'memory': {
'level': 'INFO',
'class': 'logging.StreamHandler',
'stream': f
}
}
})))
app.get('/')
assert f.getvalue() == 'HELLO WORLD\n'
class TestEngines(PecanTestCase):
template_path = os.path.join(os.path.dirname(__file__), 'templates')
@unittest.skipIf('genshi' not in builtin_renderers, 'Genshi not installed')
def test_genshi(self):
class RootController(object):
@expose('genshi:genshi.html')
def index(self, name='Jonathan'):
return dict(name=name)
@expose('genshi:genshi_bad.html')
def badtemplate(self):
return dict()
app = TestApp(
Pecan(RootController(), template_path=self.template_path)
)
r = app.get('/')
assert r.status_int == 200
assert b_("<h1>Hello, Jonathan!</h1>") in r.body
r = app.get('/index.html?name=World')
assert r.status_int == 200
assert b_("<h1>Hello, World!</h1>") in r.body
error_msg = None
try:
r = app.get('/badtemplate.html')
except Exception as e:
for error_f in error_formatters:
error_msg = error_f(e)
if error_msg:
break
assert error_msg is not None
@unittest.skipIf('kajiki' not in builtin_renderers, 'Kajiki not installed')
def test_kajiki(self):
class RootController(object):
@expose('kajiki:kajiki.html')
def index(self, name='Jonathan'):
return dict(name=name)
app = TestApp(
Pecan(RootController(), template_path=self.template_path)
)
r = app.get('/')
assert r.status_int == 200
assert b_("<h1>Hello, Jonathan!</h1>") in r.body
r = app.get('/index.html?name=World')
assert r.status_int == 200
assert b_("<h1>Hello, World!</h1>") in r.body
@unittest.skipIf('jinja' not in builtin_renderers, 'Jinja not installed')
def test_jinja(self):
class RootController(object):
@expose('jinja:jinja.html')
def index(self, name='Jonathan'):
return dict(name=name)
@expose('jinja:jinja_bad.html')
def badtemplate(self):
return dict()
app = TestApp(
Pecan(RootController(), template_path=self.template_path)
)
r = app.get('/')
assert r.status_int == 200
assert b_("<h1>Hello, Jonathan!</h1>") in r.body
error_msg = None
try:
r = app.get('/badtemplate.html')
except Exception as e:
for error_f in error_formatters:
error_msg = error_f(e)
if error_msg:
break
assert error_msg is not None
@unittest.skipIf('mako' not in builtin_renderers, 'Mako not installed')
def test_mako(self):
class RootController(object):
@expose('mako:mako.html')
def index(self, name='Jonathan'):
return dict(name=name)
@expose('mako:mako_bad.html')
def badtemplate(self):
return dict()
app = TestApp(
Pecan(RootController(), template_path=self.template_path)
)
r = app.get('/')
assert r.status_int == 200
assert b_("<h1>Hello, Jonathan!</h1>") in r.body
r = app.get('/index.html?name=World')
assert r.status_int == 200
assert b_("<h1>Hello, World!</h1>") in r.body
error_msg = None
try:
r = app.get('/badtemplate.html')
except Exception as e:
for error_f in error_formatters:
error_msg = error_f(e)
if error_msg:
break
assert error_msg is not None
def test_json(self):
try:
from simplejson import loads
except:
from json import loads # noqa
expected_result = dict(
name='Jonathan',
age=30, nested=dict(works=True)
)
class RootController(object):
@expose('json')
def index(self):
return expected_result
app = TestApp(Pecan(RootController()))
r = app.get('/')
assert r.status_int == 200
result = dict(loads(r.body.decode()))
assert result == expected_result
def test_override_template(self):
class RootController(object):
@expose('foo.html')
def index(self):
override_template(None, content_type='text/plain')
return 'Override'
app = TestApp(Pecan(RootController()))
r = app.get('/')
assert r.status_int == 200
assert b_('Override') in r.body
assert r.content_type == 'text/plain'
def test_render(self):
class RootController(object):
@expose()
def index(self, name='Jonathan'):
return render('mako.html', dict(name=name))
app = TestApp(
Pecan(RootController(), template_path=self.template_path)
)
r = app.get('/')
assert r.status_int == 200
assert b_("<h1>Hello, Jonathan!</h1>") in r.body
def test_default_json_renderer(self):
class RootController(object):
@expose()
def index(self, name='Bill'):
return dict(name=name)
app = TestApp(Pecan(RootController(), default_renderer='json'))
r = app.get('/')
assert r.status_int == 200
result = dict(json.loads(r.body.decode()))
assert result == {'name': 'Bill'}
def test_default_json_renderer_with_explicit_content_type(self):
class RootController(object):
@expose(content_type='text/plain')
def index(self, name='Bill'):
return name
app = TestApp(Pecan(RootController(), default_renderer='json'))
r = app.get('/')
assert r.status_int == 200
assert r.body == b_("Bill")
class TestDeprecatedRouteMethod(PecanTestCase):
@property
def app_(self):
class RootController(object):
@expose()
def index(self, *args):
return ', '.join(args)
@expose()
def _route(self, args):
return self.index, args
return TestApp(Pecan(RootController()))
def test_required_argument(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
r = self.app_.get('/foo/bar/')
assert r.status_int == 200
assert b_('foo, bar') in r.body
class TestExplicitRoute(PecanTestCase):
def test_alternate_route(self):
class RootController(object):
@expose(route='some-path')
def some_path(self):
return 'Hello, World!'
app = TestApp(Pecan(RootController()))
r = app.get('/some-path/')
assert r.status_int == 200
assert r.body == b_('Hello, World!')
r = app.get('/some_path/', expect_errors=True)
assert r.status_int == 404
def test_manual_route(self):
class SubController(object):
@expose(route='some-path')
def some_path(self):
return 'Hello, World!'
class RootController(object):
pass
route(RootController, 'some-controller', SubController())
app = TestApp(Pecan(RootController()))
r = app.get('/some-controller/some-path/')
assert r.status_int == 200
assert r.body == b_('Hello, World!')
r = app.get('/some-controller/some_path/', expect_errors=True)
assert r.status_int == 404
def test_manual_route_conflict(self):
class SubController(object):
pass
class RootController(object):
@expose()
def hello(self):
return 'Hello, World!'
self.assertRaises(
RuntimeError,
route,
RootController,
'hello',
SubController()
)
def test_custom_route_on_index(self):
class RootController(object):
@expose(route='some-path')
def index(self):
return 'Hello, World!'
app = TestApp(Pecan(RootController()))
r = app.get('/some-path/')
assert r.status_int == 200
assert r.body == b_('Hello, World!')
r = app.get('/')
assert r.status_int == 200
assert r.body == b_('Hello, World!')
r = app.get('/index/', expect_errors=True)
assert r.status_int == 404
def test_custom_route_with_attribute_conflict(self):
class RootController(object):
@expose(route='mock')
def greet(self):
return 'Hello, World!'
@expose()
def mock(self):
return 'You are not worthy!'
app = TestApp(Pecan(RootController()))
self.assertRaises(
RuntimeError,
app.get,
'/mock/'
)
def test_conflicting_custom_routes(self):
class RootController(object):
@expose(route='testing')
def foo(self):
return 'Foo!'
@expose(route='testing')
def bar(self):
return 'Bar!'
app = TestApp(Pecan(RootController()))
self.assertRaises(
RuntimeError,
app.get,
'/testing/'
)
def test_conflicting_custom_routes_in_subclass(self):
class BaseController(object):
@expose(route='testing')
def foo(self):
return request.path
class ChildController(BaseController):
pass
class RootController(BaseController):
child = ChildController()
app = TestApp(Pecan(RootController()))
r = app.get('/testing/')
assert r.body == b_('/testing/')
r = app.get('/child/testing/')
assert r.body == b_('/child/testing/')
def test_custom_route_prohibited_on_lookup(self):
try:
class RootController(object):
@expose(route='some-path')
def _lookup(self):
return 'Hello, World!'
except ValueError:
pass
else:
raise AssertionError(
'_lookup cannot be used with a custom path segment'
)
def test_custom_route_prohibited_on_default(self):
try:
class RootController(object):
@expose(route='some-path')
def _default(self):
return 'Hello, World!'
except ValueError:
pass
else:
raise AssertionError(
'_default cannot be used with a custom path segment'
)
def test_custom_route_prohibited_on_route(self):
try:
class RootController(object):
@expose(route='some-path')
def _route(self):
return 'Hello, World!'
except ValueError:
pass
else:
raise AssertionError(
'_route cannot be used with a custom path segment'
)
def test_custom_route_with_generic_controllers(self):
class RootController(object):
@expose(route='some-path', generic=True)
def foo(self):
return 'Hello, World!'
@foo.when(method='POST')
def handle_post(self):
return 'POST!'
app = TestApp(Pecan(RootController()))
r = app.get('/some-path/')
assert r.status_int == 200
assert r.body == b_('Hello, World!')
r = app.get('/foo/', expect_errors=True)
assert r.status_int == 404
r = app.post('/some-path/')
assert r.status_int == 200
assert r.body == b_('POST!')
r = app.post('/foo/', expect_errors=True)
assert r.status_int == 404
def test_custom_route_prohibited_on_generic_controllers(self):
try:
class RootController(object):
@expose(generic=True)
def foo(self):
return 'Hello, World!'
@foo.when(method='POST', route='some-path')
def handle_post(self):
return 'POST!'
except ValueError:
pass
else:
raise AssertionError(
'generic controllers cannot be used with a custom path segment'
)
def test_invalid_route_arguments(self):
class C(object):
def secret(self):
return {}
self.assertRaises(TypeError, route)
self.assertRaises(TypeError, route, 'some-path', lambda x: x)
self.assertRaises(TypeError, route, 'some-path', C.secret)
self.assertRaises(TypeError, route, C, {}, C())
for path in (
'VARIED-case-PATH',
'this,custom,path',
'123-path',
'path(with-parens)',
'path;with;semicolons',
'path:with:colons',
'v2.0',
'~username',
'somepath!',
'four*four',
'one+two',
'@twitterhandle',
'package=pecan'
):
handler = C()
route(C, path, handler)
assert getattr(C, path, handler)
self.assertRaises(ValueError, route, C, '/path/', C())
self.assertRaises(ValueError, route, C, '.', C())
self.assertRaises(ValueError, route, C, '..', C())
self.assertRaises(ValueError, route, C, 'path?', C())
self.assertRaises(ValueError, route, C, 'percent%20encoded', C())
| StackStorm/pecan | pecan/tests/test_base.py | Python | bsd-3-clause | 71,720 |
from __future__ import absolute_import
import sys, os, re, inspect
import imp
try:
import hashlib
except ImportError:
import md5 as hashlib
from distutils.core import Distribution, Extension
from distutils.command.build_ext import build_ext
import Cython
from ..Compiler.Main import Context, CompilationOptions, default_options
from ..Compiler.ParseTreeTransforms import (CythonTransform,
SkipDeclarations, AnalyseDeclarationsTransform, EnvTransform)
from ..Compiler.TreeFragment import parse_from_strings
from .Dependencies import strip_string_literals, cythonize, cached_function
from ..Compiler import Pipeline, Nodes
from ..Utils import get_cython_cache_dir
import cython as cython_module
# A utility function to convert user-supplied ASCII strings to unicode.
if sys.version_info[0] < 3:
def to_unicode(s):
if not isinstance(s, unicode):
return s.decode('ascii')
else:
return s
else:
to_unicode = lambda x: x
class UnboundSymbols(EnvTransform, SkipDeclarations):
def __init__(self):
CythonTransform.__init__(self, None)
self.unbound = set()
def visit_NameNode(self, node):
if not self.current_env().lookup(node.name):
self.unbound.add(node.name)
return node
def __call__(self, node):
super(UnboundSymbols, self).__call__(node)
return self.unbound
@cached_function
def unbound_symbols(code, context=None):
code = to_unicode(code)
if context is None:
context = Context([], default_options)
from ..Compiler.ParseTreeTransforms import AnalyseDeclarationsTransform
tree = parse_from_strings('(tree fragment)', code)
for phase in Pipeline.create_pipeline(context, 'pyx'):
if phase is None:
continue
tree = phase(tree)
if isinstance(phase, AnalyseDeclarationsTransform):
break
try:
import builtins
except ImportError:
import __builtin__ as builtins
return UnboundSymbols()(tree) - set(dir(builtins))
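# Illustrative sketch of what the pass above computes (exact results can
# vary between Cython versions):
#
#   unbound_symbols("x + y")   # -> set(['x', 'y'])
#   unbound_symbols("len(x)")  # -> set(['x'])  ('len' is a builtin)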
def unsafe_type(arg, context=None):
py_type = type(arg)
if py_type is int:
return 'long'
else:
return safe_type(arg, context)
def safe_type(arg, context=None):
py_type = type(arg)
if py_type in [list, tuple, dict, str]:
return py_type.__name__
elif py_type is complex:
return 'double complex'
elif py_type is float:
return 'double'
elif py_type is bool:
return 'bint'
elif 'numpy' in sys.modules and isinstance(arg, sys.modules['numpy'].ndarray):
return 'numpy.ndarray[numpy.%s_t, ndim=%s]' % (arg.dtype.name, arg.ndim)
else:
for base_type in py_type.mro():
if base_type.__module__ in ('__builtin__', 'builtins'):
return 'object'
module = context.find_module(base_type.__module__, need_pxd=False)
if module:
entry = module.lookup(base_type.__name__)
if entry.is_type:
return '%s.%s' % (base_type.__module__, base_type.__name__)
return 'object'
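# Examples of the mapping above (illustrative):
#
#   safe_type(1.0)        # -> 'double'
#   safe_type(True)       # -> 'bint'
#   safe_type([1, 2, 3])  # -> 'list'
#   safe_type(object())   # -> 'object'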
def _get_build_extension():
dist = Distribution()
# Ensure the build respects distutils configuration by parsing
# the configuration files
config_files = dist.find_config_files()
dist.parse_config_files(config_files)
build_extension = build_ext(dist)
build_extension.finalize_options()
return build_extension
@cached_function
def _create_context(cython_include_dirs):
return Context(list(cython_include_dirs), default_options)
def cython_inline(code,
get_type=unsafe_type,
lib_dir=os.path.join(get_cython_cache_dir(), 'inline'),
cython_include_dirs=['.'],
force=False,
quiet=False,
locals=None,
globals=None,
**kwds):
if get_type is None:
get_type = lambda x: 'object'
code = to_unicode(code)
orig_code = code
code, literals = strip_string_literals(code)
code = strip_common_indent(code)
ctx = _create_context(tuple(cython_include_dirs))
if locals is None:
locals = inspect.currentframe().f_back.f_back.f_locals
if globals is None:
globals = inspect.currentframe().f_back.f_back.f_globals
try:
for symbol in unbound_symbols(code):
if symbol in kwds:
continue
elif symbol in locals:
kwds[symbol] = locals[symbol]
elif symbol in globals:
kwds[symbol] = globals[symbol]
else:
print("Couldn't find ", symbol)
except AssertionError:
if not quiet:
# Parsing from strings not fully supported (e.g. cimports).
print("Could not parse code as a string (to extract unbound symbols).")
cimports = []
    for name, arg in list(kwds.items()):  # snapshot: kwds is mutated below
        if arg is cython_module:
            cimports.append('\ncimport cython as %s' % name)
            del kwds[name]
    arg_names = sorted(kwds)
arg_sigs = tuple([(get_type(kwds[arg], ctx), arg) for arg in arg_names])
key = orig_code, arg_sigs, sys.version_info, sys.executable, Cython.__version__
module_name = "_cython_inline_" + hashlib.md5(str(key).encode('utf-8')).hexdigest()
if module_name in sys.modules:
module = sys.modules[module_name]
else:
build_extension = None
if cython_inline.so_ext is None:
# Figure out and cache current extension suffix
build_extension = _get_build_extension()
cython_inline.so_ext = build_extension.get_ext_filename('')
module_path = os.path.join(lib_dir, module_name + cython_inline.so_ext)
if not os.path.exists(lib_dir):
os.makedirs(lib_dir)
if force or not os.path.isfile(module_path):
cflags = []
c_include_dirs = []
qualified = re.compile(r'([.\w]+)[.]')
for type, _ in arg_sigs:
m = qualified.match(type)
if m:
cimports.append('\ncimport %s' % m.groups()[0])
# one special case
if m.groups()[0] == 'numpy':
import numpy
c_include_dirs.append(numpy.get_include())
# cflags.append('-Wno-unused')
module_body, func_body = extract_func_code(code)
params = ', '.join(['%s %s' % a for a in arg_sigs])
module_code = """
%(module_body)s
%(cimports)s
def __invoke(%(params)s):
%(func_body)s
return locals()
""" % {'cimports': '\n'.join(cimports),
'module_body': module_body,
'params': params,
'func_body': func_body }
for key, value in literals.items():
module_code = module_code.replace(key, value)
pyx_file = os.path.join(lib_dir, module_name + '.pyx')
fh = open(pyx_file, 'w')
try:
fh.write(module_code)
finally:
fh.close()
extension = Extension(
name = module_name,
sources = [pyx_file],
include_dirs = c_include_dirs,
extra_compile_args = cflags)
if build_extension is None:
build_extension = _get_build_extension()
build_extension.extensions = cythonize([extension], include_path=cython_include_dirs, quiet=quiet)
build_extension.build_temp = os.path.dirname(pyx_file)
build_extension.build_lib = lib_dir
build_extension.run()
module = imp.load_dynamic(module_name, module_path)
arg_list = [kwds[arg] for arg in arg_names]
return module.__invoke(*arg_list)
# Cached extension suffix used by cython_inline above; the None placeholder
# is replaced with the real suffix on the first cython_inline invocation.
cython_inline.so_ext = None
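# Minimal usage sketch (assumes a working C compiler and Cython build
# environment; note that this function is normally reached through
# cython.inline, which accounts for the f_back.f_back frame hops above):
#
#   import cython
#   a, b = 10, 20
#   cython.inline("return a + b")  # unbound a/b come from the caller -> 30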
non_space = re.compile('[^ ]')
def strip_common_indent(code):
min_indent = None
lines = code.split('\n')
for line in lines:
match = non_space.search(line)
if not match:
continue # blank
indent = match.start()
if line[indent] == '#':
continue # comment
elif min_indent is None or min_indent > indent:
min_indent = indent
for ix, line in enumerate(lines):
match = non_space.search(line)
        if not match or line[match.start()] == '#':
continue
else:
lines[ix] = line[min_indent:]
return '\n'.join(lines)
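# Quick sketch of the helper above:
#
#   strip_common_indent("    x = 1\n    y = 2")  # -> "x = 1\ny = 2"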
module_statement = re.compile(r'^((cdef +(extern|class))|cimport|(from .+ cimport)|(from .+ import +[*]))')
def extract_func_code(code):
module = []
function = []
current = function
code = code.replace('\t', ' ')
lines = code.split('\n')
for line in lines:
if not line.startswith(' '):
if module_statement.match(line):
current = module
else:
current = function
current.append(line)
return '\n'.join(module), ' ' + '\n '.join(function)
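# Illustrative split performed by extract_func_code: top-level cimport /
# cdef extern lines land in the module section, everything else in the
# generated function body:
#
#   extract_func_code("cimport numpy\nreturn x")
#   # -> ('cimport numpy', '    return x')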
try:
from inspect import getcallargs
except ImportError:
def getcallargs(func, *arg_values, **kwd_values):
all = {}
args, varargs, kwds, defaults = inspect.getargspec(func)
if varargs is not None:
all[varargs] = arg_values[len(args):]
for name, value in zip(args, arg_values):
all[name] = value
for name, value in kwd_values.items():
if name in args:
if name in all:
raise TypeError("Duplicate argument %s" % name)
all[name] = kwd_values.pop(name)
if kwds is not None:
all[kwds] = kwd_values
elif kwd_values:
raise TypeError("Unexpected keyword arguments: %s" % kwd_values.keys())
if defaults is None:
defaults = ()
first_default = len(args) - len(defaults)
for ix, name in enumerate(args):
if name not in all:
if ix >= first_default:
all[name] = defaults[ix - first_default]
else:
raise TypeError("Missing argument: %s" % name)
return all
def get_body(source):
ix = source.index(':')
if source[:5] == 'lambda':
return "return %s" % source[ix+1:]
else:
return source[ix+1:]
# Lots to be done here... It would be especially cool if compiled functions
# could invoke each other quickly.
class RuntimeCompiledFunction(object):
def __init__(self, f):
self._f = f
self._body = get_body(inspect.getsource(f))
def __call__(self, *args, **kwds):
all = getcallargs(self._f, *args, **kwds)
return cython_inline(self._body, locals=self._f.func_globals, globals=self._f.func_globals, **all)
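# Hypothetical direct use of the wrapper above (Python 2 era API -- note
# f.func_globals); in practice it backs the cython.compile decorator:
#
#   @RuntimeCompiledFunction
#   def double(x):
#       return x * 2
#
#   double(21)  # body is compiled via cython_inline -> 42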
| GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/Cython/Build/Inline.py | Python | agpl-3.0 | 10,977 |
# coding: utf-8
"""
api_blueprint.py
~~~~~~~~~~~~~~~~~
This module implements the main Job api for Viki.
:license: Apache2, see LICENSE for more details.
"""
from flask import Blueprint, jsonify, request
from vikid.job import Job
blueprint_name = 'api_blueprint'
template_folder_name = 'templates'
job = Job()
api_blueprint = Blueprint(blueprint_name,
__name__,
template_folder=template_folder_name)
# --- Api endpoints
@api_blueprint.route("/api/v1/jobs", methods=['GET'])
def jobs():
""" List all jobs """
return jsonify(job.get_jobs())
@api_blueprint.route("/api/v1/job/<string:job_name>", methods=['GET', 'POST', 'PUT', 'DELETE'])
def get_job(job_name):
""" Actions for a single job specified by name
GET: Gets details of a single job
POST: Creates a job out of specified details sent as post parameters
PUT: Updates a job with specified details sent as parameters
DELETE: Deletes a job with specified name
"""
ret = None
if request.method == 'GET':
# Retrieve a jobs details
ret = job.get_job_by_name(job_name)
if request.method == 'POST':
# Create job
# Requires "application/json" mime type and valid JSON body
        # containing a description and steps
job_config = str(request.get_json())
ret = job.create_job(job_name, job_config)
if request.method == 'PUT':
# Updated a job
# Requires "application/json" mime type and valid JSON body
# containing field/s to be updated
ret = job.update_job(job_name)
if request.method == 'DELETE':
# Deletes a job from the repository
ret = job.delete_job(job_name)
if ret is None:
ret = {"success":0, "message":"Failed"}
return jsonify(ret)
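# Example request against the endpoint above (host/port are hypothetical):
#
#   curl -X POST -H 'Content-Type: application/json' \
#        -d '{"description": "demo", "steps": ["echo hi"]}' \
#        http://localhost:5000/api/v1/job/demo-job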
@api_blueprint.route("/api/v1/job/<string:job_name>/run", methods=['POST'])
def run_job(job_name):
""" Run specific job by name """
return jsonify(job.run_job(job_name))
@api_blueprint.route("/api/v1/job/<string:job_name>/output", methods=['GET'])
def output_job(job_name):
""" Get the last run's output of a specific job """
return jsonify(job.output_job(job_name))
@api_blueprint.route("/api/v1/3laws", methods=['GET'])
def three_laws():
""" The three laws of robotics easter-egg """
return jsonify('A robot may not injure a human being or, through inaction, allow a human being to come to harm. ' +
'A robot must obey the orders given it by human beings except where such orders would conflict with the First Law. ' +
'A robot must protect its own existence as long as such protection does not conflict with the First or Second Laws.')
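# Example usage (a sketch, not part of the original module): register the
# blueprint on a Flask application and drive the endpoints with Flask's test
# client. The JSON payload fields follow the docstrings above.
if __name__ == '__main__':
    from flask import Flask
    app = Flask(__name__)
    app.register_blueprint(api_blueprint)
    with app.test_client() as client:
        # List all jobs
        print(client.get('/api/v1/jobs').get_json())
        # Create a job named "demo" (requires an application/json body)
        print(client.post('/api/v1/job/demo',
                          json={"description": "demo job",
                                "steps": ["echo hello"]}).get_json())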
| shanahanjrs/vikid | vikid/blueprints/api_blueprint.py | Python | apache-2.0 | 2,707 |
# -*- coding: utf-8 -*-
""" Sahana Eden Common Alerting Protocol (CAP) Model
@copyright: 2009-2014 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ["S3CAPModel",
"cap_info_labels",
"cap_alert_is_template",
"cap_rheader",
"cap_gis_location_xml_post_parse",
"cap_gis_location_xml_post_render",
]
import datetime
import os           # Used for the stylesheet path in CAPImportFeed
import sys          # Used for exception reporting in CAPImportFeed
import urllib2      # Needed for quoting & error handling on fetch
try:
from cStringIO import StringIO # Faster, where available
except:
from StringIO import StringIO
from gluon import *
from gluon.storage import Storage
from gluon.tools import fetch
from ..s3 import *
# =============================================================================
class S3CAPModel(S3Model):
"""
CAP: Common Alerting Protocol
- this module is a non-functional stub
http://eden.sahanafoundation.org/wiki/BluePrint/Messaging#CAP
"""
names = ["cap_alert",
"cap_alert_represent",
"cap_info",
"cap_info_represent",
"cap_resource",
"cap_area",
"cap_area_represent",
"cap_area_location",
"cap_area_tag",
"cap_info_category_opts",
]
def model(self):
T = current.T
db = current.db
settings = current.deployment_settings
add_components = self.add_components
configure = self.configure
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
# ---------------------------------------------------------------------
# List of Incident Categories -- copied from irs module <--
# @ToDo: Switch to using event_incident_type
#
# The keys are based on the Canadian ems.incident hierarchy, with a
# few extra general versions added to 'other'
# The values are meant for end-users, so can be customised as-required
# NB It is important that the meaning of these entries is not changed
# as otherwise this hurts our ability to do synchronisation
# Entries can be hidden from user view in the controller.
# Additional sets of 'translations' can be added to the tuples.
cap_incident_type_opts = {
"animalHealth.animalDieOff": T("Animal Die Off"),
"animalHealth.animalFeed": T("Animal Feed"),
"aviation.aircraftCrash": T("Aircraft Crash"),
"aviation.aircraftHijacking": T("Aircraft Hijacking"),
"aviation.airportClosure": T("Airport Closure"),
"aviation.airspaceClosure": T("Airspace Closure"),
"aviation.noticeToAirmen": T("Notice to Airmen"),
"aviation.spaceDebris": T("Space Debris"),
"civil.demonstrations": T("Demonstrations"),
"civil.dignitaryVisit": T("Dignitary Visit"),
"civil.displacedPopulations": T("Displaced Populations"),
"civil.emergency": T("Civil Emergency"),
"civil.looting": T("Looting"),
"civil.publicEvent": T("Public Event"),
"civil.riot": T("Riot"),
"civil.volunteerRequest": T("Volunteer Request"),
"crime": T("Crime"),
"crime.bomb": T("Bomb"),
"crime.bombExplosion": T("Bomb Explosion"),
"crime.bombThreat": T("Bomb Threat"),
"crime.dangerousPerson": T("Dangerous Person"),
"crime.drugs": T("Drugs"),
"crime.homeCrime": T("Home Crime"),
"crime.illegalImmigrant": T("Illegal Immigrant"),
"crime.industrialCrime": T("Industrial Crime"),
"crime.poisoning": T("Poisoning"),
"crime.retailCrime": T("Retail Crime"),
"crime.shooting": T("Shooting"),
"crime.stowaway": T("Stowaway"),
"crime.terrorism": T("Terrorism"),
"crime.vehicleCrime": T("Vehicle Crime"),
"fire": T("Fire"),
"fire.forestFire": T("Forest Fire"),
"fire.hotSpot": T("Hot Spot"),
"fire.industryFire": T("Industry Fire"),
"fire.smoke": T("Smoke"),
"fire.urbanFire": T("Urban Fire"),
"fire.wildFire": T("Wild Fire"),
"flood": T("Flood"),
"flood.damOverflow": T("Dam Overflow"),
"flood.flashFlood": T("Flash Flood"),
"flood.highWater": T("High Water"),
"flood.overlandFlowFlood": T("Overland Flow Flood"),
"flood.tsunami": T("Tsunami"),
"geophysical.avalanche": T("Avalanche"),
"geophysical.earthquake": T("Earthquake"),
"geophysical.lahar": T("Lahar"),
"geophysical.landslide": T("Landslide"),
"geophysical.magneticStorm": T("Magnetic Storm"),
"geophysical.meteorite": T("Meteorite"),
"geophysical.pyroclasticFlow": T("Pyroclastic Flow"),
"geophysical.pyroclasticSurge": T("Pyroclastic Surge"),
"geophysical.volcanicAshCloud": T("Volcanic Ash Cloud"),
"geophysical.volcanicEvent": T("Volcanic Event"),
"hazardousMaterial": T("Hazardous Material"),
"hazardousMaterial.biologicalHazard": T("Biological Hazard"),
"hazardousMaterial.chemicalHazard": T("Chemical Hazard"),
"hazardousMaterial.explosiveHazard": T("Explosive Hazard"),
"hazardousMaterial.fallingObjectHazard": T("Falling Object Hazard"),
"hazardousMaterial.infectiousDisease": T("Infectious Disease (Hazardous Material)"),
"hazardousMaterial.poisonousGas": T("Poisonous Gas"),
"hazardousMaterial.radiologicalHazard": T("Radiological Hazard"),
"health.infectiousDisease": T("Infectious Disease"),
"health.infestation": T("Infestation"),
"ice.iceberg": T("Iceberg"),
"ice.icePressure": T("Ice Pressure"),
"ice.rapidCloseLead": T("Rapid Close Lead"),
"ice.specialIce": T("Special Ice"),
"marine.marineSecurity": T("Marine Security"),
"marine.nauticalAccident": T("Nautical Accident"),
"marine.nauticalHijacking": T("Nautical Hijacking"),
"marine.portClosure": T("Port Closure"),
"marine.specialMarine": T("Special Marine"),
"meteorological.blizzard": T("Blizzard"),
"meteorological.blowingSnow": T("Blowing Snow"),
"meteorological.drought": T("Drought"),
"meteorological.dustStorm": T("Dust Storm"),
"meteorological.fog": T("Fog"),
"meteorological.freezingDrizzle": T("Freezing Drizzle"),
"meteorological.freezingRain": T("Freezing Rain"),
"meteorological.freezingSpray": T("Freezing Spray"),
"meteorological.hail": T("Hail"),
"meteorological.hurricane": T("Hurricane"),
"meteorological.rainFall": T("Rain Fall"),
"meteorological.snowFall": T("Snow Fall"),
"meteorological.snowSquall": T("Snow Squall"),
"meteorological.squall": T("Squall"),
"meteorological.stormSurge": T("Storm Surge"),
"meteorological.thunderstorm": T("Thunderstorm"),
"meteorological.tornado": T("Tornado"),
"meteorological.tropicalStorm": T("Tropical Storm"),
"meteorological.waterspout": T("Waterspout"),
"meteorological.winterStorm": T("Winter Storm"),
"missingPerson": T("Missing Person"),
# http://en.wikipedia.org/wiki/Amber_Alert
"missingPerson.amberAlert": T("Child Abduction Emergency"),
"missingPerson.missingVulnerablePerson": T("Missing Vulnerable Person"),
# http://en.wikipedia.org/wiki/Silver_Alert
"missingPerson.silver": T("Missing Senior Citizen"),
"publicService.emergencySupportFacility": T("Emergency Support Facility"),
"publicService.emergencySupportService": T("Emergency Support Service"),
"publicService.schoolClosure": T("School Closure"),
"publicService.schoolLockdown": T("School Lockdown"),
"publicService.serviceOrFacility": T("Service or Facility"),
"publicService.transit": T("Transit"),
"railway.railwayAccident": T("Railway Accident"),
"railway.railwayHijacking": T("Railway Hijacking"),
"roadway.bridgeClosure": T("Bridge Closed"),
"roadway.hazardousRoadConditions": T("Hazardous Road Conditions"),
"roadway.roadwayAccident": T("Road Accident"),
"roadway.roadwayClosure": T("Road Closed"),
"roadway.roadwayDelay": T("Road Delay"),
"roadway.roadwayHijacking": T("Road Hijacking"),
"roadway.roadwayUsageCondition": T("Road Usage Condition"),
"roadway.trafficReport": T("Traffic Report"),
"temperature.arcticOutflow": T("Arctic Outflow"),
"temperature.coldWave": T("Cold Wave"),
"temperature.flashFreeze": T("Flash Freeze"),
"temperature.frost": T("Frost"),
"temperature.heatAndHumidity": T("Heat and Humidity"),
"temperature.heatWave": T("Heat Wave"),
"temperature.windChill": T("Wind Chill"),
"wind.galeWind": T("Gale Wind"),
"wind.hurricaneForceWind": T("Hurricane Force Wind"),
"wind.stormForceWind": T("Storm Force Wind"),
"wind.strongWind": T("Strong Wind"),
"other.buildingCollapsed": T("Building Collapsed"),
"other.peopleTrapped": T("People Trapped"),
"other.powerFailure": T("Power Failure"),
}
# ---------------------------------------------------------------------
# CAP alerts
#
# CAP alert Status Code (status)
cap_alert_status_code_opts = OrderedDict([
("Actual", T("Actual - actionable by all targeted recipients")),
("Exercise", T("Exercise - only for designated participants (decribed in note)")),
("System", T("System - for internal functions")),
("Test", T("Test - testing, all recipients disregard")),
("Draft", T("Draft - not actionable in its current form")),
])
# CAP alert message type (msgType)
cap_alert_msgType_code_opts = OrderedDict([
("Alert", T("Alert: Initial information requiring attention by targeted recipients")),
("Update", T("Update: Update and supercede earlier message(s)")),
("Cancel", T("Cancel: Cancel earlier message(s)")),
("Ack", T("Ack: Acknowledge receipt and acceptance of the message(s)")),
("Error", T("Error: Indicate rejection of the message(s)")),
])
# CAP alert scope
cap_alert_scope_code_opts = OrderedDict([
("Public", T("Public - unrestricted audiences")),
("Restricted", T("Restricted - to users with a known operational requirement (described in restriction)")),
("Private", T("Private - only to specified addresses (mentioned as recipients)"))
])
# CAP info categories
cap_info_category_opts = OrderedDict([
("Geo", T("Geophysical (inc. landslide)")),
("Met", T("Meteorological (inc. flood)")),
("Safety", T("General emergency and public safety")),
("Security", T("Law enforcement, military, homeland and local/private security")),
("Rescue", T("Rescue and recovery")),
("Fire", T("Fire suppression and rescue")),
("Health", T("Medical and public health")),
("Env", T("Pollution and other environmental")),
("Transport", T("Public and private transportation")),
("Infra", T("Utility, telecommunication, other non-transport infrastructure")),
("CBRNE", T("Chemical, Biological, Radiological, Nuclear or High-Yield Explosive threat or attack")),
("Other", T("Other events")),
])
tablename = "cap_alert"
define_table(tablename,
Field("is_template", "boolean",
readable = False,
writable = True,
),
Field("template_id", "reference cap_alert",
label = T("Template"),
ondelete = "RESTRICT",
represent = self.template_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "cap_alert.id",
self.template_represent,
filterby="is_template",
filter_opts=(True,)
)),
comment = T("Apply a template"),
),
Field("template_title",
label = T("Template Title"),
),
Field("template_settings", "text",
default = "{}",
readable = False,
),
Field("identifier", unique=True, length=128,
default = self.generate_identifier,
label = T("Identifier"),
),
Field("sender",
label = T("Sender"),
default = self.generate_sender,
# @todo: can not be empty in alerts (validator!)
),
s3_datetime("sent",
default = "now",
writable = False,
),
Field("status",
default = "Draft",
label = T("Status"),
requires = IS_IN_SET(cap_alert_status_code_opts),
),
Field("msg_type",
label = T("Message Type"),
requires = IS_EMPTY_OR(
IS_IN_SET(cap_alert_msgType_code_opts)
),
),
Field("source",
label = T("Source"),
),
Field("scope",
label = T("Scope"),
requires = IS_EMPTY_OR(
IS_IN_SET(cap_alert_scope_code_opts)
),
),
# Text describing the restriction for scope=restricted
Field("restriction", "text",
label = T("Restriction"),
),
Field("addresses", "list:string",
label = T("Recipients"),
represent = self.list_string_represent,
#@ToDo: provide a better way to add multiple addresses,
# do not ask the user to delimit it themselves
# this should eventually use the CAP contacts
#widget = S3CAPAddressesWidget,
),
Field("codes", "text",
default = settings.get_cap_codes(),
label = T("Codes"),
represent = S3KeyValueWidget.represent,
widget = S3KeyValueWidget(),
),
Field("note", "text",
label = T("Note"),
),
Field("reference", "list:reference cap_alert",
label = T("Reference"),
represent = S3Represent(lookup = tablename,
fields = ["msg_type", "sent", "sender"],
field_sep = " - ",
multiple = True,
),
# @ToDo: This should not be manually entered,
# needs a widget
#widget = S3ReferenceWidget(table,
# one_to_many=True,
# allow_create=False),
),
# @ToDo: Switch to using event_incident_type_id
Field("incidents", "list:string",
label = T("Incidents"),
represent = S3Represent(options = cap_incident_type_opts,
multiple = True),
requires = IS_EMPTY_OR(
IS_IN_SET(cap_incident_type_opts,
multiple = True,
sort = True,
)),
widget = S3MultiSelectWidget(),
),
*s3_meta_fields())
filter_widgets = [
S3TextFilter(["identifier",
"sender",
"incidents",
"cap_info.headline",
"cap_info.event",
],
label = T("Search"),
comment = T("Search for an Alert by sender, incident, headline or event."),
),
S3OptionsFilter("info.category",
label = T("Category"),
options = cap_info_category_opts,
),
S3LocationFilter("location.location_id",
label = T("Location(s)"),
# options = gis.get_countries().keys(),
),
S3OptionsFilter("info.language",
label = T("Language"),
),
]
configure(tablename,
context = {"location": "location.location_id",
},
filter_widgets = filter_widgets,
onvalidation = self.cap_alert_form_validation,
)
# Components
add_components(tablename,
cap_area = "alert_id",
cap_area_location = {"name": "location",
"joinby": "alert_id",
},
cap_info = "alert_id",
cap_resource = "alert_id",
)
self.set_method("cap", "alert",
method = "import_feed",
action = CAPImportFeed())
if crud_strings["cap_template"]:
crud_strings[tablename] = crud_strings["cap_template"]
else:
ADD_ALERT = T("Create Alert")
crud_strings[tablename] = Storage(
label_create = ADD_ALERT,
title_display = T("Alert Details"),
title_list = T("Alerts"),
# If already-published, this should create a new "Update"
# alert instead of modifying the original
title_update = T("Edit Alert"),
title_upload = T("Import Alerts"),
label_list_button = T("List Alerts"),
label_delete_button = T("Delete Alert"),
msg_record_created = T("Alert created"),
msg_record_modified = T("Alert modified"),
msg_record_deleted = T("Alert deleted"),
msg_list_empty = T("No alerts to show"))
alert_represent = S3Represent(lookup = tablename,
fields = ["msg_type", "sent", "sender"],
field_sep = " - ")
alert_id = S3ReusableField("alert_id", "reference %s" % tablename,
comment = T("The alert message containing this information"),
label = T("Alert"),
ondelete = "CASCADE",
represent = alert_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "cap_alert.id",
alert_represent)),
)
# ---------------------------------------------------------------------
# CAP info segments
#
cap_info_responseType_opts = OrderedDict([
("Shelter", T("Shelter - Take shelter in place or per instruction")),
("Evacuate", T("Evacuate - Relocate as instructed in the instruction")),
("Prepare", T("Prepare - Make preparations per the instruction")),
("Execute", T("Execute - Execute a pre-planned activity identified in instruction")),
("Avoid", T("Avoid - Avoid the subject event as per the instruction")),
("Monitor", T("Monitor - Attend to information sources as described in instruction")),
("Assess", T("Assess - Evaluate the information in this message.")),
("AllClear", T("AllClear - The subject event no longer poses a threat")),
("None", T("None - No action recommended")),
])
cap_info_urgency_opts = OrderedDict([
("Immediate", T("Response action should be taken immediately")),
("Expected", T("Response action should be taken soon (within next hour)")),
("Future", T("Responsive action should be taken in the near future")),
("Past", T("Responsive action is no longer required")),
("Unknown", T("Unknown")),
])
cap_info_severity_opts = OrderedDict([
("Extreme", T("Extraordinary threat to life or property")),
("Severe", T("Significant threat to life or property")),
("Moderate", T("Possible threat to life or property")),
("Minor", T("Minimal to no known threat to life or property")),
("Unknown", T("Severity unknown")),
])
cap_info_certainty_opts = OrderedDict([
("Observed", T("Observed: determined to have occurred or to be ongoing")),
("Likely", T("Likely (p > ~50%)")),
("Possible", T("Possible but not likely (p <= ~50%)")),
("Unlikely", T("Not expected to occur (p ~ 0)")),
("Unknown", T("Certainty unknown")),
])
# CAP info priority
priorities = settings.get_cap_priorities()
try:
cap_info_priority_opts = OrderedDict([(f[0], f[1]) for f in priorities]
+ [("Undefined", T("Undefined"))])
except IndexError:
raise ValueError("CAP priorities setting is not structured properly")
# @ToDo: i18n: Need label=T("")
tablename = "cap_info"
define_table(tablename,
alert_id(),
Field("is_template", "boolean",
default = False,
readable = False,
writable = False,
),
Field("template_info_id", "reference cap_info",
ondelete = "RESTRICT",
readable = False,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "cap_info.id",
self.template_represent,
filterby="is_template",
filter_opts=(True,)
)),
widget = S3HiddenWidget(),
),
Field("template_settings", "text",
readable = False,
),
Field("language",
default = "en",
requires = IS_EMPTY_OR(
IS_IN_SET(settings.get_cap_languages())
),
),
Field("category", "list:string",
represent = S3Represent(options = cap_info_category_opts,
multiple = True,
),
required = True,
requires = IS_IN_SET(cap_info_category_opts,
multiple = True,
),
widget = S3MultiSelectWidget(),
), # 1 or more allowed
Field("event",
required = True,
),
Field("response_type", "list:string",
represent = S3Represent(options = cap_info_responseType_opts,
multiple = True,
),
requires = IS_IN_SET(cap_info_responseType_opts,
multiple = True),
widget = S3MultiSelectWidget(),
), # 0 or more allowed
Field("priority",
requires = IS_EMPTY_OR(
IS_IN_SET(cap_info_priority_opts)
),
),
Field("urgency",
required = True,
requires = IS_IN_SET(cap_info_urgency_opts),
),
Field("severity",
required = True,
requires = IS_IN_SET(cap_info_severity_opts),
),
Field("certainty",
required = True,
requires = IS_IN_SET(cap_info_certainty_opts),
),
Field("audience", "text"),
Field("event_code", "text",
default = settings.get_cap_event_codes(),
represent = S3KeyValueWidget.represent,
widget = S3KeyValueWidget(),
),
s3_datetime("effective"),
s3_datetime("onset"),
s3_datetime("expires",
past = 0,
),
Field("sender_name"),
Field("headline"),
Field("description", "text"),
Field("instruction", "text"),
Field("contact", "text"),
Field("web",
requires = IS_EMPTY_OR(IS_URL()),
),
Field("parameter", "text",
default = settings.get_cap_parameters(),
label = T("Parameters"),
represent = S3KeyValueWidget.represent,
widget = S3KeyValueWidget(),
),
*s3_meta_fields())
info_labels = cap_info_labels()
for field in info_labels:
db.cap_info[field].label = info_labels[field]
if crud_strings["cap_template_info"]:
crud_strings[tablename] = crud_strings["cap_template_info"]
else:
ADD_INFO = T("Add alert information")
crud_strings[tablename] = Storage(
label_create = ADD_INFO,
title_display = T("Alert information"),
title_list = T("Information entries"),
title_update = T("Update alert information"), # this will create a new "Update" alert?
title_upload = T("Import alert information"),
subtitle_list = T("Listing of alert information items"),
label_list_button = T("List information entries"),
label_delete_button = T("Delete Information"),
msg_record_created = T("Alert information created"),
msg_record_modified = T("Alert information modified"),
msg_record_deleted = T("Alert information deleted"),
msg_list_empty = T("No alert information to show"))
info_represent = S3Represent(lookup = tablename,
fields = ["language", "headline"],
field_sep = " - ")
info_id = S3ReusableField("info_id", "reference %s" % tablename,
label = T("Information Segment"),
ondelete = "CASCADE",
represent = info_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "cap_info.id",
info_represent)
),
sortby = "identifier",
)
configure(tablename,
#create_next = URL(f="info", args=["[id]", "area"]),
onaccept = self.info_onaccept,
)
# Components
add_components(tablename,
cap_resource = "info_id",
cap_area = "info_id",
)
# ---------------------------------------------------------------------
# CAP Resource segments
#
# Resource elements sit inside the Info segment of the export XML
# - however in most cases these would be common across all Infos, so in
# our internal UI we link these primarily to the Alert but still
# allow the option to differentiate by Info
#
tablename = "cap_resource"
define_table(tablename,
alert_id(writable = False,
),
info_id(),
self.super_link("doc_id", "doc_entity"),
Field("resource_desc",
requires = IS_NOT_EMPTY(),
),
Field("mime_type",
requires = IS_NOT_EMPTY(),
),
Field("size", "integer",
writable = False,
),
Field("uri",
# needs a special validation
writable = False,
),
#Field("file", "upload"),
Field("deref_uri", "text",
readable = False,
writable = False,
),
Field("digest",
writable = False,
),
*s3_meta_fields())
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Add Resource"),
title_display = T("Alert Resource"),
title_list = T("Resources"),
title_update = T("Edit Resource"),
subtitle_list = T("List Resources"),
label_list_button = T("List Resources"),
label_delete_button = T("Delete Resource"),
msg_record_created = T("Resource added"),
msg_record_modified = T("Resource updated"),
msg_record_deleted = T("Resource deleted"),
msg_list_empty = T("No resources currently defined for this alert"))
# @todo: complete custom form
crud_form = S3SQLCustomForm(#"name",
"info_id",
"resource_desc",
S3SQLInlineComponent("image",
label=T("Image"),
fields=["file",
],
),
S3SQLInlineComponent("document",
label=T("Document"),
fields=["file",
],
),
)
configure(tablename,
super_entity = "doc_entity",
crud_form = crud_form,
# Shouldn't be required if all UI actions go through alert controller & XSLT configured appropriately
create_onaccept = update_alert_id(tablename),
)
# ---------------------------------------------------------------------
# CAP Area segments
#
# Area elements sit inside the Info segment of the export XML
# - however in most cases these would be common across all Infos, so in
# our internal UI we link these primarily to the Alert but still
# allow the option to differentiate by Info
#
# Each <area> can have multiple elements which are one of <polygon>,
# <circle>, or <geocode>.
# <polygon> and <circle> are explicit geometry elements.
# <geocode> is a key-value pair in which the key is a standard
# geocoding system like SAME, FIPS, ZIP, and the value is a defined
# value in that system. The region described by the <area> is the
# union of the areas described by the individual elements, but the
# CAP spec advises that, if geocodes are included, the concrete
# geometry elements should outline the area specified by the geocodes,
# as not all recipients will have access to the meanings of the
# geocodes. However, since geocodes are a compact way to describe an
# area, it may be that they will be used without accompanying geometry,
# so we should not count on having <polygon> or <circle>.
#
# Geometry elements are each represented by a gis_location record, and
# linked to the cap_area record via the cap_area_location link table.
# For the moment, <circle> objects are stored with the center in the
# gis_location's lat, lon, and radius (in km) as a tag "radius" and
# value. ToDo: Later, we will add CIRCLESTRING WKT.
#
# Geocode elements are currently stored as key value pairs in the
# cap_area record.
#
# <area> can also specify a minimum altitude and maximum altitude
# ("ceiling"). These are stored in explicit fields for now, but could
# be replaced by key value pairs, if it is found that they are rarely
# used.
#
# (An alternative would be to have cap_area link to a gis_location_group
# record. In that case, the geocode tags could be stored in the
# gis_location_group's overall gis_location element's tags. The altitude
# could be stored in the overall gis_location's elevation, with ceiling
# stored in a tag. We could consider adding a maximum elevation field.)
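        # For illustration (the values are hypothetical), the three element
        # kinds look like this in the CAP XML:
        #   <polygon>38.0,-120.0 38.0,-119.0 39.0,-119.0 38.0,-120.0</polygon>
        #   <circle>32.9,-115.5 10</circle> (lat,lon then radius in km)
        #   <geocode><valueName>SAME</valueName><value>006085</value></geocode>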
tablename = "cap_area"
define_table(tablename,
alert_id(writable = False,
),
info_id(),
Field("name",
label = T("Area description"),
required = True,
),
Field("altitude", "integer"), # Feet above Sea-level in WGS84 (Specific or Minimum is using a range)
Field("ceiling", "integer"), # Feet above Sea-level in WGS84 (Maximum)
*s3_meta_fields())
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Add Area"),
title_display = T("Alert Area"),
title_list = T("Areas"),
title_update = T("Edit Area"),
subtitle_list = T("List Areas"),
label_list_button = T("List Areas"),
label_delete_button = T("Delete Area"),
msg_record_created = T("Area added"),
msg_record_modified = T("Area updated"),
msg_record_deleted = T("Area deleted"),
msg_list_empty = T("No areas currently defined for this alert"))
crud_form = S3SQLCustomForm("name",
"info_id",
# Not yet working with default formstyle or multiple=True
#S3SQLInlineComponent("location",
# name = "location",
# label = "",
# multiple = False,
# fields = [("", "location_id")],
# ),
S3SQLInlineComponent("tag",
name = "tag",
label = "",
fields = ["tag",
"value",
],
),
"altitude",
"ceiling",
)
area_represent = S3Represent(lookup=tablename)
configure(tablename,
#create_next = URL(f="area", args=["[id]", "location"]),
# Shouldn't be required if all UI actions go through alert controller & XSLT configured appropriately
create_onaccept = update_alert_id(tablename),
crud_form = crud_form,
)
# Components
add_components(tablename,
cap_area_location = {"name": "location",
"joinby": "area_id",
},
cap_area_tag = {"name": "tag",
"joinby": "area_id",
},
)
area_id = S3ReusableField("area_id", "reference %s" % tablename,
label = T("Area"),
ondelete = "CASCADE",
represent = area_represent,
requires = IS_ONE_OF(db, "cap_area.id",
area_represent),
)
# ToDo: Use a widget tailored to entering <polygon> and <circle>.
# Want to be able to enter them by drawing on the map.
# Also want to allow selecting existing locations that have
# geometry, maybe with some filtering so the list isn't cluttered
# with irrelevant locations.
tablename = "cap_area_location"
define_table(tablename,
alert_id(readable = False,
writable = False,
),
area_id(),
self.gis_location_id(
widget = S3LocationSelectorWidget2(points = False,
polygons = True,
show_map = True,
show_address = False,
show_postcode = False,
),
),
)
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Add Location"),
title_display = T("Alert Location"),
title_list = T("Locations"),
title_update = T("Edit Location"),
subtitle_list = T("List Locations"),
label_list_button = T("List Locations"),
label_delete_button = T("Delete Location"),
msg_record_created = T("Location added"),
msg_record_modified = T("Location updated"),
msg_record_deleted = T("Location deleted"),
msg_list_empty = T("No locations currently defined for this alert"))
configure(tablename,
# Shouldn't be required if all UI actions go through alert controller & XSLT configured appropriately
create_onaccept = update_alert_id(tablename),
)
# ---------------------------------------------------------------------
# Area Tags
# - Key-Value extensions
# - Used to hold for geocodes: key is the geocode system name, and
# value is the specific value for this area.
# - Could store other values here as well, to avoid dedicated fields
# in cap_area for rarely-used items like altitude and ceiling, but
# would have to distinguish those from geocodes.
#
# ToDo: Provide a mechanism for pre-loading geocodes that are not tied
# to individual areas.
# ToDo: Allow sharing the key-value pairs. Cf. Ruby on Rails tagging
# systems such as acts-as-taggable-on, which has a single table of tags
# used by all classes. Each tag record has the class and field that the
# tag belongs to, as well as the tag string. We'd want tag and value,
# but the idea is the same: There would be a table with tag / value
# pairs, and individual cap_area, event_event, org_whatever records
# would link to records in the tag table. So we actually would not have
# duplicate tag value records as we do now.
tablename = "cap_area_tag"
define_table(tablename,
area_id(),
# ToDo: Allow selecting from a dropdown list of pre-defined
# geocode system names.
Field("tag",
label = T("Geocode Name"),
),
# ToDo: Once the geocode system is selected, fetch a list
# of current values for that geocode system. Allow adding
# new values, e.g. with combo box menu.
Field("value",
label = T("Value"),
),
s3_comments(),
*s3_meta_fields())
#configure(tablename,
# deduplicate = self.cap_area_tag_deduplicate,
# )
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
return dict(cap_alert_id = alert_id,
cap_alert_represent = alert_represent,
cap_area_represent = area_represent,
cap_info_represent = info_represent,
cap_info_category_opts = cap_info_category_opts
)
# -------------------------------------------------------------------------
@staticmethod
def generate_identifier():
"""
Generate an identifier for a new form
"""
db = current.db
table = db.cap_alert
r = db().select(table.id,
limitby=(0, 1),
orderby=~table.id).first()
_time = datetime.datetime.strftime(datetime.datetime.utcnow(), "%Y/%m/%dT%H:%M:%S")
if r:
next_id = int(r.id) + 1
else:
next_id = 1
# Format: prefix-time+-timezone+sequence-suffix
settings = current.deployment_settings
prefix = settings.get_cap_identifier_prefix() or current.xml.domain
suffix = settings.get_cap_identifier_suffix()
return "%s-%s-%d%s%s" % \
(prefix, _time, next_id, ["", "-"][bool(suffix)], suffix)
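        # Illustration (hypothetical values): with prefix "eden.example.org",
        # next_id 42 and no suffix, this yields
        # "eden.example.org-2014/06/15T10:30:00-42"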
# -------------------------------------------------------------------------
@staticmethod
def generate_sender():
"""
Generate a sender for a new form
"""
try:
user_id = current.auth.user.id
except AttributeError:
return ""
return "%s/%d" % (current.xml.domain, user_id)
# -------------------------------------------------------------------------
@staticmethod
def template_represent(id, row=None):
"""
Represent an alert template concisely
"""
if row:
id = row.id
elif not id:
return current.messages["NONE"]
else:
db = current.db
table = db.cap_alert
row = db(table.id == id).select(table.is_template,
table.template_title,
# left = table.on(table.id == table.parent_item_category_id), Doesn't work
limitby=(0, 1)).first()
try:
# @ToDo: Should get headline from "info"?
if row.is_template:
return row.template_title
else:
                return current.s3db.cap_alert_represent(id)
except:
return current.messages.UNKNOWN_OPT
# -------------------------------------------------------------------------
@staticmethod
def list_string_represent(string, fmt=lambda v: v):
try:
if isinstance(string, list):
return ", ".join([fmt(i) for i in string])
elif isinstance(string, basestring):
return ", ".join([fmt(i) for i in string[1:-1].split("|")])
except IndexError:
return current.messages.UNKNOWN_OPT
return ""
# -------------------------------------------------------------------------
@staticmethod
def cap_alert_form_validation(form):
"""
On Validation for CAP alert form
"""
form_vars = form.vars
if form_vars.get("scope") == "Private" and not form_vars.get("addresses"):
form.errors["addresses"] = \
current.T("'Recipients' field mandatory in case of 'Private' scope")
return
# -------------------------------------------------------------------------
@staticmethod
def info_onaccept(form):
"""
After DB I/O
"""
if "vars" in form:
form_vars = form.vars
elif "id" in form:
form_vars = form
elif hasattr(form, "vars"):
form_vars = form.vars
else:
form_vars = form
info_id = form_vars.id
if not info_id:
return
db = current.db
atable = db.cap_alert
itable = db.cap_info
info = db(itable.id == info_id).select(itable.alert_id,
limitby=(0, 1)).first()
        if info:
            alert_id = info.alert_id
            if alert_id and cap_alert_is_template(alert_id):
                # Row.update() would only change the in-memory copy, so
                # write the flag back to the database instead
                db(itable.id == info_id).update(is_template = True)
        return True
# =============================================================================
def cap_info_labels():
"""
Labels for CAP info segments
"""
T = current.T
return dict(language=T("Language"),
category=T("Category"),
event=T("Event"),
response_type=T("Response type"),
urgency=T("Urgency"),
severity=T("Severity"),
certainty=T("Certainty"),
audience=T("Audience"),
event_code=T("Event code"),
effective=T("Effective"),
onset=T("Onset"),
expires=T("Expires at"),
sender_name=T("Sender's name"),
headline=T("Headline"),
description=T("Description"),
instruction=T("Instruction"),
web=T("URL"),
contact=T("Contact information"),
parameter=T("Parameters")
)
# =============================================================================
def cap_alert_is_template(alert_id):
"""
Tell whether an alert entry is a template
"""
if not alert_id:
return False
table = current.s3db.cap_alert
query = (table.id == alert_id)
r = current.db(query).select(table.is_template,
limitby=(0, 1)).first()
return r and r.is_template
# =============================================================================
def cap_rheader(r):
""" Resource Header for CAP module """
rheader = None
if r.representation == "html":
record = r.record
if record:
T = current.T
s3db = current.s3db
tablename = r.tablename
if tablename == "cap_alert":
record_id = record.id
table = s3db.cap_info
query = (table.alert_id == record_id)
row = current.db(query).select(table.id,
limitby=(0, 1)).first()
if record.is_template:
if not (row and row.id):
error = DIV(T("An alert needs to contain at least one info item."),
_class="error")
else:
error = ""
tabs = [(T("Template"), None),
(T("Information template"), "info"),
#(T("Area"), "area"),
#(T("Resource Files"), "resource"),
]
rheader_tabs = s3_rheader_tabs(r, tabs)
rheader = DIV(TABLE(TR(TH("%s: " % T("Template")),
TD(A(S3CAPModel.template_represent(record_id, record),
_href=URL(c="cap", f="template",
args=[record_id, "update"]))),
),
),
rheader_tabs,
error
)
else:
if not (row and row.id):
error = DIV(T("You need to create at least one alert information item in order to be able to broadcast this alert!"),
_class="error")
export_btn = ""
else:
error = ""
export_btn = A(DIV(_class="export_cap_large"),
_href=URL(c="cap", f="alert", args=["%s.cap" % record_id]),
_target="_blank",
)
table = s3db.cap_area
query = (table.alert_id == record_id)
row = current.db(query).select(table.id,
limitby=(0, 1)).first()
if row:
# We have an Area, so we can add Locations
location_tab = (T("Location"), "location")
else:
location_tab = ""
tabs = [(T("Alert Details"), None),
(T("Information"), "info"),
(T("Area"), "area"),
location_tab,
(T("Resource Files"), "resource"),
]
rheader_tabs = s3_rheader_tabs(r, tabs)
rheader = DIV(TABLE(TR(TH("%s: " % T("Alert")),
TD(A(s3db.cap_alert_represent(record_id, record),
_href=URL(c="cap", f="alert",
args=[record_id, "update"]))),
),
TR(export_btn)
),
rheader_tabs,
error
)
elif tablename == "cap_area":
# Shouldn't ever be called
tabs = [(T("Area"), None),
(T("Locations"), "location"),
#(T("Geocodes"), "tag"),
]
rheader_tabs = s3_rheader_tabs(r, tabs)
rheader = DIV(TABLE(TR(TH("%s: " % T("Alert")),
TD(A(s3db.cap_alert_represent(record.alert_id),
_href=URL(c="cap", f="alert",
args=[record.id, "update"])))
),
TR(TH("%s: " % T("Information")),
TD(A(s3db.cap_info_represent(record.info_id),
_href=URL(c="cap", f="info",
args=[record.info_id, "update"]))),
),
TR(TH("%s: " % T("Area")),
TD(A(s3db.cap_area_represent(record.id, record),
_href=URL(c="cap", f="area",
args=[record.id, "update"]))),
),
),
rheader_tabs
)
elif tablename == "cap_area_location":
# Shouldn't ever be called
# We need the rheader only for the link back to the area.
rheader = DIV(TABLE(TR(TH("%s: " % T("Area")),
TD(A(s3db.cap_area_represent(record.area_id),
_href=URL(c="cap", f="area",
args=[record.area_id, "update"]))),
),
))
elif tablename == "cap_info":
# Shouldn't ever be called
tabs = [(T("Information"), None),
(T("Resource Files"), "resource"),
]
if cap_alert_is_template(record.alert_id):
rheader_tabs = s3_rheader_tabs(r, tabs)
table = r.table
rheader = DIV(TABLE(TR(TH("%s: " % T("Template")),
TD(A(S3CAPModel.template_represent(record.alert_id),
_href=URL(c="cap", f="template",
args=[record.alert_id, "update"]))),
),
TR(TH("%s: " % T("Info template")),
TD(A(s3db.cap_info_represent(record.id, record),
_href=URL(c="cap", f="info",
args=[record.id, "update"]))),
)
),
rheader_tabs,
_class="cap_info_template_form"
)
current.response.s3.js_global.append('''i18n.cap_locked="%s"''' % T("Locked"))
else:
tabs.insert(1, (T("Areas"), "area"))
rheader_tabs = s3_rheader_tabs(r, tabs)
table = r.table
rheader = DIV(TABLE(TR(TH("%s: " % T("Alert")),
TD(A(s3db.cap_alert_represent(record.alert_id),
_href=URL(c="cap", f="alert",
args=[record.alert_id, "update"]))),
),
TR(TH("%s: " % T("Information")),
TD(A(s3db.cap_info_represent(record.id, record),
_href=URL(c="cap", f="info",
args=[record.id, "update"]))),
)
),
rheader_tabs
)
return rheader
# =============================================================================
def update_alert_id(tablename):
""" On-accept for area and resource records """
def func(form):
if "vars" in form:
form_vars = form.vars
elif "id" in form:
form_vars = form
elif hasattr(form, "vars"):
form_vars = form.vars
else:
form_vars = form
if form_vars.get("alert_id", None):
# Nothing to do
return
# Look up from the info/area
_id = form_vars.id
if not _id:
return
db = current.db
table = db[tablename]
if tablename == "cap_area_location":
area_id = form_vars.get("area_id", None)
if not area_id:
# Get the full record
item = db(table.id == _id).select(table.alert_id,
table.area_id,
limitby=(0, 1)).first()
try:
alert_id = item.alert_id
area_id = item.area_id
except:
# Nothing we can do
return
if alert_id:
# Nothing to do
return
atable = db.cap_area
area = db(atable.id == area_id).select(atable.alert_id,
limitby=(0, 1)).first()
try:
alert_id = area.alert_id
except:
# Nothing we can do
return
else:
info_id = form_vars.get("info_id", None)
if not info_id:
# Get the full record
item = db(table.id == _id).select(table.alert_id,
table.info_id,
limitby=(0, 1)).first()
try:
alert_id = item.alert_id
info_id = item.info_id
except:
# Nothing we can do
return
if alert_id:
# Nothing to do
return
itable = db.cap_info
info = db(itable.id == info_id).select(itable.alert_id,
limitby=(0, 1)).first()
try:
alert_id = info.alert_id
except:
# Nothing we can do
return
db(table.id == _id).update(alert_id = alert_id)
return func
# =============================================================================
def cap_gis_location_xml_post_parse(element, record):
"""
UNUSED - done in XSLT
Convert CAP polygon representation to WKT; extract circle lat lon.
Latitude and longitude in CAP are expressed as signed decimal values in
coordinate pairs:
latitude,longitude
The circle text consists of:
latitude,longitude radius
where the radius is in km.
Polygon text consists of a space separated sequence of at least 4
coordinate pairs where the first and last are the same.
lat1,lon1 lat2,lon2 lat3,lon3 ... lat1,lon1
"""
# @ToDo: Extract altitude and ceiling from the enclosing <area>, and
# compute an elevation value to apply to all enclosed gis_locations.
cap_polygons = element.xpath("cap_polygon")
if cap_polygons:
cap_polygon_text = cap_polygons[0].text
# CAP polygons and WKT have opposite separator conventions:
# CAP has spaces between coordinate pairs and within pairs the
# coordinates are separated by comma, and vice versa for WKT.
# Unfortunately, CAP and WKT (as we use it) also have opposite
# orders of lat and lon. CAP has lat lon, WKT has lon lat.
# Both close the polygon by repeating the first point.
cap_points_text = cap_polygon_text.split()
cap_points = [cpoint.split(",") for cpoint in cap_points_text]
# @ToDo: Should we try interpreting all the points as decimal numbers,
# and failing validation if they're wrong?
wkt_points = ["%s %s" % (cpoint[1], cpoint[0]) for cpoint in cap_points]
wkt_polygon_text = "POLYGON ((%s))" % ", ".join(wkt_points)
record.wkt = wkt_polygon_text
return
cap_circle_values = element.xpath("resource[@name='gis_location_tag']/data[@field='tag' and text()='cap_circle']/../data[@field='value']")
if cap_circle_values:
cap_circle_text = cap_circle_values[0].text
coords, radius = cap_circle_text.split()
lat, lon = coords.split(",")
try:
# If any of these fail to interpret as numbers, the circle was
# badly formatted. For now, we don't try to fail validation,
# but just don't set the lat, lon.
lat = float(lat)
lon = float(lon)
radius = float(radius)
except ValueError:
return
record.lat = lat
record.lon = lon
# Add a bounding box for the given radius, if it is not zero.
if radius > 0.0:
bbox = current.gis.get_bounds_from_radius(lat, lon, radius)
record.lat_min = bbox["lat_min"]
record.lon_min = bbox["lon_min"]
record.lat_max = bbox["lat_max"]
record.lon_max = bbox["lon_max"]
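# Illustration (hypothetical data): a <cap_polygon> value of
#   "38.0,-120.0 38.0,-119.0 39.0,-119.0 38.0,-120.0"
# becomes the WKT
#   "POLYGON ((-120.0 38.0, -119.0 38.0, -119.0 39.0, -120.0 38.0))"
# (note the lat/lon swap), while a cap_circle tag value "32.9,-115.5 10"
# sets lat=32.9, lon=-115.5 plus a bounding box for the 10 km radius.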
# =============================================================================
def cap_gis_location_xml_post_render(element, record):
"""
UNUSED - done in XSLT
Convert Eden WKT polygon (and eventually circle) representation to
CAP format and provide them in the rendered s3xml.
Not all internal formats have a parallel in CAP, but an effort is made
    to provide a reasonable substitute:
Polygons are supported.
Circles that were read in from CAP (and thus carry the original CAP
circle data) are supported.
Multipolygons are currently rendered as their bounding box.
Points are rendered as zero radius circles.
Latitude and longitude in CAP are expressed as signed decimal values in
coordinate pairs:
latitude,longitude
The circle text consists of:
latitude,longitude radius
where the radius is in km.
Polygon text consists of a space separated sequence of at least 4
coordinate pairs where the first and last are the same.
lat1,lon1 lat2,lon2 lat3,lon3 ... lat1,lon1
"""
# @ToDo: Can we rely on gis_feature_type == 3 to tell if the location is a
# polygon, or is it better to look for POLYGON in the wkt? For now, check
# both.
# @ToDo: CAP does not support multipolygons. Do we want to extract their
# outer polygon if passed MULTIPOLYGON wkt? For now, these are exported
# with their bounding box as the polygon.
# @ToDo: What if a point (gis_feature_type == 1) that is not a CAP circle
# has a non-point bounding box? Should it be rendered as a polygon for
# the bounding box?
try:
from lxml import etree
except:
# This won't fail, since we're in the middle of processing xml.
return
SubElement = etree.SubElement
s3xml = current.xml
TAG = s3xml.TAG
RESOURCE = TAG["resource"]
DATA = TAG["data"]
ATTRIBUTE = s3xml.ATTRIBUTE
NAME = ATTRIBUTE["name"]
FIELD = ATTRIBUTE["field"]
VALUE = ATTRIBUTE["value"]
loc_tablename = "gis_location"
tag_tablename = "gis_location_tag"
tag_fieldname = "tag"
val_fieldname = "value"
polygon_tag = "cap_polygon"
circle_tag = "cap_circle"
fallback_polygon_tag = "cap_polygon_fallback"
fallback_circle_tag = "cap_circle_fallback"
def __cap_gis_location_add_polygon(element, cap_polygon_text, fallback=False):
"""
Helper for cap_gis_location_xml_post_render that adds the CAP polygon
data to the current element in a gis_location_tag element.
"""
# Make a gis_location_tag.
tag_resource = SubElement(element, RESOURCE)
tag_resource.set(NAME, tag_tablename)
tag_field = SubElement(tag_resource, DATA)
# Add tag and value children.
tag_field.set(FIELD, tag_fieldname)
if fallback:
tag_field.text = fallback_polygon_tag
else:
tag_field.text = polygon_tag
val_field = SubElement(tag_resource, DATA)
val_field.set(FIELD, val_fieldname)
val_field.text = cap_polygon_text
def __cap_gis_location_add_circle(element, lat, lon, radius, fallback=False):
"""
Helper for cap_gis_location_xml_post_render that adds CAP circle
data to the current element in a gis_location_tag element.
"""
# Make a gis_location_tag.
tag_resource = SubElement(element, RESOURCE)
tag_resource.set(NAME, tag_tablename)
tag_field = SubElement(tag_resource, DATA)
# Add tag and value children.
tag_field.set(FIELD, tag_fieldname)
if fallback:
tag_field.text = fallback_circle_tag
else:
tag_field.text = circle_tag
val_field = SubElement(tag_resource, DATA)
val_field.set(FIELD, val_fieldname)
# Construct a CAP circle string: latitude,longitude radius
cap_circle_text = "%s,%s %s" % (lat, lon, radius)
val_field.text = cap_circle_text
# Sort out the geometry case by wkt, CAP tags, gis_feature_type, bounds,...
# Check the two cases for CAP-specific locations first, as those will have
# definite export values. For others, we'll attempt to produce either a
# circle or polygon: Locations with a bounding box will get a box polygon,
# points will get a zero-radius circle.
# Currently wkt is stripped out of gis_location records right here:
# https://github.com/flavour/eden/blob/master/modules/s3/s3resource.py#L1332
# https://github.com/flavour/eden/blob/master/modules/s3/s3resource.py#L1426
# https://github.com/flavour/eden/blob/master/modules/s3/s3resource.py#L3152
# Until we provide a way to configure that choice, this will not work for
# polygons.
wkt = record.get("wkt", None)
# WKT POLYGON: Although there is no WKT spec, according to every reference
# that deals with nested polygons, the outer, enclosing, polygon must be
# listed first. Hence, we extract only the first polygon, as CAP has no
# provision for nesting.
if wkt and wkt.startswith("POLYGON"):
# ToDo: Is it sufficient to test for adjacent (( to find the start of
# the polygon, or might there be whitespace between them?
start = wkt.find("((")
end = wkt.find(")")
if start >=0 and end >=0:
polygon_text = wkt[start + 2 : end]
points_text = polygon_text.split(",")
points = [p.split() for p in points_text]
cap_points_text = ["%s,%s" % (point[1], point[0]) for point in points]
cap_polygon_text = " ".join(cap_points_text)
__cap_gis_location_add_polygon(element, cap_polygon_text)
return
# Fall through if the wkt string was mal-formed.
# CAP circle stored in a gis_location_tag with tag = cap_circle.
# If there is a cap_circle tag, we don't need to do anything further, as
# export.xsl will use it. However, we don't know if there is a cap_circle
# tag...
#
# @ToDo: The export calls xml_post_render after processing a resource's
# fields, but before its components are added as children in the xml tree.
# If this were delayed til after the components were added, we could look
# there for the cap_circle gis_location_tag record. Since xml_post_parse
# isn't in use yet (except for this), maybe we could look at moving it til
# after the components?
#
# For now, with the xml_post_render before components: We could do a db
# query to check for a real cap_circle tag record, and not bother with
# creating fallbacks from bounding box or point...but we don't have to.
# Instead, just go ahead and add the fallbacks under different tag names,
# and let the export.xsl sort them out. This only wastes a little time
# compared to a db query.
# ToDo: MULTIPOLYGON -- Can stitch together the outer polygons in the
# multipolygon, but would need to assure all were the same handedness.
# The remaining cases are for locations that don't have either polygon wkt
# or a cap_circle tag.
# Bounding box: Make a four-vertex polygon from the bounding box.
# This is a fallback, as if there is a circle tag, we'll use that.
lon_min = record.get("lon_min", None)
lon_max = record.get("lon_max", None)
lat_min = record.get("lat_min", None)
lat_max = record.get("lat_max", None)
if lon_min and lon_max and lat_min and lat_max and \
(lon_min != lon_max) and (lat_min != lat_max):
# Although there is no WKT requirement, arrange the points in
# counterclockwise order. Recall format is:
# lat1,lon1 lat2,lon2 ... latN,lonN, lat1,lon1
cap_polygon_text = \
"%(lat_min)s,%(lon_min)s %(lat_min)s,%(lon_max)s %(lat_max)s,%(lon_max)s %(lat_max)s,%(lon_min)s %(lat_min)s,%(lon_min)s" \
% {"lon_min": lon_min,
"lon_max": lon_max,
"lat_min": lat_min,
"lat_max": lat_max}
__cap_gis_location_add_polygon(element, cap_polygon_text, fallback=True)
return
# WKT POINT or location with lat, lon: This can be rendered as a
# zero-radius circle.
# Q: Do we put bounding boxes around POINT locations, and are they
# meaningful?
lat = record.get("lat", None)
lon = record.get("lon", None)
if not lat or not lon:
# Look for POINT.
if wkt and wkt.startswith("POINT"):
start = wkt.find("(")
end = wkt.find(")")
            if start >= 0 and end >= 0:
                # POINT has a single parenthesis (unlike POLYGON's "((" above),
                # so skip just one character after "(" to keep any minus sign
                point_text = wkt[start + 1 : end]
point = point_text.split()
try:
lon = float(point[0])
lat = float(point[1])
except ValueError:
pass
if lat and lon:
# Add a (fallback) circle with zero radius.
__cap_gis_location_add_circle(element, lat, lon, 0, True)
return
# ToDo: Other WKT.
# Did not find anything to use. Presumably the area has a text description.
return
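# Illustration (hypothetical bounds): a record with lat_min=10, lon_min=20,
# lat_max=11, lon_max=21 and no polygon wkt receives the fallback polygon
#   "10,20 10,21 11,21 11,20 10,20"
# under the cap_polygon_fallback tag.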
# -----------------------------------------------------------------------------
class CAPImportFeed(S3Method):
"""
Import CAP alerts from a URL
"""
# -------------------------------------------------------------------------
@staticmethod
def apply_method(r, **attr):
"""
Apply method.
@param r: the S3Request
@param attr: controller options for this request
"""
if r.representation == "html":
T = current.T
request = current.request
response = current.response
title = T("Import from Feed URL")
# @ToDo: use Formstyle
form = FORM(
TABLE(
TR(TD(DIV(B("%s:" % T("URL")),
SPAN(" *", _class="req"))),
TD(INPUT(_type="text", _name="url",
_id="url", _value="")),
TD(),
),
TR(TD(B("%s: " % T("User"))),
TD(INPUT(_type="text", _name="user",
_id="user", _value="")),
TD(),
),
TR(TD(B("%s: " % T("Password"))),
TD(INPUT(_type="text", _name="password",
_id="password", _value="")),
TD(),
),
TR(TD(B("%s: " % T("Ignore Errors?"))),
TD(INPUT(_type="checkbox", _name="ignore_errors",
_id="ignore_errors")),
TD(),
),
TR(TD(),
TD(INPUT(_type="submit", _value=T("Import"))),
TD(),
)
)
)
response.view = "create.html"
output = dict(title=title,
form=form)
if form.accepts(request.vars, current.session):
form_vars = form.vars
url = form_vars.get("url", None)
if not url:
response.error = T("URL is required")
return output
# @ToDo:
            username = form_vars.get("user", None)
password = form_vars.get("password", None)
try:
file = fetch(url)
except urllib2.URLError:
response.error = str(sys.exc_info()[1])
return output
except urllib2.HTTPError:
response.error = str(sys.exc_info()[1])
return output
File = StringIO(file)
stylesheet = os.path.join(request.folder, "static", "formats",
"cap", "import.xsl")
xml = current.xml
tree = xml.parse(File)
resource = current.s3db.resource("cap_alert")
s3xml = xml.transform(tree, stylesheet_path=stylesheet,
name=resource.name)
try:
resource.import_xml(s3xml,
ignore_errors=form_vars.get("ignore_errors", None))
except:
response.error = str(sys.exc_info()[1])
else:
import_count = resource.import_count
if import_count:
response.confirmation = "%s %s" % \
(import_count,
T("Alerts successfully imported."))
else:
response.information = T("No Alerts available.")
return output
else:
raise HTTP(501, current.ERROR.BAD_METHOD)
# END =========================================================================
| gnarula/eden_deployment | modules/s3db/cap.py | Python | mit | 78,715 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Python source code - replace this with a description of the code and write the code below this text.
"""
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
| muldy/dotvim | templates/python.py | Python | gpl-3.0 | 211 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=fixme, too-many-arguments, too-many-locals
# pylint: disable=too-many-public-methods, too-many-branches, too-many-lines
"""`BaseModule` defines an API for modules."""
import time
import logging
import warnings
from .. import metric
from .. import ndarray
from ..context import cpu
from ..model import BatchEndParam
from ..initializer import Uniform
from ..io import DataDesc
from ..base import _as_list
def _check_input_names(symbol, names, typename, throw):
"""Check that all input names are in symbol's arguments."""
args = symbol.list_arguments()
for name in names:
if name in args:
continue
candidates = [arg for arg in args if
not arg.endswith('_weight') and
not arg.endswith('_bias') and
not arg.endswith('_gamma') and
not arg.endswith('_beta')]
msg = "\033[91mYou created Module with Module(..., %s_names=%s) but " \
"input with name '%s' is not found in symbol.list_arguments(). " \
"Did you mean one of:\n\t%s\033[0m"%(
typename, str(names), name, '\n\t'.join(candidates))
if throw:
raise ValueError(msg)
else:
warnings.warn(msg)
def _check_names_match(data_names, data_shapes, name, throw):
"""Check that input names matches input data descriptors."""
actual = [x[0] for x in data_shapes]
if sorted(data_names) != sorted(actual):
msg = "Data provided by %s_shapes don't match names specified by %s_names (%s vs. %s)"%(
name, name, str(data_shapes), str(data_names))
if throw:
raise ValueError(msg)
else:
warnings.warn(msg)
def _parse_data_desc(data_names, label_names, data_shapes, label_shapes):
"""parse data_attrs into DataDesc format and check that names match"""
data_shapes = [x if isinstance(x, DataDesc) else DataDesc(*x) for x in data_shapes]
_check_names_match(data_names, data_shapes, 'data', True)
if label_shapes is not None:
label_shapes = [x if isinstance(x, DataDesc) else DataDesc(*x) for x in label_shapes]
_check_names_match(label_names, label_shapes, 'label', False)
else:
_check_names_match(label_names, [], 'label', False)
return data_shapes, label_shapes
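# Illustrative call (the shapes are hypothetical): plain (name, shape) tuples
# are normalised to DataDesc instances, e.g.
#   _parse_data_desc(['data'], ['softmax_label'],
#                    [('data', (32, 3, 224, 224))],
#                    [('softmax_label', (32,))])
# returns the two lists as DataDesc and warns/raises on name mismatches.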
class BaseModule(object):
"""The base class of a module.
A module represents a computation component. One can think of module as a computation machine.
A module can execute forward and backward passes and update parameters in a model.
We aim to make the APIs easy to use, especially in the case when we need to use the imperative
API to work with multiple modules (e.g. stochastic depth network).
A module has several states:
- Initial state: Memory is not allocated yet, so the module is not ready for computation yet.
- Binded: Shapes for inputs, outputs, and parameters are all known, memory has been allocated,
and the module is ready for computation.
- Parameters are initialized: For modules with parameters, doing computation before
initializing the parameters might result in undefined outputs.
- Optimizer is installed: An optimizer can be installed to a module. After this, the parameters
of the module can be updated according to the optimizer after gradients are computed
(forward-backward).
In order for a module to interact with others, it must be able to report the
following information in its initial state (before binding):
- `data_names`: list of type string indicating the names of the required input data.
- `output_names`: list of type string indicating the names of the required outputs.
After binding, a module should be able to report the following richer information:
- state information
- `binded`: `bool`, indicates whether the memory buffers needed for computation
have been allocated.
- `for_training`: whether the module is bound for training.
- `params_initialized`: `bool`, indicates whether the parameters of this module
have been initialized.
- `optimizer_initialized`: `bool`, indicates whether an optimizer is defined
and initialized.
- `inputs_need_grad`: `bool`, indicates whether gradients with respect to the
input data are needed. Might be useful when implementing composition of modules.
- input/output information
- `data_shapes`: a list of `(name, shape)`. In theory, since the memory is allocated,
we could directly provide the data arrays. But in the case of data parallelism,
the data arrays might not be of the same shape as viewed from the external world.
        - `label_shapes`: a list of `(name, shape)`. This might be `[]` if the module does
          not need labels (e.g. it does not contain a loss function at the top), or if the
          module is not bound for training.
- `output_shapes`: a list of `(name, shape)` for outputs of the module.
- parameters (for modules with parameters)
        - `get_params()`: return a tuple `(arg_params, aux_params)`. Each of those
          is a dictionary of name to ``NDArray`` mapping. Those `NDArray` always live on
          CPU. The actual parameters used for computing might live on other devices (GPUs);
          this function will retrieve (a copy of) the latest parameters.
- ``set_params(arg_params, aux_params)``: assign parameters to the devices
doing the computation.
- ``init_params(...)``: a more flexible interface to assign or initialize the parameters.
- setup
- `bind()`: prepare environment for computation.
- `init_optimizer()`: install optimizer for parameter updating.
- `prepare()`: prepare the module based on the current data batch.
- computation
- `forward(data_batch)`: forward operation.
- `backward(out_grads=None)`: backward operation.
- `update()`: update parameters according to installed optimizer.
- `get_outputs()`: get outputs of the previous forward operation.
- `get_input_grads()`: get the gradients with respect to the inputs computed
in the previous backward operation.
- `update_metric(metric, labels, pre_sliced=False)`: update performance metric
for the previous forward
computed results.
- other properties (mostly for backward compatibility)
- `symbol`: the underlying symbolic graph for this module (if any)
This property is not necessarily constant. For example, for `BucketingModule`,
this property is simply the *current* symbol being used. For other modules,
this value might not be well defined.
    When those intermediate-level APIs are implemented properly, the following
high-level API will be automatically available for a module:
- `fit`: train the module parameters on a data set.
- `predict`: run prediction on a data set and collect outputs.
- `score`: run prediction on a data set and evaluate performance.
Examples
--------
    >>> # An example of creating an MXNet module.
>>> import mxnet as mx
>>> data = mx.symbol.Variable('data')
>>> fc1 = mx.symbol.FullyConnected(data, name='fc1', num_hidden=128)
>>> act1 = mx.symbol.Activation(fc1, name='relu1', act_type="relu")
>>> fc2 = mx.symbol.FullyConnected(act1, name = 'fc2', num_hidden = 64)
>>> act2 = mx.symbol.Activation(fc2, name='relu2', act_type="relu")
>>> fc3 = mx.symbol.FullyConnected(act2, name='fc3', num_hidden=10)
>>> out = mx.symbol.SoftmaxOutput(fc3, name = 'softmax')
>>> mod = mx.mod.Module(out)
"""
def __init__(self, logger=logging):
self.logger = logger
self.binded = False
self.for_training = False
self.inputs_need_grad = False
self.params_initialized = False
self.optimizer_initialized = False
self._symbol = None
self._total_exec_bytes = 0
################################################################################
# High Level API
################################################################################
def forward_backward(self, data_batch):
"""A convenient function that calls both ``forward`` and ``backward``."""
self.forward(data_batch, is_train=True)
self.backward()
def score(self, eval_data, eval_metric, num_batch=None, batch_end_callback=None,
score_end_callback=None,
reset=True, epoch=0, sparse_row_id_fn=None):
"""Runs prediction on ``eval_data`` and evaluates the performance according to
the given ``eval_metric``.
        Check out the `Module Tutorial <http://mxnet.io/tutorials/basic/module.html>`_ to see
        an end-to-end use-case.
Parameters
----------
eval_data : DataIter
Evaluation data to run prediction on.
eval_metric : EvalMetric or list of EvalMetrics
Evaluation metric to use.
num_batch : int
Number of batches to run. Defaults to ``None``, indicating run until the `DataIter`
finishes.
batch_end_callback : function
Could also be a list of functions.
reset : bool
            Defaults to ``True``. Indicates whether we should reset `eval_data` before starting
            the evaluation.
epoch : int
Defaults to 0. For compatibility, this will be passed to callbacks (if any).
During training, this will correspond to the training epoch number.
sparse_row_id_fn : A callback function
The function takes `data_batch` as an input and returns a dict of
str -> NDArray. The resulting dict is used for pulling row_sparse
parameters from the kvstore, where the str key is the name of the param,
and the value is the row id of the param to pull.
Examples
--------
>>> # An example of using score for prediction.
>>> # Evaluate accuracy on val_dataiter
>>> metric = mx.metric.Accuracy()
>>> mod.score(val_dataiter, metric)
>>> mod.score(val_dataiter, ['mse', 'acc'])
"""
assert self.binded and self.params_initialized
if reset:
eval_data.reset()
if not isinstance(eval_metric, metric.EvalMetric):
eval_metric = metric.create(eval_metric)
eval_metric.reset()
actual_num_batch = 0
for nbatch, eval_batch in enumerate(eval_data):
if num_batch is not None and nbatch == num_batch:
break
self.prepare(eval_batch, sparse_row_id_fn=sparse_row_id_fn)
self.forward(eval_batch, is_train=False)
if isinstance(eval_batch, list):
self.update_metric(eval_metric, [eb.label for eb in eval_batch], pre_sliced=True)
else:
self.update_metric(eval_metric, eval_batch.label)
if batch_end_callback is not None:
batch_end_params = BatchEndParam(epoch=epoch,
nbatch=nbatch,
eval_metric=eval_metric,
locals=locals())
for callback in _as_list(batch_end_callback):
callback(batch_end_params)
actual_num_batch += 1
if score_end_callback:
params = BatchEndParam(epoch=epoch,
nbatch=actual_num_batch,
eval_metric=eval_metric,
locals=locals())
for callback in _as_list(score_end_callback):
callback(params)
return eval_metric.get_name_value()
def iter_predict(self, eval_data, num_batch=None, reset=True, sparse_row_id_fn=None):
"""Iterates over predictions.
        Examples
        --------
>>> for pred, i_batch, batch in module.iter_predict(eval_data):
... # pred is a list of outputs from the module
        ... # i_batch is an integer
... # batch is the data batch from the data iterator
Parameters
----------
eval_data : DataIter
Evaluation data to run prediction on.
num_batch : int
Default is ``None``, indicating running all the batches in the data iterator.
reset : bool
            Default is ``True``, indicating whether we should reset the data iter before
            starting prediction.
sparse_row_id_fn : A callback function
The function takes `data_batch` as an input and returns a dict of
str -> NDArray. The resulting dict is used for pulling row_sparse
parameters from the kvstore, where the str key is the name of the param,
and the value is the row id of the param to pull.
"""
assert self.binded and self.params_initialized
if reset:
eval_data.reset()
for nbatch, eval_batch in enumerate(eval_data):
if num_batch is not None and nbatch == num_batch:
break
self.prepare(eval_batch, sparse_row_id_fn=sparse_row_id_fn)
self.forward(eval_batch, is_train=False)
pad = eval_batch.pad
outputs = [out[0:out.shape[0]-pad] for out in self.get_outputs()]
yield (outputs, nbatch, eval_batch)
def predict(self, eval_data, num_batch=None, merge_batches=True, reset=True,
always_output_list=False, sparse_row_id_fn=None):
"""Runs prediction and collects the outputs.
When `merge_batches` is ``True`` (by default), the return value will be a list
``[out1, out2, out3]``, where each element is formed by concatenating the outputs for
        all the mini-batches. When `always_output_list` is ``False`` (the default),
then in the case of a single output, `out1` is returned instead of ``[out1]``.
When `merge_batches` is ``False``, the return value will be a nested list like
``[[out1_batch1, out2_batch1], [out1_batch2], ...]``. This mode is useful because
in some cases (e.g. bucketing), the module does not necessarily produce the same
number of outputs.
The objects in the results have type `NDArray`. If you need to work with a numpy array,
just call ``.asnumpy()`` on each `NDArray`.
Parameters
----------
eval_data : DataIter
Evaluation data to run prediction on.
num_batch : int
            Defaults to ``None``, indicating running all the batches in the data iterator.
merge_batches : bool
Defaults to ``True``, see above for return values.
reset : bool
Defaults to ``True``, indicates whether we should reset the data iter before
doing prediction.
always_output_list : bool
Defaults to ``False``, see above for return values.
sparse_row_id_fn : A callback function
The function takes `data_batch` as an input and returns a dict of
str -> NDArray. The resulting dict is used for pulling row_sparse
parameters from the kvstore, where the str key is the name of the param,
and the value is the row id of the param to pull.
Returns
-------
list of NDArray or list of list of NDArray
Prediction results.
Examples
--------
>>> # An example of using `predict` for prediction.
>>> # Predict on the first 10 batches of val_dataiter
>>> mod.predict(eval_data=val_dataiter, num_batch=10)
"""
assert self.binded and self.params_initialized
if reset:
eval_data.reset()
output_list = []
for nbatch, eval_batch in enumerate(eval_data):
if num_batch is not None and nbatch == num_batch:
break
self.prepare(eval_batch, sparse_row_id_fn=sparse_row_id_fn)
self.forward(eval_batch, is_train=False)
pad = eval_batch.pad
outputs = [out[0:out.shape[0]-pad].copy() for out in self.get_outputs()]
output_list.append(outputs)
if len(output_list) == 0:
return output_list
if merge_batches:
num_outputs = len(output_list[0])
for out in output_list:
assert len(out) == num_outputs, \
'Cannot merge batches, as num of outputs is not the same ' + \
'in mini-batches. Maybe bucketing is used?'
output_list2 = [ndarray.concatenate([out[i] for out in output_list])
for i in range(num_outputs)]
if num_outputs == 1 and not always_output_list:
return output_list2[0]
return output_list2
return output_list
def fit(self, train_data, eval_data=None, eval_metric='acc',
epoch_end_callback=None, batch_end_callback=None, kvstore='local',
optimizer='sgd', optimizer_params=(('learning_rate', 0.01),),
eval_end_callback=None,
eval_batch_end_callback=None, initializer=Uniform(0.01),
arg_params=None, aux_params=None, allow_missing=False,
force_rebind=False, force_init=False, begin_epoch=0, num_epoch=None,
validation_metric=None, monitor=None, sparse_row_id_fn=None):
"""Trains the module parameters.
        Check out the `Module Tutorial <http://mxnet.io/tutorials/basic/module.html>`_ to see
        an end-to-end use-case.
Parameters
----------
train_data : DataIter
Train DataIter.
eval_data : DataIter
If not ``None``, will be used as validation set and the performance
after each epoch will be evaluated.
eval_metric : str or EvalMetric
            Defaults to 'acc' (accuracy). The performance measure used to display during training.
Other possible predefined metrics are:
'ce' (CrossEntropy), 'f1', 'mae', 'mse', 'rmse', 'top_k_accuracy'.
epoch_end_callback : function or list of functions
Each callback will be called with the current `epoch`, `symbol`, `arg_params`
and `aux_params`.
batch_end_callback : function or list of function
Each callback will be called with a `BatchEndParam`.
kvstore : str or KVStore
Defaults to 'local'.
optimizer : str or Optimizer
Defaults to 'sgd'.
optimizer_params : dict
Defaults to ``(('learning_rate', 0.01),)``. The parameters for
the optimizer constructor.
The default value is not a dict, just to avoid pylint warning on dangerous
default values.
eval_end_callback : function or list of function
These will be called at the end of each full evaluation, with the metrics over
the entire evaluation set.
eval_batch_end_callback : function or list of function
These will be called at the end of each mini-batch during evaluation.
initializer : Initializer
The initializer is called to initialize the module parameters when they are
not already initialized.
arg_params : dict
            Defaults to ``None``. If not ``None``, should be existing parameters from a trained
model or loaded from a checkpoint (previously saved model). In this case,
the value here will be used to initialize the module parameters, unless they
are already initialized by the user via a call to `init_params` or `fit`.
`arg_params` has a higher priority than `initializer`.
aux_params : dict
Defaults to ``None``. Similar to `arg_params`, except for auxiliary states.
allow_missing : bool
Defaults to ``False``. Indicates whether to allow missing parameters when `arg_params`
and `aux_params` are not ``None``. If this is ``True``, then the missing parameters
will be initialized via the `initializer`.
force_rebind : bool
Defaults to ``False``. Whether to force rebinding the executors if already bound.
force_init : bool
Defaults to ``False``. Indicates whether to force initialization even if the
parameters are already initialized.
begin_epoch : int
Defaults to 0. Indicates the starting epoch. Usually, if resumed from a
checkpoint saved at a previous training phase at epoch N, then this value should be
N+1.
        num_epoch : int
            Number of epochs for training.
        validation_metric : str or EvalMetric
            Defaults to ``None``, in which case `eval_metric` is also used on the
            validation set (`eval_data`).
        monitor : Monitor
            Defaults to ``None``. If given, it is installed on the module before
            training starts.
sparse_row_id_fn : A callback function
The function takes `data_batch` as an input and returns a dict of
str -> NDArray. The resulting dict is used for pulling row_sparse
parameters from the kvstore, where the str key is the name of the param,
and the value is the row id of the param to pull.
Examples
--------
>>> # An example of using fit for training.
>>> # Assume training dataIter and validation dataIter are ready
>>> # Assume loading a previously checkpointed model
>>> sym, arg_params, aux_params = mx.model.load_checkpoint(model_prefix, 3)
>>> mod.fit(train_data=train_dataiter, eval_data=val_dataiter, optimizer='sgd',
... optimizer_params={'learning_rate':0.01, 'momentum': 0.9},
... arg_params=arg_params, aux_params=aux_params,
... eval_metric='acc', num_epoch=10, begin_epoch=3)
"""
assert num_epoch is not None, 'please specify number of epochs'
self.bind(data_shapes=train_data.provide_data, label_shapes=train_data.provide_label,
for_training=True, force_rebind=force_rebind)
if monitor is not None:
self.install_monitor(monitor)
self.init_params(initializer=initializer, arg_params=arg_params, aux_params=aux_params,
allow_missing=allow_missing, force_init=force_init)
self.init_optimizer(kvstore=kvstore, optimizer=optimizer,
optimizer_params=optimizer_params)
if validation_metric is None:
validation_metric = eval_metric
if not isinstance(eval_metric, metric.EvalMetric):
eval_metric = metric.create(eval_metric)
################################################################################
# training loop
################################################################################
for epoch in range(begin_epoch, num_epoch):
tic = time.time()
eval_metric.reset()
nbatch = 0
data_iter = iter(train_data)
end_of_batch = False
next_data_batch = next(data_iter)
while not end_of_batch:
data_batch = next_data_batch
if monitor is not None:
monitor.tic()
self.forward_backward(data_batch)
self.update()
if isinstance(data_batch, list):
self.update_metric(eval_metric,
[db.label for db in data_batch],
pre_sliced=True)
else:
self.update_metric(eval_metric, data_batch.label)
try:
# pre fetch next batch
next_data_batch = next(data_iter)
self.prepare(next_data_batch, sparse_row_id_fn=sparse_row_id_fn)
except StopIteration:
end_of_batch = True
if monitor is not None:
monitor.toc_print()
if end_of_batch:
eval_name_vals = eval_metric.get_name_value()
if batch_end_callback is not None:
batch_end_params = BatchEndParam(epoch=epoch, nbatch=nbatch,
eval_metric=eval_metric,
locals=locals())
for callback in _as_list(batch_end_callback):
callback(batch_end_params)
nbatch += 1
# one epoch of training is finished
for name, val in eval_name_vals:
self.logger.info('Epoch[%d] Train-%s=%f', epoch, name, val)
toc = time.time()
self.logger.info('Epoch[%d] Time cost=%.3f', epoch, (toc-tic))
# sync aux params across devices
arg_params, aux_params = self.get_params()
self.set_params(arg_params, aux_params)
if epoch_end_callback is not None:
for callback in _as_list(epoch_end_callback):
callback(epoch, self.symbol, arg_params, aux_params)
#----------------------------------------
# evaluation on validation set
if eval_data:
res = self.score(eval_data, validation_metric,
score_end_callback=eval_end_callback,
batch_end_callback=eval_batch_end_callback, epoch=epoch)
#TODO: pull this into default
for name, val in res:
self.logger.info('Epoch[%d] Validation-%s=%f', epoch, name, val)
# end of 1 epoch, reset the data-iter for another epoch
train_data.reset()
################################################################################
# Symbol information
################################################################################
@property
def data_names(self):
"""A list of names for data required by this module."""
raise NotImplementedError()
@property
def output_names(self):
"""A list of names for the outputs of this module."""
raise NotImplementedError()
################################################################################
# Input/Output information
################################################################################
@property
def data_shapes(self):
"""A list of (name, shape) pairs specifying the data inputs to this module."""
raise NotImplementedError()
@property
def label_shapes(self):
"""A list of (name, shape) pairs specifying the label inputs to this module.
        If this module does not accept labels -- either it is a module without a loss
        function, or it is not bound for training -- then this should return an empty
        list ``[]``.
"""
raise NotImplementedError()
@property
def output_shapes(self):
"""A list of (name, shape) pairs specifying the outputs of this module."""
raise NotImplementedError()
################################################################################
# Parameters of a module
################################################################################
def get_params(self):
"""Gets parameters, those are potentially copies of the the actual parameters used
to do computation on the device.
Returns
-------
``(arg_params, aux_params)``
A pair of dictionaries each mapping parameter names to NDArray values.
Examples
--------
>>> # An example of getting module parameters.
>>> print mod.get_params()
({'fc2_weight': <NDArray 64x128 @cpu(0)>, 'fc1_weight': <NDArray 128x100 @cpu(0)>,
'fc3_bias': <NDArray 10 @cpu(0)>, 'fc3_weight': <NDArray 10x64 @cpu(0)>,
'fc2_bias': <NDArray 64 @cpu(0)>, 'fc1_bias': <NDArray 128 @cpu(0)>}, {})
"""
raise NotImplementedError()
def init_params(self, initializer=Uniform(0.01), arg_params=None, aux_params=None,
allow_missing=False, force_init=False, allow_extra=False):
"""Initializes the parameters and auxiliary states.
Parameters
----------
initializer : Initializer
Called to initialize parameters if needed.
arg_params : dict
If not ``None``, should be a dictionary of existing `arg_params`. Initialization
will be copied from that.
aux_params : dict
If not ``None``, should be a dictionary of existing `aux_params`. Initialization
will be copied from that.
allow_missing : bool
If ``True``, params could contain missing values, and the initializer will be
called to fill those missing params.
force_init : bool
            If ``True``, will force re-initialization even if the parameters are already initialized.
allow_extra : boolean, optional
            Whether to allow extra parameters that are not needed by the symbol.
            If this is True, no error will be thrown when arg_params or aux_params
            contain extra parameters that are not needed by the executor.
Examples
--------
>>> # An example of initializing module parameters.
>>> mod.init_params()
"""
raise NotImplementedError()
def set_params(self, arg_params, aux_params, allow_missing=False, force_init=True,
allow_extra=False):
"""Assigns parameter and aux state values.
Parameters
----------
arg_params : dict
Dictionary of name to value (`NDArray`) mapping.
aux_params : dict
Dictionary of name to value (`NDArray`) mapping.
allow_missing : bool
If ``True``, params could contain missing values, and the initializer will be
called to fill those missing params.
force_init : bool
            If ``True``, will force re-initialization even if already initialized.
allow_extra : boolean, optional
            Whether to allow extra parameters that are not needed by the symbol.
            If this is True, no error will be thrown when arg_params or aux_params
            contain extra parameters that are not needed by the executor.
Examples
--------
>>> # An example of setting module parameters.
>>> sym, arg_params, aux_params = mx.model.load_checkpoint(model_prefix, n_epoch_load)
>>> mod.set_params(arg_params=arg_params, aux_params=aux_params)
"""
self.init_params(initializer=None, arg_params=arg_params, aux_params=aux_params,
allow_missing=allow_missing, force_init=force_init,
allow_extra=allow_extra)
def save_params(self, fname):
"""Saves model parameters to file.
Parameters
----------
fname : str
Path to output param file.
Examples
--------
>>> # An example of saving module parameters.
>>> mod.save_params('myfile')
"""
arg_params, aux_params = self.get_params()
save_dict = {('arg:%s' % k) : v.as_in_context(cpu()) for k, v in arg_params.items()}
save_dict.update({('aux:%s' % k) : v.as_in_context(cpu()) for k, v in aux_params.items()})
ndarray.save(fname, save_dict)
def load_params(self, fname):
"""Loads model parameters from file.
Parameters
----------
fname : str
Path to input param file.
Examples
--------
>>> # An example of loading module parameters.
>>> mod.load_params('myfile')
"""
save_dict = ndarray.load(fname)
arg_params = {}
aux_params = {}
for k, value in save_dict.items():
arg_type, name = k.split(':', 1)
if arg_type == 'arg':
arg_params[name] = value
elif arg_type == 'aux':
aux_params[name] = value
else:
raise ValueError("Invalid param file " + fname)
self.set_params(arg_params, aux_params)
def get_states(self, merge_multi_context=True):
"""Gets states from all devices
If `merge_multi_context` is ``True``, returns output of form ``[out1, out2]``.
Otherwise, it returns output of the form
``[[out1_dev1, out1_dev2], [out2_dev1, out2_dev2]]``.
All output elements are `NDArray`.
Parameters
----------
merge_multi_context : bool
Defaults to ``True``. In the case when data-parallelism is used, the states
will be collected from multiple devices. A ``True`` value indicates that we
            should merge the collected results so that they look as if they came
            from a single executor.
Returns
-------
A list of ``NDArray`` or a list of list of ``NDArray``.
"""
        assert self.binded and self.params_initialized
        # The base module has no states, so only the trivial (non-merging) call
        # is accepted here; stateful subclasses override this method.
        assert not merge_multi_context
        return []
def set_states(self, states=None, value=None):
"""Sets value for states. Only one of states & value can be specified.
Parameters
----------
states : list of list of NDArray
Source states arrays formatted like
``[[state1_dev1, state1_dev2], [state2_dev1, state2_dev2]]``.
value : number
A single scalar value for all state arrays.
"""
        assert self.binded and self.params_initialized
        # The base module has no states to set; only the trivial call is accepted.
        assert not states and not value
def install_monitor(self, mon):
"""Installs monitor on all executors."""
raise NotImplementedError()
################################################################################
# Computations
################################################################################
# pylint: disable=unused-argument
def prepare(self, data_batch, sparse_row_id_fn=None):
        """Prepares the module for processing a data batch.
Usually involves switching bucket and reshaping.
For modules that contain `row_sparse` parameters in KVStore,
it prepares the `row_sparse` parameters based on the sparse_row_id_fn.
When KVStore is used to update parameters for multi-device or multi-machine training,
        a copy of the parameters is stored in KVStore. Note that for `row_sparse` parameters,
the `update()` updates the copy of parameters in KVStore, but doesn't broadcast
the updated parameters to all devices / machines. The `prepare` function is used to
broadcast `row_sparse` parameters with the next batch of data.
Parameters
----------
data_batch : DataBatch
The current batch of data for forward computation.
sparse_row_id_fn : A callback function
The function takes `data_batch` as an input and returns a dict of
str -> NDArray. The resulting dict is used for pulling row_sparse
parameters from the kvstore, where the str key is the name of the param,
and the value is the row id of the param to pull.
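        Examples
        --------
        >>> # A hypothetical sketch (names are illustrative, not from the
        >>> # original source). Only KVStore-backed subclasses act on the
        >>> # callback; this base class merely warns.
        >>> row_id_fn = lambda batch: {'embed_weight': batch.data[0].reshape((-1,))}
        >>> mod.prepare(data_batch, sparse_row_id_fn=row_id_fn)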
        """
if sparse_row_id_fn is not None:
warnings.warn(UserWarning("sparse_row_id_fn is not invoked for BaseModule."))
# pylint: enable=unused-argument
def forward(self, data_batch, is_train=None):
"""Forward computation. It supports data batches with different shapes, such as
different batch sizes or different image sizes.
        If the reshaping of a data batch requires modifying the symbol or the module,
        such as changing the image layout ordering or switching from training to
        predicting, the module must be rebound.
Parameters
----------
data_batch : DataBatch
Could be anything with similar API implemented.
is_train : bool
Default is ``None``, which means `is_train` takes the value of ``self.for_training``.
Examples
--------
>>> import mxnet as mx
>>> from collections import namedtuple
>>> Batch = namedtuple('Batch', ['data'])
>>> data = mx.sym.Variable('data')
>>> out = data * 2
>>> mod = mx.mod.Module(symbol=out, label_names=None)
>>> mod.bind(data_shapes=[('data', (1, 10))])
>>> mod.init_params()
>>> data1 = [mx.nd.ones((1, 10))]
>>> mod.forward(Batch(data1))
>>> print mod.get_outputs()[0].asnumpy()
[[ 2. 2. 2. 2. 2. 2. 2. 2. 2. 2.]]
>>> # Forward with data batch of different shape
>>> data2 = [mx.nd.ones((3, 5))]
>>> mod.forward(Batch(data2))
>>> print mod.get_outputs()[0].asnumpy()
[[ 2. 2. 2. 2. 2.]
[ 2. 2. 2. 2. 2.]
[ 2. 2. 2. 2. 2.]]
"""
raise NotImplementedError()
def backward(self, out_grads=None):
"""Backward computation.
Parameters
----------
out_grads : NDArray or list of NDArray, optional
Gradient on the outputs to be propagated back.
This parameter is only needed when bind is called
on outputs that are not a loss function.
Examples
--------
>>> # An example of backward computation.
>>> mod.backward()
>>> print mod.get_input_grads()[0].asnumpy()
[[[ 1.10182791e-05 5.12257748e-06 4.01927764e-06 8.32566820e-06
-1.59775993e-06 7.24269375e-06 7.28067835e-06 -1.65902311e-05
5.46342608e-06 8.44196393e-07]
...]]
"""
raise NotImplementedError()
def get_outputs(self, merge_multi_context=True):
"""Gets outputs of the previous forward computation.
If `merge_multi_context` is ``True``, it is like ``[out1, out2]``. Otherwise,
        it returns output of the form ``[[out1_dev1, out1_dev2], [out2_dev1, out2_dev2]]``.
All the output elements have type `NDArray`. When `merge_multi_context` is ``False``,
those `NDArray` instances might live on different devices.
Parameters
----------
merge_multi_context : bool
Defaults to ``True``. In the case when data-parallelism is used, the outputs
will be collected from multiple devices. A ``True`` value indicates that we
            should merge the collected results so that they look as if they came
            from a single executor.
Returns
-------
list of `NDArray` or list of list of `NDArray`.
            Outputs of the previous forward computation.
Examples
--------
>>> # An example of getting forward output.
>>> print mod.get_outputs()[0].asnumpy()
[[ 0.09999977 0.10000153 0.10000716 0.10000195 0.09999853 0.09999743
0.10000272 0.10000113 0.09999088 0.09999888]]
"""
raise NotImplementedError()
def get_input_grads(self, merge_multi_context=True):
"""Gets the gradients to the inputs, computed in the previous backward computation.
If `merge_multi_context` is ``True``, it is like ``[grad1, grad2]``. Otherwise, it
is like ``[[grad1_dev1, grad1_dev2], [grad2_dev1, grad2_dev2]]``. All the output
elements have type `NDArray`. When `merge_multi_context` is ``False``, those `NDArray`
instances might live on different devices.
Parameters
----------
merge_multi_context : bool
Defaults to ``True``. In the case when data-parallelism is used, the gradients
will be collected from multiple devices. A ``True`` value indicates that we
            should merge the collected results so that they look as if they came
            from a single executor.
Returns
-------
list of NDArray or list of list of NDArray
Input gradients.
Examples
--------
>>> # An example of getting input gradients.
>>> print mod.get_input_grads()[0].asnumpy()
[[[ 1.10182791e-05 5.12257748e-06 4.01927764e-06 8.32566820e-06
-1.59775993e-06 7.24269375e-06 7.28067835e-06 -1.65902311e-05
5.46342608e-06 8.44196393e-07]
...]]
"""
raise NotImplementedError()
def update(self):
"""Updates parameters according to the installed optimizer and the gradients computed
in the previous forward-backward batch.
When KVStore is used to update parameters for multi-device or multi-machine training,
        a copy of the parameters is stored in KVStore. Note that for `row_sparse` parameters,
this function does update the copy of parameters in KVStore, but doesn't broadcast the
updated parameters to all devices / machines. Please call `prepare` to broadcast
`row_sparse` parameters with the next batch of data.
Examples
--------
>>> # An example of updating module parameters.
>>> mod.init_optimizer(kvstore='local', optimizer='sgd',
... optimizer_params=(('learning_rate', 0.01), ))
>>> mod.backward()
>>> mod.update()
>>> print mod.get_params()[0]['fc3_weight'].asnumpy()
[[ 5.86930104e-03 5.28078526e-03 -8.88729654e-03 -1.08308345e-03
6.13054074e-03 4.27560415e-03 1.53817423e-03 4.62131854e-03
4.69872449e-03 -2.42400169e-03 9.94111411e-04 1.12386420e-03
...]]
"""
raise NotImplementedError()
def update_metric(self, eval_metric, labels, pre_sliced=False):
"""Evaluates and accumulates evaluation metric on outputs of the last forward
computation.
Parameters
----------
eval_metric : EvalMetric
Evaluation metric to use.
labels : list of NDArray if `pre_sliced` parameter is set to `False`,
list of lists of NDArray otherwise. Typically `data_batch.label`.
pre_sliced: bool
Whether the labels are already sliced per device (default: False).
Examples
--------
>>> # An example of updating evaluation metric.
>>> mod.forward(data_batch)
>>> mod.update_metric(metric, data_batch.label)
"""
raise NotImplementedError()
################################################################################
# module setup
################################################################################
def bind(self, data_shapes, label_shapes=None, for_training=True,
inputs_need_grad=False, force_rebind=False, shared_module=None,
grad_req='write'):
"""Binds the symbols to construct executors. This is necessary before one
can perform computation with the module.
Parameters
----------
data_shapes : list of (str, tuple) or DataDesc objects
Typically is ``data_iter.provide_data``. Can also be a list of
(data name, data shape).
label_shapes : list of (str, tuple) or DataDesc objects
Typically is ``data_iter.provide_label``. Can also be a list of
(label name, label shape).
for_training : bool
            Default is ``True``. Whether the executors should be bound for training.
inputs_need_grad : bool
Default is ``False``. Whether the gradients to the input data need to be computed.
Typically this is not needed. But this might be needed when implementing composition
of modules.
force_rebind : bool
            Default is ``False``. This function does nothing if the executors are already
            bound. But if this is ``True``, the executors will be forced to rebind.
shared_module : Module
Default is ``None``. This is used in bucketing. When not ``None``, the shared module
essentially corresponds to a different bucket -- a module with different symbol
but with the same sets of parameters (e.g. unrolled RNNs with different lengths).
grad_req : str, list of str, dict of str to str
Requirement for gradient accumulation. Can be 'write', 'add', or 'null'
(default to 'write').
Can be specified globally (str) or for each argument (list, dict).
Examples
--------
>>> # An example of binding symbols.
>>> mod.bind(data_shapes=[('data', (1, 10, 10))])
>>> # Assume train_iter is already created.
>>> mod.bind(data_shapes=train_iter.provide_data, label_shapes=train_iter.provide_label)
"""
raise NotImplementedError()
def init_optimizer(self, kvstore='local', optimizer='sgd',
optimizer_params=(('learning_rate', 0.01),), force_init=False):
"""Installs and initializes optimizers, as well as initialize kvstore for
distributed training
Parameters
----------
kvstore : str or KVStore
Defaults to `'local'`.
optimizer : str or Optimizer
Defaults to `'sgd'`.
optimizer_params : dict
Defaults to ``(('learning_rate', 0.01),)``. The default value is not a dictionary,
just to avoid pylint warning of dangerous default values.
force_init : bool
Defaults to ``False``, indicates whether to force re-initializing an optimizer
if it is already installed.
Examples
--------
>>> # An example of initializing optimizer.
>>> mod.init_optimizer(optimizer='sgd', optimizer_params=(('learning_rate', 0.005),))
"""
raise NotImplementedError()
################################################################################
# misc
################################################################################
@property
def symbol(self):
"""Gets the symbol associated with this module.
Except for `Module`, for other types of modules (e.g. `BucketingModule`), this
property might not be a constant throughout its life time. Some modules might
not even be associated with any symbols.
"""
return self._symbol
| rahul003/mxnet | python/mxnet/module/base_module.py | Python | apache-2.0 | 47,156 |
from decimal import Decimal
from django.template.defaultfilters import pluralize
from django.test import SimpleTestCase
from ..utils import setup
class PluralizeTests(SimpleTestCase):
def check_values(self, *tests):
for value, expected in tests:
with self.subTest(value=value):
output = self.engine.render_to_string('t', {'value': value})
self.assertEqual(output, expected)
@setup({'t': 'vote{{ value|pluralize }}'})
def test_no_arguments(self):
self.check_values(('0', 'votes'), ('1', 'vote'), ('2', 'votes'))
@setup({'t': 'class{{ value|pluralize:"es" }}'})
def test_suffix(self):
self.check_values(('0', 'classes'), ('1', 'class'), ('2', 'classes'))
@setup({'t': 'cand{{ value|pluralize:"y,ies" }}'})
def test_singular_and_plural_suffix(self):
self.check_values(('0', 'candies'), ('1', 'candy'), ('2', 'candies'))
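# Illustrative template usage for the cases exercised above (not part of the
# original test file):
#   {{ num_votes }} vote{{ num_votes|pluralize }}
#   {{ num_classes }} class{{ num_classes|pluralize:"es" }}
#   {{ num_candies }} cand{{ num_candies|pluralize:"y,ies" }}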
class FunctionTests(SimpleTestCase):
def test_integers(self):
self.assertEqual(pluralize(1), '')
self.assertEqual(pluralize(0), 's')
self.assertEqual(pluralize(2), 's')
def test_floats(self):
self.assertEqual(pluralize(0.5), 's')
self.assertEqual(pluralize(1.5), 's')
def test_decimals(self):
self.assertEqual(pluralize(Decimal(1)), '')
self.assertEqual(pluralize(Decimal(0)), 's')
self.assertEqual(pluralize(Decimal(2)), 's')
def test_lists(self):
self.assertEqual(pluralize([1]), '')
self.assertEqual(pluralize([]), 's')
self.assertEqual(pluralize([1, 2, 3]), 's')
def test_suffixes(self):
self.assertEqual(pluralize(1, 'es'), '')
self.assertEqual(pluralize(0, 'es'), 'es')
self.assertEqual(pluralize(2, 'es'), 'es')
self.assertEqual(pluralize(1, 'y,ies'), 'y')
self.assertEqual(pluralize(0, 'y,ies'), 'ies')
self.assertEqual(pluralize(2, 'y,ies'), 'ies')
self.assertEqual(pluralize(0, 'y,ies,error'), '')
def test_no_len_type(self):
self.assertEqual(pluralize(object(), 'y,es'), '')
self.assertEqual(pluralize(object(), 'es'), '')
def test_value_error(self):
self.assertEqual(pluralize('', 'y,es'), '')
self.assertEqual(pluralize('', 'es'), '')
| atul-bhouraskar/django | tests/template_tests/filter_tests/test_pluralize.py | Python | bsd-3-clause | 2,290 |
from discord.ext import commands
import aiohttp
class Wikipedia:
"""
Le Wikipedia Cog
"""
def __init__(self, bot):
self.bot = bot
@commands.command(pass_context=True, name='wikipedia', aliases=['wiki', 'w'])
async def _wikipedia(self, context, *query: str):
"""
Get information from Wikipedia
"""
try:
            # Build a MediaWiki API query returning a plain-text,
            # three-sentence extract of the requested article.
            url = 'https://en.wikipedia.org/w/api.php?'
            payload = {}
            payload['action'] = 'query'
            payload['format'] = 'json'
            payload['prop'] = 'extracts'
            payload['titles'] = " ".join(query).replace(' ', '_')
            payload['exsentences'] = '3'
            payload['redirects'] = '1'
            payload['explaintext'] = '1'
            headers = {'user-agent': 'Red-cog/1.0'}
conn = aiohttp.TCPConnector(verify_ssl=False)
session = aiohttp.ClientSession(connector=conn)
async with session.get(url, params=payload, headers=headers) as r:
result = await r.json()
session.close()
            # The API reports a missing page under the pseudo page id '-1'.
            if '-1' not in result['query']['pages']:
for page in result['query']['pages']:
title = result['query']['pages'][page]['title']
description = result['query']['pages'][page]['extract']
message = '\n{}\n\n{}'.format(title, description)
else:
message = 'I\'m sorry, I can\'t find {}'.format(" ".join(query))
except Exception as e:
message = 'Something went terribly wrong! [{}]'.format(e)
await self.bot.say('```{}```'.format(message))
def setup(bot):
n = Wikipedia(bot)
bot.add_cog(n)
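# A minimal stand-alone sketch of the same MediaWiki query without the Discord
# plumbing (illustrative only; it assumes the public en.wikipedia.org endpoint
# and uses the synchronous `requests` library for brevity):
#
#   import requests
#   payload = {'action': 'query', 'format': 'json', 'prop': 'extracts',
#              'titles': 'Python_(programming_language)', 'exsentences': '3',
#              'redirects': '1', 'explaintext': '1'}
#   r = requests.get('https://en.wikipedia.org/w/api.php', params=payload)
#   page = next(iter(r.json()['query']['pages'].values()))
#   print(page.get('extract', 'not found'))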
| Injabie3/Red-DiscordBot | cogs/wikipedia.py | Python | gpl-3.0 | 1,456 |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Commands for interacting with Google Compute Engine firewalls."""
import socket
from google.apputils import appcommands
import gflags as flags
from gcutil_lib import command_base
from gcutil_lib import gcutil_errors
from gcutil_lib import utils
FLAGS = flags.FLAGS
class FirewallCommand(command_base.GoogleComputeCommand):
"""Base command for working with the firewalls collection."""
print_spec = command_base.ResourcePrintSpec(
summary=['name', 'network'],
field_mappings=(
('name', 'name'),
('description', 'description'),
('network', 'network'),
('source-ips', 'sourceRanges'),
('source-tags', 'sourceTags'),
('target-tags', 'targetTags')),
detail=(
('name', 'name'),
('description', 'description'),
('creation-time', 'creationTimestamp'),
('network', 'network'),
('source-ips', 'sourceRanges'),
('source-tags', 'sourceTags'),
('target-tags', 'targetTags')),
sort_by='name')
resource_collection_name = 'firewalls'
def __init__(self, name, flag_values):
super(FirewallCommand, self).__init__(name, flag_values)
def GetDetailRow(self, result):
"""Returns an associative list of items for display in a detail table.
Args:
result: A dict returned by the server.
Returns:
      A list of (name, value) pairs.
"""
data = []
# Add the rules
for allowed in result.get('allowed', []):
as_string = str(allowed['IPProtocol'])
if allowed.get('ports'):
as_string += ': %s' % ', '.join(allowed['ports'])
data.append(('allowed', as_string))
return data
class FirewallRules(object):
"""Class representing the list of a firewall's rules.
  This class is only used for parsing a firewall from command-line flags;
for printing the firewall, we simply dump the JSON.
"""
@staticmethod
def ParsePortSpecs(port_spec_strings):
"""Parse the port-specification portion of firewall rules.
This takes the value of the 'allowed' flag and builds the
corresponding firewall rules, excluding the 'source' fields.
Args:
port_spec_strings: A list of strings specifying the port-specific
components of a firewall rule. These are of the form
"(<protocol>)?(:<port>('-'<port>)?)?"
Returns:
A list of dict values containing a protocol string and a list
of port range strings. This is a substructure of the firewall
rule dictionaries, which additionally contain a 'source' field.
Raises:
ValueError: If any of the input strings are malformed.
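    Example (illustrative, not from the original source; the numeric protocol
    values come from socket.getprotobyname on the local system and the result
    order is unspecified):
      ParsePortSpecs(['tcp:80', 'tcp:443', 'icmp'])
      # -> [{'IPProtocol': '6', 'ports': ['80', '443']},
      #     {'IPProtocol': '1'}]   (a missing 'ports' field allows all ports)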
"""
def _AddToPortSpecs(protocol, port_string, port_specs):
"""Ensure the specified rule for this protocol allows the given port(s).
If there is no port_string specified it implies all ports are allowed,
      and whatever is in the port_specs map for that protocol gets clobbered.
This method also makes sure that any protocol entry without a ports
member does not get further restricted.
Args:
protocol: The protocol under which the given port range is allowed.
port_string: The string specification of what ports are allowed.
port_specs: The mapping from protocols to firewall rules.
"""
port_spec_entry = port_specs.setdefault(protocol,
{'IPProtocol': str(protocol),
'ports': []})
      if 'ports' in port_spec_entry:
        # An entry without a 'ports' field already allows all ports, so only
        # entries that still have one need updating.
        if not port_string:
if not port_string:
# A missing 'ports' field indicates all ports are allowed.
port_spec_entry.pop('ports')
else:
port_spec_entry['ports'].append(port_string)
port_specs = {}
for port_spec_string in port_spec_strings:
protocol = None
port_string = None
parts = port_spec_string.split(':')
if len(parts) > 2:
raise ValueError('Invalid allowed entry: %s' %
port_spec_string)
elif len(parts) == 2:
if parts[0]:
protocol = utils.ParseProtocol(parts[0])
port_string = utils.ReplacePortNames(parts[1])
else:
protocol = utils.ParseProtocol(parts[0])
if protocol:
_AddToPortSpecs(protocol, port_string, port_specs)
else:
        # Add entries for both TCP and UDP
_AddToPortSpecs(socket.getprotobyname('tcp'), port_string, port_specs)
_AddToPortSpecs(socket.getprotobyname('udp'), port_string, port_specs)
return port_specs.values()
def __init__(self, allowed, allowed_ip_sources):
self.port_specs = FirewallRules.ParsePortSpecs(allowed)
self.source_ranges = allowed_ip_sources
self.source_tags = []
self.target_tags = []
def SetTags(self, source_tags, target_tags):
self.source_tags = sorted(set(source_tags))
self.target_tags = sorted(set(target_tags))
def AddToFirewall(self, firewall):
if self.source_ranges:
firewall['sourceRanges'] = self.source_ranges
if self.source_tags:
firewall['sourceTags'] = self.source_tags
if self.target_tags:
firewall['targetTags'] = self.target_tags
firewall['allowed'] = self.port_specs
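# Illustrative usage sketch (not part of the original file): building the
# rule-related fields of a firewall resource body.
#
#   rules = FirewallRules(['tcp:22'], ['10.0.0.0/8'])
#   rules.SetTags([], ['ssh-host'])
#   body = {}
#   rules.AddToFirewall(body)
#   # body now carries 'sourceRanges', 'targetTags' and 'allowed' entries.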
class AddFirewall(FirewallCommand):
"""Create a new firewall rule to allow incoming traffic to a network."""
positional_args = '<firewall-name>'
def __init__(self, name, flag_values):
super(AddFirewall, self).__init__(name, flag_values)
flags.DEFINE_string('description',
'',
'An optional Firewall description.',
flag_values=flag_values)
flags.DEFINE_string('network',
'default',
'Specifies which network this firewall applies to.',
flag_values=flag_values)
flags.DEFINE_list('allowed',
None,
'[Required] Specifies a list of allowed ports for this '
'firewall. Each entry must be a combination of the '
'protocol and the port or port range in the following '
'form: \'<protocol>:<port>-<port>\' or '
'\'<protocol>:<port>\'. To specify multiple ports, '
'protocols, or ranges, provide them as comma'
'-separated entries. For example: '
'\'--allowed=tcp:ssh,udp:5000-6000,tcp:80,icmp\'.',
flag_values=flag_values)
flags.DEFINE_list('allowed_ip_sources',
[],
'Specifies a list of IP addresses that are allowed '
'to talk to instances within the network, through the '
'<protocols>:<ports> described by the \'--allowed\' '
'flag. If no IP or tag sources are listed, all sources '
'will be allowed.',
flag_values=flag_values)
flags.DEFINE_list('allowed_tag_sources',
[],
'Specifies a list of instance tags that are allowed to '
'talk to instances within the network, through the '
'<protocols>:<ports> described by the \'--allowed\' '
'flag. If specifying multiple tags, provide them as '
'comma-separated entries. For example, '
'\'--allowed_tag_sources=www,database,frontend\'. '
'If no tag or ip sources are listed, all sources will '
'be allowed.',
flag_values=flag_values)
flags.DEFINE_list('target_tags',
[],
'Specifies a set of tagged instances that this '
'firewall applies to. To specify multiple tags, '
'provide them as comma-separated entries. If no tags '
'are listed, this firewall applies to all instances in '
'the network.',
flag_values=flag_values)
def Handle(self, firewall_name):
"""Add the specified firewall.
Args:
firewall_name: The name of the firewall to add.
Returns:
The result of inserting the firewall.
Raises:
gcutil_errors.CommandError: If the passed flag values cannot be
interpreted.
"""
if not self._flags.allowed:
raise gcutil_errors.CommandError(
'You must specify at least one rule through --allowed.')
firewall_context = self._context_parser.ParseContextOrPrompt('firewalls',
firewall_name)
firewall_resource = {
'kind': self._GetResourceApiKind('firewall'),
'name': firewall_context['firewall'],
'description': self._flags.description,
}
if self._flags.network is not None:
firewall_resource['network'] = self._context_parser.NormalizeOrPrompt(
'networks', self._flags.network)
if (not self._flags.allowed_ip_sources and
not self._flags.allowed_tag_sources):
self._flags.allowed_ip_sources.append('0.0.0.0/0')
try:
firewall_rules = FirewallRules(self._flags.allowed,
self._flags.allowed_ip_sources)
firewall_rules.SetTags(self._flags.allowed_tag_sources,
self._flags.target_tags)
firewall_rules.AddToFirewall(firewall_resource)
firewall_request = self.api.firewalls.insert(
project=firewall_context['project'], body=firewall_resource)
return firewall_request.execute()
except ValueError, e:
raise gcutil_errors.CommandError(e)
class GetFirewall(FirewallCommand):
"""Get a firewall."""
positional_args = '<firewall-name>'
def __init__(self, name, flag_values):
super(GetFirewall, self).__init__(name, flag_values)
def Handle(self, firewall_name):
"""Get the specified firewall.
Args:
firewall_name: The name of the firewall to get.
Returns:
The result of getting the firewall.
"""
firewall_context = self._context_parser.ParseContextOrPrompt('firewalls',
firewall_name)
firewall_request = self.api.firewalls.get(
project=firewall_context['project'],
firewall=firewall_context['firewall'])
return firewall_request.execute()
class DeleteFirewall(FirewallCommand):
"""Delete one or more firewall rules.
Specify multiple firewalls as multiple arguments. The firewalls will be
deleted in parallel.
"""
positional_args = '<firewall-name-1> ... <firewall-name-n>'
safety_prompt = 'Delete firewall'
def __init__(self, name, flag_values):
super(DeleteFirewall, self).__init__(name, flag_values)
def Handle(self, *firewall_names):
"""Delete the specified firewall.
Args:
*firewall_names: The names of the firewalls to delete.
Returns:
Tuple (results, exceptions) - results of deleting the firewalls.
"""
requests = []
for name in firewall_names:
firewall_context = self._context_parser.ParseContextOrPrompt('firewalls',
name)
requests.append(self.api.firewalls.delete(
project=firewall_context['project'],
firewall=firewall_context['firewall']))
results, exceptions = self.ExecuteRequests(requests)
return (self.MakeListResult(results, 'operationList'), exceptions)
class ListFirewalls(FirewallCommand, command_base.GoogleComputeListCommand):
"""List the firewall rules for a project."""
def ListFunc(self):
"""Returns the function for listing firewalls."""
return self.api.firewalls.list
def AddCommands():
appcommands.AddCmd('addfirewall', AddFirewall)
appcommands.AddCmd('getfirewall', GetFirewall)
appcommands.AddCmd('deletefirewall', DeleteFirewall)
appcommands.AddCmd('listfirewalls', ListFirewalls)
| harshilasu/LinkurApp | y/google-cloud-sdk/platform/gcutil/lib/google_compute_engine/gcutil_lib/firewall_cmds.py | Python | gpl-3.0 | 12,747 |
# ==================================================================================================
# Copyright 2011 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
from .java_types import *
from .attribute_info import Attribute
from .signature_parser import BaseType
from . import util
_UNPARSED = (None, 0)
class FieldInfoFlags(object):
"""http://java.sun.com/docs/books/jvms/second_edition/html/ClassFile.doc.html#88358
"""
ACC_PUBLIC = 0x0001
ACC_PRIVATE = 0x0002
ACC_PROTECTED = 0x0004
ACC_STATIC = 0x0008
ACC_FINAL = 0x0010
ACC_VOLATILE = 0x0040
ACC_TRANSIENT = 0x0080
def __init__(self, data):
self._flags = u2(data).get()
def public(self):
return self._flags & FieldInfoFlags.ACC_PUBLIC
def private(self):
return self._flags & FieldInfoFlags.ACC_PRIVATE
def protected(self):
return self._flags & FieldInfoFlags.ACC_PROTECTED
def static(self):
return self._flags & FieldInfoFlags.ACC_STATIC
def final(self):
return self._flags & FieldInfoFlags.ACC_FINAL
def volatile(self):
return self._flags & FieldInfoFlags.ACC_VOLATILE
def transient(self):
return self._flags & FieldInfoFlags.ACC_TRANSIENT
def __str__(self):
verbs = []
if self.public(): verbs.append('public')
if self.private(): verbs.append('private')
if self.protected(): verbs.append('protected')
if self.static(): verbs.append('static')
if self.final(): verbs.append('final')
if self.volatile(): verbs.append('volatile')
if self.transient(): verbs.append('transient')
return ' '.join(verbs)
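# Illustrative sketch (not part of the original file): a `public static final`
# field carries access flags 0x0001 | 0x0008 | 0x0010 = 0x0019, so, assuming
# u2 consumes two big-endian bytes as is conventional for class files,
# str(FieldInfoFlags('\x00\x19')) would yield 'public static final'.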
class ObjectType(object):
@staticmethod
def match(data):
if data[0] == 'L':
eof = data.find(';')
return data[1:eof], eof + 1
else:
return _UNPARSED
class ArrayType(object):
@staticmethod
def match(data):
if data[0] == '[':
component, offset = ComponentType.match(data[1:])
return component+'[]', offset + 1
else:
return _UNPARSED
class ComponentType(object):
@staticmethod
def match(data):
return FieldType.match(data)
class FieldDescriptor(object):
@staticmethod
def match(data):
return FieldType.match(data)
class FieldType(object):
"""http://java.sun.com/docs/books/jvms/second_edition/html/ClassFile.doc.html#1170
FieldType:
BaseType
ObjectType
ArrayType
FieldDescriptor:
FieldType
ComponentType:
FieldType
BaseType: 'B' | 'C' | 'D' | 'F' | 'I' | 'J' | 'S' | 'Z'
ObjectType:
L <classname> ;
ArrayType:
[ ComponentType
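  Example (illustrative, not from the original source):
    FieldType.match('Ljava/lang/String;')  ->  ('java/lang/String', 18)
  Array descriptors such as '[I' resolve through ArrayType/ComponentType and
  ultimately BaseType.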
"""
@staticmethod
def match(data):
base_type, offset = BaseType.match(data)
if offset: return base_type, offset
object_type, offset = ObjectType.match(data)
if offset: return object_type, offset
array_type, offset = ArrayType.match(data)
if offset: return array_type, offset
return _UNPARSED
class FieldInfo(object):
def __init__(self, data, constants):
self._access_flags = FieldInfoFlags(data[0:2])
(self._name_index, self._descriptor_index, self._attributes_count), data = \
JavaNativeType.parse(data[2:], u2, u2, u2)
self._name = constants[self._name_index] # synthesized
self._descriptor = constants[self._descriptor_index] # synthesized
self._parsed_descriptor, _ = FieldDescriptor.match(self._descriptor.bytes())
self._attributes = []
offset = 0
for k in range(self._attributes_count):
attribute = Attribute.parse(data[offset:], constants)
offset += attribute.size()
self._attributes.append(attribute)
self._size = offset + 8
def size(self):
return self._size
def __str__(self):
base = '%s %s %s' % (
self._access_flags,
util.javaify(self._parsed_descriptor),
self._name)
if self._attributes:
for attr in self._attributes:
base += '\n %s: %s' % (attr.name(), attr)
base += '\n'
return base
| foursquare/commons-old | src/python/twitter/common/java/field_info.py | Python | apache-2.0 | 4,650 |
##############################################################################
#
# Copyright (c) 2012 RiTH-Tech (http://rith-tech.com). All Right Reserved
#
# Author : Huy Doan ([email protected])
#
##############################################################################
from osv import fields, osv
class pos_floor(osv.osv):
"""Restaunrent/cafe floor"""
_name = 'pos.floor'
_description = "Floor"
_columns = {
'name': fields.char('Name', size=64, required=True),
'description': fields.char('Description', size=100),
'table_ids': fields.one2many('pos.table', 'floor_id', 'Tables'),
'icon': fields.binary('Icon'),
'background': fields.binary('Background'),
'state': fields.selection([('draft', 'Draft'),
('open', 'Opened'),
('close', 'Closed')],
'State', readonly=True),
}
_defaults = {
'state': 'draft',
}
def set_open(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state':'open'}, context=context)
return True
def set_close(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state':'close'}, context=context)
return True
pos_floor()
class pos_table(osv.osv):
"""Restaunrent/cafe table"""
_name = 'pos.table'
_description = "Table"
_columns = {
'name': fields.char("Name", size=64, required=True),
'floor_id': fields.many2one('pos.floor', "Floor", required=True),
'description': fields.char('Description', size=100),
'icon': fields.binary('Icon'),
'state': fields.selection([
('draft', 'Draft'),
('open', 'Opened'),
('close', 'Closed')],
'State', readonly=True),
'x': fields.integer('X coordinate'),
'y': fields.integer('Y coordinate'),
}
_defaults = {
'state': 'draft',
}
def set_open(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state':'open'}, context=context)
return True
def set_close(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state':'close'}, context=context)
return True
pos_table()
class pos_order(osv.osv):
_inherit = 'pos.order'
_columns = {
'table_id': fields.many2one('pos.table', 'Table', required=False, readonly=False),
}
pos_order()
| rgv151/Paracel-POS-Backend | paracelpos/paracelpos.py | Python | gpl-3.0 | 2,419 |
from datetime import timedelta
CELERYBEAT_SCHEDULE = {
"reddit-validations": {
"task": "reddit.tasks.process_validations",
"schedule": timedelta(minutes=10),
},
"eveapi-update": {
"task": "eve_api.tasks.account.queue_apikey_updates",
"schedule": timedelta(minutes=10),
},
"alliance-update": {
"task": "eve_api.tasks.alliance.import_alliance_details",
"schedule": timedelta(hours=6),
},
"api-log-clear": {
"task": "eve_proxy.tasks.clear_old_logs",
"schedule": timedelta(days=1),
},
"blacklist-check": {
"task": "hr.tasks.blacklist_check",
"schedule": timedelta(days=7),
},
"reddit-update": {
"task": "reddit.tasks.queue_account_updates",
"schedule": timedelta(minutes=15),
}
}
CELERY_ROUTES = {
"sso.tasks.update_service_groups": {'queue': 'bulk'},
"hr.tasks.blacklist_check": {'queue': 'bulk'},
"eve_api.tasks.import_apikey_result": {'queue': 'fastresponse'},
"sso.tasks.update_user_access": {'queue': 'fastresponse'},
}
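# Illustrative usage sketch (not part of the original file): these constants
# are typically pulled into the project's Django/Celery settings module, e.g.
#
#   from conf.celeryschedule import CELERYBEAT_SCHEDULE, CELERY_ROUTES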
| nikdoof/test-auth | app/conf/celeryschedule.py | Python | bsd-3-clause | 1,084 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import rospy
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Twist
pub = rospy.Publisher('/mobile_base/commands/velocity', Twist, queue_size=10)
def od_callback(robot_od):
    # Current x position of the robot, read from the odometry message.
    x = robot_od.pose.pose.position.x
    # Proportional controller: drive toward the set point r = 2.0 m with
    # gain k, commanding the forward velocity v = k * (r - x).
    r = 2.0
    k = 0.5
    v = k * (r - x)
    robot_tw = Twist()
    robot_tw.linear.x = v
    pub.publish(robot_tw)
def control():
rospy.init_node('amr_control')
rospy.Subscriber('/odom', Odometry, od_callback)
rospy.spin()
if __name__ == '__main__':
try:
control()
except rospy.ROSInterruptException:
pass
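# Illustrative note (not part of the original script): with the node running,
# the proportional law v = k * (r - x) commands v = 1.0 m/s at x = 0 and
# decays linearly to zero as the robot approaches the x = 2.0 m set point.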
| rafafigueroa/amrws | src/amrpkg/scripts/odom_control.py | Python | mit | 624 |
# -*- coding: utf-8 -*-
'''
@author: moloch
Copyright 2013
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from tornado.web import UIModule
from models.Permission import ADMIN_PERMISSION
from models.User import User
class Menu(UIModule):
def render(self, *args, **kwargs):
''' Renders the top menu '''
if self.handler.session is not None:
if self.handler.session['user_menu'] == ADMIN_PERMISSION:
return self.render_string('menu/admin.html',
user_name=self.handler.session['user_name']
)
elif self.handler.session['user_menu'] == 'user':
return self.render_string('menu/user.html',
user_name=self.handler.session['user_name']
)
return self.render_string('menu/public.html')
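# Illustrative template usage (not part of the original file): Tornado UI
# modules are invoked from templates, e.g. {% module Menu() %} in a shared
# layout, rendering the menu that matches the session's permission level.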
| lunarca/fngrpt | uimodules/Menu.py | Python | apache-2.0 | 1,356 |
from com.my_lovely_company.my_lovely_team.my_lovely_project.my_lovely_component import (
MyLovelyCompanyTeamProjectComponent, # NOT DRY
)
from com.my_lovely_company.my_lovely_team.my_lovely_project.my_lovely_component import (
MyLovelyCompanyTeamProjectComponent as component, # DRY
)
class C:
@pytest.mark.parametrize(
("post_data", "message"),
[
# metadata_version errors.
(
{},
"None is an invalid value for Metadata-Version. Error: This field is"
" required. see"
" https://packaging.python.org/specifications/core-metadata",
),
(
{"metadata_version": "-1"},
"'-1' is an invalid value for Metadata-Version. Error: Unknown Metadata"
" Version see"
" https://packaging.python.org/specifications/core-metadata",
),
# name errors.
(
{"metadata_version": "1.2"},
"'' is an invalid value for Name. Error: This field is required. see"
" https://packaging.python.org/specifications/core-metadata",
),
(
{"metadata_version": "1.2", "name": "foo-"},
"'foo-' is an invalid value for Name. Error: Must start and end with a"
" letter or numeral and contain only ascii numeric and '.', '_' and"
" '-'. see https://packaging.python.org/specifications/core-metadata",
),
# version errors.
(
{"metadata_version": "1.2", "name": "example"},
"'' is an invalid value for Version. Error: This field is required. see"
" https://packaging.python.org/specifications/core-metadata",
),
(
{"metadata_version": "1.2", "name": "example", "version": "dog"},
"'dog' is an invalid value for Version. Error: Must start and end with"
" a letter or numeral and contain only ascii numeric and '.', '_' and"
" '-'. see https://packaging.python.org/specifications/core-metadata",
),
],
)
def test_fails_invalid_post_data(
self, pyramid_config, db_request, post_data, message
):
pyramid_config.testing_securitypolicy(userid=1)
db_request.POST = MultiDict(post_data)
def foo(list_a, list_b):
results = (
User.query.filter(User.foo == "bar")
.filter( # Because foo.
db.or_(User.field_a.astext.in_(list_a), User.field_b.astext.in_(list_b))
)
.filter(User.xyz.is_(None))
# Another comment about the filtering on is_quux goes here.
.filter(db.not_(User.is_pending.astext.cast(db.Boolean).is_(True)))
.order_by(User.created_at.desc())
.with_for_update(key_share=True)
.all()
)
return results
def foo2(list_a, list_b):
# Standalone comment reasonably placed.
return (
User.query.filter(User.foo == "bar")
.filter(
db.or_(User.field_a.astext.in_(list_a), User.field_b.astext.in_(list_b))
)
.filter(User.xyz.is_(None))
)
def foo3(list_a, list_b):
return (
        # Standalone comment but weirdly placed.
User.query.filter(User.foo == "bar")
.filter(
db.or_(User.field_a.astext.in_(list_a), User.field_b.astext.in_(list_b))
)
.filter(User.xyz.is_(None))
)
| psf/black | tests/data/comments4.py | Python | mit | 3,531 |
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2017 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
from __future__ import print_function
from __future__ import absolute_import
import math
from psi4 import core
# Radii used by the auto-fragmenter, indexed by element symbol; the bonding
# criterion below scales them down by 1.5.
_AUTOFRAGMENT_RADII = {
    'H': 1.001, 'He': 1.012,
    'Li': 0.825, 'Be': 1.408, 'B': 1.485, 'C': 1.452, 'N': 1.397,
    'O': 1.342, 'F': 1.287, 'Ne': 1.243,
    'Na': 1.144, 'Mg': 1.364, 'Al': 1.639, 'Si': 1.716, 'P': 1.705,
    'S': 1.683, 'Cl': 1.639, 'Ar': 1.595,
}
def _autofragment_convert(p, symbol):
    # Finding radii for auto-fragmenter
    return _AUTOFRAGMENT_RADII[symbol[p]] / 1.5
def auto_fragments(**kwargs):
r"""Detects fragments if the user does not supply them.
Currently only used for the WebMO implementation of SAPT.
    :returns: :py:class:`~psi4.core.Molecule` |w--w| fragmented molecule.
:type molecule: :ref:`molecule <op_py_molecule>`
:param molecule: ``h2o`` || etc.
The target molecule, if not the last molecule defined.
:examples:
    >>> # [1] auto-detect the two HF monomers in a four-atom system
    >>> molecule mol {\nH 0.0 0.0 0.0\nH 2.0 0.0 0.0\nF 0.0 1.0 0.0\nF 2.0 1.0 0.0\n}
    >>> print(mol.nfragments())  # 1
    >>> fragmol = auto_fragments()
    >>> print(fragmol.nfragments())  # 2
"""
# Make sure the molecule the user provided is the active one
molecule = kwargs.pop('molecule', core.get_active_molecule())
molecule.update_geometry()
molname = molecule.name()
geom = molecule.save_string_xyz()
numatoms = molecule.natom()
VdW = [1.2, 1.7, 1.5, 1.55, 1.52, 1.9, 1.85, 1.8]
symbol = list(range(numatoms))
X = [0.0] * numatoms
Y = [0.0] * numatoms
Z = [0.0] * numatoms
Queue = []
White = []
Black = []
F = geom.split('\n')
for f in range(numatoms):
A = F[f + 1].split()
symbol[f] = A[0]
X[f] = float(A[1])
Y[f] = float(A[2])
Z[f] = float(A[3])
White.append(f)
Fragment = [[] for i in range(numatoms)] # stores fragments
start = 0 # starts with the first atom in the list
Queue.append(start)
White.remove(start)
frag = 0
    while (len(White) > 0) or (len(Queue) > 0):  # Iterates to the next fragment
        while len(Queue) > 0:  # BFS within a fragment
for u in Queue: # find all nearest Neighbors
# (still coloured white) to vertex u
for i in White:
Distance = math.sqrt((X[i] - X[u]) * (X[i] - X[u]) +
(Y[i] - Y[u]) * (Y[i] - Y[u]) +
(Z[i] - Z[u]) * (Z[i] - Z[u]))
if Distance < _autofragment_convert(u, symbol) + _autofragment_convert(i, symbol):
                        Queue.append(i)  # if you find one, put it in the queue
White.remove(i) # and remove it from the untouched list
Queue.remove(u) # remove focus from Queue
Black.append(u)
Fragment[frag].append(int(u + 1)) # add to group (adding 1 to start
# list at one instead of zero)
        if len(White) != 0:  # can't move White->Queue if no more exist
Queue.append(White[0])
White.remove(White[0])
frag += 1
new_geom = """\n"""
for i in Fragment[0]:
new_geom = new_geom + F[i].lstrip() + """\n"""
new_geom = new_geom + """--\n"""
for j in Fragment[1]:
new_geom = new_geom + F[j].lstrip() + """\n"""
new_geom = new_geom + """units angstrom\n"""
moleculenew = core.Molecule.create_molecule_from_string(new_geom)
moleculenew.set_name(molname)
moleculenew.update_geometry()
moleculenew.print_cluster()
core.print_out(""" Exiting auto_fragments\n""")
return moleculenew
| kratman/psi4public | psi4/driver/wrapper_autofrag.py | Python | gpl-2.0 | 5,232 |
from django.contrib.auth.views import LoginView, LogoutView
from django.core.urlresolvers import reverse_lazy
from django.views.generic import ListView, TemplateView, CreateView, UpdateView, DeleteView
from vms.entities.models import Volunteer, VolunteerSchedule, Location
from vms.volunteers.forms import AddScheduleForm, EditScheduleForm
class HomeView(TemplateView):
template_name = 'home.html'
class VolunteersView(ListView):
model = Volunteer
template_name = 'volunteers.html'
def get_queryset(self):
qs = super().get_queryset()
return qs.order_by('name')
class LocationsView(ListView):
model = Location
template_name = 'locations.html'
def get_queryset(self):
qs = super().get_queryset()
return qs.order_by('id')
class ScheduleView(ListView):
model = VolunteerSchedule
template_name = 'schedule.html'
def get_queryset(self):
qs = super().get_queryset()
return qs.order_by('projection__date')
class ScheduleAddView(CreateView):
model = VolunteerSchedule
form_class = AddScheduleForm
template_name = 'schedule_add.html'
success_url = reverse_lazy('volunteers:schedule')
class ScheduleEditView(UpdateView):
model = VolunteerSchedule
form_class = EditScheduleForm
template_name = 'schedule_edit.html'
success_url = reverse_lazy('volunteers:schedule')
class ScheduleDeleteView(DeleteView):
model = VolunteerSchedule
template_name = 'schedule_delete.html'
success_url = reverse_lazy('volunteers:schedule')
class ScheduleApiView(ListView):
model = VolunteerSchedule
template_name = 'schedule_api'
def get_queryset(self):
qs = super().get_queryset()
return qs.current().order_by('projection__date')
class MockSchedule(TemplateView):
template_name = 'schedule_mock.html'
class VMSLogin(LoginView):
template_name = 'login.html'
class VMSLogout(LogoutView):
template_name = 'logout.html'
| mitzaM/VolunteerManagementSystem | vms/volunteers/views.py | Python | gpl-3.0 | 1,982 |
from __future__ import absolute_import
import string
import math
print("Hello %r" % math.sqrt(2))
print(string.capwords('the quick brown fox'))
| storborg/futurify | futurify/tests/desired/simple.py | Python | mit | 147 |
from lib.actions import BaseAction
__all__ = [
'ListVehiclesAction'
]
class ListVehiclesAction(BaseAction):
def run(self):
return self.formatter.formatter(
self.connection.vehicles())
| pidah/st2contrib | packs/tesla/actions/list_vehicles.py | Python | apache-2.0 | 215 |
"""
Arithmetic operations for PandasObjects
This is not a public API.
"""
import operator
from typing import TYPE_CHECKING, Optional, Set
import warnings
import numpy as np
from pandas._libs.ops_dispatch import maybe_dispatch_ufunc_to_dunder_op # noqa:F401
from pandas._typing import Level
from pandas.util._decorators import Appender
from pandas.core.dtypes.common import is_array_like, is_list_like
from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries
from pandas.core.dtypes.missing import isna
from pandas.core import algorithms
from pandas.core.ops.array_ops import ( # noqa:F401
arithmetic_op,
comp_method_OBJECT_ARRAY,
comparison_op,
get_array_op,
logical_op,
)
from pandas.core.ops.common import ( # noqa:F401
get_op_result_name,
unpack_zerodim_and_defer,
)
from pandas.core.ops.docstrings import (
_flex_comp_doc_FRAME,
_op_descriptions,
make_flex_doc,
)
from pandas.core.ops.invalid import invalid_comparison # noqa:F401
from pandas.core.ops.mask_ops import kleene_and, kleene_or, kleene_xor # noqa: F401
from pandas.core.ops.methods import add_flex_arithmetic_methods # noqa:F401
from pandas.core.ops.roperator import ( # noqa:F401
radd,
rand_,
rdiv,
rdivmod,
rfloordiv,
rmod,
rmul,
ror_,
rpow,
rsub,
rtruediv,
rxor,
)
if TYPE_CHECKING:
from pandas import DataFrame, Series
# -----------------------------------------------------------------------------
# constants
ARITHMETIC_BINOPS: Set[str] = {
"add",
"sub",
"mul",
"pow",
"mod",
"floordiv",
"truediv",
"divmod",
"radd",
"rsub",
"rmul",
"rpow",
"rmod",
"rfloordiv",
"rtruediv",
"rdivmod",
}
COMPARISON_BINOPS: Set[str] = {"eq", "ne", "lt", "gt", "le", "ge"}
# -----------------------------------------------------------------------------
# Masking NA values and fallbacks for operations numpy does not support
def fill_binop(left, right, fill_value):
"""
If a non-None fill_value is given, replace null entries in left and right
with this value, but only in positions where _one_ of left/right is null,
not both.
Parameters
----------
left : array-like
right : array-like
fill_value : object
Returns
-------
left : array-like
right : array-like
Notes
-----
Makes copies if fill_value is not None and NAs are present.
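    Examples
    --------
    A minimal illustration (hypothetical values; assumes the usual
    ``import pandas as pd`` / ``import numpy as np`` aliases):
    >>> left = pd.Series([1.0, np.nan, 3.0])
    >>> right = pd.Series([np.nan, 2.0, np.nan])
    >>> filled_left, filled_right = fill_binop(left, right, fill_value=0)
    >>> filled_left.tolist()
    [1.0, 0.0, 3.0]
    >>> filled_right.tolist()
    [0.0, 2.0, 0.0]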
"""
if fill_value is not None:
left_mask = isna(left)
right_mask = isna(right)
# one but not both
mask = left_mask ^ right_mask
if left_mask.any():
# Avoid making a copy if we can
left = left.copy()
left[left_mask & mask] = fill_value
if right_mask.any():
# Avoid making a copy if we can
right = right.copy()
right[right_mask & mask] = fill_value
return left, right
# -----------------------------------------------------------------------------
# Series
def align_method_SERIES(left: "Series", right, align_asobject: bool = False):
""" align lhs and rhs Series """
# ToDo: Different from align_method_FRAME, list, tuple and ndarray
# are not coerced here
# because Series has inconsistencies described in #13637
if isinstance(right, ABCSeries):
# avoid repeated alignment
if not left.index.equals(right.index):
if align_asobject:
# to keep original value's dtype for bool ops
left = left.astype(object)
right = right.astype(object)
left, right = left.align(right, copy=False)
return left, right
def flex_method_SERIES(op):
name = op.__name__.strip("_")
doc = make_flex_doc(name, "series")
@Appender(doc)
def flex_wrapper(self, other, level=None, fill_value=None, axis=0):
# validate axis
if axis is not None:
self._get_axis_number(axis)
res_name = get_op_result_name(self, other)
if isinstance(other, ABCSeries):
return self._binop(other, op, level=level, fill_value=fill_value)
elif isinstance(other, (np.ndarray, list, tuple)):
if len(other) != len(self):
raise ValueError("Lengths must be equal")
other = self._constructor(other, self.index)
result = self._binop(other, op, level=level, fill_value=fill_value)
result.name = res_name
return result
else:
if fill_value is not None:
self = self.fillna(fill_value)
return op(self, other)
flex_wrapper.__name__ = name
return flex_wrapper
# -----------------------------------------------------------------------------
# DataFrame
def align_method_FRAME(
left, right, axis, flex: Optional[bool] = False, level: Level = None
):
"""
Convert rhs to meet lhs dims if input is list, tuple or np.ndarray.
Parameters
----------
left : DataFrame
right : Any
axis: int, str, or None
flex: bool or None, default False
Whether this is a flex op, in which case we reindex.
None indicates not to check for alignment.
level : int or level name, default None
Returns
-------
left : DataFrame
right : Any
"""
def to_series(right):
msg = "Unable to coerce to Series, length must be {req_len}: given {given_len}"
if axis is not None and left._get_axis_name(axis) == "index":
if len(left.index) != len(right):
raise ValueError(
msg.format(req_len=len(left.index), given_len=len(right))
)
right = left._constructor_sliced(right, index=left.index)
else:
if len(left.columns) != len(right):
raise ValueError(
msg.format(req_len=len(left.columns), given_len=len(right))
)
right = left._constructor_sliced(right, index=left.columns)
return right
if isinstance(right, np.ndarray):
if right.ndim == 1:
right = to_series(right)
elif right.ndim == 2:
if right.shape == left.shape:
right = left._constructor(right, index=left.index, columns=left.columns)
elif right.shape[0] == left.shape[0] and right.shape[1] == 1:
# Broadcast across columns
right = np.broadcast_to(right, left.shape)
right = left._constructor(right, index=left.index, columns=left.columns)
elif right.shape[1] == left.shape[1] and right.shape[0] == 1:
# Broadcast along rows
right = to_series(right[0, :])
else:
raise ValueError(
"Unable to coerce to DataFrame, shape "
f"must be {left.shape}: given {right.shape}"
)
elif right.ndim > 2:
raise ValueError(
"Unable to coerce to Series/DataFrame, "
f"dimension must be <= 2: {right.shape}"
)
elif is_list_like(right) and not isinstance(right, (ABCSeries, ABCDataFrame)):
# GH 36702. Raise when attempting arithmetic with list of array-like.
if any(is_array_like(el) for el in right):
raise ValueError(
f"Unable to coerce list of {type(right[0])} to Series/DataFrame"
)
# GH17901
right = to_series(right)
if flex is not None and isinstance(right, ABCDataFrame):
if not left._indexed_same(right):
if flex:
left, right = left.align(right, join="outer", level=level, copy=False)
else:
raise ValueError(
"Can only compare identically-labeled DataFrame objects"
)
elif isinstance(right, ABCSeries):
# axis=1 is default for DataFrame-with-Series op
axis = left._get_axis_number(axis) if axis is not None else 1
if not flex:
if not left.axes[axis].equals(right.index):
warnings.warn(
"Automatic reindexing on DataFrame vs Series comparisons "
"is deprecated and will raise ValueError in a future version. "
"Do `left, right = left.align(right, axis=1, copy=False)` "
"before e.g. `left == right`",
FutureWarning,
stacklevel=5,
)
left, right = left.align(
right, join="outer", axis=axis, level=level, copy=False
)
right = _maybe_align_series_as_frame(left, right, axis)
return left, right
def should_reindex_frame_op(
left: "DataFrame", right, op, axis, default_axis, fill_value, level
) -> bool:
"""
Check if this is an operation between DataFrames that will need to reindex.
"""
assert isinstance(left, ABCDataFrame)
if op is operator.pow or op is rpow:
# GH#32685 pow has special semantics for operating with null values
return False
if not isinstance(right, ABCDataFrame):
return False
if fill_value is None and level is None and axis is default_axis:
# TODO: any other cases we should handle here?
# Intersection is always unique so we have to check the unique columns
left_uniques = left.columns.unique()
right_uniques = right.columns.unique()
cols = left_uniques.intersection(right_uniques)
if len(cols) and not (cols.equals(left_uniques) and cols.equals(right_uniques)):
# TODO: is there a shortcut available when len(cols) == 0?
return True
return False
def frame_arith_method_with_reindex(
left: "DataFrame", right: "DataFrame", op
) -> "DataFrame":
"""
For DataFrame-with-DataFrame operations that require reindexing,
operate only on shared columns, then reindex.
Parameters
----------
left : DataFrame
right : DataFrame
op : binary operator
Returns
-------
DataFrame
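    Examples
    --------
    Illustrative sketch (hypothetical frames): the op runs only on the shared
    column ``"b"``; the outer-join column set is restored afterwards, with the
    unshared columns filled with NaN.
    >>> df1 = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
    >>> df2 = pd.DataFrame({"b": [10, 20], "c": [30, 40]})
    >>> df1.add(df2).columns.tolist()
    ['a', 'b', 'c']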
"""
# GH#31623, only operate on shared columns
cols, lcols, rcols = left.columns.join(
right.columns, how="inner", level=None, return_indexers=True
)
new_left = left.iloc[:, lcols]
new_right = right.iloc[:, rcols]
result = op(new_left, new_right)
# Do the join on the columns instead of using align_method_FRAME
# to avoid constructing two potentially large/sparse DataFrames
join_columns, _, _ = left.columns.join(
right.columns, how="outer", level=None, return_indexers=True
)
if result.columns.has_duplicates:
# Avoid reindexing with a duplicate axis.
# https://github.com/pandas-dev/pandas/issues/35194
indexer, _ = result.columns.get_indexer_non_unique(join_columns)
indexer = algorithms.unique1d(indexer)
result = result._reindex_with_indexers(
{1: [join_columns, indexer]}, allow_dups=True
)
else:
result = result.reindex(join_columns, axis=1)
return result
def _maybe_align_series_as_frame(frame: "DataFrame", series: "Series", axis: int):
"""
If the Series operand is not EA-dtype, we can broadcast to 2D and operate
blockwise.
"""
rvalues = series._values
if not isinstance(rvalues, np.ndarray):
# TODO(EA2D): no need to special-case with 2D EAs
if rvalues.dtype == "datetime64[ns]" or rvalues.dtype == "timedelta64[ns]":
# We can losslessly+cheaply cast to ndarray
rvalues = np.asarray(rvalues)
else:
return series
if axis == 0:
rvalues = rvalues.reshape(-1, 1)
else:
rvalues = rvalues.reshape(1, -1)
rvalues = np.broadcast_to(rvalues, frame.shape)
return type(frame)(rvalues, index=frame.index, columns=frame.columns)
def flex_arith_method_FRAME(op):
op_name = op.__name__.strip("_")
default_axis = "columns"
na_op = get_array_op(op)
doc = make_flex_doc(op_name, "dataframe")
@Appender(doc)
def f(self, other, axis=default_axis, level=None, fill_value=None):
if should_reindex_frame_op(
self, other, op, axis, default_axis, fill_value, level
):
return frame_arith_method_with_reindex(self, other, op)
if isinstance(other, ABCSeries) and fill_value is not None:
# TODO: We could allow this in cases where we end up going
# through the DataFrame path
raise NotImplementedError(f"fill_value {fill_value} not supported.")
axis = self._get_axis_number(axis) if axis is not None else 1
self, other = align_method_FRAME(self, other, axis, flex=True, level=level)
if isinstance(other, ABCDataFrame):
# Another DataFrame
new_data = self._combine_frame(other, na_op, fill_value)
elif isinstance(other, ABCSeries):
new_data = self._dispatch_frame_op(other, op, axis=axis)
else:
# in this case we always have `np.ndim(other) == 0`
if fill_value is not None:
self = self.fillna(fill_value)
new_data = self._dispatch_frame_op(other, op)
return self._construct_result(new_data)
f.__name__ = op_name
return f
def flex_comp_method_FRAME(op):
op_name = op.__name__.strip("_")
default_axis = "columns" # because we are "flex"
doc = _flex_comp_doc_FRAME.format(
op_name=op_name, desc=_op_descriptions[op_name]["desc"]
)
@Appender(doc)
def f(self, other, axis=default_axis, level=None):
axis = self._get_axis_number(axis) if axis is not None else 1
self, other = align_method_FRAME(self, other, axis, flex=True, level=level)
new_data = self._dispatch_frame_op(other, op, axis=axis)
return self._construct_result(new_data)
f.__name__ = op_name
return f
| jreback/pandas | pandas/core/ops/__init__.py | Python | bsd-3-clause | 13,971 |
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd.
# MIT License. See license.txt
test_records = [] | rohitw1991/latestadbwnf | core/doctype/workflow_state/test_workflow_state.py | Python | mit | 104 |
#----------------------------------------------------------------------
# VMouse - OpenCV Virtual Mouse (HCI)
# Copyright (C) 2014 Kunal Dawn <[email protected]>
# Copyright (C) 2014 Medha Devaraj <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#----------------------------------------------------------------------
#!/usr/bin/python
from socket import *
import subprocess
localAddr = ('localhost', 8889)
sock = socket(AF_INET, SOCK_STREAM)
sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
sock.bind(localAddr)
sock.listen(5)
data = ""
while 1:
(client, address) = sock.accept()
print 'CONNECTED TO : ', address
while 1:
data = client.recv(1024)
if not len(data):
break
values = data.split(",")
if(len(values) == 2):
print values[0]
subprocess.call(["xdotool", values[0], values[1]])
else:
print 'double click'
subprocess.call(["xdotool", values[0], values[2]])
subprocess.call(["xdotool", values[1], values[2]])
| kunaldawn/vmouse-opencv-qt-hand-gesture-hci | servers/serverMouseEvent.py | Python | gpl-3.0 | 1,649 |
# Dynamic Programming for 0-1 Knapsack with dominance
# By James Lao.
#!/usr/bin/env python3
from collections import deque
import sys
import time
INF = float("inf")
def knapsack(vw, limit, n):
vw = sorted(vw, key=lambda x: x[1], reverse=True) # Accelerate
A = deque([(0, 0)])
for i in range(0, n):
        B = deque()  # all possibilities after adding one new item
        for item in A:
            if item[1] + vw[i][1] > limit:  # A is sorted by weight, so stop early
                break
            B.append((item[0] + vw[i][0], item[1] + vw[i][1]))
        level, merge = -1, deque()  # best value kept so far; the bar keeps going up
        while A or B:  # merge the two weight-sorted queues
            ia, ib = A[0][1] if A else INF, B[0][1] if B else INF
            x = A.popleft() if (ia < ib) else B.popleft()
            if x[0] > level:  # keep only non-dominated states: value must increase
                merge.append(x)
                level = x[0]
A = merge
return A[-1]
if __name__ == "__main__":
with open(sys.argv[1] if len(sys.argv) > 1 else sys.exit(1)) as f:
limit, n = map(int, f.readline().split())
vw = [tuple(map(int, ln.split())) for ln in f.readlines()]
start = time.time()
A = knapsack(vw, limit, n)
end = time.time()
print("Max value:", A[0])
print("Total weight:", A[1])
print(end - start)
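# Illustrative direct call with made-up (value, weight) items and limit 50:
# the best subset is (100, 20) + (120, 30), so the heaviest non-dominated
# state returned is (220, 50).
#
#   >>> knapsack([(60, 10), (100, 20), (120, 30)], 50, 3)
#   (220, 50)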
| jameslao/Knapsack-in-Python | knapsack_dp_dominance.py | Python | mit | 1,294 |
# This file handles differences between BGL and PyOpenGL, and provides various
# utility functions for OpenGL
try:
from bgl import *
USING_BGL = True
except ImportError:
from OpenGL.GL import *
from OpenGL.GLU import *
from bgl import Buffer
if USING_BGL:
_glGenTextures = glGenTextures
def glGenTextures(n, textures=None):
id_buf = Buffer(GL_INT, n)
_glGenTextures(n, id_buf)
if textures:
textures.extend(id_buf.to_list())
return id_buf.to_list()[0] if n == 1 else id_buf.to_list()
_glDeleteTextures = glDeleteTextures
def glDeleteTextures(textures):
n = len(textures)
id_buf = Buffer(GL_INT, n, textures)
_glDeleteTextures(n, id_buf)
_glGetIntegerv = glGetIntegerv
def glGetIntegerv(pname):
# Only used for GL_VIEWPORT right now, so assume we want a size 4 Buffer
buf = Buffer(GL_INT, 4)
_glGetIntegerv(pname, buf)
return buf.to_list()
else:
_glTexImage2D = glTexImage2D
def glTexImage2D(target, level, internalFormat, width, height, border, format, type, data):
_glTexImage2D(target, level, internalFormat, width, height,
border, format, type, data.to_list())
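# Illustrative usage of the unified wrappers above (hypothetical calls; both
# the BGL and the PyOpenGL code paths accept these forms):
#
#   tex_id = glGenTextures(1)              # single id when n == 1
#   viewport = glGetIntegerv(GL_VIEWPORT)  # [x, y, width, height]
#   glDeleteTextures([tex_id])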
| Alwnikrotikz/bgui | bgui/gl_utils.py | Python | mit | 1,115 |
"""Test calling functions in static methods with a stripped binary."""
import os, sys, time
import unittest2
import lldb
import lldbutil
from lldbtest import *
class TestObjCStaticMethodStripped(TestBase):
mydir = os.path.join("lang", "objc", "objc-static-method-stripped")
@unittest2.skipUnless(sys.platform.startswith("darwin"), "requires Darwin")
@python_api_test
#<rdar://problem/12042992>
@dsym_test
def test_with_dsym_and_python_api(self):
"""Test calling functions in static methods with a stripped binary."""
if self.getArchitecture() == 'i386':
self.skipTest("requires modern objc runtime")
self.buildDsym()
self.objc_static_method_stripped()
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line numbers to break inside main().
self.main_source = "static.m"
self.break_line = line_number(self.main_source, '// Set breakpoint here.')
#<rdar://problem/12042992>
def objc_static_method_stripped(self):
"""Test calling functions in static methods with a stripped binary."""
exe = os.path.join(os.getcwd(), "a.out.stripped")
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
bpt = target.BreakpointCreateByLocation(self.main_source, self.break_line)
self.assertTrue(bpt, VALID_BREAKPOINT)
# Now launch the process, and do not stop at entry point.
process = target.LaunchSimple (None, None, os.getcwd())
self.assertTrue(process, PROCESS_IS_VALID)
# The stop reason of the thread should be breakpoint.
thread_list = lldbutil.get_threads_stopped_at_breakpoint (process, bpt)
# Make sure we stopped at the first breakpoint.
self.assertTrue (len(thread_list) != 0, "No thread stopped at our breakpoint.")
self.assertTrue (len(thread_list) == 1, "More than one thread stopped at our breakpoint.")
# Now make sure we can call a function in the static method we've stopped in.
frame = thread_list[0].GetFrameAtIndex(0)
self.assertTrue (frame, "Got a valid frame 0 frame.")
cmd_value = frame.EvaluateExpression ("(char *) sel_getName (_cmd)")
self.assertTrue (cmd_value.IsValid())
sel_name = cmd_value.GetSummary()
self.assertTrue (sel_name == "\"doSomethingWithString:\"", "Got the right value for the selector as string.")
cmd_value = frame.EvaluateExpression ("[Foo doSomethingElseWithString:string]")
self.assertTrue (cmd_value.IsValid())
string_length = cmd_value.GetValueAsUnsigned()
self.assertTrue (string_length == 27, "Got the right value from another class method on the same class.")
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lambda: lldb.SBDebugger.Terminate())
unittest2.main()
| s20121035/rk3288_android5.1_repo | external/lldb/test/lang/objc/objc-static-method-stripped/TestObjCStaticMethodStripped.py | Python | gpl-3.0 | 2,939 |
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# Portions of the following are derived from the compat.py file in
# Twisted, under the following copyright:
#
# Copyright (c) 2001-2004 Twisted Matrix Laboratories
__doc__ = """
Compatibility idioms for builtins names
This module adds names to the builtins module for things that we want
to use in SCons but which don't show up until later Python versions than
the earliest ones we support.
This module checks for the following builtins names:
all()
any()
sorted()
memoryview()
Implementations of functions are *NOT* guaranteed to be fully compliant
with these functions in later versions of Python. We are only concerned
with adding functionality that we actually use in SCons, so be wary
if you lift this code for other uses. (That said, making these more
nearly the same as later, official versions is still a desirable goal,
we just don't need to be obsessive about it.)
If you're looking at this with pydoc and various names don't show up in
the FUNCTIONS or DATA output, that means those names are already built in
to this version of Python and we don't need to add them from this module.
"""
__revision__ = "src/engine/SCons/compat/_scons_builtins.py 5357 2011/09/09 21:31:03 bdeegan"
import builtins
try:
all
except NameError:
# Pre-2.5 Python has no all() function.
def all(iterable):
"""
Returns True if all elements of the iterable are true.
"""
for element in iterable:
if not element:
return False
return True
builtins.all = all
all = all
try:
any
except NameError:
# Pre-2.5 Python has no any() function.
def any(iterable):
"""
Returns True if any element of the iterable is true.
"""
for element in iterable:
if element:
return True
return False
builtins.any = any
any = any
try:
memoryview
except NameError:
# Pre-2.7 doesn't have the memoryview() built-in.
class memoryview(object):
def __init__(self, obj):
# wrapping buffer in () keeps the fixer from changing it
self.obj = (buffer)(obj)
def __getitem__(self, indx):
if isinstance(indx, slice):
return self.obj[indx.start:indx.stop]
else:
return self.obj[indx]
builtins.memoryview = memoryview
try:
sorted
except NameError:
# Pre-2.4 Python has no sorted() function.
#
# The pre-2.4 Python list.sort() method does not support
# list.sort(key=) nor list.sort(reverse=) keyword arguments, so
# we must implement the functionality of those keyword arguments
# by hand instead of passing them to list.sort().
def sorted(iterable, cmp=None, key=None, reverse=False):
if key is not None:
result = [(key(x), x) for x in iterable]
else:
result = iterable[:]
if cmp is None:
# Pre-2.3 Python does not support list.sort(None).
result.sort()
else:
result.sort(cmp)
if key is not None:
result = [t1 for t0,t1 in result]
if reverse:
result.reverse()
return result
builtins.sorted = sorted
#if sys.version_info[:3] in ((2, 2, 0), (2, 2, 1)):
# def lstrip(s, c=string.whitespace):
# while s and s[0] in c:
# s = s[1:]
# return s
# def rstrip(s, c=string.whitespace):
# while s and s[-1] in c:
# s = s[:-1]
# return s
# def strip(s, c=string.whitespace, l=lstrip, r=rstrip):
# return l(r(s, c), c)
#
# object.__setattr__(str, 'lstrip', lstrip)
# object.__setattr__(str, 'rstrip', rstrip)
# object.__setattr__(str, 'strip', strip)
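# Illustrative effect (hypothetical interactive session): once this module has
# been imported, the patched-in names behave like their modern counterparts.
#
#   >>> sorted([3, 1, 2], reverse=True)
#   [3, 2, 1]
#   >>> any([0, '', None, 1])
#   True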
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| ff94315/hiwifi-openwrt-HC5661-HC5761 | staging_dir/host/lib64/scons-2.1.0/SCons/compat/_scons_builtins.py | Python | gpl-2.0 | 5,045 |
# Copyright 2020 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Produces reference environments for rodent tasks."""
import functools
from dm_control import composer
from dm_control.composer.variation import distributions
from dm_control.locomotion.arenas import bowl
from dm_control.locomotion.arenas import corridors as corr_arenas
from dm_control.locomotion.arenas import floors
from dm_control.locomotion.arenas import labmaze_textures
from dm_control.locomotion.arenas import mazes
from dm_control.locomotion.props import target_sphere
from dm_control.locomotion.tasks import corridors as corr_tasks
from dm_control.locomotion.tasks import escape
from dm_control.locomotion.tasks import random_goal_maze
from dm_control.locomotion.tasks import reach
from dm_control.locomotion.walkers import rodent
_CONTROL_TIMESTEP = 0.02
_PHYSICS_TIMESTEP = 0.001
def rodent_escape_bowl(random_state=None):
"""Requires a rodent to climb out of a bowl-shaped terrain."""
# Build a position-controlled rodent walker.
walker = rodent.Rat(
observable_options={'egocentric_camera': dict(enabled=True)})
# Build a bowl-shaped arena.
arena = bowl.Bowl(
size=(20., 20.),
aesthetic='outdoor_natural')
# Build a task that rewards the agent for being far from the origin.
task = escape.Escape(
walker=walker,
arena=arena,
physics_timestep=_PHYSICS_TIMESTEP,
control_timestep=_CONTROL_TIMESTEP)
return composer.Environment(time_limit=20,
task=task,
random_state=random_state,
strip_singleton_obs_buffer_dim=True)
def rodent_run_gaps(random_state=None):
"""Requires a rodent to run down a corridor with gaps."""
# Build a position-controlled rodent walker.
walker = rodent.Rat(
observable_options={'egocentric_camera': dict(enabled=True)})
# Build a corridor-shaped arena with gaps, where the sizes of the gaps and
# platforms are uniformly randomized.
arena = corr_arenas.GapsCorridor(
platform_length=distributions.Uniform(.4, .8),
gap_length=distributions.Uniform(.05, .2),
corridor_width=2,
corridor_length=40,
aesthetic='outdoor_natural')
# Build a task that rewards the agent for running down the corridor at a
# specific velocity.
task = corr_tasks.RunThroughCorridor(
walker=walker,
arena=arena,
walker_spawn_position=(5, 0, 0),
walker_spawn_rotation=0,
target_velocity=1.0,
contact_termination=False,
terminate_at_height=-0.3,
physics_timestep=_PHYSICS_TIMESTEP,
control_timestep=_CONTROL_TIMESTEP)
return composer.Environment(time_limit=30,
task=task,
random_state=random_state,
strip_singleton_obs_buffer_dim=True)
def rodent_maze_forage(random_state=None):
"""Requires a rodent to find all items in a maze."""
# Build a position-controlled rodent walker.
walker = rodent.Rat(
observable_options={'egocentric_camera': dict(enabled=True)})
# Build a maze with rooms and targets.
wall_textures = labmaze_textures.WallTextures(style='style_01')
arena = mazes.RandomMazeWithTargets(
x_cells=11,
y_cells=11,
xy_scale=.5,
z_height=.3,
max_rooms=4,
room_min_size=4,
room_max_size=5,
spawns_per_room=1,
targets_per_room=3,
wall_textures=wall_textures,
aesthetic='outdoor_natural')
# Build a task that rewards the agent for obtaining targets.
task = random_goal_maze.ManyGoalsMaze(
walker=walker,
maze_arena=arena,
target_builder=functools.partial(
target_sphere.TargetSphere,
radius=0.05,
height_above_ground=.125,
rgb1=(0, 0, 0.4),
rgb2=(0, 0, 0.7)),
target_reward_scale=50.,
contact_termination=False,
physics_timestep=_PHYSICS_TIMESTEP,
control_timestep=_CONTROL_TIMESTEP)
return composer.Environment(time_limit=30,
task=task,
random_state=random_state,
strip_singleton_obs_buffer_dim=True)
def rodent_two_touch(random_state=None):
"""Requires a rodent to tap an orb, wait an interval, and tap it again."""
# Build a position-controlled rodent walker.
walker = rodent.Rat(
observable_options={'egocentric_camera': dict(enabled=True)})
# Build an open floor arena
arena = floors.Floor(
size=(10., 10.),
aesthetic='outdoor_natural')
# Build a task that rewards the walker for touching/reaching orbs with a
# specific time interval between touches
task = reach.TwoTouch(
walker=walker,
arena=arena,
target_builders=[
functools.partial(target_sphere.TargetSphereTwoTouch, radius=0.025),
],
randomize_spawn_rotation=True,
target_type_rewards=[25.],
shuffle_target_builders=False,
target_area=(1.5, 1.5),
physics_timestep=_PHYSICS_TIMESTEP,
control_timestep=_CONTROL_TIMESTEP,
)
return composer.Environment(time_limit=30,
task=task,
random_state=random_state,
strip_singleton_obs_buffer_dim=True)
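if __name__ == '__main__':
  # Minimal smoke-test sketch (not part of the reference API; assumes the
  # rodent assets are available): build one environment and drive it with
  # uniform random actions for a few steps.
  env = rodent_escape_bowl()
  spec = env.action_spec()
  timestep = env.reset()
  for _ in range(10):
    action = np.random.uniform(spec.minimum, spec.maximum, size=spec.shape)
    timestep = env.step(action)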
| deepmind/dm_control | dm_control/locomotion/examples/basic_rodent_2020.py | Python | apache-2.0 | 5,924 |
# -*- coding: utf-8 -*-
a = input('please input your age:')
age = int(a)
if age >= 20:
    print('adult')
elif age >= 6:
    print('teenager')
else:
    print('kid')
| liysky/Python_life | Grammar - demos/age.py | Python | gpl-3.0 | 162 |
call = lambda b:(
b,
1, # Break here
)
call(1)
call(2)
print('TEST SUCEEDED') | fabioz/PyDev.Debugger | tests_python/resources/_debugger_case_lambda_multiline.py | Python | epl-1.0 | 99 |
# -*- coding: utf-8 -*-
{
'name': "website_register_b2b",
'summary': """
Registration form for site purchases """,
'description': """
Registration form for site purchases
""",
'author': "Alexsandro Haag <[email protected]>, HGSOFT",
'website': "http://www.hgsoft.com.br",
'category': 'Website',
'version': '10.0.1',
'depends': ['base','website_sale'],
'data': [
'views/register.xml',
],
'installable': True,
'auto_install': False,
}
| alexsandrohaag/odoo-website-addons | website_register_b2b/__manifest__.py | Python | agpl-3.0 | 514 |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyUnittest2py3k(PythonPackage):
"""unittest2 is a backport of the new features added to the unittest
testing framework in Python 2.7 and 3.2. This is a Python 3 compatible
version of unittest2."""
homepage = "https://pypi.python.org/pypi/unittest2py3k"
url = "https://pypi.io/packages/source/u/unittest2py3k/unittest2py3k-0.5.1.tar.gz"
version('0.5.1', '8824ff92044310d9365f90d892bf0f09')
depends_on('python@3:')
depends_on('py-setuptools', type='build')
| krafczyk/spack | var/spack/repos/builtin/packages/py-unittest2py3k/package.py | Python | lgpl-2.1 | 1,758 |
from django.contrib.admin.utils import quote
from django.utils.encoding import force_str
from django.utils.translation import ugettext as _
class ButtonHelper:
default_button_classnames = ['button']
add_button_classnames = ['bicolor', 'icon', 'icon-plus']
inspect_button_classnames = []
edit_button_classnames = []
delete_button_classnames = ['no']
def __init__(self, view, request):
self.view = view
self.request = request
self.model = view.model
self.opts = view.model._meta
self.verbose_name = force_str(self.opts.verbose_name)
self.verbose_name_plural = force_str(self.opts.verbose_name_plural)
self.permission_helper = view.permission_helper
self.url_helper = view.url_helper
def finalise_classname(self, classnames_add=None, classnames_exclude=None):
if classnames_add is None:
classnames_add = []
if classnames_exclude is None:
classnames_exclude = []
combined = self.default_button_classnames + classnames_add
finalised = [cn for cn in combined if cn not in classnames_exclude]
return ' '.join(finalised)
def add_button(self, classnames_add=None, classnames_exclude=None):
if classnames_add is None:
classnames_add = []
if classnames_exclude is None:
classnames_exclude = []
classnames = self.add_button_classnames + classnames_add
cn = self.finalise_classname(classnames, classnames_exclude)
return {
'url': self.url_helper.create_url,
'label': _('Add %s') % self.verbose_name,
'classname': cn,
'title': _('Add a new %s') % self.verbose_name,
}
def inspect_button(self, pk, classnames_add=None, classnames_exclude=None):
if classnames_add is None:
classnames_add = []
if classnames_exclude is None:
classnames_exclude = []
classnames = self.inspect_button_classnames + classnames_add
cn = self.finalise_classname(classnames, classnames_exclude)
return {
'url': self.url_helper.get_action_url('inspect', quote(pk)),
'label': _('Inspect'),
'classname': cn,
'title': _('Inspect this %s') % self.verbose_name,
}
def edit_button(self, pk, classnames_add=None, classnames_exclude=None):
if classnames_add is None:
classnames_add = []
if classnames_exclude is None:
classnames_exclude = []
classnames = self.edit_button_classnames + classnames_add
cn = self.finalise_classname(classnames, classnames_exclude)
return {
'url': self.url_helper.get_action_url('edit', quote(pk)),
'label': _('Edit'),
'classname': cn,
'title': _('Edit this %s') % self.verbose_name,
}
def delete_button(self, pk, classnames_add=None, classnames_exclude=None):
if classnames_add is None:
classnames_add = []
if classnames_exclude is None:
classnames_exclude = []
classnames = self.delete_button_classnames + classnames_add
cn = self.finalise_classname(classnames, classnames_exclude)
return {
'url': self.url_helper.get_action_url('delete', quote(pk)),
'label': _('Delete'),
'classname': cn,
'title': _('Delete this %s') % self.verbose_name,
}
def get_buttons_for_obj(self, obj, exclude=None, classnames_add=None,
classnames_exclude=None):
if exclude is None:
exclude = []
if classnames_add is None:
classnames_add = []
if classnames_exclude is None:
classnames_exclude = []
ph = self.permission_helper
usr = self.request.user
pk = getattr(obj, self.opts.pk.attname)
btns = []
if('inspect' not in exclude and ph.user_can_inspect_obj(usr, obj)):
btns.append(
self.inspect_button(pk, classnames_add, classnames_exclude)
)
if('edit' not in exclude and ph.user_can_edit_obj(usr, obj)):
btns.append(
self.edit_button(pk, classnames_add, classnames_exclude)
)
if('delete' not in exclude and ph.user_can_delete_obj(usr, obj)):
btns.append(
self.delete_button(pk, classnames_add, classnames_exclude)
)
return btns
class PageButtonHelper(ButtonHelper):
unpublish_button_classnames = []
copy_button_classnames = []
def unpublish_button(self, pk, classnames_add=None, classnames_exclude=None):
if classnames_add is None:
classnames_add = []
if classnames_exclude is None:
classnames_exclude = []
classnames = self.unpublish_button_classnames + classnames_add
cn = self.finalise_classname(classnames, classnames_exclude)
return {
'url': self.url_helper.get_action_url('unpublish', quote(pk)),
'label': _('Unpublish'),
'classname': cn,
'title': _('Unpublish this %s') % self.verbose_name,
}
def copy_button(self, pk, classnames_add=None, classnames_exclude=None):
if classnames_add is None:
classnames_add = []
if classnames_exclude is None:
classnames_exclude = []
classnames = self.copy_button_classnames + classnames_add
cn = self.finalise_classname(classnames, classnames_exclude)
return {
'url': self.url_helper.get_action_url('copy', quote(pk)),
'label': _('Copy'),
'classname': cn,
'title': _('Copy this %s') % self.verbose_name,
}
def get_buttons_for_obj(self, obj, exclude=None, classnames_add=None,
classnames_exclude=None):
if exclude is None:
exclude = []
if classnames_add is None:
classnames_add = []
if classnames_exclude is None:
classnames_exclude = []
ph = self.permission_helper
usr = self.request.user
pk = getattr(obj, self.opts.pk.attname)
btns = []
if('inspect' not in exclude and ph.user_can_inspect_obj(usr, obj)):
btns.append(
self.inspect_button(pk, classnames_add, classnames_exclude)
)
if('edit' not in exclude and ph.user_can_edit_obj(usr, obj)):
btns.append(
self.edit_button(pk, classnames_add, classnames_exclude)
)
if('copy' not in exclude and ph.user_can_copy_obj(usr, obj)):
btns.append(
self.copy_button(pk, classnames_add, classnames_exclude)
)
if('unpublish' not in exclude and ph.user_can_unpublish_obj(usr, obj)):
btns.append(
self.unpublish_button(pk, classnames_add, classnames_exclude)
)
if('delete' not in exclude and ph.user_can_delete_obj(usr, obj)):
btns.append(
self.delete_button(pk, classnames_add, classnames_exclude)
)
return btns
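# Illustrative customisation sketch (hypothetical subclass, not part of this
# module): the classname lists are plain class attributes, so button styling
# can be changed by overriding them in a helper subclass.
#
# class FancyButtonHelper(ButtonHelper):
#     edit_button_classnames = ['button-secondary']
#     delete_button_classnames = ['no', 'serious']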
| mikedingjan/wagtail | wagtail/contrib/modeladmin/helpers/button.py | Python | bsd-3-clause | 7,222 |
from executor.Executor import TBTAFExecutor
from common.suite import TBTestSuite
from common.sample_test import TBTAFSampleTest
import time
def sample_run():
ejecutor = TBTAFExecutor()
suite = TBTestSuite(1,'A')
for i in range(2):
newTest = TBTAFSampleTest()
suite.addTestCase(newTest)
newTest = TBTAFSampleTest()
newTest.testResult = None
suite.addTestCase(newTest)
for i in range(2):
newTest = TBTAFSampleTest()
suite.addTestCase(newTest)
ejecutor.executeTests(suite)
while True:
ejecutor.getStatus(suite)
time.sleep(10)
sample_run() | S41nz/TBTAF | tbtaf/test/executor/prueba_executor_invalid_test.py | Python | apache-2.0 | 634 |
#!/usr/bin/env python
#-*- coding: UTF-8 -*-
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import subprocess, time
last_ch = 0
class TvServerHandler(BaseHTTPRequestHandler):
def do_GET(self):
global last_ch
cmd = self.path.split('/')
if 'favicon.ico' in cmd:
return
ch = int(cmd[1])
if not ch or ch < 1:
ch = 1
if ch == last_ch:
return
last_ch = ch
p = subprocess.Popen("killall VLC",shell=True)
time.sleep(0.5)
cmd = "/Applications/VLC.app/Contents/MacOS/VLC -I dummy eyetv:// --sout='#std{access=http,mux=ts,dst=<your ip>:8484}' --sout-keep --autocrop --intf dummy --eyetv-channel=%s" % ch
p = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.STDOUT,close_fds=True)
time.sleep(0.5)
self.send_response(301)
self.send_header("Location", "http://<your ip>:8484?t=%f" % time.time())
self.end_headers()
return
def do_POST(self):
pass
return
def main():
try:
server = HTTPServer(('',8485),TvServerHandler)
print 'server started'
server.serve_forever()
except KeyboardInterrupt:
print 'shutting down'
server.socket.close()
if __name__ == '__main__':
main()
| mimepp/umspx | htdocs/umsp/plugins/eyetv/eyetv-controller.py | Python | gpl-3.0 | 1,293 |
# This file is part of the Hotwire Shell project API.
# Copyright (C) 2007 Colin Walters <[email protected]>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE X CONSORTIUM BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR
# THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import http.client
from http.client import HTTPResponse
from hotwire.builtin import Builtin, BuiltinRegistry, ArgSpec
class HttpGetBuiltin(Builtin):
__doc__ = _("""Perform a HTTP GET.""")
def __init__(self):
super(HttpGetBuiltin, self).__init__('http-get',
output=HTTPResponse,
input=None,
singlevalue=True,
argspec=(ArgSpec('host'), ArgSpec('path', opt=True)))
def execute(self, context, args, options=[]):
if len(args) == 1:
host = args[0]
path = '/'
elif len(args) == 2:
host = args[0]
path = args[1]
else:
assert False
conn = http.client.HTTPConnection(host)
conn.request('GET', path)
response = conn.getresponse()
return response
BuiltinRegistry.getInstance().register_hotwire(HttpGetBuiltin())
| SDX2000/hotwire | hotwire/builtins/httpget.py | Python | gpl-2.0 | 2,322 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
needed_by = (("oauth2_provider", "0001_initial"),)
def forwards(self, orm):
# Adding model 'Account'
db.create_table(u'accounts_account', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('password', self.gf('django.db.models.fields.CharField')(max_length=128)),
('last_login', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('is_superuser', self.gf('django.db.models.fields.BooleanField')(default=False)),
('created_on', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, db_index=True, blank=True)),
('last_modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, db_index=True, blank=True)),
('uuid', self.gf('uuidfield.fields.UUIDField')(db_index=True, unique=True, max_length=32, blank=True)),
('first_name', self.gf('django.db.models.fields.CharField')(max_length=50)),
('last_name', self.gf('django.db.models.fields.CharField')(max_length=50)),
('email', self.gf('django.db.models.fields.EmailField')(unique=True, max_length=75)),
('is_staff', self.gf('django.db.models.fields.BooleanField')(default=False)),
('is_active', self.gf('django.db.models.fields.BooleanField')(default=True)),
('language', self.gf('django.db.models.fields.CharField')(default='he', max_length=2)),
))
db.send_create_signal(u'accounts', ['Account'])
# Adding M2M table for field groups on 'Account'
m2m_table_name = db.shorten_name(u'accounts_account_groups')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('account', models.ForeignKey(orm[u'accounts.account'], null=False)),
('group', models.ForeignKey(orm[u'auth.group'], null=False))
))
db.create_unique(m2m_table_name, ['account_id', 'group_id'])
# Adding M2M table for field user_permissions on 'Account'
m2m_table_name = db.shorten_name(u'accounts_account_user_permissions')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('account', models.ForeignKey(orm[u'accounts.account'], null=False)),
('permission', models.ForeignKey(orm[u'auth.permission'], null=False))
))
db.create_unique(m2m_table_name, ['account_id', 'permission_id'])
def backwards(self, orm):
# Deleting model 'Account'
db.delete_table(u'accounts_account')
# Removing M2M table for field groups on 'Account'
db.delete_table(db.shorten_name(u'accounts_account_groups'))
# Removing M2M table for field user_permissions on 'Account'
db.delete_table(db.shorten_name(u'accounts_account_user_permissions'))
models = {
u'accounts.account': {
'Meta': {'ordering': "['email', 'created_on']", 'object_name': 'Account'},
'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'language': ('django.db.models.fields.CharField', [], {'default': "'he'", 'max_length': '2'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'uuid': ('uuidfield.fields.UUIDField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '32', 'blank': 'True'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['accounts']
| openbudgets/openbudgets | openbudgets/apps/accounts/migrations/0001_initial.py | Python | bsd-3-clause | 6,585 |
# coding=utf-8
# Copyright 2021, The Facebook AI Research Team and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Flax MBart model."""
import math
import random
from functools import partial
from typing import Callable, Optional, Tuple
import numpy as np
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict, unfreeze
from flax.linen import combine_masks, make_causal_mask
from flax.linen.attention import dot_product_attention_weights
from jax import lax
from jax.random import PRNGKey
from ...file_utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...modeling_flax_outputs import (
FlaxBaseModelOutput,
FlaxBaseModelOutputWithPastAndCrossAttentions,
FlaxCausalLMOutputWithCrossAttentions,
FlaxSeq2SeqLMOutput,
FlaxSeq2SeqModelOutput,
FlaxSeq2SeqQuestionAnsweringModelOutput,
FlaxSeq2SeqSequenceClassifierOutput,
)
from ...modeling_flax_utils import (
ACT2FN,
FlaxPreTrainedModel,
append_call_sample_docstring,
append_replace_return_docstrings,
overwrite_call_docstring,
)
from ...utils import logging
from .configuration_mbart import MBartConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "facebook/mbart-large-cc25"
_CONFIG_FOR_DOC = "MBartConfig"
_TOKENIZER_FOR_DOC = "MBartTokenizer"
MBART_START_DOCSTRING = r"""
This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning
    heads, etc.)
This model is also a Flax Linen
[flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a
    regular Flax Module and refer to the Flax documentation for all matters related to general usage and behavior.
Finally, this model supports inherent JAX features such as:
- [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
- [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
- [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
- [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
Parameters:
config ([`MBartConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.
dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):
The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and
`jax.numpy.bfloat16` (on TPUs).
This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If
        specified, all the computation will be performed with the given `dtype`.
**Note that this only specifies the dtype of the computation and does not influence the dtype of model
parameters.**
If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and
[`~FlaxPreTrainedModel.to_bf16`].
"""
MBART_INPUTS_DOCSTRING = r"""
Args:
input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
it.
Indices can be obtained using [`MBartTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`MBartTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are decoder input IDs?](../glossary#decoder-input-ids)
For translation and summarization training, `decoder_input_ids` should be provided. If no
`decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right
for denoising pre-training following the paper.
decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
            If you want to change padding behavior, you should modify it to your needs. See diagram 1 in [the
paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy.
position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
decoder_position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the
range `[0, config.max_position_embeddings - 1]`.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
MBART_ENCODE_INPUTS_DOCSTRING = r"""
Args:
input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
it.
Indices can be obtained using [`MBartTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
MBART_DECODE_INPUTS_DOCSTRING = r"""
Args:
decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`MBartTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are decoder input IDs?](../glossary#decoder-input-ids)
For translation and summarization training, `decoder_input_ids` should be provided. If no
`decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right
for denoising pre-training following the paper.
        encoder_outputs (`tuple(tuple(jnp.ndarray))`):
            Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`).
            `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of
            hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
encoder_attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
            If you want to change padding behavior, you should modify it to your needs. See diagram 1 in [the
paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy.
decoder_position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the
range `[0, config.max_position_embeddings - 1]`.
past_key_values (`Dict[str, np.ndarray]`, *optional*, returned by `init_cache` or when passing previous `past_key_values`):
Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast
auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int) -> jnp.ndarray:
"""
    Shift input ids one token to the right, and wrap the last non-pad token (the <LID> token). Note that MBart does
    not have a single `decoder_start_token_id`, in contrast to other Bart-like models.
"""
prev_output_tokens = np.array(input_ids).copy()
assert pad_token_id is not None, "self.model.config.pad_token_id has to be defined."
# replace possible -100 values in labels by `pad_token_id`
prev_output_tokens = np.where(prev_output_tokens == -100, pad_token_id, input_ids)
index_of_eos = (np.where(prev_output_tokens != pad_token_id, 1, 0).sum(axis=-1) - 1).reshape(-1, 1)
decoder_start_tokens = np.array(
[prev_output_tokens[i, eos_idx] for i, eos_idx in enumerate(index_of_eos)], dtype=np.int32
).squeeze()
prev_output_tokens[:, 1:] = prev_output_tokens[:, :-1].copy()
prev_output_tokens[:, 0] = decoder_start_tokens
return prev_output_tokens
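# A worked example (hypothetical token ids; assume pad_token_id=1 and that 250004 is the <LID> token):
#   shift_tokens_right(np.array([[47, 99, 2, 250004, 1, 1]]), pad_token_id=1)
#   -> array([[250004, 47, 99, 2, 250004, 1]])
# i.e. the <LID> token wraps around to the front and serves as the decoder start token.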
# Copied from transformers.models.bart.modeling_flax_bart.FlaxBartAttention with Bart->MBart
class FlaxMBartAttention(nn.Module):
config: MBartConfig
embed_dim: int
num_heads: int
dropout: float = 0.0
causal: bool = False
bias: bool = True
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
def setup(self) -> None:
self.head_dim = self.embed_dim // self.num_heads
if self.head_dim * self.num_heads != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
f" and `num_heads`: {self.num_heads})."
)
dense = partial(
nn.Dense,
self.embed_dim,
use_bias=self.bias,
dtype=self.dtype,
kernel_init=jax.nn.initializers.normal(self.config.init_std),
)
self.q_proj, self.k_proj, self.v_proj = dense(), dense(), dense()
self.out_proj = dense()
self.dropout_layer = nn.Dropout(rate=self.dropout)
if self.causal:
self.causal_mask = make_causal_mask(
jnp.ones((1, self.config.max_position_embeddings), dtype="bool"), dtype="bool"
)
def _split_heads(self, hidden_states):
return hidden_states.reshape(hidden_states.shape[:2] + (self.num_heads, self.head_dim))
def _merge_heads(self, hidden_states):
return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,))
@nn.compact
def _concatenate_to_cache(self, key, value, query, attention_mask):
"""
This function takes projected key, value states from a single input token and concatenates the states to cached
        states from previous steps. This function is slightly adapted from the official Flax repository:
https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252
"""
# detect if we're initializing by absence of existing cache data.
is_initialized = self.has_variable("cache", "cached_key")
cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype)
cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype)
cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32))
if is_initialized:
*batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape
# update key, value caches with our new 1d spatial slices
cur_index = cache_index.value
indices = (0,) * len(batch_dims) + (cur_index, 0, 0)
key = lax.dynamic_update_slice(cached_key.value, key, indices)
value = lax.dynamic_update_slice(cached_value.value, value, indices)
cached_key.value = key
cached_value.value = value
num_updated_cache_vectors = query.shape[1]
cache_index.value = cache_index.value + num_updated_cache_vectors
# causal mask for cached decoder self-attention: our single query position should only attend to those key positions that have already been generated and cached, not the remaining zero elements.
pad_mask = jnp.broadcast_to(
jnp.arange(max_length) < cur_index + num_updated_cache_vectors,
tuple(batch_dims) + (1, num_updated_cache_vectors, max_length),
)
attention_mask = combine_masks(pad_mask, attention_mask)
return key, value, attention_mask
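    # Shape note: the cache variables keep the key/value shape of the initialization pass,
    # i.e. (batch, max_length, num_heads, head_dim), and `cache_index` advances by the query
    # length (one position per step during autoregressive decoding).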
def __call__(
self,
hidden_states: jnp.ndarray,
key_value_states: Optional[jnp.ndarray] = None,
attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
"""Input shape: Batch x Time x Channel"""
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
is_cross_attention = key_value_states is not None
batch_size = hidden_states.shape[0]
# get query proj
query_states = self.q_proj(hidden_states)
# get key, value proj
if is_cross_attention:
# cross_attentions
key_states = self.k_proj(key_value_states)
value_states = self.v_proj(key_value_states)
else:
# self_attention
key_states = self.k_proj(hidden_states)
value_states = self.v_proj(hidden_states)
query_states = self._split_heads(query_states)
key_states = self._split_heads(key_states)
value_states = self._split_heads(value_states)
# handle cache prepare causal attention mask
if self.causal:
query_length, key_length = query_states.shape[1], key_states.shape[1]
if self.has_variable("cache", "cached_key"):
mask_shift = self.variables["cache"]["cache_index"]
max_decoder_length = self.variables["cache"]["cached_key"].shape[1]
causal_mask = lax.dynamic_slice(
self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length)
)
else:
causal_mask = self.causal_mask[:, :, :query_length, :key_length]
causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:])
# combine masks if needed
if attention_mask is not None and self.causal:
attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape)
attention_mask = combine_masks(attention_mask, causal_mask)
elif self.causal:
attention_mask = causal_mask
elif attention_mask is not None:
attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))
# During fast autoregressive decoding, we feed one position at a time,
# and cache the keys and values step by step.
if self.causal and (self.has_variable("cache", "cached_key") or init_cache):
key_states, value_states, attention_mask = self._concatenate_to_cache(
key_states, value_states, query_states, attention_mask
)
# Convert the boolean attention mask to an attention bias.
if attention_mask is not None:
# attention mask in the form of attention bias
attention_bias = lax.select(
attention_mask > 0,
jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
jnp.full(attention_mask.shape, float("-inf")).astype(self.dtype),
)
else:
attention_bias = None
dropout_rng = None
if not deterministic and self.dropout > 0.0:
dropout_rng = self.make_rng("dropout")
attn_weights = dot_product_attention_weights(
query_states,
key_states,
bias=attention_bias,
dropout_rng=dropout_rng,
dropout_rate=self.dropout,
broadcast_dropout=True,
deterministic=deterministic,
dtype=self.dtype,
precision=None,
)
attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states)
attn_output = self._merge_heads(attn_output)
attn_output = self.out_proj(attn_output)
return attn_output, attn_weights
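# A minimal stand-alone usage sketch (illustrative shapes; assumes a valid MBartConfig named `config`):
#   attn = FlaxMBartAttention(config=config, embed_dim=config.d_model,
#                             num_heads=config.encoder_attention_heads)
#   hidden = jnp.ones((1, 8, config.d_model))
#   variables = attn.init(jax.random.PRNGKey(0), hidden)
#   out, weights = attn.apply(variables, hidden)  # out: (1, 8, d_model); weights: (1, heads, 8, 8)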
class FlaxMBartEncoderLayer(nn.Module):
config: MBartConfig
dtype: jnp.dtype = jnp.float32
def setup(self) -> None:
self.embed_dim = self.config.d_model
self.self_attn = FlaxMBartAttention(
config=self.config,
embed_dim=self.embed_dim,
num_heads=self.config.encoder_attention_heads,
dropout=self.config.attention_dropout,
dtype=self.dtype,
)
self.self_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
self.dropout_layer = nn.Dropout(rate=self.config.dropout)
self.activation_fn = ACT2FN[self.config.activation_function]
self.activation_dropout_layer = nn.Dropout(rate=self.config.activation_dropout)
self.fc1 = nn.Dense(
self.config.encoder_ffn_dim,
dtype=self.dtype,
kernel_init=jax.nn.initializers.normal(self.config.init_std),
)
self.fc2 = nn.Dense(
self.embed_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std)
)
self.final_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
def __call__(
self,
hidden_states: jnp.ndarray,
attention_mask: jnp.ndarray,
output_attentions: bool = True,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
residual = hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask)
hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.final_layer_norm(hidden_states)
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = self.activation_dropout_layer(hidden_states, deterministic=deterministic)
hidden_states = self.fc2(hidden_states)
hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
hidden_states = residual + hidden_states
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
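# Note: unlike Bart, MBart uses pre-LayerNorm ordering (LayerNorm is applied *before* the
# self-attention and feed-forward blocks, plus a final layer norm after the last encoder/decoder
# layer), which is why FlaxMBartEncoderLayer and FlaxMBartDecoderLayer are not marked as
# "Copied from" Bart, unlike the surrounding layer collections.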
# Copied from transformers.models.bart.modeling_flax_bart.FlaxBartEncoderLayerCollection with Bart->MBart
class FlaxMBartEncoderLayerCollection(nn.Module):
config: MBartConfig
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
def setup(self):
self.layers = [
FlaxMBartEncoderLayer(self.config, name=str(i), dtype=self.dtype)
for i in range(self.config.encoder_layers)
]
self.layerdrop = self.config.encoder_layerdrop
def __call__(
self,
hidden_states,
attention_mask,
deterministic: bool = True,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
):
all_attentions = () if output_attentions else None
all_hidden_states = () if output_hidden_states else None
for encoder_layer in self.layers:
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
dropout_probability = random.uniform(0, 1)
if not deterministic and (dropout_probability < self.layerdrop): # skip the layer
layer_outputs = (None, None)
else:
layer_outputs = encoder_layer(
hidden_states,
attention_mask,
output_attentions,
deterministic,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
all_hidden_states += (hidden_states,)
outputs = (hidden_states, all_hidden_states, all_attentions)
if not return_dict:
return tuple(v for v in outputs if v is not None)
return FlaxBaseModelOutput(
last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
)
class FlaxMBartDecoderLayer(nn.Module):
config: MBartConfig
dtype: jnp.dtype = jnp.float32
def setup(self) -> None:
self.embed_dim = self.config.d_model
self.self_attn = FlaxMBartAttention(
config=self.config,
embed_dim=self.embed_dim,
num_heads=self.config.decoder_attention_heads,
dropout=self.config.attention_dropout,
causal=True,
dtype=self.dtype,
)
self.dropout_layer = nn.Dropout(rate=self.config.dropout)
self.activation_fn = ACT2FN[self.config.activation_function]
self.activation_dropout_layer = nn.Dropout(rate=self.config.activation_dropout)
self.self_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
self.encoder_attn = FlaxMBartAttention(
config=self.config,
embed_dim=self.embed_dim,
num_heads=self.config.decoder_attention_heads,
dropout=self.config.attention_dropout,
dtype=self.dtype,
)
self.encoder_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
self.fc1 = nn.Dense(
self.config.encoder_ffn_dim,
dtype=self.dtype,
kernel_init=jax.nn.initializers.normal(self.config.init_std),
)
self.fc2 = nn.Dense(
self.embed_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std)
)
self.final_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
def __call__(
self,
hidden_states: jnp.ndarray,
attention_mask: jnp.ndarray,
encoder_hidden_states: Optional[jnp.ndarray] = None,
encoder_attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
output_attentions: bool = True,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
residual = hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
# Self Attention
hidden_states, self_attn_weights = self.self_attn(
hidden_states=hidden_states, attention_mask=attention_mask, init_cache=init_cache
)
hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
hidden_states = residual + hidden_states
# Cross-Attention Block
cross_attn_weights = None
if encoder_hidden_states is not None:
residual = hidden_states
hidden_states = self.encoder_attn_layer_norm(hidden_states)
hidden_states, cross_attn_weights = self.encoder_attn(
hidden_states=hidden_states,
key_value_states=encoder_hidden_states,
attention_mask=encoder_attention_mask,
)
hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
hidden_states = residual + hidden_states
# Fully Connected
residual = hidden_states
hidden_states = self.final_layer_norm(hidden_states)
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = self.activation_dropout_layer(hidden_states, deterministic=deterministic)
hidden_states = self.fc2(hidden_states)
hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
hidden_states = residual + hidden_states
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights, cross_attn_weights)
return outputs
# Copied from transformers.models.bart.modeling_flax_bart.FlaxBartDecoderLayerCollection with Bart->MBart
class FlaxMBartDecoderLayerCollection(nn.Module):
config: MBartConfig
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
def setup(self):
self.layers = [
FlaxMBartDecoderLayer(self.config, name=str(i), dtype=self.dtype)
for i in range(self.config.decoder_layers)
]
self.layerdrop = self.config.decoder_layerdrop
def __call__(
self,
hidden_states,
attention_mask,
encoder_hidden_states: Optional[jnp.ndarray] = None,
encoder_attention_mask: Optional[jnp.ndarray] = None,
deterministic: bool = True,
init_cache: bool = False,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
):
# decoder layers
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
for decoder_layer in self.layers:
if output_hidden_states:
all_hidden_states += (hidden_states,)
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
dropout_probability = random.uniform(0, 1)
if not deterministic and (dropout_probability < self.layerdrop):
layer_outputs = (None, None, None)
else:
layer_outputs = decoder_layer(
hidden_states,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
init_cache=init_cache,
output_attentions=output_attentions,
deterministic=deterministic,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attns += (layer_outputs[1],)
if encoder_hidden_states is not None:
all_cross_attentions += (layer_outputs[2],)
# add hidden states from the last decoder layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
outputs = [hidden_states, all_hidden_states, all_self_attns, all_cross_attentions]
if not return_dict:
return tuple(v for v in outputs if v is not None)
return FlaxBaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attns,
cross_attentions=all_cross_attentions,
)
# Copied from transformers.models.bart.modeling_flax_bart.FlaxBartClassificationHead with Bart->MBart
class FlaxMBartClassificationHead(nn.Module):
"""Head for sentence-level classification tasks."""
config: MBartConfig
inner_dim: int
num_classes: int
pooler_dropout: float
dtype: jnp.dtype = jnp.float32
def setup(self):
self.dense = nn.Dense(
self.inner_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std)
)
self.dropout = nn.Dropout(rate=self.pooler_dropout)
self.out_proj = nn.Dense(
self.num_classes,
dtype=self.dtype,
kernel_init=jax.nn.initializers.normal(self.config.init_std),
)
def __call__(self, hidden_states: jnp.ndarray, deterministic: bool):
hidden_states = self.dropout(hidden_states, deterministic=deterministic)
hidden_states = self.dense(hidden_states)
hidden_states = jnp.tanh(hidden_states)
hidden_states = self.dropout(hidden_states, deterministic=deterministic)
hidden_states = self.out_proj(hidden_states)
return hidden_states
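# This head is applied to the decoder hidden state at the final <eos> token of each sequence
# (see FlaxMBartForSequenceClassificationModule below), mirroring the eos-pooling used by the
# PyTorch MBart classification model.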
class FlaxMBartEncoder(nn.Module):
config: MBartConfig
embed_tokens: nn.Embed
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
def setup(self):
self.dropout_layer = nn.Dropout(rate=self.config.dropout)
embed_dim = self.config.d_model
self.padding_idx = self.config.pad_token_id
self.max_source_positions = self.config.max_position_embeddings
self.embed_scale = math.sqrt(embed_dim) if self.config.scale_embedding else 1.0
        # MBart is set up so that if padding_idx is specified then the embedding ids are offset by 2
        # and num_embeddings is adjusted appropriately; other models don't have this hack.
self.offset = 2
self.embed_positions = nn.Embed(
self.config.max_position_embeddings + self.offset,
embed_dim,
embedding_init=jax.nn.initializers.normal(self.config.init_std),
)
self.layers = FlaxMBartEncoderLayerCollection(self.config, self.dtype)
self.layernorm_embedding = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
self.layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
def __call__(
self,
input_ids,
attention_mask,
position_ids,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
deterministic: bool = True,
):
input_shape = input_ids.shape
input_ids = input_ids.reshape(-1, input_shape[-1])
inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
embed_pos = self.embed_positions(position_ids + self.offset)
hidden_states = inputs_embeds + embed_pos
hidden_states = self.layernorm_embedding(hidden_states)
hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
outputs = self.layers(
hidden_states,
attention_mask,
deterministic=deterministic,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
last_hidden_states = outputs[0]
last_hidden_states = self.layer_norm(last_hidden_states)
if not return_dict:
return (last_hidden_states,) + outputs[1:]
return FlaxBaseModelOutput(
last_hidden_state=last_hidden_states,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
class FlaxMBartDecoder(nn.Module):
config: MBartConfig
embed_tokens: nn.Embed
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
def setup(self):
self.dropout_layer = nn.Dropout(rate=self.config.dropout)
embed_dim = self.config.d_model
self.padding_idx = self.config.pad_token_id
self.max_target_positions = self.config.max_position_embeddings
self.embed_scale = math.sqrt(self.config.d_model) if self.config.scale_embedding else 1.0
        # MBart is set up so that if padding_idx is specified then the embedding ids are offset by 2
        # and num_embeddings is adjusted appropriately; other models don't have this hack.
self.offset = 2
self.embed_positions = nn.Embed(
self.config.max_position_embeddings + self.offset,
embed_dim,
embedding_init=jax.nn.initializers.normal(self.config.init_std),
)
self.layers = FlaxMBartDecoderLayerCollection(self.config, self.dtype)
self.layernorm_embedding = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
self.layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
def __call__(
self,
input_ids,
attention_mask,
position_ids,
encoder_hidden_states: Optional[jnp.ndarray] = None,
encoder_attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
deterministic: bool = True,
):
input_shape = input_ids.shape
input_ids = input_ids.reshape(-1, input_shape[-1])
inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
# embed positions
positions = self.embed_positions(position_ids + self.offset)
hidden_states = inputs_embeds + positions
hidden_states = self.layernorm_embedding(hidden_states)
hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
outputs = self.layers(
hidden_states,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
deterministic=deterministic,
init_cache=init_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
last_hidden_states = outputs[0]
last_hidden_states = self.layer_norm(last_hidden_states)
if not return_dict:
return (last_hidden_states,) + outputs[1:]
return FlaxBaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=last_hidden_states,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
cross_attentions=outputs.cross_attentions,
)
# Copied from transformers.models.bart.modeling_flax_bart.FlaxBartModule with Bart->MBart
class FlaxMBartModule(nn.Module):
config: MBartConfig
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
def setup(self):
self.shared = nn.Embed(
self.config.vocab_size,
self.config.d_model,
embedding_init=jax.nn.initializers.normal(self.config.init_std),
)
self.encoder = FlaxMBartEncoder(self.config, dtype=self.dtype, embed_tokens=self.shared)
self.decoder = FlaxMBartDecoder(self.config, dtype=self.dtype, embed_tokens=self.shared)
def _get_encoder_module(self):
return self.encoder
def _get_decoder_module(self):
return self.decoder
def __call__(
self,
input_ids,
attention_mask,
decoder_input_ids,
decoder_attention_mask,
position_ids,
decoder_position_ids,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
deterministic: bool = True,
):
encoder_outputs = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
deterministic=deterministic,
)
decoder_outputs = self.decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
position_ids=decoder_position_ids,
encoder_hidden_states=encoder_outputs[0],
encoder_attention_mask=attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
deterministic=deterministic,
)
if not return_dict:
return decoder_outputs + encoder_outputs
return FlaxSeq2SeqModelOutput(
last_hidden_state=decoder_outputs.last_hidden_state,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
)
class FlaxMBartPreTrainedModel(FlaxPreTrainedModel):
config_class = MBartConfig
base_model_prefix: str = "model"
module_class: nn.Module = None
def __init__(
self,
config: MBartConfig,
input_shape: Tuple[int] = (1, 1),
seed: int = 0,
dtype: jnp.dtype = jnp.float32,
**kwargs
):
module = self.module_class(config=config, dtype=dtype, **kwargs)
super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype)
def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple) -> FrozenDict:
# init input tensors
input_ids = jnp.zeros(input_shape, dtype="i4")
# make sure initialization pass will work for FlaxMBartForSequenceClassificationModule
        input_ids = input_ids.at[..., -1].set(self.config.eos_token_id)
attention_mask = jnp.ones_like(input_ids)
decoder_input_ids = input_ids
decoder_attention_mask = jnp.ones_like(input_ids)
batch_size, sequence_length = input_ids.shape
position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
decoder_position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
params_rng, dropout_rng = jax.random.split(rng)
rngs = {"params": params_rng, "dropout": dropout_rng}
return self.module.init(
rngs,
input_ids,
attention_mask,
decoder_input_ids,
decoder_attention_mask,
position_ids,
decoder_position_ids,
)["params"]
# Copied from transformers.models.bart.modeling_flax_bart.FlaxBartPreTrainedModel.init_cache with Bart->MBart
def init_cache(self, batch_size, max_length, encoder_outputs):
r"""
Args:
batch_size (`int`):
batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.
max_length (`int`):
maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized
cache.
            encoder_outputs (`Union[FlaxBaseModelOutput, tuple(tuple(jnp.ndarray))]`):
                `encoder_outputs` consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*:
                `attentions`). `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a
                sequence of hidden-states at the output of the last layer of the encoder. Used in the
                cross-attention of the decoder.
"""
# init input variables to retrieve cache
decoder_input_ids = jnp.ones((batch_size, max_length), dtype="i4")
decoder_attention_mask = jnp.ones_like(decoder_input_ids)
decoder_position_ids = jnp.broadcast_to(
jnp.arange(jnp.atleast_2d(decoder_input_ids).shape[-1]), decoder_input_ids.shape
)
def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):
decoder_module = module._get_decoder_module()
return decoder_module(
decoder_input_ids,
decoder_attention_mask,
decoder_position_ids,
**kwargs,
)
init_variables = self.module.init(
jax.random.PRNGKey(0),
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
decoder_position_ids=decoder_position_ids,
encoder_hidden_states=encoder_outputs[0],
init_cache=True,
method=_decoder_forward, # we only need to call the decoder to init the cache
)
return unfreeze(init_variables["cache"])
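    # A minimal cache-priming sketch (illustrative values; assumes a loaded model instance `model`):
    #   encoder_outputs = model.encode(input_ids=jnp.ones((1, 8), dtype="i4"))
    #   past_key_values = model.init_cache(batch_size=1, max_length=16, encoder_outputs=encoder_outputs)
    #   # the cache is then passed to `decode` together with explicit `decoder_position_ids`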
@add_start_docstrings(MBART_ENCODE_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=FlaxBaseModelOutput, config_class=MBartConfig)
def encode(
self,
input_ids: jnp.ndarray,
attention_mask: Optional[jnp.ndarray] = None,
position_ids: Optional[jnp.ndarray] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
train: bool = False,
params: dict = None,
dropout_rng: PRNGKey = None,
):
r"""
Returns:
Example:
```python
>>> from transformers import MBartTokenizer, FlaxMBartForConditionalGeneration
>>> model = FlaxMBartForConditionalGeneration.from_pretrained("facebook/mbart-large-cc25")
>>> tokenizer = MBartTokenizer.from_pretrained("facebook/mbart-large-cc25")
>>> text = "My friends are cool but they eat too many carbs."
>>> inputs = tokenizer(text, max_length=1024, return_tensors="jax")
>>> encoder_outputs = model.encode(**inputs)
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.return_dict
if attention_mask is None:
attention_mask = jnp.ones_like(input_ids)
if position_ids is None:
batch_size, sequence_length = input_ids.shape
position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
# Handle any PRNG if needed
rngs = {}
if dropout_rng is not None:
rngs["dropout"] = dropout_rng
def _encoder_forward(module, input_ids, attention_mask, position_ids, **kwargs):
encode_module = module._get_encoder_module()
return encode_module(input_ids, attention_mask, position_ids, **kwargs)
return self.module.apply(
{"params": params or self.params},
input_ids=jnp.array(input_ids, dtype="i4"),
attention_mask=jnp.array(attention_mask, dtype="i4"),
position_ids=jnp.array(position_ids, dtype="i4"),
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
deterministic=not train,
rngs=rngs,
method=_encoder_forward,
)
@add_start_docstrings(MBART_DECODE_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=FlaxBaseModelOutputWithPastAndCrossAttentions, config_class=MBartConfig)
def decode(
self,
decoder_input_ids,
encoder_outputs,
encoder_attention_mask: Optional[jnp.ndarray] = None,
decoder_attention_mask: Optional[jnp.ndarray] = None,
decoder_position_ids: Optional[jnp.ndarray] = None,
past_key_values: dict = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
train: bool = False,
params: dict = None,
dropout_rng: PRNGKey = None,
):
r"""
Returns:
Example:
```python
>>> from transformers import MBartTokenizer, FlaxMBartForConditionalGeneration
>>> model = FlaxMBartForConditionalGeneration.from_pretrained("facebook/mbart-large-cc25")
>>> tokenizer = MBartTokenizer.from_pretrained("facebook/mbart-large-cc25")
>>> text = "My friends are cool but they eat too many carbs."
>>> inputs = tokenizer(text, max_length=1024, return_tensors="jax")
>>> encoder_outputs = model.encode(**inputs)
>>> decoder_start_token_id = model.config.decoder_start_token_id
>>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id
>>> outputs = model.decode(decoder_input_ids, encoder_outputs)
>>> last_decoder_hidden_states = outputs.last_hidden_state
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.return_dict
encoder_hidden_states = encoder_outputs[0]
if encoder_attention_mask is None:
batch_size, sequence_length = encoder_hidden_states.shape[:2]
encoder_attention_mask = jnp.ones((batch_size, sequence_length))
batch_size, sequence_length = decoder_input_ids.shape
if decoder_attention_mask is None:
decoder_attention_mask = jnp.ones((batch_size, sequence_length))
if decoder_position_ids is None:
if past_key_values is not None:
raise ValueError("Make sure to provide `decoder_position_ids` when passing `past_key_values`.")
decoder_position_ids = jnp.broadcast_to(
jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)
)
# Handle any PRNG if needed
rngs = {}
if dropout_rng is not None:
rngs["dropout"] = dropout_rng
inputs = {"params": params or self.params}
        # If past_key_values are passed, the cache is already initialized and the private flag init_cache
        # has to be passed down to ensure the cache is used. The cache also has to be marked as mutable so
        # that it can be changed by the FlaxMBartAttention module.
if past_key_values:
inputs["cache"] = past_key_values
mutable = ["cache"]
else:
mutable = False
def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):
decoder_module = module._get_decoder_module()
return decoder_module(
decoder_input_ids,
decoder_attention_mask,
decoder_position_ids,
**kwargs,
)
outputs = self.module.apply(
inputs,
decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"),
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"),
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
deterministic=not train,
rngs=rngs,
mutable=mutable,
method=_decoder_forward,
)
# add updated cache to model output
if past_key_values is not None and return_dict:
outputs, past = outputs
outputs["past_key_values"] = unfreeze(past["cache"])
return outputs
elif past_key_values is not None and not return_dict:
outputs, past = outputs
outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:]
return outputs
@add_start_docstrings_to_model_forward(MBART_INPUTS_DOCSTRING)
def __call__(
self,
input_ids: jnp.ndarray,
attention_mask: Optional[jnp.ndarray] = None,
decoder_input_ids: Optional[jnp.ndarray] = None,
decoder_attention_mask: Optional[jnp.ndarray] = None,
position_ids: Optional[jnp.ndarray] = None,
decoder_position_ids: Optional[jnp.ndarray] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
train: bool = False,
params: dict = None,
dropout_rng: PRNGKey = None,
):
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.return_dict
# prepare encoder inputs
if attention_mask is None:
attention_mask = jnp.ones_like(input_ids)
if position_ids is None:
batch_size, sequence_length = input_ids.shape
position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
# prepare decoder inputs
if decoder_input_ids is None:
decoder_input_ids = shift_tokens_right(input_ids, self.config.pad_token_id)
if decoder_attention_mask is None:
decoder_attention_mask = jnp.ones_like(decoder_input_ids)
if decoder_position_ids is None:
batch_size, sequence_length = decoder_input_ids.shape
decoder_position_ids = jnp.broadcast_to(
jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)
)
# Handle any PRNG if needed
rngs = {"dropout": dropout_rng} if dropout_rng is not None else {}
return self.module.apply(
{"params": params or self.params},
input_ids=jnp.array(input_ids, dtype="i4"),
attention_mask=jnp.array(attention_mask, dtype="i4"),
position_ids=jnp.array(position_ids, dtype="i4"),
decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"),
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
deterministic=not train,
rngs=rngs,
)
@add_start_docstrings(
"The bare MBart Model transformer outputting raw hidden-states without any specific head on top.",
MBART_START_DOCSTRING,
)
class FlaxMBartModel(FlaxMBartPreTrainedModel):
config: MBartConfig
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
module_class = FlaxMBartModule
append_call_sample_docstring(
FlaxMBartModel, _TOKENIZER_FOR_DOC, _CHECKPOINT_FOR_DOC, FlaxSeq2SeqModelOutput, _CONFIG_FOR_DOC
)
# Copied from transformers.models.bart.modeling_flax_bart.FlaxBartForConditionalGenerationModule with Bart->MBart
class FlaxMBartForConditionalGenerationModule(nn.Module):
config: MBartConfig
dtype: jnp.dtype = jnp.float32
bias_init: Callable[..., jnp.ndarray] = jax.nn.initializers.zeros
def setup(self):
self.model = FlaxMBartModule(config=self.config, dtype=self.dtype)
self.lm_head = nn.Dense(
self.model.shared.num_embeddings,
use_bias=False,
dtype=self.dtype,
kernel_init=jax.nn.initializers.normal(self.config.init_std),
)
self.final_logits_bias = self.param("final_logits_bias", self.bias_init, (1, self.model.shared.num_embeddings))
def _get_encoder_module(self):
return self.model.encoder
def _get_decoder_module(self):
return self.model.decoder
def __call__(
self,
input_ids,
attention_mask,
decoder_input_ids,
decoder_attention_mask,
position_ids,
decoder_position_ids,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
deterministic: bool = True,
):
outputs = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
position_ids=position_ids,
decoder_position_ids=decoder_position_ids,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
deterministic=deterministic,
)
hidden_states = outputs[0]
if self.config.tie_word_embeddings:
shared_embedding = self.model.variables["params"]["shared"]["embedding"]
lm_logits = self.lm_head.apply({"params": {"kernel": shared_embedding.T}}, hidden_states)
else:
lm_logits = self.lm_head(hidden_states)
lm_logits += self.final_logits_bias.astype(self.dtype)
if not return_dict:
output = (lm_logits,) + outputs[1:]
return output
return FlaxSeq2SeqLMOutput(
logits=lm_logits,
decoder_hidden_states=outputs.decoder_hidden_states,
decoder_attentions=outputs.decoder_attentions,
cross_attentions=outputs.cross_attentions,
encoder_last_hidden_state=outputs.encoder_last_hidden_state,
encoder_hidden_states=outputs.encoder_hidden_states,
encoder_attentions=outputs.encoder_attentions,
)
@add_start_docstrings(
"The MMBart Model with a language modeling head. Can be used for summarization.", MBART_START_DOCSTRING
)
class FlaxMBartForConditionalGeneration(FlaxMBartPreTrainedModel):
module_class = FlaxMBartForConditionalGenerationModule
dtype: jnp.dtype = jnp.float32
@add_start_docstrings(MBART_DECODE_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=FlaxCausalLMOutputWithCrossAttentions, config_class=MBartConfig)
def decode(
self,
decoder_input_ids,
encoder_outputs,
encoder_attention_mask: Optional[jnp.ndarray] = None,
decoder_attention_mask: Optional[jnp.ndarray] = None,
decoder_position_ids: Optional[jnp.ndarray] = None,
past_key_values: dict = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
train: bool = False,
params: dict = None,
dropout_rng: PRNGKey = None,
):
r"""
Returns:
Example:
```python
>>> from transformers import MBartTokenizer, FlaxMBartForConditionalGeneration
>>> model = FlaxMBartForConditionalGeneration.from_pretrained("facebook/mbart-large-cc25")
>>> tokenizer = MBartTokenizer.from_pretrained("facebook/mbart-large-cc25")
>>> text = "My friends are cool but they eat too many carbs."
>>> inputs = tokenizer(text, max_length=1024, return_tensors="jax")
>>> encoder_outputs = model.encode(**inputs)
>>> decoder_start_token_id = model.config.decoder_start_token_id
>>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id
>>> outputs = model.decode(decoder_input_ids, encoder_outputs)
>>> logits = outputs.logits
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.return_dict
encoder_hidden_states = encoder_outputs[0]
if encoder_attention_mask is None:
batch_size, sequence_length = encoder_hidden_states.shape[:2]
encoder_attention_mask = jnp.ones((batch_size, sequence_length))
batch_size, sequence_length = decoder_input_ids.shape
if decoder_attention_mask is None:
decoder_attention_mask = jnp.ones((batch_size, sequence_length))
if decoder_position_ids is None:
if past_key_values is not None:
raise ValueError("Make sure to provide `decoder_position_ids` when passing `past_key_values`.")
decoder_position_ids = jnp.broadcast_to(
jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)
)
# Handle any PRNG if needed
rngs = {}
if dropout_rng is not None:
rngs["dropout"] = dropout_rng
inputs = {"params": params or self.params}
        # If past_key_values are passed, the cache is already initialized and the private flag init_cache
        # has to be passed down to ensure the cache is used. The cache also has to be marked as mutable so
        # that it can be changed by the FlaxMBartAttention module.
if past_key_values:
inputs["cache"] = past_key_values
mutable = ["cache"]
else:
mutable = False
def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):
decoder_module = module._get_decoder_module()
outputs = decoder_module(
decoder_input_ids,
decoder_attention_mask,
decoder_position_ids,
**kwargs,
)
hidden_states = outputs[0]
if self.config.tie_word_embeddings:
shared_embedding = module.model.variables["params"]["shared"]["embedding"]
lm_logits = module.lm_head.apply({"params": {"kernel": shared_embedding.T}}, hidden_states)
else:
lm_logits = module.lm_head(hidden_states)
lm_logits += module.final_logits_bias.astype(self.dtype)
return lm_logits, outputs
outputs = self.module.apply(
inputs,
decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"),
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"),
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
deterministic=not train,
rngs=rngs,
mutable=mutable,
method=_decoder_forward,
)
if past_key_values is None:
lm_logits, decoder_outputs = outputs
else:
(lm_logits, decoder_outputs), past = outputs
if return_dict:
outputs = FlaxCausalLMOutputWithCrossAttentions(
logits=lm_logits,
hidden_states=decoder_outputs.hidden_states,
attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
)
else:
outputs = (lm_logits,) + decoder_outputs[1:]
# add updated cache to model output
if past_key_values is not None and return_dict:
outputs["past_key_values"] = unfreeze(past["cache"])
return outputs
elif past_key_values is not None and not return_dict:
outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:]
return outputs
def prepare_inputs_for_generation(
self,
decoder_input_ids,
max_length,
        attention_mask: Optional[jnp.ndarray] = None,
        decoder_attention_mask: Optional[jnp.ndarray] = None,
encoder_outputs=None,
**kwargs
):
# initializing the cache
batch_size, seq_length = decoder_input_ids.shape
past_key_values = self.init_cache(batch_size, max_length, encoder_outputs)
# Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length.
        # But since the decoder uses a causal mask, those positions are masked anyway.
# Thus we can create a single static attention_mask here, which is more efficient for compilation
extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4")
if decoder_attention_mask is not None:
position_ids = decoder_attention_mask.cumsum(axis=-1) - 1
extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, decoder_attention_mask, (0, 0))
else:
position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length))
return {
"past_key_values": past_key_values,
"encoder_outputs": encoder_outputs,
"encoder_attention_mask": attention_mask,
"decoder_attention_mask": extended_attention_mask,
"decoder_position_ids": position_ids,
}
def update_inputs_for_generation(self, model_outputs, model_kwargs):
model_kwargs["past_key_values"] = model_outputs.past_key_values
model_kwargs["decoder_position_ids"] = model_kwargs["decoder_position_ids"][:, -1:] + 1
return model_kwargs
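    # Together these two hooks drive generation: `prepare_inputs_for_generation` builds the static
    # max-length attention mask and initializes the cache once, while `update_inputs_for_generation`
    # advances `decoder_position_ids` by one after each step so the new token is embedded at the
    # correct offset.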
FLAX_MBART_CONDITIONAL_GENERATION_DOCSTRING = r"""
Returns:
Summarization example:
```python
>>> from transformers import MBartTokenizer, FlaxMBartForConditionalGeneration, MBartConfig
>>> model = FlaxMBartForConditionalGeneration.from_pretrained("facebook/mbart-large-cc25")
>>> tokenizer = MBartTokenizer.from_pretrained("facebook/mbart-large-cc25")
>>> ARTICLE_TO_SUMMARIZE = "Meine Freunde sind cool, aber sie essen zu viel Kuchen."
>>> inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="np")
>>> # Generate Summary
>>> summary_ids = model.generate(inputs["input_ids"], num_beams=4, max_length=5).sequences
>>> print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False))
```
Mask filling example:
```python
>>> from transformers import MBartTokenizer, FlaxMBartForConditionalGeneration
>>> model = FlaxMBartForConditionalGeneration.from_pretrained("facebook/mbart-large-cc25")
>>> tokenizer = MBartTokenizer.from_pretrained("facebook/mbart-large-cc25")
>>> # de_DE is the language symbol id <LID> for German
>>> TXT = "</s> Meine Freunde sind <mask> nett aber sie essen zu viel Kuchen. </s> de_DE"
>>> input_ids = tokenizer([TXT], add_special_tokens=False, return_tensors="np")["input_ids"]
>>> logits = model(input_ids).logits
>>> masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero()[0].item()
>>> probs = logits[0, masked_index].softmax(dim=0)
>>> values, predictions = probs.topk(5)
>>> tokenizer.decode(predictions).split()
```
"""
overwrite_call_docstring(
FlaxMBartForConditionalGeneration, MBART_INPUTS_DOCSTRING + FLAX_MBART_CONDITIONAL_GENERATION_DOCSTRING
)
append_replace_return_docstrings(
FlaxMBartForConditionalGeneration, output_type=FlaxSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC
)
# Copied from transformers.models.bart.modeling_flax_bart.FlaxBartForSequenceClassificationModule with Bart->MBart
class FlaxMBartForSequenceClassificationModule(nn.Module):
config: MBartConfig
dtype: jnp.dtype = jnp.float32
num_labels: Optional[int] = None
def setup(self):
self.model = FlaxMBartModule(config=self.config, dtype=self.dtype)
self.classification_head = FlaxMBartClassificationHead(
config=self.config,
inner_dim=self.config.d_model,
num_classes=self.num_labels if self.num_labels is not None else self.config.num_labels,
pooler_dropout=self.config.classifier_dropout,
)
def _get_encoder_module(self):
return self.model.encoder
def _get_decoder_module(self):
return self.model.decoder
def __call__(
self,
input_ids,
attention_mask,
decoder_input_ids,
decoder_attention_mask,
position_ids,
decoder_position_ids,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
deterministic: bool = True,
):
outputs = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
position_ids=position_ids,
decoder_position_ids=decoder_position_ids,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
deterministic=deterministic,
)
hidden_states = outputs[0] # last hidden state
eos_mask = jnp.where(input_ids == self.config.eos_token_id, 1, 0)
# The first condition is necessary to overcome jax._src.errors.ConcretizationTypeError during JIT compilation
if type(eos_mask) != jax.interpreters.partial_eval.DynamicJaxprTracer:
if len(jnp.unique(eos_mask.sum(1))) > 1:
raise ValueError("All examples must have the same number of <eos> tokens.")
if any(eos_mask.sum(1) == 0):
raise ValueError("There are missing <eos> tokens in input_ids")
# Ensure to keep 1 only for the last <eos> token for each example
eos_mask_noised = eos_mask + jnp.arange(eos_mask.shape[1]) * 1e-6
eos_mask = jnp.where(eos_mask_noised == eos_mask_noised.max(1).reshape(-1, 1), 1, 0)
sentence_representation = jnp.einsum("ijk, ij -> ijk", hidden_states, eos_mask).sum(1)
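        # sentence_representation has shape (batch_size, hidden_size): the decoder hidden state at
        # the last <eos> position of each example.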
logits = self.classification_head(sentence_representation, deterministic=deterministic)
if not return_dict:
output = (logits,) + outputs[1:]
return output
return FlaxSeq2SeqSequenceClassifierOutput(
logits=logits,
decoder_hidden_states=outputs.decoder_hidden_states,
decoder_attentions=outputs.decoder_attentions,
cross_attentions=outputs.cross_attentions,
encoder_last_hidden_state=outputs.encoder_last_hidden_state,
encoder_hidden_states=outputs.encoder_hidden_states,
encoder_attentions=outputs.encoder_attentions,
)
@add_start_docstrings(
"""
    MBart model with a sequence classification head on top (a linear layer on top of the pooled output) e.g. for GLUE
tasks.
""",
MBART_START_DOCSTRING,
)
class FlaxMBartForSequenceClassification(FlaxMBartPreTrainedModel):
module_class = FlaxMBartForSequenceClassificationModule
dtype = jnp.float32
append_call_sample_docstring(
FlaxMBartForSequenceClassification,
_TOKENIZER_FOR_DOC,
_CHECKPOINT_FOR_DOC,
FlaxSeq2SeqSequenceClassifierOutput,
_CONFIG_FOR_DOC,
)
# Copied from transformers.models.bart.modeling_flax_bart.FlaxBartForQuestionAnsweringModule with Bart->MBart
class FlaxMBartForQuestionAnsweringModule(nn.Module):
config: MBartConfig
dtype: jnp.dtype = jnp.float32
num_labels = 2
def setup(self):
self.model = FlaxMBartModule(config=self.config, dtype=self.dtype)
self.qa_outputs = nn.Dense(
self.num_labels, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std)
)
def _get_encoder_module(self):
return self.model.encoder
def _get_decoder_module(self):
return self.model.decoder
def __call__(
self,
input_ids,
attention_mask,
decoder_input_ids,
decoder_attention_mask,
position_ids,
decoder_position_ids,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
deterministic: bool = True,
):
outputs = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
position_ids=position_ids,
decoder_position_ids=decoder_position_ids,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
deterministic=deterministic,
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = jnp.split(logits, logits.shape[-1], axis=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
if not return_dict:
output = (start_logits, end_logits) + outputs[1:]
return output
return FlaxSeq2SeqQuestionAnsweringModelOutput(
start_logits=start_logits,
end_logits=end_logits,
decoder_hidden_states=outputs.decoder_hidden_states,
decoder_attentions=outputs.decoder_attentions,
cross_attentions=outputs.cross_attentions,
encoder_last_hidden_state=outputs.encoder_last_hidden_state,
encoder_hidden_states=outputs.encoder_hidden_states,
encoder_attentions=outputs.encoder_attentions,
)
@add_start_docstrings(
"""
MBart Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
""",
MBART_START_DOCSTRING,
)
class FlaxMBartForQuestionAnswering(FlaxMBartPreTrainedModel):
module_class = FlaxMBartForQuestionAnsweringModule
dtype = jnp.float32
append_call_sample_docstring(
FlaxMBartForQuestionAnswering,
_TOKENIZER_FOR_DOC,
_CHECKPOINT_FOR_DOC,
FlaxSeq2SeqQuestionAnsweringModelOutput,
_CONFIG_FOR_DOC,
)
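# ---------------------------------------------------------------------------
# Hedged usage sketch (added; not part of the upstream module). It shows how
# the question-answering head above might be exercised. The checkpoint name
# is an assumption -- any MBart checkpoint with a compatible config works.
#
# >>> from transformers import AutoTokenizer, FlaxMBartForQuestionAnswering
# >>> tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25")
# >>> model = FlaxMBartForQuestionAnswering.from_pretrained("facebook/mbart-large-cc25")
# >>> inputs = tokenizer("Who wrote it?", "It was written by Jane.", return_tensors="np")
# >>> outputs = model(**inputs)
# >>> start = outputs.start_logits.argmax(-1)
# >>> end = outputs.end_logits.argmax(-1)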
| huggingface/transformers | src/transformers/models/mbart/modeling_flax_mbart.py | Python | apache-2.0 | 73,864 |
# -*- coding: utf-8 -*-
from django.conf import settings
from django.contrib import admin
from django.db.models import Q
from reversion.admin import VersionAdmin
from .models import RDFPrefix, SPARQLQuery, SPARQLUpdateQuery, CacheResource, UserGeneratedContent
class GroupOwnedAdmin(admin.ModelAdmin):
"""
Admin class for models that subclass the abstract ``Owned``
model. Handles limiting the change list to objects owned by the
logged in user, as well as setting the owner of newly created
objects to the logged in user.
    Remember that this will include the ``user`` field in the required
    fields for the admin change form, which may not be desirable. The
    best approach to solve this is to define a ``fieldsets`` attribute
    that excludes the ``user`` field, or simply add ``user`` to your
    admin excludes: ``exclude = ('user',)``
"""
def save_form(self, request, form, change):
"""
Set the object's owner as the logged in user.
"""
obj = form.save(commit=False)
if obj.user_id is None:
obj.user = request.user
return super(GroupOwnedAdmin, self).save_form(request, form, change)
def queryset(self, request):
"""
Filter the change list by currently logged in user if not a
superuser. We also skip filtering if the model for this admin
class has been added to the sequence in the setting
``OWNABLE_MODELS_ALL_EDITABLE``, which contains models in the
format ``app_label.object_name``, and allows models subclassing
        ``Owned`` to be excluded from filtering, e.g. ownership should
not imply permission to edit.
"""
opts = self.model._meta
model_name = ("%s.%s" % (opts.app_label, opts.object_name)).lower()
models_all_editable = settings.OWNABLE_MODELS_ALL_EDITABLE
models_all_editable = [m.lower() for m in models_all_editable]
qs = super(GroupOwnedAdmin, self).queryset(request)
if request.user.is_superuser or model_name in models_all_editable:
return qs
return qs.filter(Q(groups__in=request.user.groups.values_list('id', flat=True)) | Q(user__id=request.user.id))
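# Hedged usage sketch (added; not part of the original module): a concrete
# admin that uses GroupOwnedAdmin while hiding the ``user`` field, as the
# class docstring above recommends. ``MyOwnedModel`` is a hypothetical
# ``Owned`` subclass, so this is left as an illustrative comment.
#
# class MyOwnedModelAdmin(GroupOwnedAdmin):
#     exclude = ('user',)
#
# admin.site.register(MyOwnedModel, MyOwnedModelAdmin)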
class SPARQLQueryAdmin(VersionAdmin):
list_filter = ['prefixes', 'modified']
filter_horizontal = ('prefixes',)
fields = ['title', 'prefixes', 'query', 'description']
admin.site.register(SPARQLQuery, SPARQLQueryAdmin)
class RDFPrefixAdmin(VersionAdmin):
pass
admin.site.register(RDFPrefix, RDFPrefixAdmin)
class CacheResourceAdmin(VersionAdmin):
pass
admin.site.register(CacheResource, CacheResourceAdmin)
class SPARQLUpdateQueryAdmin(VersionAdmin):
list_filter = ['prefixes', 'modified']
filter_horizontal = ('prefixes',)
fields = ['title', 'prefixes', 'query', 'description']
admin.site.register(SPARQLUpdateQuery, SPARQLUpdateQueryAdmin)
class UserGeneratedContentAdmin(VersionAdmin):
list_display = ['name', 'content_type', 'link']
list_filter = ['content_type', 'published']
fields = ['name', 'content_type', 'link',
'source_uri', 'short_description', 'published']
if not admin.site.is_registered(UserGeneratedContent):
admin.site.register(UserGeneratedContent, UserGeneratedContentAdmin)
| delving/nave | nave/lod/admin.py | Python | gpl-2.0 | 3,286 |
"""""
A mixin for staff grading.
"""
import logging
from xblock.core import XBlock
from submissions import team_api as team_sub_api
from openassessment.assessment.api import (
staff as staff_api,
teams as teams_api
)
from openassessment.assessment.errors import StaffAssessmentInternalError, StaffAssessmentRequestError
from openassessment.workflow import (
api as workflow_api,
team_api as team_workflow_api
)
from .data_conversion import (
clean_criterion_feedback, create_rubric_dict, verify_assessment_parameters, verify_multiple_assessment_parameters
)
from .staff_area_mixin import require_course_staff
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class StaffAssessmentMixin:
"""
This mixin is for all staff-assessment related endpoints.
"""
def staff_assessment_exists(self, submission_uuid):
"""
Returns True if there exists a staff assessment for the given uuid. False otherwise.
"""
return staff_api.get_latest_staff_assessment(submission_uuid) is not None
def do_staff_assessment(self, data):
"""
        Creates a staff assessment with the given assessment info,
        publishes an openassessmentblock.staff_assess tracking event, and
        updates the assessed submission's workflow.
"""
if 'submission_uuid' not in data:
return False, self._("The submission ID of the submission being assessed was not found.")
if self.is_team_assignment():
return self._team_assess(data)
else:
try:
assessment = staff_api.create_assessment(
data['submission_uuid'],
self.get_student_item_dict()["student_id"],
data['options_selected'],
clean_criterion_feedback(self.rubric_criteria, data['criterion_feedback']),
data['overall_feedback'],
create_rubric_dict(self.prompts, self.rubric_criteria_with_labels)
)
assess_type = data.get('assess_type', 'regrade')
self.publish_assessment_event("openassessmentblock.staff_assess", assessment, type=assess_type)
workflow_api.update_from_assessments(
assessment["submission_uuid"],
None,
override_submitter_requirements=(assess_type == 'regrade')
)
except StaffAssessmentRequestError:
logger.warning(
"An error occurred while submitting a staff assessment "
"for the submission %s",
data['submission_uuid'],
exc_info=True
)
msg = self._("Your staff assessment could not be submitted.")
return False, msg
except StaffAssessmentInternalError:
logger.exception(
"An error occurred while submitting a staff assessment "
"for the submission %s",
data['submission_uuid']
)
msg = self._("Your staff assessment could not be submitted.")
return False, msg
return True, ''
@XBlock.json_handler
@require_course_staff("STUDENT_INFO")
@verify_assessment_parameters
def staff_assess(self, data, suffix=''): # pylint: disable=unused-argument
"""
Create a staff assessment from a staff submission.
"""
success, err_msg = self.do_staff_assessment(data)
return {'success': success, 'msg': err_msg}
@XBlock.json_handler
@require_course_staff("STUDENT_INFO")
@verify_multiple_assessment_parameters
def bulk_staff_assess(self, data, suffix=''): # pylint: disable=unused-argument
"""
        Create a staff assessment from multiple staff submissions.
params: list of dicts in the following format:
{
submission_uuid (str): The submission uuid for the submission being assessed,
options_selected (dict): Dictionary mapping criterion names to the option names the user selected
for that criterion.
criterion_feedback (dict): Dictionary mapping criterion names to the free-form text feedback given
for the criterion. Since criterion feedback is optional, some criteria may
not appear in the dictionary.
overall_feedback (str): Free-form text feedback on the submission overall.
}
returns a json dict:
{
success: (boolean) was the operation successful? were all assessments successfully created?
msg: (string) [optional] error message, if applicable
errors: (dict) [optional] mapping from index of an assessment in the input params -> an error message, if
there was an error submitting that assessment
}
"""
errors = {}
for assessment_index, assessment in enumerate(data):
success, err_msg = self.do_staff_assessment(assessment)
if not success:
errors[assessment_index] = err_msg
if errors:
return {
'success': False,
'msg': self._("There were one or more errors submitting the requested assessments"),
'errors': errors
}
else:
return {'success': True, 'msg': ''}
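    # Hedged illustration (added; not part of the original source): a minimal
    # payload accepted by bulk_staff_assess. The uuid and rubric names are
    # made up.
    #
    # [
    #     {
    #         "submission_uuid": "a1b2c3",
    #         "options_selected": {"Ideas": "Good"},
    #         "criterion_feedback": {"Ideas": "Clear and focused thesis."},
    #         "overall_feedback": "Nice work overall.",
    #     },
    # ]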
def _team_assess(self, data):
"""
        Encapsulates staff assessment for a team-based assignment.
        Returns a ``(success, err_msg)`` tuple, mirroring ``do_staff_assessment``.
"""
try:
team_submission = team_sub_api.get_team_submission_from_individual_submission(data['submission_uuid'])
team_submission_uuid = team_submission['team_submission_uuid']
assessment = teams_api.create_assessment(
team_submission_uuid,
self.get_student_item_dict()["student_id"],
data['options_selected'],
clean_criterion_feedback(self.rubric_criteria, data['criterion_feedback']),
data['overall_feedback'],
create_rubric_dict(self.prompts, self.rubric_criteria_with_labels)
)
assess_type = data.get('assess_type', 'regrade')
self.publish_assessment_event("openassessmentblock.staff_assess", assessment[0], type=assess_type)
team_workflow_api.update_from_assessments(
team_submission_uuid,
override_submitter_requirements=(assess_type == 'regrade')
)
except StaffAssessmentRequestError:
logger.warning(
"An error occurred while submitting a team assessment "
"for the submission %s",
data['submission_uuid'],
exc_info=True
)
msg = self._("Your team assessment could not be submitted.")
            return False, msg
except StaffAssessmentInternalError:
logger.exception(
"An error occurred while submitting a team assessment "
"for the submission %s",
data['submission_uuid'],
)
msg = self._("Your team assessment could not be submitted.")
            return False, msg
        return True, ''
@XBlock.handler
def render_staff_assessment(self, data, suffix=''): # pylint: disable=unused-argument
"""
Renders the Staff Assessment HTML section of the XBlock
Generates the staff assessment HTML for the Open
Assessment XBlock. See OpenAssessmentBlock.render_assessment() for
more information on rendering XBlock sections.
        Args:
            data (dict): Unused; required by the XBlock handler interface.
        """
path, context_dict = self.staff_path_and_context()
return self.render_assessment(path, context_dict)
def staff_path_and_context(self):
"""
Retrieve the correct template path and template context for the handler to render.
"""
workflow = self.get_workflow_info()
status = workflow.get('status')
path = 'openassessmentblock/staff/oa_staff_grade.html'
not_available_context = {
'status_value': self._('Not Available'),
'button_active': 'disabled=disabled aria-expanded=false',
'step_classes': 'is--unavailable',
}
if status == 'cancelled':
context = {
'status_value': self._('Cancelled'),
'icon_class': 'fa-exclamation-triangle',
'step_classes': 'is--unavailable',
'button_active': 'disabled=disabled aria-expanded=false',
}
elif status == 'done': # Staff grade exists and all steps completed.
context = {
'status_value': self._('Complete'),
'icon_class': 'fa-check',
'step_classes': 'is--complete is--empty',
'button_active': 'disabled=disabled aria-expanded=false',
}
elif status == 'waiting':
# If we are in the 'waiting' workflow, this means that a staff grade cannot exist
# (because if a staff grade did exist, we would be in 'done' regardless of whether other
# peers have assessed). Therefore we show that we are waiting on staff to provide a grade.
context = {
'status_value': self._('Not Available'),
'message_title': self._('Waiting for a Staff Grade'),
'message_content': self._('Check back later to see if a course staff member has assessed '
'your response. You will receive your grade after the assessment '
'is complete.'),
'step_classes': 'is--showing',
'button_active': 'aria-expanded=true',
}
elif status is None: # not started
context = not_available_context
else: # status is 'self' or 'peer', indicating that the student still has work to do.
if self.staff_assessment_exists(self.submission_uuid):
context = {
'status_value': self._('Complete'),
'icon_class': 'fa-check',
'message_title': self._('You Must Complete the Steps Above to View Your Grade'),
'message_content': self._('Although a course staff member has assessed your response, '
'you will receive your grade only after you have completed '
'all the required steps of this problem.'),
'step_classes': 'is--initially--collapsed',
'button_active': 'aria-expanded=false',
}
else: # Both student and staff still have work to do, just show "Not Available".
context = not_available_context
context['xblock_id'] = self.get_xblock_id()
return path, context
| edx/edx-ora2 | openassessment/xblock/staff_assessment_mixin.py | Python | agpl-3.0 | 11,241 |
from __future__ import absolute_import
from bokeh.io import save
from bokeh.plotting import figure
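# Minimal integration example for the ``visible`` property: the saved output
# should show only the second line (l2), with the x-axis and y-grid hidden.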
plot = figure(toolbar_location=None)
l1 = plot.line([1, 2, 3], [1, 2, 3])
l2 = plot.line([1, 2, 3], [2, 4, 6])
plot.xaxis.visible = False
plot.ygrid.visible = False
l1.visible = False
l2.visible = True
save(plot)
| mindriot101/bokeh | examples/integration/plots/visible_property.py | Python | bsd-3-clause | 317 |
# -*- coding: utf-8 -*-
import pytest
from api.base.settings.defaults import API_BASE
from osf_tests.factories import (
AuthUserFactory,
InstitutionFactory,
)
@pytest.mark.django_db
class TestUserInstitutionRelationship:
@pytest.fixture()
def institution_one(self):
return InstitutionFactory()
@pytest.fixture()
def institution_two(self):
return InstitutionFactory()
@pytest.fixture()
def user(self, institution_one, institution_two):
user = AuthUserFactory()
user.affiliated_institutions.add(institution_one)
user.affiliated_institutions.add(institution_two)
user.save()
return user
@pytest.fixture()
def user_two(self):
return AuthUserFactory()
@pytest.fixture()
def url(self, user):
return '/{}users/{}/relationships/institutions/'.format(API_BASE, user._id)
def test_get(self, app, user, institution_one, institution_two, url):
# test_get_relationship_institutions
res = app.get(
url, auth=user.auth
)
assert res.status_code == 200
assert user.absolute_api_v2_url + 'relationships/institutions/' in res.json['links']['self']
assert user.absolute_api_v2_url + 'institutions/' in res.json['links']['html']
ids = [val['id'] for val in res.json['data']]
assert institution_one._id in ids
assert institution_two._id in ids
# test_get_institutions_relationship_while_logged_out
res = app.get(
url
)
ids = [val['id'] for val in res.json['data']]
assert institution_one._id in ids
assert institution_two._id in ids
def test_delete_one(self, app, user, institution_one, institution_two, url):
res = app.delete_json_api(
url,
{'data': [
{'type': 'institutions', 'id': institution_one._id}
]},
auth=user.auth
)
assert res.status_code == 204
user.reload()
ids = list(user.affiliated_institutions.values_list('_id', flat=True))
assert institution_one._id not in ids
assert institution_two._id in ids
def test_delete_multiple(self, app, user, institution_one, institution_two, url):
res = app.delete_json_api(
url,
{'data': [
{'type': 'institutions', 'id': institution_one._id},
{'type': 'institutions', 'id': institution_two._id}
]},
auth=user.auth
)
assert res.status_code == 204
user.reload()
ids = list(user.affiliated_institutions.values_list('_id', flat=True))
assert institution_one._id not in ids
assert institution_two._id not in ids
def test_delete_one_not_existing(self, app, user, institution_one, institution_two, url):
res = app.delete_json_api(
url,
{'data': [
{'type': 'institutions', 'id': 'not_an_id'}
]},
auth=user.auth
)
assert res.status_code == 204
user.reload()
ids = list(user.affiliated_institutions.values_list('_id', flat=True))
assert institution_one._id in ids
assert institution_two._id in ids
def test_institution_relationship_errors(self, app, user, user_two, institution_one, institution_two, url):
# test_type_mistyped
res = app.delete_json_api(
url,
{'data': [
{'type': 'wow', 'id': institution_one._id}
]},
auth=user.auth, expect_errors=True
)
assert res.status_code == 409
# test_post_with_auth
res = app.post_json_api(
url, {},
auth=user.auth,
expect_errors=True
)
assert res.status_code == 405
# test_put_with_auth
res = app.put_json_api(
url, {},
auth=user.auth,
expect_errors=True
)
assert res.status_code == 405
# test_post_without_auth
res = app.post_json_api(
url, {}, expect_errors=True
)
assert res.status_code == 401
# test_put_without_auth
res = app.put_json_api(
url, {}, expect_errors=True
)
assert res.status_code == 401
# test_delete_no_auth
res = app.delete_json_api(
url,
{'data': [
{'type': 'institutions', 'id': institution_one._id}
]},
expect_errors=True
)
assert res.status_code == 401
# test_delete_wrong_auth
res = app.delete_json_api(
url,
{'data': [
{'type': 'institutions', 'id': institution_one._id}
]},
auth=user_two.auth, expect_errors=True
)
assert res.status_code == 403
# test_attempt_payload_not_in_array
res = app.delete_json_api(
url,
{'data':
{'type': 'institutions', 'id': institution_one._id}
},
auth=user.auth, expect_errors=True
)
assert res.status_code == 400
# test_attempt_with_no_type_field
res = app.delete_json_api(
url,
{'data': [
{'id': institution_one._id}
]},
auth=user.auth, expect_errors=True
)
assert res.status_code == 400
# test_attempt_with_no_id_field
res = app.delete_json_api(
url,
{'data': [
{'type': 'institutions'}
]},
auth=user.auth, expect_errors=True
)
assert res.status_code == 400
| aaxelb/osf.io | api_tests/users/views/test_user_institutions_relationship.py | Python | apache-2.0 | 5,760 |
from typing import Tuple
class Parents:
def __init__(self, parents):
        # type: (Tuple[Human, ...]) -> None
self.parents = parents
def is_parent(self, human):
# type: (Human) -> bool
return human in self.parents
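# Hedged usage sketch (added; not in the original file). ``Human`` is defined
# elsewhere in the package; the stand-in class below is an assumption made
# only so this demo can run on its own.
if __name__ == "__main__":
    class Human:  # stand-in for the real app.entity.human class
        pass

    mother, father = Human(), Human()
    family = Parents((mother, father))
    assert family.is_parent(mother)
    assert not family.is_parent(Human())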
| Diralf/evolution | app/entity/human/parents.py | Python | mit | 247 |
"""
Welcome to the TriFusion API reference guide. This reference guide details
the sub-packages and modules used for each component of TriFusion.
This guide is intended for power users and developers that wish to modify
the source and/or contribute with new code. A brief description of each
main component of TriFusion follows below with the relevant modules and
classes specified. These descriptions can be used to send you on your
way to the sections of interest, where a more detailed documentation
is available.
What is TriFusion
=================
TriFusion is a GUI and command line application designed to streamline the
gathering, processing and visualization of phylogenomic data. It is broadly
divided in these three modules.
Orthology
---------
Provides a pipeline for running OrthoMCL, with the code ported to python
and SQLite instead of the original perl and MySQL. OrthoMCL is the most
popular ortholog detection pipeline, and TriFusion offers an easy and
intuitive way of running the pipeline while providing the complete
range of options available.
In addition to the search pipeline, TriFusion allows the filtering and visual
exploration of the resulting ortholog groups. In the end, orthologs can
be exported as protein or DNA sequences.
Process
-------
At its core, the Process module is a conversion and concatenation tool that
handles very large sequence alignment data sets. It reads and exports
alignments into several popular formats used in phylogenetics and population
genetics. In addition to these main operations, TriFusion offers a wide
array of manipulations that can be performed on alignment data, such as
filtering, collapsing, creating consensus, etc.
Statistics
----------
Generates a wide array of graphical visualizations and statistical
analyses of alignment data sets.
How can TriFusion be used
=========================
TriFusion can be used as a:
- Desktop application with graphical interface (TriFusion).
- Command line application (orthomcl_pipeline, TriSeq and TriStats).
- Library of high performance classes to parse, modify, export and
plot alignment data.
Components of TriFusion
=======================
The TriFusion package is the result of multiple modular components that
work in combination to produce the main application. This modularity
means that the graphical interface is separated from the multiple
backends that power its features, and each backend works independently
of each other. This greatly facilitates changing the existing objects
or creating new ones for specific modules without having to worry about
other aspects of the package.
Here is a brief overview of the GUI and backend components.
TriFusion GUI
-------------
The core graphical interface of TriFusion is controlled by two main files:
- :mod:`trifusion.app`: Contains the :class:`~trifusion.app.TriFusionApp`
class, with most of the methods and attributes responsible for
the interactivity and event handling of the TriFusion application.
- :mod:`trifusion.trifusion.kv`: Contains graphical instructions for the
main window and the definition of the majority of the Widgets in the
kivy language.
.. warning:: API documentation of :mod:`trifusion.app` is still under progress.
Main screens
~~~~~~~~~~~~
The graphical instructions in kivy language for the 8 main screens of
TriFusion are defined in the `trifusion/data/screens` directory, each screen
with its own `.kv` file. These files contain only the definition of
the root widgets of each screen. Other Widgets that are later added to
the screen via some method
should be defined in the main `trifusion.trifusion.kv` file.
The initial setup of these screens is performed in the
:func:`~trifusion.app.TriFusionApp.build` method.
Custom widgets
~~~~~~~~~~~~~~
Custom widgets can be created using the `kivy` toolkit. `Kivy` provides
a convenient way of defining graphical instructions to build the Widget's
layout using the kivy language instead of directly with python. Therefore,
the layout of new widgets can be defined in the `trifusion.trifusion.kv` file.
If they need to be used by the python code, they also have to be defined
as a new class in the :mod:`trifusion.data.resources.custom_widgets` module
and imported in the module where they will be used.
This class can be empty (e.g.
:func:`~trifusion.data.resources.custom_widgets.TableCell`) or it can harbor
custom attributes and methods that are useful for that widget
(e.g. :func:`~trifusion.data.resources.custom_widgets.FileChooserL`).
.. warning:: API documentation of
:mod:`trifusion.data.resources.custom_widgets` is still under
progress.
Icons and backgrounds
~~~~~~~~~~~~~~~~~~~~~
Icons and background images are stored in the `trifusion/data/backgrounds`
directory. In either python or kivy files, these backgrounds can be
referenced with the path relative to the package root. For example, in any
python file, they can be referenced like::
bn = "data/backgrounds/check_ok.png"
Or in any kivy file::
background_normal: "data/backgrounds/bt_process.png"
Running tasks in the background
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Time consuming operations are executed in worker threads separated from the
main GUI thread. These background tasks are defined in
:mod:`trifusion.data.resources.background_tasks`. The module documentation
provides more information on how to setup background tasks in TriFusion.
In-App help
~~~~~~~~~~~
Help buttons are spread throughout TriFusion. The help information is defined
in multiple dictionary objects in :mod:`trifusion.data.resources.info_data`.
Orthology backend
-----------------
The orthology search pipeline is defined in
:mod:`trifusion.orthomcl_pipeline`. This module can be used as a CLI program
to execute the pipeline and is also used by TriFusion as a library.
The handling and exploration of ortholog group files, the output of the
orthology search operation, is made in the
:mod:`trifusion.ortho.OrthomclToolbox` module, in particular using the
:class:`~trifusion.ortho.OrthomclToolbox.MultiGroupsLight` and
:class:`~trifusion.ortho.OrthomclToolbox.GroupLight` classes.
.. warning:: API documentation for :mod:`trifusion.orthomcl_pipeline` and
:mod:`trifusion.ortho.OrthomclToolbox` is still in progress.
Process backend
---------------
The main functionality of the Process module (and the TriSeq CLI) program
are provided by the modules in the :mod:`trifusion.process` sub package.
Classes that handle alignment data are defined in
:mod:`trifusion.process.sequence`, while data set partitions are handled
in the :mod:`trifusion.process.data` module.
Statistics backend
------------------
The generation of plot data in the Statistics screen or by the TriStats
CLI program is a joint effort between the
:class:`~trifusion.process.sequence.AlignmentList` class and the plotting
functions defined in the :mod:`trifusion.base.plotter` module. Briefly,
the methods of the :class:`~trifusion.process.sequence.AlignmentList` class
are responsible for generating the data and plotting instructions, while
the functions in the :mod:`trifusion.base.plotter` module receive that
information and generate the plot.
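
For instance, the plot-data generation can be driven directly from the
library. A minimal sketch follows (the assumption here is that
``AlignmentList`` accepts a list of alignment file paths; the file names
are made up)::

    from trifusion.process.sequence import AlignmentList

    # Parse two alignment files; the resulting object feeds the plot-data
    # methods mentioned above
    aln_list = AlignmentList(["locus1.fas", "locus2.fas"])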
"""
__version__ = "1.0.2rc1"
__build__ = "260918"
__author__ = "Diogo N. Silva"
__copyright__ = "Diogo N. Silva"
__credits__ = ["Diogo N. Silva", "Tiago F. Jesus", "Fernando Alves", "Ana Vieira"]
__license__ = "GPL3"
__maintainer__ = "Diogo N. Silva"
__email__ = "[email protected]"
__status__ = "4 - Beta"
| ODiogoSilva/TriFusion | trifusion/__init__.py | Python | gpl-3.0 | 7,541 |
import collections
import numpy as np
import os.path
from sklearn.feature_selection import *
from sklearn.model_selection import *
from scipy.stats import linregress
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from github_analysis_tool.analyzer.clustering import Clustering
from github_analysis_tool.analyzer.classification import Classification
from github_analysis_tool.services.database_service import DatabaseService
from github_analysis_tool.analyzer.analysis_utilities import AnalysisUtilities
from github_analysis_tool import OssConstants
class NetworkAnalysis:
def __init__(self):
self.classification = Classification()
self.clustering = Clustering()
self.database_service = DatabaseService()
self.__analysis_utilities = AnalysisUtilities()
def compute_cross_correlations(self, data, message=""):
correlations = collections.OrderedDict()
avg_correlations = collections.OrderedDict()
metrics_to_be_removed = set()
print(data.shape)
        for metric1 in data.columns:
            correlations[metric1] = collections.OrderedDict()
            corrs = []
            for metric2 in data.columns:
                # http://stackoverflow.com/questions/3949226/calculating-pearson-correlation-and-significance-in-python
                # corr = np.corrcoef(feature_vector_pair[0], feature_vector_pair[1])[0][1]
                # Linear regression of the two feature vectors; rvalue is the
                # Pearson correlation coefficient.
                lin = linregress(data[metric1].values, data[metric2].values)
                correlations[metric1][metric2] = lin.rvalue
                corrs.append(lin.rvalue)
            avg_correlations[metric1] = np.mean(corrs)
considered_metrics = set()
metric_votes = {}
for metric1 in correlations:
considered_metrics.add(metric1)
for metric2 in list(set(correlations.keys())-considered_metrics):
if metric1 == metric2:
continue
if abs(correlations[metric1][metric2]) > 0.50:
#print(metric1, metric2, str(correlations[metric1][metric2]))
if metric1 not in metric_votes:
metric_votes[metric1] = -1
else:
metric_votes[metric1] -= 1
if metric2 not in metric_votes:
metric_votes[metric2] = -1
else:
metric_votes[metric2] -= 1
else:
if metric1 not in metric_votes:
metric_votes[metric1] = 1
else:
metric_votes[metric1] += 1
if metric2 not in metric_votes:
metric_votes[metric2] = 1
else:
metric_votes[metric2] += 1
        for metric in metric_votes:
            if metric_votes[metric] < 0:
                metrics_to_be_removed.add(metric)
new_data = data.drop(metrics_to_be_removed, axis=1)
print(new_data.shape)
output_file_path = os.path.join(OssConstants.OUTPUT_DIR, message + "correlation_matrix.csv")
with open(output_file_path, "w") as output:
output.write(";")
for metric1 in correlations:
output.write(metric1 + ";")
output.write("\n")
for metric1 in correlations:
output.write(metric1 + ";")
for metric2 in correlations[metric1]:
output.write(str(correlations[metric1][metric2]) + ";")
output.write("\n")
return new_data
def __decompose_and_preprocess(self, df, labelling_func, out_folder_path, normalize):
        reduced_df = df[~df.index.duplicated(keep="last")]  # Remove duplicate rows
labels, row_labels, ignored_indexes = labelling_func(df=reduced_df)
reduced_df = self.__analysis_utilities.drop_rows(reduced_df, ignored_indexes) # Remove non-labeled rows/repos
if normalize:
reduced_df = self.__analysis_utilities.normalize_df(reduced_df)
columns, repos, observations = self.__analysis_utilities.decompose_df(reduced_df)
        k = len(columns) // 3  # keep roughly a third of the features
        # TODO: We are analyzing features twice, better to do that at once
# Write the names of k best features to a file
self.__analysis_utilities.export_best_feature_names(reduced_df, labels, out_folder_path, k)
reduced_observations = SelectKBest(chi2, k=k).fit_transform(observations, labels)
return reduced_observations, labels, row_labels
def do_classification(self, classifiers, df, df_name, labelling_func, labelling_name, sampling, normalize):
print("----> Classifying data set \"" + df_name + "\" with \"" + labelling_name + " \" labels.")
msg = "" # this string will be passed as message to file construct file name
if normalize:
msg += "_normalized"
if sampling:
msg += "_sampling"
out_folder_path = os.path.join(OssConstants.OUTPUT_DIR, "classification", labelling_name, df_name)
if not os.path.exists(out_folder_path):
os.makedirs(out_folder_path)
observations, labels, row_labels = \
self.__decompose_and_preprocess(df, labelling_func, out_folder_path, normalize)
''' Preprocessing is Done, now do classification! '''
for classifier in classifiers:
conf_matrices = []
scores = []
for i in range(0, 10):
print("------> iteration: " + str(i))
label_names = np.unique(labels)
scores_of_iter = []
conf_matrices_of_iter = []
for train_index, test_index in StratifiedKFold(n_splits=3, shuffle=False).split(observations, labels):
training_set, training_labels = np.array(observations)[train_index], np.array(labels)[train_index]
test_set, test_labels = np.array(observations)[test_index], np.array(labels)[test_index]
if sampling:
biasing_labels = self.__analysis_utilities.get_biasing_labels(training_labels, 0.40)
size = self.__analysis_utilities.find_sampling_size(biasing_labels, training_labels)
# retrieve reduced / sampled training set-labels.
training_set, training_labels = self.__analysis_utilities.undersampling(training_set,
training_labels, biasing_labels,
size, seed=i)
# do classification and get results.
conf_matrix, score = self.classification.classify(classifier["func"], classifier["name"], out_folder_path,
training_set, training_labels, test_set, test_labels,
msg=msg+"_"+str(i))
scores_of_iter.append((score, len(test_set)-score))
conf_matrices_of_iter.append(conf_matrix)
''' 3-Fold CV is done. '''
result_conf_matrix_of_iter = self.__analysis_utilities.sum_matrices(conf_matrices_of_iter)
conf_matrices.append(result_conf_matrix_of_iter)
scores.append(tuple(map(sum, zip(*scores_of_iter))))
# export results
out_file_pre_path = os.path.join(out_folder_path, classifier["name"] + msg)
total_score = self.__analysis_utilities.compute_total_confusion_matrix(conf_matrices, out_file_pre_path,
label_names, scores)
# add it to Reports csv.
self.__analysis_utilities.export_report(total_score, out_folder_path, classifier["name"]+msg)
def do_clustering(self, data_frame, df_name):
# Try different clustering algorithms with different parameters
print("----> Clustering data set: " + df_name)
out_folder_path = os.path.join(OssConstants.OUTPUT_DIR, "clustering", df_name)
if not os.path.exists(out_folder_path):
os.makedirs(out_folder_path)
for i in range(3, 9):
print("------> MB K-Means clustering with # of clusters: " + str(i))
self.clustering.minibatchs_k_means_clustering(out_folder_path, data_frame, number_of_clusters=i)
for i in range(3, 9):
print("------> K-Means clustering with # of clusters: " + str(i))
self.clustering.k_means_clustering(out_folder_path, data_frame, number_of_clusters=i)
for i in range(9, 15):
print("------> Agglomerative clustering with # of clusters: " + str(i))
self.clustering.agglomerative_clustering(out_folder_path, data_frame, number_of_clusters=i)
for i in range(2, 8, 2):
for j in range(2, 5):
print("------> HDBSCAN clustering with min clusters: " + str(i) + ", min samples: " + str(j))
self.clustering.hdbscan_clustering(out_folder_path, data_frame, min_cluster_size=i, min_samples=j)
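# Hedged usage sketch (added; not part of the original module): pruning highly
# correlated metrics from a toy feature matrix. Instantiating NetworkAnalysis
# wires up project services (e.g. DatabaseService), so this stays a comment.
#
# import pandas as pd
# df = pd.DataFrame({"m1": [1, 2, 3], "m2": [2, 4, 6], "m3": [3, 1, 2]})
# analysis = NetworkAnalysis()
# pruned = analysis.compute_cross_correlations(df, message="toy_")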
| itu-oss-project-team/oss-github-analysis-project | github_analysis_tool/analyzer/network_analysis.py | Python | mit | 9,487 |
from assert_helpers import assert_difference, assert_no_difference
from ekklesia_portal.datamodel import ArgumentRelation, ArgumentVote
from ekklesia_portal.enums import ArgumentType
from webtest_helpers import assert_deform
def test_argumentrelation(client, argument_relation):
proposition = argument_relation.proposition
argument = argument_relation.argument
res = client.get(f"/p/{proposition.id}/a/{argument.id}")
assert 'argument_vote_btn' not in res, 'vote button not present'
html = res.html
proposition_title_link = html.find(class_="proposition_title").find("a")
assert proposition_title_link.text == proposition.title
assert str(proposition.id) in proposition_title_link["href"]
assert html.find(class_="argument_title").find("a").text == argument.title
assert html.find(class_="argument_abstract").text == argument.abstract
assert html.find(class_="argument_details_extended").text == argument.details
def test_argumentrelation_with_logged_in_user(client, argument_relation, logged_in_user):
proposition = argument_relation.proposition
argument = argument_relation.argument
res = client.get(f"/p/{proposition.id}/a/{argument.id}")
assert 'argument_vote_btn' in res, 'vote button present'
def test_new(client, logged_in_user, proposition):
res = client.get(f'/p/{proposition.id}/a/+new?relation_type={ArgumentType.PRO.name}')
expected = {'proposition_id': proposition.id, 'relation_type': ArgumentType.PRO.name}
assert_deform(res, expected)
def test_create(db_query, client, logged_in_user, proposition):
data = {
'proposition_id': proposition.id,
'relation_type': ArgumentType.PRO.name,
'title': 'test title',
'abstract': 'test abstract',
'details': 'test details'
}
with assert_difference(db_query(ArgumentRelation).count, 1):
client.post(f"/p/{proposition.id}/a/", data, status=302)
def test_does_not_create_without_title(db_query, client, logged_in_user, proposition):
data = {
'proposition_id': proposition.id,
'relation_type': ArgumentType.PRO.name,
'abstract': 'test abstract',
'details': 'test details'
}
with assert_no_difference(db_query(ArgumentRelation).count):
client.post(f"/p/{proposition.id}/a/", data, status=200)
def test_vote(db_query, client, logged_in_user, argument_relation):
url = f"/p/{argument_relation.proposition_id}/a/{argument_relation.argument_id}/vote"
client.post(url, {'weight': 1}, status=302)
qq = db_query(ArgumentVote).filter_by(member_id=logged_in_user.id, relation_id=argument_relation.id).one
vote = qq()
assert vote.weight == 1
client.post(url, {'weight': 0}, status=302)
vote = qq()
assert vote.weight == 0
client.post(url, {'weight': -1}, status=302)
vote = qq()
assert vote.weight == -1
client.post(url, {'weight': -2}, status=400)
vote = qq()
assert vote.weight == -1
| dpausp/arguments | tests/concepts/argument_relation/test_argument_relation.py | Python | agpl-3.0 | 2,982 |
# Implements a class for solving the "Missionaries and Cannibals" problem.
# Includes examples of its use to solve the problem with the breadth-first
# search algorithm.
#
# Author: Dr. Santiago Enrique Conant Pablos
# Date: August 24, 2016
from search import ( # Building blocks for constructing problems
Problem, Node, Graph, UndirectedGraph,
SimpleProblemSolvingAgentProgram,
GraphProblem
)
from search import ( # Uninformed search algorithms
tree_search, graph_search, best_first_graph_search,
breadth_first_tree_search, breadth_first_search,
depth_first_tree_search, depth_first_graph_search,
depth_limited_search, iterative_deepening_search,
uniform_cost_search,
compare_searchers
)
from search import ( # Informed (heuristic) search algorithms
greedy_best_first_graph_search, astar_search
)
class MisionerosYCanibales(Problem):
"""El problema de misioneros y canibales.
Estado: (# Misioneros en lado 1, # Canibales en lado 1, Lado de la barca)
puede establecerse la cantidad de misioneros y caníbales involucrados"""
def __init__(self, inicial=(3,3,1), meta=(0,0,0), myc=3):
Problem.__init__(self, inicial, meta)
self.misycan = myc # No. de misioneros = No. de caníbales
self.acciones = ['M1M','M2M','M1C','M2C','M1M1C'] # acciones posibles
def actions(self, estado):
"Dependen de la distribución de misioneros y caníbales."
accs = []
for accion in self.acciones:
if accion == 'M1M' and \
not estado_ilegal(nuevo_estado(estado,1,0), self.misycan):
accs.append('M1M')
elif accion == 'M2M' and \
not estado_ilegal(nuevo_estado(estado,2,0), self.misycan):
accs.append('M2M')
elif accion == 'M1C' and \
not estado_ilegal(nuevo_estado(estado,0,1), self.misycan):
accs.append('M1C')
elif accion == 'M2C' and \
not estado_ilegal(nuevo_estado(estado,0,2), self.misycan):
accs.append('M2C')
elif accion == 'M1M1C' and \
not estado_ilegal(nuevo_estado(estado,1,1), self.misycan):
accs.append('M1M1C')
return accs
def result(self, estado, accion):
"El resultado se calcula sumando o restando misioneros y/o caníbales."
if accion == 'M1M':
return nuevo_estado(estado,1,0)
elif accion == 'M2M':
return nuevo_estado(estado,2,0)
elif accion == 'M1C':
return nuevo_estado(estado,0,1)
elif accion == 'M2C':
return nuevo_estado(estado,0,2)
elif accion == 'M1M1C':
return nuevo_estado(estado,1,1)
def h(self, node):
"Diferencia entre meta y estado actual"
amis, acan, al = node.state
gmis, gcan, gl = self.goal
return abs(gmis-amis) + abs(gcan-acan) + abs(gl-al)
def nuevo_estado(edo, mis, can):
"""Mueve mis misioneros y can caníbales para obtener un nuevo estado.
El estado resultante no se verifica (puede ser inválido)"""
nedo = list(edo)
if nedo[2] == 0:
nedo[2] = 1
else:
mis = - mis
can = - can
nedo[2] = 0
nedo[0] = nedo[0] + mis
nedo[1] = nedo[1] + can
return tuple(nedo)
def estado_ilegal(edo, misycan):
"""Determina si un estado es ilegal"""
return edo[0] < 0 or edo[0] > misycan or \
edo[1] < 0 or edo[1] > misycan or \
(edo[0] > 0 and edo[0] < edo[1]) or \
(edo[0] < misycan and edo[0] > edo[1])
def despliega_solucion(nodo_meta):
"""Despliega la secuencia de estados y acciones de una solución"""
acciones = nodo_meta.solution()
nodos = nodo_meta.path()
    print('SOLUTION:')
    print('State:', nodos[0].state)
    for na in range(len(acciones)):
        if acciones[na] == 'M1M':
            print('Action: move one missionary')
        if acciones[na] == 'M2M':
            print('Action: move two missionaries')
        if acciones[na] == 'M1C':
            print('Action: move one cannibal')
        if acciones[na] == 'M2C':
            print('Action: move two cannibals')
        if acciones[na] == 'M1M1C':
            print('Action: move one missionary and one cannibal')
        print('State:', nodos[na+1].state)
    print('END')
#-------------------------------------------------------------------
# USAGE EXAMPLES
# Problem 1: (3,3,1) -> (0,0,0) for 3 missionaries and 3 cannibals
prob1 = MisionerosYCanibales()
# Problem 2: (2,2,0) -> (0,0,1) for 3 missionaries and 3 cannibals
prob2 = MisionerosYCanibales((2,2,0),(0,0,1))
# Problem 3: (4,4,1) -> (2,2,0) for 4 missionaries and 4 cannibals
prob3 = MisionerosYCanibales((4,4,1),(2,2,0),4)
# Problem 4: (6,5,1) -> (6,0,0) for 6 missionaries and 6 cannibals
prob4 = MisionerosYCanibales((6,5,1),(6,0,0),6)
# Solving problem 1:
print("Solution of Problem 1 via breadth-first search")
meta1 = breadth_first_search(prob1)
if meta1:
    despliega_solucion(meta1)
else:
    print("Failure: no solution was found")
# Solving problem 2:
print("Solution of Problem 2 via breadth-first search")
meta2 = breadth_first_search(prob2)
if meta2:
    despliega_solucion(meta2)
else:
    print("Failure: no solution was found")
# Solving problem 3:
print("Solution of Problem 3 via breadth-first search")
meta3 = breadth_first_search(prob3)
if meta3:
    despliega_solucion(meta3)
else:
    print("Failure: no solution was found")
# Solving problem 4:
print("Solution of Problem 4 via breadth-first search")
meta4 = breadth_first_search(prob4)
if meta4:
    despliega_solucion(meta4)
else:
    print("Failure: no solution was found")
| nikodtbVf/aima-si | misioneros.py | Python | mit | 6,024 |