# Source: cainmatt/django -- django/db/models/fields/related.py
from __future__ import unicode_literals
import warnings
from functools import partial
from operator import attrgetter
from django import forms
from django.apps import apps
from django.core import checks, exceptions
from django.core.exceptions import FieldDoesNotExist
from django.db import connection, connections, router, transaction
from django.db.backends import utils
from django.db.models import Q, signals
from django.db.models.deletion import CASCADE, SET_DEFAULT, SET_NULL
from django.db.models.fields import (
BLANK_CHOICE_DASH, AutoField, Field, IntegerField, PositiveIntegerField,
PositiveSmallIntegerField,
)
from django.db.models.fields.related_lookups import (
RelatedExact, RelatedGreaterThan, RelatedGreaterThanOrEqual, RelatedIn,
RelatedLessThan, RelatedLessThanOrEqual,
)
from django.db.models.query import QuerySet
from django.db.models.query_utils import PathInfo
from django.db.models.utils import make_model_tuple
from django.utils import six
from django.utils.deprecation import (
RemovedInDjango20Warning, RemovedInDjango110Warning,
)
from django.utils.encoding import force_text, smart_text
from django.utils.functional import cached_property, curry
from django.utils.translation import ugettext_lazy as _
from django.utils.version import get_docs_version
RECURSIVE_RELATIONSHIP_CONSTANT = 'self'
def resolve_relation(scope_model, relation):
"""
Transform relation into a model or fully-qualified model string of the form
"app_label.ModelName", relative to scope_model.
The relation argument can be:
* RECURSIVE_RELATIONSHIP_CONSTANT, i.e. the string "self", in which case
the model argument will be returned.
* A bare model name without an app_label, in which case scope_model's
app_label will be prepended.
* An "app_label.ModelName" string.
* A model class, which will be returned unchanged.
"""
# Check for recursive relations
if relation == RECURSIVE_RELATIONSHIP_CONSTANT:
relation = scope_model
# Look for an "app.Model" relation
if isinstance(relation, six.string_types):
if "." not in relation:
relation = "%s.%s" % (scope_model._meta.app_label, relation)
return relation
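# Illustrative usage (a hedged sketch, not part of the original module):
# assuming a hypothetical "news" app defining Article and Reporter models,
# resolve_relation() normalizes the accepted relation forms like so:
#
#     resolve_relation(Article, 'self')           # -> Article (the scope model)
#     resolve_relation(Article, 'Reporter')       # -> 'news.Reporter' (app_label prepended)
#     resolve_relation(Article, 'news.Reporter')  # -> 'news.Reporter' (already qualified)
#     resolve_relation(Article, Reporter)         # -> Reporter (classes pass through)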
def lazy_related_operation(function, model, *related_models, **kwargs):
"""
Schedule `function` to be called once `model` and all `related_models`
have been imported and registered with the app registry. `function` will
be called with the newly-loaded model classes as its positional arguments,
plus any optional keyword arguments.
The `model` argument must be a model class. Each subsequent positional
argument is another model, or a reference to another model - see
`resolve_relation()` for the various forms these may take. Any relative
references will be resolved relative to `model`.
This is a convenience wrapper for `Apps.lazy_model_operation` - the app
registry model used is the one found in `model._meta.apps`.
"""
models = [model] + [resolve_relation(model, rel) for rel in related_models]
model_keys = (make_model_tuple(m) for m in models)
apps = model._meta.apps
return apps.lazy_model_operation(partial(function, **kwargs), *model_keys)
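# Illustrative usage (a hedged sketch with hypothetical names): schedule a
# callback that must not run before every referenced model class is loaded.
# The callback receives the resolved classes positionally, plus any keyword
# arguments given here:
#
#     def connect_models(article_model, reporter_model, field=None):
#         ...  # both classes are registered with the app registry here
#
#     lazy_related_operation(connect_models, Article, 'Reporter', field=my_field)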
def add_lazy_relation(cls, field, relation, operation):
warnings.warn(
"add_lazy_relation() has been superseded by lazy_related_operation() "
"and related methods on the Apps class.",
RemovedInDjango20Warning, stacklevel=2)
# Rearrange args for new Apps.lazy_model_operation
function = lambda local, related, field: operation(field, related, local)
lazy_related_operation(function, cls, relation, field=field)
class RelatedField(Field):
"""
Base class that all relational fields inherit from.
"""
# Field flags
one_to_many = False
one_to_one = False
many_to_many = False
many_to_one = False
@cached_property
def related_model(self):
# Can't cache this property until all the models are loaded.
apps.check_models_ready()
return self.remote_field.model
def check(self, **kwargs):
errors = super(RelatedField, self).check(**kwargs)
errors.extend(self._check_related_name_is_valid())
errors.extend(self._check_relation_model_exists())
errors.extend(self._check_referencing_to_swapped_model())
errors.extend(self._check_clashes())
return errors
def _check_related_name_is_valid(self):
import re
import keyword
related_name = self.remote_field.related_name
if not related_name:
return []
is_valid_id = True
if keyword.iskeyword(related_name):
is_valid_id = False
if six.PY3:
if not related_name.isidentifier():
is_valid_id = False
else:
if not re.match(r'^[a-zA-Z_][a-zA-Z0-9_]*\Z', related_name):
is_valid_id = False
if not (is_valid_id or related_name.endswith('+')):
return [
checks.Error(
"The name '%s' is invalid related_name for field %s.%s" %
(self.remote_field.related_name, self.model._meta.object_name,
self.name),
hint="Related name must be a valid Python identifier or end with a '+'",
obj=self,
id='fields.E306',
)
]
return []
def _check_relation_model_exists(self):
rel_is_missing = self.remote_field.model not in apps.get_models()
rel_is_string = isinstance(self.remote_field.model, six.string_types)
model_name = self.remote_field.model if rel_is_string else self.remote_field.model._meta.object_name
if rel_is_missing and (rel_is_string or not self.remote_field.model._meta.swapped):
return [
checks.Error(
("Field defines a relation with model '%s', which "
"is either not installed, or is abstract.") % model_name,
hint=None,
obj=self,
id='fields.E300',
)
]
return []
def _check_referencing_to_swapped_model(self):
if (self.remote_field.model not in apps.get_models() and
not isinstance(self.remote_field.model, six.string_types) and
self.remote_field.model._meta.swapped):
model = "%s.%s" % (
self.remote_field.model._meta.app_label,
self.remote_field.model._meta.object_name
)
return [
checks.Error(
("Field defines a relation with the model '%s', "
"which has been swapped out.") % model,
hint="Update the relation to point at 'settings.%s'." % self.remote_field.model._meta.swappable,
obj=self,
id='fields.E301',
)
]
return []
def _check_clashes(self):
"""
Check accessor and reverse query name clashes.
"""
from django.db.models.base import ModelBase
errors = []
opts = self.model._meta
# `f.remote_field.model` may be a string instead of a model. Skip if model name is
# not resolved.
if not isinstance(self.remote_field.model, ModelBase):
return []
        # If the field doesn't install a backward relation on the target model (so
# `is_hidden` returns True), then there are no clashes to check and we
# can skip these fields.
if self.remote_field.is_hidden():
return []
# Consider that we are checking field `Model.foreign` and the models
# are:
#
# class Target(models.Model):
# model = models.IntegerField()
# model_set = models.IntegerField()
#
# class Model(models.Model):
# foreign = models.ForeignKey(Target)
# m2m = models.ManyToManyField(Target)
rel_opts = self.remote_field.model._meta
# rel_opts.object_name == "Target"
rel_name = self.remote_field.get_accessor_name() # i. e. "model_set"
rel_query_name = self.related_query_name() # i. e. "model"
field_name = "%s.%s" % (opts.object_name,
self.name) # i. e. "Model.field"
# Check clashes between accessor or reverse query name of `field`
# and any other field name -- i.e. accessor for Model.foreign is
# model_set and it clashes with Target.model_set.
potential_clashes = rel_opts.fields + rel_opts.many_to_many
for clash_field in potential_clashes:
clash_name = "%s.%s" % (rel_opts.object_name,
clash_field.name) # i. e. "Target.model_set"
if clash_field.name == rel_name:
errors.append(
checks.Error(
"Reverse accessor for '%s' clashes with field name '%s'." % (field_name, clash_name),
hint=("Rename field '%s', or add/change a related_name "
"argument to the definition for field '%s'.") % (clash_name, field_name),
obj=self,
id='fields.E302',
)
)
if clash_field.name == rel_query_name:
errors.append(
checks.Error(
"Reverse query name for '%s' clashes with field name '%s'." % (field_name, clash_name),
hint=("Rename field '%s', or add/change a related_name "
"argument to the definition for field '%s'.") % (clash_name, field_name),
obj=self,
id='fields.E303',
)
)
# Check clashes between accessors/reverse query names of `field` and
# any other field accessor -- i. e. Model.foreign accessor clashes with
# Model.m2m accessor.
potential_clashes = (r for r in rel_opts.related_objects if r.field is not self)
for clash_field in potential_clashes:
clash_name = "%s.%s" % ( # i. e. "Model.m2m"
clash_field.related_model._meta.object_name,
clash_field.field.name)
if clash_field.get_accessor_name() == rel_name:
errors.append(
checks.Error(
"Reverse accessor for '%s' clashes with reverse accessor for '%s'." % (field_name, clash_name),
hint=("Add or change a related_name argument "
"to the definition for '%s' or '%s'.") % (field_name, clash_name),
obj=self,
id='fields.E304',
)
)
if clash_field.get_accessor_name() == rel_query_name:
errors.append(
checks.Error(
"Reverse query name for '%s' clashes with reverse query name for '%s'."
% (field_name, clash_name),
hint=("Add or change a related_name argument "
"to the definition for '%s' or '%s'.") % (field_name, clash_name),
obj=self,
id='fields.E305',
)
)
return errors
def db_type(self, connection):
        # By default a related field will not have a column, as it relates to
# columns from another table.
return None
def contribute_to_class(self, cls, name, virtual_only=False):
super(RelatedField, self).contribute_to_class(cls, name, virtual_only=virtual_only)
self.opts = cls._meta
if not cls._meta.abstract:
if self.remote_field.related_name:
related_name = force_text(self.remote_field.related_name) % {
'class': cls.__name__.lower(),
'app_label': cls._meta.app_label.lower()
}
self.remote_field.related_name = related_name
def resolve_related_class(model, related, field):
field.remote_field.model = related
field.do_related_class(related, model)
lazy_related_operation(resolve_related_class, cls, self.remote_field.model, field=self)
def get_forward_related_filter(self, obj):
"""
        Return the keyword arguments that, when supplied to
        self.model.objects.filter(), would select all instances related through
this field to the remote obj. This is used to build the querysets
returned by related descriptors. obj is an instance of
self.related_field.model.
"""
return {
'%s__%s' % (self.name, rh_field.name): getattr(obj, rh_field.attname)
for _, rh_field in self.related_fields
}
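    # Illustrative sketch (assumed models, not from the original source): for a
    # hypothetical single-column ForeignKey ``Article.reporter`` targeting
    # ``Reporter.id``, get_forward_related_filter(reporter) returns roughly
    # {'reporter__id': reporter.id}, which the related descriptors pass
    # straight to .filter().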
def get_reverse_related_filter(self, obj):
"""
Complement to get_forward_related_filter(). Return the keyword
        arguments that, when passed to self.related_field.model.objects.filter(),
select all instances of self.related_field.model related through
this field to obj. obj is an instance of self.model.
"""
base_filter = {
rh_field.attname: getattr(obj, lh_field.attname)
for lh_field, rh_field in self.related_fields
}
base_filter.update(self.get_extra_descriptor_filter(obj) or {})
return base_filter
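    # Illustrative sketch (same assumed models): the reverse direction. For the
    # hypothetical ``Article.reporter`` ForeignKey,
    # get_reverse_related_filter(article) yields roughly
    # {'id': article.reporter_id}, merged with any extra descriptor filter the
    # field defines.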
@property
def swappable_setting(self):
"""
Get the setting that this is powered from for swapping, or None
if it's not swapped in / marked with swappable=False.
"""
if self.swappable:
# Work out string form of "to"
if isinstance(self.remote_field.model, six.string_types):
to_string = self.remote_field.model
else:
to_string = self.remote_field.model._meta.label
return apps.get_swappable_settings_name(to_string)
return None
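    # Illustrative sketch: for a field declared against the user model, e.g.
    # ForeignKey(settings.AUTH_USER_MODEL), this property typically resolves to
    # the setting name 'AUTH_USER_MODEL', which deconstruct() later wraps in a
    # SettingsReference so migrations follow the swapped-in model.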
def set_attributes_from_rel(self):
self.name = (
self.name or
(self.remote_field.model._meta.model_name + '_' + self.remote_field.model._meta.pk.name)
)
if self.verbose_name is None:
self.verbose_name = self.remote_field.model._meta.verbose_name
self.remote_field.set_field_name()
@property
def related(self):
warnings.warn(
"Usage of field.related has been deprecated. Use field.remote_field instead.",
RemovedInDjango110Warning, 2)
return self.remote_field
def do_related_class(self, other, cls):
self.set_attributes_from_rel()
self.contribute_to_related_class(other, self.remote_field)
def get_limit_choices_to(self):
"""
Return ``limit_choices_to`` for this model field.
If it is a callable, it will be invoked and the result will be
returned.
"""
if callable(self.remote_field.limit_choices_to):
return self.remote_field.limit_choices_to()
return self.remote_field.limit_choices_to
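    # Illustrative sketch (assumed field definition): with
    # ForeignKey(Reporter, limit_choices_to=lambda: {'is_active': True}),
    # get_limit_choices_to() invokes the lambda and returns
    # {'is_active': True}; a plain dict or Q object is returned unchanged.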
def formfield(self, **kwargs):
"""
Pass ``limit_choices_to`` to the field being constructed.
        Only pass it if there is a type that supports related fields.
        This is a similar strategy to the one used to pass the ``queryset``
        to the field being constructed.
"""
defaults = {}
if hasattr(self.remote_field, 'get_related_field'):
# If this is a callable, do not invoke it here. Just pass
# it in the defaults for when the form class will later be
# instantiated.
limit_choices_to = self.remote_field.limit_choices_to
defaults.update({
'limit_choices_to': limit_choices_to,
})
defaults.update(kwargs)
return super(RelatedField, self).formfield(**defaults)
def related_query_name(self):
"""
Define the name that can be used to identify this related object in a
table-spanning query.
"""
return self.remote_field.related_query_name or self.remote_field.related_name or self.opts.model_name
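    # Illustrative sketch: for a hypothetical ``Choice.poll`` ForeignKey with
    # neither related_query_name nor related_name set, this falls back to the
    # lowercased model name, so the reverse lookup reads
    # Poll.objects.filter(choice__votes__gt=0).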
@property
def target_field(self):
"""
When filtering against this relation, returns the field on the remote
model against which the filtering should happen.
"""
target_fields = self.get_path_info()[-1].target_fields
if len(target_fields) > 1:
raise exceptions.FieldError(
"The relation has multiple target fields, but only single target field was asked for")
return target_fields[0]
class SingleRelatedObjectDescriptor(object):
"""
Accessor to the related object on the reverse side of a one-to-one
relation.
In the example::
class Restaurant(Model):
place = OneToOneField(Place, related_name='restaurant')
``place.restaurant`` is a ``SingleRelatedObjectDescriptor`` instance.
"""
def __init__(self, related):
self.related = related
self.cache_name = related.get_cache_name()
@cached_property
def RelatedObjectDoesNotExist(self):
# The exception isn't created at initialization time for the sake of
# consistency with `ReverseSingleRelatedObjectDescriptor`.
return type(
str('RelatedObjectDoesNotExist'),
(self.related.related_model.DoesNotExist, AttributeError),
{}
)
def is_cached(self, instance):
return hasattr(instance, self.cache_name)
def get_queryset(self, **hints):
manager = self.related.related_model._default_manager
# If the related manager indicates that it should be used for
# related fields, respect that.
if not getattr(manager, 'use_for_related_fields', False):
manager = self.related.related_model._base_manager
return manager.db_manager(hints=hints).all()
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
queryset = self.get_queryset()
queryset._add_hints(instance=instances[0])
rel_obj_attr = attrgetter(self.related.field.attname)
instance_attr = lambda obj: obj._get_pk_val()
instances_dict = {instance_attr(inst): inst for inst in instances}
query = {'%s__in' % self.related.field.name: instances}
queryset = queryset.filter(**query)
# Since we're going to assign directly in the cache,
# we must manage the reverse relation cache manually.
rel_obj_cache_name = self.related.field.get_cache_name()
for rel_obj in queryset:
instance = instances_dict[rel_obj_attr(rel_obj)]
setattr(rel_obj, rel_obj_cache_name, instance)
return queryset, rel_obj_attr, instance_attr, True, self.cache_name
def __get__(self, instance, instance_type=None):
if instance is None:
return self
try:
rel_obj = getattr(instance, self.cache_name)
except AttributeError:
related_pk = instance._get_pk_val()
if related_pk is None:
rel_obj = None
else:
filter_args = self.related.field.get_forward_related_filter(instance)
try:
rel_obj = self.get_queryset(instance=instance).get(**filter_args)
except self.related.related_model.DoesNotExist:
rel_obj = None
else:
setattr(rel_obj, self.related.field.get_cache_name(), instance)
setattr(instance, self.cache_name, rel_obj)
if rel_obj is None:
raise self.RelatedObjectDoesNotExist(
"%s has no %s." % (
instance.__class__.__name__,
self.related.get_accessor_name()
)
)
else:
return rel_obj
def __set__(self, instance, value):
# The similarity of the code below to the code in
# ReverseSingleRelatedObjectDescriptor is annoying, but there's a bunch
# of small differences that would make a common base class convoluted.
# If null=True, we can assign null here, but otherwise the value needs
# to be an instance of the related class.
if value is None and self.related.field.null is False:
raise ValueError(
'Cannot assign None: "%s.%s" does not allow null values.' % (
instance._meta.object_name,
self.related.get_accessor_name(),
)
)
elif value is not None and not isinstance(value, self.related.related_model):
raise ValueError(
'Cannot assign "%r": "%s.%s" must be a "%s" instance.' % (
value,
instance._meta.object_name,
self.related.get_accessor_name(),
self.related.related_model._meta.object_name,
)
)
elif value is not None:
if instance._state.db is None:
instance._state.db = router.db_for_write(instance.__class__, instance=value)
elif value._state.db is None:
value._state.db = router.db_for_write(value.__class__, instance=instance)
elif value._state.db is not None and instance._state.db is not None:
if not router.allow_relation(value, instance):
raise ValueError('Cannot assign "%r": the current database router prevents this relation.' % value)
related_pk = tuple(getattr(instance, field.attname) for field in self.related.field.foreign_related_fields)
# Set the value of the related field to the value of the related object's related field
for index, field in enumerate(self.related.field.local_related_fields):
setattr(value, field.attname, related_pk[index])
# Since we already know what the related object is, seed the related
# object caches now, too. This avoids another db hit if you get the
# object you just set.
setattr(instance, self.cache_name, value)
setattr(value, self.related.field.get_cache_name(), instance)
class ReverseSingleRelatedObjectDescriptor(object):
"""
Accessor to the related object on the forward side of a many-to-one or
one-to-one relation.
In the example::
class Choice(Model):
            poll = ForeignKey(Poll, related_name='choices')
    ``choice.poll`` is a ``ReverseSingleRelatedObjectDescriptor`` instance.
"""
def __init__(self, field_with_rel):
self.field = field_with_rel
self.cache_name = self.field.get_cache_name()
@cached_property
def RelatedObjectDoesNotExist(self):
# The exception can't be created at initialization time since the
# related model might not be resolved yet; `rel.model` might still be
# a string model reference.
return type(
str('RelatedObjectDoesNotExist'),
(self.field.remote_field.model.DoesNotExist, AttributeError),
{}
)
def is_cached(self, instance):
return hasattr(instance, self.cache_name)
def get_queryset(self, **hints):
manager = self.field.remote_field.model._default_manager
# If the related manager indicates that it should be used for
# related fields, respect that.
if not getattr(manager, 'use_for_related_fields', False):
manager = self.field.remote_field.model._base_manager
return manager.db_manager(hints=hints).all()
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
queryset = self.get_queryset()
queryset._add_hints(instance=instances[0])
rel_obj_attr = self.field.get_foreign_related_value
instance_attr = self.field.get_local_related_value
instances_dict = {instance_attr(inst): inst for inst in instances}
related_field = self.field.foreign_related_fields[0]
# FIXME: This will need to be revisited when we introduce support for
# composite fields. In the meantime we take this practical approach to
        # solve a regression in 1.6 when the reverse manager is hidden
# (related_name ends with a '+'). Refs #21410.
# The check for len(...) == 1 is a special case that allows the query
# to be join-less and smaller. Refs #21760.
if self.field.remote_field.is_hidden() or len(self.field.foreign_related_fields) == 1:
query = {'%s__in' % related_field.name: set(instance_attr(inst)[0] for inst in instances)}
else:
query = {'%s__in' % self.field.related_query_name(): instances}
queryset = queryset.filter(**query)
# Since we're going to assign directly in the cache,
# we must manage the reverse relation cache manually.
if not self.field.remote_field.multiple:
rel_obj_cache_name = self.field.remote_field.get_cache_name()
for rel_obj in queryset:
instance = instances_dict[rel_obj_attr(rel_obj)]
setattr(rel_obj, rel_obj_cache_name, instance)
return queryset, rel_obj_attr, instance_attr, True, self.cache_name
def __get__(self, instance, instance_type=None):
if instance is None:
return self
try:
rel_obj = getattr(instance, self.cache_name)
except AttributeError:
val = self.field.get_local_related_value(instance)
if None in val:
rel_obj = None
else:
qs = self.get_queryset(instance=instance)
qs = qs.filter(**self.field.get_reverse_related_filter(instance))
# Assuming the database enforces foreign keys, this won't fail.
rel_obj = qs.get()
if not self.field.remote_field.multiple:
setattr(rel_obj, self.field.remote_field.get_cache_name(), instance)
setattr(instance, self.cache_name, rel_obj)
if rel_obj is None and not self.field.null:
raise self.RelatedObjectDoesNotExist(
"%s has no %s." % (self.field.model.__name__, self.field.name)
)
else:
return rel_obj
def __set__(self, instance, value):
# If null=True, we can assign null here, but otherwise the value needs
# to be an instance of the related class.
if value is None and self.field.null is False:
raise ValueError(
'Cannot assign None: "%s.%s" does not allow null values.' %
(instance._meta.object_name, self.field.name)
)
elif value is not None and not isinstance(value, self.field.remote_field.model):
raise ValueError(
'Cannot assign "%r": "%s.%s" must be a "%s" instance.' % (
value,
instance._meta.object_name,
self.field.name,
self.field.remote_field.model._meta.object_name,
)
)
elif value is not None:
if instance._state.db is None:
instance._state.db = router.db_for_write(instance.__class__, instance=value)
elif value._state.db is None:
value._state.db = router.db_for_write(value.__class__, instance=instance)
elif value._state.db is not None and instance._state.db is not None:
if not router.allow_relation(value, instance):
raise ValueError('Cannot assign "%r": the current database router prevents this relation.' % value)
# If we're setting the value of a OneToOneField to None, we need to clear
# out the cache on any old related object. Otherwise, deleting the
# previously-related object will also cause this object to be deleted,
# which is wrong.
if value is None:
# Look up the previously-related object, which may still be available
# since we've not yet cleared out the related field.
# Use the cache directly, instead of the accessor; if we haven't
# populated the cache, then we don't care - we're only accessing
# the object to invalidate the accessor cache, so there's no
# need to populate the cache just to expire it again.
related = getattr(instance, self.cache_name, None)
# If we've got an old related object, we need to clear out its
# cache. This cache also might not exist if the related object
# hasn't been accessed yet.
if related is not None:
setattr(related, self.field.remote_field.get_cache_name(), None)
for lh_field, rh_field in self.field.related_fields:
setattr(instance, lh_field.attname, None)
# Set the values of the related field.
else:
for lh_field, rh_field in self.field.related_fields:
setattr(instance, lh_field.attname, getattr(value, rh_field.attname))
# Since we already know what the related object is, seed the related
# object caches now, too. This avoids another db hit if you get the
# object you just set.
setattr(instance, self.cache_name, value)
if value is not None and not self.field.remote_field.multiple:
setattr(value, self.field.remote_field.get_cache_name(), instance)
def create_foreign_related_manager(superclass, rel):
"""
Factory function to create a manager that subclasses another manager
(generally the default manager of a given model) and adds behaviors
specific to many-to-one relations.
"""
class RelatedManager(superclass):
def __init__(self, instance):
super(RelatedManager, self).__init__()
self.instance = instance
self.model = rel.related_model
self.field = rel.field
self.core_filters = {self.field.name: instance}
def __call__(self, **kwargs):
# We use **kwargs rather than a kwarg argument to enforce the
# `manager='manager_name'` syntax.
manager = getattr(self.model, kwargs.pop('manager'))
manager_class = create_foreign_related_manager(manager.__class__, rel)
return manager_class(self.instance)
do_not_call_in_templates = True
def get_queryset(self):
try:
return self.instance._prefetched_objects_cache[self.field.related_query_name()]
except (AttributeError, KeyError):
db = self._db or router.db_for_read(self.model, instance=self.instance)
empty_strings_as_null = connections[db].features.interprets_empty_strings_as_nulls
qs = super(RelatedManager, self).get_queryset()
qs._add_hints(instance=self.instance)
if self._db:
qs = qs.using(self._db)
qs = qs.filter(**self.core_filters)
for field in self.field.foreign_related_fields:
val = getattr(self.instance, field.attname)
if val is None or (val == '' and empty_strings_as_null):
return qs.none()
qs._known_related_objects = {self.field: {self.instance.pk: self.instance}}
return qs
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
queryset = super(RelatedManager, self).get_queryset()
queryset._add_hints(instance=instances[0])
queryset = queryset.using(queryset._db or self._db)
rel_obj_attr = self.field.get_local_related_value
instance_attr = self.field.get_foreign_related_value
instances_dict = {instance_attr(inst): inst for inst in instances}
query = {'%s__in' % self.field.name: instances}
queryset = queryset.filter(**query)
# Since we just bypassed this class' get_queryset(), we must manage
# the reverse relation manually.
for rel_obj in queryset:
instance = instances_dict[rel_obj_attr(rel_obj)]
setattr(rel_obj, self.field.name, instance)
cache_name = self.field.related_query_name()
return queryset, rel_obj_attr, instance_attr, False, cache_name
def add(self, *objs, **kwargs):
bulk = kwargs.pop('bulk', True)
objs = list(objs)
db = router.db_for_write(self.model, instance=self.instance)
def check_and_update_obj(obj):
if not isinstance(obj, self.model):
raise TypeError("'%s' instance expected, got %r" % (
self.model._meta.object_name, obj,
))
setattr(obj, self.field.name, self.instance)
if bulk:
pks = []
for obj in objs:
check_and_update_obj(obj)
if obj._state.adding or obj._state.db != db:
raise ValueError(
"%r instance isn't saved. Use bulk=False or save "
"the object first." % obj
)
pks.append(obj.pk)
self.model._base_manager.using(db).filter(pk__in=pks).update(**{
self.field.name: self.instance,
})
else:
with transaction.atomic(using=db, savepoint=False):
for obj in objs:
check_and_update_obj(obj)
obj.save()
add.alters_data = True
def create(self, **kwargs):
kwargs[self.field.name] = self.instance
db = router.db_for_write(self.model, instance=self.instance)
return super(RelatedManager, self.db_manager(db)).create(**kwargs)
create.alters_data = True
def get_or_create(self, **kwargs):
kwargs[self.field.name] = self.instance
db = router.db_for_write(self.model, instance=self.instance)
return super(RelatedManager, self.db_manager(db)).get_or_create(**kwargs)
get_or_create.alters_data = True
def update_or_create(self, **kwargs):
kwargs[self.field.name] = self.instance
db = router.db_for_write(self.model, instance=self.instance)
return super(RelatedManager, self.db_manager(db)).update_or_create(**kwargs)
update_or_create.alters_data = True
# remove() and clear() are only provided if the ForeignKey can have a value of null.
if rel.field.null:
def remove(self, *objs, **kwargs):
if not objs:
return
bulk = kwargs.pop('bulk', True)
val = self.field.get_foreign_related_value(self.instance)
old_ids = set()
for obj in objs:
# Is obj actually part of this descriptor set?
if self.field.get_local_related_value(obj) == val:
old_ids.add(obj.pk)
else:
raise self.field.remote_field.model.DoesNotExist(
"%r is not related to %r." % (obj, self.instance)
)
self._clear(self.filter(pk__in=old_ids), bulk)
remove.alters_data = True
def clear(self, **kwargs):
bulk = kwargs.pop('bulk', True)
self._clear(self, bulk)
clear.alters_data = True
def _clear(self, queryset, bulk):
db = router.db_for_write(self.model, instance=self.instance)
queryset = queryset.using(db)
if bulk:
# `QuerySet.update()` is intrinsically atomic.
queryset.update(**{self.field.name: None})
else:
with transaction.atomic(using=db, savepoint=False):
for obj in queryset:
setattr(obj, self.field.name, None)
obj.save(update_fields=[self.field.name])
_clear.alters_data = True
def set(self, objs, **kwargs):
# Force evaluation of `objs` in case it's a queryset whose value
# could be affected by `manager.clear()`. Refs #19816.
objs = tuple(objs)
bulk = kwargs.pop('bulk', True)
clear = kwargs.pop('clear', False)
if self.field.null:
db = router.db_for_write(self.model, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
if clear:
self.clear()
self.add(*objs, bulk=bulk)
else:
old_objs = set(self.using(db).all())
new_objs = []
for obj in objs:
if obj in old_objs:
old_objs.remove(obj)
else:
new_objs.append(obj)
self.remove(*old_objs, bulk=bulk)
self.add(*new_objs, bulk=bulk)
else:
self.add(*objs, bulk=bulk)
set.alters_data = True
return RelatedManager
class ForeignRelatedObjectsDescriptor(object):
"""
Accessor to the related objects manager on the reverse side of a
many-to-one relation.
In the example::
class Choice(Model):
            poll = ForeignKey(Poll, related_name='choices')
``poll.choices`` is a ``ForeignRelatedObjectsDescriptor`` instance.
"""
def __init__(self, rel):
self.rel = rel
self.field = rel.field
@cached_property
def related_manager_cls(self):
return create_foreign_related_manager(
self.rel.related_model._default_manager.__class__,
self.rel,
)
def __get__(self, instance, instance_type=None):
if instance is None:
return self
return self.related_manager_cls(instance)
def __set__(self, instance, value):
manager = self.__get__(instance)
manager.set(value)
def create_many_related_manager(superclass, rel, reverse):
"""
Factory function to create a manager that subclasses another manager
(generally the default manager of a given model) and adds behaviors
specific to many-to-many relations.
"""
class ManyRelatedManager(superclass):
def __init__(self, instance=None):
super(ManyRelatedManager, self).__init__()
self.instance = instance
if not reverse:
self.model = rel.model
self.query_field_name = rel.field.related_query_name()
self.prefetch_cache_name = rel.field.name
self.source_field_name = rel.field.m2m_field_name()
self.target_field_name = rel.field.m2m_reverse_field_name()
self.symmetrical = rel.symmetrical
else:
self.model = rel.related_model
self.query_field_name = rel.field.name
self.prefetch_cache_name = rel.field.related_query_name()
self.source_field_name = rel.field.m2m_reverse_field_name()
self.target_field_name = rel.field.m2m_field_name()
self.symmetrical = False
self.through = rel.through
self.reverse = reverse
self.source_field = self.through._meta.get_field(self.source_field_name)
self.target_field = self.through._meta.get_field(self.target_field_name)
self.core_filters = {}
for lh_field, rh_field in self.source_field.related_fields:
core_filter_key = '%s__%s' % (self.query_field_name, rh_field.name)
self.core_filters[core_filter_key] = getattr(instance, rh_field.attname)
self.related_val = self.source_field.get_foreign_related_value(instance)
if None in self.related_val:
raise ValueError('"%r" needs to have a value for field "%s" before '
'this many-to-many relationship can be used.' %
(instance, self.source_field_name))
            # Even if this relation is not to the pk, we still require a pk value.
            # The hope is that the instance has already been saved to the DB,
# although having a pk value isn't a guarantee of that.
if instance.pk is None:
raise ValueError("%r instance needs to have a primary key value before "
"a many-to-many relationship can be used." %
instance.__class__.__name__)
def __call__(self, **kwargs):
# We use **kwargs rather than a kwarg argument to enforce the
# `manager='manager_name'` syntax.
manager = getattr(self.model, kwargs.pop('manager'))
manager_class = create_many_related_manager(manager.__class__, rel, reverse)
return manager_class(instance=self.instance)
do_not_call_in_templates = True
def _build_remove_filters(self, removed_vals):
filters = Q(**{self.source_field_name: self.related_val})
# No need to add a subquery condition if removed_vals is a QuerySet without
# filters.
removed_vals_filters = (not isinstance(removed_vals, QuerySet) or
removed_vals._has_filters())
if removed_vals_filters:
filters &= Q(**{'%s__in' % self.target_field_name: removed_vals})
if self.symmetrical:
symmetrical_filters = Q(**{self.target_field_name: self.related_val})
if removed_vals_filters:
symmetrical_filters &= Q(
**{'%s__in' % self.source_field_name: removed_vals})
filters |= symmetrical_filters
return filters
def get_queryset(self):
try:
return self.instance._prefetched_objects_cache[self.prefetch_cache_name]
except (AttributeError, KeyError):
qs = super(ManyRelatedManager, self).get_queryset()
qs._add_hints(instance=self.instance)
if self._db:
qs = qs.using(self._db)
return qs._next_is_sticky().filter(**self.core_filters)
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
queryset = super(ManyRelatedManager, self).get_queryset()
queryset._add_hints(instance=instances[0])
queryset = queryset.using(queryset._db or self._db)
query = {'%s__in' % self.query_field_name: instances}
queryset = queryset._next_is_sticky().filter(**query)
# M2M: need to annotate the query in order to get the primary model
# that the secondary model was actually related to. We know that
# there will already be a join on the join table, so we can just add
# the select.
# For non-autocreated 'through' models, can't assume we are
# dealing with PK values.
fk = self.through._meta.get_field(self.source_field_name)
join_table = self.through._meta.db_table
connection = connections[queryset.db]
qn = connection.ops.quote_name
queryset = queryset.extra(select={
'_prefetch_related_val_%s' % f.attname:
'%s.%s' % (qn(join_table), qn(f.column)) for f in fk.local_related_fields})
return (
queryset,
lambda result: tuple(
getattr(result, '_prefetch_related_val_%s' % f.attname)
for f in fk.local_related_fields
),
lambda inst: tuple(
f.get_db_prep_value(getattr(inst, f.attname), connection)
for f in fk.foreign_related_fields
),
False,
self.prefetch_cache_name,
)
def add(self, *objs):
if not rel.through._meta.auto_created:
opts = self.through._meta
raise AttributeError(
"Cannot use add() on a ManyToManyField which specifies an "
"intermediary model. Use %s.%s's Manager instead." %
(opts.app_label, opts.object_name)
)
db = router.db_for_write(self.through, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
self._add_items(self.source_field_name, self.target_field_name, *objs)
# If this is a symmetrical m2m relation to self, add the mirror entry in the m2m table
if self.symmetrical:
self._add_items(self.target_field_name, self.source_field_name, *objs)
add.alters_data = True
def remove(self, *objs):
if not rel.through._meta.auto_created:
opts = self.through._meta
raise AttributeError(
"Cannot use remove() on a ManyToManyField which specifies "
"an intermediary model. Use %s.%s's Manager instead." %
(opts.app_label, opts.object_name)
)
self._remove_items(self.source_field_name, self.target_field_name, *objs)
remove.alters_data = True
def clear(self):
db = router.db_for_write(self.through, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
signals.m2m_changed.send(sender=self.through, action="pre_clear",
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=None, using=db)
filters = self._build_remove_filters(super(ManyRelatedManager, self).get_queryset().using(db))
self.through._default_manager.using(db).filter(filters).delete()
signals.m2m_changed.send(sender=self.through, action="post_clear",
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=None, using=db)
clear.alters_data = True
def set(self, objs, **kwargs):
if not rel.through._meta.auto_created:
opts = self.through._meta
raise AttributeError(
"Cannot set values on a ManyToManyField which specifies an "
"intermediary model. Use %s.%s's Manager instead." %
(opts.app_label, opts.object_name)
)
# Force evaluation of `objs` in case it's a queryset whose value
# could be affected by `manager.clear()`. Refs #19816.
objs = tuple(objs)
clear = kwargs.pop('clear', False)
db = router.db_for_write(self.through, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
if clear:
self.clear()
self.add(*objs)
else:
old_ids = set(self.using(db).values_list(self.target_field.target_field.attname, flat=True))
new_objs = []
for obj in objs:
fk_val = (self.target_field.get_foreign_related_value(obj)[0]
if isinstance(obj, self.model) else obj)
if fk_val in old_ids:
old_ids.remove(fk_val)
else:
new_objs.append(obj)
self.remove(*old_ids)
self.add(*new_objs)
set.alters_data = True
def create(self, **kwargs):
# This check needs to be done here, since we can't later remove this
# from the method lookup table, as we do with add and remove.
if not self.through._meta.auto_created:
opts = self.through._meta
raise AttributeError(
"Cannot use create() on a ManyToManyField which specifies "
"an intermediary model. Use %s.%s's Manager instead." %
(opts.app_label, opts.object_name)
)
db = router.db_for_write(self.instance.__class__, instance=self.instance)
new_obj = super(ManyRelatedManager, self.db_manager(db)).create(**kwargs)
self.add(new_obj)
return new_obj
create.alters_data = True
def get_or_create(self, **kwargs):
db = router.db_for_write(self.instance.__class__, instance=self.instance)
obj, created = super(ManyRelatedManager, self.db_manager(db)).get_or_create(**kwargs)
# We only need to add() if created because if we got an object back
# from get() then the relationship already exists.
if created:
self.add(obj)
return obj, created
get_or_create.alters_data = True
def update_or_create(self, **kwargs):
db = router.db_for_write(self.instance.__class__, instance=self.instance)
obj, created = super(ManyRelatedManager, self.db_manager(db)).update_or_create(**kwargs)
# We only need to add() if created because if we got an object back
# from get() then the relationship already exists.
if created:
self.add(obj)
return obj, created
update_or_create.alters_data = True
def _add_items(self, source_field_name, target_field_name, *objs):
# source_field_name: the PK fieldname in join table for the source object
# target_field_name: the PK fieldname in join table for the target object
# *objs - objects to add. Either object instances, or primary keys of object instances.
# If there aren't any objects, there is nothing to do.
from django.db.models import Model
if objs:
new_ids = set()
for obj in objs:
if isinstance(obj, self.model):
if not router.allow_relation(obj, self.instance):
raise ValueError(
'Cannot add "%r": instance is on database "%s", value is on database "%s"' %
(obj, self.instance._state.db, obj._state.db)
)
fk_val = self.through._meta.get_field(
target_field_name).get_foreign_related_value(obj)[0]
if fk_val is None:
raise ValueError(
'Cannot add "%r": the value for field "%s" is None' %
(obj, target_field_name)
)
new_ids.add(fk_val)
elif isinstance(obj, Model):
raise TypeError(
"'%s' instance expected, got %r" %
(self.model._meta.object_name, obj)
)
else:
new_ids.add(obj)
db = router.db_for_write(self.through, instance=self.instance)
vals = (self.through._default_manager.using(db)
.values_list(target_field_name, flat=True)
.filter(**{
source_field_name: self.related_val[0],
'%s__in' % target_field_name: new_ids,
}))
new_ids = new_ids - set(vals)
with transaction.atomic(using=db, savepoint=False):
if self.reverse or source_field_name == self.source_field_name:
# Don't send the signal when we are inserting the
# duplicate data row for symmetrical reverse entries.
signals.m2m_changed.send(sender=self.through, action='pre_add',
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=new_ids, using=db)
# Add the ones that aren't there already
self.through._default_manager.using(db).bulk_create([
self.through(**{
'%s_id' % source_field_name: self.related_val[0],
'%s_id' % target_field_name: obj_id,
})
for obj_id in new_ids
])
if self.reverse or source_field_name == self.source_field_name:
# Don't send the signal when we are inserting the
# duplicate data row for symmetrical reverse entries.
signals.m2m_changed.send(sender=self.through, action='post_add',
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=new_ids, using=db)
def _remove_items(self, source_field_name, target_field_name, *objs):
# source_field_name: the PK colname in join table for the source object
# target_field_name: the PK colname in join table for the target object
# *objs - objects to remove
if not objs:
return
# Check that all the objects are of the right type
old_ids = set()
for obj in objs:
if isinstance(obj, self.model):
fk_val = self.target_field.get_foreign_related_value(obj)[0]
old_ids.add(fk_val)
else:
old_ids.add(obj)
db = router.db_for_write(self.through, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
# Send a signal to the other end if need be.
signals.m2m_changed.send(sender=self.through, action="pre_remove",
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=old_ids, using=db)
target_model_qs = super(ManyRelatedManager, self).get_queryset()
if target_model_qs._has_filters():
old_vals = target_model_qs.using(db).filter(**{
'%s__in' % self.target_field.target_field.attname: old_ids})
else:
old_vals = old_ids
filters = self._build_remove_filters(old_vals)
self.through._default_manager.using(db).filter(filters).delete()
signals.m2m_changed.send(sender=self.through, action="post_remove",
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=old_ids, using=db)
return ManyRelatedManager
class ManyRelatedObjectsDescriptor(ForeignRelatedObjectsDescriptor):
"""
Accessor to the related objects manager on the forward and reverse sides of
a many-to-many relation.
In the example::
class Pizza(Model):
toppings = ManyToManyField(Topping, related_name='pizzas')
``pizza.toppings`` and ``topping.pizzas`` are ManyRelatedObjectsDescriptor
instances.
"""
def __init__(self, rel, reverse=False):
super(ManyRelatedObjectsDescriptor, self).__init__(rel)
self.reverse = reverse
@property
def through(self):
# through is provided so that you have easy access to the through
# model (Book.authors.through) for inlines, etc. This is done as
# a property to ensure that the fully resolved value is returned.
return self.rel.through
@cached_property
def related_manager_cls(self):
model = self.rel.related_model if self.reverse else self.rel.model
return create_many_related_manager(
model._default_manager.__class__,
self.rel,
reverse=self.reverse,
)
class ForeignObjectRel(object):
"""
Used by ForeignObject to store information about the relation.
``_meta.get_fields()`` returns this class to provide access to the field
flags for the reverse relation.
"""
# Field flags
auto_created = True
concrete = False
editable = False
is_relation = True
# Reverse relations are always nullable (Django can't enforce that a
# foreign key on the related model points to this model).
null = True
def __init__(self, field, to, related_name=None, related_query_name=None,
limit_choices_to=None, parent_link=False, on_delete=None):
self.field = field
self.model = to
self.related_name = related_name
self.related_query_name = related_query_name
self.limit_choices_to = {} if limit_choices_to is None else limit_choices_to
self.parent_link = parent_link
self.on_delete = on_delete
self.symmetrical = False
self.multiple = True
# Some of the following cached_properties can't be initialized in
# __init__ as the field doesn't have its model yet. Calling these methods
    # before field.contribute_to_class() has been called will result in an
    # AttributeError.
@property
def to(self):
warnings.warn(
"Usage of ForeignObjectRel.to attribute has been deprecated. "
"Use the model attribute instead.",
RemovedInDjango20Warning, 2)
return self.model
@cached_property
def hidden(self):
return self.is_hidden()
@cached_property
def name(self):
return self.field.related_query_name()
@property
def remote_field(self):
return self.field
@property
def target_field(self):
"""
When filtering against this relation, returns the field on the remote
model against which the filtering should happen.
"""
target_fields = self.get_path_info()[-1].target_fields
if len(target_fields) > 1:
raise exceptions.FieldError("Can't use target_field for multicolumn relations.")
return target_fields[0]
@cached_property
def related_model(self):
if not self.field.model:
raise AttributeError(
"This property can't be accessed before self.field.contribute_to_class has been called.")
return self.field.model
@cached_property
def many_to_many(self):
return self.field.many_to_many
@cached_property
def many_to_one(self):
return self.field.one_to_many
@cached_property
def one_to_many(self):
return self.field.many_to_one
@cached_property
def one_to_one(self):
return self.field.one_to_one
def get_prep_lookup(self, lookup_name, value):
return self.field.get_prep_lookup(lookup_name, value)
def get_lookup(self, lookup_name):
return self.field.get_lookup(lookup_name)
def get_internal_type(self):
return self.field.get_internal_type()
@property
def db_type(self):
return self.field.db_type
def __repr__(self):
return '<%s: %s.%s>' % (
type(self).__name__,
self.related_model._meta.app_label,
self.related_model._meta.model_name,
)
def get_choices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH,
limit_to_currently_related=False):
"""
        Return choices with a default blank choice included, for use as
SelectField choices for this field.
Analog of django.db.models.fields.Field.get_choices(), provided
initially for utilization by RelatedFieldListFilter.
"""
first_choice = blank_choice if include_blank else []
queryset = self.related_model._default_manager.all()
if limit_to_currently_related:
queryset = queryset.complex_filter(
{'%s__isnull' % self.related_model._meta.model_name: False}
)
lst = [(x._get_pk_val(), smart_text(x)) for x in queryset]
return first_choice + lst
def get_db_prep_lookup(self, lookup_type, value, connection, prepared=False):
# Defer to the actual field definition for db prep
return self.field.get_db_prep_lookup(lookup_type, value, connection=connection, prepared=prepared)
def is_hidden(self):
"Should the related object be hidden?"
return self.related_name is not None and self.related_name[-1] == '+'
def get_joining_columns(self):
return self.field.get_reverse_joining_columns()
def get_extra_restriction(self, where_class, alias, related_alias):
return self.field.get_extra_restriction(where_class, related_alias, alias)
def set_field_name(self):
"""
        Set the related field's name. This is not available until later stages
        of app loading, so set_field_name is called from
        set_attributes_from_rel().
"""
# By default foreign object doesn't relate to any remote field (for
# example custom multicolumn joins currently have no remote field).
self.field_name = None
def get_accessor_name(self, model=None):
# This method encapsulates the logic that decides what name to give an
# accessor descriptor that retrieves related many-to-one or
# many-to-many objects. It uses the lower-cased object_name + "_set",
# but this can be overridden with the "related_name" option.
# Due to backwards compatibility ModelForms need to be able to provide
# an alternate model. See BaseInlineFormSet.get_default_prefix().
opts = model._meta if model else self.related_model._meta
model = model or self.related_model
if self.multiple:
# If this is a symmetrical m2m relation on self, there is no reverse accessor.
if self.symmetrical and model == self.model:
return None
if self.related_name:
return self.related_name
if opts.default_related_name:
return opts.default_related_name % {
'model_name': opts.model_name.lower(),
'app_label': opts.app_label.lower(),
}
return opts.model_name + ('_set' if self.multiple else '')
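    # Illustrative defaults (hedged sketch): for a hypothetical ``Choice.poll``
    # ForeignKey the reverse accessor is 'choice_set'; for a OneToOneField it
    # is just 'choice'; an explicit related_name always wins; a symmetrical
    # self-referential m2m yields None; and a related_name ending in '+' hides
    # the reverse accessor entirely.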
def get_cache_name(self):
return "_%s_cache" % self.get_accessor_name()
def get_path_info(self):
return self.field.get_reverse_path_info()
class ManyToOneRel(ForeignObjectRel):
"""
Used by the ForeignKey field to store information about the relation.
``_meta.get_fields()`` returns this class to provide access to the field
flags for the reverse relation.
Note: Because we somewhat abuse the Rel objects by using them as reverse
fields we get the funny situation where
``ManyToOneRel.many_to_one == False`` and
``ManyToOneRel.one_to_many == True``. This is unfortunate but the actual
ManyToOneRel class is a private API and there is work underway to turn
reverse relations into actual fields.
"""
def __init__(self, field, to, field_name, related_name=None, related_query_name=None,
limit_choices_to=None, parent_link=False, on_delete=None):
super(ManyToOneRel, self).__init__(
field, to,
related_name=related_name,
related_query_name=related_query_name,
limit_choices_to=limit_choices_to,
parent_link=parent_link,
on_delete=on_delete,
)
self.field_name = field_name
def __getstate__(self):
state = self.__dict__.copy()
state.pop('related_model', None)
return state
def get_related_field(self):
"""
Return the Field in the 'to' object to which this relationship is tied.
"""
field = self.model._meta.get_field(self.field_name)
if not field.concrete:
raise FieldDoesNotExist("No related field named '%s'" %
self.field_name)
return field
def set_field_name(self):
self.field_name = self.field_name or self.model._meta.pk.name
class OneToOneRel(ManyToOneRel):
"""
Used by OneToOneField to store information about the relation.
``_meta.get_fields()`` returns this class to provide access to the field
flags for the reverse relation.
"""
def __init__(self, field, to, field_name, related_name=None, related_query_name=None,
limit_choices_to=None, parent_link=False, on_delete=None):
super(OneToOneRel, self).__init__(
field, to, field_name,
related_name=related_name,
related_query_name=related_query_name,
limit_choices_to=limit_choices_to,
parent_link=parent_link,
on_delete=on_delete,
)
self.multiple = False
class ManyToManyRel(ForeignObjectRel):
"""
Used by ManyToManyField to store information about the relation.
``_meta.get_fields()`` returns this class to provide access to the field
flags for the reverse relation.
"""
def __init__(self, field, to, related_name=None, related_query_name=None,
limit_choices_to=None, symmetrical=True, through=None, through_fields=None,
db_constraint=True):
super(ManyToManyRel, self).__init__(
field, to,
related_name=related_name,
related_query_name=related_query_name,
limit_choices_to=limit_choices_to,
)
if through and not db_constraint:
raise ValueError("Can't supply a through model and db_constraint=False")
self.through = through
if through_fields and not through:
raise ValueError("Cannot specify through_fields without a through model")
self.through_fields = through_fields
self.symmetrical = symmetrical
self.db_constraint = db_constraint
def get_related_field(self):
"""
Return the field in the 'to' object to which this relationship is tied.
Provided for symmetry with ManyToOneRel.
"""
opts = self.through._meta
if self.through_fields:
field = opts.get_field(self.through_fields[0])
else:
for field in opts.fields:
rel = getattr(field, 'remote_field', None)
if rel and rel.model == self.model:
break
return field.foreign_related_fields[0]
class ForeignObject(RelatedField):
"""
    Abstraction of the ForeignKey relation that supports multi-column relations.
"""
# Field flags
many_to_many = False
many_to_one = True
one_to_many = False
one_to_one = False
requires_unique_target = True
related_accessor_class = ForeignRelatedObjectsDescriptor
rel_class = ForeignObjectRel
def __init__(self, to, on_delete, from_fields, to_fields, rel=None, related_name=None,
related_query_name=None, limit_choices_to=None, parent_link=False,
swappable=True, **kwargs):
if rel is None:
rel = self.rel_class(
self, to,
related_name=related_name,
related_query_name=related_query_name,
limit_choices_to=limit_choices_to,
parent_link=parent_link,
on_delete=on_delete,
)
super(ForeignObject, self).__init__(rel=rel, **kwargs)
self.from_fields = from_fields
self.to_fields = to_fields
self.swappable = swappable
def check(self, **kwargs):
errors = super(ForeignObject, self).check(**kwargs)
errors.extend(self._check_unique_target())
return errors
def _check_unique_target(self):
rel_is_string = isinstance(self.remote_field.model, six.string_types)
if rel_is_string or not self.requires_unique_target:
return []
try:
self.foreign_related_fields
except FieldDoesNotExist:
return []
has_unique_field = any(rel_field.unique
for rel_field in self.foreign_related_fields)
if not has_unique_field and len(self.foreign_related_fields) > 1:
field_combination = ', '.join("'%s'" % rel_field.name
for rel_field in self.foreign_related_fields)
model_name = self.remote_field.model.__name__
return [
checks.Error(
"None of the fields %s on model '%s' have a unique=True constraint."
% (field_combination, model_name),
hint=None,
obj=self,
id='fields.E310',
)
]
elif not has_unique_field:
field_name = self.foreign_related_fields[0].name
model_name = self.remote_field.model.__name__
return [
checks.Error(
("'%s.%s' must set unique=True "
"because it is referenced by a foreign key.") % (model_name, field_name),
hint=None,
obj=self,
id='fields.E311',
)
]
else:
return []
def deconstruct(self):
name, path, args, kwargs = super(ForeignObject, self).deconstruct()
kwargs['on_delete'] = self.remote_field.on_delete
kwargs['from_fields'] = self.from_fields
kwargs['to_fields'] = self.to_fields
if self.remote_field.related_name is not None:
kwargs['related_name'] = self.remote_field.related_name
if self.remote_field.related_query_name is not None:
kwargs['related_query_name'] = self.remote_field.related_query_name
if self.remote_field.parent_link:
kwargs['parent_link'] = self.remote_field.parent_link
# Work out string form of "to"
if isinstance(self.remote_field.model, six.string_types):
kwargs['to'] = self.remote_field.model
else:
kwargs['to'] = "%s.%s" % (
self.remote_field.model._meta.app_label,
self.remote_field.model._meta.object_name,
)
# If swappable is True, then see if we're actually pointing to the target
# of a swap.
swappable_setting = self.swappable_setting
if swappable_setting is not None:
# If it's already a settings reference, error
if hasattr(kwargs['to'], "setting_name"):
if kwargs['to'].setting_name != swappable_setting:
raise ValueError(
"Cannot deconstruct a ForeignKey pointing to a model "
"that is swapped in place of more than one model (%s and %s)"
% (kwargs['to'].setting_name, swappable_setting)
)
# Set it
from django.db.migrations.writer import SettingsReference
kwargs['to'] = SettingsReference(
kwargs['to'],
swappable_setting,
)
return name, path, args, kwargs
def resolve_related_fields(self):
if len(self.from_fields) < 1 or len(self.from_fields) != len(self.to_fields):
raise ValueError('Foreign Object from and to fields must be the same non-zero length')
if isinstance(self.remote_field.model, six.string_types):
raise ValueError('Related model %r cannot be resolved' % self.remote_field.model)
related_fields = []
for index in range(len(self.from_fields)):
from_field_name = self.from_fields[index]
to_field_name = self.to_fields[index]
from_field = (self if from_field_name == 'self'
else self.opts.get_field(from_field_name))
to_field = (self.remote_field.model._meta.pk if to_field_name is None
else self.remote_field.model._meta.get_field(to_field_name))
related_fields.append((from_field, to_field))
return related_fields
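    # Illustrative sketch (assumed models, multi-column case): a hypothetical
    # composite relation such as
    #
    #     class Order(models.Model):
    #         shop_id = models.IntegerField()
    #         product_sku = models.CharField(max_length=20)
    #         product = models.ForeignObject(
    #             Product, on_delete=models.CASCADE,
    #             from_fields=['shop_id', 'product_sku'],
    #             to_fields=['shop_id', 'sku'],
    #         )
    #
    # resolves to [(Order.shop_id, Product.shop_id), (Order.product_sku,
    # Product.sku)] as (local field, remote field) pairs.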
@property
def related_fields(self):
if not hasattr(self, '_related_fields'):
self._related_fields = self.resolve_related_fields()
return self._related_fields
@property
def reverse_related_fields(self):
return [(rhs_field, lhs_field) for lhs_field, rhs_field in self.related_fields]
@property
def local_related_fields(self):
return tuple(lhs_field for lhs_field, rhs_field in self.related_fields)
@property
def foreign_related_fields(self):
return tuple(rhs_field for lhs_field, rhs_field in self.related_fields)
def get_local_related_value(self, instance):
return self.get_instance_value_for_fields(instance, self.local_related_fields)
def get_foreign_related_value(self, instance):
return self.get_instance_value_for_fields(instance, self.foreign_related_fields)
@staticmethod
def get_instance_value_for_fields(instance, fields):
ret = []
opts = instance._meta
for field in fields:
# Gotcha: in some cases (like fixture loading) a model can have
# different values in parent_ptr_id and parent's id. So, use
# instance.pk (that is, parent_ptr_id) when asked for instance.id.
if field.primary_key:
possible_parent_link = opts.get_ancestor_link(field.model)
if (not possible_parent_link or
possible_parent_link.primary_key or
possible_parent_link.model._meta.abstract):
ret.append(instance.pk)
continue
ret.append(getattr(instance, field.attname))
return tuple(ret)
def get_attname_column(self):
attname, column = super(ForeignObject, self).get_attname_column()
return attname, None
def get_joining_columns(self, reverse_join=False):
source = self.reverse_related_fields if reverse_join else self.related_fields
return tuple((lhs_field.column, rhs_field.column) for lhs_field, rhs_field in source)
def get_reverse_joining_columns(self):
return self.get_joining_columns(reverse_join=True)
def get_extra_descriptor_filter(self, instance):
"""
        Return an extra filter condition for related object fetching when
        the user accesses 'instance.fieldname'; that is, the extra filter is
        applied by the field's descriptor.
        The filter must be either a dict usable in a .filter(**kwargs) call
        or a Q object. The condition will be ANDed together with the
        relation's joining columns.
        A parallel method is get_extra_restriction(), which is used in
        JOIN and subquery conditions.
"""
return {}
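    # Hedged example (not part of Django): a subclass could narrow descriptor
    # fetches by returning a filter dict from this hook; 'is_active' is a
    # made-up field name used only for illustration.
    #
    #     class ActiveForeignObject(ForeignObject):
    #         def get_extra_descriptor_filter(self, instance):
    #             return {'is_active': True}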
def get_extra_restriction(self, where_class, alias, related_alias):
"""
        Return an extra condition used for joining and subquery pushdown.
        The condition must be something that responds to the
        as_sql(compiler, connection) method.
        Note that currently referring to both 'alias' and 'related_alias'
        will not work in some conditions, such as subquery pushdown.
        A parallel method is get_extra_descriptor_filter(), which is used in
        instance.fieldname related object fetching.
"""
return None
def get_path_info(self):
"""
Get path from this field to the related model.
"""
opts = self.remote_field.model._meta
from_opts = self.model._meta
return [PathInfo(from_opts, opts, self.foreign_related_fields, self, False, True)]
def get_reverse_path_info(self):
"""
Get path from the related model to this field's model.
"""
opts = self.model._meta
from_opts = self.remote_field.model._meta
pathinfos = [PathInfo(from_opts, opts, (opts.pk,), self.remote_field, not self.unique, False)]
return pathinfos
def get_lookup(self, lookup_name):
if lookup_name == 'in':
return RelatedIn
elif lookup_name == 'exact':
return RelatedExact
elif lookup_name == 'gt':
return RelatedGreaterThan
elif lookup_name == 'gte':
return RelatedGreaterThanOrEqual
elif lookup_name == 'lt':
return RelatedLessThan
elif lookup_name == 'lte':
return RelatedLessThanOrEqual
elif lookup_name != 'isnull':
raise TypeError('Related Field got invalid lookup: %s' % lookup_name)
return super(ForeignObject, self).get_lookup(lookup_name)
def get_transform(self, *args, **kwargs):
raise NotImplementedError('Relational fields do not support transforms.')
@property
def attnames(self):
return tuple(field.attname for field in self.local_related_fields)
def get_defaults(self):
return tuple(field.get_default() for field in self.local_related_fields)
def contribute_to_class(self, cls, name, virtual_only=False):
super(ForeignObject, self).contribute_to_class(cls, name, virtual_only=virtual_only)
setattr(cls, self.name, ReverseSingleRelatedObjectDescriptor(self))
def contribute_to_related_class(self, cls, related):
# Internal FK's - i.e., those with a related name ending with '+' -
# and swapped models don't get a related descriptor.
if not self.remote_field.is_hidden() and not related.related_model._meta.swapped:
setattr(cls, related.get_accessor_name(), self.related_accessor_class(related))
# While 'limit_choices_to' might be a callable, simply pass
# it along for later - this is too early because it's still
# model load time.
if self.remote_field.limit_choices_to:
cls._meta.related_fkey_lookups.append(self.remote_field.limit_choices_to)
class ForeignKey(ForeignObject):
"""
Provide a many-to-one relation by adding a column to the local model
to hold the remote value.
By default ForeignKey will target the pk of the remote model but this
behavior can be changed by using the ``to_field`` argument.
"""
# Field flags
many_to_many = False
many_to_one = True
one_to_many = False
one_to_one = False
rel_class = ManyToOneRel
empty_strings_allowed = False
default_error_messages = {
'invalid': _('%(model)s instance with %(field)s %(value)r does not exist.')
}
description = _("Foreign Key (type determined by related field)")
def __init__(self, to, on_delete=None, related_name=None, related_query_name=None,
limit_choices_to=None, parent_link=False, to_field=None,
db_constraint=True, **kwargs):
try:
to._meta.model_name
except AttributeError:
assert isinstance(to, six.string_types), (
"%s(%r) is invalid. First parameter to ForeignKey must be "
"either a model, a model name, or the string %r" % (
self.__class__.__name__, to,
RECURSIVE_RELATIONSHIP_CONSTANT,
)
)
else:
# For backwards compatibility purposes, we need to *try* and set
# the to_field during FK construction. It won't be guaranteed to
# be correct until contribute_to_class is called. Refs #12190.
to_field = to_field or (to._meta.pk and to._meta.pk.name)
if on_delete is None:
warnings.warn(
"on_delete will be a required arg for %s in Django 2.0. "
"Set it to models.CASCADE if you want to maintain the current default behavior. "
"See https://docs.djangoproject.com/en/%s/ref/models/fields/"
"#django.db.models.ForeignKey.on_delete" % (
self.__class__.__name__,
get_docs_version(),
),
RemovedInDjango20Warning, 2)
on_delete = CASCADE
elif not callable(on_delete):
warnings.warn(
"The signature for {0} will change in Django 2.0. "
"Pass to_field='{1}' as a kwarg instead of as an arg.".format(
self.__class__.__name__,
on_delete,
),
RemovedInDjango20Warning, 2)
on_delete, to_field = to_field, on_delete
kwargs['rel'] = self.rel_class(
self, to, to_field,
related_name=related_name,
related_query_name=related_query_name,
limit_choices_to=limit_choices_to,
parent_link=parent_link,
on_delete=on_delete,
)
kwargs['db_index'] = kwargs.get('db_index', True)
super(ForeignKey, self).__init__(
to, on_delete, from_fields=['self'], to_fields=[to_field], **kwargs)
self.db_constraint = db_constraint
def check(self, **kwargs):
errors = super(ForeignKey, self).check(**kwargs)
errors.extend(self._check_on_delete())
errors.extend(self._check_unique())
return errors
def _check_on_delete(self):
on_delete = getattr(self.remote_field, 'on_delete', None)
if on_delete == SET_NULL and not self.null:
return [
checks.Error(
'Field specifies on_delete=SET_NULL, but cannot be null.',
hint='Set null=True argument on the field, or change the on_delete rule.',
obj=self,
id='fields.E320',
)
]
elif on_delete == SET_DEFAULT and not self.has_default():
return [
checks.Error(
'Field specifies on_delete=SET_DEFAULT, but has no default value.',
hint='Set a default value, or change the on_delete rule.',
obj=self,
id='fields.E321',
)
]
else:
return []
def _check_unique(self, **kwargs):
return [
checks.Warning(
'Setting unique=True on a ForeignKey has the same effect as using a OneToOneField.',
hint='ForeignKey(unique=True) is usually better served by a OneToOneField.',
obj=self,
id='fields.W342',
)
] if self.unique else []
def deconstruct(self):
name, path, args, kwargs = super(ForeignKey, self).deconstruct()
del kwargs['to_fields']
del kwargs['from_fields']
# Handle the simpler arguments
if self.db_index:
del kwargs['db_index']
else:
kwargs['db_index'] = False
if self.db_constraint is not True:
kwargs['db_constraint'] = self.db_constraint
# Rel needs more work.
to_meta = getattr(self.remote_field.model, "_meta", None)
if self.remote_field.field_name and (
not to_meta or (to_meta.pk and self.remote_field.field_name != to_meta.pk.name)):
kwargs['to_field'] = self.remote_field.field_name
return name, path, args, kwargs
@property
def target_field(self):
return self.foreign_related_fields[0]
def get_reverse_path_info(self):
"""
Get path from the related model to this field's model.
"""
opts = self.model._meta
from_opts = self.remote_field.model._meta
pathinfos = [PathInfo(from_opts, opts, (opts.pk,), self.remote_field, not self.unique, False)]
return pathinfos
def validate(self, value, model_instance):
if self.remote_field.parent_link:
return
super(ForeignKey, self).validate(value, model_instance)
if value is None:
return
using = router.db_for_read(model_instance.__class__, instance=model_instance)
qs = self.remote_field.model._default_manager.using(using).filter(
**{self.remote_field.field_name: value}
)
qs = qs.complex_filter(self.get_limit_choices_to())
if not qs.exists():
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={
'model': self.remote_field.model._meta.verbose_name, 'pk': value,
'field': self.remote_field.field_name, 'value': value,
}, # 'pk' is included for backwards compatibility
)
def get_attname(self):
return '%s_id' % self.name
def get_attname_column(self):
attname = self.get_attname()
column = self.db_column or attname
return attname, column
def get_default(self):
"Here we check if the default value is an object and return the to_field if so."
field_default = super(ForeignKey, self).get_default()
if isinstance(field_default, self.remote_field.model):
return getattr(field_default, self.target_field.attname)
return field_default
def get_db_prep_save(self, value, connection):
if value is None or (value == '' and
(not self.target_field.empty_strings_allowed or
connection.features.interprets_empty_strings_as_nulls)):
return None
else:
return self.target_field.get_db_prep_save(value, connection=connection)
def get_db_prep_value(self, value, connection, prepared=False):
return self.target_field.get_db_prep_value(value, connection, prepared)
def value_to_string(self, obj):
if not obj:
# In required many-to-one fields with only one available choice,
# select that one available choice. Note: For SelectFields
# we have to check that the length of choices is *2*, not 1,
# because SelectFields always have an initial "blank" value.
if not self.blank and self.choices:
choice_list = self.get_choices_default()
if len(choice_list) == 2:
return smart_text(choice_list[1][0])
return super(ForeignKey, self).value_to_string(obj)
def contribute_to_related_class(self, cls, related):
super(ForeignKey, self).contribute_to_related_class(cls, related)
if self.remote_field.field_name is None:
self.remote_field.field_name = cls._meta.pk.name
def formfield(self, **kwargs):
db = kwargs.pop('using', None)
if isinstance(self.remote_field.model, six.string_types):
raise ValueError("Cannot create form field for %r yet, because "
"its related model %r has not been loaded yet" %
(self.name, self.remote_field.model))
defaults = {
'form_class': forms.ModelChoiceField,
'queryset': self.remote_field.model._default_manager.using(db),
'to_field_name': self.remote_field.field_name,
}
defaults.update(kwargs)
return super(ForeignKey, self).formfield(**defaults)
def db_type(self, connection):
# The database column type of a ForeignKey is the column type
# of the field to which it points. An exception is if the ForeignKey
# points to an AutoField/PositiveIntegerField/PositiveSmallIntegerField,
# in which case the column type is simply that of an IntegerField.
        # If the database needs similar types for key fields, however, the only
        # thing we can do is make the AutoField an IntegerField.
rel_field = self.target_field
if (isinstance(rel_field, AutoField) or
(not connection.features.related_fields_match_type and
isinstance(rel_field, (PositiveIntegerField,
PositiveSmallIntegerField)))):
return IntegerField().db_type(connection=connection)
return rel_field.db_type(connection=connection)
def db_parameters(self, connection):
return {"type": self.db_type(connection), "check": []}
def convert_empty_strings(self, value, expression, connection, context):
if (not value) and isinstance(value, six.string_types):
return None
return value
def get_db_converters(self, connection):
converters = super(ForeignKey, self).get_db_converters(connection)
if connection.features.interprets_empty_strings_as_nulls:
converters += [self.convert_empty_strings]
return converters
def get_col(self, alias, output_field=None):
return super(ForeignKey, self).get_col(alias, output_field or self.target_field)
class OneToOneField(ForeignKey):
"""
A OneToOneField is essentially the same as a ForeignKey, with the exception
that it always carries a "unique" constraint with it and the reverse
relation always returns the object pointed to (since there will only ever
be one), rather than returning a list.
"""
# Field flags
many_to_many = False
many_to_one = False
one_to_many = False
one_to_one = True
related_accessor_class = SingleRelatedObjectDescriptor
rel_class = OneToOneRel
description = _("One-to-one relationship")
def __init__(self, to, on_delete=None, to_field=None, **kwargs):
kwargs['unique'] = True
if on_delete is None:
warnings.warn(
"on_delete will be a required arg for %s in Django 2.0. "
"Set it to models.CASCADE if you want to maintain the current default behavior. "
"See https://docs.djangoproject.com/en/%s/ref/models/fields/"
"#django.db.models.ForeignKey.on_delete" % (
self.__class__.__name__,
get_docs_version(),
),
RemovedInDjango20Warning, 2)
on_delete = CASCADE
elif not callable(on_delete):
warnings.warn(
"The signature for {0} will change in Django 2.0. "
"Pass to_field='{1}' as a kwarg instead of as an arg.".format(
self.__class__.__name__,
on_delete,
),
RemovedInDjango20Warning, 2)
to_field = on_delete
on_delete = CASCADE # Avoid warning in superclass
super(OneToOneField, self).__init__(to, on_delete, to_field=to_field, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super(OneToOneField, self).deconstruct()
if "unique" in kwargs:
del kwargs['unique']
return name, path, args, kwargs
def formfield(self, **kwargs):
if self.remote_field.parent_link:
return None
return super(OneToOneField, self).formfield(**kwargs)
def save_form_data(self, instance, data):
if isinstance(data, self.remote_field.model):
setattr(instance, self.name, data)
else:
setattr(instance, self.attname, data)
def _check_unique(self, **kwargs):
# Override ForeignKey since check isn't applicable here.
return []
def create_many_to_many_intermediary_model(field, klass):
from django.db import models
def set_managed(model, related, through):
through._meta.managed = model._meta.managed or related._meta.managed
to_model = resolve_relation(klass, field.remote_field.model)
name = '%s_%s' % (klass._meta.object_name, field.name)
lazy_related_operation(set_managed, klass, to_model, name)
to = make_model_tuple(to_model)[1]
from_ = klass._meta.model_name
if to == from_:
to = 'to_%s' % to
from_ = 'from_%s' % from_
meta = type(str('Meta'), (object,), {
'db_table': field._get_m2m_db_table(klass._meta),
'auto_created': klass,
'app_label': klass._meta.app_label,
'db_tablespace': klass._meta.db_tablespace,
'unique_together': (from_, to),
'verbose_name': '%(from)s-%(to)s relationship' % {'from': from_, 'to': to},
'verbose_name_plural': '%(from)s-%(to)s relationships' % {'from': from_, 'to': to},
'apps': field.model._meta.apps,
})
# Construct and return the new class.
return type(str(name), (models.Model,), {
'Meta': meta,
'__module__': klass.__module__,
from_: models.ForeignKey(
klass,
related_name='%s+' % name,
db_tablespace=field.db_tablespace,
db_constraint=field.remote_field.db_constraint,
on_delete=CASCADE,
),
to: models.ForeignKey(
to_model,
related_name='%s+' % name,
db_tablespace=field.db_tablespace,
db_constraint=field.remote_field.db_constraint,
on_delete=CASCADE,
)
})
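# Rough shape of the generated intermediary model, for illustration only (the
# real class is built dynamically above). For a hypothetical
# Group.members = ManyToManyField(Person), it is approximately:
#
#     class Group_members(models.Model):
#         group = models.ForeignKey(Group, on_delete=CASCADE, related_name='Group_members+')
#         person = models.ForeignKey(Person, on_delete=CASCADE, related_name='Group_members+')
#         class Meta:
#             unique_together = ('group', 'person')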
class ManyToManyField(RelatedField):
"""
Provide a many-to-many relation by using an intermediary model that
holds two ForeignKey fields pointed at the two sides of the relation.
    Unless a ``through`` model is provided, ManyToManyField will use the
create_many_to_many_intermediary_model factory to automatically generate
the intermediary model.
"""
# Field flags
many_to_many = True
many_to_one = False
one_to_many = False
one_to_one = False
rel_class = ManyToManyRel
description = _("Many-to-many relationship")
def __init__(self, to, related_name=None, related_query_name=None,
limit_choices_to=None, symmetrical=None, through=None,
through_fields=None, db_constraint=True, db_table=None,
swappable=True, **kwargs):
try:
to._meta
except AttributeError:
assert isinstance(to, six.string_types), (
"%s(%r) is invalid. First parameter to ManyToManyField must be "
"either a model, a model name, or the string %r" %
(self.__class__.__name__, to, RECURSIVE_RELATIONSHIP_CONSTANT)
)
# Class names must be ASCII in Python 2.x, so we forcibly coerce it
# here to break early if there's a problem.
to = str(to)
if symmetrical is None:
symmetrical = (to == RECURSIVE_RELATIONSHIP_CONSTANT)
if through is not None:
assert db_table is None, (
"Cannot specify a db_table if an intermediary model is used."
)
kwargs['rel'] = self.rel_class(
self, to,
related_name=related_name,
related_query_name=related_query_name,
limit_choices_to=limit_choices_to,
symmetrical=symmetrical,
through=through,
through_fields=through_fields,
db_constraint=db_constraint,
)
self.has_null_arg = 'null' in kwargs
super(ManyToManyField, self).__init__(**kwargs)
self.db_table = db_table
self.swappable = swappable
def check(self, **kwargs):
errors = super(ManyToManyField, self).check(**kwargs)
errors.extend(self._check_unique(**kwargs))
errors.extend(self._check_relationship_model(**kwargs))
errors.extend(self._check_ignored_options(**kwargs))
return errors
def _check_unique(self, **kwargs):
if self.unique:
return [
checks.Error(
'ManyToManyFields cannot be unique.',
hint=None,
obj=self,
id='fields.E330',
)
]
return []
def _check_ignored_options(self, **kwargs):
warnings = []
if self.has_null_arg:
warnings.append(
checks.Warning(
'null has no effect on ManyToManyField.',
hint=None,
obj=self,
id='fields.W340',
)
)
if len(self._validators) > 0:
warnings.append(
checks.Warning(
'ManyToManyField does not support validators.',
hint=None,
obj=self,
id='fields.W341',
)
)
return warnings
def _check_relationship_model(self, from_model=None, **kwargs):
if hasattr(self.remote_field.through, '_meta'):
qualified_model_name = "%s.%s" % (
self.remote_field.through._meta.app_label, self.remote_field.through.__name__)
else:
qualified_model_name = self.remote_field.through
errors = []
if self.remote_field.through not in apps.get_models(include_auto_created=True):
# The relationship model is not installed.
errors.append(
checks.Error(
("Field specifies a many-to-many relation through model "
"'%s', which has not been installed.") %
qualified_model_name,
hint=None,
obj=self,
id='fields.E331',
)
)
else:
assert from_model is not None, (
"ManyToManyField with intermediate "
"tables cannot be checked if you don't pass the model "
"where the field is attached to."
)
# Set some useful local variables
to_model = resolve_relation(from_model, self.remote_field.model)
from_model_name = from_model._meta.object_name
if isinstance(to_model, six.string_types):
to_model_name = to_model
else:
to_model_name = to_model._meta.object_name
relationship_model_name = self.remote_field.through._meta.object_name
self_referential = from_model == to_model
# Check symmetrical attribute.
if (self_referential and self.remote_field.symmetrical and
not self.remote_field.through._meta.auto_created):
errors.append(
checks.Error(
'Many-to-many fields with intermediate tables must not be symmetrical.',
hint=None,
obj=self,
id='fields.E332',
)
)
# Count foreign keys in intermediate model
if self_referential:
seen_self = sum(from_model == getattr(field.remote_field, 'model', None)
for field in self.remote_field.through._meta.fields)
if seen_self > 2 and not self.remote_field.through_fields:
errors.append(
checks.Error(
("The model is used as an intermediate model by "
"'%s', but it has more than two foreign keys "
"to '%s', which is ambiguous. You must specify "
"which two foreign keys Django should use via the "
"through_fields keyword argument.") % (self, from_model_name),
hint=("Use through_fields to specify which two "
"foreign keys Django should use."),
obj=self.remote_field.through,
id='fields.E333',
)
)
else:
# Count foreign keys in relationship model
seen_from = sum(from_model == getattr(field.remote_field, 'model', None)
for field in self.remote_field.through._meta.fields)
seen_to = sum(to_model == getattr(field.remote_field, 'model', None)
for field in self.remote_field.through._meta.fields)
if seen_from > 1 and not self.remote_field.through_fields:
errors.append(
checks.Error(
("The model is used as an intermediate model by "
"'%s', but it has more than one foreign key "
"from '%s', which is ambiguous. You must specify "
"which foreign key Django should use via the "
"through_fields keyword argument.") % (self, from_model_name),
                            hint=('If you want to create a recursive relationship, '
                                  'use ManyToManyField("self", symmetrical=False, '
                                  'through="%s").') % relationship_model_name,
obj=self,
id='fields.E334',
)
)
if seen_to > 1 and not self.remote_field.through_fields:
errors.append(
checks.Error(
("The model is used as an intermediate model by "
"'%s', but it has more than one foreign key "
"to '%s', which is ambiguous. You must specify "
"which foreign key Django should use via the "
"through_fields keyword argument.") % (self, to_model_name),
                            hint=('If you want to create a recursive '
                                  'relationship, use ManyToManyField("self", '
                                  'symmetrical=False, through="%s").') % relationship_model_name,
obj=self,
id='fields.E335',
)
)
if seen_from == 0 or seen_to == 0:
errors.append(
checks.Error(
("The model is used as an intermediate model by "
"'%s', but it does not have a foreign key to '%s' or '%s'.") % (
self, from_model_name, to_model_name
),
hint=None,
obj=self.remote_field.through,
id='fields.E336',
)
)
# Validate `through_fields`.
if self.remote_field.through_fields is not None:
# Validate that we're given an iterable of at least two items
# and that none of them is "falsy".
if not (len(self.remote_field.through_fields) >= 2 and
self.remote_field.through_fields[0] and self.remote_field.through_fields[1]):
errors.append(
checks.Error(
("Field specifies 'through_fields' but does not "
"provide the names of the two link fields that should be "
"used for the relation through model "
"'%s'.") % qualified_model_name,
hint=("Make sure you specify 'through_fields' as "
"through_fields=('field1', 'field2')"),
obj=self,
id='fields.E337',
)
)
# Validate the given through fields -- they should be actual
# fields on the through model, and also be foreign keys to the
# expected models.
else:
assert from_model is not None, (
"ManyToManyField with intermediate "
"tables cannot be checked if you don't pass the model "
"where the field is attached to."
)
source, through, target = from_model, self.remote_field.through, self.remote_field.model
source_field_name, target_field_name = self.remote_field.through_fields[:2]
for field_name, related_model in ((source_field_name, source),
(target_field_name, target)):
possible_field_names = []
for f in through._meta.fields:
if hasattr(f, 'remote_field') and getattr(f.remote_field, 'model', None) == related_model:
possible_field_names.append(f.name)
if possible_field_names:
hint = ("Did you mean one of the following foreign "
"keys to '%s': %s?") % (related_model._meta.object_name,
', '.join(possible_field_names))
else:
hint = None
try:
field = through._meta.get_field(field_name)
except FieldDoesNotExist:
errors.append(
checks.Error(
("The intermediary model '%s' has no field '%s'.") % (
qualified_model_name, field_name),
hint=hint,
obj=self,
id='fields.E338',
)
)
else:
if not (hasattr(field, 'remote_field') and
getattr(field.remote_field, 'model', None) == related_model):
errors.append(
checks.Error(
"'%s.%s' is not a foreign key to '%s'." % (
through._meta.object_name, field_name,
related_model._meta.object_name),
hint=hint,
obj=self,
id='fields.E339',
)
)
return errors
def deconstruct(self):
name, path, args, kwargs = super(ManyToManyField, self).deconstruct()
# Handle the simpler arguments.
if self.db_table is not None:
kwargs['db_table'] = self.db_table
if self.remote_field.db_constraint is not True:
kwargs['db_constraint'] = self.remote_field.db_constraint
if self.remote_field.related_name is not None:
kwargs['related_name'] = self.remote_field.related_name
if self.remote_field.related_query_name is not None:
kwargs['related_query_name'] = self.remote_field.related_query_name
# Rel needs more work.
if isinstance(self.remote_field.model, six.string_types):
kwargs['to'] = self.remote_field.model
else:
kwargs['to'] = "%s.%s" % (
self.remote_field.model._meta.app_label,
self.remote_field.model._meta.object_name,
)
if getattr(self.remote_field, 'through', None) is not None:
if isinstance(self.remote_field.through, six.string_types):
kwargs['through'] = self.remote_field.through
elif not self.remote_field.through._meta.auto_created:
kwargs['through'] = "%s.%s" % (
self.remote_field.through._meta.app_label,
self.remote_field.through._meta.object_name,
)
# If swappable is True, then see if we're actually pointing to the target
# of a swap.
swappable_setting = self.swappable_setting
if swappable_setting is not None:
# If it's already a settings reference, error.
if hasattr(kwargs['to'], "setting_name"):
if kwargs['to'].setting_name != swappable_setting:
raise ValueError(
"Cannot deconstruct a ManyToManyField pointing to a "
"model that is swapped in place of more than one model "
"(%s and %s)" % (kwargs['to'].setting_name, swappable_setting)
)
from django.db.migrations.writer import SettingsReference
kwargs['to'] = SettingsReference(
kwargs['to'],
swappable_setting,
)
return name, path, args, kwargs
def _get_path_info(self, direct=False):
"""
Called by both direct and indirect m2m traversal.
"""
pathinfos = []
int_model = self.remote_field.through
linkfield1 = int_model._meta.get_field(self.m2m_field_name())
linkfield2 = int_model._meta.get_field(self.m2m_reverse_field_name())
if direct:
join1infos = linkfield1.get_reverse_path_info()
join2infos = linkfield2.get_path_info()
else:
join1infos = linkfield2.get_reverse_path_info()
join2infos = linkfield1.get_path_info()
pathinfos.extend(join1infos)
pathinfos.extend(join2infos)
return pathinfos
def get_path_info(self):
return self._get_path_info(direct=True)
def get_reverse_path_info(self):
return self._get_path_info(direct=False)
def get_choices_default(self):
return Field.get_choices(self, include_blank=False)
def _get_m2m_db_table(self, opts):
"""
Function that can be curried to provide the m2m table name for this
relation.
"""
if self.remote_field.through is not None:
return self.remote_field.through._meta.db_table
elif self.db_table:
return self.db_table
else:
return utils.truncate_name('%s_%s' % (opts.db_table, self.name),
connection.ops.max_name_length())
def _get_m2m_attr(self, related, attr):
"""
Function that can be curried to provide the source accessor or DB
column name for the m2m table.
"""
cache_attr = '_m2m_%s_cache' % attr
if hasattr(self, cache_attr):
return getattr(self, cache_attr)
if self.remote_field.through_fields is not None:
link_field_name = self.remote_field.through_fields[0]
else:
link_field_name = None
for f in self.remote_field.through._meta.fields:
if (f.is_relation and f.remote_field.model == related.related_model and
(link_field_name is None or link_field_name == f.name)):
setattr(self, cache_attr, getattr(f, attr))
return getattr(self, cache_attr)
def _get_m2m_reverse_attr(self, related, attr):
"""
Function that can be curried to provide the related accessor or DB
column name for the m2m table.
"""
cache_attr = '_m2m_reverse_%s_cache' % attr
if hasattr(self, cache_attr):
return getattr(self, cache_attr)
found = False
if self.remote_field.through_fields is not None:
link_field_name = self.remote_field.through_fields[1]
else:
link_field_name = None
for f in self.remote_field.through._meta.fields:
if f.is_relation and f.remote_field.model == related.model:
if link_field_name is None and related.related_model == related.model:
# If this is an m2m-intermediate to self,
# the first foreign key you find will be
# the source column. Keep searching for
# the second foreign key.
if found:
setattr(self, cache_attr, getattr(f, attr))
break
else:
found = True
elif link_field_name is None or link_field_name == f.name:
setattr(self, cache_attr, getattr(f, attr))
break
return getattr(self, cache_attr)
def value_to_string(self, obj):
data = ''
if obj:
qs = getattr(obj, self.name).all()
data = [instance._get_pk_val() for instance in qs]
else:
# In required many-to-many fields with only one available choice,
# select that one available choice.
if not self.blank:
choices_list = self.get_choices_default()
if len(choices_list) == 1:
data = [choices_list[0][0]]
return smart_text(data)
def contribute_to_class(self, cls, name, **kwargs):
# To support multiple relations to self, it's useful to have a non-None
# related name on symmetrical relations for internal reasons. The
# concept doesn't make a lot of sense externally ("you want me to
# specify *what* on my non-reversible relation?!"), so we set it up
# automatically. The funky name reduces the chance of an accidental
# clash.
if self.remote_field.symmetrical and (
self.remote_field.model == "self" or self.remote_field.model == cls._meta.object_name):
self.remote_field.related_name = "%s_rel_+" % name
elif self.remote_field.is_hidden():
# If the backwards relation is disabled, replace the original
# related_name with one generated from the m2m field name. Django
# still uses backwards relations internally and we need to avoid
# clashes between multiple m2m fields with related_name == '+'.
self.remote_field.related_name = "_%s_%s_+" % (cls.__name__.lower(), name)
super(ManyToManyField, self).contribute_to_class(cls, name, **kwargs)
# The intermediate m2m model is not auto created if:
        #  1) There is a manually specified intermediate, or
        #  2) The class owning the m2m field is abstract, or
        #  3) The class owning the m2m field has been swapped out.
if not cls._meta.abstract:
if self.remote_field.through:
def resolve_through_model(_, model, field):
field.remote_field.through = model
lazy_related_operation(resolve_through_model, cls, self.remote_field.through, field=self)
elif not cls._meta.swapped:
self.remote_field.through = create_many_to_many_intermediary_model(self, cls)
# Add the descriptor for the m2m relation.
setattr(cls, self.name, ManyRelatedObjectsDescriptor(self.remote_field, reverse=False))
# Set up the accessor for the m2m table name for the relation.
self.m2m_db_table = curry(self._get_m2m_db_table, cls._meta)
def contribute_to_related_class(self, cls, related):
# Internal M2Ms (i.e., those with a related name ending with '+')
# and swapped models don't get a related descriptor.
if not self.remote_field.is_hidden() and not related.related_model._meta.swapped:
setattr(cls, related.get_accessor_name(), ManyRelatedObjectsDescriptor(self.remote_field, reverse=True))
# Set up the accessors for the column names on the m2m table.
self.m2m_column_name = curry(self._get_m2m_attr, related, 'column')
self.m2m_reverse_name = curry(self._get_m2m_reverse_attr, related, 'column')
self.m2m_field_name = curry(self._get_m2m_attr, related, 'name')
self.m2m_reverse_field_name = curry(self._get_m2m_reverse_attr, related, 'name')
get_m2m_rel = curry(self._get_m2m_attr, related, 'remote_field')
self.m2m_target_field_name = lambda: get_m2m_rel().field_name
get_m2m_reverse_rel = curry(self._get_m2m_reverse_attr, related, 'remote_field')
self.m2m_reverse_target_field_name = lambda: get_m2m_reverse_rel().field_name
def set_attributes_from_rel(self):
pass
def value_from_object(self, obj):
"""
Return the value of this field in the given model instance.
"""
return getattr(obj, self.attname).all()
def save_form_data(self, instance, data):
setattr(instance, self.attname, data)
def formfield(self, **kwargs):
db = kwargs.pop('using', None)
defaults = {
'form_class': forms.ModelMultipleChoiceField,
'queryset': self.remote_field.model._default_manager.using(db),
}
defaults.update(kwargs)
# If initial is passed in, it's a list of related objects, but the
# MultipleChoiceField takes a list of IDs.
if defaults.get('initial') is not None:
initial = defaults['initial']
if callable(initial):
initial = initial()
defaults['initial'] = [i._get_pk_val() for i in initial]
return super(ManyToManyField, self).formfield(**defaults)
def db_type(self, connection):
# A ManyToManyField is not represented by a single column,
# so return None.
return None
def db_parameters(self, connection):
return {"type": None, "check": None}
| bsd-3-clause | 3,037,988,771,969,174,500 | 41.328797 | 119 | 0.568327 | false |
Nebelhom/WordPuzzleCreator | lib/werkzeug/debug/console.py | 314 | 5557 | # -*- coding: utf-8 -*-
"""
werkzeug.debug.console
~~~~~~~~~~~~~~~~~~~~~~
Interactive console support.
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD.
"""
import sys
import code
from types import CodeType
from werkzeug.utils import escape
from werkzeug.local import Local
from werkzeug.debug.repr import debug_repr, dump, helper
_local = Local()
class HTMLStringO(object):
"""A StringO version that HTML escapes on write."""
def __init__(self):
self._buffer = []
def isatty(self):
return False
def close(self):
pass
def flush(self):
pass
def seek(self, n, mode=0):
pass
def readline(self):
if len(self._buffer) == 0:
return ''
ret = self._buffer[0]
del self._buffer[0]
return ret
def reset(self):
val = ''.join(self._buffer)
del self._buffer[:]
return val
def _write(self, x):
if isinstance(x, bytes):
x = x.decode('utf-8', 'replace')
self._buffer.append(x)
def write(self, x):
self._write(escape(x))
def writelines(self, x):
self._write(escape(''.join(x)))
class ThreadedStream(object):
"""Thread-local wrapper for sys.stdout for the interactive console."""
def push():
if not isinstance(sys.stdout, ThreadedStream):
sys.stdout = ThreadedStream()
_local.stream = HTMLStringO()
push = staticmethod(push)
def fetch():
try:
stream = _local.stream
except AttributeError:
return ''
return stream.reset()
fetch = staticmethod(fetch)
def displayhook(obj):
try:
stream = _local.stream
except AttributeError:
return _displayhook(obj)
# stream._write bypasses escaping as debug_repr is
# already generating HTML for us.
if obj is not None:
_local._current_ipy.locals['_'] = obj
stream._write(debug_repr(obj))
displayhook = staticmethod(displayhook)
def __setattr__(self, name, value):
raise AttributeError('read only attribute %s' % name)
def __dir__(self):
return dir(sys.__stdout__)
def __getattribute__(self, name):
if name == '__members__':
return dir(sys.__stdout__)
try:
stream = _local.stream
except AttributeError:
stream = sys.__stdout__
return getattr(stream, name)
def __repr__(self):
return repr(sys.__stdout__)
# add the threaded stream as display hook
_displayhook = sys.displayhook
sys.displayhook = ThreadedStream.displayhook
class _ConsoleLoader(object):
def __init__(self):
self._storage = {}
def register(self, code, source):
self._storage[id(code)] = source
# register code objects of wrapped functions too.
for var in code.co_consts:
if isinstance(var, CodeType):
self._storage[id(var)] = source
def get_source_by_code(self, code):
try:
return self._storage[id(code)]
except KeyError:
pass
def _wrap_compiler(console):
compile = console.compile
def func(source, filename, symbol):
code = compile(source, filename, symbol)
console.loader.register(code, source)
return code
console.compile = func
class _InteractiveConsole(code.InteractiveInterpreter):
def __init__(self, globals, locals):
code.InteractiveInterpreter.__init__(self, locals)
self.globals = dict(globals)
self.globals['dump'] = dump
self.globals['help'] = helper
self.globals['__loader__'] = self.loader = _ConsoleLoader()
self.more = False
self.buffer = []
_wrap_compiler(self)
def runsource(self, source):
source = source.rstrip() + '\n'
ThreadedStream.push()
prompt = self.more and '... ' or '>>> '
try:
source_to_eval = ''.join(self.buffer + [source])
if code.InteractiveInterpreter.runsource(self,
source_to_eval, '<debugger>', 'single'):
self.more = True
self.buffer.append(source)
else:
self.more = False
del self.buffer[:]
finally:
output = ThreadedStream.fetch()
return prompt + source + output
def runcode(self, code):
try:
eval(code, self.globals, self.locals)
except Exception:
self.showtraceback()
def showtraceback(self):
from werkzeug.debug.tbtools import get_current_traceback
tb = get_current_traceback(skip=1)
sys.stdout._write(tb.render_summary())
def showsyntaxerror(self, filename=None):
from werkzeug.debug.tbtools import get_current_traceback
tb = get_current_traceback(skip=4)
sys.stdout._write(tb.render_summary())
def write(self, data):
sys.stdout.write(data)
class Console(object):
"""An interactive console."""
def __init__(self, globals=None, locals=None):
if locals is None:
locals = {}
if globals is None:
globals = {}
self._ipy = _InteractiveConsole(globals, locals)
def eval(self, code):
_local._current_ipy = self._ipy
old_sys_stdout = sys.stdout
try:
return self._ipy.runsource(code)
finally:
sys.stdout = old_sys_stdout
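# Minimal usage sketch (added for illustration; only the Console class itself
# is part of this module). Each eval() call captures stdout via ThreadedStream
# and returns the prompt, the source and the HTML-escaped output, roughly:
#
#     console = Console()
#     console.eval('x = 21')
#     console.eval('print(x * 2)')   # -> ">>> print(x * 2)\n42\n"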
| apache-2.0 | 6,249,035,870,519,065,000 | 25.336493 | 76 | 0.573691 | false |
suncycheng/intellij-community | python/testData/inspections/PyPropertyDefinitionInspection26/test.py | 22 | 6220 | import abc
class A(object):
def __init__(self):
self._x = 1
@property
def foo(self):
return self._x
@foo.setter
def foo(self, x):
self._x = x
@foo.deleter
def foo(self):
pass
@property
def boo(self):
return self._x
<warning descr="Names of function and decorator don't match; property accessor is not created">@boo.setter</warning>
def boo1(self, x):
self._x = x
<warning descr="Names of function and decorator don't match; property accessor is not created">@boo.deleter</warning>
def boo2(self):
pass
@property
def <warning descr="Getter should return or yield something">moo</warning>(self):
pass
@moo.setter
def <warning descr="Setter should not return a value">moo</warning>(self, x):
return 1
@moo.deleter
def <warning descr="Deleter should not return a value">moo</warning>(self):
return self._x
@qoo.setter # unknown qoo is reported in ref inspection
def qoo(self, v):
self._x = v
@property
def futuroo(self):
raise NotImplementedError("Override!") # ok though no return
@property
def futuroo(self):
"""Docstring."""
raise NotImplementedError("Override!") # ok though no return
@property
def xoo(self):
return self._x
@xoo.setter
def xoo(self, x):
self._x = x
return
get_foo2 = lambda self: 'foo2'
foo2 = property(get_foo2)
@property
@abc.abstractproperty
def abstract_property(self):
pass
# PY-19701
class Test(object):
def __init__(self):
self._myprop = None
def get_myprop(self):
return self._myprop
def set_myprop(self, val):
def inner_func(n):
return n
self._myprop = inner_func(val)
myprop = property(get_myprop, set_myprop)
# all flows have exit point
class Test(object):
def __init__(self):
self._myprop = None
def get_myprop(self):
if a > b:
<error descr="Python versions < 3.3 do not allow 'return' with argument inside generator.">return self._myprop</error>
elif a < b:
raise self._myprop
else:
yield self._myprop
myprop = property(get_myprop)
# some flows have not exit point
class Test(object):
def __init__(self):
self._myprop = None
def get_myprop(self):
if a > b:
return self._myprop
elif a < b:
raise self._myprop
myprop = property(get_myprop)
# some flows have not exit point
class Test(object):
def __init__(self):
self._myprop = None
def get_myprop(self):
if a > b:
return self._myprop
myprop = property(get_myprop)
# non-empty for
class Test(object):
def __init__(self):
self._myprop = None
def get_myprop(self):
for i in range(5):
yield i
myprop = property(get_myprop)
# empty for
class Test(object):
def __init__(self):
self._myprop = None
def get_myprop(self):
for i in []:
yield i
myprop = property(get_myprop) # shouldn't pass with better analysis, pass at the moment
# non-empty while
class Test(object):
def __init__(self):
self._myprop = None
def get_myprop(self):
i = 0
while i < 5:
yield i
i += 1
myprop = property(get_myprop)
# empty while
class Test(object):
def __init__(self):
self._myprop = None
def get_myprop(self):
while False:
yield i
myprop = property(get_myprop) # shouldn't pass with better analysis, pass at the moment
# non-empty while with two conditions
class Test(object):
def __init__(self):
self._myprop = None
def get_myprop(self):
i = 0
j = 0
while i < 5 and j == 0:
yield i
i += 1
myprop = property(get_myprop)
# empty while with two conditions
class Test(object):
def __init__(self):
self._myprop = None
def get_myprop(self):
i = 0
j = 0
while i > 5 and j == 0:
yield i
myprop = property(get_myprop) # shouldn't pass with better analysis, pass at the moment
# setter has exit point
class Test(object):
def __init__(self):
self._myprop = None
def get_myprop(self):
return self._myprop
def set_myprop(self, val):
self._myprop = val
return 10
myprop = property(get_myprop, <warning descr="Setter should not return a value">set_myprop</warning>)
# setter has exit point
class Test(object):
def __init__(self):
self._myprop = None
def get_myprop(self):
return self._myprop
def set_myprop(self, val):
self._myprop = val
yield 10
myprop = property(get_myprop, <warning descr="Setter should not return a value">set_myprop</warning>)
# setter has raise statement
class Test(object):
def __init__(self):
self._myprop = None
def get_myprop(self):
return self._myprop
def set_myprop(self, val):
self._myprop = val
raise NotImplementedError()
myprop = property(get_myprop, set_myprop)
# setter has exit point in some flow
class Test(object):
def __init__(self):
self._myprop = None
def get_myprop(self):
return self._myprop
def set_myprop(self, val):
self._myprop = val
if a > b:
return 10
myprop = property(get_myprop, <warning descr="Setter should not return a value">set_myprop</warning>)
# setter has exit point in some flow
class Test(object):
def __init__(self):
self._myprop = None
def get_myprop(self):
return self._myprop
def set_myprop(self, val):
self._myprop = val
if a > b:
yield 10
myprop = property(get_myprop, <warning descr="Setter should not return a value">set_myprop</warning>)
# setter has raise statement in some flow
class Test(object):
def __init__(self):
self._myprop = None
def get_myprop(self):
return self._myprop
def set_myprop(self, val):
self._myprop = val
if a > b:
raise NotImplementedError()
myprop = property(get_myprop, set_myprop)
| apache-2.0 | -2,405,500,138,406,625,300 | 19.528053 | 130 | 0.592444 | false |
forrestv/bitcoin | contrib/testgen/gen_base58_test_vectors.py | 1000 | 4343 | #!/usr/bin/env python
'''
Generate valid and invalid base58 address and private key test vectors.
Usage:
gen_base58_test_vectors.py valid 50 > ../../src/test/data/base58_keys_valid.json
gen_base58_test_vectors.py invalid 50 > ../../src/test/data/base58_keys_invalid.json
'''
# 2012 Wladimir J. van der Laan
# Released under MIT License
import os
from itertools import islice
from base58 import b58encode, b58decode, b58encode_chk, b58decode_chk, b58chars
import random
from binascii import b2a_hex
# key types
PUBKEY_ADDRESS = 0
SCRIPT_ADDRESS = 5
PUBKEY_ADDRESS_TEST = 111
SCRIPT_ADDRESS_TEST = 196
PRIVKEY = 128
PRIVKEY_TEST = 239
metadata_keys = ['isPrivkey', 'isTestnet', 'addrType', 'isCompressed']
# templates for valid sequences
templates = [
# prefix, payload_size, suffix, metadata
# None = N/A
((PUBKEY_ADDRESS,), 20, (), (False, False, 'pubkey', None)),
((SCRIPT_ADDRESS,), 20, (), (False, False, 'script', None)),
((PUBKEY_ADDRESS_TEST,), 20, (), (False, True, 'pubkey', None)),
((SCRIPT_ADDRESS_TEST,), 20, (), (False, True, 'script', None)),
((PRIVKEY,), 32, (), (True, False, None, False)),
((PRIVKEY,), 32, (1,), (True, False, None, True)),
((PRIVKEY_TEST,), 32, (), (True, True, None, False)),
((PRIVKEY_TEST,), 32, (1,), (True, True, None, True))
]
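# For example, the first template above describes a mainnet pay-to-pubkey-hash
# address: a single 0x00 prefix byte, a 20-byte payload, no suffix, and
# metadata marking it as a non-testnet, non-privkey 'pubkey' entry.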
def is_valid(v):
'''Check vector v for validity'''
result = b58decode_chk(v)
if result is None:
return False
valid = False
for template in templates:
prefix = str(bytearray(template[0]))
suffix = str(bytearray(template[2]))
if result.startswith(prefix) and result.endswith(suffix):
if (len(result) - len(prefix) - len(suffix)) == template[1]:
return True
return False
def gen_valid_vectors():
'''Generate valid test vectors'''
while True:
for template in templates:
prefix = str(bytearray(template[0]))
payload = os.urandom(template[1])
suffix = str(bytearray(template[2]))
rv = b58encode_chk(prefix + payload + suffix)
assert is_valid(rv)
metadata = dict([(x,y) for (x,y) in zip(metadata_keys,template[3]) if y is not None])
yield (rv, b2a_hex(payload), metadata)
def gen_invalid_vector(template, corrupt_prefix, randomize_payload_size, corrupt_suffix):
'''Generate possibly invalid vector'''
if corrupt_prefix:
prefix = os.urandom(1)
else:
prefix = str(bytearray(template[0]))
if randomize_payload_size:
payload = os.urandom(max(int(random.expovariate(0.5)), 50))
else:
payload = os.urandom(template[1])
if corrupt_suffix:
suffix = os.urandom(len(template[2]))
else:
suffix = str(bytearray(template[2]))
return b58encode_chk(prefix + payload + suffix)
def randbool(p = 0.5):
'''Return True with P(p)'''
return random.random() < p
def gen_invalid_vectors():
'''Generate invalid test vectors'''
# start with some manual edge-cases
yield "",
yield "x",
while True:
# kinds of invalid vectors:
# invalid prefix
# invalid payload length
# invalid (randomized) suffix (add random data)
# corrupt checksum
for template in templates:
val = gen_invalid_vector(template, randbool(0.2), randbool(0.2), randbool(0.2))
if random.randint(0,10)<1: # line corruption
if randbool(): # add random character to end
val += random.choice(b58chars)
else: # replace random character in the middle
n = random.randint(0, len(val))
val = val[0:n] + random.choice(b58chars) + val[n+1:]
if not is_valid(val):
yield val,
if __name__ == '__main__':
import sys, json
iters = {'valid':gen_valid_vectors, 'invalid':gen_invalid_vectors}
try:
uiter = iters[sys.argv[1]]
except IndexError:
uiter = gen_valid_vectors
try:
count = int(sys.argv[2])
except IndexError:
count = 0
data = list(islice(uiter(), count))
json.dump(data, sys.stdout, sort_keys=True, indent=4)
sys.stdout.write('\n')
| mit | -951,837,210,162,159,000 | 33.468254 | 97 | 0.591066 | false |
Bismarrck/tensorflow | tensorflow/python/debug/cli/evaluator_test.py | 89 | 11162 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for arbitrary expression evaluator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.debug.cli import evaluator
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
class ParseDebugTensorNameTest(test_util.TensorFlowTestCase):
def testParseNamesWithoutPrefixOrSuffix(self):
device_name, node_name, output_slot, debug_op, exec_index = (
evaluator._parse_debug_tensor_name("foo:1"))
self.assertIsNone(device_name)
self.assertEqual("foo", node_name)
self.assertEqual(1, output_slot)
self.assertEqual("DebugIdentity", debug_op)
self.assertEqual(0, exec_index)
device_name, node_name, output_slot, debug_op, exec_index = (
evaluator._parse_debug_tensor_name("hidden_0/Weights:0"))
self.assertIsNone(device_name)
self.assertEqual("hidden_0/Weights", node_name)
self.assertEqual(0, output_slot)
self.assertEqual("DebugIdentity", debug_op)
self.assertEqual(0, exec_index)
def testParseNamesWithoutPrefixWithDebugOpSuffix(self):
device_name, node_name, output_slot, debug_op, exec_index = (
evaluator._parse_debug_tensor_name("foo:1:DebugNanCount"))
self.assertIsNone(device_name)
self.assertEqual("foo", node_name)
self.assertEqual(1, output_slot)
self.assertEqual("DebugNanCount", debug_op)
self.assertEqual(0, exec_index)
device_name, node_name, output_slot, debug_op, exec_index = (
evaluator._parse_debug_tensor_name(
"hidden_0/Weights:0:DebugNumericSummary"))
self.assertIsNone(device_name)
self.assertEqual("hidden_0/Weights", node_name)
self.assertEqual(0, output_slot)
self.assertEqual("DebugNumericSummary", debug_op)
self.assertEqual(0, exec_index)
def testParseNamesWithDeviceNamePrefixWithoutDebugOpSuffix(self):
device_name, node_name, output_slot, debug_op, exec_index = (
evaluator._parse_debug_tensor_name(
"/job:ps/replica:0/task:2/cpu:0:foo:1"))
self.assertEqual("/job:ps/replica:0/task:2/cpu:0", device_name)
self.assertEqual("foo", node_name)
self.assertEqual(1, output_slot)
self.assertEqual("DebugIdentity", debug_op)
self.assertEqual(0, exec_index)
device_name, node_name, output_slot, debug_op, exec_index = (
evaluator._parse_debug_tensor_name(
"/job:worker/replica:0/task:3/gpu:0:hidden_0/Weights:0"))
self.assertEqual("/job:worker/replica:0/task:3/gpu:0", device_name)
self.assertEqual("hidden_0/Weights", node_name)
self.assertEqual(0, output_slot)
self.assertEqual("DebugIdentity", debug_op)
self.assertEqual(0, exec_index)
def testParseNamesWithDeviceNamePrefixWithDebugOpSuffix(self):
device_name, node_name, output_slot, debug_op, exec_index = (
evaluator._parse_debug_tensor_name(
"/job:ps/replica:0/task:2/cpu:0:foo:1:DebugNanCount"))
self.assertEqual("/job:ps/replica:0/task:2/cpu:0", device_name)
self.assertEqual("foo", node_name)
self.assertEqual(1, output_slot)
self.assertEqual("DebugNanCount", debug_op)
self.assertEqual(0, exec_index)
device_name, node_name, output_slot, debug_op, exec_index = (
evaluator._parse_debug_tensor_name(
"/job:worker/replica:0/task:3/gpu:0:"
"hidden_0/Weights:0:DebugNumericSummary"))
self.assertEqual("/job:worker/replica:0/task:3/gpu:0", device_name)
self.assertEqual("hidden_0/Weights", node_name)
self.assertEqual(0, output_slot)
self.assertEqual("DebugNumericSummary", debug_op)
self.assertEqual(0, exec_index)
def testParseMalformedDebugTensorName(self):
with self.assertRaisesRegexp(
ValueError,
r"The debug tensor name in the to-be-evaluated expression is "
r"malformed:"):
evaluator._parse_debug_tensor_name(
"/job:ps/replica:0/task:2/cpu:0:foo:1:DebugNanCount:1337")
with self.assertRaisesRegexp(
ValueError,
r"The debug tensor name in the to-be-evaluated expression is "
r"malformed:"):
evaluator._parse_debug_tensor_name(
"/job:ps/replica:0/cpu:0:foo:1:DebugNanCount")
with self.assertRaises(ValueError):
evaluator._parse_debug_tensor_name(
"foo:1:DebugNanCount[]")
with self.assertRaises(ValueError):
evaluator._parse_debug_tensor_name(
"foo:1[DebugNanCount]")
def testParseNamesWithExecIndex(self):
device_name, node_name, output_slot, debug_op, exec_index = (
evaluator._parse_debug_tensor_name("foo:1[20]"))
self.assertIsNone(device_name)
self.assertEqual("foo", node_name)
self.assertEqual(1, output_slot)
self.assertEqual("DebugIdentity", debug_op)
self.assertEqual(20, exec_index)
device_name, node_name, output_slot, debug_op, exec_index = (
evaluator._parse_debug_tensor_name("hidden_0/Weights:0[3]"))
self.assertIsNone(device_name)
self.assertEqual("hidden_0/Weights", node_name)
self.assertEqual(0, output_slot)
self.assertEqual("DebugIdentity", debug_op)
self.assertEqual(3, exec_index)
class EvaluatorTest(test_util.TensorFlowTestCase):
def testEvaluateSingleTensor(self):
dump = test.mock.MagicMock()
def fake_get_tensors(node_name, output_slot, debug_op, device_name=None):
del node_name, output_slot, debug_op, device_name # Unused.
return [np.array([[1.0, 2.0, 3.0]])]
with test.mock.patch.object(
dump, "get_tensors", side_effect=fake_get_tensors, autospec=True):
ev = evaluator.ExpressionEvaluator(dump)
self.assertEqual(3, ev.evaluate("np.size(`a:0`)"))
# Whitespace in backticks should be tolerated.
self.assertEqual(3, ev.evaluate("np.size(` a:0 `)"))
def testEvaluateTwoTensors(self):
dump = test.mock.MagicMock()
def fake_get_tensors(node_name, output_slot, debug_op, device_name=None):
del debug_op, device_name # Unused.
if node_name == "a" and output_slot == 0:
return [np.array([[1.0, -2.0], [0.0, 1.0]])]
elif node_name == "b" and output_slot == 0:
return [np.array([[-1.0], [1.0]])]
with test.mock.patch.object(
dump, "get_tensors", side_effect=fake_get_tensors, autospec=True):
ev = evaluator.ExpressionEvaluator(dump)
self.assertAllClose([[-3.0], [1.0]],
ev.evaluate("np.matmul(`a:0`, `b:0`)"))
self.assertAllClose(
[[-4.0], [2.0]], ev.evaluate("np.matmul(`a:0`, `b:0`) + `b:0`"))
def testEvaluateNoneExistentTensorGeneratesError(self):
dump = test.mock.MagicMock()
def fake_get_tensors(node_name, output_slot, debug_op, device_name=None):
del node_name, output_slot, debug_op, device_name # Unused.
raise debug_data.WatchKeyDoesNotExistInDebugDumpDirError()
with test.mock.patch.object(
dump, "get_tensors", side_effect=fake_get_tensors, autospec=True):
ev = evaluator.ExpressionEvaluator(dump)
with self.assertRaisesRegexp(
ValueError, "Eval failed due to the value of .* being unavailable"):
ev.evaluate("np.matmul(`a:0`, `b:0`)")
def testEvaluateWithMultipleDevicesContainingTheSameTensorName(self):
dump = test.mock.MagicMock()
def fake_get_tensors(node_name, output_slot, debug_op, device_name=None):
del output_slot, debug_op # Unused.
if node_name == "a" and device_name is None:
raise ValueError(
"There are multiple (2) devices with nodes named 'a' but "
"device_name is not specified")
elif (node_name == "a" and
device_name == "/job:worker/replica:0/task:0/cpu:0"):
return [np.array(10.0)]
elif (node_name == "a" and
device_name == "/job:worker/replica:0/task:1/cpu:0"):
return [np.array(20.0)]
with test.mock.patch.object(
dump, "get_tensors", side_effect=fake_get_tensors, autospec=True):
ev = evaluator.ExpressionEvaluator(dump)
with self.assertRaisesRegexp(ValueError, r"multiple \(2\) devices"):
ev.evaluate("`a:0` + `a:0`")
self.assertAllClose(
30.0,
ev.evaluate("`/job:worker/replica:0/task:0/cpu:0:a:0` + "
"`/job:worker/replica:0/task:1/cpu:0:a:0`"))
def testEvaluateWithNonDefaultDebugOp(self):
dump = test.mock.MagicMock()
def fake_get_tensors(node_name, output_slot, debug_op, device_name=None):
del device_name # Unused.
if node_name == "a" and output_slot == 0 and debug_op == "DebugIdentity":
return [np.array([[-1.0], [1.0]])]
elif node_name == "a" and output_slot == 0 and debug_op == "DebugFoo":
return [np.array([[-2.0, 2.0]])]
with test.mock.patch.object(
dump, "get_tensors", side_effect=fake_get_tensors, autospec=True):
ev = evaluator.ExpressionEvaluator(dump)
self.assertAllClose(
[[4.0]],
ev.evaluate("np.matmul(`a:0:DebugFoo`, `a:0:DebugIdentity`)"))
def testEvaluateWithMultipleExecIndexes(self):
dump = test.mock.MagicMock()
def fake_get_tensors(node_name, output_slot, debug_op, device_name=None):
del debug_op, device_name # Unused.
if node_name == "a" and output_slot == 0:
return [np.array([[-1.0], [1.0]]), np.array([[-2.0], [2.0]])]
with test.mock.patch.object(
dump, "get_tensors", side_effect=fake_get_tensors, autospec=True):
ev = evaluator.ExpressionEvaluator(dump)
self.assertAllClose(
[[4.0]], ev.evaluate("np.matmul(`a:0[1]`.T, `a:0[0]`)"))
def testEvaluateExpressionWithUnmatchedBacktick(self):
dump = test.mock.MagicMock()
ev = evaluator.ExpressionEvaluator(dump)
with self.assertRaises(SyntaxError):
ev.evaluate("np.matmul(`a:0`, `b:0`) + `b:0")
def testEvaluateExpressionWithInvalidDebugTensorName(self):
dump = test.mock.MagicMock()
ev = evaluator.ExpressionEvaluator(dump)
with self.assertRaisesRegexp(
ValueError, r".* tensor name .* expression .* malformed"):
ev.evaluate("np.matmul(`a`, `b`)")
with self.assertRaisesRegexp(
ValueError, r".* tensor name .* expression .* malformed"):
ev.evaluate("np.matmul(`a:0:DebugIdentity:0`, `b:1:DebugNanCount:2`)")
with self.assertRaises(ValueError):
ev.evaluate("np.matmul(`a:0[]`, `b:0[]`)")
if __name__ == "__main__":
test.main()
| apache-2.0 | 6,509,914,859,158,450,000 | 40.649254 | 80 | 0.662247 | false |
giacomov/astromodels | astromodels/core/parameter_transformation.py | 2 | 1122 | import numpy as np
class ParameterTransformation(object):
def forward(self, external_value):
raise NotImplementedError("You have to implement this")
def backward(self, internal_value):
raise NotImplementedError("You have to implement this")
class LogarithmicTransformation(ParameterTransformation):
def forward(self, external_value):
# Throw an error if taking the logarithm of a negative number (or nan)
with np.errstate(invalid='raise'):
res = np.log10(external_value)
return res
def backward(self, internal_value):
return 10**internal_value
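# Illustrative round trip for the logarithmic transformation above:
# forward(100.0) == 2.0 (base-10 logarithm) and backward(2.0) == 100.0 (10**2).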
_known_transformations = {'log10': LogarithmicTransformation}
def get_transformation(transformation_name):
"""
Returns an instance of a transformation by name
:param transformation_name:
:return: instance of transformation with provided name
"""
    if transformation_name not in _known_transformations:
raise ValueError("Transformation %s is not known" % transformation_name)
else:
return _known_transformations[transformation_name]()
| bsd-3-clause | 4,455,478,577,003,843,600 | 21.44 | 80 | 0.699643 | false |
avadacatavra/servo | tests/wpt/web-platform-tests/tools/manifest/vcs.py | 11 | 3257 | import os
import subprocess
from .sourcefile import SourceFile
class Git(object):
def __init__(self, repo_root, url_base):
self.root = os.path.abspath(repo_root)
self.git = Git.get_func(repo_root)
self.url_base = url_base
@staticmethod
def get_func(repo_path):
def git(cmd, *args):
full_cmd = ["git", cmd] + list(args)
try:
return subprocess.check_output(full_cmd, cwd=repo_path, stderr=subprocess.STDOUT)
except WindowsError:
full_cmd[0] = "git.bat"
return subprocess.check_output(full_cmd, cwd=repo_path, stderr=subprocess.STDOUT)
return git
@classmethod
def for_path(cls, path, url_base):
git = Git.get_func(path)
try:
return cls(git("rev-parse", "--show-toplevel").rstrip(), url_base)
except subprocess.CalledProcessError:
return None
def _local_changes(self):
changes = {}
cmd = ["status", "-z", "--ignore-submodules=all"]
data = self.git(*cmd)
if data == "":
return changes
rename_data = None
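    # "git status -z" emits NUL-terminated records of the (roughly) porcelain
    # form "XY path"; a rename adds one extra NUL-terminated record with the
    # original path, e.g. " M a.html\0R  new.html\0old.html\0" (illustrative).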
    for entry in data.split("\0")[:-1]:
      if rename_data is not None:
        # The previous entry described a rename; this one is the original
        # path of that rename.
        rel_path = entry
        changes[rel_path] = rename_data
        rename_data = None
      else:
        # Entries are of the form "XY path", XY being the two-character
        # porcelain status code.
        status, rel_path = entry[:2], entry[3:]
        if status[0] == "R":
          # Renames are followed by an extra NUL-terminated field holding
          # the original path; it is consumed on the next iteration.
          rename_data = (rel_path, status)
        else:
          changes[rel_path] = (status, None)
return changes
def _show_file(self, path):
path = os.path.relpath(os.path.abspath(path), self.root)
return self.git("show", "HEAD:%s" % path)
def __iter__(self):
cmd = ["ls-tree", "-r", "-z", "--name-only", "HEAD"]
local_changes = self._local_changes()
for rel_path in self.git(*cmd).split("\0")[:-1]:
if not os.path.isdir(os.path.join(self.root, rel_path)):
if rel_path in local_changes:
contents = self._show_file(rel_path)
else:
contents = None
yield SourceFile(self.root,
rel_path,
self.url_base,
contents=contents)
class FileSystem(object):
def __init__(self, root, url_base):
self.root = root
self.url_base = url_base
from gitignore import gitignore
self.path_filter = gitignore.PathFilter(self.root)
def __iter__(self):
is_root = True
for dir_path, dir_names, filenames in os.walk(self.root):
rel_root = os.path.relpath(dir_path, self.root)
if is_root:
dir_names[:] = [item for item in dir_names if item not in
["tools", "resources", ".git"]]
is_root = False
for filename in filenames:
rel_path = os.path.join(rel_root, filename)
if self.path_filter(rel_path):
yield SourceFile(self.root,
rel_path,
self.url_base)
| mpl-2.0 | -6,813,501,468,117,658,000 | 33.284211 | 97 | 0.498618 | false |
lthurlow/Network-Grapher | proj/external/numpy-1.7.0/numpy/distutils/fcompiler/absoft.py | 89 | 5525 |
# http://www.absoft.com/literature/osxuserguide.pdf
# http://www.absoft.com/documentation.html
# Notes:
# - when using -g77 then use -DUNDERSCORE_G77 to compile f2py
# generated extension modules (works for f2py v2.45.241_1936 and up)
import os
from numpy.distutils.cpuinfo import cpu
from numpy.distutils.fcompiler import FCompiler, dummy_fortran_file
from numpy.distutils.misc_util import cyg2win32
compilers = ['AbsoftFCompiler']
class AbsoftFCompiler(FCompiler):
compiler_type = 'absoft'
description = 'Absoft Corp Fortran Compiler'
#version_pattern = r'FORTRAN 77 Compiler (?P<version>[^\s*,]*).*?Absoft Corp'
version_pattern = r'(f90:.*?(Absoft Pro FORTRAN Version|FORTRAN 77 Compiler|Absoft Fortran Compiler Version|Copyright Absoft Corporation.*?Version))'+\
r' (?P<version>[^\s*,]*)(.*?Absoft Corp|)'
# on windows: f90 -V -c dummy.f
# f90: Copyright Absoft Corporation 1994-1998 mV2; Cray Research, Inc. 1994-1996 CF90 (2.x.x.x f36t87) Version 2.3 Wed Apr 19, 2006 13:05:16
# samt5735(8)$ f90 -V -c dummy.f
# f90: Copyright Absoft Corporation 1994-2002; Absoft Pro FORTRAN Version 8.0
# Note that fink installs g77 as f77, so need to use f90 for detection.
executables = {
'version_cmd' : None, # set by update_executables
'compiler_f77' : ["f77"],
'compiler_fix' : ["f90"],
'compiler_f90' : ["f90"],
'linker_so' : ["<F90>"],
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"]
}
if os.name=='nt':
library_switch = '/out:' #No space after /out:!
module_dir_switch = None
module_include_switch = '-p'
def update_executables(self):
f = cyg2win32(dummy_fortran_file())
self.executables['version_cmd'] = ['<F90>', '-V', '-c',
f+'.f', '-o', f+'.o']
def get_flags_linker_so(self):
if os.name=='nt':
opt = ['/dll']
# The "-K shared" switches are being left in for pre-9.0 versions
# of Absoft though I don't think versions earlier than 9 can
# actually be used to build shared libraries. In fact, version
# 8 of Absoft doesn't recognize "-K shared" and will fail.
elif self.get_version() >= '9.0':
opt = ['-shared']
else:
opt = ["-K","shared"]
return opt
def library_dir_option(self, dir):
if os.name=='nt':
return ['-link','/PATH:"%s"' % (dir)]
return "-L" + dir
def library_option(self, lib):
if os.name=='nt':
return '%s.lib' % (lib)
return "-l" + lib
def get_library_dirs(self):
opt = FCompiler.get_library_dirs(self)
d = os.environ.get('ABSOFT')
if d:
if self.get_version() >= '10.0':
# use shared libraries, the static libraries were not compiled -fPIC
prefix = 'sh'
else:
prefix = ''
if cpu.is_64bit():
suffix = '64'
else:
suffix = ''
opt.append(os.path.join(d, '%slib%s' % (prefix, suffix)))
return opt
def get_libraries(self):
opt = FCompiler.get_libraries(self)
if self.get_version() >= '11.0':
opt.extend(['af90math', 'afio', 'af77math', 'amisc'])
elif self.get_version() >= '10.0':
opt.extend(['af90math', 'afio', 'af77math', 'U77'])
elif self.get_version() >= '8.0':
opt.extend(['f90math','fio','f77math','U77'])
else:
opt.extend(['fio','f90math','fmath','U77'])
if os.name =='nt':
opt.append('COMDLG32')
return opt
def get_flags(self):
opt = FCompiler.get_flags(self)
if os.name != 'nt':
opt.extend(['-s'])
if self.get_version():
if self.get_version()>='8.2':
opt.append('-fpic')
return opt
def get_flags_f77(self):
opt = FCompiler.get_flags_f77(self)
opt.extend(['-N22','-N90','-N110'])
v = self.get_version()
if os.name == 'nt':
if v and v>='8.0':
opt.extend(['-f','-N15'])
else:
opt.append('-f')
if v:
if v<='4.6':
opt.append('-B108')
else:
# Though -N15 is undocumented, it works with
# Absoft 8.0 on Linux
opt.append('-N15')
return opt
def get_flags_f90(self):
opt = FCompiler.get_flags_f90(self)
opt.extend(["-YCFRL=1","-YCOM_NAMES=LCS","-YCOM_PFX","-YEXT_PFX",
"-YCOM_SFX=_","-YEXT_SFX=_","-YEXT_NAMES=LCS"])
if self.get_version():
if self.get_version()>'4.6':
opt.extend(["-YDEALLOC=ALL"])
return opt
def get_flags_fix(self):
opt = FCompiler.get_flags_fix(self)
opt.extend(["-YCFRL=1","-YCOM_NAMES=LCS","-YCOM_PFX","-YEXT_PFX",
"-YCOM_SFX=_","-YEXT_SFX=_","-YEXT_NAMES=LCS"])
opt.extend(["-f","fixed"])
return opt
def get_flags_opt(self):
opt = ['-O']
return opt
if __name__ == '__main__':
from distutils import log
log.set_verbosity(2)
from numpy.distutils.fcompiler import new_fcompiler
compiler = new_fcompiler(compiler='absoft')
compiler.customize()
print(compiler.get_version())
| mit | -3,692,320,067,876,852,700 | 33.748428 | 155 | 0.527783 | false |
sugartom/tensorflow-alien | tensorflow/examples/learn/text_classification.py | 39 | 5106 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of Estimator for DNN-based text classification with DBpedia data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import pandas
from sklearn import metrics
import tensorflow as tf
from tensorflow.contrib.layers.python.layers import encoders
learn = tf.contrib.learn
FLAGS = None
MAX_DOCUMENT_LENGTH = 10
EMBEDDING_SIZE = 50
n_words = 0
def bag_of_words_model(features, target):
"""A bag-of-words model. Note it disregards the word order in the text."""
target = tf.one_hot(target, 15, 1, 0)
features = encoders.bow_encoder(
features, vocab_size=n_words, embed_dim=EMBEDDING_SIZE)
logits = tf.contrib.layers.fully_connected(features, 15, activation_fn=None)
loss = tf.contrib.losses.softmax_cross_entropy(logits, target)
train_op = tf.contrib.layers.optimize_loss(
loss,
tf.contrib.framework.get_global_step(),
optimizer='Adam',
learning_rate=0.01)
return ({
'class': tf.argmax(logits, 1),
'prob': tf.nn.softmax(logits)
}, loss, train_op)
def rnn_model(features, target):
"""RNN model to predict from sequence of words to a class."""
# Convert indexes of words into embeddings.
# This creates embeddings matrix of [n_words, EMBEDDING_SIZE] and then
# maps word indexes of the sequence into [batch_size, sequence_length,
# EMBEDDING_SIZE].
word_vectors = tf.contrib.layers.embed_sequence(
features, vocab_size=n_words, embed_dim=EMBEDDING_SIZE, scope='words')
# Split into list of embedding per word, while removing doc length dim.
# word_list results to be a list of tensors [batch_size, EMBEDDING_SIZE].
word_list = tf.unstack(word_vectors, axis=1)
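  # Illustrative shapes, assuming a batch of 32 documents: word_vectors is
  # [32, MAX_DOCUMENT_LENGTH, EMBEDDING_SIZE] = [32, 10, 50], so word_list is
  # a list of 10 tensors, each of shape [32, 50].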
# Create a Gated Recurrent Unit cell with hidden size of EMBEDDING_SIZE.
cell = tf.contrib.rnn.GRUCell(EMBEDDING_SIZE)
# Create an unrolled Recurrent Neural Networks to length of
# MAX_DOCUMENT_LENGTH and passes word_list as inputs for each unit.
_, encoding = tf.contrib.rnn.static_rnn(cell, word_list, dtype=tf.float32)
  # Given encoding of RNN, take encoding of last step (e.g. hidden size of the
# neural network of last step) and pass it as features for logistic
# regression over output classes.
target = tf.one_hot(target, 15, 1, 0)
logits = tf.contrib.layers.fully_connected(encoding, 15, activation_fn=None)
loss = tf.contrib.losses.softmax_cross_entropy(logits, target)
# Create a training op.
train_op = tf.contrib.layers.optimize_loss(
loss,
tf.contrib.framework.get_global_step(),
optimizer='Adam',
learning_rate=0.01)
return ({
'class': tf.argmax(logits, 1),
'prob': tf.nn.softmax(logits)
}, loss, train_op)
def main(unused_argv):
global n_words
# Prepare training and testing data
dbpedia = learn.datasets.load_dataset(
'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data)
x_train = pandas.DataFrame(dbpedia.train.data)[1]
y_train = pandas.Series(dbpedia.train.target)
x_test = pandas.DataFrame(dbpedia.test.data)[1]
y_test = pandas.Series(dbpedia.test.target)
# Process vocabulary
vocab_processor = learn.preprocessing.VocabularyProcessor(MAX_DOCUMENT_LENGTH)
x_transform_train = vocab_processor.fit_transform(x_train)
x_transform_test = vocab_processor.transform(x_test)
x_train = np.array(list(x_transform_train))
x_test = np.array(list(x_transform_test))
n_words = len(vocab_processor.vocabulary_)
print('Total words: %d' % n_words)
# Build model
# Switch between rnn_model and bag_of_words_model to test different models.
model_fn = rnn_model
if FLAGS.bow_model:
model_fn = bag_of_words_model
classifier = learn.Estimator(model_fn=model_fn)
# Train and predict
classifier.fit(x_train, y_train, steps=100)
y_predicted = [
p['class'] for p in classifier.predict(
x_test, as_iterable=True)
]
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--test_with_fake_data',
default=False,
help='Test the example code with fake data.',
action='store_true')
parser.add_argument(
'--bow_model',
default=False,
help='Run with BOW model instead of RNN.',
action='store_true')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 | 660,270,608,775,467,100 | 33.5 | 80 | 0.707991 | false |
biicode/bii-server | test/model/social_account_test.py | 2 | 2004 | import unittest
from biicode.server.model.social_account import SocialAccount, SocialAccountToken
from biicode.server.model.epoch.utc_datetime import UtcDatetime
import datetime
class SocialAccountTest(unittest.TestCase):
def setUp(self):
self.utc_datetime = UtcDatetime.deserialize(datetime.datetime.now())
def test_social_token_serialization(self):
social_token = SocialAccountToken("xxzc", "zxcc", self.utc_datetime)
serialized_social_token = social_token.serialize()
self.assertEquals(SocialAccountToken.deserialize(serialized_social_token), social_token)
def test_social_token_no_secret_serialization(self):
social_token = SocialAccountToken("xxzc", "", self.utc_datetime)
serialized_social_token = social_token.serialize()
self.assertEquals(SocialAccountToken.deserialize(serialized_social_token), social_token)
def test_social_account_serialization(self):
tokens = [SocialAccountToken("xxzc", "zxcc", self.utc_datetime),
SocialAccountToken("xxzc", "zxcc", self.utc_datetime)]
social_account = SocialAccount("zcas",
self.utc_datetime,
self.utc_datetime,
tokens,
"zcc")
serialized_social_account = social_account.serialize()
self.assertEquals(SocialAccount.deserialize(serialized_social_account), social_account)
def test_social_account_without_token_serialization(self):
tokens = []
social_account = SocialAccount("zcas",
self.utc_datetime,
self.utc_datetime,
tokens,
"zcc")
serialized_social_account = social_account.serialize()
self.assertEquals(SocialAccount.deserialize(serialized_social_account), social_account)
| mit | -9,038,586,863,494,634,000 | 44.545455 | 96 | 0.616267 | false |
PythonicNinja/django-ddp | dddp/management/commands/dddp.py | 1 | 5318 | """Django DDP WebSocket service."""
from __future__ import print_function, absolute_import
import collections
import inspect
import optparse
import random
import signal
import socket
from django.core.management.base import BaseCommand
from django.db import connection, close_old_connections
from django.utils.module_loading import import_string
import ejson
import gevent
import gevent.monkey
import gevent.queue
import gevent.select
import geventwebsocket
import psycogreen.gevent
from dddp import autodiscover
from dddp.postgres import PostgresGreenlet
from dddp.websocket import DDPWebSocketApplication
def ddpp_sockjs_xhr(environ, start_response):
"""Dummy method that doesn't handle XHR requests."""
start_response(
'404 Not found',
[
('Content-Type', 'text/plain; charset=UTF-8'),
(
'Access-Control-Allow-Origin',
'/'.join(environ['HTTP_REFERER'].split('/')[:3]),
),
('Access-Control-Allow-Credentials', 'true'),
# ('access-control-allow-credentials', 'true'),
('Cache-Control', 'no-store, no-cache, must-revalidate, max-age=0'),
('Connection', 'keep-alive'),
('Vary', 'Origin'),
],
)
yield 'No.'
def ddpp_sockjs_info(environ, start_response):
"""Inform client that WebSocket service is available."""
start_response(
'200 OK',
[
('Content-Type', 'application/json; charset=UTF-8'),
(
'Access-Control-Allow-Origin',
'/'.join(environ['HTTP_REFERER'].split('/')[:3]),
),
('Access-Control-Allow-Credentials', 'true'),
# ('access-control-allow-credentials', 'true'),
('Cache-Control', 'no-store, no-cache, must-revalidate, max-age=0'),
('Connection', 'keep-alive'),
('Vary', 'Origin'),
],
)
yield ejson.dumps(collections.OrderedDict([
('websocket', True),
('origins', [
'*:*',
]),
('cookie_needed', False),
('entropy', random.getrandbits(32)),
]))
class Command(BaseCommand):
"""Command to run DDP web service."""
args = 'HOST PORT'
help = 'Run DDP service'
requires_system_checks = False
option_list = BaseCommand.option_list + (
optparse.make_option(
'-H', '--host', dest="host", metavar='HOST',
help='TCP listening host (default: localhost)', default='localhost',
),
optparse.make_option(
'-p', '--port', dest="port", metavar='PORT',
help='TCP listening port (default: 8000)', default='8000',
),
)
def handle(self, *args, **options):
"""Spawn greenlets for handling websockets and PostgreSQL calls."""
        # shut down existing connections, monkey patch stdlib for gevent.
close_old_connections()
gevent.monkey.patch_all()
psycogreen.gevent.patch_psycopg()
debug = int(options['verbosity']) > 1
# setup PostgresGreenlet to multiplex DB calls
postgres = PostgresGreenlet(connection, debug=debug)
DDPWebSocketApplication.pgworker = postgres
# use settings.WSGI_APPLICATION or fallback to default Django WSGI app
from django.conf import settings
if hasattr(settings, 'WSGI_APPLICATION'):
wsgi_name = settings.WSGI_APPLICATION
wsgi_app = import_string(wsgi_name)
else:
from django.core.wsgi import get_wsgi_application
wsgi_app = get_wsgi_application()
wsgi_name = str(wsgi_app.__class__)
resource = geventwebsocket.Resource({
r'/websocket': DDPWebSocketApplication,
r'^/sockjs/\d+/\w+/websocket$': DDPWebSocketApplication,
r'^/sockjs/\d+/\w+/xhr$': ddpp_sockjs_xhr,
r'^/sockjs/info$': ddpp_sockjs_info,
r'^/(?!(websocket|sockjs)/)': wsgi_app,
})
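        # Net effect of the routing above (informal summary): WebSocket and
        # SockJS endpoints are handled by the DDP handlers registered here,
        # while every other path falls through to the regular WSGI app.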
# setup WebSocketServer to dispatch web requests
host = options['host']
port = options['port']
if port.isdigit():
port = int(port)
else:
port = socket.getservbyname(port)
webserver = geventwebsocket.WebSocketServer(
(host, port),
resource,
debug=debug,
)
def killall(*args, **kwargs):
"""Kill all green threads."""
postgres.stop()
webserver.stop()
# die gracefully with SIGINT or SIGQUIT
gevent.signal(signal.SIGINT, killall)
gevent.signal(signal.SIGQUIT, killall)
print('=> Discovering DDP endpoints...')
ddp = autodiscover()
ddp.pgworker = postgres
print(
'\n'.join(
' %s' % api_path
for api_path
in sorted(ddp.api_path_map())
),
)
# start greenlets
postgres.start()
print('=> Started PostgresGreenlet.')
web = gevent.spawn(webserver.serve_forever)
print('=> Started DDPWebSocketApplication.')
print('=> Started your app (%s).' % wsgi_name)
print('')
print('=> App running at: http://%s:%d/' % (host, port))
gevent.joinall([postgres, web])
| mit | 2,294,262,070,700,432,100 | 31.036145 | 80 | 0.573524 | false |
mrkm4ntr/incubator-airflow | airflow/migrations/versions/03bc53e68815_add_sm_dag_index.py | 8 | 1274 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""merge_heads_2
Revision ID: 03bc53e68815
Revises: 0a2a5b66e19d, bf00311e1990
Create Date: 2018-11-24 20:21:46.605414
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = '03bc53e68815'
down_revision = ('0a2a5b66e19d', 'bf00311e1990')
branch_labels = None
depends_on = None
def upgrade(): # noqa: D103
op.create_index('sm_dag', 'sla_miss', ['dag_id'], unique=False)
def downgrade(): # noqa: D103
op.drop_index('sm_dag', table_name='sla_miss')
| apache-2.0 | -6,633,541,885,562,129,000 | 30.85 | 67 | 0.744113 | false |
stuarteberg/numpy | numpy/lib/tests/test_stride_tricks.py | 40 | 14732 | from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import (
run_module_suite, assert_equal, assert_array_equal,
assert_raises, assert_
)
from numpy.lib.stride_tricks import (
as_strided, broadcast_arrays, _broadcast_shape, broadcast_to
)
def assert_shapes_correct(input_shapes, expected_shape):
# Broadcast a list of arrays with the given input shapes and check the
# common output shape.
inarrays = [np.zeros(s) for s in input_shapes]
outarrays = broadcast_arrays(*inarrays)
outshapes = [a.shape for a in outarrays]
expected = [expected_shape] * len(inarrays)
assert_equal(outshapes, expected)
def assert_incompatible_shapes_raise(input_shapes):
# Broadcast a list of arrays with the given (incompatible) input shapes
# and check that they raise a ValueError.
inarrays = [np.zeros(s) for s in input_shapes]
assert_raises(ValueError, broadcast_arrays, *inarrays)
def assert_same_as_ufunc(shape0, shape1, transposed=False, flipped=False):
# Broadcast two shapes against each other and check that the data layout
# is the same as if a ufunc did the broadcasting.
x0 = np.zeros(shape0, dtype=int)
# Note that multiply.reduce's identity element is 1.0, so when shape1==(),
# this gives the desired n==1.
n = int(np.multiply.reduce(shape1))
x1 = np.arange(n).reshape(shape1)
if transposed:
x0 = x0.T
x1 = x1.T
if flipped:
x0 = x0[::-1]
x1 = x1[::-1]
# Use the add ufunc to do the broadcasting. Since we're adding 0s to x1, the
# result should be exactly the same as the broadcasted view of x1.
y = x0 + x1
b0, b1 = broadcast_arrays(x0, x1)
assert_array_equal(y, b1)
def test_same():
x = np.arange(10)
y = np.arange(10)
bx, by = broadcast_arrays(x, y)
assert_array_equal(x, bx)
assert_array_equal(y, by)
def test_one_off():
x = np.array([[1, 2, 3]])
y = np.array([[1], [2], [3]])
bx, by = broadcast_arrays(x, y)
bx0 = np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]])
by0 = bx0.T
assert_array_equal(bx0, bx)
assert_array_equal(by0, by)
def test_same_input_shapes():
# Check that the final shape is just the input shape.
data = [
(),
(1,),
(3,),
(0, 1),
(0, 3),
(1, 0),
(3, 0),
(1, 3),
(3, 1),
(3, 3),
]
for shape in data:
input_shapes = [shape]
# Single input.
assert_shapes_correct(input_shapes, shape)
# Double input.
input_shapes2 = [shape, shape]
assert_shapes_correct(input_shapes2, shape)
# Triple input.
input_shapes3 = [shape, shape, shape]
assert_shapes_correct(input_shapes3, shape)
def test_two_compatible_by_ones_input_shapes():
# Check that two different input shapes of the same length, but some have
# ones, broadcast to the correct shape.
data = [
[[(1,), (3,)], (3,)],
[[(1, 3), (3, 3)], (3, 3)],
[[(3, 1), (3, 3)], (3, 3)],
[[(1, 3), (3, 1)], (3, 3)],
[[(1, 1), (3, 3)], (3, 3)],
[[(1, 1), (1, 3)], (1, 3)],
[[(1, 1), (3, 1)], (3, 1)],
[[(1, 0), (0, 0)], (0, 0)],
[[(0, 1), (0, 0)], (0, 0)],
[[(1, 0), (0, 1)], (0, 0)],
[[(1, 1), (0, 0)], (0, 0)],
[[(1, 1), (1, 0)], (1, 0)],
[[(1, 1), (0, 1)], (0, 1)],
]
for input_shapes, expected_shape in data:
assert_shapes_correct(input_shapes, expected_shape)
# Reverse the input shapes since broadcasting should be symmetric.
assert_shapes_correct(input_shapes[::-1], expected_shape)
def test_two_compatible_by_prepending_ones_input_shapes():
# Check that two different input shapes (of different lengths) broadcast
# to the correct shape.
data = [
[[(), (3,)], (3,)],
[[(3,), (3, 3)], (3, 3)],
[[(3,), (3, 1)], (3, 3)],
[[(1,), (3, 3)], (3, 3)],
[[(), (3, 3)], (3, 3)],
[[(1, 1), (3,)], (1, 3)],
[[(1,), (3, 1)], (3, 1)],
[[(1,), (1, 3)], (1, 3)],
[[(), (1, 3)], (1, 3)],
[[(), (3, 1)], (3, 1)],
[[(), (0,)], (0,)],
[[(0,), (0, 0)], (0, 0)],
[[(0,), (0, 1)], (0, 0)],
[[(1,), (0, 0)], (0, 0)],
[[(), (0, 0)], (0, 0)],
[[(1, 1), (0,)], (1, 0)],
[[(1,), (0, 1)], (0, 1)],
[[(1,), (1, 0)], (1, 0)],
[[(), (1, 0)], (1, 0)],
[[(), (0, 1)], (0, 1)],
]
for input_shapes, expected_shape in data:
assert_shapes_correct(input_shapes, expected_shape)
# Reverse the input shapes since broadcasting should be symmetric.
assert_shapes_correct(input_shapes[::-1], expected_shape)
def test_incompatible_shapes_raise_valueerror():
# Check that a ValueError is raised for incompatible shapes.
data = [
[(3,), (4,)],
[(2, 3), (2,)],
[(3,), (3,), (4,)],
[(1, 3, 4), (2, 3, 3)],
]
for input_shapes in data:
assert_incompatible_shapes_raise(input_shapes)
# Reverse the input shapes since broadcasting should be symmetric.
assert_incompatible_shapes_raise(input_shapes[::-1])
def test_same_as_ufunc():
# Check that the data layout is the same as if a ufunc did the operation.
data = [
[[(1,), (3,)], (3,)],
[[(1, 3), (3, 3)], (3, 3)],
[[(3, 1), (3, 3)], (3, 3)],
[[(1, 3), (3, 1)], (3, 3)],
[[(1, 1), (3, 3)], (3, 3)],
[[(1, 1), (1, 3)], (1, 3)],
[[(1, 1), (3, 1)], (3, 1)],
[[(1, 0), (0, 0)], (0, 0)],
[[(0, 1), (0, 0)], (0, 0)],
[[(1, 0), (0, 1)], (0, 0)],
[[(1, 1), (0, 0)], (0, 0)],
[[(1, 1), (1, 0)], (1, 0)],
[[(1, 1), (0, 1)], (0, 1)],
[[(), (3,)], (3,)],
[[(3,), (3, 3)], (3, 3)],
[[(3,), (3, 1)], (3, 3)],
[[(1,), (3, 3)], (3, 3)],
[[(), (3, 3)], (3, 3)],
[[(1, 1), (3,)], (1, 3)],
[[(1,), (3, 1)], (3, 1)],
[[(1,), (1, 3)], (1, 3)],
[[(), (1, 3)], (1, 3)],
[[(), (3, 1)], (3, 1)],
[[(), (0,)], (0,)],
[[(0,), (0, 0)], (0, 0)],
[[(0,), (0, 1)], (0, 0)],
[[(1,), (0, 0)], (0, 0)],
[[(), (0, 0)], (0, 0)],
[[(1, 1), (0,)], (1, 0)],
[[(1,), (0, 1)], (0, 1)],
[[(1,), (1, 0)], (1, 0)],
[[(), (1, 0)], (1, 0)],
[[(), (0, 1)], (0, 1)],
]
for input_shapes, expected_shape in data:
assert_same_as_ufunc(input_shapes[0], input_shapes[1],
"Shapes: %s %s" % (input_shapes[0], input_shapes[1]))
# Reverse the input shapes since broadcasting should be symmetric.
assert_same_as_ufunc(input_shapes[1], input_shapes[0])
# Try them transposed, too.
assert_same_as_ufunc(input_shapes[0], input_shapes[1], True)
# ... and flipped for non-rank-0 inputs in order to test negative
# strides.
if () not in input_shapes:
assert_same_as_ufunc(input_shapes[0], input_shapes[1], False, True)
assert_same_as_ufunc(input_shapes[0], input_shapes[1], True, True)
def test_broadcast_to_succeeds():
data = [
[np.array(0), (0,), np.array(0)],
[np.array(0), (1,), np.zeros(1)],
[np.array(0), (3,), np.zeros(3)],
[np.ones(1), (1,), np.ones(1)],
[np.ones(1), (2,), np.ones(2)],
[np.ones(1), (1, 2, 3), np.ones((1, 2, 3))],
[np.arange(3), (3,), np.arange(3)],
[np.arange(3), (1, 3), np.arange(3).reshape(1, -1)],
[np.arange(3), (2, 3), np.array([[0, 1, 2], [0, 1, 2]])],
# test if shape is not a tuple
[np.ones(0), 0, np.ones(0)],
[np.ones(1), 1, np.ones(1)],
[np.ones(1), 2, np.ones(2)],
# these cases with size 0 are strange, but they reproduce the behavior
# of broadcasting with ufuncs (see test_same_as_ufunc above)
[np.ones(1), (0,), np.ones(0)],
[np.ones((1, 2)), (0, 2), np.ones((0, 2))],
[np.ones((2, 1)), (2, 0), np.ones((2, 0))],
]
for input_array, shape, expected in data:
actual = broadcast_to(input_array, shape)
assert_array_equal(expected, actual)
def test_broadcast_to_raises():
data = [
[(0,), ()],
[(1,), ()],
[(3,), ()],
[(3,), (1,)],
[(3,), (2,)],
[(3,), (4,)],
[(1, 2), (2, 1)],
[(1, 1), (1,)],
[(1,), -1],
[(1,), (-1,)],
[(1, 2), (-1, 2)],
]
for orig_shape, target_shape in data:
arr = np.zeros(orig_shape)
assert_raises(ValueError, lambda: broadcast_to(arr, target_shape))
def test_broadcast_shape():
    # broadcast_shape is already exercised indirectly by broadcast_arrays
assert_raises(ValueError, _broadcast_shape)
assert_equal(_broadcast_shape([1, 2]), (2,))
assert_equal(_broadcast_shape(np.ones((1, 1))), (1, 1))
assert_equal(_broadcast_shape(np.ones((1, 1)), np.ones((3, 4))), (3, 4))
assert_equal(_broadcast_shape(*([np.ones((1, 2))] * 32)), (1, 2))
assert_equal(_broadcast_shape(*([np.ones((1, 2))] * 100)), (1, 2))
# regression tests for gh-5862
assert_equal(_broadcast_shape(*([np.ones(2)] * 32 + [1])), (2,))
bad_args = [np.ones(2)] * 32 + [np.ones(3)] * 32
assert_raises(ValueError, lambda: _broadcast_shape(*bad_args))
def test_as_strided():
a = np.array([None])
a_view = as_strided(a)
expected = np.array([None])
assert_array_equal(a_view, np.array([None]))
a = np.array([1, 2, 3, 4])
a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,))
expected = np.array([1, 3])
assert_array_equal(a_view, expected)
a = np.array([1, 2, 3, 4])
a_view = as_strided(a, shape=(3, 4), strides=(0, 1 * a.itemsize))
expected = np.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]])
assert_array_equal(a_view, expected)
# Regression test for gh-5081
dt = np.dtype([('num', 'i4'), ('obj', 'O')])
a = np.empty((4,), dtype=dt)
a['num'] = np.arange(1, 5)
a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize))
expected_num = [[1, 2, 3, 4]] * 3
expected_obj = [[None]*4]*3
assert_equal(a_view.dtype, dt)
assert_array_equal(expected_num, a_view['num'])
assert_array_equal(expected_obj, a_view['obj'])
# Make sure that void types without fields are kept unchanged
a = np.empty((4,), dtype='V4')
a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize))
assert_equal(a.dtype, a_view.dtype)
# Make sure that the only type that could fail is properly handled
dt = np.dtype({'names': [''], 'formats': ['V4']})
a = np.empty((4,), dtype=dt)
a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize))
assert_equal(a.dtype, a_view.dtype)
def as_strided_writeable():
arr = np.ones(10)
view = as_strided(arr, writeable=False)
assert_(not view.flags.writeable)
# Check that writeable also is fine:
view = as_strided(arr, writeable=True)
assert_(view.flags.writeable)
view[...] = 3
assert_array_equal(arr, np.full_like(arr, 3))
# Test that things do not break down for readonly:
arr.flags.writeable = False
view = as_strided(arr, writeable=False)
view = as_strided(arr, writeable=True)
assert_(not view.flags.writeable)
class VerySimpleSubClass(np.ndarray):
def __new__(cls, *args, **kwargs):
kwargs['subok'] = True
return np.array(*args, **kwargs).view(cls)
class SimpleSubClass(VerySimpleSubClass):
def __new__(cls, *args, **kwargs):
kwargs['subok'] = True
self = np.array(*args, **kwargs).view(cls)
self.info = 'simple'
return self
def __array_finalize__(self, obj):
self.info = getattr(obj, 'info', '') + ' finalized'
def test_subclasses():
# test that subclass is preserved only if subok=True
a = VerySimpleSubClass([1, 2, 3, 4])
assert_(type(a) is VerySimpleSubClass)
a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,))
assert_(type(a_view) is np.ndarray)
a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,), subok=True)
assert_(type(a_view) is VerySimpleSubClass)
# test that if a subclass has __array_finalize__, it is used
a = SimpleSubClass([1, 2, 3, 4])
a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,), subok=True)
assert_(type(a_view) is SimpleSubClass)
assert_(a_view.info == 'simple finalized')
# similar tests for broadcast_arrays
b = np.arange(len(a)).reshape(-1, 1)
a_view, b_view = broadcast_arrays(a, b)
assert_(type(a_view) is np.ndarray)
assert_(type(b_view) is np.ndarray)
assert_(a_view.shape == b_view.shape)
a_view, b_view = broadcast_arrays(a, b, subok=True)
assert_(type(a_view) is SimpleSubClass)
assert_(a_view.info == 'simple finalized')
assert_(type(b_view) is np.ndarray)
assert_(a_view.shape == b_view.shape)
# and for broadcast_to
shape = (2, 4)
a_view = broadcast_to(a, shape)
assert_(type(a_view) is np.ndarray)
assert_(a_view.shape == shape)
a_view = broadcast_to(a, shape, subok=True)
assert_(type(a_view) is SimpleSubClass)
assert_(a_view.info == 'simple finalized')
assert_(a_view.shape == shape)
def test_writeable():
# broadcast_to should return a readonly array
original = np.array([1, 2, 3])
result = broadcast_to(original, (2, 3))
assert_equal(result.flags.writeable, False)
assert_raises(ValueError, result.__setitem__, slice(None), 0)
# but the result of broadcast_arrays needs to be writeable (for now), to
# preserve backwards compatibility
for results in [broadcast_arrays(original),
broadcast_arrays(0, original)]:
for result in results:
assert_equal(result.flags.writeable, True)
# keep readonly input readonly
original.flags.writeable = False
_, result = broadcast_arrays(0, original)
assert_equal(result.flags.writeable, False)
    # regression test for GH6491
shape = (2,)
strides = [0]
tricky_array = as_strided(np.array(0), shape, strides)
other = np.zeros((1,))
first, second = broadcast_arrays(tricky_array, other)
assert_(first.shape == second.shape)
def test_reference_types():
input_array = np.array('a', dtype=object)
expected = np.array(['a'] * 3, dtype=object)
actual = broadcast_to(input_array, (3,))
assert_array_equal(expected, actual)
actual, _ = broadcast_arrays(input_array, np.ones(3))
assert_array_equal(expected, actual)
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause | 6,835,383,099,777,426,000 | 33.260465 | 82 | 0.530546 | false |
oseledets/pybtex | pybtex/database/input/bibtexml.py | 1 | 2801 | # Copyright (c) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Andrey Golovizin
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from xml.etree import cElementTree as ET
from pybtex.database import Entry, Person
from pybtex.database.input import BaseParser
bibtexns = '{http://bibtexml.sf.net/}'
def remove_ns(s):
if s.startswith(bibtexns):
return s[len(bibtexns):]
class Parser(BaseParser):
default_suffix = '.xml'
def parse_stream(self, stream):
t = ET.parse(stream)
entries = t.findall(bibtexns + 'entry')
self.data.add_entries(self.process_entry(entry) for entry in entries)
return self.data
def process_entry(self, entry):
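        # A minimal entry this method handles looks roughly like (illustrative,
        # with the namespace prefix bound to http://bibtexml.sf.net/):
        #   <bibtex:entry id="somekey">
        #     <bibtex:article>
        #       <bibtex:title>...</bibtex:title>
        #       <bibtex:author>...</bibtex:author>
        #     </bibtex:article>
        #   </bibtex:entry>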
def process_person(person_entry, role):
persons = person_entry.findall(bibtexns + 'person')
if persons:
for person in persons:
process_person(person, role)
else:
text = person_entry.text.strip()
if text:
e.add_person(Person(text), role)
else:
names = {}
for name in person_entry.getchildren():
names[remove_ns(name.tag)] = name.text
e.add_person(Person(**names), role)
id_ = entry.get('id')
item = entry.getchildren()[0]
type = remove_ns(item.tag)
e = Entry(type)
for field in item.getchildren():
field_name = remove_ns(field.tag)
if field_name in Person.valid_roles:
process_person(field, field_name)
else:
field_text = field.text if field.text is not None else ''
e.fields[field_name] = field_text
return id_, e
| mit | 3,705,326,811,759,129,600 | 38.450704 | 77 | 0.63513 | false |
zzzeek/test | mako/ext/beaker_cache.py | 2 | 1947 | """Provide a :class:`.CacheImpl` for the Beaker caching system."""
from mako import exceptions
from mako.cache import CacheImpl
_beaker_cache = None
class BeakerCacheImpl(CacheImpl):
"""A :class:`.CacheImpl` provided for the Beaker caching system.
This plugin is used by default, based on the default
value of ``'beaker'`` for the ``cache_impl`` parameter of the
:class:`.Template` or :class:`.TemplateLookup` classes.
"""
def __init__(self, cache):
global _beaker_cache
if _beaker_cache is None:
try:
from beaker import cache as beaker_cache
except ImportError, e:
raise exceptions.RuntimeException(
"the Beaker package is required to use cache "
"functionality.")
_beaker_cache = beaker_cache.CacheManager()
super(BeakerCacheImpl, self).__init__(cache)
def _get_cache(self, **kw):
expiretime = kw.pop('timeout', None)
if 'dir' in kw:
kw['data_dir'] = kw.pop('dir')
elif self.cache.template.module_directory:
kw['data_dir'] = self.cache.template.module_directory
if kw.get('type') == 'memcached':
kw['type'] = 'ext:memcached'
return _beaker_cache.get_cache(self.cache.id, **kw), \
{'expiretime':expiretime, 'starttime':self.cache.starttime}
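    # Illustrative template usage, assuming Mako's standard cache_* page
    # arguments (not verified against a particular Mako version):
    #   <%page cached="True" cache_timeout="60" cache_type="memcached"
    #          cache_url="memcached://127.0.0.1:11211"/>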
def get_and_replace(self, key, creation_function, **kw):
cache, kw = self._get_cache(**kw)
return cache.get(key, createfunc=creation_function, **kw)
def put(self, key, value, **kw):
cache, kw = self._get_cache(**kw)
cache.put(key, value, **kw)
def get(self, key, **kw):
cache, kw = self._get_cache(**kw)
return cache.get(key, **kw)
def invalidate(self, key, **kw):
cache, kw = self._get_cache(**kw)
cache.remove_value(key, **kw)
| mit | 390,992,043,370,332,860 | 33.157895 | 79 | 0.577298 | false |
espadrine/opera | chromium/src/third_party/libvpx/source/libvpx/third_party/googletest/src/xcode/Scripts/versiongenerate.py | 3088 | 4536 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A script to prepare version informtion for use the gtest Info.plist file.
This script extracts the version information from the configure.ac file and
uses it to generate a header file containing the same information. The
#defines in this header file will be included during the generation of
the Info.plist of the framework, giving the correct value to the version
shown in the Finder.
This script makes the following assumptions (these are faults of the script,
not problems with the Autoconf):
1. The AC_INIT macro will be contained within the first 1024 characters
of configure.ac
2. The version string will be 3 integers separated by periods and will be
     surrounded by square brackets, "[" and "]" (e.g. [1.0.1]). The first
segment represents the major version, the second represents the minor
version and the third represents the fix version.
3. No ")" character exists between the opening "(" and closing ")" of
AC_INIT, including in comments and character strings.
"""
import sys
import re
# Read the command line argument (the output directory for Version.h)
if (len(sys.argv) < 3):
print "Usage: versiongenerate.py input_dir output_dir"
sys.exit(1)
else:
input_dir = sys.argv[1]
output_dir = sys.argv[2]
# Read the first 1024 characters of the configure.ac file
config_file = open("%s/configure.ac" % input_dir, 'r')
buffer_size = 1024
opening_string = config_file.read(buffer_size)
config_file.close()
# Extract the version string from the AC_INIT macro
# The following version_expression means:
# Extract three integers separated by periods and surrounded by square
# brackets (e.g. "[1.0.1]") between "AC_INIT(" and ")". Do not be greedy
# (*? is the non-greedy flag) since that would pull in everything between
# the first "(" and the last ")" in the file.
version_expression = re.compile(r"AC_INIT\(.*?\[(\d+)\.(\d+)\.(\d+)\].*?\)",
re.DOTALL)
version_values = version_expression.search(opening_string)
major_version = version_values.group(1)
minor_version = version_values.group(2)
fix_version = version_values.group(3)
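# For example (illustrative only), a configure.ac whose AC_INIT line is
# AC_INIT([Google C++ Testing Framework], [1.6.0], [googletest@example.com])
# yields major_version == "1", minor_version == "6" and fix_version == "0".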
# Write the version information to a header file to be included in the
# Info.plist file.
file_data = """//
// DO NOT MODIFY THIS FILE (but you can delete it)
//
// This file is autogenerated by the versiongenerate.py script. This script
// is executed in a "Run Script" build phase when creating gtest.framework. This
// header file is not used during compilation of C-source. Rather, it simply
// defines some version strings for substitution in the Info.plist. Because of
// this, we are not restricted to C-syntax nor are we using include guards.
//
#define GTEST_VERSIONINFO_SHORT %s.%s
#define GTEST_VERSIONINFO_LONG %s.%s.%s
""" % (major_version, minor_version, major_version, minor_version, fix_version)
version_file = open("%s/Version.h" % output_dir, 'w')
version_file.write(file_data)
version_file.close()
| bsd-3-clause | 7,998,095,299,691,934,000 | 44.36 | 80 | 0.737654 | false |
xombiemp/CouchPotatoServer | libs/requests/sessions.py | 43 | 24273 | # -*- coding: utf-8 -*-
"""
requests.session
~~~~~~~~~~~~~~~~
This module provides a Session object to manage and persist settings across
requests (cookies, auth, proxies).
"""
import os
from collections import Mapping
from datetime import datetime
from .auth import _basic_auth_str
from .compat import cookielib, OrderedDict, urljoin, urlparse
from .cookies import (
cookiejar_from_dict, extract_cookies_to_jar, RequestsCookieJar, merge_cookies)
from .models import Request, PreparedRequest, DEFAULT_REDIRECT_LIMIT
from .hooks import default_hooks, dispatch_hook
from .utils import to_key_val_list, default_headers, to_native_string
from .exceptions import (
TooManyRedirects, InvalidSchema, ChunkedEncodingError, ContentDecodingError)
from .packages.urllib3._collections import RecentlyUsedContainer
from .structures import CaseInsensitiveDict
from .adapters import HTTPAdapter
from .utils import (
requote_uri, get_environ_proxies, get_netrc_auth, should_bypass_proxies,
get_auth_from_url
)
from .status_codes import codes
# formerly defined here, reexposed here for backward compatibility
from .models import REDIRECT_STATI
REDIRECT_CACHE_SIZE = 1000
def merge_setting(request_setting, session_setting, dict_class=OrderedDict):
"""
Determines appropriate setting for a given request, taking into account the
explicit setting on that request, and the setting in the session. If a
setting is a dictionary, they will be merged together using `dict_class`
"""
if session_setting is None:
return request_setting
if request_setting is None:
return session_setting
# Bypass if not a dictionary (e.g. verify)
if not (
isinstance(session_setting, Mapping) and
isinstance(request_setting, Mapping)
):
return request_setting
merged_setting = dict_class(to_key_val_list(session_setting))
merged_setting.update(to_key_val_list(request_setting))
# Remove keys that are set to None.
for (k, v) in merged_setting.items():
if v is None:
del merged_setting[k]
return merged_setting
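# Illustrative behaviour of merge_setting (not a doctest): request-level values
# win over session-level ones and a value of None removes the key, e.g.
# merge_setting({'b': None, 'c': 3}, {'a': 1, 'b': 2}) -> {'a': 1, 'c': 3}.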
def merge_hooks(request_hooks, session_hooks, dict_class=OrderedDict):
"""
Properly merges both requests and session hooks.
This is necessary because when request_hooks == {'response': []}, the
merge breaks Session hooks entirely.
"""
if session_hooks is None or session_hooks.get('response') == []:
return request_hooks
if request_hooks is None or request_hooks.get('response') == []:
return session_hooks
return merge_setting(request_hooks, session_hooks, dict_class)
class SessionRedirectMixin(object):
def resolve_redirects(self, resp, req, stream=False, timeout=None,
verify=True, cert=None, proxies=None, **adapter_kwargs):
"""Receives a Response. Returns a generator of Responses."""
i = 0
hist = [] # keep track of history
while resp.is_redirect:
prepared_request = req.copy()
if i > 0:
# Update history and keep track of redirects.
hist.append(resp)
new_hist = list(hist)
resp.history = new_hist
try:
resp.content # Consume socket so it can be released
except (ChunkedEncodingError, ContentDecodingError, RuntimeError):
resp.raw.read(decode_content=False)
if i >= self.max_redirects:
raise TooManyRedirects('Exceeded %s redirects.' % self.max_redirects)
# Release the connection back into the pool.
resp.close()
url = resp.headers['location']
method = req.method
# Handle redirection without scheme (see: RFC 1808 Section 4)
if url.startswith('//'):
parsed_rurl = urlparse(resp.url)
url = '%s:%s' % (parsed_rurl.scheme, url)
# The scheme should be lower case...
parsed = urlparse(url)
url = parsed.geturl()
# Facilitate relative 'location' headers, as allowed by RFC 7231.
# (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource')
# Compliant with RFC3986, we percent encode the url.
if not parsed.netloc:
url = urljoin(resp.url, requote_uri(url))
else:
url = requote_uri(url)
prepared_request.url = to_native_string(url)
# Cache the url, unless it redirects to itself.
if resp.is_permanent_redirect and req.url != prepared_request.url:
self.redirect_cache[req.url] = prepared_request.url
# http://tools.ietf.org/html/rfc7231#section-6.4.4
if (resp.status_code == codes.see_other and
method != 'HEAD'):
method = 'GET'
# Do what the browsers do, despite standards...
# First, turn 302s into GETs.
if resp.status_code == codes.found and method != 'HEAD':
method = 'GET'
# Second, if a POST is responded to with a 301, turn it into a GET.
# This bizarre behaviour is explained in Issue 1704.
if resp.status_code == codes.moved and method == 'POST':
method = 'GET'
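            # Net effect of the three rules above (informal summary): a POST
            # redirected with a 301, 302 or 303 is reissued as a GET, while a
            # HEAD request always keeps its method.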
prepared_request.method = method
# https://github.com/kennethreitz/requests/issues/1084
if resp.status_code not in (codes.temporary_redirect, codes.permanent_redirect):
if 'Content-Length' in prepared_request.headers:
del prepared_request.headers['Content-Length']
prepared_request.body = None
headers = prepared_request.headers
try:
del headers['Cookie']
except KeyError:
pass
# Extract any cookies sent on the response to the cookiejar
# in the new request. Because we've mutated our copied prepared
# request, use the old one that we haven't yet touched.
extract_cookies_to_jar(prepared_request._cookies, req, resp.raw)
prepared_request._cookies.update(self.cookies)
prepared_request.prepare_cookies(prepared_request._cookies)
# Rebuild auth and proxy information.
proxies = self.rebuild_proxies(prepared_request, proxies)
self.rebuild_auth(prepared_request, resp)
# Override the original request.
req = prepared_request
resp = self.send(
req,
stream=stream,
timeout=timeout,
verify=verify,
cert=cert,
proxies=proxies,
allow_redirects=False,
**adapter_kwargs
)
extract_cookies_to_jar(self.cookies, prepared_request, resp.raw)
i += 1
yield resp
def rebuild_auth(self, prepared_request, response):
"""
When being redirected we may want to strip authentication from the
request to avoid leaking credentials. This method intelligently removes
and reapplies authentication where possible to avoid credential loss.
"""
headers = prepared_request.headers
url = prepared_request.url
if 'Authorization' in headers:
# If we get redirected to a new host, we should strip out any
# authentication headers.
original_parsed = urlparse(response.request.url)
redirect_parsed = urlparse(url)
if (original_parsed.hostname != redirect_parsed.hostname):
del headers['Authorization']
# .netrc might have more auth for us on our new host.
new_auth = get_netrc_auth(url) if self.trust_env else None
if new_auth is not None:
prepared_request.prepare_auth(new_auth)
return
def rebuild_proxies(self, prepared_request, proxies):
"""
This method re-evaluates the proxy configuration by considering the
environment variables. If we are redirected to a URL covered by
NO_PROXY, we strip the proxy configuration. Otherwise, we set missing
proxy keys for this URL (in case they were stripped by a previous
redirect).
This method also replaces the Proxy-Authorization header where
necessary.
"""
headers = prepared_request.headers
url = prepared_request.url
scheme = urlparse(url).scheme
new_proxies = proxies.copy() if proxies is not None else {}
if self.trust_env and not should_bypass_proxies(url):
environ_proxies = get_environ_proxies(url)
proxy = environ_proxies.get(scheme)
if proxy:
new_proxies.setdefault(scheme, environ_proxies[scheme])
if 'Proxy-Authorization' in headers:
del headers['Proxy-Authorization']
try:
username, password = get_auth_from_url(new_proxies[scheme])
except KeyError:
username, password = None, None
if username and password:
headers['Proxy-Authorization'] = _basic_auth_str(username, password)
return new_proxies
class Session(SessionRedirectMixin):
"""A Requests session.
Provides cookie persistence, connection-pooling, and configuration.
Basic Usage::
>>> import requests
>>> s = requests.Session()
>>> s.get('http://httpbin.org/get')
200
Or as a context manager::
>>> with requests.Session() as s:
>>> s.get('http://httpbin.org/get')
200
"""
__attrs__ = [
'headers', 'cookies', 'auth', 'proxies', 'hooks', 'params', 'verify',
'cert', 'prefetch', 'adapters', 'stream', 'trust_env',
'max_redirects',
]
def __init__(self):
#: A case-insensitive dictionary of headers to be sent on each
#: :class:`Request <Request>` sent from this
#: :class:`Session <Session>`.
self.headers = default_headers()
#: Default Authentication tuple or object to attach to
#: :class:`Request <Request>`.
self.auth = None
#: Dictionary mapping protocol to the URL of the proxy (e.g.
#: {'http': 'foo.bar:3128'}) to be used on each
#: :class:`Request <Request>`.
self.proxies = {}
#: Event-handling hooks.
self.hooks = default_hooks()
#: Dictionary of querystring data to attach to each
#: :class:`Request <Request>`. The dictionary values may be lists for
#: representing multivalued query parameters.
self.params = {}
#: Stream response content default.
self.stream = False
#: SSL Verification default.
self.verify = True
#: SSL certificate default.
self.cert = None
#: Maximum number of redirects allowed. If the request exceeds this
#: limit, a :class:`TooManyRedirects` exception is raised.
self.max_redirects = DEFAULT_REDIRECT_LIMIT
#: Should we trust the environment?
self.trust_env = True
#: A CookieJar containing all currently outstanding cookies set on this
#: session. By default it is a
#: :class:`RequestsCookieJar <requests.cookies.RequestsCookieJar>`, but
#: may be any other ``cookielib.CookieJar`` compatible object.
self.cookies = cookiejar_from_dict({})
# Default connection adapters.
self.adapters = OrderedDict()
self.mount('https://', HTTPAdapter())
self.mount('http://', HTTPAdapter())
# Only store 1000 redirects to prevent using infinite memory
self.redirect_cache = RecentlyUsedContainer(REDIRECT_CACHE_SIZE)
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def prepare_request(self, request):
"""Constructs a :class:`PreparedRequest <PreparedRequest>` for
transmission and returns it. The :class:`PreparedRequest` has settings
merged from the :class:`Request <Request>` instance and those of the
:class:`Session`.
:param request: :class:`Request` instance to prepare with this
session's settings.
"""
cookies = request.cookies or {}
# Bootstrap CookieJar.
if not isinstance(cookies, cookielib.CookieJar):
cookies = cookiejar_from_dict(cookies)
# Merge with session cookies
merged_cookies = merge_cookies(
merge_cookies(RequestsCookieJar(), self.cookies), cookies)
# Set environment's basic authentication if not explicitly set.
auth = request.auth
if self.trust_env and not auth and not self.auth:
auth = get_netrc_auth(request.url)
p = PreparedRequest()
p.prepare(
method=request.method.upper(),
url=request.url,
files=request.files,
data=request.data,
json=request.json,
headers=merge_setting(request.headers, self.headers, dict_class=CaseInsensitiveDict),
params=merge_setting(request.params, self.params),
auth=merge_setting(auth, self.auth),
cookies=merged_cookies,
hooks=merge_hooks(request.hooks, self.hooks),
)
return p
def request(self, method, url,
params=None,
data=None,
headers=None,
cookies=None,
files=None,
auth=None,
timeout=None,
allow_redirects=True,
proxies=None,
hooks=None,
stream=None,
verify=None,
cert=None,
json=None):
"""Constructs a :class:`Request <Request>`, prepares it and sends it.
Returns :class:`Response <Response>` object.
:param method: method for the new :class:`Request` object.
:param url: URL for the new :class:`Request` object.
:param params: (optional) Dictionary or bytes to be sent in the query
string for the :class:`Request`.
:param data: (optional) Dictionary or bytes to send in the body of the
:class:`Request`.
:param json: (optional) json to send in the body of the
:class:`Request`.
:param headers: (optional) Dictionary of HTTP Headers to send with the
:class:`Request`.
:param cookies: (optional) Dict or CookieJar object to send with the
:class:`Request`.
:param files: (optional) Dictionary of ``'filename': file-like-objects``
for multipart encoding upload.
:param auth: (optional) Auth tuple or callable to enable
Basic/Digest/Custom HTTP Auth.
:param timeout: (optional) How long to wait for the server to send
data before giving up, as a float, or a :ref:`(connect timeout,
read timeout) <timeouts>` tuple.
:type timeout: float or tuple
:param allow_redirects: (optional) Set to True by default.
:type allow_redirects: bool
:param proxies: (optional) Dictionary mapping protocol to the URL of
the proxy.
:param stream: (optional) whether to immediately download the response
content. Defaults to ``False``.
:param verify: (optional) if ``True``, the SSL cert will be verified.
A CA_BUNDLE path can also be provided.
:param cert: (optional) if String, path to ssl client cert file (.pem).
If Tuple, ('cert', 'key') pair.
"""
method = to_native_string(method)
# Create the Request.
req = Request(
method = method.upper(),
url = url,
headers = headers,
files = files,
data = data or {},
json = json,
params = params or {},
auth = auth,
cookies = cookies,
hooks = hooks,
)
prep = self.prepare_request(req)
proxies = proxies or {}
settings = self.merge_environment_settings(
prep.url, proxies, stream, verify, cert
)
# Send the request.
send_kwargs = {
'timeout': timeout,
'allow_redirects': allow_redirects,
}
send_kwargs.update(settings)
resp = self.send(prep, **send_kwargs)
return resp
def get(self, url, **kwargs):
"""Sends a GET request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
kwargs.setdefault('allow_redirects', True)
return self.request('GET', url, **kwargs)
def options(self, url, **kwargs):
"""Sends a OPTIONS request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
kwargs.setdefault('allow_redirects', True)
return self.request('OPTIONS', url, **kwargs)
def head(self, url, **kwargs):
"""Sends a HEAD request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
kwargs.setdefault('allow_redirects', False)
return self.request('HEAD', url, **kwargs)
def post(self, url, data=None, json=None, **kwargs):
"""Sends a POST request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param json: (optional) json to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
return self.request('POST', url, data=data, json=json, **kwargs)
def put(self, url, data=None, **kwargs):
"""Sends a PUT request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
return self.request('PUT', url, data=data, **kwargs)
def patch(self, url, data=None, **kwargs):
"""Sends a PATCH request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
return self.request('PATCH', url, data=data, **kwargs)
def delete(self, url, **kwargs):
"""Sends a DELETE request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
return self.request('DELETE', url, **kwargs)
def send(self, request, **kwargs):
"""Send a given PreparedRequest."""
# Set defaults that the hooks can utilize to ensure they always have
# the correct parameters to reproduce the previous request.
kwargs.setdefault('stream', self.stream)
kwargs.setdefault('verify', self.verify)
kwargs.setdefault('cert', self.cert)
kwargs.setdefault('proxies', self.proxies)
# It's possible that users might accidentally send a Request object.
# Guard against that specific failure case.
if not isinstance(request, PreparedRequest):
raise ValueError('You can only send PreparedRequests.')
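        # Resolve any URLs recorded in the permanent-redirect cache, tracking
        # visited URLs so a cycle in the cache cannot loop forever.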
checked_urls = set()
while request.url in self.redirect_cache:
checked_urls.add(request.url)
new_url = self.redirect_cache.get(request.url)
if new_url in checked_urls:
break
request.url = new_url
# Set up variables needed for resolve_redirects and dispatching of hooks
allow_redirects = kwargs.pop('allow_redirects', True)
stream = kwargs.get('stream')
hooks = request.hooks
# Get the appropriate adapter to use
adapter = self.get_adapter(url=request.url)
# Start time (approximately) of the request
start = datetime.utcnow()
# Send the request
r = adapter.send(request, **kwargs)
# Total elapsed time of the request (approximately)
r.elapsed = datetime.utcnow() - start
# Response manipulation hooks
r = dispatch_hook('response', hooks, r, **kwargs)
# Persist cookies
if r.history:
# If the hooks create history then we want those cookies too
for resp in r.history:
extract_cookies_to_jar(self.cookies, resp.request, resp.raw)
extract_cookies_to_jar(self.cookies, request, r.raw)
# Redirect resolving generator.
gen = self.resolve_redirects(r, request, **kwargs)
# Resolve redirects if allowed.
history = [resp for resp in gen] if allow_redirects else []
# Shuffle things around if there's history.
if history:
# Insert the first (original) request at the start
history.insert(0, r)
# Get the last request made
r = history.pop()
r.history = history
if not stream:
r.content
return r
def merge_environment_settings(self, url, proxies, stream, verify, cert):
"""Check the environment and merge it with some settings."""
# Gather clues from the surrounding environment.
if self.trust_env:
# Set environment's proxies.
env_proxies = get_environ_proxies(url) or {}
for (k, v) in env_proxies.items():
proxies.setdefault(k, v)
# Look for requests environment configuration and be compatible
# with cURL.
if verify is True or verify is None:
verify = (os.environ.get('REQUESTS_CA_BUNDLE') or
os.environ.get('CURL_CA_BUNDLE'))
# Merge all the kwargs.
proxies = merge_setting(proxies, self.proxies)
stream = merge_setting(stream, self.stream)
verify = merge_setting(verify, self.verify)
cert = merge_setting(cert, self.cert)
return {'verify': verify, 'proxies': proxies, 'stream': stream,
'cert': cert}
def get_adapter(self, url):
"""Returns the appropriate connnection adapter for the given URL."""
for (prefix, adapter) in self.adapters.items():
if url.lower().startswith(prefix):
return adapter
# Nothing matches :-/
raise InvalidSchema("No connection adapters were found for '%s'" % url)
def close(self):
"""Closes all adapters and as such the session"""
for v in self.adapters.values():
v.close()
def mount(self, prefix, adapter):
"""Registers a connection adapter to a prefix.
Adapters are sorted in descending order by key length."""
self.adapters[prefix] = adapter
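        # Re-insert shorter prefixes after the new one so that iteration order
        # (and therefore get_adapter's first-match lookup) keeps the most
        # specific prefixes first.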
keys_to_move = [k for k in self.adapters if len(k) < len(prefix)]
for key in keys_to_move:
self.adapters[key] = self.adapters.pop(key)
def __getstate__(self):
state = dict((attr, getattr(self, attr, None)) for attr in self.__attrs__)
state['redirect_cache'] = dict(self.redirect_cache)
return state
def __setstate__(self, state):
redirect_cache = state.pop('redirect_cache', {})
for attr, value in state.items():
setattr(self, attr, value)
self.redirect_cache = RecentlyUsedContainer(REDIRECT_CACHE_SIZE)
for redirect, to in redirect_cache.items():
self.redirect_cache[redirect] = to
def session():
"""Returns a :class:`Session` for context-management."""
return Session()
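# Usage sketch (not part of the original module; URL, retry count and timeout
# values are illustrative):
#     s = session()
#     s.mount('https://', HTTPAdapter(max_retries=3))
#     resp = s.get('https://example.org', timeout=(3.05, 10))
#     resp.raise_for_status()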
| gpl-3.0 | -6,701,061,798,473,751,000 | 34.641703 | 115 | 0.604606 | false |
ai-ku/langvis | jython-2.1/Lib/gzip.py | 4 | 12370 | """Functions that read and write gzipped files.
The user of the file doesn't have to worry about the compression,
but random access is not allowed."""
# based on Andrew Kuchling's minigzip.py distributed with the zlib module
import struct, sys, time
import zlib
import __builtin__
__all__ = ["GzipFile","open"]
FTEXT, FHCRC, FEXTRA, FNAME, FCOMMENT = 1, 2, 4, 8, 16
READ, WRITE = 1, 2
def write32(output, value):
output.write(struct.pack("<l", value))
def write32u(output, value):
if value < 0:
value = value + 0x100000000L
output.write(struct.pack("<L", value))
def read32(input):
return struct.unpack("<l", input.read(4))[0]
def open(filename, mode="rb", compresslevel=9):
return GzipFile(filename, mode, compresslevel)
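# Example (a minimal sketch; the file name is illustrative):
#     f = open('example.txt.gz', 'wb')
#     f.write('hello world\n')
#     f.close()
#     assert open('example.txt.gz', 'rb').read() == 'hello world\n'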
class GzipFile:
myfileobj = None
def __init__(self, filename=None, mode=None,
compresslevel=9, fileobj=None):
if fileobj is None:
fileobj = self.myfileobj = __builtin__.open(filename, mode or 'rb')
if filename is None:
if hasattr(fileobj, 'name'): filename = fileobj.name
else: filename = ''
if mode is None:
if hasattr(fileobj, 'mode'): mode = fileobj.mode
else: mode = 'rb'
if mode[0:1] == 'r':
self.mode = READ
# Set flag indicating start of a new member
self._new_member = 1
self.extrabuf = ""
self.extrasize = 0
self.filename = filename
elif mode[0:1] == 'w' or mode[0:1] == 'a':
self.mode = WRITE
self._init_write(filename)
self.compress = zlib.compressobj(compresslevel,
zlib.DEFLATED,
-zlib.MAX_WBITS,
zlib.DEF_MEM_LEVEL,
0)
else:
raise ValueError, "Mode " + mode + " not supported"
self.fileobj = fileobj
if self.mode == WRITE:
self._write_gzip_header()
def __repr__(self):
s = repr(self.fileobj)
return '<gzip ' + s[1:-1] + ' ' + hex(id(self)) + '>'
def _init_write(self, filename):
if filename[-3:] != '.gz':
filename = filename + '.gz'
self.filename = filename
self.crc = zlib.crc32("")
self.size = 0
self.writebuf = []
self.bufsize = 0
def _write_gzip_header(self):
self.fileobj.write('\037\213') # magic header
self.fileobj.write('\010') # compression method
fname = self.filename[:-3]
flags = 0
if fname:
flags = FNAME
self.fileobj.write(chr(flags))
write32u(self.fileobj, long(time.time()))
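        # XFL byte: 2 marks slowest/maximum compression; OS byte 255 (0377)
        # means "unknown", per RFC 1952.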
self.fileobj.write('\002')
self.fileobj.write('\377')
if fname:
self.fileobj.write(fname + '\000')
def _init_read(self):
self.crc = zlib.crc32("")
self.size = 0
def _read_gzip_header(self):
magic = self.fileobj.read(2)
if magic != '\037\213':
raise IOError, 'Not a gzipped file'
method = ord( self.fileobj.read(1) )
if method != 8:
raise IOError, 'Unknown compression method'
flag = ord( self.fileobj.read(1) )
# modtime = self.fileobj.read(4)
# extraflag = self.fileobj.read(1)
# os = self.fileobj.read(1)
self.fileobj.read(6)
if flag & FEXTRA:
# Read & discard the extra field, if present
xlen=ord(self.fileobj.read(1))
xlen=xlen+256*ord(self.fileobj.read(1))
self.fileobj.read(xlen)
if flag & FNAME:
# Read and discard a null-terminated string containing the filename
while (1):
s=self.fileobj.read(1)
if not s or s=='\000': break
if flag & FCOMMENT:
# Read and discard a null-terminated string containing a comment
while (1):
s=self.fileobj.read(1)
if not s or s=='\000': break
if flag & FHCRC:
self.fileobj.read(2) # Read & discard the 16-bit header CRC
def write(self,data):
if self.fileobj is None:
raise ValueError, "write() on closed GzipFile object"
if len(data) > 0:
self.size = self.size + len(data)
self.crc = zlib.crc32(data, self.crc)
self.fileobj.write( self.compress.compress(data) )
    def writelines(self, lines):
        # Note: superseded by the sequence-based writelines() defined later in
        # this class; joining with no separator avoids inserting stray spaces.
        self.write("".join(lines))
def read(self, size=-1):
if self.extrasize <= 0 and self.fileobj is None:
return ''
readsize = 1024
if size < 0: # get the whole thing
try:
while 1:
self._read(readsize)
readsize = readsize * 2
except EOFError:
size = self.extrasize
else: # just get some more of it
try:
while size > self.extrasize:
self._read(readsize)
readsize = readsize * 2
except EOFError:
if size > self.extrasize:
size = self.extrasize
chunk = self.extrabuf[:size]
self.extrabuf = self.extrabuf[size:]
self.extrasize = self.extrasize - size
return chunk
def _unread(self, buf):
self.extrabuf = buf + self.extrabuf
self.extrasize = len(buf) + self.extrasize
def _read(self, size=1024):
if self.fileobj is None: raise EOFError, "Reached EOF"
if self._new_member:
# If the _new_member flag is set, we have to
# jump to the next member, if there is one.
#
# First, check if we're at the end of the file;
# if so, it's time to stop; no more members to read.
pos = self.fileobj.tell() # Save current position
self.fileobj.seek(0, 2) # Seek to end of file
if pos == self.fileobj.tell():
self.fileobj = None
raise EOFError, "Reached EOF"
else:
self.fileobj.seek( pos ) # Return to original position
self._init_read()
self._read_gzip_header()
self.decompress = zlib.decompressobj(-zlib.MAX_WBITS)
self._new_member = 0
# Read a chunk of data from the file
buf = self.fileobj.read(size)
# If the EOF has been reached, flush the decompression object
# and mark this object as finished.
if buf == "":
uncompress = self.decompress.flush()
self._read_eof()
self.fileobj = None
self._add_read_data( uncompress )
raise EOFError, 'Reached EOF'
uncompress = self.decompress.decompress(buf)
self._add_read_data( uncompress )
if self.decompress.unused_data != "":
# Ending case: we've come to the end of a member in the file,
# so seek back to the start of the unused data, finish up
# this member, and read a new gzip header.
# (The number of bytes to seek back is the length of the unused
# data, minus 8 because _read_eof() will rewind a further 8 bytes)
self.fileobj.seek( -len(self.decompress.unused_data)+8, 1)
# Check the CRC and file size, and set the flag so we read
# a new member on the next call
self._read_eof()
self._new_member = 1
def _add_read_data(self, data):
self.crc = zlib.crc32(data, self.crc)
self.extrabuf = self.extrabuf + data
self.extrasize = self.extrasize + len(data)
self.size = self.size + len(data)
def _read_eof(self):
# We've read to the end of the file, so we have to rewind in order
# to reread the 8 bytes containing the CRC and the file size.
        # We check that the computed CRC and size of the
        # uncompressed data match the stored values.
self.fileobj.seek(-8, 1)
crc32 = read32(self.fileobj)
isize = read32(self.fileobj)
if crc32%0x100000000L != self.crc%0x100000000L:
raise ValueError, "CRC check failed"
elif isize != self.size:
raise ValueError, "Incorrect length of data produced"
def close(self):
if self.mode == WRITE:
self.fileobj.write(self.compress.flush())
write32(self.fileobj, self.crc)
write32(self.fileobj, self.size)
self.fileobj = None
elif self.mode == READ:
self.fileobj = None
if self.myfileobj:
self.myfileobj.close()
self.myfileobj = None
def __del__(self):
try:
if (self.myfileobj is None and
self.fileobj is None):
return
except AttributeError:
return
self.close()
def flush(self):
self.fileobj.flush()
def isatty(self):
return 0
def readline(self, size=-1):
if size < 0: size = sys.maxint
bufs = []
orig_size = size
readsize = min(100, size) # Read from the file in small chunks
while 1:
if size == 0:
return "".join(bufs) # Return resulting line
c = self.read(readsize)
i = c.find('\n')
if size is not None:
# We set i=size to break out of the loop under two
# conditions: 1) there's no newline, and the chunk is
# larger than size, or 2) there is a newline, but the
# resulting line would be longer than 'size'.
if i==-1 and len(c) > size: i=size-1
elif size <= i: i = size -1
if i >= 0 or c == '':
bufs.append(c[:i+1]) # Add portion of last chunk
self._unread(c[i+1:]) # Push back rest of chunk
return ''.join(bufs) # Return resulting line
# Append chunk to list, decrease 'size',
bufs.append(c)
size = size - len(c)
readsize = min(size, readsize * 2)
def readlines(self, sizehint=0):
# Negative numbers result in reading all the lines
if sizehint <= 0: sizehint = sys.maxint
L = []
while sizehint > 0:
line = self.readline()
if line == "": break
L.append( line )
sizehint = sizehint - len(line)
return L
def writelines(self, L):
for line in L:
self.write(line)
def _test():
# Act like gzip; with -d, act like gunzip.
# The input file is not deleted, however, nor are any other gzip
# options or features supported.
import sys
args = sys.argv[1:]
decompress = args and args[0] == "-d"
if decompress:
args = args[1:]
if not args:
args = ["-"]
for arg in args:
if decompress:
if arg == "-":
f = GzipFile(filename="", mode="rb", fileobj=sys.stdin)
g = sys.stdout
else:
if arg[-3:] != ".gz":
print "filename doesn't end in .gz:", `arg`
continue
f = open(arg, "rb")
g = __builtin__.open(arg[:-3], "wb")
else:
if arg == "-":
f = sys.stdin
g = GzipFile(filename="", mode="wb", fileobj=sys.stdout)
else:
f = __builtin__.open(arg, "rb")
g = open(arg + ".gz", "wb")
while 1:
chunk = f.read(1024)
if not chunk:
break
g.write(chunk)
if g is not sys.stdout:
g.close()
if f is not sys.stdin:
f.close()
if __name__ == '__main__':
_test()
| mit | -2,339,869,515,513,239,000 | 32.456825 | 79 | 0.503476 | false |
lambder/bigcouch | couchjs/scons/scons-local-2.0.1/SCons/Options/BoolOption.py | 61 | 2003 | #
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Options/BoolOption.py 5134 2010/08/16 23:02:40 bdeegan"
__doc__ = """Place-holder for the old SCons.Options module hierarchy
This is for backwards compatibility. The new equivalent is the Variables/
class hierarchy. These will have deprecation warnings added (some day),
and will then be removed entirely (some day).
"""
import SCons.Variables
import SCons.Warnings
warned = False
def BoolOption(*args, **kw):
global warned
if not warned:
msg = "The BoolOption() function is deprecated; use the BoolVariable() function instead."
SCons.Warnings.warn(SCons.Warnings.DeprecatedOptionsWarning, msg)
warned = True
return SCons.Variables.BoolVariable(*args, **kw)
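# Migration sketch (SConstruct-level names; illustrative only):
#     vars = Variables()
#     vars.Add(BoolVariable('debug', 'Build with debug symbols', False))
#     env = Environment(variables=vars)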
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| apache-2.0 | -7,988,955,372,094,683,000 | 39.06 | 97 | 0.752371 | false |
CyanogenMod/android_external_chromium_org | tools/android/adb_profile_chrome/profiler.py | 9 | 2949 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from adb_profile_chrome import trace_packager
from adb_profile_chrome import ui
from pylib import constants
def _StartTracing(controllers, interval):
for controller in controllers:
controller.StartTracing(interval)
def _StopTracing(controllers):
for controller in controllers:
controller.StopTracing()
def _PullTraces(controllers, output, compress, write_json):
ui.PrintMessage('Downloading...', eol='')
trace_files = [controller.PullTrace() for controller in controllers]
trace_files = [trace for trace in trace_files if trace]
if not trace_files:
ui.PrintMessage('No results')
return []
result = trace_packager.PackageTraces(trace_files,
output=output,
compress=compress,
write_json=write_json)
ui.PrintMessage('done')
ui.PrintMessage('Trace written to file://%s' % os.path.abspath(result))
return result
def GetSupportedBrowsers():
"""Returns the package names of all supported browsers."""
# Add aliases for backwards compatibility.
supported_browsers = {
'stable': constants.PACKAGE_INFO['chrome_stable'],
'beta': constants.PACKAGE_INFO['chrome_beta'],
'dev': constants.PACKAGE_INFO['chrome_dev'],
'build': constants.PACKAGE_INFO['chrome'],
}
supported_browsers.update(constants.PACKAGE_INFO)
unsupported_browsers = ['content_browsertests', 'gtest', 'legacy_browser']
for browser in unsupported_browsers:
del supported_browsers[browser]
return supported_browsers
def CaptureProfile(controllers, interval, output=None, compress=False,
write_json=False):
"""Records a profiling trace saves the result to a file.
Args:
controllers: List of tracing controllers.
interval: Time interval to capture in seconds. An interval of None (or 0)
continues tracing until stopped by the user.
output: Output file name or None to use an automatically generated name.
compress: If True, the result will be compressed either with gzip or zip
depending on the number of captured subtraces.
write_json: If True, prefer JSON output over HTML.
Returns:
Path to saved profile.
"""
trace_type = ' + '.join(map(str, controllers))
try:
_StartTracing(controllers, interval)
if interval:
ui.PrintMessage('Capturing %d-second %s. Press Enter to stop early...' % \
(interval, trace_type), eol='')
ui.WaitForEnter(interval)
else:
ui.PrintMessage('Capturing %s. Press Enter to stop...' % \
trace_type, eol='')
raw_input()
finally:
_StopTracing(controllers)
if interval:
ui.PrintMessage('done')
return _PullTraces(controllers, output, compress, write_json)
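# Usage sketch (the controller class name is a hypothetical stand-in for the
# real tracing controllers shipped with this package):
#   controllers = [ChromeTracingController(device, package_info)]
#   CaptureProfile(controllers, interval=10, output='profile.html')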
| bsd-3-clause | 8,712,717,741,996,541,000 | 32.896552 | 80 | 0.682943 | false |
Zord13appdesa/python-for-android | python-modules/twisted/twisted/test/test_socks.py | 59 | 17748 | # Copyright (c) 2001-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.protocol.socks}, an implementation of the SOCKSv4 and
SOCKSv4a protocols.
"""
import struct, socket
from twisted.trial import unittest
from twisted.test import proto_helpers
from twisted.internet import defer, address, reactor
from twisted.internet.error import DNSLookupError
from twisted.protocols import socks
class StringTCPTransport(proto_helpers.StringTransport):
stringTCPTransport_closing = False
peer = None
def getPeer(self):
return self.peer
def getHost(self):
return address.IPv4Address('TCP', '2.3.4.5', 42)
def loseConnection(self):
self.stringTCPTransport_closing = True
class FakeResolverReactor:
"""
Bare-bones reactor with deterministic behavior for the resolve method.
"""
def __init__(self, names):
"""
@type names: C{dict} containing C{str} keys and C{str} values.
@param names: A hostname to IP address mapping. The IP addresses are
stringified dotted quads.
"""
self.names = names
def resolve(self, hostname):
"""
Resolve a hostname by looking it up in the C{names} dictionary.
"""
try:
return defer.succeed(self.names[hostname])
except KeyError:
return defer.fail(
DNSLookupError("FakeResolverReactor couldn't find " + hostname))
class SOCKSv4Driver(socks.SOCKSv4):
# last SOCKSv4Outgoing instantiated
driver_outgoing = None
# last SOCKSv4IncomingFactory instantiated
driver_listen = None
def connectClass(self, host, port, klass, *args):
# fake it
proto = klass(*args)
proto.transport = StringTCPTransport()
proto.transport.peer = address.IPv4Address('TCP', host, port)
proto.connectionMade()
self.driver_outgoing = proto
return defer.succeed(proto)
def listenClass(self, port, klass, *args):
# fake it
factory = klass(*args)
self.driver_listen = factory
if port == 0:
port = 1234
return defer.succeed(('6.7.8.9', port))
class Connect(unittest.TestCase):
"""
Tests for SOCKS and SOCKSv4a connect requests using the L{SOCKSv4} protocol.
"""
def setUp(self):
self.sock = SOCKSv4Driver()
self.sock.transport = StringTCPTransport()
self.sock.connectionMade()
self.sock.reactor = FakeResolverReactor({"localhost":"127.0.0.1"})
def tearDown(self):
outgoing = self.sock.driver_outgoing
if outgoing is not None:
self.assert_(outgoing.transport.stringTCPTransport_closing,
"Outgoing SOCKS connections need to be closed.")
def test_simple(self):
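        # SOCKSv4 CONNECT request: VN=4, CD=1 (connect), DSTPORT=34,
        # DSTIP=1.2.3.4, USERID='fooBAR', NUL terminator.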
self.sock.dataReceived(
struct.pack('!BBH', 4, 1, 34)
+ socket.inet_aton('1.2.3.4')
+ 'fooBAR'
+ '\0')
sent = self.sock.transport.value()
self.sock.transport.clear()
self.assertEqual(sent,
struct.pack('!BBH', 0, 90, 34)
+ socket.inet_aton('1.2.3.4'))
self.assert_(not self.sock.transport.stringTCPTransport_closing)
self.assert_(self.sock.driver_outgoing is not None)
# pass some data through
self.sock.dataReceived('hello, world')
self.assertEqual(self.sock.driver_outgoing.transport.value(),
'hello, world')
# the other way around
self.sock.driver_outgoing.dataReceived('hi there')
self.assertEqual(self.sock.transport.value(), 'hi there')
self.sock.connectionLost('fake reason')
def test_socks4aSuccessfulResolution(self):
"""
If the destination IP address has zeros for the first three octets and
non-zero for the fourth octet, the client is attempting a v4a
connection. A hostname is specified after the user ID string and the
server connects to the address that hostname resolves to.
@see: U{http://en.wikipedia.org/wiki/SOCKS#SOCKS_4a_protocol}
"""
# send the domain name "localhost" to be resolved
clientRequest = (
struct.pack('!BBH', 4, 1, 34)
+ socket.inet_aton('0.0.0.1')
+ 'fooBAZ\0'
+ 'localhost\0')
# Deliver the bytes one by one to exercise the protocol's buffering
# logic. FakeResolverReactor's resolve method is invoked to "resolve"
# the hostname.
for byte in clientRequest:
self.sock.dataReceived(byte)
sent = self.sock.transport.value()
self.sock.transport.clear()
# Verify that the server responded with the address which will be
# connected to.
self.assertEquals(
sent,
struct.pack('!BBH', 0, 90, 34) + socket.inet_aton('127.0.0.1'))
self.assertFalse(self.sock.transport.stringTCPTransport_closing)
self.assertNotIdentical(self.sock.driver_outgoing, None)
# Pass some data through and verify it is forwarded to the outgoing
# connection.
self.sock.dataReceived('hello, world')
self.assertEquals(
self.sock.driver_outgoing.transport.value(), 'hello, world')
# Deliver some data from the output connection and verify it is
# passed along to the incoming side.
self.sock.driver_outgoing.dataReceived('hi there')
self.assertEquals(self.sock.transport.value(), 'hi there')
self.sock.connectionLost('fake reason')
def test_socks4aFailedResolution(self):
"""
Failed hostname resolution on a SOCKSv4a packet results in a 91 error
response and the connection getting closed.
"""
# send the domain name "failinghost" to be resolved
clientRequest = (
struct.pack('!BBH', 4, 1, 34)
+ socket.inet_aton('0.0.0.1')
+ 'fooBAZ\0'
+ 'failinghost\0')
# Deliver the bytes one by one to exercise the protocol's buffering
# logic. FakeResolverReactor's resolve method is invoked to "resolve"
# the hostname.
for byte in clientRequest:
self.sock.dataReceived(byte)
# Verify that the server responds with a 91 error.
sent = self.sock.transport.value()
self.assertEquals(
sent,
struct.pack('!BBH', 0, 91, 0) + socket.inet_aton('0.0.0.0'))
# A failed resolution causes the transport to drop the connection.
self.assertTrue(self.sock.transport.stringTCPTransport_closing)
self.assertIdentical(self.sock.driver_outgoing, None)
def test_accessDenied(self):
self.sock.authorize = lambda code, server, port, user: 0
self.sock.dataReceived(
struct.pack('!BBH', 4, 1, 4242)
+ socket.inet_aton('10.2.3.4')
+ 'fooBAR'
+ '\0')
self.assertEqual(self.sock.transport.value(),
struct.pack('!BBH', 0, 91, 0)
+ socket.inet_aton('0.0.0.0'))
self.assert_(self.sock.transport.stringTCPTransport_closing)
self.assertIdentical(self.sock.driver_outgoing, None)
def test_eofRemote(self):
self.sock.dataReceived(
struct.pack('!BBH', 4, 1, 34)
+ socket.inet_aton('1.2.3.4')
+ 'fooBAR'
+ '\0')
sent = self.sock.transport.value()
self.sock.transport.clear()
# pass some data through
self.sock.dataReceived('hello, world')
self.assertEqual(self.sock.driver_outgoing.transport.value(),
'hello, world')
# now close it from the server side
self.sock.driver_outgoing.transport.loseConnection()
self.sock.driver_outgoing.connectionLost('fake reason')
def test_eofLocal(self):
self.sock.dataReceived(
struct.pack('!BBH', 4, 1, 34)
+ socket.inet_aton('1.2.3.4')
+ 'fooBAR'
+ '\0')
sent = self.sock.transport.value()
self.sock.transport.clear()
# pass some data through
self.sock.dataReceived('hello, world')
self.assertEqual(self.sock.driver_outgoing.transport.value(),
'hello, world')
# now close it from the client side
self.sock.connectionLost('fake reason')
class Bind(unittest.TestCase):
"""
Tests for SOCKS and SOCKSv4a bind requests using the L{SOCKSv4} protocol.
"""
def setUp(self):
self.sock = SOCKSv4Driver()
self.sock.transport = StringTCPTransport()
self.sock.connectionMade()
self.sock.reactor = FakeResolverReactor({"localhost":"127.0.0.1"})
## def tearDown(self):
## # TODO ensure the listen port is closed
## listen = self.sock.driver_listen
## if listen is not None:
## self.assert_(incoming.transport.stringTCPTransport_closing,
## "Incoming SOCKS connections need to be closed.")
def test_simple(self):
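        # SOCKSv4 BIND request: VN=4, CD=2 (bind), DSTPORT=34,
        # DSTIP=1.2.3.4, USERID='fooBAR', NUL terminator.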
self.sock.dataReceived(
struct.pack('!BBH', 4, 2, 34)
+ socket.inet_aton('1.2.3.4')
+ 'fooBAR'
+ '\0')
sent = self.sock.transport.value()
self.sock.transport.clear()
self.assertEqual(sent,
struct.pack('!BBH', 0, 90, 1234)
+ socket.inet_aton('6.7.8.9'))
self.assert_(not self.sock.transport.stringTCPTransport_closing)
self.assert_(self.sock.driver_listen is not None)
# connect
incoming = self.sock.driver_listen.buildProtocol(('1.2.3.4', 5345))
self.assertNotIdentical(incoming, None)
incoming.transport = StringTCPTransport()
incoming.connectionMade()
# now we should have the second reply packet
sent = self.sock.transport.value()
self.sock.transport.clear()
self.assertEqual(sent,
struct.pack('!BBH', 0, 90, 0)
+ socket.inet_aton('0.0.0.0'))
self.assert_(not self.sock.transport.stringTCPTransport_closing)
# pass some data through
self.sock.dataReceived('hello, world')
self.assertEqual(incoming.transport.value(),
'hello, world')
# the other way around
incoming.dataReceived('hi there')
self.assertEqual(self.sock.transport.value(), 'hi there')
self.sock.connectionLost('fake reason')
def test_socks4a(self):
"""
If the destination IP address has zeros for the first three octets and
non-zero for the fourth octet, the client is attempting a v4a
connection. A hostname is specified after the user ID string and the
server connects to the address that hostname resolves to.
@see: U{http://en.wikipedia.org/wiki/SOCKS#SOCKS_4a_protocol}
"""
# send the domain name "localhost" to be resolved
clientRequest = (
struct.pack('!BBH', 4, 2, 34)
+ socket.inet_aton('0.0.0.1')
+ 'fooBAZ\0'
+ 'localhost\0')
# Deliver the bytes one by one to exercise the protocol's buffering
# logic. FakeResolverReactor's resolve method is invoked to "resolve"
# the hostname.
for byte in clientRequest:
self.sock.dataReceived(byte)
sent = self.sock.transport.value()
self.sock.transport.clear()
# Verify that the server responded with the address which will be
# connected to.
self.assertEquals(
sent,
struct.pack('!BBH', 0, 90, 1234) + socket.inet_aton('6.7.8.9'))
self.assertFalse(self.sock.transport.stringTCPTransport_closing)
self.assertNotIdentical(self.sock.driver_listen, None)
# connect
incoming = self.sock.driver_listen.buildProtocol(('127.0.0.1', 5345))
self.assertNotIdentical(incoming, None)
incoming.transport = StringTCPTransport()
incoming.connectionMade()
# now we should have the second reply packet
sent = self.sock.transport.value()
self.sock.transport.clear()
self.assertEqual(sent,
struct.pack('!BBH', 0, 90, 0)
+ socket.inet_aton('0.0.0.0'))
self.assertNotIdentical(
self.sock.transport.stringTCPTransport_closing, None)
# Deliver some data from the output connection and verify it is
# passed along to the incoming side.
self.sock.dataReceived('hi there')
self.assertEquals(incoming.transport.value(), 'hi there')
# the other way around
incoming.dataReceived('hi there')
self.assertEqual(self.sock.transport.value(), 'hi there')
self.sock.connectionLost('fake reason')
def test_socks4aFailedResolution(self):
"""
Failed hostname resolution on a SOCKSv4a packet results in a 91 error
response and the connection getting closed.
"""
# send the domain name "failinghost" to be resolved
clientRequest = (
struct.pack('!BBH', 4, 2, 34)
+ socket.inet_aton('0.0.0.1')
+ 'fooBAZ\0'
+ 'failinghost\0')
# Deliver the bytes one by one to exercise the protocol's buffering
# logic. FakeResolverReactor's resolve method is invoked to "resolve"
# the hostname.
for byte in clientRequest:
self.sock.dataReceived(byte)
# Verify that the server responds with a 91 error.
sent = self.sock.transport.value()
self.assertEquals(
sent,
struct.pack('!BBH', 0, 91, 0) + socket.inet_aton('0.0.0.0'))
# A failed resolution causes the transport to drop the connection.
self.assertTrue(self.sock.transport.stringTCPTransport_closing)
self.assertIdentical(self.sock.driver_outgoing, None)
def test_accessDenied(self):
self.sock.authorize = lambda code, server, port, user: 0
self.sock.dataReceived(
struct.pack('!BBH', 4, 2, 4242)
+ socket.inet_aton('10.2.3.4')
+ 'fooBAR'
+ '\0')
self.assertEqual(self.sock.transport.value(),
struct.pack('!BBH', 0, 91, 0)
+ socket.inet_aton('0.0.0.0'))
self.assert_(self.sock.transport.stringTCPTransport_closing)
self.assertIdentical(self.sock.driver_listen, None)
def test_eofRemote(self):
self.sock.dataReceived(
struct.pack('!BBH', 4, 2, 34)
+ socket.inet_aton('1.2.3.4')
+ 'fooBAR'
+ '\0')
sent = self.sock.transport.value()
self.sock.transport.clear()
# connect
incoming = self.sock.driver_listen.buildProtocol(('1.2.3.4', 5345))
self.assertNotIdentical(incoming, None)
incoming.transport = StringTCPTransport()
incoming.connectionMade()
# now we should have the second reply packet
sent = self.sock.transport.value()
self.sock.transport.clear()
self.assertEqual(sent,
struct.pack('!BBH', 0, 90, 0)
+ socket.inet_aton('0.0.0.0'))
self.assert_(not self.sock.transport.stringTCPTransport_closing)
# pass some data through
self.sock.dataReceived('hello, world')
self.assertEqual(incoming.transport.value(),
'hello, world')
# now close it from the server side
incoming.transport.loseConnection()
incoming.connectionLost('fake reason')
def test_eofLocal(self):
self.sock.dataReceived(
struct.pack('!BBH', 4, 2, 34)
+ socket.inet_aton('1.2.3.4')
+ 'fooBAR'
+ '\0')
sent = self.sock.transport.value()
self.sock.transport.clear()
# connect
incoming = self.sock.driver_listen.buildProtocol(('1.2.3.4', 5345))
self.assertNotIdentical(incoming, None)
incoming.transport = StringTCPTransport()
incoming.connectionMade()
# now we should have the second reply packet
sent = self.sock.transport.value()
self.sock.transport.clear()
self.assertEqual(sent,
struct.pack('!BBH', 0, 90, 0)
+ socket.inet_aton('0.0.0.0'))
self.assert_(not self.sock.transport.stringTCPTransport_closing)
# pass some data through
self.sock.dataReceived('hello, world')
self.assertEqual(incoming.transport.value(),
'hello, world')
# now close it from the client side
self.sock.connectionLost('fake reason')
def test_badSource(self):
self.sock.dataReceived(
struct.pack('!BBH', 4, 2, 34)
+ socket.inet_aton('1.2.3.4')
+ 'fooBAR'
+ '\0')
sent = self.sock.transport.value()
self.sock.transport.clear()
# connect from WRONG address
incoming = self.sock.driver_listen.buildProtocol(('1.6.6.6', 666))
self.assertIdentical(incoming, None)
# Now we should have the second reply packet and it should
# be a failure. The connection should be closing.
sent = self.sock.transport.value()
self.sock.transport.clear()
self.assertEqual(sent,
struct.pack('!BBH', 0, 91, 0)
+ socket.inet_aton('0.0.0.0'))
self.assert_(self.sock.transport.stringTCPTransport_closing)
| apache-2.0 | -6,670,363,254,858,823,000 | 34.638554 | 80 | 0.595391 | false |
sushramesh/lwc | lib/python2.7/site-packages/pip/utils/outdated.py | 191 | 5555 | from __future__ import absolute_import
import datetime
import json
import logging
import os.path
import sys
from pip._vendor import lockfile
from pip._vendor.packaging import version as packaging_version
from pip.compat import total_seconds, WINDOWS
from pip.index import PyPI
from pip.locations import USER_CACHE_DIR, running_under_virtualenv
from pip.utils import ensure_dir, get_installed_version
from pip.utils.filesystem import check_path_owner
SELFCHECK_DATE_FMT = "%Y-%m-%dT%H:%M:%SZ"
logger = logging.getLogger(__name__)
class VirtualenvSelfCheckState(object):
def __init__(self):
self.statefile_path = os.path.join(sys.prefix, "pip-selfcheck.json")
# Load the existing state
try:
with open(self.statefile_path) as statefile:
self.state = json.load(statefile)
except (IOError, ValueError):
self.state = {}
def save(self, pypi_version, current_time):
# Attempt to write out our version check file
with open(self.statefile_path, "w") as statefile:
json.dump(
{
"last_check": current_time.strftime(SELFCHECK_DATE_FMT),
"pypi_version": pypi_version,
},
statefile,
sort_keys=True,
separators=(",", ":")
)
class GlobalSelfCheckState(object):
def __init__(self):
self.statefile_path = os.path.join(USER_CACHE_DIR, "selfcheck.json")
# Load the existing state
try:
with open(self.statefile_path) as statefile:
self.state = json.load(statefile)[sys.prefix]
except (IOError, ValueError, KeyError):
self.state = {}
def save(self, pypi_version, current_time):
# Check to make sure that we own the directory
if not check_path_owner(os.path.dirname(self.statefile_path)):
return
# Now that we've ensured the directory is owned by this user, we'll go
# ahead and make sure that all our directories are created.
ensure_dir(os.path.dirname(self.statefile_path))
# Attempt to write out our version check file
with lockfile.LockFile(self.statefile_path):
if os.path.exists(self.statefile_path):
with open(self.statefile_path) as statefile:
state = json.load(statefile)
else:
state = {}
state[sys.prefix] = {
"last_check": current_time.strftime(SELFCHECK_DATE_FMT),
"pypi_version": pypi_version,
}
with open(self.statefile_path, "w") as statefile:
json.dump(state, statefile, sort_keys=True,
separators=(",", ":"))
def load_selfcheck_statefile():
if running_under_virtualenv():
return VirtualenvSelfCheckState()
else:
return GlobalSelfCheckState()
def pip_version_check(session):
"""Check for an update for pip.
Limit the frequency of checks to once per week. State is stored either in
the active virtualenv or in the user's USER_CACHE_DIR keyed off the prefix
of the pip script path.
"""
installed_version = get_installed_version("pip")
if installed_version is None:
return
pip_version = packaging_version.parse(installed_version)
pypi_version = None
try:
state = load_selfcheck_statefile()
current_time = datetime.datetime.utcnow()
# Determine if we need to refresh the state
if "last_check" in state.state and "pypi_version" in state.state:
last_check = datetime.datetime.strptime(
state.state["last_check"],
SELFCHECK_DATE_FMT
)
if total_seconds(current_time - last_check) < 7 * 24 * 60 * 60:
pypi_version = state.state["pypi_version"]
# Refresh the version if we need to or just see if we need to warn
if pypi_version is None:
resp = session.get(
PyPI.pip_json_url,
headers={"Accept": "application/json"},
)
resp.raise_for_status()
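            # Pick the newest non-prerelease release, ordered per PEP 440.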
pypi_version = [
v for v in sorted(
list(resp.json()["releases"]),
key=packaging_version.parse,
)
if not packaging_version.parse(v).is_prerelease
][-1]
# save that we've performed a check
state.save(pypi_version, current_time)
remote_version = packaging_version.parse(pypi_version)
# Determine if our pypi_version is older
if (pip_version < remote_version and
pip_version.base_version != remote_version.base_version):
# Advise "python -m pip" on Windows to avoid issues
# with overwriting pip.exe.
if WINDOWS:
pip_cmd = "python -m pip"
else:
pip_cmd = "pip"
logger.warning(
"You are using pip version %s, however version %s is "
"available.\nYou should consider upgrading via the "
"'%s install --upgrade pip' command." % (pip_version,
pypi_version,
pip_cmd)
)
except Exception:
logger.debug(
"There was an error checking the latest version of pip",
exc_info=True,
)
| mit | -1,358,175,049,972,279,800 | 33.079755 | 78 | 0.569577 | false |
certik/python-theora | examples/chop.py | 1 | 1424 | #! /usr/bin/env python
"""
Analog to the oggz-chop program.
Example:
examples/chop.py -o s.ogv -s 20 -e 30 video.ogv
See "./chop.py -h" for help.
"""
from optparse import OptionParser
from theora import Theora, TheoraEncoder
def convert(infile, outfile, start, end):
print "converting %s to %s, between the times %d:%d" % \
(infile, outfile, start, end)
a = Theora(infile)
b = TheoraEncoder(outfile, a.width, a.height, quality=63)
a.seek(time=start)
while a.read_frame() and a.time < end:
print "frame: %d, time=%f" % (a.frame, a.time)
A = a.get_frame_array()
b.write_frame_array(A)
usage = """\
%prog [options] file_in
Extract the part of a Theora video file between start and/or end times.
"""
def main():
parser = OptionParser(usage=usage)
parser.add_option("-o", "--output", dest="filename",
help="Specify output filename")
parser.add_option("-s", "--start", dest="start_time", type="int",
help="Specify start time")
parser.add_option("-e", "--end", dest="end_time", type="int",
help="Specify end time")
options, args = parser.parse_args()
if options.filename and options.start_time and options.end_time and \
len(args) == 1:
convert(args[0], options.filename, options.start_time, options.end_time)
else:
parser.print_help()
if __name__ == "__main__":
main()
| bsd-3-clause | -4,427,279,083,637,898,000 | 28.666667 | 80 | 0.614466 | false |
tiagoarasilva/django-boilerplate | project_name/lib/audit/middleware.py | 1 | 3075 | # Copyright (c) 2009 James Aylett <http://tartarus.org/james/computers/django/>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from django.db.models.signals import pre_save
import threading
stash = threading.local()
def get_current_user():
"""Get the user whose session resulted in the current code running. (Only valid during requests.)"""
return getattr(stash, 'current_user', None)
def set_current_user(user):
stash.current_user = user
def onanymodel_presave(sender, **kwargs):
current_user = get_current_user()
if current_user is None or current_user.is_anonymous():
# if there is no current user or we're an anonymous user (ie: guest) then
# don't do anything. The save() will fail if created_by or modified_by are
# null=False, and not otherwise; ie the behaviour is controlled by the
        # models, as desired.
current_user = None
obj = kwargs['instance']
if hasattr(obj, 'modified_by_id') and current_user:
obj.modified_by = current_user
if not obj.pk and current_user and hasattr(obj, 'created_by_id'):
try:
if not obj.created_by:
obj.created_by = current_user
except obj.__class__.created_by.field.rel.to.DoesNotExist:
# FRAGILE: reliant on Django internals
            # (django.db.models.fields.related.ReverseSingleRelatedObjectDescriptor and down)
            #
            # however will work if you don't use the django auth system, and make the created_by
            # field a ForeignKey to whatever you use instead of django.contrib.auth.models.User.
obj.created_by = current_user
pre_save.connect(onanymodel_presave)
class AutoCreatedAndModifiedFields:
def process_request(self, request):
set_current_user(request.user)
def process_response(self, request, response):
set_current_user(None)
return response
def process_exception(self, request, exception):
set_current_user(None)
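# Wiring sketch (the dotted path depends on where this module lives):
#     MIDDLEWARE_CLASSES += (
#         'project_name.lib.audit.middleware.AutoCreatedAndModifiedFields',
#     )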
| mit | 2,165,794,235,012,168,400 | 44.590909 | 104 | 0.694959 | false |
jpush/jbox | Server/venv/lib/python3.5/site-packages/pip/_vendor/requests/packages/chardet/universaldetector.py | 1776 | 6840 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
import sys
import codecs
from .latin1prober import Latin1Prober # windows-1252
from .mbcsgroupprober import MBCSGroupProber # multi-byte character sets
from .sbcsgroupprober import SBCSGroupProber # single-byte character sets
from .escprober import EscCharSetProber # ISO-2122, etc.
import re
MINIMUM_THRESHOLD = 0.20
ePureAscii = 0
eEscAscii = 1
eHighbyte = 2
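# Input states: ePureAscii - only plain ASCII seen so far; eEscAscii - escape
# sequences seen (ISO-2022/HZ candidates); eHighbyte - bytes >= 0x80 seen.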
class UniversalDetector:
def __init__(self):
self._highBitDetector = re.compile(b'[\x80-\xFF]')
self._escDetector = re.compile(b'(\033|~{)')
self._mEscCharSetProber = None
self._mCharSetProbers = []
self.reset()
def reset(self):
self.result = {'encoding': None, 'confidence': 0.0}
self.done = False
self._mStart = True
self._mGotData = False
self._mInputState = ePureAscii
self._mLastChar = b''
if self._mEscCharSetProber:
self._mEscCharSetProber.reset()
for prober in self._mCharSetProbers:
prober.reset()
def feed(self, aBuf):
if self.done:
return
aLen = len(aBuf)
if not aLen:
return
if not self._mGotData:
# If the data starts with BOM, we know it is UTF
if aBuf[:3] == codecs.BOM_UTF8:
# EF BB BF UTF-8 with BOM
self.result = {'encoding': "UTF-8-SIG", 'confidence': 1.0}
elif aBuf[:4] == codecs.BOM_UTF32_LE:
# FF FE 00 00 UTF-32, little-endian BOM
self.result = {'encoding': "UTF-32LE", 'confidence': 1.0}
elif aBuf[:4] == codecs.BOM_UTF32_BE:
# 00 00 FE FF UTF-32, big-endian BOM
self.result = {'encoding': "UTF-32BE", 'confidence': 1.0}
elif aBuf[:4] == b'\xFE\xFF\x00\x00':
# FE FF 00 00 UCS-4, unusual octet order BOM (3412)
self.result = {
'encoding': "X-ISO-10646-UCS-4-3412",
'confidence': 1.0
}
elif aBuf[:4] == b'\x00\x00\xFF\xFE':
# 00 00 FF FE UCS-4, unusual octet order BOM (2143)
self.result = {
'encoding': "X-ISO-10646-UCS-4-2143",
'confidence': 1.0
}
elif aBuf[:2] == codecs.BOM_LE:
# FF FE UTF-16, little endian BOM
self.result = {'encoding': "UTF-16LE", 'confidence': 1.0}
elif aBuf[:2] == codecs.BOM_BE:
# FE FF UTF-16, big endian BOM
self.result = {'encoding': "UTF-16BE", 'confidence': 1.0}
self._mGotData = True
if self.result['encoding'] and (self.result['confidence'] > 0.0):
self.done = True
return
if self._mInputState == ePureAscii:
if self._highBitDetector.search(aBuf):
self._mInputState = eHighbyte
elif ((self._mInputState == ePureAscii) and
self._escDetector.search(self._mLastChar + aBuf)):
self._mInputState = eEscAscii
self._mLastChar = aBuf[-1:]
if self._mInputState == eEscAscii:
if not self._mEscCharSetProber:
self._mEscCharSetProber = EscCharSetProber()
if self._mEscCharSetProber.feed(aBuf) == constants.eFoundIt:
self.result = {'encoding': self._mEscCharSetProber.get_charset_name(),
'confidence': self._mEscCharSetProber.get_confidence()}
self.done = True
elif self._mInputState == eHighbyte:
if not self._mCharSetProbers:
self._mCharSetProbers = [MBCSGroupProber(), SBCSGroupProber(),
Latin1Prober()]
for prober in self._mCharSetProbers:
if prober.feed(aBuf) == constants.eFoundIt:
self.result = {'encoding': prober.get_charset_name(),
'confidence': prober.get_confidence()}
self.done = True
break
def close(self):
if self.done:
return
if not self._mGotData:
if constants._debug:
sys.stderr.write('no data received!\n')
return
self.done = True
if self._mInputState == ePureAscii:
self.result = {'encoding': 'ascii', 'confidence': 1.0}
return self.result
if self._mInputState == eHighbyte:
proberConfidence = None
maxProberConfidence = 0.0
maxProber = None
for prober in self._mCharSetProbers:
if not prober:
continue
proberConfidence = prober.get_confidence()
if proberConfidence > maxProberConfidence:
maxProberConfidence = proberConfidence
maxProber = prober
if maxProber and (maxProberConfidence > MINIMUM_THRESHOLD):
self.result = {'encoding': maxProber.get_charset_name(),
'confidence': maxProber.get_confidence()}
return self.result
if constants._debug:
            sys.stderr.write('no probers hit minimum threshold\n')
for prober in self._mCharSetProbers[0].mProbers:
if not prober:
continue
sys.stderr.write('%s confidence = %s\n' %
(prober.get_charset_name(),
prober.get_confidence()))
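# Usage sketch (the input file name is illustrative):
#     detector = UniversalDetector()
#     for chunk in open('unknown.txt', 'rb'):
#         detector.feed(chunk)
#         if detector.done:
#             break
#     detector.close()
#     result = detector.result  # e.g. {'encoding': 'utf-8', 'confidence': 0.99}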
| mit | -4,015,657,032,522,133,500 | 39.235294 | 86 | 0.556579 | false |
xindus40223115/w16b_test | static/Brython3.1.0-20150301-090019/Lib/unittest/test/test_assertions.py | 738 | 15398 | import datetime
import warnings
import unittest
from itertools import product
class Test_Assertions(unittest.TestCase):
def test_AlmostEqual(self):
self.assertAlmostEqual(1.00000001, 1.0)
self.assertNotAlmostEqual(1.0000001, 1.0)
self.assertRaises(self.failureException,
self.assertAlmostEqual, 1.0000001, 1.0)
self.assertRaises(self.failureException,
self.assertNotAlmostEqual, 1.00000001, 1.0)
self.assertAlmostEqual(1.1, 1.0, places=0)
self.assertRaises(self.failureException,
self.assertAlmostEqual, 1.1, 1.0, places=1)
self.assertAlmostEqual(0, .1+.1j, places=0)
self.assertNotAlmostEqual(0, .1+.1j, places=1)
self.assertRaises(self.failureException,
self.assertAlmostEqual, 0, .1+.1j, places=1)
self.assertRaises(self.failureException,
self.assertNotAlmostEqual, 0, .1+.1j, places=0)
self.assertAlmostEqual(float('inf'), float('inf'))
self.assertRaises(self.failureException, self.assertNotAlmostEqual,
float('inf'), float('inf'))
def test_AmostEqualWithDelta(self):
self.assertAlmostEqual(1.1, 1.0, delta=0.5)
self.assertAlmostEqual(1.0, 1.1, delta=0.5)
self.assertNotAlmostEqual(1.1, 1.0, delta=0.05)
self.assertNotAlmostEqual(1.0, 1.1, delta=0.05)
self.assertRaises(self.failureException, self.assertAlmostEqual,
1.1, 1.0, delta=0.05)
self.assertRaises(self.failureException, self.assertNotAlmostEqual,
1.1, 1.0, delta=0.5)
self.assertRaises(TypeError, self.assertAlmostEqual,
1.1, 1.0, places=2, delta=2)
self.assertRaises(TypeError, self.assertNotAlmostEqual,
1.1, 1.0, places=2, delta=2)
first = datetime.datetime.now()
second = first + datetime.timedelta(seconds=10)
self.assertAlmostEqual(first, second,
delta=datetime.timedelta(seconds=20))
self.assertNotAlmostEqual(first, second,
delta=datetime.timedelta(seconds=5))
def test_assertRaises(self):
def _raise(e):
raise e
self.assertRaises(KeyError, _raise, KeyError)
self.assertRaises(KeyError, _raise, KeyError("key"))
try:
self.assertRaises(KeyError, lambda: None)
except self.failureException as e:
self.assertIn("KeyError not raised", str(e))
else:
self.fail("assertRaises() didn't fail")
try:
self.assertRaises(KeyError, _raise, ValueError)
except ValueError:
pass
else:
self.fail("assertRaises() didn't let exception pass through")
with self.assertRaises(KeyError) as cm:
try:
raise KeyError
except Exception as e:
exc = e
raise
self.assertIs(cm.exception, exc)
with self.assertRaises(KeyError):
raise KeyError("key")
try:
with self.assertRaises(KeyError):
pass
except self.failureException as e:
self.assertIn("KeyError not raised", str(e))
else:
self.fail("assertRaises() didn't fail")
try:
with self.assertRaises(KeyError):
raise ValueError
except ValueError:
pass
else:
self.fail("assertRaises() didn't let exception pass through")
def testAssertNotRegex(self):
self.assertNotRegex('Ala ma kota', r'r+')
try:
self.assertNotRegex('Ala ma kota', r'k.t', 'Message')
except self.failureException as e:
self.assertIn("'kot'", e.args[0])
self.assertIn('Message', e.args[0])
else:
self.fail('assertNotRegex should have failed.')
class TestLongMessage(unittest.TestCase):
"""Test that the individual asserts honour longMessage.
This actually tests all the message behaviour for
asserts that use longMessage."""
def setUp(self):
class TestableTestFalse(unittest.TestCase):
longMessage = False
failureException = self.failureException
def testTest(self):
pass
class TestableTestTrue(unittest.TestCase):
longMessage = True
failureException = self.failureException
def testTest(self):
pass
self.testableTrue = TestableTestTrue('testTest')
self.testableFalse = TestableTestFalse('testTest')
def testDefault(self):
self.assertTrue(unittest.TestCase.longMessage)
def test_formatMsg(self):
self.assertEqual(self.testableFalse._formatMessage(None, "foo"), "foo")
self.assertEqual(self.testableFalse._formatMessage("foo", "bar"), "foo")
self.assertEqual(self.testableTrue._formatMessage(None, "foo"), "foo")
self.assertEqual(self.testableTrue._formatMessage("foo", "bar"), "bar : foo")
# This blows up if _formatMessage uses string concatenation
self.testableTrue._formatMessage(object(), 'foo')
def test_formatMessage_unicode_error(self):
one = ''.join(chr(i) for i in range(255))
# this used to cause a UnicodeDecodeError constructing msg
self.testableTrue._formatMessage(one, '\uFFFD')
def assertMessages(self, methodName, args, errors):
"""
Check that methodName(*args) raises the correct error messages.
errors should be a list of 4 regex that match the error when:
1) longMessage = False and no msg passed;
2) longMessage = False and msg passed;
3) longMessage = True and no msg passed;
4) longMessage = True and msg passed;
"""
def getMethod(i):
useTestableFalse = i < 2
if useTestableFalse:
test = self.testableFalse
else:
test = self.testableTrue
return getattr(test, methodName)
for i, expected_regex in enumerate(errors):
testMethod = getMethod(i)
kwargs = {}
withMsg = i % 2
if withMsg:
kwargs = {"msg": "oops"}
with self.assertRaisesRegex(self.failureException,
expected_regex=expected_regex):
testMethod(*args, **kwargs)
def testAssertTrue(self):
self.assertMessages('assertTrue', (False,),
["^False is not true$", "^oops$", "^False is not true$",
"^False is not true : oops$"])
def testAssertFalse(self):
self.assertMessages('assertFalse', (True,),
["^True is not false$", "^oops$", "^True is not false$",
"^True is not false : oops$"])
def testNotEqual(self):
self.assertMessages('assertNotEqual', (1, 1),
["^1 == 1$", "^oops$", "^1 == 1$",
"^1 == 1 : oops$"])
def testAlmostEqual(self):
self.assertMessages('assertAlmostEqual', (1, 2),
["^1 != 2 within 7 places$", "^oops$",
"^1 != 2 within 7 places$", "^1 != 2 within 7 places : oops$"])
def testNotAlmostEqual(self):
self.assertMessages('assertNotAlmostEqual', (1, 1),
["^1 == 1 within 7 places$", "^oops$",
"^1 == 1 within 7 places$", "^1 == 1 within 7 places : oops$"])
def test_baseAssertEqual(self):
self.assertMessages('_baseAssertEqual', (1, 2),
["^1 != 2$", "^oops$", "^1 != 2$", "^1 != 2 : oops$"])
def testAssertSequenceEqual(self):
# Error messages are multiline so not testing on full message
# assertTupleEqual and assertListEqual delegate to this method
self.assertMessages('assertSequenceEqual', ([], [None]),
["\+ \[None\]$", "^oops$", r"\+ \[None\]$",
r"\+ \[None\] : oops$"])
def testAssertSetEqual(self):
self.assertMessages('assertSetEqual', (set(), set([None])),
["None$", "^oops$", "None$",
"None : oops$"])
def testAssertIn(self):
self.assertMessages('assertIn', (None, []),
['^None not found in \[\]$', "^oops$",
'^None not found in \[\]$',
'^None not found in \[\] : oops$'])
def testAssertNotIn(self):
self.assertMessages('assertNotIn', (None, [None]),
['^None unexpectedly found in \[None\]$', "^oops$",
'^None unexpectedly found in \[None\]$',
'^None unexpectedly found in \[None\] : oops$'])
def testAssertDictEqual(self):
self.assertMessages('assertDictEqual', ({}, {'key': 'value'}),
[r"\+ \{'key': 'value'\}$", "^oops$",
"\+ \{'key': 'value'\}$",
"\+ \{'key': 'value'\} : oops$"])
def testAssertDictContainsSubset(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
self.assertMessages('assertDictContainsSubset', ({'key': 'value'}, {}),
["^Missing: 'key'$", "^oops$",
"^Missing: 'key'$",
"^Missing: 'key' : oops$"])
def testAssertMultiLineEqual(self):
self.assertMessages('assertMultiLineEqual', ("", "foo"),
[r"\+ foo$", "^oops$",
r"\+ foo$",
r"\+ foo : oops$"])
def testAssertLess(self):
self.assertMessages('assertLess', (2, 1),
["^2 not less than 1$", "^oops$",
"^2 not less than 1$", "^2 not less than 1 : oops$"])
def testAssertLessEqual(self):
self.assertMessages('assertLessEqual', (2, 1),
["^2 not less than or equal to 1$", "^oops$",
"^2 not less than or equal to 1$",
"^2 not less than or equal to 1 : oops$"])
def testAssertGreater(self):
self.assertMessages('assertGreater', (1, 2),
["^1 not greater than 2$", "^oops$",
"^1 not greater than 2$",
"^1 not greater than 2 : oops$"])
def testAssertGreaterEqual(self):
self.assertMessages('assertGreaterEqual', (1, 2),
["^1 not greater than or equal to 2$", "^oops$",
"^1 not greater than or equal to 2$",
"^1 not greater than or equal to 2 : oops$"])
def testAssertIsNone(self):
self.assertMessages('assertIsNone', ('not None',),
["^'not None' is not None$", "^oops$",
"^'not None' is not None$",
"^'not None' is not None : oops$"])
def testAssertIsNotNone(self):
self.assertMessages('assertIsNotNone', (None,),
["^unexpectedly None$", "^oops$",
"^unexpectedly None$",
"^unexpectedly None : oops$"])
def testAssertIs(self):
self.assertMessages('assertIs', (None, 'foo'),
["^None is not 'foo'$", "^oops$",
"^None is not 'foo'$",
"^None is not 'foo' : oops$"])
def testAssertIsNot(self):
self.assertMessages('assertIsNot', (None, None),
["^unexpectedly identical: None$", "^oops$",
"^unexpectedly identical: None$",
"^unexpectedly identical: None : oops$"])
def assertMessagesCM(self, methodName, args, func, errors):
"""
Check that the correct error messages are raised while executing:
with method(*args):
func()
*errors* should be a list of 4 regex that match the error when:
1) longMessage = False and no msg passed;
2) longMessage = False and msg passed;
3) longMessage = True and no msg passed;
4) longMessage = True and msg passed;
"""
p = product((self.testableFalse, self.testableTrue),
({}, {"msg": "oops"}))
for (cls, kwargs), err in zip(p, errors):
method = getattr(cls, methodName)
with self.assertRaisesRegex(cls.failureException, err):
with method(*args, **kwargs) as cm:
func()
def testAssertRaises(self):
self.assertMessagesCM('assertRaises', (TypeError,), lambda: None,
['^TypeError not raised$', '^oops$',
'^TypeError not raised$',
'^TypeError not raised : oops$'])
def testAssertRaisesRegex(self):
# test error not raised
self.assertMessagesCM('assertRaisesRegex', (TypeError, 'unused regex'),
lambda: None,
['^TypeError not raised$', '^oops$',
'^TypeError not raised$',
'^TypeError not raised : oops$'])
# test error raised but with wrong message
def raise_wrong_message():
raise TypeError('foo')
self.assertMessagesCM('assertRaisesRegex', (TypeError, 'regex'),
raise_wrong_message,
['^"regex" does not match "foo"$', '^oops$',
'^"regex" does not match "foo"$',
'^"regex" does not match "foo" : oops$'])
def testAssertWarns(self):
self.assertMessagesCM('assertWarns', (UserWarning,), lambda: None,
['^UserWarning not triggered$', '^oops$',
'^UserWarning not triggered$',
'^UserWarning not triggered : oops$'])
def testAssertWarnsRegex(self):
# test error not raised
self.assertMessagesCM('assertWarnsRegex', (UserWarning, 'unused regex'),
lambda: None,
['^UserWarning not triggered$', '^oops$',
'^UserWarning not triggered$',
'^UserWarning not triggered : oops$'])
# test warning raised but with wrong message
def raise_wrong_message():
warnings.warn('foo')
self.assertMessagesCM('assertWarnsRegex', (UserWarning, 'regex'),
raise_wrong_message,
['^"regex" does not match "foo"$', '^oops$',
'^"regex" does not match "foo"$',
'^"regex" does not match "foo" : oops$'])
| gpl-3.0 | -6,973,187,124,517,343,000 | 41.891365 | 92 | 0.51195 | false |
akosel/incubator-airflow | airflow/www_rbac/decorators.py | 9 | 4418 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import gzip
import functools
import pendulum
from io import BytesIO as IO
from flask import after_this_request, redirect, request, url_for, g
from airflow import models, settings
def action_logging(f):
"""
Decorator to log user actions
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
session = settings.Session()
if g.user.is_anonymous():
user = 'anonymous'
else:
user = g.user.username
log = models.Log(
event=f.__name__,
task_instance=None,
owner=user,
extra=str(list(request.args.items())),
task_id=request.args.get('task_id'),
dag_id=request.args.get('dag_id'))
if 'execution_date' in request.args:
log.execution_date = pendulum.parse(
request.args.get('execution_date'))
session.add(log)
session.commit()
return f(*args, **kwargs)
return wrapper
def gzipped(f):
"""
Decorator to make a view compressed
"""
@functools.wraps(f)
def view_func(*args, **kwargs):
@after_this_request
def zipper(response):
accept_encoding = request.headers.get('Accept-Encoding', '')
if 'gzip' not in accept_encoding.lower():
return response
response.direct_passthrough = False
if (response.status_code < 200 or response.status_code >= 300 or
'Content-Encoding' in response.headers):
return response
gzip_buffer = IO()
gzip_file = gzip.GzipFile(mode='wb',
fileobj=gzip_buffer)
gzip_file.write(response.data)
gzip_file.close()
response.data = gzip_buffer.getvalue()
response.headers['Content-Encoding'] = 'gzip'
response.headers['Vary'] = 'Accept-Encoding'
response.headers['Content-Length'] = len(response.data)
return response
return f(*args, **kwargs)
return view_func
def has_dag_access(**dag_kwargs):
"""
Decorator to check whether the user has read / write permission on the dag.
"""
def decorator(f):
@functools.wraps(f)
def wrapper(self, *args, **kwargs):
has_access = self.appbuilder.sm.has_access
dag_id = request.args.get('dag_id')
# if it is false, we need to check whether user has write access on the dag
can_dag_edit = dag_kwargs.get('can_dag_edit', False)
# 1. check whether the user has can_dag_edit permissions on all_dags
# 2. if 1 false, check whether the user
# has can_dag_edit permissions on the dag
# 3. if 2 false, check whether it is can_dag_read view,
# and whether user has the permissions
if (
has_access('can_dag_edit', 'all_dags') or
has_access('can_dag_edit', dag_id) or (not can_dag_edit and
(has_access('can_dag_read',
'all_dags') or
has_access('can_dag_read',
dag_id)))):
return f(self, *args, **kwargs)
else:
return redirect(url_for(self.appbuilder.sm.auth_view.
__class__.__name__ + ".login"))
return wrapper
return decorator
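# Illustrative usage sketch (not part of the original module): how a
# Flask-AppBuilder view method in www_rbac might combine the decorators above.
# The view class, endpoint and template names are assumptions.
#
#     class DagActionView(AppBuilderBaseView):
#         @expose('/action')
#         @has_dag_access(can_dag_edit=True)
#         @action_logging
#         @gzipped
#         def action(self):
#             return self.render_template('dag_action.html')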
| apache-2.0 | -9,051,525,222,329,713,000 | 34.344 | 87 | 0.562698 | false |
kuangrewawa/OnosFw | tools/test/topos/obelisk.py | 38 | 2612 | #!/usr/bin/env python
from mininet.topo import Topo
from mininet.net import Mininet
from mininet.node import RemoteController
from mininet.cli import CLI
from mininet.log import setLogLevel
class ObeliskTopo( Topo ):
def __init__( self ):
Topo.__init__( self )
topSwitch = self.addSwitch('s1',dpid='1000'.zfill(16))
leftTopSwitch = self.addSwitch('s2',dpid='2000'.zfill(16))
rightTopSwitch = self.addSwitch('s5',dpid='5000'.zfill(16))
leftBotSwitch = self.addSwitch('s3',dpid='3000'.zfill(16))
rightBotSwitch = self.addSwitch('s6',dpid='6000'.zfill(16))
midBotSwitch = self.addSwitch('s28',dpid='2800'.zfill(16))
topHost = self.addHost( 'h1' )
leftTopHost = self.addHost('h2')
rightTopHost = self.addHost('h5')
leftBotHost = self.addHost('h3')
rightBotHost = self.addHost('h6')
midBotHost = self.addHost('h28')
self.addLink(topSwitch,topHost)
self.addLink(leftTopSwitch,leftTopHost)
self.addLink(rightTopSwitch,rightTopHost)
self.addLink(leftBotSwitch,leftBotHost)
self.addLink(rightBotSwitch,rightBotHost)
self.addLink(midBotSwitch,midBotHost)
self.addLink(leftTopSwitch,rightTopSwitch)
self.addLink(topSwitch,leftTopSwitch)
self.addLink(topSwitch,rightTopSwitch)
self.addLink(leftTopSwitch,leftBotSwitch)
self.addLink(rightTopSwitch,rightBotSwitch)
self.addLink(leftBotSwitch,midBotSwitch)
self.addLink(midBotSwitch,rightBotSwitch)
agg1Switch = self.addSwitch('s4',dpid = '3004'.zfill(16))
agg2Switch = self.addSwitch('s7',dpid = '6007'.zfill(16))
agg1Host = self.addHost('h4')
agg2Host = self.addHost('h7')
self.addLink(agg1Switch,agg1Host)
self.addLink(agg2Switch,agg2Host)
self.addLink(agg1Switch, leftBotSwitch)
self.addLink(agg2Switch, rightBotSwitch)
for i in range(10):
num = str(i+8)
switch = self.addSwitch('s'+num,dpid = ('30'+num.zfill(2)).zfill(16))
host = self.addHost('h'+num)
self.addLink(switch, host)
self.addLink(switch, agg1Switch)
for i in range(10):
num = str(i+18)
switch = self.addSwitch('s'+num,dpid = ('60'+num.zfill(2)).zfill(16))
host = self.addHost('h'+num)
self.addLink(switch, host)
self.addLink(switch, agg2Switch)
topos = { 'obelisk': (lambda: ObeliskTopo() ) }
def run():
topo = ObeliskTopo()
net = Mininet( topo=topo, controller=RemoteController, autoSetMacs=True )
net.start()
CLI( net )
net.stop()
if __name__ == '__main__':
setLogLevel( 'info' )
run()
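# Illustrative invocation (a sketch; the controller address and port are
# assumptions for a locally running OpenFlow/ONOS controller):
#
#     sudo mn --custom obelisk.py --topo obelisk \
#         --controller=remote,ip=127.0.0.1,port=6653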
| apache-2.0 | 1,814,563,503,752,382,500 | 36.314286 | 81 | 0.617534 | false |
CeltonMcGrath/TACTIC | src/pyasm/biz/preference.py | 6 | 3572 | ###########################################################
#
# Copyright (c) 2005, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
__all__ = ['PrefSetting', 'PrefList']
from pyasm.search import SObject, Search, DatabaseException
from pyasm.common import Container, TacticException, Environment
class PrefList(SObject):
'''Defines all of the pref settings in the Admin area'''
SEARCH_TYPE = "sthpw/pref_list"
def get_value_by_key(cls, key, search_type=None):
prod_setting = cls.get_by_key(key, search_type)
value = ""
if prod_setting:
value = prod_setting.get_value("options")
return value
get_value_by_key = classmethod(get_value_by_key)
def get_by_key(cls, key, search_type=None):
dict_key = '%s:%s' %(key, search_type)
cached = cls.get_cached_obj(dict_key)
if cached:
return cached
search = Search(cls.SEARCH_TYPE)
search.add_filter("key", key)
if search_type:
search.add_filter("search_type", search_type)
prod_setting = search.get_sobject()
dict = cls.get_cache_dict()
dict[dict_key] = prod_setting
return prod_setting
get_by_key = classmethod(get_by_key)
class PrefSetting(PrefList):
    '''Defines all of the user settings for a given production'''
SEARCH_TYPE = "sthpw/pref_setting"
def get_value_by_key(cls, key, user=None):
''' get the value of this pref '''
#try:
# from pyasm.biz import SearchTypeCache
# cache = SearchTypeCache.get(cls.SEARCH_TYPE)
#except Exception:
# print "WARNING: Cache not enabled"
# protect against database connection issues (This is called at a very
# low level, so it needs this)
try:
pref_setting = cls.get_by_key(key,user)
value = ''
if pref_setting:
value = pref_setting.get_value("value")
except DatabaseException:
value = ''
return value
get_value_by_key = classmethod(get_value_by_key)
def get_by_key(cls, key, user=None):
if not user:
user = Environment.get_user_name()
# ignore the project_code column for now
dict_key = '%s:%s' %(cls.SEARCH_TYPE, user)
settings_dict = Container.get(dict_key)
# explicit check for None
if settings_dict == None:
settings_dict = {}
Container.put(dict_key, settings_dict)
search = Search(cls.SEARCH_TYPE)
search.add_filter("login", user)
# don't filter with the key in order to build a dict
pref_settings = search.get_sobjects()
for setting in pref_settings:
settings_dict[setting.get_value('key')] = setting
pref_setting = settings_dict.get(key)
return pref_setting
get_by_key = classmethod(get_by_key)
def create(cls, key, value):
setting = cls.get_by_key(key)
if not setting:
setting = PrefSetting.create_new()
setting.set_value("key", key)
user = Environment.get_user_name()
setting.set_value("login", user)
setting.set_value("value", value)
setting.commit()
return setting
create = classmethod(create)
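def _example_pref_setting_usage():
    # Illustrative sketch only (not part of the original module): requires a
    # configured TACTIC environment with a database connection and a logged-in
    # user.  The preference key and value are assumptions.
    PrefSetting.create('skin', 'dark')
    return PrefSetting.get_value_by_key('skin')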
| epl-1.0 | -4,967,089,896,131,039,000 | 27.125984 | 78 | 0.581747 | false |
AnuchitPrasertsang/robotframework-selenium2library | src/Selenium2Library/locators/tableelementfinder.py | 31 | 3986 | from selenium.common.exceptions import NoSuchElementException
from Selenium2Library import utils
from elementfinder import ElementFinder
class TableElementFinder(object):
def __init__(self, element_finder=None):
if not element_finder:
element_finder = ElementFinder()
self._element_finder = element_finder
self._locator_suffixes = {
('css', 'default'): [''],
('css', 'content'): [''],
('css', 'header'): [' th'],
('css', 'footer'): [' tfoot td'],
('css', 'row'): [' tr:nth-child(%s)'],
('css', 'col'): [' tr td:nth-child(%s)', ' tr th:nth-child(%s)'],
('jquery', 'default'): [''],
('jquery', 'content'): [''],
('jquery', 'header'): [' th'],
('jquery', 'footer'): [' tfoot td'],
('jquery', 'row'): [' tr:nth-child(%s)'],
('jquery', 'col'): [' tr td:nth-child(%s)', ' tr th:nth-child(%s)'],
('sizzle', 'default'): [''],
('sizzle', 'content'): [''],
('sizzle', 'header'): [' th'],
('sizzle', 'footer'): [' tfoot td'],
('sizzle', 'row'): [' tr:nth-child(%s)'],
('sizzle', 'col'): [' tr td:nth-child(%s)', ' tr th:nth-child(%s)'],
('xpath', 'default'): [''],
('xpath', 'content'): ['//*'],
('xpath', 'header'): ['//th'],
('xpath', 'footer'): ['//tfoot//td'],
('xpath', 'row'): ['//tr[%s]//*'],
('xpath', 'col'): ['//tr//*[self::td or self::th][%s]']
        }
def find(self, browser, table_locator):
locators = self._parse_table_locator(table_locator, 'default')
return self._search_in_locators(browser, locators, None)
def find_by_content(self, browser, table_locator, content):
locators = self._parse_table_locator(table_locator, 'content')
return self._search_in_locators(browser, locators, content)
def find_by_header(self, browser, table_locator, content):
locators = self._parse_table_locator(table_locator, 'header')
return self._search_in_locators(browser, locators, content)
def find_by_footer(self, browser, table_locator, content):
locators = self._parse_table_locator(table_locator, 'footer')
return self._search_in_locators(browser, locators, content)
def find_by_row(self, browser, table_locator, col, content):
locators = self._parse_table_locator(table_locator, 'row')
locators = [locator % str(col) for locator in locators]
return self._search_in_locators(browser, locators, content)
def find_by_col(self, browser, table_locator, col, content):
locators = self._parse_table_locator(table_locator, 'col')
locators = [locator % str(col) for locator in locators]
return self._search_in_locators(browser, locators, content)
def _parse_table_locator(self, table_locator, location_method):
if table_locator.startswith('xpath='):
table_locator_type = 'xpath'
elif table_locator.startswith('jquery=') or table_locator.startswith('sizzle='):
table_locator_type = 'sizzle'
else:
if not table_locator.startswith('css='):
table_locator = "css=table#%s" % table_locator
table_locator_type = 'css'
locator_suffixes = self._locator_suffixes[(table_locator_type, location_method)]
return map(
lambda locator_suffix: table_locator + locator_suffix,
locator_suffixes)
def _search_in_locators(self, browser, locators, content):
for locator in locators:
elements = self._element_finder.find(browser, locator)
for element in elements:
if content is None: return element
element_text = element.text
if element_text and content in element_text:
return element
return None
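def _example_table_element_finder_usage(browser):
    # Illustrative sketch only (not part of the original module): *browser* is
    # assumed to be a selenium WebDriver instance with a <table id="orders">
    # already loaded; the column index and cell text are assumptions.
    finder = TableElementFinder()
    # "orders" is shorthand for the locator "css=table#orders" (see
    # _parse_table_locator above); returns the first cell in column 2 whose
    # text contains "Widget", or None if no such cell exists.
    return finder.find_by_col(browser, 'orders', 2, 'Widget')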
| apache-2.0 | -1,989,258,441,963,690,500 | 42.326087 | 88 | 0.558956 | false |
Ldpe2G/mxnet | python/mxnet/rtc.py | 15 | 4121 | """Interface to runtime cuda kernel compile module."""
from __future__ import absolute_import
import ctypes
from .base import _LIB, NDArrayHandle, RtcHandle, mx_uint, c_array, check_call
class Rtc(object):
"""MXRtc object in mxnet.
This class allow you to write CUDA kernels in Python
and call them with NDArray.
Parameters
----------
name : str
Name of the kernel.
inputs : tuple of (str, mxnet.ndarray)
List of input names and ndarray.
outputs : tuple of (str, mxnet.ndarray)
List of output names and ndarray.
kernel : str
The actual kernel code.
Note that this is only the body of the kernel, i.e.
after { and before }. Rtc will decorate the kernel.
For example, if ``name = "mykernel"`` and
inputs = [('x', mx.nd.zeros((10,)))]
outputs = [('y', mx.nd.zeros((10,)))]
kernel = "y[threadIdx.x] = x[threadIdx.x];",
then the compiled kernel will be:
extern "C" __global__ mykernel(float *x, float *y) {
const int x_ndim = 1;
const int x_dims = { 10 };
const int y_ndim = 1;
const int y_dims = { 10 };
y[threadIdx.x] = x[threadIdx.x];
}
"""
def __init__(self, name, inputs, outputs, kernel):
self.handle = RtcHandle()
input_names = ctypes.cast(c_array(ctypes.c_char_p, [i[0] for i in inputs]),
ctypes.POINTER(ctypes.c_char_p))
output_names = ctypes.cast(c_array(ctypes.c_char_p, [i[0] for i in outputs]),
ctypes.POINTER(ctypes.c_char_p))
input_nds = ctypes.cast(c_array(NDArrayHandle, [i[1].handle for i in inputs]),
ctypes.POINTER(NDArrayHandle))
output_nds = ctypes.cast(c_array(NDArrayHandle, [i[1].handle for i in outputs]),
ctypes.POINTER(NDArrayHandle))
check_call(_LIB.MXRtcCreate(ctypes.c_char_p(name),
mx_uint(len(inputs)),
mx_uint(len(outputs)),
input_names,
output_names,
input_nds,
output_nds,
ctypes.c_char_p(kernel),
ctypes.byref(self.handle)))
def __del__(self):
check_call(_LIB.MXRtcFree(self.handle))
def push(self, inputs, outputs, grid_dims, block_dims):
"""Run the kernel.
Parameters
----------
inputs : list of NDArray
List of inputs. Can contain different NDArrays than those used for the constructor,
but its elements must have the same shapes and appear in the same order.
outputs : list of NDArray
List of outputs. Can contain different ndarrays than used for the constructor,
but must have the same shapes and appear in the same order.
grid_dims : tuple of 3 uint
Grid dimension for kernel launch.
block_dims : tuple of 3 uint
Block dimension for kernel launch.
"""
input_nds = ctypes.cast(c_array(NDArrayHandle, [i.handle for i in inputs]),
ctypes.POINTER(NDArrayHandle))
output_nds = ctypes.cast(c_array(NDArrayHandle, [i.handle for i in outputs]),
ctypes.POINTER(NDArrayHandle))
check_call(_LIB.MXRtcPush(self.handle,
mx_uint(len(inputs)),
mx_uint(len(outputs)),
input_nds,
output_nds,
mx_uint(grid_dims[0]),
mx_uint(grid_dims[1]),
mx_uint(grid_dims[2]),
mx_uint(block_dims[0]),
mx_uint(block_dims[1]),
mx_uint(block_dims[2])))
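def _example_rtc_usage():
    # Illustrative sketch only (not part of the original module): assumes an
    # MXNet build with CUDA runtime compilation enabled and at least one
    # visible GPU.  Mirrors the kernel shown in the Rtc class docstring above.
    import mxnet as mx
    x = mx.nd.zeros((10,), ctx=mx.gpu(0))
    y = mx.nd.zeros((10,), ctx=mx.gpu(0))
    kernel = Rtc('mykernel', [('x', x)], [('y', y)],
                 "y[threadIdx.x] = x[threadIdx.x];")
    kernel.push([x], [y], (1, 1, 1), (10, 1, 1))
    return y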
| apache-2.0 | -3,610,586,737,455,926,000 | 44.285714 | 95 | 0.499879 | false |
idem2lyon/persomov | libs/cache/__init__.py | 99 | 8343 | """
copied from
werkzeug.contrib.cache
~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from cache.posixemulation import rename
from itertools import izip
from time import time
import os
import re
import tempfile
try:
from hashlib import md5
except ImportError:
from md5 import new as md5
try:
import cPickle as pickle
except ImportError:
import pickle
def _items(mappingorseq):
"""Wrapper for efficient iteration over mappings represented by dicts
or sequences::
>>> for k, v in _items((i, i*i) for i in xrange(5)):
... assert k*k == v
>>> for k, v in _items(dict((i, i*i) for i in xrange(5))):
... assert k*k == v
"""
return mappingorseq.iteritems() if hasattr(mappingorseq, 'iteritems') \
else mappingorseq
class BaseCache(object):
"""Baseclass for the cache systems. All the cache systems implement this
API or a superset of it.
:param default_timeout: the default timeout that is used if no timeout is
specified on :meth:`set`.
"""
def __init__(self, default_timeout = 300):
self.default_timeout = default_timeout
def delete(self, key):
"""Deletes `key` from the cache. If it does not exist in the cache
nothing happens.
:param key: the key to delete.
"""
pass
def get_many(self, *keys):
"""Returns a list of values for the given keys.
For each key a item in the list is created. Example::
foo, bar = cache.get_many("foo", "bar")
If a key can't be looked up `None` is returned for that key
instead.
:param keys: The function accepts multiple keys as positional
arguments.
"""
return map(self.get, keys)
def get_dict(self, *keys):
"""Works like :meth:`get_many` but returns a dict::
d = cache.get_dict("foo", "bar")
foo = d["foo"]
bar = d["bar"]
:param keys: The function accepts multiple keys as positional
arguments.
"""
return dict(izip(keys, self.get_many(*keys)))
def set(self, key, value, timeout = None):
"""Adds a new key/value to the cache (overwrites value, if key already
exists in the cache).
:param key: the key to set
:param value: the value for the key
:param timeout: the cache timeout for the key (if not specified,
it uses the default timeout).
"""
pass
def add(self, key, value, timeout = None):
"""Works like :meth:`set` but does not overwrite the values of already
existing keys.
:param key: the key to set
:param value: the value for the key
:param timeout: the cache timeout for the key or the default
timeout if not specified.
"""
pass
def set_many(self, mapping, timeout = None):
"""Sets multiple keys and values from a mapping.
:param mapping: a mapping with the keys/values to set.
:param timeout: the cache timeout for the key (if not specified,
it uses the default timeout).
"""
for key, value in _items(mapping):
self.set(key, value, timeout)
def delete_many(self, *keys):
"""Deletes multiple keys at once.
:param keys: The function accepts multiple keys as positional
arguments.
"""
for key in keys:
self.delete(key)
def clear(self):
"""Clears the cache. Keep in mind that not all caches support
completely clearing the cache.
"""
pass
def inc(self, key, delta = 1):
"""Increments the value of a key by `delta`. If the key does
not yet exist it is initialized with `delta`.
For supporting caches this is an atomic operation.
:param key: the key to increment.
:param delta: the delta to add.
"""
self.set(key, (self.get(key) or 0) + delta)
def dec(self, key, delta = 1):
"""Decrements the value of a key by `delta`. If the key does
not yet exist it is initialized with `-delta`.
For supporting caches this is an atomic operation.
:param key: the key to increment.
:param delta: the delta to subtract.
"""
self.set(key, (self.get(key) or 0) - delta)
class FileSystemCache(BaseCache):
"""A cache that stores the items on the file system. This cache depends
on being the only user of the `cache_dir`. Make absolutely sure that
nobody but this cache stores files there or otherwise the cache will
randomly delete files therein.
:param cache_dir: the directory where cache files are stored.
:param threshold: the maximum number of items the cache stores before
it starts deleting some.
:param default_timeout: the default timeout that is used if no timeout is
specified on :meth:`~BaseCache.set`.
:param mode: the file mode wanted for the cache files, default 0600
"""
#: used for temporary files by the FileSystemCache
_fs_transaction_suffix = '.__wz_cache'
def __init__(self, cache_dir, threshold = 500, default_timeout = 300, mode = 0600):
BaseCache.__init__(self, default_timeout)
self._path = cache_dir
self._threshold = threshold
self._mode = mode
if not os.path.exists(self._path):
os.makedirs(self._path)
def _list_dir(self):
"""return a list of (fully qualified) cache filenames
"""
return [os.path.join(self._path, fn) for fn in os.listdir(self._path)
if not fn.endswith(self._fs_transaction_suffix)]
def _prune(self):
entries = self._list_dir()
if len(entries) > self._threshold:
now = time()
for idx, fname in enumerate(entries):
remove = False
f = None
try:
try:
f = open(fname, 'rb')
expires = pickle.load(f)
remove = expires <= now or idx % 3 == 0
finally:
if f is not None:
f.close()
except Exception:
pass
if remove:
try:
os.remove(fname)
except (IOError, OSError):
pass
def clear(self):
for fname in self._list_dir():
try:
os.remove(fname)
except (IOError, OSError):
pass
def _get_filename(self, key):
hash = md5(key).hexdigest()
return os.path.join(self._path, hash)
def get(self, key):
filename = self._get_filename(key)
try:
f = open(filename, 'rb')
try:
if pickle.load(f) >= time():
return pickle.load(f)
finally:
f.close()
os.remove(filename)
except Exception:
return None
def add(self, key, value, timeout = None):
filename = self._get_filename(key)
if not os.path.exists(filename):
self.set(key, value, timeout)
def set(self, key, value, timeout = None):
if timeout is None:
timeout = self.default_timeout
filename = self._get_filename(key)
self._prune()
try:
fd, tmp = tempfile.mkstemp(suffix = self._fs_transaction_suffix,
dir = self._path)
f = os.fdopen(fd, 'wb')
try:
pickle.dump(int(time() + timeout), f, 1)
pickle.dump(value, f, pickle.HIGHEST_PROTOCOL)
finally:
f.close()
rename(tmp, filename)
os.chmod(filename, self._mode)
except (IOError, OSError):
pass
def delete(self, key):
try:
os.remove(self._get_filename(key))
except (IOError, OSError):
pass
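def _example_filesystem_cache_usage():
    # Illustrative sketch only (not part of the original module): the cache
    # directory path, key and value are assumptions.
    cache = FileSystemCache('/tmp/example_cache', threshold = 500, default_timeout = 300)
    cache.set('greeting', {'text': 'hello'}, timeout = 60)
    value = cache.get('greeting')  # {'text': 'hello'} until the timeout expires
    cache.delete('greeting')
    return value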
| gpl-3.0 | -8,850,912,241,227,319,000 | 30.843511 | 87 | 0.548843 | false |
tkjone/guides-django | series_2/p_03/myproject/server/config/settings/base.py | 4 | 8713 | # -*- coding: utf-8 -*-
"""
Django settings for myproject project.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
from __future__ import absolute_import, unicode_literals
import environ
ROOT_DIR = environ.Path(__file__) - 4 # (/a/b/myfile.py - 3 = /)
APPS_DIR = ROOT_DIR.path('server')
env = environ.Env()
# ------------------------------------------------------------------------------
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env("DJANGO_SECRET_KEY", default='CHANGEME!!!')
# ------------------------------------------------------------------------------
# APP CONFIGURATION
# ------------------------------------------------------------------------------
DJANGO_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
THIRD_PARTY_APPS = (
# wagtail dependencies
'compressor',
'taggit',
'modelcluster',
# wagtail
'wagtail.wagtailcore',
'wagtail.wagtailadmin',
'wagtail.wagtaildocs',
'wagtail.wagtailsnippets',
'wagtail.wagtailusers',
'wagtail.wagtailimages',
'wagtail.wagtailsearch',
'wagtail.wagtailsites',
'wagtail.wagtailredirects',
'wagtail.wagtailforms',
)
LOCAL_APPS = (
'apps.wagtail.pages',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# ------------------------------------------------------------------------------
# MIDDLEWARE CONFIGURATION
# ------------------------------------------------------------------------------
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
# Wagtail Middleware
'wagtail.wagtailcore.middleware.SiteMiddleware',
'wagtail.wagtailredirects.middleware.RedirectMiddleware',
]
# ------------------------------------------------------------------------------
# DEBUG
# ------------------------------------------------------------------------------
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DJANGO_DEBUG", default=True)
# ------------------------------------------------------------------------------
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': env.db("DATABASE_URL", default="postgres://dev:dev@localhost/myproject")
}
# ------------------------------------------------------------------------------
# GENERAL CONFIGURATION
# ------------------------------------------------------------------------------
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
ALLOWED_HOSTS = []
# ------------------------------------------------------------------------------
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
str(APPS_DIR('templates'))
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
# ------------------------------------------------------------------------------
# STATIC FILE CONFIGURATION
# ------------------------------------------------------------------------------
STATIC_ROOT = str(ROOT_DIR.path('server/staticfiles'))
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
str(APPS_DIR.path('static')),
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
)
# ------------------------------------------------------------------------------
# MEDIA CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(ROOT_DIR('server/media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# ------------------------------------------------------------------------------
# URL Configuration
# ------------------------------------------------------------------------------
ROOT_URLCONF = 'config.urls'
# ------------------------------------------------------------------------------
# OTHER Configuration
# ------------------------------------------------------------------------------
ADMINS = (
("""{{cookiecutter.author_name}}""", '{{cookiecutter.email}}'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
WSGI_APPLICATION = 'config.wsgi.application'
# ------------------------------------------------------------------------------
# LOGGIN INFORMATION
# ------------------------------------------------------------------------------
LOG_DIR = env("LOG_DIR", default=str(ROOT_DIR('logs')))
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'formatters': {
'verbose': {
'format': "[%(levelname)s] -- %(asctime)s -- %(module)s:%(lineno)s ___ %(message)s >>> "
"{ process: %(process)d | thread: %(thread)d }",
'datefmt': "%b %e, %I:%M:%S %p"
},
'simple': {
'format': '[%(levelname)s] -- %(message)s'
},
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false', ],
'class': 'django.utils.log.AdminEmailHandler',
'include_html': True
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose',
},
'file_error': {
'level': 'ERROR',
            'class': 'logging.handlers.RotatingFileHandler',
'filename': LOG_DIR + '/server/django.log',
'maxBytes': 20 * 1024 * 1024,
'formatter': 'verbose'
},
},
'loggers': {
'django.request': {
'handlers': ['file_error', 'mail_admins', ],
'level': 'ERROR',
'propagate': True
},
'django.security.DisallowedHost': {
'level': 'ERROR',
'handlers': ['file_error', 'console', 'mail_admins', ],
'propagate': True
},
'development': {
'handlers': ['console', ],
'level': 'DEBUG',
'propagate': True
},
},
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# ------------------------------------------------------------------------------
# WAGTAIL SETTINGS
# ------------------------------------------------------------------------------
WAGTAIL_SITE_NAME = 'myproject'
WAGTAILADMIN_NOTIFICATION_FROM_EMAIL = True
TAGGIT_CASE_INSENSITIVE = True
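# ------------------------------------------------------------------------------
# EXAMPLE ENVIRONMENT OVERRIDES
# ------------------------------------------------------------------------------
# Illustrative values only (assumptions, not part of the original settings);
# each variable below is read via django-environ earlier in this module:
# DJANGO_SECRET_KEY=generate-a-long-random-string
# DJANGO_DEBUG=False
# DATABASE_URL=postgres://dev:dev@localhost/myproject
# LOG_DIR=/var/log/myproject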
| mit | 8,342,975,429,903,870,000 | 29.465035 | 100 | 0.473201 | false |
gangadhar-kadam/smrterp | erpnext/buying/doctype/purchase_order/purchase_order.py | 3 | 9024 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import cstr, flt
from frappe import msgprint, _, throw
from frappe.model.mapper import get_mapped_doc
from erpnext.controllers.buying_controller import BuyingController
class PurchaseOrder(BuyingController):
tname = 'Purchase Order Item'
fname = 'po_details'
def __init__(self, arg1, arg2=None):
super(PurchaseOrder, self).__init__(arg1, arg2)
self.status_updater = [{
'source_dt': 'Purchase Order Item',
'target_dt': 'Material Request Item',
'join_field': 'prevdoc_detail_docname',
'target_field': 'ordered_qty',
'target_parent_dt': 'Material Request',
'target_parent_field': 'per_ordered',
'target_ref_field': 'qty',
'source_field': 'qty',
'percent_join_field': 'prevdoc_docname',
'overflow_type': 'order'
}]
def validate(self):
super(PurchaseOrder, self).validate()
if not self.status:
self.status = "Draft"
from erpnext.utilities import validate_status
validate_status(self.status, ["Draft", "Submitted", "Stopped",
"Cancelled"])
pc_obj = frappe.get_doc('Purchase Common')
pc_obj.validate_for_items(self)
self.check_for_stopped_status(pc_obj)
self.validate_uom_is_integer("uom", "qty")
self.validate_uom_is_integer("stock_uom", ["qty", "required_qty"])
self.validate_with_previous_doc()
self.validate_for_subcontracting()
self.validate_minimum_order_qty()
self.create_raw_materials_supplied("po_raw_material_details")
def validate_with_previous_doc(self):
super(PurchaseOrder, self).validate_with_previous_doc(self.tname, {
"Supplier Quotation": {
"ref_dn_field": "supplier_quotation",
"compare_fields": [["supplier", "="], ["company", "="], ["currency", "="]],
},
"Supplier Quotation Item": {
"ref_dn_field": "supplier_quotation_item",
"compare_fields": [["rate", "="], ["project_name", "="], ["item_code", "="],
["uom", "="]],
"is_child_table": True
}
})
def validate_minimum_order_qty(self):
itemwise_min_order_qty = frappe._dict(frappe.db.sql("select name, min_order_qty from tabItem"))
for d in self.get("po_details"):
if flt(d.stock_qty) < flt(itemwise_min_order_qty.get(d.item_code)):
				frappe.throw(_("Row #{0}: Ordered qty cannot be less than item's minimum order qty (defined in item master).").format(d.idx))
def get_schedule_dates(self):
for d in self.get('po_details'):
if d.prevdoc_detail_docname and not d.schedule_date:
d.schedule_date = frappe.db.get_value("Material Request Item",
d.prevdoc_detail_docname, "schedule_date")
def get_last_purchase_rate(self):
frappe.get_doc('Purchase Common').get_last_purchase_rate(self)
# Check for Stopped status
def check_for_stopped_status(self, pc_obj):
check_list =[]
for d in self.get('po_details'):
if d.meta.get_field('prevdoc_docname') and d.prevdoc_docname and d.prevdoc_docname not in check_list:
check_list.append(d.prevdoc_docname)
pc_obj.check_for_stopped_status( d.prevdoc_doctype, d.prevdoc_docname)
def update_bin(self, is_submit, is_stopped = 0):
from erpnext.stock.utils import update_bin
pc_obj = frappe.get_doc('Purchase Common')
for d in self.get('po_details'):
#1. Check if is_stock_item == 'Yes'
if frappe.db.get_value("Item", d.item_code, "is_stock_item") == "Yes":
# this happens when item is changed from non-stock to stock item
if not d.warehouse:
continue
ind_qty, po_qty = 0, flt(d.qty) * flt(d.conversion_factor)
if is_stopped:
po_qty = flt(d.qty) > flt(d.received_qty) and \
flt( flt(flt(d.qty) - flt(d.received_qty))*flt(d.conversion_factor)) or 0
# No updates in Material Request on Stop / Unstop
if cstr(d.prevdoc_doctype) == 'Material Request' and not is_stopped:
# get qty and pending_qty of prevdoc
curr_ref_qty = pc_obj.get_qty(d.doctype, 'prevdoc_detail_docname',
d.prevdoc_detail_docname, 'Material Request Item',
'Material Request - Purchase Order', self.name)
max_qty, qty, curr_qty = flt(curr_ref_qty.split('~~~')[1]), \
flt(curr_ref_qty.split('~~~')[0]), 0
if flt(qty) + flt(po_qty) > flt(max_qty):
curr_qty = flt(max_qty) - flt(qty)
# special case as there is no restriction
# for Material Request - Purchase Order
curr_qty = curr_qty > 0 and curr_qty or 0
else:
curr_qty = flt(po_qty)
ind_qty = -flt(curr_qty)
# Update ordered_qty and indented_qty in bin
args = {
"item_code": d.item_code,
"warehouse": d.warehouse,
"ordered_qty": (is_submit and 1 or -1) * flt(po_qty),
"indented_qty": (is_submit and 1 or -1) * flt(ind_qty),
"posting_date": self.transaction_date
}
update_bin(args)
def check_modified_date(self):
mod_db = frappe.db.sql("select modified from `tabPurchase Order` where name = %s",
self.name)
date_diff = frappe.db.sql("select TIMEDIFF('%s', '%s')" % ( mod_db[0][0],cstr(self.modified)))
if date_diff and date_diff[0][0]:
msgprint(_("{0} {1} has been modified. Please refresh.").format(self.doctype, self.name),
raise_exception=True)
def update_status(self, status):
self.check_modified_date()
# step 1:=> Set Status
frappe.db.set(self,'status',cstr(status))
# step 2:=> Update Bin
self.update_bin(is_submit = (status == 'Submitted') and 1 or 0, is_stopped = 1)
# step 3:=> Acknowledge user
msgprint(_("Status of {0} {1} is now {2}").format(self.doctype, self.name, status))
def on_submit(self):
purchase_controller = frappe.get_doc("Purchase Common")
self.update_prevdoc_status()
self.update_bin(is_submit = 1, is_stopped = 0)
frappe.get_doc('Authorization Control').validate_approving_authority(self.doctype,
self.company, self.grand_total)
purchase_controller.update_last_purchase_rate(self, is_submit = 1)
frappe.db.set(self,'status','Submitted')
def on_cancel(self):
pc_obj = frappe.get_doc('Purchase Common')
self.check_for_stopped_status(pc_obj)
# Check if Purchase Receipt has been submitted against current Purchase Order
pc_obj.check_docstatus(check = 'Next', doctype = 'Purchase Receipt', docname = self.name, detail_doctype = 'Purchase Receipt Item')
# Check if Purchase Invoice has been submitted against current Purchase Order
submitted = frappe.db.sql_list("""select t1.name
from `tabPurchase Invoice` t1,`tabPurchase Invoice Item` t2
where t1.name = t2.parent and t2.purchase_order = %s and t1.docstatus = 1""",
self.name)
if submitted:
throw(_("Purchase Invoice {0} is already submitted").format(", ".join(submitted)))
frappe.db.set(self,'status','Cancelled')
self.update_prevdoc_status()
self.update_bin( is_submit = 0, is_stopped = 0)
pc_obj.update_last_purchase_rate(self, is_submit = 0)
def on_update(self):
pass
def set_missing_values(source, target):
target.ignore_pricing_rule = 1
target.run_method("set_missing_values")
target.run_method("calculate_taxes_and_totals")
@frappe.whitelist()
def make_purchase_receipt(source_name, target_doc=None):
def update_item(obj, target, source_parent):
target.qty = flt(obj.qty) - flt(obj.received_qty)
target.stock_qty = (flt(obj.qty) - flt(obj.received_qty)) * flt(obj.conversion_factor)
target.amount = (flt(obj.qty) - flt(obj.received_qty)) * flt(obj.rate)
target.base_amount = (flt(obj.qty) - flt(obj.received_qty)) * flt(obj.base_rate)
doc = get_mapped_doc("Purchase Order", source_name, {
"Purchase Order": {
"doctype": "Purchase Receipt",
"validation": {
"docstatus": ["=", 1],
}
},
"Purchase Order Item": {
"doctype": "Purchase Receipt Item",
"field_map": {
"name": "prevdoc_detail_docname",
"parent": "prevdoc_docname",
"parenttype": "prevdoc_doctype",
},
"postprocess": update_item,
"condition": lambda doc: doc.received_qty < doc.qty
},
"Purchase Taxes and Charges": {
"doctype": "Purchase Taxes and Charges",
"add_if_empty": True
}
}, target_doc, set_missing_values)
return doc
@frappe.whitelist()
def make_purchase_invoice(source_name, target_doc=None):
def update_item(obj, target, source_parent):
target.amount = flt(obj.amount) - flt(obj.billed_amt)
target.base_amount = target.amount * flt(source_parent.conversion_rate)
if flt(obj.base_rate):
target.qty = target.base_amount / flt(obj.base_rate)
doc = get_mapped_doc("Purchase Order", source_name, {
"Purchase Order": {
"doctype": "Purchase Invoice",
"validation": {
"docstatus": ["=", 1],
}
},
"Purchase Order Item": {
"doctype": "Purchase Invoice Item",
"field_map": {
"name": "po_detail",
"parent": "purchase_order",
},
"postprocess": update_item,
"condition": lambda doc: doc.base_amount==0 or doc.billed_amt < doc.amount
},
"Purchase Taxes and Charges": {
"doctype": "Purchase Taxes and Charges",
"add_if_empty": True
}
}, target_doc, set_missing_values)
return doc
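def _example_make_purchase_receipt():
    # Illustrative sketch only (not part of the original module): requires a
    # working Frappe/ERPNext site context; the Purchase Order name is an
    # assumption.  Returns an unsaved, mapped Purchase Receipt document that
    # the caller can review and insert.
    return make_purchase_receipt("PO-00001")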
| agpl-3.0 | -7,721,533,082,421,722,000 | 33.841699 | 133 | 0.669991 | false |
ericmckean/syzygy | syzygy/build/generate_coverage.py | 4 | 15218 | #!python
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A utility script to perform code coverage analysis."""
import glob
import logging
import optparse
import os
import shutil
import subprocess
import sys
import tempfile
# The list of DLLs we want to instrument in addition to _unittests executables.
_DLLS_TO_INSTRUMENT = [
'basic_block_entry_client.dll',
'call_trace_client.dll',
'coverage_client.dll',
'kasko.dll',
'profile_client.dll',
'syzyasan_rtl.dll',
]
# The list of file patterns to copy to the staging/coverage area.
_FILE_PATTERNS_TO_COPY = [
'*_harness.exe',
'*_tests.exe',
'*_unittests.exe',
'*.dll',
'*.pdb',
'agent_logger.exe',
'call_trace_service.exe',
'test_data',
]
_SYZYGY_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
# This is hardcoded to the Visual Studio default install location.
_PERF_TOOLS_DIR = ('C:/Program Files (x86)/Microsoft Visual Studio 9.0/'
'Team Tools/Performance Tools')
_COVERAGE_ANALYZER_DIR = os.path.normpath(
os.path.join(_SYZYGY_DIR, '../third_party/coverage_analyzer/bin'))
_LOGGER = logging.getLogger(os.path.basename(__file__))
def _Subprocess(command, failure_msg, **kw):
_LOGGER.info('Executing command line %s.', command)
ret = subprocess.call(command, **kw)
if ret != 0:
_LOGGER.error(failure_msg)
raise RuntimeError(failure_msg)
class _ScopedTempDir(object):
"""A utility class for creating a scoped temporary directory."""
def __init__(self):
self._path = None
def Create(self):
self._path = tempfile.mkdtemp()
def path(self):
return self._path
def __del__(self):
if self._path:
shutil.rmtree(self._path)
class _CodeCoverageRunnerBase(object):
"""A worker class to take care of running through instrumentation,
profiling and coverage generation. This base class expects derived
classes to implement the following (see class definition for details):
_InstrumentOneFile(self, file_path)
_StartCoverageCapture(self)
_StopCoverageCapture(self)
_ProcessCoverage(self, output_path)
"""
_COVERAGE_FILE = 'unittests'
def __init__(self, build_dir, keep_work_dir):
build_dir = os.path.abspath(build_dir)
self._build_dir = build_dir
self._keep_work_dir = keep_work_dir
self._work_dir = None
self._html_dir = os.path.join(self._build_dir, 'cov')
def __del__(self):
self._CleanupWorkdir()
def Run(self):
"""Performs the code coverage capture for all unittests."""
self._CreateWorkdir()
try:
self._CaptureCoverage()
finally:
self._CleanupWorkdir()
def _InstrumentOneFile(self, file_path):
"""Instruments the provided module for coverage, in place.
Args:
file_path: The path of the module to be instrumented.
"""
raise NotImplementedError()
def _StartCoverageCapture(self):
"""Starts the coverage capture process."""
raise NotImplementedError()
def _StopCoverageCapture(self):
"""Stops the coverage capture process."""
raise NotImplementedError()
def _ProcessCoverage(self, output_path):
"""Processes coverage results and produces an GCOV/LCOV formatted
coverage results file in |output_path|.
Args:
output_path: The path of the output file to produce.
"""
raise NotImplementedError()
def _CreateWorkdir(self):
assert(self._work_dir == None)
# The work dir must be a sibling to build_dir, as unittests refer
# to test data through relative paths from their own executable.
work_parent = os.path.abspath(os.path.join(self._build_dir, '..'))
self._work_dir = tempfile.mkdtemp(prefix='instr-', dir=work_parent)
_LOGGER.info('Created working directory "%s".', self._work_dir)
def _CleanupWorkdir(self):
# Clean up our working directory if it still exists.
work_dir = self._work_dir
self._work_dir = None
if not work_dir:
return
if self._keep_work_dir:
_LOGGER.info('Keeping working directory "%s".', work_dir)
else:
_LOGGER.info('Removing working directory "%s".', work_dir)
shutil.rmtree(work_dir, ignore_errors=True)
def _InstrumentExecutables(self):
build_dir = self._build_dir
work_dir = self._work_dir
_LOGGER.info('Build dir "%s".', build_dir)
# Copy all unittest related files to work_dir.
for pattern in _FILE_PATTERNS_TO_COPY:
files = glob.glob(os.path.join(build_dir, pattern))
for path in files:
_LOGGER.info('Copying "%s" to "%s".', path, work_dir)
if os.path.isdir(path):
# If the source file is a directory, do a recursive copy.
dst = os.path.join(work_dir, os.path.basename(path))
shutil.copytree(path, dst)
else:
shutil.copy(path, work_dir)
# Instrument all EXEs in the work dir.
for exe in glob.glob(os.path.join(work_dir, '*.exe')):
self._InstrumentOneFile(exe)
# And the DLLs we've specified.
for dll in _DLLS_TO_INSTRUMENT:
self._InstrumentOneFile(os.path.join(work_dir, dll))
def _RunUnittests(self):
unittests = (glob.glob(os.path.join(self._work_dir, '*_unittests.exe')) +
glob.glob(os.path.join(self._work_dir, '*_tests.exe')))
for unittest in unittests:
_LOGGER.info('Running unittest "%s".', unittest)
# Run single threaded, and with a 5 minute (in ms) timeout. This
# conserves existing buildbot behaviour with the new sharded tests.
_Subprocess([unittest,
'--single-process-tests',
'--test-launcher-timeout=300000'],
'Unittests "%s" failed.' % os.path.basename(unittest))
def _GenerateHtml(self, input_path):
croc = os.path.abspath(
os.path.join(_SYZYGY_DIR, '../tools/code_coverage/croc.py'))
config = os.path.join(_SYZYGY_DIR, 'build/syzygy.croc')
# The HTML directory is already deleted. Create it now.
os.makedirs(self._html_dir)
cmd = [sys.executable, croc,
'--tree',
'--config', config,
'--input', input_path,
'--html', self._html_dir]
# The coverage html generator wants to run in the directory
# containing our src root.
cwd = os.path.abspath(os.path.join(_SYZYGY_DIR, '../..'))
_LOGGER.info('Generating HTML report')
_Subprocess(cmd, 'Failed to generate HTML coverage report.', cwd=cwd)
def _CaptureCoverage(self):
# Clean up old coverage results. We do this immediately so that previous
# coverage results won't still be around if this script fails.
shutil.rmtree(self._html_dir, ignore_errors=True)
self._InstrumentExecutables()
self._StartCoverageCapture()
try:
self._RunUnittests()
finally:
self._StopCoverageCapture()
output_path = os.path.join(self._work_dir,
'%s.coverage.lcov' % self._COVERAGE_FILE)
self._ProcessCoverage(output_path)
self._GenerateHtml(output_path)
class _CodeCoverageRunnerVS(_CodeCoverageRunnerBase):
"""Code coverage runner that uses the Microsoft Visual Studio Team Tools
instrumenter.
"""
def __init__(self, build_dir, perf_tools_dir, coverage_analyzer_dir,
keep_work_dir):
super(_CodeCoverageRunnerVS, self).__init__(build_dir, keep_work_dir)
self._perf_tools_dir = os.path.abspath(perf_tools_dir)
self._coverage_analyzer_dir = os.path.abspath(coverage_analyzer_dir)
def _InstrumentOneFile(self, file_path):
cmd = [os.path.join(self._perf_tools_dir, 'vsinstr.exe'),
'/coverage',
'/verbose',
file_path]
_LOGGER.info('Instrumenting "%s".', file_path)
_Subprocess(cmd, 'Failed to instrument "%s"' % file_path)
def _StartCoverageCapture(self):
cmd = [os.path.join(self._perf_tools_dir, 'vsperfcmd.exe'),
'/start:coverage',
'/output:"%s"' % os.path.join(self._work_dir, self._COVERAGE_FILE)]
_LOGGER.info('Starting coverage capture.')
_Subprocess(cmd, 'Failed to start coverage capture.')
def _StopCoverageCapture(self):
cmd = [os.path.join(self._perf_tools_dir, 'vsperfcmd.exe'), '/shutdown']
_LOGGER.info('Halting coverage capture.')
_Subprocess(cmd, 'Failed to stop coverage capture.')
def _ProcessCoverage(self, output_path):
# The vsperf tool creates an output with suffix '.coverage'.
input_path = os.path.join(self._work_dir,
'%s.coverage' % self._COVERAGE_FILE)
# Coverage analyzer will go ahead and place its output in
# input_file + '.lcov'.
default_output_path = input_path + '.lcov'
cmd = [os.path.join(self._coverage_analyzer_dir, 'coverage_analyzer.exe'),
'-noxml', '-sym_path=%s' % self._work_dir,
input_path]
_LOGGER.info('Generating LCOV file.')
_Subprocess(cmd, 'LCOV generation failed.')
# Move the default output location if necessary.
if default_output_path != output_path:
shutil.move(default_output_path, output_path)
class _CodeCoverageRunnerSyzygy(_CodeCoverageRunnerBase):
"""Code coverage runner that uses the Syzygy code coverage client."""
_SYZYCOVER = 'syzycover'
def __init__(self, build_dir, keep_work_dir):
super(_CodeCoverageRunnerSyzygy, self).__init__(build_dir, keep_work_dir)
self._temp_dir = _ScopedTempDir()
self._temp_dir.Create()
def _InstrumentOneFile(self, file_path):
temp_path = os.path.join(self._temp_dir.path(),
os.path.basename(file_path))
shutil.copy(file_path, temp_path)
cmd = [os.path.join(self._build_dir, 'instrument.exe'),
'--mode=COVERAGE',
'--agent=%s.dll' % self._SYZYCOVER,
'--input-image=%s' % temp_path,
'--output-image=%s' % file_path,
'--no-augment-pdb',
'--overwrite']
_LOGGER.info('Instrumenting "%s".', file_path)
_Subprocess(cmd, 'Failed to instrument "%s"' % file_path)
def _StartCoverageCapture(self):
# Grab a copy of the coverage client and place it in the work directory.
# We give it a different name so that it doesn't conflict with the
# instrumented coverage_client.dll.
syzycover = os.path.abspath(os.path.join(
self._work_dir, '%s.dll' % self._SYZYCOVER))
shutil.copy(os.path.join(self._build_dir, 'coverage_client.dll'),
syzycover)
# Set up the environment so that the coverage client will connect to
# the appropriate call trace client. Also make it so that it will crash if
# the RPC connection is unable to be made.
os.environ['SYZYGY_RPC_INSTANCE_ID'] = '%s,%s' % (syzycover,
self._SYZYCOVER)
os.environ['SYZYGY_RPC_SESSION_MANDATORY'] = '%s,1' % (syzycover)
# Start an instance of the call-trace service in the background.
cmd = [os.path.join(self._build_dir, 'call_trace_service.exe'),
'spawn',
'--instance-id=%s' % self._SYZYCOVER,
'--trace-dir=%s' % self._work_dir]
_LOGGER.info('Starting coverage capture.')
_Subprocess(cmd, 'Failed to start coverage capture.')
def _StopCoverageCapture(self):
cmd = [os.path.join(self._build_dir, 'call_trace_service.exe'),
'stop',
'--instance-id=%s' % self._SYZYCOVER]
_LOGGER.info('Halting coverage capture.')
_Subprocess(cmd, 'Failed to stop coverage capture.')
def _ProcessCoverage(self, output_path):
_LOGGER.info('Generating LCOV file.')
cmd = [os.path.join(self._build_dir, 'grinder.exe'),
'--mode=coverage',
'--output-file=%s' % output_path,
os.path.join(self._work_dir, 'trace-*.bin')]
_Subprocess(cmd, 'LCOV generation failed.')
_USAGE = """\
%prog [options]
Generates a code coverage report for unittests in a given build directory.
On a successful run, the HTML report will be produced in a subdirectory
of the given build directory named "cov".
"""
def _ParseArguments():
parser = optparse.OptionParser()
parser.add_option('-v', '--verbose', dest='verbose',
action='store_true', default=False,
help='Enable verbose logging.')
parser.add_option('--build-dir', dest='build_dir',
help='The directory where build output is placed.')
parser.add_option('--target', dest='target',
help='The build profile for which coverage is being '
'generated. If not specified, default to None. '
'Will be appended to --build-dir to generate the '
'name of the directory containing the binaries '
'to analyze.')
parser.add_option('--perf-tools-dir', dest='perf_tools_dir',
default=_PERF_TOOLS_DIR,
help='The directory where the VS performance tools, '
'"vsinstr.exe" and "vsperfcmd.exe" are found. '
'Ignored if --syzygy is specified.')
parser.add_option('--coverage-analyzer-dir', dest='coverage_analyzer_dir',
default=_COVERAGE_ANALYZER_DIR,
help='The directory where "coverage_analyzer.exe" '
'is found. Ignored if --syzygy is specified.')
parser.add_option('--keep-work-dir', action='store_true', default=False,
help='Keep temporary directory after run.')
parser.add_option('--syzygy', action='store_true', default=False,
help='Use Syzygy coverage tools.')
(opts, args) = parser.parse_args()
if args:
parser.error('This script does not accept any arguments.')
if not opts.build_dir:
parser.error('You must provide a build directory.')
opts.build_dir = os.path.abspath(opts.build_dir)
# If a target name was specified, then refine the build path with that.
if opts.target:
opts.build_dir = os.path.abspath(os.path.join(opts.build_dir, opts.target))
if not os.path.isdir(opts.build_dir):
parser.error('Path does not exist: %s' % opts.build_dir)
if opts.verbose:
logging.basicConfig(level=logging.INFO)
else:
logging.basicConfig(level=logging.ERROR)
return opts
def main():
opts = _ParseArguments()
if opts.syzygy:
runner = _CodeCoverageRunnerSyzygy(opts.build_dir,
opts.keep_work_dir)
else:
runner = _CodeCoverageRunnerVS(opts.build_dir,
opts.perf_tools_dir,
opts.coverage_analyzer_dir,
opts.keep_work_dir)
runner.Run()
if __name__ == '__main__':
sys.exit(main())
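# Example invocation (illustrative; the build directory and profile name are
# assumptions):
#
#     python generate_coverage.py --verbose --syzygy \
#         --build-dir=build --target=Release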
| apache-2.0 | 3,955,026,010,808,740,000 | 34.390698 | 79 | 0.636417 | false |
rushiagr/keystone | keystone/tests/unit/test_contrib_s3_core.py | 10 | 2130 | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from keystone.contrib import s3
from keystone import exception
from keystone.tests import unit as tests
class S3ContribCore(tests.TestCase):
def setUp(self):
super(S3ContribCore, self).setUp()
self.load_backends()
self.controller = s3.S3Controller()
def test_good_signature(self):
creds_ref = {'secret':
'b121dd41cdcc42fe9f70e572e84295aa'}
credentials = {'token':
'UFVUCjFCMk0yWThBc2dUcGdBbVk3UGhDZmc9PQphcHB'
'saWNhdGlvbi9vY3RldC1zdHJlYW0KVHVlLCAxMSBEZWMgMjAxM'
'iAyMTo0MTo0MSBHTVQKL2NvbnRfczMvdXBsb2FkZWRfZnJ'
'vbV9zMy50eHQ=',
'signature': 'IL4QLcLVaYgylF9iHj6Wb8BGZsw='}
self.assertIsNone(self.controller.check_signature(creds_ref,
credentials))
def test_bad_signature(self):
creds_ref = {'secret':
'b121dd41cdcc42fe9f70e572e84295aa'}
credentials = {'token':
'UFVUCjFCMk0yWThBc2dUcGdBbVk3UGhDZmc9PQphcHB'
'saWNhdGlvbi9vY3RldC1zdHJlYW0KVHVlLCAxMSBEZWMgMjAxM'
'iAyMTo0MTo0MSBHTVQKL2NvbnRfczMvdXBsb2FkZWRfZnJ'
'vbV9zMy50eHQ=',
'signature': uuid.uuid4().hex}
self.assertRaises(exception.Unauthorized,
self.controller.check_signature,
creds_ref, credentials)
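# For reference (an illustrative sketch, not taken from this test module): the
# signature validated by S3Controller.check_signature is expected to be roughly
# base64(HMAC-SHA1(secret, base64decode(token))), so the "good" fixture above
# carries the matching digest while the "bad" one substitutes a random value.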
| apache-2.0 | 6,060,047,175,400,259,000 | 37.727273 | 75 | 0.6277 | false |
molobrakos/home-assistant | homeassistant/components/nest/binary_sensor.py | 7 | 5052 | """Support for Nest Thermostat binary sensors."""
from itertools import chain
import logging
from homeassistant.components.binary_sensor import BinarySensorDevice
from homeassistant.const import CONF_MONITORED_CONDITIONS
from . import (
CONF_BINARY_SENSORS, DATA_NEST, DATA_NEST_CONFIG, NestSensorDevice)
_LOGGER = logging.getLogger(__name__)
BINARY_TYPES = {'online': 'connectivity'}
CLIMATE_BINARY_TYPES = {
'fan': None,
'is_using_emergency_heat': 'heat',
'is_locked': None,
'has_leaf': None,
}
CAMERA_BINARY_TYPES = {
'motion_detected': 'motion',
'sound_detected': 'sound',
'person_detected': 'occupancy',
}
STRUCTURE_BINARY_TYPES = {
'away': None,
}
STRUCTURE_BINARY_STATE_MAP = {
'away': {'away': True, 'home': False},
}
_BINARY_TYPES_DEPRECATED = [
'hvac_ac_state',
'hvac_aux_heater_state',
'hvac_heater_state',
'hvac_heat_x2_state',
'hvac_heat_x3_state',
'hvac_alt_heat_state',
'hvac_alt_heat_x2_state',
'hvac_emer_heat_state',
]
_VALID_BINARY_SENSOR_TYPES = {
**BINARY_TYPES,
**CLIMATE_BINARY_TYPES,
**CAMERA_BINARY_TYPES,
**STRUCTURE_BINARY_TYPES,
}
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Nest binary sensors.
No longer used.
"""
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up a Nest binary sensor based on a config entry."""
nest = hass.data[DATA_NEST]
discovery_info = \
hass.data.get(DATA_NEST_CONFIG, {}).get(CONF_BINARY_SENSORS, {})
# Add all available binary sensors if no Nest binary sensor config is set
if discovery_info == {}:
conditions = _VALID_BINARY_SENSOR_TYPES
else:
conditions = discovery_info.get(CONF_MONITORED_CONDITIONS, {})
for variable in conditions:
if variable in _BINARY_TYPES_DEPRECATED:
            wstr = (variable + " is no longer a supported "
                    "monitored_conditions option. See "
"https://home-assistant.io/components/binary_sensor.nest/ "
"for valid options.")
_LOGGER.error(wstr)
def get_binary_sensors():
"""Get the Nest binary sensors."""
sensors = []
for structure in nest.structures():
sensors += [NestBinarySensor(structure, None, variable)
for variable in conditions
if variable in STRUCTURE_BINARY_TYPES]
device_chain = chain(
nest.thermostats(), nest.smoke_co_alarms(), nest.cameras())
for structure, device in device_chain:
sensors += [NestBinarySensor(structure, device, variable)
for variable in conditions
if variable in BINARY_TYPES]
sensors += [NestBinarySensor(structure, device, variable)
for variable in conditions
if variable in CLIMATE_BINARY_TYPES
and device.is_thermostat]
if device.is_camera:
sensors += [NestBinarySensor(structure, device, variable)
for variable in conditions
if variable in CAMERA_BINARY_TYPES]
for activity_zone in device.activity_zones:
sensors += [NestActivityZoneSensor(
structure, device, activity_zone)]
return sensors
async_add_entities(await hass.async_add_job(get_binary_sensors), True)
class NestBinarySensor(NestSensorDevice, BinarySensorDevice):
"""Represents a Nest binary sensor."""
@property
def is_on(self):
"""Return true if the binary sensor is on."""
return self._state
@property
def device_class(self):
"""Return the device class of the binary sensor."""
return _VALID_BINARY_SENSOR_TYPES.get(self.variable)
def update(self):
"""Retrieve latest state."""
value = getattr(self.device, self.variable)
if self.variable in STRUCTURE_BINARY_TYPES:
self._state = bool(STRUCTURE_BINARY_STATE_MAP
[self.variable].get(value))
else:
self._state = bool(value)
class NestActivityZoneSensor(NestBinarySensor):
"""Represents a Nest binary sensor for activity in a zone."""
def __init__(self, structure, device, zone):
"""Initialize the sensor."""
super(NestActivityZoneSensor, self).__init__(structure, device, "")
self.zone = zone
self._name = "{} {} activity".format(self._name, self.zone.name)
@property
def unique_id(self):
"""Return unique id based on camera serial and zone id."""
return "{}-{}".format(self.device.serial, self.zone.zone_id)
@property
def device_class(self):
"""Return the device class of the binary sensor."""
return 'motion'
def update(self):
"""Retrieve latest state."""
self._state = self.device.has_ongoing_motion_in_zone(self.zone.zone_id)
| apache-2.0 | -213,200,203,026,963,200 | 30.974684 | 79 | 0.606295 | false |
JohnOrlando/gnuradio-bitshark | gnuradio-examples/python/usrp/usrp_benchmark_usb.py | 11 | 3302 | #!/usr/bin/env python
#
# Copyright 2004,2005 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
"""
Benchmark the USB/USRP throughput. Finds the maximum full-duplex speed
the USRP/USB combination can sustain without errors.
This program does not currently give reliable results. Sorry about that...
"""
from gnuradio import gr
from gnuradio import usrp
from gnuradio import eng_notation
import sys
def run_test (usb_throughput, verbose):
# usb_throughput is in bytes/sec.
#
# Returns True or False
nsec = 1
stream_length = int (usb_throughput/2 * nsec) # length of stream to examine
adc_freq = 64e6
dac_freq = 128e6
sizeof_sample = 2 * gr.sizeof_short
usb_throughput_in_samples = usb_throughput / sizeof_sample
# allocate usb throughput 50/50 between Tx and Rx
tx_interp = int (dac_freq) / int (usb_throughput_in_samples / 2)
rx_decim = int (adc_freq) / int (usb_throughput_in_samples / 2)
# print "tx_interp =", tx_interp, "rx_decim =", rx_decim
assert (tx_interp == 2 * rx_decim)
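    # dac_freq (128 MHz) is exactly twice adc_freq (64 MHz), so with the 50/50 USB split
    # the Tx interpolation always works out to twice the Rx decimation, as asserted above.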
tb = gr.top_block ()
# Build the Tx pipeline
data_src = gr.lfsr_32k_source_s ()
src_head = gr.head (gr.sizeof_short, int (stream_length * 2))
usrp_tx = usrp.sink_s (0, tx_interp)
tb.connect (data_src, src_head, usrp_tx)
# and the Rx pipeline
usrp_rx = usrp.source_s (0, rx_decim, 1, 0x32103210, usrp.FPGA_MODE_LOOPBACK)
head = gr.head (gr.sizeof_short, stream_length)
check = gr.check_lfsr_32k_s ()
tb.connect (usrp_rx, head, check)
tb.run ()
ntotal = check.ntotal ()
nright = check.nright ()
runlength = check.runlength ()
if verbose:
print "usb_throughput =", eng_notation.num_to_str (usb_throughput)
print "ntotal =", ntotal
print "nright =", nright
print "runlength =", runlength
print "delta =", ntotal - runlength
return runlength >= stream_length - 80000
def main ():
verbose = True
best_rate = 0
usb_rate = [ 2e6, 4e6, 8e6, 16e6, 32e6 ]
#usb_rate = [ 32e6, 32e6, 32e6, 32e6, 32e6 ]
# usb_rate.reverse ()
for rate in usb_rate:
sys.stdout.write ("Testing %sB/sec... " % (eng_notation.num_to_str (rate)))
sys.stdout.flush ()
ok = run_test (rate, verbose)
if ok:
best_rate = max (best_rate, rate)
sys.stdout.write ("OK\n")
else:
sys.stdout.write ("FAILED\n")
print "Max USB/USRP throughput = %sB/sec" % (eng_notation.num_to_str (best_rate),)
if __name__ == '__main__':
main ()
| gpl-3.0 | -3,161,924,546,570,675,700 | 30.150943 | 86 | 0.645366 | false |
ioram7/keystone-federado-pgid2013 | build/paste/build/lib.linux-x86_64-2.7/paste/debug/testserver.py | 28 | 3385 | # (c) 2005 Clark C. Evans
# This module is part of the Python Paste Project and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
# This code was written with funding by http://prometheusresearch.com
"""
WSGI Test Server
This builds upon paste.util.baseserver to customize it for regressions
where using raw_interactive won't do.
"""
import time
from paste.httpserver import *
class WSGIRegressionServer(WSGIServer):
"""
A threaded WSGIServer for use in regression testing. To use this
module, call serve(application, regression=True), and then call
server.accept() to let it handle one request. When finished, use
    server.stop() to shut down the server. Note that all pending requests
are processed before the server shuts down.
"""
defaulttimeout = 10
def __init__ (self, *args, **kwargs):
WSGIServer.__init__(self, *args, **kwargs)
self.stopping = []
self.pending = []
self.timeout = self.defaulttimeout
# this is a local connection, be quick
self.socket.settimeout(2)
def serve_forever(self):
from threading import Thread
thread = Thread(target=self.serve_pending)
thread.start()
def reset_expires(self):
if self.timeout:
self.expires = time.time() + self.timeout
def close_request(self, *args, **kwargs):
WSGIServer.close_request(self, *args, **kwargs)
self.pending.pop()
self.reset_expires()
def serve_pending(self):
self.reset_expires()
while not self.stopping or self.pending:
now = time.time()
if now > self.expires and self.timeout:
# note regression test doesn't handle exceptions in
# threads very well; so we just print and exit
print "\nWARNING: WSGIRegressionServer timeout exceeded\n"
break
if self.pending:
self.handle_request()
time.sleep(.1)
def stop(self):
""" stop the server (called from tester's thread) """
self.stopping.append(True)
def accept(self, count = 1):
""" accept another request (called from tester's thread) """
assert not self.stopping
[self.pending.append(True) for x in range(count)]
def serve(application, host=None, port=None, handler=None):
server = WSGIRegressionServer(application, host, port, handler)
print "serving on %s:%s" % server.server_address
server.serve_forever()
return server
if __name__ == '__main__':
import urllib
from paste.wsgilib import dump_environ
server = serve(dump_environ)
baseuri = ("http://%s:%s" % server.server_address)
def fetch(path):
# tell the server to humor exactly one more request
server.accept(1)
# not needed; but this is what you do if the server
# may not respond in a resonable time period
import socket
socket.setdefaulttimeout(5)
# build a uri, fetch and return
return urllib.urlopen(baseuri + path).read()
assert "PATH_INFO: /foo" in fetch("/foo")
assert "PATH_INFO: /womble" in fetch("/womble")
# ok, let's make one more final request...
server.accept(1)
# and then schedule a stop()
server.stop()
# and then... fetch it...
urllib.urlopen(baseuri)
| apache-2.0 | -3,639,080,563,956,388,000 | 35.397849 | 74 | 0.636337 | false |
bjori/grpc | src/python/grpcio_test/grpc_test/framework/foundation/_later_test.py | 35 | 5102 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests of the later module."""
import threading
import time
import unittest
from grpc.framework.foundation import later
TICK = 0.1
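# As exercised below, later.later(delay, computation) returns a future that runs
# `computation` after roughly `delay` seconds; the tests sleep in TICK-sized steps
# around that boundary to check done()/cancelled()/result() behaviour.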
class LaterTest(unittest.TestCase):
def test_simple_delay(self):
lock = threading.Lock()
cell = [0]
return_value = object()
def computation():
with lock:
cell[0] += 1
return return_value
computation_future = later.later(TICK * 2, computation)
self.assertFalse(computation_future.done())
self.assertFalse(computation_future.cancelled())
time.sleep(TICK)
self.assertFalse(computation_future.done())
self.assertFalse(computation_future.cancelled())
with lock:
self.assertEqual(0, cell[0])
time.sleep(TICK * 2)
self.assertTrue(computation_future.done())
self.assertFalse(computation_future.cancelled())
with lock:
self.assertEqual(1, cell[0])
self.assertEqual(return_value, computation_future.result())
def test_callback(self):
lock = threading.Lock()
cell = [0]
callback_called = [False]
future_passed_to_callback = [None]
def computation():
with lock:
cell[0] += 1
computation_future = later.later(TICK * 2, computation)
def callback(outcome):
with lock:
callback_called[0] = True
future_passed_to_callback[0] = outcome
computation_future.add_done_callback(callback)
time.sleep(TICK)
with lock:
self.assertFalse(callback_called[0])
time.sleep(TICK * 2)
with lock:
self.assertTrue(callback_called[0])
self.assertTrue(future_passed_to_callback[0].done())
callback_called[0] = False
future_passed_to_callback[0] = None
computation_future.add_done_callback(callback)
with lock:
self.assertTrue(callback_called[0])
self.assertTrue(future_passed_to_callback[0].done())
def test_cancel(self):
lock = threading.Lock()
cell = [0]
callback_called = [False]
future_passed_to_callback = [None]
def computation():
with lock:
cell[0] += 1
computation_future = later.later(TICK * 2, computation)
def callback(outcome):
with lock:
callback_called[0] = True
future_passed_to_callback[0] = outcome
computation_future.add_done_callback(callback)
time.sleep(TICK)
with lock:
self.assertFalse(callback_called[0])
computation_future.cancel()
self.assertTrue(computation_future.cancelled())
self.assertFalse(computation_future.running())
self.assertTrue(computation_future.done())
with lock:
self.assertTrue(callback_called[0])
self.assertTrue(future_passed_to_callback[0].cancelled())
def test_result(self):
lock = threading.Lock()
cell = [0]
callback_called = [False]
future_passed_to_callback_cell = [None]
return_value = object()
def computation():
with lock:
cell[0] += 1
return return_value
computation_future = later.later(TICK * 2, computation)
def callback(future_passed_to_callback):
with lock:
callback_called[0] = True
future_passed_to_callback_cell[0] = future_passed_to_callback
computation_future.add_done_callback(callback)
returned_value = computation_future.result()
self.assertEqual(return_value, returned_value)
# The callback may not yet have been called! Sleep a tick.
time.sleep(TICK)
with lock:
self.assertTrue(callback_called[0])
self.assertEqual(return_value, future_passed_to_callback_cell[0].result())
if __name__ == '__main__':
unittest.main(verbosity=2)
| bsd-3-clause | 5,994,615,253,012,325,000 | 32.788079 | 80 | 0.698942 | false |
dfc/beets | test/test_info.py | 25 | 3581 | # This file is part of beets.
# Copyright 2015, Thomas Scholtes.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
from __future__ import (division, absolute_import, print_function,
unicode_literals)
from test._common import unittest
from test.helper import TestHelper
from beets.mediafile import MediaFile
from beets.util import displayable_path
class InfoTest(unittest.TestCase, TestHelper):
def setUp(self):
self.setup_beets()
self.load_plugins('info')
def tearDown(self):
self.unload_plugins()
self.teardown_beets()
def run_command(self, *args):
super(InfoTest, self).run_command('info', *args)
def test_path(self):
path = self.create_mediafile_fixture()
mediafile = MediaFile(path)
mediafile.albumartist = 'AAA'
mediafile.disctitle = 'DDD'
mediafile.genres = ['a', 'b', 'c']
mediafile.composer = None
mediafile.save()
out = self.run_with_output(path)
self.assertIn(path, out)
self.assertIn('albumartist: AAA', out)
self.assertIn('disctitle: DDD', out)
self.assertIn('genres: a; b; c', out)
self.assertNotIn('composer:', out)
def test_item_query(self):
item1, item2 = self.add_item_fixtures(count=2)
item1.album = 'xxxx'
item1.write()
item1.album = 'yyyy'
item1.store()
out = self.run_with_output('album:yyyy')
self.assertIn(displayable_path(item1.path), out)
self.assertIn(u'album: xxxx', out)
self.assertNotIn(displayable_path(item2.path), out)
def test_item_library_query(self):
item, = self.add_item_fixtures()
item.album = 'xxxx'
item.store()
out = self.run_with_output('--library', 'album:xxxx')
self.assertIn(displayable_path(item.path), out)
self.assertIn(u'album: xxxx', out)
def test_collect_item_and_path(self):
path = self.create_mediafile_fixture()
mediafile = MediaFile(path)
item, = self.add_item_fixtures()
item.album = mediafile.album = 'AAA'
item.tracktotal = mediafile.tracktotal = 5
item.title = 'TTT'
mediafile.title = 'SSS'
item.write()
item.store()
mediafile.save()
out = self.run_with_output('--summarize', 'album:AAA', path)
self.assertIn(u'album: AAA', out)
self.assertIn(u'tracktotal: 5', out)
self.assertIn(u'title: [various]', out)
def test_include_pattern(self):
item, = self.add_item_fixtures()
item.album = 'xxxx'
item.store()
out = self.run_with_output('--library', 'album:xxxx',
'--include-keys', '*lbu*')
self.assertIn(displayable_path(item.path), out)
self.assertNotIn(u'title:', out)
self.assertIn(u'album: xxxx', out)
def suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == b'__main__':
unittest.main(defaultTest='suite')
| mit | 1,886,791,080,089,099,800 | 30.973214 | 71 | 0.627199 | false |
parmegv/keymanager | setup.py | 1 | 4865 | # -*- coding: utf-8 -*-
# setup.py
# Copyright (C) 2013 LEAP
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
setup file for leap.keymanager
"""
import re
from setuptools import setup
from setuptools import find_packages
import versioneer
versioneer.versionfile_source = 'src/leap/keymanager/_version.py'
versioneer.versionfile_build = 'leap/keymanager/_version.py'
versioneer.tag_prefix = '' # tags are like 1.2.0
versioneer.parentdir_prefix = 'leap.keymanager-'
from pkg import utils
trove_classifiers = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Communications :: Email',
'Topic :: Internet',
'Topic :: Security :: Cryptography',
'Topic :: Software Development :: Libraries',
]
DOWNLOAD_BASE = ('https://github.com/leapcode/keymanager/'
'archive/%s.tar.gz')
_versions = versioneer.get_versions()
VERSION = _versions['version']
VERSION_FULL = _versions['full']
DOWNLOAD_URL = ""
# get the short version for the download url
_version_short = re.findall('\d+\.\d+\.\d+', VERSION)
if len(_version_short) > 0:
VERSION_SHORT = _version_short[0]
DOWNLOAD_URL = DOWNLOAD_BASE % VERSION_SHORT
cmdclass = versioneer.get_cmdclass()
from setuptools import Command
class freeze_debianver(Command):
"""
Freezes the version in a debian branch.
To be used after merging the development branch onto the debian one.
"""
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
proceed = str(raw_input(
"This will overwrite the file _version.py. Continue? [y/N] "))
if proceed != "y":
print("He. You scared. Aborting.")
return
template = r"""
# This file was generated by the `freeze_debianver` command in setup.py
# Using 'versioneer.py' (0.7+) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
version_version = '{version}'
version_full = '{version_full}'
"""
templatefun = r"""
def get_versions(default={}, verbose=False):
return {'version': version_version, 'full': version_full}
"""
subst_template = template.format(
version=VERSION_SHORT,
version_full=VERSION_FULL) + templatefun
with open(versioneer.versionfile_source, 'w') as f:
f.write(subst_template)
cmdclass["freeze_debianver"] = freeze_debianver
# XXX add ref to docs
requirements = utils.parse_requirements()
if utils.is_develop_mode():
print
print ("[WARNING] Skipping leap-specific dependencies "
"because development mode is detected.")
print ("[WARNING] You can install "
"the latest published versions with "
"'pip install -r pkg/requirements-leap.pip'")
print ("[WARNING] Or you can instead do 'python setup.py develop' "
"from the parent folder of each one of them.")
print
else:
requirements += utils.parse_requirements(
reqfiles=["pkg/requirements-leap.pip"])
setup(
name='leap.keymanager',
version=VERSION,
cmdclass=cmdclass,
url='https://leap.se/',
download_url=DOWNLOAD_URL,
license='GPLv3+',
description='LEAP\'s Key Manager',
author='The LEAP Encryption Access Project',
author_email='[email protected]',
maintainer='Kali Kaneko',
maintainer_email='[email protected]',
long_description=(
"The Key Manager handles all types of keys to allow for "
"point-to-point encryption between parties communicating through "
"LEAP infrastructure."
),
classifiers=trove_classifiers,
namespace_packages=["leap"],
packages=find_packages('src', exclude=['leap.keymanager.tests']),
package_dir={'': 'src'},
test_suite='leap.keymanager.tests',
install_requires=requirements,
tests_require=utils.parse_requirements(
reqfiles=['pkg/requirements-testing.pip']),
)
| gpl-3.0 | -2,928,850,491,316,940,000 | 31.218543 | 77 | 0.674203 | false |
nikolas/django-extensions | django_extensions/mongodb/models.py | 28 | 2544 | """
Django Extensions abstract base mongoengine Document classes.
"""
import datetime
from django.utils.translation import ugettext_lazy as _
from mongoengine.document import Document
from mongoengine.fields import DateTimeField, IntField, StringField
from mongoengine.queryset import QuerySetManager
from django_extensions.mongodb.fields import (
AutoSlugField, CreationDateTimeField, ModificationDateTimeField,
)
class TimeStampedModel(Document):
""" TimeStampedModel
An abstract base class model that provides self-managed "created" and
"modified" fields.
"""
created = CreationDateTimeField(_('created'))
modified = ModificationDateTimeField(_('modified'))
class Meta:
abstract = True
class TitleSlugDescriptionModel(Document):
""" TitleSlugDescriptionModel
An abstract base class model that provides title and description fields
and a self-managed "slug" field that populates from the title.
"""
title = StringField(_('title'), max_length=255)
slug = AutoSlugField(_('slug'), populate_from='title')
description = StringField(_('description'), blank=True, null=True)
class Meta:
abstract = True
class ActivatorModelManager(QuerySetManager):
""" ActivatorModelManager
Manager to return instances of ActivatorModel: SomeModel.objects.active() / .inactive()
"""
def active(self):
""" Returns active instances of ActivatorModel: SomeModel.objects.active() """
return super(ActivatorModelManager, self).get_query_set().filter(status=1)
def inactive(self):
""" Returns inactive instances of ActivatorModel: SomeModel.objects.inactive() """
return super(ActivatorModelManager, self).get_query_set().filter(status=0)
class ActivatorModel(Document):
""" ActivatorModel
An abstract base class model that provides activate and deactivate fields.
"""
STATUS_CHOICES = (
(0, _('Inactive')),
(1, _('Active')),
)
status = IntField(_('status'), choices=STATUS_CHOICES, default=1)
activate_date = DateTimeField(blank=True, null=True, help_text=_('keep empty for an immediate activation'))
deactivate_date = DateTimeField(blank=True, null=True, help_text=_('keep empty for indefinite activation'))
objects = ActivatorModelManager()
class Meta:
abstract = True
def save(self, *args, **kwargs):
if not self.activate_date:
self.activate_date = datetime.datetime.now()
super(ActivatorModel, self).save(*args, **kwargs)
| mit | 5,918,699,271,887,179,000 | 33.849315 | 111 | 0.705582 | false |
aflaxman/scikit-learn | sklearn/metrics/regression.py | 47 | 19967 | """Metrics to assess performance on regression task
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck
# Joel Nothman <[email protected]>
# Karan Desai <[email protected]>
# Noel Dawe <[email protected]>
# Manoj Kumar <[email protected]>
# Michael Eickenberg <[email protected]>
# Konstantin Shmelkov <[email protected]>
# License: BSD 3 clause
from __future__ import division
import numpy as np
from ..utils.validation import check_array, check_consistent_length
from ..utils.validation import column_or_1d
from ..externals.six import string_types
__ALL__ = [
"mean_absolute_error",
"mean_squared_error",
"mean_squared_log_error",
"median_absolute_error",
"r2_score",
"explained_variance_score"
]
def _check_reg_targets(y_true, y_pred, multioutput):
"""Check that y_true and y_pred belong to the same regression task
Parameters
----------
y_true : array-like,
y_pred : array-like,
multioutput : array-like or string in ['raw_values', uniform_average',
'variance_weighted'] or None
None is accepted due to backward compatibility of r2_score().
Returns
-------
type_true : one of {'continuous', continuous-multioutput'}
The type of the true target data, as output by
'utils.multiclass.type_of_target'
y_true : array-like of shape = (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples, n_outputs)
Estimated target values.
multioutput : array-like of shape = (n_outputs) or string in ['raw_values',
uniform_average', 'variance_weighted'] or None
Custom output weights if ``multioutput`` is array-like or
just the corresponding argument if ``multioutput`` is a
correct keyword.
"""
check_consistent_length(y_true, y_pred)
y_true = check_array(y_true, ensure_2d=False)
y_pred = check_array(y_pred, ensure_2d=False)
if y_true.ndim == 1:
y_true = y_true.reshape((-1, 1))
if y_pred.ndim == 1:
y_pred = y_pred.reshape((-1, 1))
if y_true.shape[1] != y_pred.shape[1]:
raise ValueError("y_true and y_pred have different number of output "
"({0}!={1})".format(y_true.shape[1], y_pred.shape[1]))
n_outputs = y_true.shape[1]
allowed_multioutput_str = ('raw_values', 'uniform_average',
'variance_weighted')
if isinstance(multioutput, string_types):
if multioutput not in allowed_multioutput_str:
raise ValueError("Allowed 'multioutput' string values are {}. "
"You provided multioutput={!r}".format(
allowed_multioutput_str,
multioutput))
elif multioutput is not None:
multioutput = check_array(multioutput, ensure_2d=False)
if n_outputs == 1:
raise ValueError("Custom weights are useful only in "
"multi-output cases.")
elif n_outputs != len(multioutput):
raise ValueError(("There must be equally many custom weights "
"(%d) as outputs (%d).") %
(len(multioutput), n_outputs))
y_type = 'continuous' if n_outputs == 1 else 'continuous-multioutput'
return y_type, y_true, y_pred, multioutput
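# For example, _check_reg_targets([3, -0.5], [2.5, 0.0], 'uniform_average') reshapes
# both inputs to column vectors of shape (2, 1) and returns
# ('continuous', y_true, y_pred, 'uniform_average').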
def mean_absolute_error(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Mean absolute error regression loss
Read more in the :ref:`User Guide <mean_absolute_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average']
or array-like of shape (n_outputs)
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or ndarray of floats
If multioutput is 'raw_values', then mean absolute error is returned
for each output separately.
If multioutput is 'uniform_average' or an ndarray of weights, then the
weighted average of all output errors is returned.
MAE output is non-negative floating point. The best value is 0.0.
Examples
--------
>>> from sklearn.metrics import mean_absolute_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_absolute_error(y_true, y_pred)
0.5
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> mean_absolute_error(y_true, y_pred)
0.75
>>> mean_absolute_error(y_true, y_pred, multioutput='raw_values')
array([ 0.5, 1. ])
>>> mean_absolute_error(y_true, y_pred, multioutput=[0.3, 0.7])
... # doctest: +ELLIPSIS
0.849...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
output_errors = np.average(np.abs(y_pred - y_true),
weights=sample_weight, axis=0)
if isinstance(multioutput, string_types):
if multioutput == 'raw_values':
return output_errors
elif multioutput == 'uniform_average':
# pass None as weights to np.average: uniform mean
multioutput = None
return np.average(output_errors, weights=multioutput)
def mean_squared_error(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Mean squared error regression loss
Read more in the :ref:`User Guide <mean_squared_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average']
or array-like of shape (n_outputs)
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or ndarray of floats
A non-negative floating point value (the best value is 0.0), or an
array of floating point values, one for each individual target.
Examples
--------
>>> from sklearn.metrics import mean_squared_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_squared_error(y_true, y_pred)
0.375
>>> y_true = [[0.5, 1],[-1, 1],[7, -6]]
>>> y_pred = [[0, 2],[-1, 2],[8, -5]]
>>> mean_squared_error(y_true, y_pred) # doctest: +ELLIPSIS
0.708...
>>> mean_squared_error(y_true, y_pred, multioutput='raw_values')
... # doctest: +ELLIPSIS
array([ 0.416..., 1. ])
>>> mean_squared_error(y_true, y_pred, multioutput=[0.3, 0.7])
... # doctest: +ELLIPSIS
0.824...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
output_errors = np.average((y_true - y_pred) ** 2, axis=0,
weights=sample_weight)
if isinstance(multioutput, string_types):
if multioutput == 'raw_values':
return output_errors
elif multioutput == 'uniform_average':
# pass None as weights to np.average: uniform mean
multioutput = None
return np.average(output_errors, weights=multioutput)
def mean_squared_log_error(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Mean squared logarithmic error regression loss
Read more in the :ref:`User Guide <mean_squared_log_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average'] \
or array-like of shape = (n_outputs)
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors when the input is of multioutput
format.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or ndarray of floats
A non-negative floating point value (the best value is 0.0), or an
array of floating point values, one for each individual target.
Examples
--------
>>> from sklearn.metrics import mean_squared_log_error
>>> y_true = [3, 5, 2.5, 7]
>>> y_pred = [2.5, 5, 4, 8]
>>> mean_squared_log_error(y_true, y_pred) # doctest: +ELLIPSIS
0.039...
>>> y_true = [[0.5, 1], [1, 2], [7, 6]]
>>> y_pred = [[0.5, 2], [1, 2.5], [8, 8]]
>>> mean_squared_log_error(y_true, y_pred) # doctest: +ELLIPSIS
0.044...
>>> mean_squared_log_error(y_true, y_pred, multioutput='raw_values')
... # doctest: +ELLIPSIS
array([ 0.004..., 0.083...])
>>> mean_squared_log_error(y_true, y_pred, multioutput=[0.3, 0.7])
... # doctest: +ELLIPSIS
0.060...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
    if not (y_true >= 0).all() or not (y_pred >= 0).all():
raise ValueError("Mean Squared Logarithmic Error cannot be used when "
"targets contain negative values.")
return mean_squared_error(np.log(y_true + 1), np.log(y_pred + 1),
sample_weight, multioutput)
def median_absolute_error(y_true, y_pred):
"""Median absolute error regression loss
Read more in the :ref:`User Guide <median_absolute_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples)
Estimated target values.
Returns
-------
loss : float
A positive floating point value (the best value is 0.0).
Examples
--------
>>> from sklearn.metrics import median_absolute_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> median_absolute_error(y_true, y_pred)
0.5
"""
y_type, y_true, y_pred, _ = _check_reg_targets(y_true, y_pred,
'uniform_average')
if y_type == 'continuous-multioutput':
raise ValueError("Multioutput not supported in median_absolute_error")
return np.median(np.abs(y_pred - y_true))
def explained_variance_score(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Explained variance regression score function
Best possible score is 1.0, lower values are worse.
Read more in the :ref:`User Guide <explained_variance_score>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average', \
'variance_weighted'] or array-like of shape (n_outputs)
Defines aggregating of multiple output scores.
Array-like value defines weights used to average scores.
'raw_values' :
Returns a full set of scores in case of multioutput input.
'uniform_average' :
Scores of all outputs are averaged with uniform weight.
'variance_weighted' :
Scores of all outputs are averaged, weighted by the variances
of each individual output.
Returns
-------
score : float or ndarray of floats
The explained variance or ndarray if 'multioutput' is 'raw_values'.
Notes
-----
This is not a symmetric function.
Examples
--------
>>> from sklearn.metrics import explained_variance_score
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> explained_variance_score(y_true, y_pred) # doctest: +ELLIPSIS
0.957...
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> explained_variance_score(y_true, y_pred, multioutput='uniform_average')
... # doctest: +ELLIPSIS
0.983...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
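    # Per output: explained variance = 1 - Var(y_true - y_pred) / Var(y_true), with
    # both (weighted, biased) variances computed below as `numerator` and `denominator`.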
y_diff_avg = np.average(y_true - y_pred, weights=sample_weight, axis=0)
numerator = np.average((y_true - y_pred - y_diff_avg) ** 2,
weights=sample_weight, axis=0)
y_true_avg = np.average(y_true, weights=sample_weight, axis=0)
denominator = np.average((y_true - y_true_avg) ** 2,
weights=sample_weight, axis=0)
nonzero_numerator = numerator != 0
nonzero_denominator = denominator != 0
valid_score = nonzero_numerator & nonzero_denominator
output_scores = np.ones(y_true.shape[1])
output_scores[valid_score] = 1 - (numerator[valid_score] /
denominator[valid_score])
output_scores[nonzero_numerator & ~nonzero_denominator] = 0.
if isinstance(multioutput, string_types):
if multioutput == 'raw_values':
# return scores individually
return output_scores
elif multioutput == 'uniform_average':
            # passing None as weights to np.average() results in uniform mean
avg_weights = None
elif multioutput == 'variance_weighted':
avg_weights = denominator
else:
avg_weights = multioutput
return np.average(output_scores, weights=avg_weights)
def r2_score(y_true, y_pred, sample_weight=None,
multioutput="uniform_average"):
"""R^2 (coefficient of determination) regression score function.
Best possible score is 1.0 and it can be negative (because the
model can be arbitrarily worse). A constant model that always
predicts the expected value of y, disregarding the input features,
would get a R^2 score of 0.0.
Read more in the :ref:`User Guide <r2_score>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average', \
'variance_weighted'] or None or array-like of shape (n_outputs)
Defines aggregating of multiple output scores.
Array-like value defines weights used to average scores.
Default is "uniform_average".
'raw_values' :
Returns a full set of scores in case of multioutput input.
'uniform_average' :
Scores of all outputs are averaged with uniform weight.
'variance_weighted' :
Scores of all outputs are averaged, weighted by the variances
of each individual output.
.. versionchanged:: 0.19
Default value of multioutput is 'uniform_average'.
Returns
-------
z : float or ndarray of floats
The R^2 score or ndarray of scores if 'multioutput' is
'raw_values'.
Notes
-----
This is not a symmetric function.
Unlike most other scores, R^2 score may be negative (it need not actually
be the square of a quantity R).
References
----------
.. [1] `Wikipedia entry on the Coefficient of determination
<https://en.wikipedia.org/wiki/Coefficient_of_determination>`_
Examples
--------
>>> from sklearn.metrics import r2_score
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> r2_score(y_true, y_pred) # doctest: +ELLIPSIS
0.948...
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> r2_score(y_true, y_pred, multioutput='variance_weighted')
... # doctest: +ELLIPSIS
0.938...
>>> y_true = [1,2,3]
>>> y_pred = [1,2,3]
>>> r2_score(y_true, y_pred)
1.0
>>> y_true = [1,2,3]
>>> y_pred = [2,2,2]
>>> r2_score(y_true, y_pred)
0.0
>>> y_true = [1,2,3]
>>> y_pred = [3,2,1]
>>> r2_score(y_true, y_pred)
-3.0
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
weight = sample_weight[:, np.newaxis]
else:
weight = 1.
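    # Per output: R^2 = 1 - SS_res / SS_tot, where `numerator` below is the (weighted)
    # residual sum of squares and `denominator` is the (weighted) total sum of squares.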
numerator = (weight * (y_true - y_pred) ** 2).sum(axis=0,
dtype=np.float64)
denominator = (weight * (y_true - np.average(
y_true, axis=0, weights=sample_weight)) ** 2).sum(axis=0,
dtype=np.float64)
nonzero_denominator = denominator != 0
nonzero_numerator = numerator != 0
valid_score = nonzero_denominator & nonzero_numerator
output_scores = np.ones([y_true.shape[1]])
output_scores[valid_score] = 1 - (numerator[valid_score] /
denominator[valid_score])
    # arbitrarily set to zero to avoid -inf scores; having a constant
# y_true is not interesting for scoring a regression anyway
output_scores[nonzero_numerator & ~nonzero_denominator] = 0.
if isinstance(multioutput, string_types):
if multioutput == 'raw_values':
# return scores individually
return output_scores
elif multioutput == 'uniform_average':
            # passing None as weights results in uniform mean
avg_weights = None
elif multioutput == 'variance_weighted':
avg_weights = denominator
# avoid fail on constant y or one-element arrays
if not np.any(nonzero_denominator):
if not np.any(nonzero_numerator):
return 1.0
else:
return 0.0
else:
avg_weights = multioutput
return np.average(output_scores, weights=avg_weights)
| bsd-3-clause | 4,722,567,385,122,498,000 | 34.029825 | 79 | 0.589122 | false |
tima/ansible | test/units/module_utils/facts/test_ansible_collector.py | 25 | 12747 | # -*- coding: utf-8 -*-
#
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Make coding more python3-ish
from __future__ import (absolute_import, division)
__metaclass__ = type
# for testing
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import Mock, patch
from ansible.module_utils.facts import collector
from ansible.module_utils.facts import ansible_collector
from ansible.module_utils.facts import namespace
from ansible.module_utils.facts.other.facter import FacterFactCollector
from ansible.module_utils.facts.other.ohai import OhaiFactCollector
from ansible.module_utils.facts.system.apparmor import ApparmorFactCollector
from ansible.module_utils.facts.system.caps import SystemCapabilitiesFactCollector
from ansible.module_utils.facts.system.date_time import DateTimeFactCollector
from ansible.module_utils.facts.system.env import EnvFactCollector
from ansible.module_utils.facts.system.distribution import DistributionFactCollector
from ansible.module_utils.facts.system.dns import DnsFactCollector
from ansible.module_utils.facts.system.fips import FipsFactCollector
from ansible.module_utils.facts.system.local import LocalFactCollector
from ansible.module_utils.facts.system.lsb import LSBFactCollector
from ansible.module_utils.facts.system.pkg_mgr import PkgMgrFactCollector, OpenBSDPkgMgrFactCollector
from ansible.module_utils.facts.system.platform import PlatformFactCollector
from ansible.module_utils.facts.system.python import PythonFactCollector
from ansible.module_utils.facts.system.selinux import SelinuxFactCollector
from ansible.module_utils.facts.system.service_mgr import ServiceMgrFactCollector
from ansible.module_utils.facts.system.user import UserFactCollector
# from ansible.module_utils.facts.hardware.base import HardwareCollector
from ansible.module_utils.facts.network.base import NetworkCollector
from ansible.module_utils.facts.virtual.base import VirtualCollector
ALL_COLLECTOR_CLASSES = \
[PlatformFactCollector,
DistributionFactCollector,
SelinuxFactCollector,
ApparmorFactCollector,
SystemCapabilitiesFactCollector,
FipsFactCollector,
PkgMgrFactCollector,
OpenBSDPkgMgrFactCollector,
ServiceMgrFactCollector,
LSBFactCollector,
DateTimeFactCollector,
UserFactCollector,
LocalFactCollector,
EnvFactCollector,
DnsFactCollector,
PythonFactCollector,
     # FIXME: re-enable when Hardware() doesn't munge self.facts
# HardwareCollector
NetworkCollector,
VirtualCollector,
OhaiFactCollector,
FacterFactCollector]
def mock_module(gather_subset=None):
if gather_subset is None:
gather_subset = ['all', '!facter', '!ohai']
mock_module = Mock()
mock_module.params = {'gather_subset': gather_subset,
'gather_timeout': 5,
'filter': '*'}
mock_module.get_bin_path = Mock(return_value=None)
return mock_module
def _collectors(module,
all_collector_classes=None,
minimal_gather_subset=None):
gather_subset = module.params.get('gather_subset')
if all_collector_classes is None:
all_collector_classes = ALL_COLLECTOR_CLASSES
if minimal_gather_subset is None:
minimal_gather_subset = frozenset([])
collector_classes = \
collector.collector_classes_from_gather_subset(all_collector_classes=all_collector_classes,
minimal_gather_subset=minimal_gather_subset,
gather_subset=gather_subset)
collectors = []
for collector_class in collector_classes:
collector_obj = collector_class()
collectors.append(collector_obj)
    # Add a collector that knows what gather_subset we used so it can provide a fact
collector_meta_data_collector = \
ansible_collector.CollectorMetaDataCollector(gather_subset=gather_subset,
module_setup=True)
collectors.append(collector_meta_data_collector)
return collectors
ns = namespace.PrefixFactNamespace('ansible_facts', 'ansible_')
# FIXME: this is brute force, but hopefully enough to get some refactoring to make facts testable
class TestInPlace(unittest.TestCase):
def _mock_module(self, gather_subset=None):
return mock_module(gather_subset=gather_subset)
def _collectors(self, module,
all_collector_classes=None,
minimal_gather_subset=None):
return _collectors(module=module,
all_collector_classes=all_collector_classes,
minimal_gather_subset=minimal_gather_subset)
def test(self):
gather_subset = ['all']
mock_module = self._mock_module(gather_subset=gather_subset)
all_collector_classes = [EnvFactCollector]
collectors = self._collectors(mock_module,
all_collector_classes=all_collector_classes)
fact_collector = \
ansible_collector.AnsibleFactCollector(collectors=collectors,
namespace=ns)
res = fact_collector.collect(module=mock_module)
self.assertIsInstance(res, dict)
self.assertIn('env', res)
self.assertIn('gather_subset', res)
self.assertEqual(res['gather_subset'], ['all'])
def test1(self):
gather_subset = ['all']
mock_module = self._mock_module(gather_subset=gather_subset)
collectors = self._collectors(mock_module)
fact_collector = \
ansible_collector.AnsibleFactCollector(collectors=collectors,
namespace=ns)
res = fact_collector.collect(module=mock_module)
self.assertIsInstance(res, dict)
# just assert it's not almost empty
# with run_command and get_file_content mock, many facts are empty, like network
self.assertGreater(len(res), 20)
def test_empty_all_collector_classes(self):
mock_module = self._mock_module()
all_collector_classes = []
collectors = self._collectors(mock_module,
all_collector_classes=all_collector_classes)
fact_collector = \
ansible_collector.AnsibleFactCollector(collectors=collectors,
namespace=ns)
res = fact_collector.collect()
self.assertIsInstance(res, dict)
# just assert it's not almost empty
self.assertLess(len(res), 3)
# def test_facts_class(self):
# mock_module = self._mock_module()
# Facts(mock_module)
# def test_facts_class_load_on_init_false(self):
# mock_module = self._mock_module()
# Facts(mock_module, load_on_init=False)
# # FIXME: assert something
class TestCollectedFacts(unittest.TestCase):
gather_subset = ['all', '!facter', '!ohai']
min_fact_count = 30
max_fact_count = 1000
# TODO: add ansible_cmdline, ansible_*_pubkey* back when TempFactCollector goes away
expected_facts = ['date_time',
'user_id', 'distribution',
'gather_subset', 'module_setup',
'env']
not_expected_facts = ['facter', 'ohai']
def _mock_module(self, gather_subset=None):
return mock_module(gather_subset=self.gather_subset)
def setUp(self):
mock_module = self._mock_module()
collectors = self._collectors(mock_module)
fact_collector = \
ansible_collector.AnsibleFactCollector(collectors=collectors,
namespace=ns)
self.facts = fact_collector.collect(module=mock_module)
def _collectors(self, module,
all_collector_classes=None,
minimal_gather_subset=None):
return _collectors(module=module,
all_collector_classes=all_collector_classes,
minimal_gather_subset=minimal_gather_subset)
def test_basics(self):
self._assert_basics(self.facts)
def test_expected_facts(self):
self._assert_expected_facts(self.facts)
def test_not_expected_facts(self):
self._assert_not_expected_facts(self.facts)
def _assert_basics(self, facts):
self.assertIsInstance(facts, dict)
# just assert it's not almost empty
self.assertGreaterEqual(len(facts), self.min_fact_count)
# and that is not huge number of keys
self.assertLess(len(facts), self.max_fact_count)
# everything starts with ansible_ namespace
def _assert_ansible_namespace(self, facts):
# FIXME: kluge for non-namespace fact
facts.pop('module_setup', None)
facts.pop('gather_subset', None)
for fact_key in facts:
self.assertTrue(fact_key.startswith('ansible_'),
'The fact name "%s" does not startwith "ansible_"' % fact_key)
def _assert_expected_facts(self, facts):
facts_keys = sorted(facts.keys())
for expected_fact in self.expected_facts:
self.assertIn(expected_fact, facts_keys)
def _assert_not_expected_facts(self, facts):
facts_keys = sorted(facts.keys())
for not_expected_fact in self.not_expected_facts:
self.assertNotIn(not_expected_fact, facts_keys)
class ExceptionThrowingCollector(collector.BaseFactCollector):
def collect(self, module=None, collected_facts=None):
raise Exception('A collector failed')
class TestExceptionCollectedFacts(TestCollectedFacts):
def _collectors(self, module,
all_collector_classes=None,
minimal_gather_subset=None):
collectors = _collectors(module=module,
all_collector_classes=all_collector_classes,
minimal_gather_subset=minimal_gather_subset)
c = [ExceptionThrowingCollector()] + collectors
return c
class TestOnlyExceptionCollector(TestCollectedFacts):
expected_facts = []
min_fact_count = 0
def _collectors(self, module,
all_collector_classes=None,
minimal_gather_subset=None):
return [ExceptionThrowingCollector()]
class TestMinimalCollectedFacts(TestCollectedFacts):
gather_subset = ['!all']
min_fact_count = 1
max_fact_count = 10
expected_facts = ['gather_subset',
'module_setup']
not_expected_facts = ['lsb']
class TestFacterCollectedFacts(TestCollectedFacts):
gather_subset = ['!all', 'facter']
min_fact_count = 1
max_fact_count = 10
expected_facts = ['gather_subset',
'module_setup']
not_expected_facts = ['lsb']
class TestOhaiCollectedFacts(TestCollectedFacts):
gather_subset = ['!all', 'ohai']
min_fact_count = 1
max_fact_count = 10
expected_facts = ['gather_subset',
'module_setup']
not_expected_facts = ['lsb']
class TestPkgMgrFacts(TestCollectedFacts):
gather_subset = ['pkg_mgr']
min_fact_count = 1
max_fact_count = 10
expected_facts = ['gather_subset',
'module_setup',
'pkg_mgr']
class TestOpenBSDPkgMgrFacts(TestPkgMgrFacts):
def test_is_openbsd_pkg(self):
self.assertIn('pkg_mgr', self.facts)
self.assertEqual(self.facts['pkg_mgr'], 'openbsd_pkg')
def setUp(self):
self.patcher = patch('platform.system')
mock_platform = self.patcher.start()
mock_platform.return_value = 'OpenBSD'
mock_module = self._mock_module()
collectors = self._collectors(mock_module)
fact_collector = \
ansible_collector.AnsibleFactCollector(collectors=collectors,
namespace=ns)
self.facts = fact_collector.collect(module=mock_module)
def tearDown(self):
self.patcher.stop()
| gpl-3.0 | -5,755,613,756,221,977,000 | 36.055233 | 101 | 0.651369 | false |
fujicoin/electrum-fjc | electrum/x509.py | 3 | 11467 | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2014 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import hashlib
import time
from datetime import datetime
import ecdsa
from . import util
from .util import profiler, bh2u
from .logging import get_logger
_logger = get_logger(__name__)
# algo OIDs
ALGO_RSA_SHA1 = '1.2.840.113549.1.1.5'
ALGO_RSA_SHA256 = '1.2.840.113549.1.1.11'
ALGO_RSA_SHA384 = '1.2.840.113549.1.1.12'
ALGO_RSA_SHA512 = '1.2.840.113549.1.1.13'
ALGO_ECDSA_SHA256 = '1.2.840.10045.4.3.2'
# prefixes, see http://stackoverflow.com/questions/3713774/c-sharp-how-to-calculate-asn-1-der-encoding-of-a-particular-hash-algorithm
PREFIX_RSA_SHA256 = bytearray(
[0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01, 0x05, 0x00, 0x04, 0x20])
PREFIX_RSA_SHA384 = bytearray(
[0x30, 0x41, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x02, 0x05, 0x00, 0x04, 0x30])
PREFIX_RSA_SHA512 = bytearray(
[0x30, 0x51, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03, 0x05, 0x00, 0x04, 0x40])
# types used in ASN1 structured data
ASN1_TYPES = {
'BOOLEAN' : 0x01,
'INTEGER' : 0x02,
'BIT STRING' : 0x03,
'OCTET STRING' : 0x04,
'NULL' : 0x05,
'OBJECT IDENTIFIER': 0x06,
'SEQUENCE' : 0x70,
'SET' : 0x71,
'PrintableString' : 0x13,
'IA5String' : 0x16,
'UTCTime' : 0x17,
'GeneralizedTime' : 0x18,
'ENUMERATED' : 0x0A,
'UTF8String' : 0x0C,
}
class CertificateError(Exception):
pass
# helper functions
def bitstr_to_bytestr(s):
if s[0] != 0x00:
raise TypeError('no padding')
return s[1:]
def bytestr_to_int(s):
i = 0
for char in s:
i <<= 8
i |= char
return i
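# e.g. bytestr_to_int(b'\x01\x00') == 256 (big-endian, unsigned)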
def decode_OID(s):
r = []
r.append(s[0] // 40)
r.append(s[0] % 40)
k = 0
for i in s[1:]:
if i < 128:
r.append(i + 128 * k)
k = 0
else:
k = (i - 128) + 128 * k
return '.'.join(map(str, r))
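# e.g. decode_OID(b'\x2a\x86\x48') == '1.2.840': the first byte packs the first two
# arcs (40 * arc1 + arc2); later arcs use base-128 with the high bit as a continuation flag.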
def encode_OID(oid):
x = [int(i) for i in oid.split('.')]
s = chr(x[0] * 40 + x[1])
for i in x[2:]:
ss = chr(i % 128)
while i > 128:
i //= 128
ss = chr(128 + i % 128) + ss
s += ss
return s
class ASN1_Node(bytes):
def get_node(self, ix):
# return index of first byte, first content byte and last byte.
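        # DER lengths: the short form is a single byte < 0x80; in the long form the low
        # 7 bits of the first byte give how many subsequent bytes encode the length.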
first = self[ix + 1]
if (first & 0x80) == 0:
length = first
ixf = ix + 2
ixl = ixf + length - 1
else:
lengthbytes = first & 0x7F
length = bytestr_to_int(self[ix + 2:ix + 2 + lengthbytes])
ixf = ix + 2 + lengthbytes
ixl = ixf + length - 1
return ix, ixf, ixl
def root(self):
return self.get_node(0)
def next_node(self, node):
ixs, ixf, ixl = node
return self.get_node(ixl + 1)
def first_child(self, node):
ixs, ixf, ixl = node
if self[ixs] & 0x20 != 0x20:
raise TypeError('Can only open constructed types.', hex(self[ixs]))
return self.get_node(ixf)
def is_child_of(node1, node2):
ixs, ixf, ixl = node1
jxs, jxf, jxl = node2
return ((ixf <= jxs) and (jxl <= ixl)) or ((jxf <= ixs) and (ixl <= jxl))
def get_all(self, node):
# return type + length + value
ixs, ixf, ixl = node
return self[ixs:ixl + 1]
def get_value_of_type(self, node, asn1_type):
# verify type byte and return content
ixs, ixf, ixl = node
if ASN1_TYPES[asn1_type] != self[ixs]:
raise TypeError('Wrong type:', hex(self[ixs]), hex(ASN1_TYPES[asn1_type]))
return self[ixf:ixl + 1]
def get_value(self, node):
ixs, ixf, ixl = node
return self[ixf:ixl + 1]
def get_children(self, node):
nodes = []
ii = self.first_child(node)
nodes.append(ii)
while ii[2] < node[2]:
ii = self.next_node(ii)
nodes.append(ii)
return nodes
def get_sequence(self):
return list(map(lambda j: self.get_value(j), self.get_children(self.root())))
def get_dict(self, node):
p = {}
for ii in self.get_children(node):
for iii in self.get_children(ii):
iiii = self.first_child(iii)
oid = decode_OID(self.get_value_of_type(iiii, 'OBJECT IDENTIFIER'))
iiii = self.next_node(iiii)
value = self.get_value(iiii)
p[oid] = value
return p
def decode_time(self, ii):
GENERALIZED_TIMESTAMP_FMT = '%Y%m%d%H%M%SZ'
UTCTIME_TIMESTAMP_FMT = '%y%m%d%H%M%SZ'
try:
return time.strptime(self.get_value_of_type(ii, 'UTCTime').decode('ascii'), UTCTIME_TIMESTAMP_FMT)
except TypeError:
return time.strptime(self.get_value_of_type(ii, 'GeneralizedTime').decode('ascii'), GENERALIZED_TIMESTAMP_FMT)
class X509(object):
def __init__(self, b):
self.bytes = bytearray(b)
der = ASN1_Node(b)
root = der.root()
cert = der.first_child(root)
# data for signature
self.data = der.get_all(cert)
# optional version field
if der.get_value(cert)[0] == 0xa0:
version = der.first_child(cert)
serial_number = der.next_node(version)
else:
serial_number = der.first_child(cert)
self.serial_number = bytestr_to_int(der.get_value_of_type(serial_number, 'INTEGER'))
# signature algorithm
sig_algo = der.next_node(serial_number)
ii = der.first_child(sig_algo)
self.sig_algo = decode_OID(der.get_value_of_type(ii, 'OBJECT IDENTIFIER'))
# issuer
issuer = der.next_node(sig_algo)
self.issuer = der.get_dict(issuer)
# validity
validity = der.next_node(issuer)
ii = der.first_child(validity)
self.notBefore = der.decode_time(ii)
ii = der.next_node(ii)
self.notAfter = der.decode_time(ii)
# subject
subject = der.next_node(validity)
self.subject = der.get_dict(subject)
subject_pki = der.next_node(subject)
public_key_algo = der.first_child(subject_pki)
ii = der.first_child(public_key_algo)
self.public_key_algo = decode_OID(der.get_value_of_type(ii, 'OBJECT IDENTIFIER'))
if self.public_key_algo != '1.2.840.10045.2.1': # for non EC public key
# pubkey modulus and exponent
subject_public_key = der.next_node(public_key_algo)
spk = der.get_value_of_type(subject_public_key, 'BIT STRING')
spk = ASN1_Node(bitstr_to_bytestr(spk))
r = spk.root()
modulus = spk.first_child(r)
exponent = spk.next_node(modulus)
rsa_n = spk.get_value_of_type(modulus, 'INTEGER')
rsa_e = spk.get_value_of_type(exponent, 'INTEGER')
self.modulus = ecdsa.util.string_to_number(rsa_n)
self.exponent = ecdsa.util.string_to_number(rsa_e)
else:
subject_public_key = der.next_node(public_key_algo)
spk = der.get_value_of_type(subject_public_key, 'BIT STRING')
self.ec_public_key = spk
# extensions
self.CA = False
self.AKI = None
self.SKI = None
i = subject_pki
while i[2] < cert[2]:
i = der.next_node(i)
d = der.get_dict(i)
for oid, value in d.items():
value = ASN1_Node(value)
if oid == '2.5.29.19':
# Basic Constraints
self.CA = bool(value)
elif oid == '2.5.29.14':
# Subject Key Identifier
r = value.root()
value = value.get_value_of_type(r, 'OCTET STRING')
self.SKI = bh2u(value)
elif oid == '2.5.29.35':
# Authority Key Identifier
self.AKI = bh2u(value.get_sequence()[0])
else:
pass
# cert signature
cert_sig_algo = der.next_node(cert)
ii = der.first_child(cert_sig_algo)
self.cert_sig_algo = decode_OID(der.get_value_of_type(ii, 'OBJECT IDENTIFIER'))
cert_sig = der.next_node(cert_sig_algo)
self.signature = der.get_value(cert_sig)[1:]
def get_keyID(self):
# http://security.stackexchange.com/questions/72077/validating-an-ssl-certificate-chain-according-to-rfc-5280-am-i-understanding-th
return self.SKI if self.SKI else repr(self.subject)
def get_issuer_keyID(self):
return self.AKI if self.AKI else repr(self.issuer)
def get_common_name(self):
return self.subject.get('2.5.4.3', b'unknown').decode()
def get_signature(self):
return self.cert_sig_algo, self.signature, self.data
def check_ca(self):
return self.CA
def check_date(self):
now = time.gmtime()
if self.notBefore > now:
raise CertificateError('Certificate has not entered its valid date range. (%s)' % self.get_common_name())
if self.notAfter <= now:
dt = datetime.utcfromtimestamp(time.mktime(self.notAfter))
raise CertificateError(f'Certificate ({self.get_common_name()}) has expired (at {dt} UTC).')
def getFingerprint(self):
return hashlib.sha1(self.bytes).digest()
@profiler
def load_certificates(ca_path):
from . import pem
ca_list = {}
ca_keyID = {}
# ca_path = '/tmp/tmp.txt'
with open(ca_path, 'r', encoding='utf-8') as f:
s = f.read()
bList = pem.dePemList(s, "CERTIFICATE")
for b in bList:
try:
x = X509(b)
x.check_date()
except BaseException as e:
# with open('/tmp/tmp.txt', 'w') as f:
# f.write(pem.pem(b, 'CERTIFICATE').decode('ascii'))
_logger.info(f"cert error: {e}")
continue
fp = x.getFingerprint()
ca_list[fp] = x
ca_keyID[x.get_keyID()] = fp
return ca_list, ca_keyID
if __name__ == "__main__":
import certifi
ca_path = certifi.where()
ca_list, ca_keyID = load_certificates(ca_path)
| mit | -5,439,504,780,668,002,000 | 31.856734 | 139 | 0.574518 | false |
slyphon/pants | src/python/pants/goal/run_tracker.py | 4 | 13092 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import json
import os
import sys
import threading
import time
import uuid
from contextlib import contextmanager
import requests
from pants.base.build_environment import get_pants_cachedir
from pants.base.run_info import RunInfo
from pants.base.worker_pool import SubprocPool, WorkerPool
from pants.base.workunit import WorkUnit
from pants.goal.aggregated_timings import AggregatedTimings
from pants.goal.artifact_cache_stats import ArtifactCacheStats
from pants.reporting.report import Report
from pants.stats.statsdb import StatsDBFactory
from pants.subsystem.subsystem import Subsystem
from pants.util.dirutil import relative_symlink, safe_file_dump
class RunTracker(Subsystem):
"""Tracks and times the execution of a pants run.
Also manages background work.
Use like this:
run_tracker.start()
with run_tracker.new_workunit('compile'):
with run_tracker.new_workunit('java'):
...
with run_tracker.new_workunit('scala'):
...
run_tracker.close()
Can track execution against multiple 'roots', e.g., one for the main thread and another for
background threads.
"""
options_scope = 'run-tracker'
# The name of the tracking root for the main thread (and the foreground worker threads).
DEFAULT_ROOT_NAME = 'main'
# The name of the tracking root for the background worker threads.
BACKGROUND_ROOT_NAME = 'background'
@classmethod
def subsystem_dependencies(cls):
return (StatsDBFactory,)
@classmethod
def register_options(cls, register):
register('--stats-upload-url', advanced=True, default=None,
help='Upload stats to this URL on run completion.')
register('--stats-upload-timeout', advanced=True, type=int, default=2,
help='Wait at most this many seconds for the stats upload to complete.')
register('--num-foreground-workers', advanced=True, type=int, default=8,
help='Number of threads for foreground work.')
register('--num-background-workers', advanced=True, type=int, default=8,
help='Number of threads for background work.')
def __init__(self, *args, **kwargs):
super(RunTracker, self).__init__(*args, **kwargs)
run_timestamp = time.time()
cmd_line = ' '.join(['pants'] + sys.argv[1:])
# run_id is safe for use in paths.
millis = int((run_timestamp * 1000) % 1000)
run_id = 'pants_run_{}_{}_{}'.format(
time.strftime('%Y_%m_%d_%H_%M_%S', time.localtime(run_timestamp)), millis,
uuid.uuid4().hex)
info_dir = os.path.join(self.get_options().pants_workdir, self.options_scope)
self.run_info_dir = os.path.join(info_dir, run_id)
self.run_info = RunInfo(os.path.join(self.run_info_dir, 'info'))
self.run_info.add_basic_info(run_id, run_timestamp)
self.run_info.add_info('cmd_line', cmd_line)
# Create a 'latest' symlink, after we add_infos, so we're guaranteed that the file exists.
link_to_latest = os.path.join(os.path.dirname(self.run_info_dir), 'latest')
relative_symlink(self.run_info_dir, link_to_latest)
# Time spent in a workunit, including its children.
self.cumulative_timings = AggregatedTimings(os.path.join(self.run_info_dir,
'cumulative_timings'))
# Time spent in a workunit, not including its children.
self.self_timings = AggregatedTimings(os.path.join(self.run_info_dir, 'self_timings'))
# Hit/miss stats for the artifact cache.
self.artifact_cache_stats = \
ArtifactCacheStats(os.path.join(self.run_info_dir, 'artifact_cache_stats'))
# Number of threads for foreground work.
self._num_foreground_workers = self.get_options().num_foreground_workers
# Number of threads for background work.
self._num_background_workers = self.get_options().num_background_workers
# We report to this Report.
self.report = None
# self._threadlocal.current_workunit contains the current workunit for the calling thread.
# Note that multiple threads may share a name (e.g., all the threads in a pool).
self._threadlocal = threading.local()
# For main thread work. Created on start().
self._main_root_workunit = None
# For background work. Created lazily if needed.
self._background_worker_pool = None
self._background_root_workunit = None
# Trigger subproc pool init while our memory image is still clean (see SubprocPool docstring)
SubprocPool.foreground()
self._aborted = False
def register_thread(self, parent_workunit):
"""Register the parent workunit for all work in the calling thread.
Multiple threads may have the same parent (e.g., all the threads in a pool).
"""
self._threadlocal.current_workunit = parent_workunit
def is_under_main_root(self, workunit):
"""Is the workunit running under the main thread's root."""
return workunit.root() == self._main_root_workunit
def start(self, report):
"""Start tracking this pants run.
report: an instance of pants.reporting.Report."""
self.report = report
self.report.open()
self._main_root_workunit = WorkUnit(run_info_dir=self.run_info_dir, parent=None,
name=RunTracker.DEFAULT_ROOT_NAME, cmd=None)
self.register_thread(self._main_root_workunit)
self._main_root_workunit.start()
self.report.start_workunit(self._main_root_workunit)
def set_root_outcome(self, outcome):
"""Useful for setup code that doesn't have a reference to a workunit."""
self._main_root_workunit.set_outcome(outcome)
@contextmanager
def new_workunit(self, name, labels=None, cmd='', log_config=None):
"""Creates a (hierarchical) subunit of work for the purpose of timing and reporting.
- name: A short name for this work. E.g., 'resolve', 'compile', 'scala', 'zinc'.
- labels: An optional iterable of labels. The reporters can use this to decide how to
display information about this work.
- cmd: An optional longer string representing this work.
E.g., the cmd line of a compiler invocation.
- log_config: An optional tuple WorkUnit.LogConfig of task-level options affecting reporting.
Use like this:
with run_tracker.new_workunit(name='compile', labels=[WorkUnitLabel.TASK]) as workunit:
<do scoped work here>
<set the outcome on workunit if necessary>
Note that the outcome will automatically be set to failure if an exception is raised
in a workunit, and to success otherwise, so usually you only need to set the
outcome explicitly if you want to set it to warning.
"""
parent = self._threadlocal.current_workunit
with self.new_workunit_under_parent(name, parent=parent, labels=labels, cmd=cmd,
log_config=log_config) as workunit:
self._threadlocal.current_workunit = workunit
try:
yield workunit
finally:
self._threadlocal.current_workunit = parent
@contextmanager
def new_workunit_under_parent(self, name, parent, labels=None, cmd='', log_config=None):
"""Creates a (hierarchical) subunit of work for the purpose of timing and reporting.
- name: A short name for this work. E.g., 'resolve', 'compile', 'scala', 'zinc'.
- parent: The new workunit is created under this parent.
- labels: An optional iterable of labels. The reporters can use this to decide how to
display information about this work.
- cmd: An optional longer string representing this work.
E.g., the cmd line of a compiler invocation.
Task code should not typically call this directly.
"""
workunit = WorkUnit(run_info_dir=self.run_info_dir, parent=parent, name=name, labels=labels,
cmd=cmd, log_config=log_config)
workunit.start()
try:
self.report.start_workunit(workunit)
yield workunit
except KeyboardInterrupt:
workunit.set_outcome(WorkUnit.ABORTED)
self._aborted = True
raise
except:
workunit.set_outcome(WorkUnit.FAILURE)
raise
else:
workunit.set_outcome(WorkUnit.SUCCESS)
finally:
self.end_workunit(workunit)
def log(self, level, *msg_elements):
"""Log a message against the current workunit."""
self.report.log(self._threadlocal.current_workunit, level, *msg_elements)
@classmethod
def post_stats(cls, url, stats, timeout=2):
"""POST stats to the given url.
:return: True if upload was successful, False otherwise.
"""
def error(msg):
# Report already closed, so just print error.
print('WARNING: Failed to upload stats to {} due to {}'.format(url, msg),
file=sys.stderr)
return False
# TODO(benjy): The upload protocol currently requires separate top-level params, with JSON
# values. Probably better for there to be one top-level JSON value, namely json.dumps(stats).
# But this will first require changing the upload receiver at every shop that uses this
# (probably only Foursquare at present).
params = {k: json.dumps(v) for (k, v) in stats.items()}
try:
r = requests.post(url, data=params, timeout=timeout)
if r.status_code != requests.codes.ok:
return error("HTTP error code: {}".format(r.status_code))
except Exception as e: # Broad catch - we don't want to fail the build over upload errors.
return error("Error: {}".format(e))
return True
def store_stats(self):
"""Store stats about this run in local and optionally remote stats dbs."""
stats = {
'run_info': self.run_info.get_as_dict(),
'cumulative_timings': self.cumulative_timings.get_all(),
'self_timings': self.self_timings.get_all(),
'artifact_cache_stats': self.artifact_cache_stats.get_all()
}
# Dump individual stat file.
# TODO(benjy): Do we really need these, once the statsdb is mature?
stats_file = os.path.join(get_pants_cachedir(), 'stats',
'{}.json'.format(self.run_info.get_info('id')))
safe_file_dump(stats_file, json.dumps(stats))
# Add to local stats db.
StatsDBFactory.global_instance().get_db().insert_stats(stats)
# Upload to remote stats db.
stats_url = self.get_options().stats_upload_url
if stats_url:
self.post_stats(stats_url, stats, timeout=self.get_options().stats_upload_timeout)
_log_levels = [Report.ERROR, Report.ERROR, Report.WARN, Report.INFO, Report.INFO]
def end(self):
"""This pants run is over, so stop tracking it.
Note: If end() has been called once, subsequent calls are no-ops.
"""
if self._background_worker_pool:
if self._aborted:
self.log(Report.INFO, "Aborting background workers.")
self._background_worker_pool.abort()
else:
self.log(Report.INFO, "Waiting for background workers to finish.")
self._background_worker_pool.shutdown()
self.end_workunit(self._background_root_workunit)
SubprocPool.shutdown(self._aborted)
# Run a dummy work unit to write out one last timestamp
with self.new_workunit("complete"):
pass
self.end_workunit(self._main_root_workunit)
outcome = self._main_root_workunit.outcome()
if self._background_root_workunit:
outcome = min(outcome, self._background_root_workunit.outcome())
outcome_str = WorkUnit.outcome_string(outcome)
log_level = RunTracker._log_levels[outcome]
self.log(log_level, outcome_str)
if self.run_info.get_info('outcome') is None:
# If the goal is clean-all then the run info dir no longer exists, so ignore that error.
self.run_info.add_info('outcome', outcome_str, ignore_errors=True)
self.report.close()
self.store_stats()
def end_workunit(self, workunit):
self.report.end_workunit(workunit)
path, duration, self_time, is_tool = workunit.end()
self.cumulative_timings.add_timing(path, duration, is_tool)
self.self_timings.add_timing(path, self_time, is_tool)
def get_background_root_workunit(self):
if self._background_root_workunit is None:
self._background_root_workunit = WorkUnit(run_info_dir=self.run_info_dir, parent=None,
name='background', cmd=None)
self._background_root_workunit.start()
self.report.start_workunit(self._background_root_workunit)
return self._background_root_workunit
def background_worker_pool(self):
if self._background_worker_pool is None: # Initialize lazily.
self._background_worker_pool = WorkerPool(parent_workunit=self.get_background_root_workunit(),
run_tracker=self,
num_workers=self._num_background_workers)
return self._background_worker_pool
| apache-2.0 | 3,802,772,412,755,832,300 | 39.036697 | 100 | 0.67736 | false |
wgwoods/anaconda | dracut/driver_updates.py | 3 | 21945 | #!/usr/bin/python3
#
# Copyright (C) 2015 by Red Hat, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author(s):
# Brian C. Lane <[email protected]>
# Will Woods <[email protected]>
#
"""
Driver Update Disk handler program.
This will be called once for each requested driverdisk (non-interactive), and
once for interactive mode (if requested).
Usage is one of:
driver-updates --disk DISKSTR DEVNODE
DISKSTR is the string passed by the user ('/dev/sda3', 'LABEL=DD', etc.)
DEVNODE is the actual device node or image (/dev/sda3, /dev/sr0, etc.)
DEVNODE must be mountable, but need not actually be a block device
(e.g. /dd.iso is valid if the user has inserted /dd.iso into initrd)
driver-updates --net URL LOCALFILE
URL is the string passed by the user ('http://.../something.iso')
LOCALFILE is the location of the downloaded file
driver-updates --interactive
The user will be presented with a menu where they can choose a disk
and pick individual drivers to install.
/tmp/dd_net contains the list of URLs given by the user.
/tmp/dd_disk contains the list of disk devices given by the user.
/tmp/dd_interactive contains "menu" if interactive mode was requested.
/tmp/dd.done should be created when all the user-requested stuff above has been
handled; the installer won't start up until this file is created.
Packages will be extracted to /updates, which gets overlaid on top
of the installer's filesystem when we leave the initramfs.
Modules and firmware get moved to /lib/modules/`uname -r`/updates and
/lib/firmware/updates (under /updates, as above). They also get copied into the
corresponding paths in the initramfs, so we can load them immediately.
The repositories get copied into /run/install/DD-1, /run/install/DD-2, etc.
Driver package names are saved in /run/install/dd_packages.
During system installation, anaconda will install the packages listed in
/run/install/dd_packages to the target system.
"""
import logging
import sys
import os
import subprocess
import fnmatch
# Import readline so raw_input gets readline features, like history, and
# backspace working right. Do not import readline if not connected to a tty
# because it breaks sometimes.
if os.isatty(0):
import readline # pylint:disable=unused-import
from contextlib import contextmanager
from logging.handlers import SysLogHandler
# py2 compat
try:
from subprocess import DEVNULL
except ImportError:
DEVNULL = open("/dev/null", 'a+')
try:
_input = raw_input # pylint: disable=undefined-variable
except NameError:
_input = input
log = logging.getLogger("DD")
# NOTE: Yes, the version is wrong, but previous versions of this utility also
# hardcoded this value, because changing it will break any driver disk that has
# binary/library packages with "installer-enhancement = 19.0".
# If we *need* to break compatibility, this should definitely get changed, but
# otherwise we probably shouldn't change this unless/until we're sure that
# everyone is using something like "installer-enhancement >= 19.0" instead.
ANACONDAVER = "19.0"
ARCH = os.uname()[4]
KERNELVER = os.uname()[2]
MODULE_UPDATES_DIR = "/lib/modules/%s/updates" % KERNELVER
FIRMWARE_UPDATES_DIR = "/lib/firmware/updates"
def mkdir_seq(stem):
"""
Create sequentially-numbered directories starting with stem.
For example, mkdir_seq("/tmp/DD-") would create "/tmp/DD-1";
if that already exists, try "/tmp/DD-2", "/tmp/DD-3", and so on,
until a directory is created.
Returns the newly-created directory name.
"""
n = 1
while True:
dirname = str(stem) + str(n)
try:
os.makedirs(dirname)
except OSError as e:
if e.errno != 17: raise
n += 1
else:
return dirname
def find_repos(mnt):
"""find any valid driverdisk repos that exist under mnt."""
dd_repos = []
for root, dirs, files in os.walk(mnt, followlinks=True):
repo = root+"/rpms/"+ARCH
if "rhdd3" in files and "rpms" in dirs and os.path.isdir(repo):
log.debug("found repo: %s", repo)
dd_repos.append(repo)
return dd_repos
# NOTE: it's unclear whether or not we're supposed to recurse subdirs looking
# for .iso files, but that seems like a bad idea if you mount some huge disk.
# So I've made a judgement call: we only load .iso files from the toplevel.
def find_isos(mnt):
"""find files named '.iso' at the top level of mnt."""
return [mnt+'/'+f for f in os.listdir(mnt) if f.lower().endswith('.iso')]
class Driver(object):
"""Represents a single driver (rpm), as listed by dd_list"""
def __init__(self, source="", name="", flags="", description="", repo=""):
self.source = source
self.name = name
self.flags = flags
self.description = description
self.repo = repo
def dd_list(dd_path, anaconda_ver=None, kernel_ver=None):
log.debug("dd_list: listing %s", dd_path)
if not anaconda_ver:
anaconda_ver = ANACONDAVER
if not kernel_ver:
kernel_ver = KERNELVER
cmd = ["dd_list", '-d', dd_path, '-k', kernel_ver, '-a', anaconda_ver]
out = subprocess.check_output(cmd, stderr=DEVNULL)
out = out.decode('utf-8')
drivers = [Driver(*d.split('\n',3)) for d in out.split('\n---\n') if d]
log.debug("dd_list: found drivers: %s", ' '.join(d.name for d in drivers))
for d in drivers: d.repo = dd_path
return drivers
def dd_extract(rpm_path, outdir, kernel_ver=None, flags='-blmf'):
log.debug("dd_extract: extracting %s", rpm_path)
if not kernel_ver:
kernel_ver = KERNELVER
cmd = ["dd_extract", flags, '-r', rpm_path, '-d', outdir, '-k', kernel_ver]
subprocess.check_output(cmd, stderr=DEVNULL) # discard stdout
def list_drivers(repos, anaconda_ver=None, kernel_ver=None):
return [d for r in repos for d in dd_list(r, anaconda_ver, kernel_ver)]
def mount(dev, mnt=None):
"""Mount the given dev at the mountpoint given by mnt."""
# NOTE: dev may be a filesystem image - "-o loop" is not necessary anymore
if not mnt:
mnt = mkdir_seq("/media/DD-")
cmd = ["mount", dev, mnt]
log.debug("mounting %s at %s", dev, mnt)
subprocess.check_call(cmd)
return mnt
def umount(mnt):
log.debug("unmounting %s", mnt)
subprocess.call(["umount", mnt])
@contextmanager
def mounted(dev, mnt=None):
mnt = mount(dev, mnt)
try:
yield mnt
finally:
umount(mnt)
def iter_files(topdir, pattern=None):
"""iterator; yields full paths to files under topdir that match pattern."""
for head, _, files in os.walk(topdir):
for f in files:
if pattern is None or fnmatch.fnmatch(f, pattern):
yield os.path.join(head, f)
def ensure_dir(d):
"""make sure the given directory exists."""
subprocess.check_call(["mkdir", "-p", d])
def move_files(files, destdir):
"""move files into destdir (iff they're not already under destdir)"""
ensure_dir(destdir)
for f in files:
if f.startswith(destdir):
continue
subprocess.call(["mv", "-f", f, destdir])
def copy_files(files, destdir):
"""copy files into destdir (iff they're not already under destdir)"""
ensure_dir(destdir)
for f in files:
if f.startswith(destdir):
continue
subprocess.call(["cp", "-a", f, destdir])
def append_line(filename, line):
"""simple helper to append a line to a file"""
if not line.endswith("\n"):
line += "\n"
with open(filename, 'a') as outf:
outf.write(line)
# NOTE: items returned by read_lines should match items passed to append_line,
# which is why we remove the newlines
def read_lines(filename):
"""return a list containing each line in filename, with newlines removed."""
try:
return [line.rstrip('\n') for line in open(filename)]
except IOError:
return []
def save_repo(repo, target="/run/install"):
"""copy a repo to the place where the installer will look for it later."""
newdir = mkdir_seq(os.path.join(target, "DD-"))
log.debug("save_repo: copying %s to %s", repo, newdir)
subprocess.call(["cp", "-arT", repo, newdir])
return newdir
def extract_drivers(drivers=None, repos=None, outdir="/updates",
pkglist="/run/install/dd_packages"):
"""
Extract drivers - either a user-selected driver list or full repos.
drivers should be a list of Drivers to extract, or None.
repos should be a list of repo paths to extract, or None.
Raises ValueError if you pass both.
If any packages containing modules or firmware are extracted, also:
* call save_repo for that package's repo
* write the package name(s) to pkglist.
Returns True if any package containing modules was extracted.
"""
if not drivers:
drivers = []
if drivers and repos:
raise ValueError("extract_drivers: drivers or repos, not both")
if repos:
drivers = list_drivers(repos)
save_repos = set()
new_drivers = False
ensure_dir(outdir)
for driver in drivers:
log.info("Extracting: %s", driver.name)
dd_extract(driver.source, outdir)
# Make sure we install modules/firmware into the target system
if 'modules' in driver.flags or 'firmwares' in driver.flags:
append_line(pkglist, driver.name)
save_repos.add(driver.repo)
new_drivers = True
# save the repos containing those packages
for repo in save_repos:
save_repo(repo)
return new_drivers
def grab_driver_files(outdir="/updates"):
"""
copy any modules/firmware we just extracted into the running system.
return a list of the names of any modules we just copied.
"""
modules = list(iter_files(outdir+'/lib/modules',"*.ko*"))
firmware = list(iter_files(outdir+'/lib/firmware'))
copy_files(modules, MODULE_UPDATES_DIR)
copy_files(firmware, FIRMWARE_UPDATES_DIR)
move_files(modules, outdir+MODULE_UPDATES_DIR)
move_files(firmware, outdir+FIRMWARE_UPDATES_DIR)
return [os.path.basename(m).split('.ko')[0] for m in modules]
def load_drivers(modnames):
"""run depmod and try to modprobe all the given module names."""
log.debug("load_drivers: %s", modnames)
subprocess.call(["depmod", "-a"])
subprocess.call(["modprobe", "-a"] + modnames)
# We *could* pass in "outdir" if we wanted to extract things somewhere else,
# but right now the only use case is running inside the initramfs, so..
def process_driver_disk(dev, interactive=False):
try:
_process_driver_disk(dev, interactive=interactive)
except (subprocess.CalledProcessError, IOError) as e:
log.error("ERROR: %s", e)
def _process_driver_disk(dev, interactive=False):
"""
Main entry point for processing a single driver disk.
Mount the device/image, find repos, and install drivers from those repos.
If there are no repos, look for .iso files, and (if present) recursively
process those.
If interactive, ask the user which driver(s) to install from the repos,
or ask which iso file to process (if no repos).
"""
log.info("Examining %s", dev)
with mounted(dev) as mnt:
repos = find_repos(mnt)
isos = find_isos(mnt)
if repos:
if interactive:
new_modules = extract_drivers(drivers=repo_menu(repos))
else:
new_modules = extract_drivers(repos=repos)
if new_modules:
modules = grab_driver_files()
load_drivers(modules)
elif isos:
if interactive:
isos = iso_menu(isos)
for iso in isos:
process_driver_disk(iso, interactive=interactive)
else:
print("=== No driver disks found in %s! ===\n" % dev)
def process_driver_rpm(rpm):
try:
_process_driver_rpm(rpm)
except (subprocess.CalledProcessError, IOError) as e:
log.error("ERROR: %s", e)
def _process_driver_rpm(rpm):
"""
Process a single driver rpm. Extract it, install it, and copy the
rpm for Anaconda to install on the target system.
"""
log.info("Examining %s", rpm)
new_modules = extract_drivers(repos=[os.path.dirname(rpm)])
if new_modules:
modules = grab_driver_files()
load_drivers(modules)
def mark_finished(user_request, topdir="/tmp"):
log.debug("marking %s complete in %s", user_request, topdir)
append_line(topdir+"/dd_finished", user_request)
def all_finished(topdir="/tmp"):
finished = read_lines(topdir+"/dd_finished")
todo = read_lines(topdir+"/dd_todo")
return all(r in finished for r in todo)
def finish(user_request, topdir="/tmp"):
# mark that we've finished processing this request
mark_finished(user_request, topdir)
# if we're done now, let dracut know
if all_finished(topdir):
append_line(topdir+"/dd.done", "true")
# --- DEVICE LISTING HELPERS FOR THE MENU -----------------------------------
class DeviceInfo(object):
def __init__(self, **kwargs):
self.device = kwargs.get("DEVNAME", '')
self.uuid = kwargs.get("UUID", '')
self.fs_type = kwargs.get("TYPE", '')
self.label = kwargs.get("LABEL", '')
def __repr__(self):
return '<DeviceInfo %s>' % self.device
@property
def shortdev(self):
# resolve any symlinks (/dev/disk/by-label/OEMDRV -> /dev/sr0)
dev = os.path.realpath(self.device)
# NOTE: not os.path.basename 'cuz some devices legitimately have
# a '/' in their name: /dev/cciss/c0d0, /dev/i2o/hda, etc.
if dev.startswith('/dev/'):
dev = dev[5:]
return dev
def blkid():
try:
out = subprocess.check_output("blkid -o export -s UUID -s TYPE".split())
out = out.decode('ascii')
return [dict(kv.split('=',1) for kv in block.splitlines())
for block in out.split('\n\n')]
except subprocess.CalledProcessError:
return []
# We use this to get disk labels because blkid's encoding of non-printable and
# non-ascii characters is weird and doesn't match what you'd expect to see.
def get_disk_labels():
return {os.path.realpath(s):os.path.basename(s)
for s in iter_files("/dev/disk/by-label")}
def get_deviceinfo():
disk_labels = get_disk_labels()
deviceinfo = [DeviceInfo(**d) for d in blkid()]
for dev in deviceinfo:
dev.label = disk_labels.get(dev.device, '')
return deviceinfo
# --- INTERACTIVE MENU JUNK ------------------------------------------------
class TextMenu(object):
def __init__(self, items, title=None, formatter=None, headeritem=None,
refresher=None, multi=False, page_height=20):
self.items = items
self.title = title
self.formatter = formatter
self.headeritem = headeritem
self.refresher = refresher
self.multi = multi
self.page_height = page_height
self.pagenum = 1
self.selected_items = []
self.is_done = False
if callable(items):
self.refresher = items
self.refresh()
@property
def num_pages(self):
pages, leftover = divmod(len(self.items), self.page_height)
if leftover:
return pages+1
else:
return pages
def next(self):
if self.pagenum < self.num_pages:
self.pagenum += 1
def prev(self):
if self.pagenum > 1:
self.pagenum -= 1
def refresh(self):
if callable(self.refresher):
self.items = self.refresher()
def done(self):
self.is_done = True
def invalid(self, k):
print("Invalid selection %r" % k)
def toggle_item(self, item):
if item in self.selected_items:
self.selected_items.remove(item)
else:
self.selected_items.append(item)
if not self.multi:
self.done()
def items_on_page(self):
start_idx = (self.pagenum-1) * self.page_height
if start_idx > len(self.items):
return []
else:
items = self.items[start_idx:start_idx+self.page_height]
return enumerate(items, start=start_idx)
def format_item(self, item):
if callable(self.formatter):
return self.formatter(item)
else:
return str(item)
def format_items(self):
for n, i in self.items_on_page():
if self.multi:
x = 'x' if i in self.selected_items else ' '
yield "%2d) [%s] %s" % (n+1, x, self.format_item(i))
else:
yield "%2d) %s" % (n+1, self.format_item(i))
def format_header(self):
if self.multi:
return (8*' ')+self.format_item(self.headeritem)
else:
return (4*' ')+self.format_item(self.headeritem)
def action_dict(self):
actions = {
'r': self.refresh,
'n': self.next,
'p': self.prev,
'c': self.done,
}
for n, i in self.items_on_page():
actions[str(n+1)] = lambda item=i: self.toggle_item(item)
return actions
def format_page(self):
page = '\n(Page {pagenum} of {num_pages}) {title}\n{items}'
items = list(self.format_items())
if self.headeritem:
items.insert(0, self.format_header())
return page.format(pagenum=self.pagenum,
num_pages=self.num_pages,
title=self.title or '',
items='\n'.join(items))
def format_prompt(self):
options = [
'# to toggle selection' if self.multi else '# to select',
"'r'-refresh" if callable(self.refresher) else None,
"'n'-next page" if self.pagenum < self.num_pages else None,
"'p'-previous page" if self.pagenum > 1 else None,
"or 'c'-continue"
]
return ', '.join(o for o in options if o is not None) + ': '
def run(self):
while not self.is_done:
print(self.format_page())
k = _input(self.format_prompt())
action = self.action_dict().get(k)
if action:
action()
else:
self.invalid(k)
return self.selected_items
def repo_menu(repos):
drivers = list_drivers(repos)
if not drivers:
log.info("No suitable drivers found.")
return []
menu = TextMenu(drivers, title="Select drivers to install",
formatter=lambda d: d.source,
multi=True)
result = menu.run()
return result
def iso_menu(isos):
menu = TextMenu(isos, title="Choose driver disk ISO file")
result = menu.run()
return result
def device_menu():
fmt = '{0.shortdev:<8.8} {0.fs_type:<8.8} {0.label:<20.20} {0.uuid:<.36}'
hdr = DeviceInfo(DEVNAME='DEVICE', TYPE='TYPE', LABEL='LABEL', UUID='UUID')
menu = TextMenu(get_deviceinfo, title="Driver disk device selection",
formatter=fmt.format,
headeritem=hdr)
result = menu.run()
return result
# --- COMMANDLINE-TYPE STUFF ------------------------------------------------
def setup_log():
log.setLevel(logging.DEBUG)
handler = SysLogHandler(address="/dev/log")
log.addHandler(handler)
handler = logging.StreamHandler()
handler.setLevel(logging.INFO)
formatter = logging.Formatter("DD: %(message)s")
handler.setFormatter(formatter)
log.addHandler(handler)
def print_usage():
print("usage: driver-updates --interactive")
print(" driver-updates --disk DISK KERNELDEV")
print(" driver-updates --net URL LOCALFILE")
def check_args(args):
if args and args[0] == '--interactive':
return True
elif len(args) == 3 and args[0] in ('--disk', '--net'):
return True
else:
return False
def main(args):
if not check_args(args):
print_usage()
raise SystemExit(2)
mode = args.pop(0)
if mode in ('--disk', '--net'):
request, dev = args
if dev.endswith(".rpm"):
process_driver_rpm(dev)
else:
process_driver_disk(dev)
elif mode == '--interactive':
log.info("starting interactive mode")
request = 'menu'
while True:
dev = device_menu()
if not dev: break
process_driver_disk(dev.pop().device, interactive=True)
finish(request)
# When using inst.dd and a cdrom stage2 it isn't mounted before running driver-updates
# In order to get the stage2 cdrom mounted it either needs to be swapped back in
# or we need to re-trigger the block rules.
if os.path.exists("/tmp/anaconda-dd-on-cdrom") and not os.path.exists("/dev/root"):
cmd = ["udevadm", "trigger", "--action=change", "--subsystem-match=block"]
subprocess.check_call(cmd)
if __name__ == '__main__':
setup_log()
try:
main(sys.argv[1:])
except KeyboardInterrupt:
log.info("exiting.")
| gpl-2.0 | 4,975,429,610,738,657,000 | 33.289063 | 90 | 0.61964 | false |
anhngduc/google-python-exersice | google-python-exercises/basic/solution/string2.py | 208 | 3094 | #!/usr/bin/python2.4 -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Additional basic string exercises
# D. verbing
# Given a string, if its length is at least 3,
# add 'ing' to its end.
# Unless it already ends in 'ing', in which case
# add 'ly' instead.
# If the string length is less than 3, leave it unchanged.
# Return the resulting string.
def verbing(s):
# +++your code here+++
# LAB(begin solution)
if len(s) >= 3:
if s[-3:] != 'ing': s = s + 'ing'
else: s = s + 'ly'
return s
# LAB(replace solution)
# return
# LAB(end solution)
# E. not_bad
# Given a string, find the first appearance of the
# substring 'not' and 'bad'. If the 'bad' follows
# the 'not', replace the whole 'not'...'bad' substring
# with 'good'.
# Return the resulting string.
# So 'This dinner is not that bad!' yields:
# This dinner is good!
def not_bad(s):
# +++your code here+++
# LAB(begin solution)
n = s.find('not')
b = s.find('bad')
if n != -1 and b != -1 and b > n:
s = s[:n] + 'good' + s[b+3:]
return s
# LAB(replace solution)
# return
# LAB(end solution)
# F. front_back
# Consider dividing a string into two halves.
# If the length is even, the front and back halves are the same length.
# If the length is odd, we'll say that the extra char goes in the front half.
# e.g. 'abcde', the front half is 'abc', the back half 'de'.
# Given 2 strings, a and b, return a string of the form
# a-front + b-front + a-back + b-back
def front_back(a, b):
# +++your code here+++
# LAB(begin solution)
# Figure out the middle position of each string.
a_middle = len(a) / 2
b_middle = len(b) / 2
if len(a) % 2 == 1: # add 1 if length is odd
a_middle = a_middle + 1
if len(b) % 2 == 1:
b_middle = b_middle + 1
return a[:a_middle] + b[:b_middle] + a[a_middle:] + b[b_middle:]
# LAB(replace solution)
# return
# LAB(end solution)
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# main() calls the above functions with interesting inputs,
# using the above test() to check if the result is correct or not.
def main():
print 'verbing'
test(verbing('hail'), 'hailing')
test(verbing('swiming'), 'swimingly')
test(verbing('do'), 'do')
print
print 'not_bad'
test(not_bad('This movie is not so bad'), 'This movie is good')
test(not_bad('This dinner is not that bad!'), 'This dinner is good!')
test(not_bad('This tea is not hot'), 'This tea is not hot')
test(not_bad("It's bad yet not"), "It's bad yet not")
print
print 'front_back'
test(front_back('abcd', 'xy'), 'abxcdy')
test(front_back('abcde', 'xyz'), 'abcxydez')
test(front_back('Kitten', 'Donut'), 'KitDontenut')
if __name__ == '__main__':
main()
| apache-2.0 | 4,579,872,254,210,249,700 | 28.188679 | 77 | 0.636393 | false |
mtougeron/python-openstacksdk | openstack/tests/unit/auth/test_service_filter.py | 2 | 7250 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
import testtools
from openstack.auth import service_filter as filt
from openstack import exceptions
from openstack.identity import identity_service
class TestServiceFilter(testtools.TestCase):
def test_minimum(self):
sot = filt.ServiceFilter()
self.assertEqual("service_type=any,interface=public",
six.text_type(sot))
def test_maximum(self):
sot = filt.ServiceFilter(service_type='compute', interface='admin',
region='b', service_name='c')
exp = "service_type=compute,interface=admin,region=b,service_name=c"
self.assertEqual(exp, six.text_type(sot))
def test_interface(self):
sot = filt.ServiceFilter(service_type='identity', interface='public')
self.assertEqual("service_type=identity,interface=public",
six.text_type(sot))
sot = filt.ServiceFilter(service_type='identity',
interface='internal')
self.assertEqual("service_type=identity,interface=internal",
six.text_type(sot))
sot = filt.ServiceFilter(service_type='identity', interface='admin')
self.assertEqual("service_type=identity,interface=admin",
six.text_type(sot))
sot = filt.ServiceFilter(service_type='identity',
interface='publicURL')
self.assertEqual("service_type=identity,interface=public",
six.text_type(sot))
sot = filt.ServiceFilter(service_type='identity',
interface='internalURL')
self.assertEqual("service_type=identity,interface=internal",
six.text_type(sot))
sot = filt.ServiceFilter(service_type='identity',
interface='adminURL')
self.assertEqual("service_type=identity,interface=admin",
six.text_type(sot))
self.assertRaises(exceptions.SDKException, filt.ServiceFilter,
service_type='identity', interface='b')
sot = filt.ServiceFilter(service_type='identity', interface=None)
self.assertEqual("service_type=identity", six.text_type(sot))
def test_match_service_type(self):
sot = filt.ServiceFilter(service_type='identity')
self.assertTrue(sot.match_service_type('identity'))
self.assertFalse(sot.match_service_type('compute'))
def test_match_service_type_any(self):
sot = filt.ServiceFilter()
self.assertTrue(sot.match_service_type('identity'))
self.assertTrue(sot.match_service_type('compute'))
def test_match_service_name(self):
sot = filt.ServiceFilter(service_type='identity')
self.assertTrue(sot.match_service_name('keystone'))
self.assertTrue(sot.match_service_name('ldap'))
self.assertTrue(sot.match_service_name(None))
sot = filt.ServiceFilter(service_type='identity',
service_name='keystone')
self.assertTrue(sot.match_service_name('keystone'))
self.assertFalse(sot.match_service_name('ldap'))
self.assertFalse(sot.match_service_name(None))
def test_match_region(self):
sot = filt.ServiceFilter(service_type='identity')
self.assertTrue(sot.match_region('East'))
self.assertTrue(sot.match_region('West'))
self.assertTrue(sot.match_region(None))
sot = filt.ServiceFilter(service_type='identity', region='East')
self.assertTrue(sot.match_region('East'))
self.assertFalse(sot.match_region('West'))
self.assertFalse(sot.match_region(None))
def test_match_interface(self):
sot = filt.ServiceFilter(service_type='identity',
interface='internal')
self.assertFalse(sot.match_interface('admin'))
self.assertTrue(sot.match_interface('internal'))
self.assertFalse(sot.match_interface('public'))
def test_join(self):
a = filt.ServiceFilter(region='east')
b = filt.ServiceFilter(service_type='identity')
result = a.join(b)
self.assertEqual("service_type=identity,interface=public,region=east",
six.text_type(result))
self.assertEqual("service_type=any,interface=public,region=east",
six.text_type(a))
self.assertEqual("service_type=identity,interface=public",
six.text_type(b))
def test_join_interface(self):
user_preference = filt.ServiceFilter(interface='public')
service_default = filt.ServiceFilter(interface='admin')
result = user_preference.join(service_default)
self.assertEqual("public", result.interface)
user_preference = filt.ServiceFilter(interface=None)
service_default = filt.ServiceFilter(interface='admin')
result = user_preference.join(service_default)
self.assertEqual("admin", result.interface)
def test_join_version(self):
user_preference = filt.ServiceFilter(version='v2')
service_default = filt.ServiceFilter()
self.assertEqual('v2', user_preference.join(service_default).version)
service_default = filt.ServiceFilter(
version=filt.ServiceFilter.UNVERSIONED
)
self.assertEqual('', user_preference.join(service_default).version)
def test_set_interface(self):
sot = filt.ServiceFilter()
sot.set_interface("PUBLICURL")
self.assertEqual('public', sot.interface)
sot.set_interface("INTERNALURL")
self.assertEqual('internal', sot.interface)
sot.set_interface("ADMINURL")
self.assertEqual('admin', sot.interface)
def test_get_module(self):
sot = identity_service.IdentityService()
self.assertEqual('openstack.identity.v3', sot.get_module())
self.assertEqual('identity', sot.get_service_module())
def test_get_version_path(self):
sot = identity_service.IdentityService()
self.assertEqual('v3', sot.get_version_path('v2'))
sot = identity_service.IdentityService(version='v2')
self.assertEqual('v2', sot.get_version_path('v3'))
sot = identity_service.IdentityService(version='v2.1')
self.assertEqual('v2.1', sot.get_version_path('v3'))
sot = identity_service.IdentityService(version='')
self.assertEqual('', sot.get_version_path('v3'))
class TestValidVersion(testtools.TestCase):
def test_constructor(self):
sot = filt.ValidVersion('v1.0', 'v1')
self.assertEqual('v1.0', sot.module)
self.assertEqual('v1', sot.path)
| apache-2.0 | -6,729,216,327,489,510,000 | 44.886076 | 78 | 0.641241 | false |
trac-hacks/trac-oidc | trac_oidc/tests/test_authenticator.py | 1 | 7340 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2015 Geoffrey T. Dairiki
#
"""
"""
from __future__ import absolute_import
import json
import logging
from urlparse import parse_qsl, urlsplit, urlunsplit
import mock
from oauth2client.client import FlowExchangeError
import pytest
@pytest.fixture
def redirect_url():
return 'http://localhost/trac_oidc/redirect'
@pytest.fixture
def openid_realm():
return 'http://example.net/'
@pytest.fixture
def web_secrets(redirect_url):
return {
'auth_uri': "https://accounts.example.com/auth",
'token_uri': "https://accounts.example.com/token",
'client_id': "ID",
'client_secret': "SEKRET",
'redirect_uris': [redirect_url],
}
@pytest.fixture
def client_secret_file(tmpdir, web_secrets):
secret_file = tmpdir.join('client_secret.json')
secret_file.write(json.dumps({'web': web_secrets}))
return str(secret_file)
class _RequestArgs(dict):
getfirst = dict.get
class DummyRequest(object):
def __init__(self, query=None, oauth_state=None):
self.args = _RequestArgs(query or {})
self.session = {}
if oauth_state is not None:
self.oauth_state = oauth_state
@property
def oauth_state(self): # pragma: NO COVER
return self.session['trac_oidc.oauth_state']
@oauth_state.setter
def oauth_state(self, state): # pragma: NO COVER
self.session['trac_oidc.oauth_state'] = state
@oauth_state.deleter
def oauth_state(self): # pragma: NO COVER
del self.session['trac_oidc.oauth_state']
class TestAuthenticator(object):
@pytest.fixture
def log(self):
return logging.getLogger('Trac')
@pytest.fixture
def authenticator(self, client_secret_file, redirect_url, openid_realm,
log):
from ..authenticator import Authenticator
return Authenticator(client_secret_file, redirect_url,
openid_realm, log)
def test_flow(self, authenticator,
redirect_url, openid_realm, web_secrets):
flow = authenticator.flow
assert flow.client_secret == web_secrets['client_secret']
assert flow.redirect_uri == redirect_url
assert flow.params['access_type'] == 'online'
assert flow.params['openid.realm'] == openid_realm
def test_get_auth_url(self, authenticator, web_secrets):
req = DummyRequest()
auth_url = authenticator.get_auth_url(req)
split = urlsplit(auth_url)
assert urlunsplit((split.scheme, split.netloc, split.path, '', '')) \
== web_secrets['auth_uri']
query = dict(parse_qsl(split.query))
state = query['state']
assert state
assert req.session[authenticator.STATE_SKEY] == state
def test_get_identity(self, authenticator):
req = DummyRequest(query={'code': 'CODE', 'state': 'STATE'},
oauth_state='STATE')
authenticator._get_credentials = mock.Mock()
authenticator._get_openid_profile = mock.Mock(return_value={})
credentials = authenticator._get_credentials.return_value
credentials.id_token = {'iss': 'https://example.net', 'sub': '42'}
id_token = authenticator.get_identity(req)
assert dict(id_token) == credentials.id_token
def test_get_identity_resets_state(self, authenticator):
from ..authenticator import AuthenticationError
req = DummyRequest(query={'code': 'CODE', 'state': 'STATE'},
oauth_state='STATE')
authenticator._get_credentials = mock.Mock()
authenticator._get_openid_profile = mock.Mock(return_value={})
credentials = authenticator._get_credentials.return_value
credentials.id_token = {'iss': 'https://example.net', 'sub': '42'}
authenticator.get_identity(req)
with pytest.raises(AuthenticationError):
authenticator.get_identity(req)
def test_get_code(self, authenticator):
state = 'abcdef'
req = DummyRequest(query={'code': 'CODE', 'state': state},
oauth_state=state)
assert authenticator._get_code(req) == 'CODE'
def test_get_code_authentication_failure(self, authenticator):
from ..authenticator import AuthenticationFailed
req = DummyRequest(query={'error': 'error message'})
with pytest.raises(AuthenticationFailed) as exc_info:
authenticator._get_code(req)
assert 'error message' in exc_info.exconly()
@pytest.mark.parametrize('state, oauth_state', [
('wrong', 'somestate'),
(None, 'somestate'),
(None, None),
('unexpected', None),
])
def test_get_code_incorrect_state(self, authenticator,
state, oauth_state):
from ..authenticator import AuthenticationError
req = DummyRequest(query={'state': state} if state else None,
oauth_state=oauth_state)
with pytest.raises(AuthenticationError):
authenticator._get_code(req)
def test_get_code_missing_code(self, authenticator):
from ..authenticator import AuthenticationError
state = 'abcdef'
req = DummyRequest(query={'state': state}, oauth_state=state)
with pytest.raises(AuthenticationError):
authenticator._get_code(req)
def test_get_credentials(self, authenticator):
authenticator.flow = flow = mock.Mock(name='flow')
credentials = authenticator._get_credentials('CODE')
assert flow.mock_calls == [mock.call.step2_exchange('CODE')]
assert credentials == flow.step2_exchange.return_value
def test_get_credentials_failure(self, authenticator):
from ..authenticator import AuthenticationError
authenticator.flow = flow = mock.Mock(name='flow')
flow.step2_exchange.side_effect = FlowExchangeError('testing')
with pytest.raises(AuthenticationError):
authenticator._get_credentials('CODE')
def test_get_openid_profile(self, authenticator):
credentials = mock.Mock(name='credentials')
http = credentials.authorize.return_value
resp = mock.Mock(name='Response', status=200)
content = b'{"foo": "bar"}'
http.request.return_value = resp, content
profile = authenticator._get_openid_profile(credentials)
assert profile == {'foo': 'bar'}
def test_get_openid_profile_failure(self, authenticator, caplog):
credentials = mock.Mock(name='credentials')
http = credentials.authorize.return_value
resp = mock.Mock(name='Response', status=500)
content = b'{"foo": "bar"}'
http.request.return_value = resp, content
assert authenticator._get_openid_profile(credentials) == {}
assert 'Failed to retrieve profile' in caplog.text()
def test_get_openid_profile_bad_json(self, authenticator, caplog):
credentials = mock.Mock(name='credentials')
http = credentials.authorize.return_value
resp = mock.Mock(name='Response', status=200)
content = b'}'
http.request.return_value = resp, content
assert authenticator._get_openid_profile(credentials) == {}
assert 'Response is not valid JSON' in caplog.text()
| bsd-3-clause | 5,488,640,400,473,788,000 | 36.641026 | 77 | 0.633515 | false |
diagramsoftware/odoo | addons/portal/tests/__init__.py | 261 | 1078 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import test_portal
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 5,437,878,046,700,597,000 | 45.869565 | 78 | 0.618738 | false |
pratikmallya/hue | desktop/core/ext-py/python-ldap-2.3.13/Lib/ldif.py | 44 | 13729 | """
ldif - generate and parse LDIF data (see RFC 2849)
See http://www.python-ldap.org/ for details.
$Id: ldif.py,v 1.56 2010/07/19 08:23:22 stroeder Exp $
Python compatibility note:
Tested with Python 2.0+, but should work with Python 1.5.2+.
"""
__version__ = '2.3.12'
__all__ = [
# constants
'ldif_pattern',
# functions
'AttrTypeandValueLDIF','CreateLDIF','ParseLDIF',
# classes
'LDIFWriter',
'LDIFParser',
'LDIFRecordList',
'LDIFCopy',
]
import urlparse,urllib,base64,re,types
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
attrtype_pattern = r'[\w;.-]+(;[\w_-]+)*'
attrvalue_pattern = r'(([^,]|\\,)+|".*?")'
attrtypeandvalue_pattern = attrtype_pattern + r'[ ]*=[ ]*' + attrvalue_pattern
rdn_pattern = attrtypeandvalue_pattern + r'([ ]*\+[ ]*' + attrtypeandvalue_pattern + r')*[ ]*'
dn_pattern = rdn_pattern + r'([ ]*,[ ]*' + rdn_pattern + r')*[ ]*'
dn_regex = re.compile('^%s$' % dn_pattern)
ldif_pattern = '^((dn(:|::) %(dn_pattern)s)|(%(attrtype_pattern)s(:|::) .*)$)+' % vars()
MOD_OP_INTEGER = {
'add':0,'delete':1,'replace':2
}
MOD_OP_STR = {
0:'add',1:'delete',2:'replace'
}
CHANGE_TYPES = ['add','delete','modify','modrdn']
valid_changetype_dict = {}
for c in CHANGE_TYPES:
valid_changetype_dict[c]=None
def is_dn(s):
"""
returns 1 if s is an LDAP DN
"""
if s=='':
return 1
rm = dn_regex.match(s)
return rm!=None and rm.group(0)==s
SAFE_STRING_PATTERN = '(^(\000|\n|\r| |:|<)|[\000\n\r\200-\377]+|[ ]+$)'
safe_string_re = re.compile(SAFE_STRING_PATTERN)
def list_dict(l):
"""
return a dictionary with all items of l being the keys of the dictionary
"""
return dict([(i,None) for i in l])
class LDIFWriter:
"""
Write LDIF entry or change records to a file object.
Copy LDIF input to a file output object containing all data retrieved
via URLs.
"""
def __init__(self,output_file,base64_attrs=None,cols=76,line_sep='\n'):
"""
output_file
file object for output
base64_attrs
list of attribute types to be base64-encoded in any case
cols
Specifies how many columns a line may have before it's
folded into many lines.
line_sep
String used as line separator
"""
self._output_file = output_file
self._base64_attrs = list_dict([a.lower() for a in (base64_attrs or [])])
self._cols = cols
self._line_sep = line_sep
self.records_written = 0
def _unfoldLDIFLine(self,line):
"""
Write string line as one or more folded lines
"""
# Check maximum line length
line_len = len(line)
if line_len<=self._cols:
self._output_file.write(line)
self._output_file.write(self._line_sep)
else:
# Fold line
pos = self._cols
self._output_file.write(line[0:min(line_len,self._cols)])
self._output_file.write(self._line_sep)
while pos<line_len:
self._output_file.write(' ')
self._output_file.write(line[pos:min(line_len,pos+self._cols-1)])
self._output_file.write(self._line_sep)
pos = pos+self._cols-1
return # _unfoldLDIFLine()
def _needs_base64_encoding(self,attr_type,attr_value):
"""
returns 1 if attr_value has to be base-64 encoded because
of special chars or because attr_type is in self._base64_attrs
"""
return self._base64_attrs.has_key(attr_type.lower()) or \
not safe_string_re.search(attr_value) is None
def _unparseAttrTypeandValue(self,attr_type,attr_value):
"""
Write a single attribute type/value pair
attr_type
attribute type
attr_value
attribute value
"""
if self._needs_base64_encoding(attr_type,attr_value):
# Encode with base64
self._unfoldLDIFLine(':: '.join([attr_type,base64.encodestring(attr_value).replace('\n','')]))
else:
self._unfoldLDIFLine(': '.join([attr_type,attr_value]))
return # _unparseAttrTypeandValue()
def _unparseEntryRecord(self,entry):
"""
entry
dictionary holding an entry
"""
attr_types = entry.keys()[:]
attr_types.sort()
for attr_type in attr_types:
for attr_value in entry[attr_type]:
self._unparseAttrTypeandValue(attr_type,attr_value)
def _unparseChangeRecord(self,modlist):
"""
modlist
list of additions (2-tuple) or modifications (3-tuple)
"""
mod_len = len(modlist[0])
if mod_len==2:
changetype = 'add'
elif mod_len==3:
changetype = 'modify'
else:
raise ValueError,"modlist item of wrong length"
self._unparseAttrTypeandValue('changetype',changetype)
for mod in modlist:
if mod_len==2:
mod_type,mod_vals = mod
elif mod_len==3:
mod_op,mod_type,mod_vals = mod
self._unparseAttrTypeandValue(MOD_OP_STR[mod_op],mod_type)
else:
raise ValueError,"Subsequent modlist item of wrong length"
if mod_vals:
for mod_val in mod_vals:
self._unparseAttrTypeandValue(mod_type,mod_val)
if mod_len==3:
self._output_file.write('-'+self._line_sep)
def unparse(self,dn,record):
"""
dn
string-representation of distinguished name
record
Either a dictionary holding the LDAP entry {attrtype:record}
or a list with a modify list like for LDAPObject.modify().
"""
if not record:
# Simply ignore empty records
return
# Start with line containing the distinguished name
self._unparseAttrTypeandValue('dn',dn)
# Dispatch to record type specific writers
if isinstance(record,types.DictType):
self._unparseEntryRecord(record)
elif isinstance(record,types.ListType):
self._unparseChangeRecord(record)
else:
raise ValueError, "Argument record must be dictionary or list"
# Write empty line separating the records
self._output_file.write(self._line_sep)
# Count records written
self.records_written = self.records_written+1
return # unparse()
def CreateLDIF(dn,record,base64_attrs=None,cols=76):
"""
Create a single formatted LDIF record, including the trailing empty line.
This is a compatibility function. Its use is deprecated!
dn
string-representation of distinguished name
record
Either a dictionary holding the LDAP entry {attrtype:record}
or a list with a modify list like for LDAPObject.modify().
base64_attrs
list of attribute types to be base64-encoded in any case
cols
Specifies how many columns a line may have before it's
folded into many lines.
"""
f = StringIO()
ldif_writer = LDIFWriter(f,base64_attrs,cols,'\n')
ldif_writer.unparse(dn,record)
s = f.getvalue()
f.close()
return s
class LDIFParser:
"""
Base class for an LDIF parser. Applications should sub-class this
class and override method handle() to implement something meaningful.
Public class attributes:
records_read
Counter for records processed so far
"""
def _stripLineSep(self,s):
"""
Strip trailing line separators from s, but no other whitespace
"""
if s[-2:]=='\r\n':
return s[:-2]
elif s[-1:]=='\n':
return s[:-1]
else:
return s
def __init__(
self,
input_file,
ignored_attr_types=None,
max_entries=0,
process_url_schemes=None,
line_sep='\n'
):
"""
Parameters:
input_file
File-object to read the LDIF input from
ignored_attr_types
Attributes with these attribute type names will be ignored.
max_entries
If non-zero specifies the maximum number of entries to be
read from f.
process_url_schemes
List containing strings with URLs schemes to process with urllib.
An empty list turns off all URL processing and the attribute
is ignored completely.
line_sep
String used as line separator
"""
self._input_file = input_file
self._max_entries = max_entries
self._process_url_schemes = list_dict([s.lower() for s in (process_url_schemes or [])])
self._ignored_attr_types = list_dict([a.lower() for a in (ignored_attr_types or [])])
self._line_sep = line_sep
self.records_read = 0
def handle(self,dn,entry):
"""
Process a single content LDIF record. This method should be
implemented by applications using LDIFParser.
"""
def _unfoldLDIFLine(self):
"""
Unfold several folded lines (continuation lines starting with a space) into one line
"""
unfolded_lines = [ self._stripLineSep(self._line) ]
self._line = self._input_file.readline()
while self._line and self._line[0]==' ':
unfolded_lines.append(self._stripLineSep(self._line[1:]))
self._line = self._input_file.readline()
return ''.join(unfolded_lines)
def _parseAttrTypeandValue(self):
"""
Parse a single attribute type and value pair from one or
more lines of LDIF data
"""
# Reading new attribute line
unfolded_line = self._unfoldLDIFLine()
# Ignore comments which can also be folded
while unfolded_line and unfolded_line[0]=='#':
unfolded_line = self._unfoldLDIFLine()
if not unfolded_line or unfolded_line=='\n' or unfolded_line=='\r\n':
return None,None
try:
colon_pos = unfolded_line.index(':')
except ValueError:
# Treat malformed lines without colon as non-existent
return None,None
attr_type = unfolded_line[0:colon_pos]
# if needed attribute value is BASE64 decoded
value_spec = unfolded_line[colon_pos:colon_pos+2]
if value_spec=='::':
# attribute value needs base64-decoding
attr_value = base64.decodestring(unfolded_line[colon_pos+2:])
elif value_spec==':<':
# fetch attribute value from URL
url = unfolded_line[colon_pos+2:].strip()
attr_value = None
if self._process_url_schemes:
u = urlparse.urlparse(url)
if self._process_url_schemes.has_key(u[0]):
attr_value = urllib.urlopen(url).read()
elif value_spec==':\r\n' or value_spec=='\n':
attr_value = ''
else:
attr_value = unfolded_line[colon_pos+2:].lstrip()
return attr_type,attr_value
def parse(self):
"""
Continuously read and parse LDIF records
"""
self._line = self._input_file.readline()
while self._line and \
(not self._max_entries or self.records_read<self._max_entries):
# Reset record
version = None; dn = None; changetype = None; modop = None; entry = {}
attr_type,attr_value = self._parseAttrTypeandValue()
while attr_type!=None and attr_value!=None:
if attr_type=='dn':
# attr type and value pair was DN of LDIF record
if dn!=None:
raise ValueError, 'Two lines starting with dn: in one record.'
if not is_dn(attr_value):
raise ValueError, 'No valid string-representation of distinguished name %s.' % (repr(attr_value))
dn = attr_value
elif attr_type=='version' and dn is None:
version = 1
elif attr_type=='changetype':
# attr type and value pair was changetype of LDIF record
if dn is None:
raise ValueError, 'Read changetype: before getting valid dn: line.'
if changetype!=None:
raise ValueError, 'Two lines starting with changetype: in one record.'
if not valid_changetype_dict.has_key(attr_value):
raise ValueError, 'changetype value %s is invalid.' % (repr(attr_value))
changetype = attr_value
elif attr_value!=None and \
not self._ignored_attr_types.has_key(attr_type.lower()):
# Add the attribute to the entry if not ignored attribute
if entry.has_key(attr_type):
entry[attr_type].append(attr_value)
else:
entry[attr_type]=[attr_value]
# Read the next line within an entry
attr_type,attr_value = self._parseAttrTypeandValue()
if entry:
# append entry to result list
self.handle(dn,entry)
self.records_read = self.records_read+1
return # parse()
class LDIFRecordList(LDIFParser):
"""
Collect all records of LDIF input into a single list
of 2-tuples (dn,entry). It can be a memory hog!
"""
def __init__(
self,
input_file,
ignored_attr_types=None,max_entries=0,process_url_schemes=None
):
"""
See LDIFParser.__init__()
Additional Parameters:
all_records
List instance for storing parsed records
"""
LDIFParser.__init__(self,input_file,ignored_attr_types,max_entries,process_url_schemes)
self.all_records = []
def handle(self,dn,entry):
"""
Append a single record to the list of all records.
"""
self.all_records.append((dn,entry))
class LDIFCopy(LDIFParser):
"""
Copy LDIF input to LDIF output containing all data retrieved
via URLs
"""
def __init__(
self,
input_file,output_file,
ignored_attr_types=None,max_entries=0,process_url_schemes=None,
base64_attrs=None,cols=76,line_sep='\n'
):
"""
See LDIFParser.__init__() and LDIFWriter.__init__()
"""
LDIFParser.__init__(self,input_file,ignored_attr_types,max_entries,process_url_schemes)
self._output_ldif = LDIFWriter(output_file,base64_attrs,cols,line_sep)
def handle(self,dn,entry):
"""
Write single LDIF record to output file.
"""
self._output_ldif.unparse(dn,entry)
def ParseLDIF(f,ignore_attrs=None,maxentries=0):
"""
Parse LDIF records read from file.
This is a compatibility function. Its use is deprecated!
"""
ldif_parser = LDIFRecordList(
f,ignored_attr_types=ignore_attrs,max_entries=maxentries,process_url_schemes=0
)
ldif_parser.parse()
return ldif_parser.all_records
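# Illustrative sketch (not part of the original module): reading all records from
# an LDIF file with the deprecated ParseLDIF() helper. The file name is made up.
#
# f = open('some.ldif', 'rb')
# for dn, entry in ParseLDIF(f):
#     print dn, sorted(entry.keys())
# f.close()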
| apache-2.0 | -5,334,279,743,565,623,000 | 29.173626 | 109 | 0.637774 | false |
kennethreitz/python-logplex | logplex/packages/requests/packages/charade/eucjpprober.py | 206 | 3768 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCJPDistributionAnalysis
from .jpcntx import EUCJPContextAnalysis
from .mbcssm import EUCJPSMModel
class EUCJPProber(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(EUCJPSMModel)
self._mDistributionAnalyzer = EUCJPDistributionAnalysis()
self._mContextAnalyzer = EUCJPContextAnalysis()
self.reset()
def reset(self):
MultiByteCharSetProber.reset(self)
self._mContextAnalyzer.reset()
def get_charset_name(self):
return "EUC-JP"
def feed(self, aBuf):
aLen = len(aBuf)
for i in range(0, aLen):
# PY3K: aBuf is a byte array, so aBuf[i] is an int, not a byte
codingState = self._mCodingSM.next_state(aBuf[i])
if codingState == constants.eError:
if constants._debug:
sys.stderr.write(self.get_charset_name()
+ ' prober hit error at byte ' + str(i)
+ '\n')
self._mState = constants.eNotMe
break
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
break
elif codingState == constants.eStart:
charLen = self._mCodingSM.get_current_charlen()
if i == 0:
self._mLastChar[1] = aBuf[0]
self._mContextAnalyzer.feed(self._mLastChar, charLen)
self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
else:
self._mContextAnalyzer.feed(aBuf[i - 1:i + 1], charLen)
self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
charLen)
self._mLastChar[0] = aBuf[aLen - 1]
if self.get_state() == constants.eDetecting:
if (self._mContextAnalyzer.got_enough_data() and
(self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
self._mState = constants.eFoundIt
return self.get_state()
def get_confidence(self):
contxtCf = self._mContextAnalyzer.get_confidence()
distribCf = self._mDistributionAnalyzer.get_confidence()
return max(contxtCf, distribCf)
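# Illustrative sketch (not part of the original module): feeding raw bytes to the
# prober and reading back its verdict. `raw_bytes` is a made-up placeholder for
# the byte buffer under detection.
#
# prober = EUCJPProber()
# prober.feed(raw_bytes)
# if prober.get_state() == constants.eFoundIt:
#     print(prober.get_charset_name(), prober.get_confidence())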
| bsd-2-clause | 4,596,553,156,639,948,000 | 39.866667 | 78 | 0.603503 | false |
xin3liang/platform_external_chromium_org_third_party_WebKit | Tools/Scripts/webkitpy/style/checkers/xcodeproj_unittest.py | 48 | 3070 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for xcodeproj.py."""
import unittest
import xcodeproj
class TestErrorHandler(object):
"""Error handler for XcodeProjectFileChecker unittests"""
def __init__(self, handler):
self.handler = handler
def turn_off_line_filtering(self):
pass
def __call__(self, line_number, category, confidence, message):
self.handler(self, line_number, category, confidence, message)
return True
class XcodeProjectFileCheckerTest(unittest.TestCase):
"""Tests XcodeProjectFileChecker class."""
def assert_no_error(self, lines):
def handler(error_handler, line_number, category, confidence, message):
self.fail('Unexpected error: %d %s %d %s' % (line_number, category, confidence, message))
error_handler = TestErrorHandler(handler)
checker = xcodeproj.XcodeProjectFileChecker('', error_handler)
checker.check(lines)
def assert_error(self, lines, expected_message):
self.had_error = False
def handler(error_handler, line_number, category, confidence, message):
self.assertEqual(expected_message, message)
self.had_error = True
error_handler = TestErrorHandler(handler)
checker = xcodeproj.XcodeProjectFileChecker('', error_handler)
checker.check(lines)
self.assertTrue(self.had_error, '%s should have error: %s.' % (lines, expected_message))
def test_detect_development_region(self):
self.assert_no_error(['developmentRegion = English;'])
self.assert_error([''], 'Missing "developmentRegion = English".')
self.assert_error(['developmentRegion = Japanese;'],
'developmentRegion is not English.')
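# Illustrative sketch (not part of the original test file): how the checker under
# test might be driven directly. `handle_error` and the file name are hypothetical.
#
# checker = xcodeproj.XcodeProjectFileChecker('project.pbxproj', handle_error)
# checker.check(['developmentRegion = English;'])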
| bsd-3-clause | -8,856,060,458,466,783,000 | 43.492754 | 101 | 0.715961 | false |
Bysmyyr/blink-crosswalk | Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py | 17 | 38706 | # Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit tests for run_perf_tests."""
import StringIO
import datetime
import json
import re
import unittest
from webkitpy.common.host_mock import MockHost
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.layout_tests.port.driver import DriverOutput
from webkitpy.layout_tests.port.test import TestPort
from webkitpy.performance_tests.perftest import ChromiumStylePerfTest
from webkitpy.performance_tests.perftest import DEFAULT_TEST_RUNNER_COUNT
from webkitpy.performance_tests.perftest import PerfTest
from webkitpy.performance_tests.perftestsrunner import PerfTestsRunner
class MainTest(unittest.TestCase):
def create_runner(self, args=[]):
options, parsed_args = PerfTestsRunner._parse_args(args)
test_port = TestPort(host=MockHost(), options=options)
runner = PerfTestsRunner(args=args, port=test_port)
runner._host.filesystem.maybe_make_directory(runner._base_path, 'inspector')
runner._host.filesystem.maybe_make_directory(runner._base_path, 'Bindings')
runner._host.filesystem.maybe_make_directory(runner._base_path, 'Parser')
return runner, test_port
def _add_file(self, runner, dirname, filename, content=True):
dirname = runner._host.filesystem.join(runner._base_path, dirname) if dirname else runner._base_path
runner._host.filesystem.maybe_make_directory(dirname)
runner._host.filesystem.files[runner._host.filesystem.join(dirname, filename)] = content
def test_collect_tests(self):
runner, port = self.create_runner()
self._add_file(runner, 'inspector', 'a_file.html', 'a content')
tests = runner._collect_tests()
self.assertEqual(len(tests), 1)
def _collect_tests_and_sort_test_name(self, runner):
return sorted([test.test_name() for test in runner._collect_tests()])
def test_collect_tests_with_multile_files(self):
runner, port = self.create_runner(args=['PerformanceTests/test1.html', 'test2.html'])
def add_file(filename):
port.host.filesystem.files[runner._host.filesystem.join(runner._base_path, filename)] = 'some content'
add_file('test1.html')
add_file('test2.html')
add_file('test3.html')
port.host.filesystem.chdir(runner._port.perf_tests_dir()[:runner._port.perf_tests_dir().rfind(runner._host.filesystem.sep)])
self.assertItemsEqual(self._collect_tests_and_sort_test_name(runner), ['test1.html', 'test2.html'])
def test_collect_tests_with_skipped_list(self):
runner, port = self.create_runner()
self._add_file(runner, 'inspector', 'test1.html')
self._add_file(runner, 'inspector', 'unsupported_test1.html')
self._add_file(runner, 'inspector', 'test2.html')
self._add_file(runner, 'inspector/resources', 'resource_file.html')
self._add_file(runner, 'unsupported', 'unsupported_test2.html')
port.skipped_perf_tests = lambda: ['inspector/unsupported_test1.html', 'unsupported']
self.assertItemsEqual(self._collect_tests_and_sort_test_name(runner), ['inspector/test1.html', 'inspector/test2.html'])
def test_collect_tests_with_skipped_list_and_files(self):
runner, port = self.create_runner(args=['Suite/Test1.html', 'Suite/SkippedTest1.html', 'SkippedSuite/Test1.html'])
self._add_file(runner, 'SkippedSuite', 'Test1.html')
self._add_file(runner, 'SkippedSuite', 'Test2.html')
self._add_file(runner, 'Suite', 'Test1.html')
self._add_file(runner, 'Suite', 'Test2.html')
self._add_file(runner, 'Suite', 'SkippedTest1.html')
self._add_file(runner, 'Suite', 'SkippedTest2.html')
port.skipped_perf_tests = lambda: ['Suite/SkippedTest1.html', 'Suite/SkippedTest1.html', 'SkippedSuite']
self.assertItemsEqual(self._collect_tests_and_sort_test_name(runner),
['SkippedSuite/Test1.html', 'Suite/SkippedTest1.html', 'Suite/Test1.html'])
def test_collect_tests_with_ignored_skipped_list(self):
runner, port = self.create_runner(args=['--force'])
self._add_file(runner, 'inspector', 'test1.html')
self._add_file(runner, 'inspector', 'unsupported_test1.html')
self._add_file(runner, 'inspector', 'test2.html')
self._add_file(runner, 'inspector/resources', 'resource_file.html')
self._add_file(runner, 'unsupported', 'unsupported_test2.html')
port.skipped_perf_tests = lambda: ['inspector/unsupported_test1.html', 'unsupported']
self.assertItemsEqual(self._collect_tests_and_sort_test_name(runner), ['inspector/test1.html', 'inspector/test2.html', 'inspector/unsupported_test1.html', 'unsupported/unsupported_test2.html'])
def test_default_args(self):
runner, port = self.create_runner()
options, args = PerfTestsRunner._parse_args([])
self.assertTrue(options.build)
self.assertEqual(options.time_out_ms, 600 * 1000)
self.assertTrue(options.generate_results)
self.assertTrue(options.show_results)
self.assertTrue(options.use_skipped_list)
self.assertEqual(options.repeat, 1)
self.assertEqual(options.test_runner_count, DEFAULT_TEST_RUNNER_COUNT)
def test_parse_args(self):
runner, port = self.create_runner()
options, args = PerfTestsRunner._parse_args([
'--build-directory=folder42',
'--platform=platform42',
'--builder-name', 'webkit-mac-1',
'--build-number=56',
'--time-out-ms=42',
'--no-show-results',
'--reset-results',
'--output-json-path=a/output.json',
'--slave-config-json-path=a/source.json',
'--test-results-server=somehost',
'--additional-driver-flag=--enable-threaded-parser',
'--additional-driver-flag=--awesomesauce',
'--repeat=5',
'--test-runner-count=5',
'--debug'])
self.assertTrue(options.build)
self.assertEqual(options.build_directory, 'folder42')
self.assertEqual(options.platform, 'platform42')
self.assertEqual(options.builder_name, 'webkit-mac-1')
self.assertEqual(options.build_number, '56')
self.assertEqual(options.time_out_ms, '42')
self.assertEqual(options.configuration, 'Debug')
self.assertFalse(options.show_results)
self.assertTrue(options.reset_results)
self.assertEqual(options.output_json_path, 'a/output.json')
self.assertEqual(options.slave_config_json_path, 'a/source.json')
self.assertEqual(options.test_results_server, 'somehost')
self.assertEqual(options.additional_driver_flag, ['--enable-threaded-parser', '--awesomesauce'])
self.assertEqual(options.repeat, 5)
self.assertEqual(options.test_runner_count, 5)
def test_upload_json(self):
runner, port = self.create_runner()
port.host.filesystem.files['/mock-checkout/some.json'] = 'some content'
class MockFileUploader:
called = []
upload_single_text_file_throws = False
upload_single_text_file_return_value = None
@classmethod
def reset(cls):
cls.called = []
cls.upload_single_text_file_throws = False
cls.upload_single_text_file_return_value = None
def __init__(mock, url, timeout):
self.assertEqual(url, 'https://some.host/some/path')
self.assertTrue(isinstance(timeout, int) and timeout)
mock.called.append('FileUploader')
def upload_single_text_file(mock, filesystem, content_type, filename):
self.assertEqual(filesystem, port.host.filesystem)
self.assertEqual(content_type, 'application/json')
self.assertEqual(filename, 'some.json')
mock.called.append('upload_single_text_file')
if mock.upload_single_text_file_throws:
raise Exception
return mock.upload_single_text_file_return_value
MockFileUploader.upload_single_text_file_return_value = StringIO.StringIO('OK')
self.assertTrue(runner._upload_json('some.host', 'some.json', '/some/path', MockFileUploader))
self.assertEqual(MockFileUploader.called, ['FileUploader', 'upload_single_text_file'])
MockFileUploader.reset()
MockFileUploader.upload_single_text_file_return_value = StringIO.StringIO('Some error')
output = OutputCapture()
output.capture_output()
self.assertFalse(runner._upload_json('some.host', 'some.json', '/some/path', MockFileUploader))
_, _, logs = output.restore_output()
self.assertEqual(logs, 'Uploaded JSON to https://some.host/some/path but got a bad response:\nSome error\n')
# Throwing an exception in upload_single_text_file shouldn't blow up _upload_json
MockFileUploader.reset()
MockFileUploader.upload_single_text_file_throws = True
self.assertFalse(runner._upload_json('some.host', 'some.json', '/some/path', MockFileUploader))
self.assertEqual(MockFileUploader.called, ['FileUploader', 'upload_single_text_file'])
MockFileUploader.reset()
MockFileUploader.upload_single_text_file_return_value = StringIO.StringIO('{"status": "OK"}')
self.assertTrue(runner._upload_json('some.host', 'some.json', '/some/path', MockFileUploader))
self.assertEqual(MockFileUploader.called, ['FileUploader', 'upload_single_text_file'])
MockFileUploader.reset()
MockFileUploader.upload_single_text_file_return_value = StringIO.StringIO('{"status": "SomethingHasFailed", "failureStored": false}')
output = OutputCapture()
output.capture_output()
self.assertFalse(runner._upload_json('some.host', 'some.json', '/some/path', MockFileUploader))
_, _, logs = output.restore_output()
serialized_json = json.dumps({'status': 'SomethingHasFailed', 'failureStored': False}, indent=4)
self.assertEqual(logs, 'Uploaded JSON to https://some.host/some/path but got an error:\n%s\n' % serialized_json)
class InspectorPassTestData:
text = 'RESULT group_name: test_name= 42 ms'
output = """Running inspector/pass.html (2 of 2)
RESULT group_name: test_name= 42 ms
Finished: 0.1 s
"""
class EventTargetWrapperTestData:
text = """Running 20 times
Ignoring warm-up run (1502)
1504
1505
1510
1504
1507
1509
1510
1487
1488
1472
1472
1488
1473
1472
1475
1487
1486
1486
1475
1471
Time:
values 1486, 1471, 1510, 1505, 1478, 1490 ms
avg 1490 ms
median 1488 ms
stdev 15.13935 ms
min 1471 ms
max 1510 ms
"""
output = """Running Bindings/event-target-wrapper.html (1 of 2)
RESULT Bindings: event-target-wrapper: Time= 1490.0 ms
median= 1488.0 ms, stdev= 14.11751 ms, min= 1471.0 ms, max= 1510.0 ms
Finished: 0.1 s
"""
results = {'url': 'https://src.chromium.org/viewvc/blink/trunk/PerformanceTests/Bindings/event-target-wrapper.html',
'metrics': {'Time': {'current': [[1486.0, 1471.0, 1510.0, 1505.0, 1478.0, 1490.0]] * 4}}}
class SomeParserTestData:
text = """Running 20 times
Ignoring warm-up run (1115)
Time:
values 1080, 1120, 1095, 1101, 1104 ms
avg 1100 ms
median 1101 ms
stdev 14.50861 ms
min 1080 ms
max 1120 ms
"""
output = """Running Parser/some-parser.html (2 of 2)
RESULT Parser: some-parser: Time= 1100.0 ms
median= 1101.0 ms, stdev= 13.31402 ms, min= 1080.0 ms, max= 1120.0 ms
Finished: 0.1 s
"""
class MemoryTestData:
text = """Running 20 times
Ignoring warm-up run (1115)
Time:
values 1080, 1120, 1095, 1101, 1104 ms
avg 1100 ms
median 1101 ms
stdev 14.50861 ms
min 1080 ms
max 1120 ms
JS Heap:
values 825000, 811000, 848000, 837000, 829000 bytes
avg 830000 bytes
median 829000 bytes
stdev 13784.04875 bytes
min 811000 bytes
max 848000 bytes
Malloc:
values 529000, 511000, 548000, 536000, 521000 bytes
avg 529000 bytes
median 529000 bytes
stdev 14124.44689 bytes
min 511000 bytes
max 548000 bytes
"""
output = """Running 1 tests
Running Parser/memory-test.html (1 of 1)
RESULT Parser: memory-test: Time= 1100.0 ms
median= 1101.0 ms, stdev= 13.31402 ms, min= 1080.0 ms, max= 1120.0 ms
RESULT Parser: memory-test: JSHeap= 830000.0 bytes
median= 829000.0 bytes, stdev= 12649.11064 bytes, min= 811000.0 bytes, max= 848000.0 bytes
RESULT Parser: memory-test: Malloc= 529000.0 bytes
median= 529000.0 bytes, stdev= 12961.48139 bytes, min= 511000.0 bytes, max= 548000.0 bytes
Finished: 0.1 s
"""
results = {'current': [[1080, 1120, 1095, 1101, 1104]] * 4}
js_heap_results = {'current': [[825000, 811000, 848000, 837000, 829000]] * 4}
malloc_results = {'current': [[529000, 511000, 548000, 536000, 521000]] * 4}
class TestDriver:
def run_test(self, driver_input, stop_when_done):
text = ''
timeout = False
crash = False
if driver_input.test_name.endswith('pass.html'):
text = InspectorPassTestData.text
elif driver_input.test_name.endswith('timeout.html'):
timeout = True
elif driver_input.test_name.endswith('failed.html'):
text = None
elif driver_input.test_name.endswith('tonguey.html'):
text = 'we are not expecting an output from perf tests but RESULT blablabla'
elif driver_input.test_name.endswith('crash.html'):
crash = True
elif driver_input.test_name.endswith('event-target-wrapper.html'):
text = EventTargetWrapperTestData.text
elif driver_input.test_name.endswith('some-parser.html'):
text = SomeParserTestData.text
elif driver_input.test_name.endswith('memory-test.html'):
text = MemoryTestData.text
return DriverOutput(text, '', '', '', crash=crash, timeout=timeout)
def start(self):
"""do nothing"""
def stop(self):
"""do nothing"""
class IntegrationTest(unittest.TestCase):
def _normalize_output(self, log):
return re.sub(r'(stdev=\s+\d+\.\d{5})\d+', r'\1', re.sub(r'Finished: [0-9\.]+ s', 'Finished: 0.1 s', log))
def _load_output_json(self, runner):
json_content = runner._host.filesystem.read_text_file(runner._output_json_path())
return json.loads(re.sub(r'("stdev":\s*\d+\.\d{5})\d+', r'\1', json_content))
def create_runner(self, args=[], driver_class=TestDriver):
options, parsed_args = PerfTestsRunner._parse_args(args)
test_port = TestPort(host=MockHost(), options=options)
test_port.create_driver = lambda worker_number=None, no_timeout=False: driver_class()
runner = PerfTestsRunner(args=args, port=test_port)
runner._host.filesystem.maybe_make_directory(runner._base_path, 'inspector')
runner._host.filesystem.maybe_make_directory(runner._base_path, 'Bindings')
runner._host.filesystem.maybe_make_directory(runner._base_path, 'Parser')
return runner, test_port
def run_test(self, test_name):
runner, port = self.create_runner()
tests = [ChromiumStylePerfTest(port, test_name, runner._host.filesystem.join('some-dir', test_name))]
return runner._run_tests_set(tests) == 0
def test_run_passing_test(self):
self.assertTrue(self.run_test('pass.html'))
def test_run_silent_test(self):
self.assertFalse(self.run_test('silent.html'))
def test_run_failed_test(self):
self.assertFalse(self.run_test('failed.html'))
def test_run_tonguey_test(self):
self.assertFalse(self.run_test('tonguey.html'))
def test_run_timeout_test(self):
self.assertFalse(self.run_test('timeout.html'))
def test_run_crash_test(self):
self.assertFalse(self.run_test('crash.html'))
def _tests_for_runner(self, runner, test_names):
filesystem = runner._host.filesystem
tests = []
for test in test_names:
path = filesystem.join(runner._base_path, test)
dirname = filesystem.dirname(path)
if test.startswith('inspector/'):
tests.append(ChromiumStylePerfTest(runner._port, test, path))
else:
tests.append(PerfTest(runner._port, test, path))
return tests
def test_run_test_set(self):
runner, port = self.create_runner()
tests = self._tests_for_runner(runner, ['inspector/pass.html', 'inspector/silent.html', 'inspector/failed.html',
'inspector/tonguey.html', 'inspector/timeout.html', 'inspector/crash.html'])
output = OutputCapture()
output.capture_output()
try:
unexpected_result_count = runner._run_tests_set(tests)
finally:
stdout, stderr, log = output.restore_output()
self.assertEqual(unexpected_result_count, len(tests) - 1)
self.assertTrue('\nRESULT group_name: test_name= 42 ms\n' in log)
def test_run_test_set_kills_drt_per_run(self):
class TestDriverWithStopCount(TestDriver):
stop_count = 0
def stop(self):
TestDriverWithStopCount.stop_count += 1
runner, port = self.create_runner(driver_class=TestDriverWithStopCount)
tests = self._tests_for_runner(runner, ['inspector/pass.html', 'inspector/silent.html', 'inspector/failed.html',
'inspector/tonguey.html', 'inspector/timeout.html', 'inspector/crash.html'])
unexpected_result_count = runner._run_tests_set(tests)
self.assertEqual(TestDriverWithStopCount.stop_count, 6)
def test_run_test_set_for_parser_tests(self):
runner, port = self.create_runner()
tests = self._tests_for_runner(runner, ['Bindings/event-target-wrapper.html', 'Parser/some-parser.html'])
output = OutputCapture()
output.capture_output()
try:
unexpected_result_count = runner._run_tests_set(tests)
finally:
stdout, stderr, log = output.restore_output()
self.assertEqual(unexpected_result_count, 0)
self.assertEqual(self._normalize_output(log), EventTargetWrapperTestData.output + SomeParserTestData.output)
def test_run_memory_test(self):
runner, port = self.create_runner_and_setup_results_template()
runner._timestamp = 123456789
port.host.filesystem.write_text_file(runner._base_path + '/Parser/memory-test.html', 'some content')
output = OutputCapture()
output.capture_output()
try:
unexpected_result_count = runner.run()
finally:
stdout, stderr, log = output.restore_output()
self.assertEqual(unexpected_result_count, 0)
self.assertEqual(self._normalize_output(log), MemoryTestData.output + '\nMOCK: user.open_url: file://...\n')
parser_tests = self._load_output_json(runner)[0]['tests']['Parser']['tests']
self.assertEqual(parser_tests['memory-test']['metrics']['Time'], MemoryTestData.results)
self.assertEqual(parser_tests['memory-test']['metrics']['JSHeap'], MemoryTestData.js_heap_results)
self.assertEqual(parser_tests['memory-test']['metrics']['Malloc'], MemoryTestData.malloc_results)
def _test_run_with_json_output(self, runner, filesystem, upload_succeeds=False, results_shown=True, expected_exit_code=0, repeat=1, compare_logs=True):
filesystem.write_text_file(runner._base_path + '/inspector/pass.html', 'some content')
filesystem.write_text_file(runner._base_path + '/Bindings/event-target-wrapper.html', 'some content')
uploaded = [False]
def mock_upload_json(hostname, json_path, host_path=None):
# FIXME: Get rid of the hard-coded perf.webkit.org once we've completed the transition.
self.assertIn(hostname, ['some.host'])
self.assertIn(json_path, ['/mock-checkout/output.json'])
self.assertIn(host_path, [None, '/api/report'])
uploaded[0] = upload_succeeds
return upload_succeeds
runner._upload_json = mock_upload_json
runner._timestamp = 123456789
runner._utc_timestamp = datetime.datetime(2013, 2, 8, 15, 19, 37, 460000)
output_capture = OutputCapture()
output_capture.capture_output()
try:
self.assertEqual(runner.run(), expected_exit_code)
finally:
stdout, stderr, logs = output_capture.restore_output()
if not expected_exit_code and compare_logs:
expected_logs = ''
for i in xrange(repeat):
runs = ' (Run %d of %d)' % (i + 1, repeat) if repeat > 1 else ''
expected_logs += 'Running 2 tests%s\n' % runs + EventTargetWrapperTestData.output + InspectorPassTestData.output
if results_shown:
expected_logs += 'MOCK: user.open_url: file://...\n'
self.assertEqual(self._normalize_output(logs), expected_logs)
self.assertEqual(uploaded[0], upload_succeeds)
return logs
_event_target_wrapper_and_inspector_results = {
"Bindings":
{"url": "https://src.chromium.org/viewvc/blink/trunk/PerformanceTests/Bindings",
"tests": {"event-target-wrapper": EventTargetWrapperTestData.results}}}
def test_run_with_json_output(self):
runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
'--test-results-server=some.host'])
self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True)
self.assertEqual(self._load_output_json(runner), [{
"buildTime": "2013-02-08T15:19:37.460000", "tests": self._event_target_wrapper_and_inspector_results,
"revisions": {"blink": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}])
filesystem = port.host.filesystem
self.assertTrue(filesystem.isfile(runner._output_json_path()))
self.assertTrue(filesystem.isfile(filesystem.splitext(runner._output_json_path())[0] + '.html'))
def test_run_with_description(self):
runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
'--test-results-server=some.host', '--description', 'some description'])
self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True)
self.assertEqual(self._load_output_json(runner), [{
"buildTime": "2013-02-08T15:19:37.460000", "description": "some description",
"tests": self._event_target_wrapper_and_inspector_results,
"revisions": {"blink": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}])
def create_runner_and_setup_results_template(self, args=[]):
runner, port = self.create_runner(args)
filesystem = port.host.filesystem
filesystem.write_text_file(runner._base_path + '/resources/results-template.html',
'BEGIN<script src="%AbsolutePathToWebKitTrunk%/some.js"></script>'
'<script src="%AbsolutePathToWebKitTrunk%/other.js"></script><script>%PeformanceTestsResultsJSON%</script>END')
filesystem.write_text_file(runner._base_path + '/Dromaeo/resources/dromaeo/web/lib/jquery-1.6.4.js', 'jquery content')
return runner, port
def test_run_respects_no_results(self):
runner, port = self.create_runner(args=['--output-json-path=/mock-checkout/output.json',
'--test-results-server=some.host', '--no-results'])
self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=False, results_shown=False)
self.assertFalse(port.host.filesystem.isfile('/mock-checkout/output.json'))
def test_run_generates_json_by_default(self):
runner, port = self.create_runner_and_setup_results_template()
filesystem = port.host.filesystem
output_json_path = runner._output_json_path()
results_page_path = filesystem.splitext(output_json_path)[0] + '.html'
self.assertFalse(filesystem.isfile(output_json_path))
self.assertFalse(filesystem.isfile(results_page_path))
self._test_run_with_json_output(runner, port.host.filesystem)
self.assertEqual(self._load_output_json(runner), [{
"buildTime": "2013-02-08T15:19:37.460000", "tests": self._event_target_wrapper_and_inspector_results,
"revisions": {"blink": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}])
self.assertTrue(filesystem.isfile(output_json_path))
self.assertTrue(filesystem.isfile(results_page_path))
def test_run_merges_output_by_default(self):
runner, port = self.create_runner_and_setup_results_template()
filesystem = port.host.filesystem
output_json_path = runner._output_json_path()
filesystem.write_text_file(output_json_path, '[{"previous": "results"}]')
self._test_run_with_json_output(runner, port.host.filesystem)
self.assertEqual(self._load_output_json(runner), [{"previous": "results"}, {
"buildTime": "2013-02-08T15:19:37.460000", "tests": self._event_target_wrapper_and_inspector_results,
"revisions": {"blink": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}])
self.assertTrue(filesystem.isfile(filesystem.splitext(output_json_path)[0] + '.html'))
def test_run_respects_reset_results(self):
runner, port = self.create_runner_and_setup_results_template(args=["--reset-results"])
filesystem = port.host.filesystem
output_json_path = runner._output_json_path()
filesystem.write_text_file(output_json_path, '[{"previous": "results"}]')
self._test_run_with_json_output(runner, port.host.filesystem)
self.assertEqual(self._load_output_json(runner), [{
"buildTime": "2013-02-08T15:19:37.460000", "tests": self._event_target_wrapper_and_inspector_results,
"revisions": {"blink": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}])
self.assertTrue(filesystem.isfile(filesystem.splitext(output_json_path)[0] + '.html'))
def test_run_generates_and_show_results_page(self):
runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json'])
page_shown = []
port.show_results_html_file = lambda path: page_shown.append(path)
filesystem = port.host.filesystem
self._test_run_with_json_output(runner, filesystem, results_shown=False)
expected_entry = {"buildTime": "2013-02-08T15:19:37.460000", "tests": self._event_target_wrapper_and_inspector_results,
"revisions": {"blink": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}
self.maxDiff = None
self.assertEqual(runner._output_json_path(), '/mock-checkout/output.json')
self.assertEqual(self._load_output_json(runner), [expected_entry])
self.assertEqual(filesystem.read_text_file('/mock-checkout/output.html'),
'BEGIN<script src="/test.checkout/some.js"></script><script src="/test.checkout/other.js"></script>'
'<script>%s</script>END' % port.host.filesystem.read_text_file(runner._output_json_path()))
self.assertEqual(page_shown[0], '/mock-checkout/output.html')
self._test_run_with_json_output(runner, filesystem, results_shown=False)
self.assertEqual(runner._output_json_path(), '/mock-checkout/output.json')
self.assertEqual(self._load_output_json(runner), [expected_entry, expected_entry])
self.assertEqual(filesystem.read_text_file('/mock-checkout/output.html'),
'BEGIN<script src="/test.checkout/some.js"></script><script src="/test.checkout/other.js"></script>'
'<script>%s</script>END' % port.host.filesystem.read_text_file(runner._output_json_path()))
def test_run_respects_no_show_results(self):
show_results_html_file = lambda path: page_shown.append(path)
runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json'])
page_shown = []
port.show_results_html_file = show_results_html_file
self._test_run_with_json_output(runner, port.host.filesystem, results_shown=False)
self.assertEqual(page_shown[0], '/mock-checkout/output.html')
runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
'--no-show-results'])
page_shown = []
port.show_results_html_file = show_results_html_file
self._test_run_with_json_output(runner, port.host.filesystem, results_shown=False)
self.assertEqual(page_shown, [])
def test_run_with_bad_output_json(self):
runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json'])
port.host.filesystem.write_text_file('/mock-checkout/output.json', 'bad json')
self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_MERGE)
port.host.filesystem.write_text_file('/mock-checkout/output.json', '{"another bad json": "1"}')
self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_MERGE)
def test_run_with_slave_config_json(self):
runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
'--slave-config-json-path=/mock-checkout/slave-config.json', '--test-results-server=some.host'])
port.host.filesystem.write_text_file('/mock-checkout/slave-config.json', '{"key": "value"}')
self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True)
self.assertEqual(self._load_output_json(runner), [{
"buildTime": "2013-02-08T15:19:37.460000", "tests": self._event_target_wrapper_and_inspector_results,
"revisions": {"blink": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}, "builderKey": "value"}])
def test_run_with_bad_slave_config_json(self):
runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
'--slave-config-json-path=/mock-checkout/slave-config.json', '--test-results-server=some.host'])
logs = self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_SOURCE_JSON)
self.assertTrue('Missing slave configuration JSON file: /mock-checkout/slave-config.json' in logs)
port.host.filesystem.write_text_file('/mock-checkout/slave-config.json', 'bad json')
self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_SOURCE_JSON)
port.host.filesystem.write_text_file('/mock-checkout/slave-config.json', '["another bad json"]')
self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_SOURCE_JSON)
def test_run_with_multiple_repositories(self):
runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
'--test-results-server=some.host'])
port.repository_paths = lambda: [('webkit', '/mock-checkout'), ('some', '/mock-checkout/some')]
self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True)
self.assertEqual(self._load_output_json(runner), [{
"buildTime": "2013-02-08T15:19:37.460000", "tests": self._event_target_wrapper_and_inspector_results,
"revisions": {"webkit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"},
"some": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}])
def test_run_with_upload_json(self):
runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
'--test-results-server', 'some.host', '--platform', 'platform1', '--builder-name', 'builder1', '--build-number', '123'])
self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True)
generated_json = json.loads(port.host.filesystem.files['/mock-checkout/output.json'])
self.assertEqual(generated_json[0]['platform'], 'platform1')
self.assertEqual(generated_json[0]['builderName'], 'builder1')
self.assertEqual(generated_json[0]['buildNumber'], 123)
self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=False, expected_exit_code=PerfTestsRunner.EXIT_CODE_FAILED_UPLOADING)
def test_run_with_upload_json_should_generate_perf_webkit_json(self):
runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
'--test-results-server', 'some.host', '--platform', 'platform1', '--builder-name', 'builder1', '--build-number', '123',
'--slave-config-json-path=/mock-checkout/slave-config.json'])
port.host.filesystem.write_text_file('/mock-checkout/slave-config.json', '{"key": "value1"}')
self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True)
generated_json = json.loads(port.host.filesystem.files['/mock-checkout/output.json'])
self.assertTrue(isinstance(generated_json, list))
self.assertEqual(len(generated_json), 1)
output = generated_json[0]
self.maxDiff = None
self.assertEqual(output['platform'], 'platform1')
self.assertEqual(output['buildNumber'], 123)
self.assertEqual(output['buildTime'], '2013-02-08T15:19:37.460000')
self.assertEqual(output['builderName'], 'builder1')
self.assertEqual(output['builderKey'], 'value1')
self.assertEqual(output['revisions'], {'blink': {'revision': '5678', 'timestamp': '2013-02-01 08:48:05 +0000'}})
self.assertEqual(output['tests'].keys(), ['Bindings'])
self.assertEqual(sorted(output['tests']['Bindings'].keys()), ['tests', 'url'])
self.assertEqual(output['tests']['Bindings']['url'], 'https://src.chromium.org/viewvc/blink/trunk/PerformanceTests/Bindings')
self.assertEqual(output['tests']['Bindings']['tests'].keys(), ['event-target-wrapper'])
self.assertEqual(output['tests']['Bindings']['tests']['event-target-wrapper'], {
'url': 'https://src.chromium.org/viewvc/blink/trunk/PerformanceTests/Bindings/event-target-wrapper.html',
'metrics': {'Time': {'current': [[1486.0, 1471.0, 1510.0, 1505.0, 1478.0, 1490.0]] * 4}}})
def test_run_with_repeat(self):
self.maxDiff = None
runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
'--test-results-server=some.host', '--repeat', '5'])
self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True, repeat=5)
self.assertEqual(self._load_output_json(runner), [
{"buildTime": "2013-02-08T15:19:37.460000",
"tests": self._event_target_wrapper_and_inspector_results,
"revisions": {"blink": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}},
{"buildTime": "2013-02-08T15:19:37.460000",
"tests": self._event_target_wrapper_and_inspector_results,
"revisions": {"blink": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}},
{"buildTime": "2013-02-08T15:19:37.460000",
"tests": self._event_target_wrapper_and_inspector_results,
"revisions": {"blink": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}},
{"buildTime": "2013-02-08T15:19:37.460000",
"tests": self._event_target_wrapper_and_inspector_results,
"revisions": {"blink": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}},
{"buildTime": "2013-02-08T15:19:37.460000",
"tests": self._event_target_wrapper_and_inspector_results,
"revisions": {"blink": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}])
def test_run_with_test_runner_count(self):
runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
'--test-runner-count=3'])
self._test_run_with_json_output(runner, port.host.filesystem, compare_logs=False)
generated_json = json.loads(port.host.filesystem.files['/mock-checkout/output.json'])
self.assertTrue(isinstance(generated_json, list))
self.assertEqual(len(generated_json), 1)
output = generated_json[0]['tests']['Bindings']['tests']['event-target-wrapper']['metrics']['Time']['current']
self.assertEqual(len(output), 3)
expectedMetrics = EventTargetWrapperTestData.results['metrics']['Time']['current'][0]
for metrics in output:
self.assertEqual(metrics, expectedMetrics)
| bsd-3-clause | -7,066,038,236,893,051,000 | 49.728702 | 201 | 0.662223 | false |
Lab603/PicEncyclopedias | jni-build/jni/include/tensorflow/python/client/session.py | 3 | 47046 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A client interface for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import threading
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import pywrap_tensorflow as tf_session
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import session_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util import nest
class SessionInterface(object):
"""Base class for implementations of TensorFlow client sessions."""
@property
def graph(self):
"""The underlying TensorFlow graph, to be used in building Operations."""
raise NotImplementedError('graph')
@property
def sess_str(self):
"""The TensorFlow process to which this session will connect."""
raise NotImplementedError('sess_str')
def run(self, fetches, feed_dict=None, options=None, run_metadata=None):
"""Runs operations in the session. See `Session.run()` for details."""
raise NotImplementedError('run')
def partial_run_setup(self, fetches, feeds=None):
"""Sets up the feeds and fetches for partial runs in the session."""
raise NotImplementedError('partial_run_setup')
def partial_run(self, handle, fetches, feed_dict=None):
"""Continues the execution with additional feeds and fetches."""
raise NotImplementedError('partial_run')
def _get_indexed_slices_value_from_fetches(fetched_vals):
return ops.IndexedSlicesValue(fetched_vals[0], fetched_vals[1],
fetched_vals[2]
if len(fetched_vals) == 3 else None)
def _get_feeds_for_indexed_slices(feed, feed_val):
return list(zip([feed.values, feed.indices] if feed.dense_shape is None else
[feed.values, feed.indices, feed.dense_shape], feed_val))
# List of extensions supported to convert run arguments into actual fetches and
# feeds.
#
# Each element in the list is a tuple of (Type, fetch_fn, feed_fn1, feed_fn2),
# where the function signatures are:
# fetch_fn : Type -> (list of Tensors,
# lambda: list of fetched np.ndarray -> TypeVal)
# feed_fn1 : Type, TypeVal -> list of (Tensor, value)
# feed_fn2 : Type -> list of Tensors
#
# `fetch_fn` describes how to expand fetch into its
# component Tensors and how to contract the fetched results back into
# a single return value.
#
# Each feed function describes how to unpack a single fed value and map it to
# feeds of one or more tensors and their corresponding values: `feed_fn1` is
# used to feed a run, `feed_fn2` to set up a partial run.
#
# TODO(touts): We could reimplement these as specialized _FeedMapper
# implementations after we refactor the feed handling code to use them.
#
# Eventually, this registration could be opened up to support custom Tensor
# expansions.
# pylint: disable=g-long-lambda
_REGISTERED_EXPANSIONS = [
# SparseTensors are fetched as SparseTensorValues. They can be fed
# SparseTensorValues or normal tuples.
(ops.SparseTensor,
lambda fetch: (
[fetch.indices, fetch.values, fetch.shape],
lambda fetched_vals: ops.SparseTensorValue(*fetched_vals)),
lambda feed, feed_val: list(zip(
[feed.indices, feed.values, feed.shape], feed_val)),
lambda feed: [feed.indices, feed.values, feed.shape]),
# IndexedSlices are fetched as IndexedSlicesValues. They can be fed
# IndexedSlicesValues or normal tuples.
(ops.IndexedSlices,
lambda fetch: (
[fetch.values, fetch.indices] if fetch.dense_shape is None
else [fetch.values, fetch.indices, fetch.dense_shape],
_get_indexed_slices_value_from_fetches),
_get_feeds_for_indexed_slices,
lambda feed: [feed.values, feed.indices] if feed.dense_shape is None
else [feed.values, feed.indices, feed.dense_shape]),
# The default catches all other types and performs no expansions.
(object,
lambda fetch: ([fetch], lambda fetched_vals: fetched_vals[0]),
lambda feed, feed_val: [(feed, feed_val)],
lambda feed: [feed])]
# pylint: enable=g-long-lambda
class _FetchMapper(object):
"""Definition of the interface provided by fetch mappers.
Fetch mappers are utility classes used by the _FetchHandler to handle
arbitrary structures for the `fetch` argument to `Session.run()`.
The `fetch` argument can be of various shapes: single tensor or op, list of
fetches, tuple of fetches, namedtuple of fetches, or dict of fetches. The
structures can be arbitrarily nested.
The low level run() API only wants a list of tensor or op names. The various
`_FetchMapper` subclasses below take care of handling the different shapes:
uniquifying the fetches, and constructing results with the original shape.
"""
def unique_fetches(self):
"""Return the list of unique tensors or ops needed by this fetch mapper.
Returns:
A list of tensors or ops.
"""
raise NotImplementedError('Must be implemented by subclasses')
def build_results(self, values):
"""Build results that match the original shape of the fetch.
Args:
values: List of values returned by run(). The values correspond
exactly to the list tensors or ops returned by unique_fetches().
Returns:
A struct of the same shape as the original fetch object handled by
this fetch mapper. In the returned struct, the original fetches are
replaced by their fetched values.
"""
raise NotImplementedError('Must be implemented by subclasses')
@staticmethod
def for_fetch(fetch):
"""Creates fetch mapper that handles the structure of `fetch`.
The default graph must be the one from which we want to fetch values when
this function is called.
Args:
fetch: An arbitrary fetch structure: singleton, list, tuple,
namedtuple, or dict.
Returns:
An instance of a subclass of `_FetchMapper` that handles the shape.
"""
if fetch is None:
raise TypeError('Fetch argument %r has invalid type %r' %
(fetch, type(fetch)))
elif isinstance(fetch, (list, tuple)):
# NOTE(touts): This is also the code path for namedtuples.
return _ListFetchMapper(fetch)
elif isinstance(fetch, dict):
return _DictFetchMapper(fetch)
else:
# Look for a handler in the registered expansions.
for tensor_type, fetch_fn, _, _ in _REGISTERED_EXPANSIONS:
if isinstance(fetch, tensor_type):
fetches, contraction_fn = fetch_fn(fetch)
return _ElementFetchMapper(fetches, contraction_fn)
# Did not find anything.
raise TypeError('Fetch argument %r has invalid type %r' %
(fetch, type(fetch)))
class _ElementFetchMapper(_FetchMapper):
"""Fetch mapper for singleton tensors and ops."""
def __init__(self, fetches, contraction_fn):
"""Creates an _ElementFetchMapper.
This is the fetch mapper used for leaves in the fetch struct. Because of
the expansions mechanism, a leaf can actually fetch more than one tensor.
Also note that the fetches here can be just strings (tensor or op names) or
any other object that the graph knows how to convert to a tensor, such as a
Variable. So we have to run each fetch through `as_graph_element()` to get
the corresponding tensor or op.
Args:
fetches: List of objects, as returned by a fetch_fn defined
in _REGISTERED_EXPANSIONS.
contraction_fn: Callable as returned by a fetch_fn.
"""
self._unique_fetches = []
for fetch in fetches:
try:
self._unique_fetches.append(ops.get_default_graph().as_graph_element(
fetch, allow_tensor=True, allow_operation=True))
except TypeError as e:
raise TypeError('Fetch argument %r has invalid type %r, '
'must be a string or Tensor. (%s)'
% (fetch, type(fetch), str(e)))
except ValueError as e:
raise ValueError('Fetch argument %r cannot be interpreted as a '
'Tensor. (%s)' % (fetch, str(e)))
except KeyError as e:
raise ValueError('Fetch argument %r cannot be interpreted as a '
'Tensor. (%s)' % (fetch, str(e)))
self._contraction_fn = contraction_fn
def unique_fetches(self):
return self._unique_fetches
def build_results(self, values):
if not values:
# 'Operation' case
return None
else:
return self._contraction_fn(values)
def _uniquify_fetches(fetch_mappers):
"""Uniquifies fetches from a list of fetch_mappers.
This is a utility function used by _ListFetchMapper and _DictFetchMapper. It
gathers all the unique fetches from a list of mappers and builds a list
containing all of them but without duplicates (unique_fetches).
It also returns a 2-D list of integers (values_indices) indicating at which
index in unique_fetches the fetches of the mappers are located.
This list is as follows:
values_indices[mapper_index][mapper_fetch_index] = unique_fetches_index
Args:
fetch_mappers: list of fetch mappers.
Returns:
A list of fetches.
A 2-D list of integers.
"""
unique_fetches = []
value_indices = []
seen_fetches = {}
for m in fetch_mappers:
m_value_indices = []
for f in m.unique_fetches():
j = seen_fetches.get(f)
if j is None:
j = len(seen_fetches)
seen_fetches[f] = j
unique_fetches.append(f)
m_value_indices.append(j)
value_indices.append(m_value_indices)
return unique_fetches, value_indices
class _ListFetchMapper(_FetchMapper):
"""Fetch mapper for lists, tuples, and namedtuples."""
def __init__(self, fetches):
"""Creates a _ListFetchMapper.
Args:
fetches: List, tuple, or namedtuple of fetches.
"""
self._fetch_type = type(fetches)
self._mappers = [_FetchMapper.for_fetch(fetch) for fetch in fetches]
self._unique_fetches, self._value_indices = _uniquify_fetches(self._mappers)
def unique_fetches(self):
return self._unique_fetches
def build_results(self, values):
# Create the list of results for each mapper.
results = []
for m, vi in zip(self._mappers, self._value_indices):
results.append(m.build_results([values[j] for j in vi]))
# Return a value of the original type of the fetches.
if self._fetch_type == list:
return results
elif self._fetch_type == tuple:
return tuple(results)
else:
# This is the code path for namedtuple.
return self._fetch_type(*results)
class _DictFetchMapper(_FetchMapper):
"""Fetch mapper for dicts."""
def __init__(self, fetches):
"""Creates a _DictFetchMapper.
Args:
fetches: Dict of fetches.
"""
self._keys = fetches.keys()
self._mappers = [_FetchMapper.for_fetch(fetch)
for fetch in fetches.values()]
self._unique_fetches, self._value_indices = _uniquify_fetches(self._mappers)
def unique_fetches(self):
return self._unique_fetches
def build_results(self, values):
results = {}
for k, m, vi in zip(self._keys, self._mappers, self._value_indices):
results[k] = m.build_results([values[j] for j in vi])
return results
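# Illustrative sketch (not part of the original module): the fetch shapes the
# mappers above support. `sess`, `t1`, `t2` and `t3` are placeholders for a
# Session and three tensors.
#
# sess.run(t1)                        # single fetch -> single value
# sess.run([t1, t2])                  # _ListFetchMapper -> [v1, v2]
# sess.run({'a': t1, 'b': [t2, t3]})  # _DictFetchMapper -> {'a': v1, 'b': [v2, v3]}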
class _FetchHandler(object):
"""Handler for structured fetches.
Given a graph, a user-provided structure for fetches, and a feed dict, this
class takes care of generating a list of tensor names to fetch and op names
to run for a low level `run()` call.
Given the results of the low level run call, this class can also rebuild a
result structure matching the user-provided structure for fetches, but
containing the corresponding results.
"""
# TODO(touts): Make this class also take care of destructuring the feed
# dict instead of doing it in the callers.
def __init__(self, graph, fetches, feeds):
"""Creates a fetch handler.
Args:
graph: Graph of the fetches. Used to check for fetchability
and to convert all fetches to tensors or ops as needed.
fetches: An arbitrary fetch structure: singleton, list, tuple,
namedtuple, or dict.
feeds: A feed dict where keys are fully resolved tensor names.
"""
with graph.as_default():
self._fetch_mapper = _FetchMapper.for_fetch(fetches)
self._fetches = []
self._targets = []
self._feeds = feeds
self._ops = []
self._fetch_handles = {}
for fetch in self._fetch_mapper.unique_fetches():
fetch_name = compat.as_bytes(fetch.name)
if isinstance(fetch, ops.Operation):
self._assert_fetchable(graph, fetch)
self._targets.append(fetch_name)
self._ops.append(True)
else:
self._assert_fetchable(graph, fetch.op)
self._fetches.append(fetch_name)
self._ops.append(False)
# Remember the fetch if it is for a tensor handle.
if isinstance(fetch, ops.Tensor) and fetch.op.type == 'GetSessionHandle':
self._fetch_handles[fetch_name] = fetch.op.inputs[0].dtype
self._final_fetches = [x for x in self._fetches if x not in feeds]
def _assert_fetchable(self, graph, op):
if not graph.is_fetchable(op):
raise ValueError(
'Operation %r has been marked as not fetchable.' % op.name)
def fetches(self):
"""Return the unique names of tensors to fetch.
Returns:
A list of strings.
"""
return self._final_fetches
def targets(self):
"""Return the unique names of ops to run.
Returns:
A list of strings.
"""
return self._targets
def build_results(self, session, tensor_values):
"""Build results matching the original fetch shape.
`tensor_values` must be a list of the same length as
the one returned by `fetches()`, and holding the requested
fetch values.
This method builds a struct with the same shape as the original `fetches`
passed to the constructor, in which the fetches are replaced by their
fetched value.
Args:
session: The enclosing session. Used for tensor handles.
tensor_values: List of values matching the list returned
by fetches().
Returns:
A structure of the same shape as the original `fetches` argument but
containing tensors or None (for fetched ops).
"""
full_values = []
assert len(self._final_fetches) == len(tensor_values)
i = 0
j = 0
for is_op in self._ops:
if is_op:
full_values.append(None)
else:
# If the fetch was in the feeds, use the fed value, otherwise
# use the returned value.
value = self._feeds.get(self._fetches[i])
if value is None:
value = tensor_values[j]
j += 1
dtype = self._fetch_handles.get(self._fetches[i])
if dtype:
full_values.append(session_ops.TensorHandle(value, dtype, session))
else:
full_values.append(value)
i += 1
assert j == len(tensor_values)
return self._fetch_mapper.build_results(full_values)
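# Illustrative example (not part of the original module): for
# fetches = {'loss': loss_tensor, 'train': train_op}, fetches() returns the name
# of loss_tensor, targets() returns the name of train_op, and
# build_results(session, [loss_value]) returns {'loss': loss_value, 'train': None}.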
class BaseSession(SessionInterface):
"""A class for interacting with a TensorFlow computation.
The BaseSession enables incremental graph building with inline
execution of Operations and evaluation of Tensors.
"""
def __init__(self, target='', graph=None, config=None):
"""Constructs a new TensorFlow session.
Args:
target: (Optional) The TensorFlow execution engine to connect to.
graph: (Optional) The graph to be used. If this argument is None,
the default graph will be used.
config: (Optional) ConfigProto proto used to configure the session.
Raises:
tf.errors.OpError: Or one of its subclasses if an error occurs while
creating the TensorFlow session.
TypeError: If one of the arguments has the wrong type.
"""
if graph is None:
self._graph = ops.get_default_graph()
else:
if not isinstance(graph, ops.Graph):
raise TypeError('graph must be a tf.Graph, but got %s' % type(graph))
self._graph = graph
self._opened = False
self._closed = False
self._current_version = 0
self._extend_lock = threading.Lock()
if target is not None:
try:
self._target = compat.as_bytes(target)
except TypeError:
raise TypeError('target must be a string, but got %s' % type(target))
else:
self._target = None
self._delete_lock = threading.Lock()
self._dead_handles = []
if config is not None:
if not isinstance(config, config_pb2.ConfigProto):
raise TypeError('config must be a tf.ConfigProto, but got %s'
% type(config))
self._config = config
self._add_shapes = config.graph_options.infer_shapes
else:
self._config = None
self._add_shapes = False
self._session = None
opts = tf_session.TF_NewSessionOptions(target=self._target, config=config)
try:
with errors.raise_exception_on_not_ok_status() as status:
self._session = tf_session.TF_NewSession(opts, status)
finally:
tf_session.TF_DeleteSessionOptions(opts)
def close(self):
"""Closes this session.
Calling this method frees all resources associated with the session.
Raises:
tf.errors.OpError: Or one of its subclasses if an error occurs while
closing the TensorFlow session.
"""
with self._extend_lock:
if self._opened and not self._closed:
self._closed = True
with errors.raise_exception_on_not_ok_status() as status:
tf_session.TF_CloseSession(self._session, status)
def __del__(self):
self.close()
if self._session is not None:
with errors.raise_exception_on_not_ok_status() as status:
tf_session.TF_DeleteSession(self._session, status)
self._session = None
@property
def graph(self):
"""The graph that was launched in this session."""
return self._graph
@property
def graph_def(self):
"""A serializable version of the underlying TensorFlow graph.
Returns:
A graph_pb2.GraphDef proto containing nodes for all of the Operations in
the underlying TensorFlow graph.
"""
return self._graph.as_graph_def(add_shapes=self._add_shapes)
@property
def sess_str(self):
return self._target
def as_default(self):
"""Returns a context manager that makes this object the default session.
Use with the `with` keyword to specify that calls to
[`Operation.run()`](../../api_docs/python/framework.md#Operation.run) or
[`Tensor.eval()`](../../api_docs/python/framework.md#Tensor.eval) should be
executed in this session.
```python
    c = tf.constant(...)
sess = tf.Session()
with sess.as_default():
assert tf.get_default_session() is sess
print(c.eval())
```
To get the current default session, use
[`tf.get_default_session()`](#get_default_session).
*N.B.* The `as_default` context manager *does not* close the
session when you exit the context, and you must close the session
explicitly.
```python
c = tf.constant(...)
sess = tf.Session()
with sess.as_default():
print(c.eval())
# ...
with sess.as_default():
print(c.eval())
sess.close()
```
Alternatively, you can use `with tf.Session():` to create a
session that is automatically closed on exiting the context,
including when an uncaught exception is raised.
    *N.B.* The default session is a property of the current thread. If you
create a new thread, and wish to use the default session in that
thread, you must explicitly add a `with sess.as_default():` in that
thread's function.
Returns:
A context manager using this session as the default session.
"""
return ops.default_session(self)
def run(self, fetches, feed_dict=None, options=None, run_metadata=None):
"""Runs operations and evaluates tensors in `fetches`.
This method runs one "step" of TensorFlow computation, by
running the necessary graph fragment to execute every `Operation`
and evaluate every `Tensor` in `fetches`, substituting the values in
`feed_dict` for the corresponding input values.
The `fetches` argument may be a single graph element, or an arbitrarily
nested list, tuple, namedtuple, or dict containing graph elements at its
leaves. A graph element can be one of the following types:
* An [`Operation`](../../api_docs/python/framework.md#Operation).
The corresponding fetched value will be `None`.
* A [`Tensor`](../../api_docs/python/framework.md#Tensor).
The corresponding fetched value will be a numpy ndarray containing the
value of that tensor.
* A [`SparseTensor`](../../api_docs/python/sparse_ops.md#SparseTensor).
The corresponding fetched value will be a
[`SparseTensorValue`](../../api_docs/python/sparse_ops.md#SparseTensorValue)
containing the value of that sparse tensor.
* A `get_tensor_handle` op. The corresponding fetched value will be a
numpy ndarray containing the handle of that tensor.
* A `string` which is the name of a tensor or operation in the graph.
The value returned by `run()` has the same shape as the `fetches` argument,
where the leaves are replaced by the corresponding values returned by
TensorFlow.
Example:
```python
a = tf.constant([10, 20])
b = tf.constant([1.0, 2.0])
# 'fetches' can be a singleton
v = session.run(a)
# v is the numpy array [10, 20]
# 'fetches' can be a list.
v = session.run([a, b])
    # v is a Python list with 2 numpy arrays: the numpy array [10, 20] and the
# 1-D array [1.0, 2.0]
# 'fetches' can be arbitrary lists, tuples, namedtuple, dicts:
MyData = collections.namedtuple('MyData', ['a', 'b'])
v = session.run({'k1': MyData(a, b), 'k2': [b, a]})
# v is a dict with
# v['k1'] is a MyData namedtuple with 'a' the numpy array [10, 20] and
# 'b' the numpy array [1.0, 2.0]
# v['k2'] is a list with the numpy array [1.0, 2.0] and the numpy array
# [10, 20].
```
The optional `feed_dict` argument allows the caller to override
the value of tensors in the graph. Each key in `feed_dict` can be
one of the following types:
* If the key is a [`Tensor`](../../api_docs/python/framework.md#Tensor), the
value may be a Python scalar, string, list, or numpy ndarray
that can be converted to the same `dtype` as that
tensor. Additionally, if the key is a
[placeholder](../../api_docs/python/io_ops.md#placeholder), the shape of
the value will be checked for compatibility with the placeholder.
* If the key is a
[`SparseTensor`](../../api_docs/python/sparse_ops.md#SparseTensor),
the value should be a
[`SparseTensorValue`](../../api_docs/python/sparse_ops.md#SparseTensorValue).
* If the key is a nested tuple of `Tensor`s or `SparseTensor`s, the value
should be a nested tuple with the same structure that maps to their
corresponding values as above.
Each value in `feed_dict` must be convertible to a numpy array of the dtype
of the corresponding key.
The optional `options` argument expects a [`RunOptions`] proto. The options
allow controlling the behavior of this particular step (e.g. turning tracing
on).
The optional `run_metadata` argument expects a [`RunMetadata`] proto. When
appropriate, the non-Tensor output of this step will be collected there. For
example, when users turn on tracing in `options`, the profiled info will be
collected into this argument and passed back.
Args:
fetches: A single graph element, a list of graph elements,
or a dictionary whose values are graph elements or lists of graph
elements (described above).
feed_dict: A dictionary that maps graph elements to values
(described above).
options: A [`RunOptions`] protocol buffer
run_metadata: A [`RunMetadata`] protocol buffer
Returns:
Either a single value if `fetches` is a single graph element, or
a list of values if `fetches` is a list, or a dictionary with the
same keys as `fetches` if that is a dictionary (described above).
Raises:
RuntimeError: If this `Session` is in an invalid state (e.g. has been
closed).
TypeError: If `fetches` or `feed_dict` keys are of an inappropriate type.
ValueError: If `fetches` or `feed_dict` keys are invalid or refer to a
`Tensor` that doesn't exist.
"""
run_metadata_ptr = tf_session.TF_NewBuffer()
if options:
options_ptr = tf_session.TF_NewBufferFromString(
compat.as_bytes(options.SerializeToString()))
else:
options_ptr = None
try:
result = self._run(None, fetches, feed_dict, options_ptr,
run_metadata_ptr)
if run_metadata:
proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
run_metadata.ParseFromString(compat.as_bytes(proto_data))
finally:
tf_session.TF_DeleteBuffer(run_metadata_ptr)
if options:
tf_session.TF_DeleteBuffer(options_ptr)
return result
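  # Illustrative tracing sketch (not part of the original module; assumes the
  # RunOptions/RunMetadata protos are exposed as tf.RunOptions / tf.RunMetadata):
  #   run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
  #   run_metadata = tf.RunMetadata()
  #   sess.run(c, options=run_options, run_metadata=run_metadata)
  #   # run_metadata.step_stats now holds the collected trace data.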
def partial_run(self, handle, fetches, feed_dict=None):
"""Continues the execution with more feeds and fetches.
This is EXPERIMENTAL and subject to change.
To use partial execution, a user first calls `partial_run_setup()` and
then a sequence of `partial_run()`. `partial_run_setup` specifies the
list of feeds and fetches that will be used in the subsequent
`partial_run` calls.
The optional `feed_dict` argument allows the caller to override
the value of tensors in the graph. See run() for more information.
Below is a simple example:
```python
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
c = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
r2 = math_ops.mul(r1, c)
h = sess.partial_run_setup([r1, r2], [a, b, c])
res = sess.partial_run(h, r1, feed_dict={a: 1, b: 2})
res = sess.partial_run(h, r2, feed_dict={c: res})
```
Args:
handle: A handle for a sequence of partial runs.
fetches: A single graph element, a list of graph elements,
or a dictionary whose values are graph elements or lists of graph
elements (see documentation for `run`).
feed_dict: A dictionary that maps graph elements to values
(described above).
Returns:
Either a single value if `fetches` is a single graph element, or
a list of values if `fetches` is a list, or a dictionary with the
same keys as `fetches` if that is a dictionary
(see documentation for `run`).
Raises:
tf.errors.OpError: Or one of its subclasses on error.
"""
# TODO(touts): Support feeding and fetching the same tensor.
return self._run(handle, fetches, feed_dict, None, None)
def partial_run_setup(self, fetches, feeds=None):
"""Sets up a graph with feeds and fetches for partial run.
This is EXPERIMENTAL and subject to change.
Note that contrary to `run`, `feeds` only specifies the graph elements.
The tensors will be supplied by the subsequent `partial_run` calls.
Args:
fetches: A single graph element, or a list of graph elements.
feeds: A single graph element, or a list of graph elements.
Returns:
A handle for partial run.
Raises:
RuntimeError: If this `Session` is in an invalid state (e.g. has been
closed).
TypeError: If `fetches` or `feed_dict` keys are of an inappropriate type.
tf.errors.OpError: Or one of its subclasses if a TensorFlow error happens.
"""
def _feed_fn(feed):
for tensor_type, _, _, feed_fn in _REGISTERED_EXPANSIONS:
if isinstance(feed, tensor_type):
return feed_fn(feed)
raise TypeError('Feed argument %r has invalid type %r'
% (feed, type(feed)))
# Check session.
if self._closed:
raise RuntimeError('Attempted to use a closed Session.')
if self.graph.version == 0:
raise RuntimeError('The Session graph is empty. Add operations to the '
'graph before calling run().')
# Create request.
feed_list = []
# Validate and process feed_list.
is_list_feed = isinstance(feeds, (list, tuple))
if not is_list_feed:
feeds = [feeds]
for feed in feeds:
for subfeed in _feed_fn(feed):
try:
subfeed_t = self.graph.as_graph_element(subfeed, allow_tensor=True,
allow_operation=False)
feed_list.append(compat.as_bytes(subfeed_t.name))
except Exception as e:
e.message = ('Cannot interpret feed_list key as Tensor: '
+ e.message)
e.args = (e.message,)
raise e
# Validate and process fetches.
# TODO(touts): Support feeding and fetching the same tensor.
fetch_handler = _FetchHandler(self._graph, fetches, {})
# Set up a graph with feeds and fetches for partial run.
def _setup_fn(session, feed_list, fetch_list, target_list):
self._extend_graph()
with errors.raise_exception_on_not_ok_status() as status:
return tf_session.TF_PRunSetup(session, feed_list, fetch_list,
target_list, status)
return self._do_call(_setup_fn, self._session, feed_list,
fetch_handler.fetches(), fetch_handler.targets())
def _run(self, handle, fetches, feed_dict, options, run_metadata):
"""Perform either run or partial_run, depending the presence of `handle`."""
def _feed_fn(feed, feed_val):
for tensor_type, _, feed_fn, _ in _REGISTERED_EXPANSIONS:
if isinstance(feed, tensor_type):
return feed_fn(feed, feed_val)
raise TypeError('Feed argument %r has invalid type %r'
% (feed, type(feed)))
# Check session.
if self._closed:
raise RuntimeError('Attempted to use a closed Session.')
if self.graph.version == 0:
raise RuntimeError('The Session graph is empty. Add operations to the '
'graph before calling run().')
# Create request.
feed_dict_string = {}
feed_map = {}
# Validate and process feed_dict.
if feed_dict:
feed_dict = nest.flatten_dict_items(feed_dict)
for feed, feed_val in feed_dict.items():
for subfeed, subfeed_val in _feed_fn(feed, feed_val):
try:
subfeed_t = self.graph.as_graph_element(subfeed, allow_tensor=True,
allow_operation=False)
except Exception as e:
raise TypeError('Cannot interpret feed_dict key as Tensor: '
+ e.args[0])
if isinstance(subfeed_val, ops.Tensor):
raise TypeError('The value of a feed cannot be a tf.Tensor object. '
'Acceptable feed values include Python scalars, '
'strings, lists, or numpy ndarrays.')
subfeed_dtype = subfeed_t.dtype.as_numpy_dtype
if isinstance(subfeed_val,
int) and subfeed_dtype(subfeed_val) != subfeed_val:
raise TypeError(
'Type of feed value ' + str(subfeed_val) + ' is not'
' compatible with Tensor type ' + str(subfeed_dtype) + '.'
' Try explicitly setting the type of the feed tensor'
' to a larger type (e.g. int64).')
np_val = np.asarray(subfeed_val, dtype=subfeed_dtype)
if not subfeed_t.get_shape().is_compatible_with(np_val.shape):
raise ValueError(
'Cannot feed value of shape %r for Tensor %r, '
'which has shape %r'
% (np_val.shape, subfeed_t.name, str(subfeed_t.get_shape())))
if not self.graph.is_feedable(subfeed_t):
raise ValueError('Tensor %s may not be fed.' % subfeed_t)
subfeed_name = compat.as_bytes(subfeed_t.name)
feed_dict_string[subfeed_name] = np_val
feed_map[subfeed_name] = (subfeed_t, subfeed_val)
# Create a fetch handler to take care of the structure of fetches.
fetch_handler = _FetchHandler(self._graph, fetches, feed_dict_string)
# Run request and get response.
    # We need to keep the movers alive for the following _do_run().
    # These movers are no longer needed when _do_run() completes, and
    # are deleted when `movers` goes out of scope at the end of this _run().
# TODO(yuanbyu, keveman): Revisit whether we should just treat feeding
# of a handle from a different device as an error.
movers = self._update_with_movers(feed_dict_string, feed_map)
final_fetches = fetch_handler.fetches()
final_targets = fetch_handler.targets()
if final_fetches or final_targets:
results = self._do_run(handle, final_targets, final_fetches,
feed_dict_string, options, run_metadata)
else:
results = []
return fetch_handler.build_results(self, results)
# Captures the name of a node in an error status.
_NODEDEF_NAME_RE = re.compile(r'\[\[Node: ([^ ]*?) =')
def _do_run(self, handle, target_list, fetch_list, feed_dict,
options, run_metadata):
"""Runs a step based on the given fetches and feeds.
Args:
handle: a handle for partial_run. None if this is just a call to run().
      target_list: A list of byte arrays corresponding to names of tensors
        or operations to be run, but not fetched.
fetch_list: A list of byte arrays corresponding to names of tensors to
be fetched and operations to be run.
feed_dict: A dictionary that maps tensor names (as byte arrays) to
numpy ndarrays.
options: A (pointer to a) [`RunOptions`] protocol buffer, or None
run_metadata: A (pointer to a) [`RunMetadata`] protocol buffer, or None
Returns:
A list of numpy ndarrays, corresponding to the elements of
`fetch_list`. If the ith element of `fetch_list` contains the
name of an operation, the first Tensor output of that operation
will be returned for that element.
Raises:
tf.errors.OpError: Or one of its subclasses on error.
"""
def _run_fn(session, feed_dict, fetch_list, target_list, options,
run_metadata):
# Ensure any changes to the graph are reflected in the runtime.
self._extend_graph()
with errors.raise_exception_on_not_ok_status() as status:
return tf_session.TF_Run(session, options,
feed_dict, fetch_list, target_list,
status, run_metadata)
def _prun_fn(session, handle, feed_dict, fetch_list):
if target_list:
raise RuntimeError('partial_run() requires empty target_list.')
with errors.raise_exception_on_not_ok_status() as status:
return tf_session.TF_PRun(session, handle, feed_dict, fetch_list,
status)
if handle is None:
return self._do_call(_run_fn, self._session, feed_dict, fetch_list,
target_list, options, run_metadata)
else:
return self._do_call(_prun_fn, self._session, handle, feed_dict,
fetch_list)
def _do_call(self, fn, *args):
try:
return fn(*args)
except errors.OpError as e:
message = compat.as_text(e.message)
m = BaseSession._NODEDEF_NAME_RE.search(message)
node_def = None
op = None
if m is not None:
node_name = m.group(1)
try:
op = self._graph.get_operation_by_name(node_name)
node_def = op.node_def
except KeyError:
pass
raise type(e)(node_def, op, message)
def _extend_graph(self):
# Ensure any changes to the graph are reflected in the runtime.
with self._extend_lock:
if self._graph.version > self._current_version:
# pylint: disable=protected-access
graph_def, self._current_version = self._graph._as_graph_def(
from_version=self._current_version,
add_shapes=self._add_shapes)
# pylint: enable=protected-access
with errors.raise_exception_on_not_ok_status() as status:
tf_session.TF_ExtendGraph(
self._session, graph_def.SerializeToString(), status)
self._opened = True
# The threshold to run garbage collection to delete dead tensors.
_DEAD_HANDLES_THRESHOLD = 10
def _register_dead_handle(self, handle):
# Register a dead handle in the session. Delete the dead tensors when
# the number of dead tensors exceeds certain threshold.
tensors_to_delete = None
with self._delete_lock:
self._dead_handles.append(handle)
if len(self._dead_handles) == BaseSession._DEAD_HANDLES_THRESHOLD:
tensors_to_delete = self._dead_handles
self._dead_handles = []
# Delete the dead tensors.
# TODO(yuanbyu): For now we use a sequence of runs to minimize the graph
# size and the overhead of graph construction/partitioning.
if tensors_to_delete:
for tensor_handle in tensors_to_delete:
feeds = {}
fetches = []
holder, deleter = session_ops._get_handle_deleter(self.graph,
tensor_handle)
feeds[holder] = tensor_handle
fetches.append(deleter)
self.run(fetches, feed_dict=feeds)
def _update_with_movers(self, feed_dict, feed_map):
    # If a tensor handle is fed to a device-incompatible placeholder, we move
    # the tensor to the right device, generate a new tensor handle, and update
    # `feed_dict` to use the new handle.
handle_movers = []
for feed_name, val in feed_map.items():
mover = session_ops._get_handle_mover(self.graph, *val)
if mover:
handle_movers.append((feed_name, val[1], mover))
# Transfer a tensor to the right device if needed.
if not handle_movers:
return []
else:
feeds = {}
fetches = []
for _, handle, mover in handle_movers:
feeds[mover[0]] = handle
fetches.append(mover[1])
handles = self.run(fetches, feed_dict=feeds)
for handle_mover, handle in zip(handle_movers, handles):
np_val = np.array(handle.handle, dtype=np.object)
feed_dict[handle_mover[0]] = np_val
return handles
class Session(BaseSession):
"""A class for running TensorFlow operations.
A `Session` object encapsulates the environment in which `Operation`
objects are executed, and `Tensor` objects are evaluated. For
example:
```python
# Build a graph.
a = tf.constant(5.0)
b = tf.constant(6.0)
c = a * b
# Launch the graph in a session.
sess = tf.Session()
# Evaluate the tensor `c`.
print(sess.run(c))
```
A session may own resources, such as
[variables](../../api_docs/python/state_ops.md#Variable), [queues](../../api_docs/python/io_ops.md#QueueBase),
and [readers](../../api_docs/python/io_ops.md#ReaderBase). It is important to release
these resources when they are no longer required. To do this, either
invoke the [`close()`](#Session.close) method on the session, or use
the session as a context manager. The following two examples are
equivalent:
```python
# Using the `close()` method.
sess = tf.Session()
sess.run(...)
sess.close()
# Using the context manager.
with tf.Session() as sess:
sess.run(...)
```
The [`ConfigProto`]
(https://www.tensorflow.org/code/tensorflow/core/protobuf/config.proto)
protocol buffer exposes various configuration options for a
session. For example, to create a session that uses soft constraints
for device placement, and log the resulting placement decisions,
create a session as follows:
```python
# Launch the graph in a session that allows soft device placement and
# logs the placement decisions.
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
log_device_placement=True))
```
@@__init__
@@run
@@close
@@graph
@@as_default
@@reset
"""
def __init__(self, target='', graph=None, config=None):
"""Creates a new TensorFlow session.
If no `graph` argument is specified when constructing the session,
the default graph will be launched in the session. If you are
    using more than one graph (created with `tf.Graph()`) in the same
process, you will have to use different sessions for each graph,
but each graph can be used in multiple sessions. In this case, it
is often clearer to pass the graph to be launched explicitly to
the session constructor.
Args:
target: (Optional.) The execution engine to connect to.
Defaults to using an in-process engine. See [Distributed Tensorflow]
(https://www.tensorflow.org/how_tos/distributed/index.html)
for more examples.
graph: (Optional.) The `Graph` to be launched (described above).
config: (Optional.) A [`ConfigProto`](https://www.tensorflow.org/code/tensorflow/core/protobuf/config.proto)
protocol buffer with configuration options for the session.
"""
super(Session, self).__init__(target, graph, config=config)
self._context_managers = [self.graph.as_default(), self.as_default()]
def __enter__(self):
for context_manager in self._context_managers:
context_manager.__enter__()
return self
def __exit__(self, exec_type, exec_value, exec_tb):
if exec_type is errors.OpError:
logging.error('Session closing due to OpError: %s', (exec_value,))
for context_manager in reversed(self._context_managers):
context_manager.__exit__(exec_type, exec_value, exec_tb)
self.close()
@staticmethod
def reset(target, containers=None, config=None):
"""Resets resource containers on `target`, and close all connected sessions.
A resource container is distributed across all workers in the
same cluster as `target`. When a resource container on `target`
is reset, resources associated with that container will be cleared.
In particular, all Variables in the container will become undefined:
they lose their values and shapes.
NOTE:
(i) reset() is currently only implemented for distributed sessions.
(ii) Any sessions on the master named by `target` will be closed.
If no resource containers are provided, all containers are reset.
Args:
target: The execution engine to connect to.
      containers: A list of resource container name strings, or `None` if
        all the containers are to be reset.
config: (Optional.) Protocol buffer with configuration options.
Raises:
tf.errors.OpError: Or one of its subclasses if an error occurs while
resetting containers.
"""
if target is not None:
target = compat.as_bytes(target)
if containers is not None:
containers = [compat.as_bytes(c) for c in containers]
else:
containers = []
tf_session.TF_Reset(target, containers, config)
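# Illustrative usage sketch (not part of the original module; the gRPC address
# and container name below are assumptions):
#   tf.Session.reset("grpc://host:2222", containers=["experiment0"])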
class InteractiveSession(BaseSession):
"""A TensorFlow `Session` for use in interactive contexts, such as a shell.
The only difference with a regular `Session` is that an `InteractiveSession`
installs itself as the default session on construction.
The methods [`Tensor.eval()`](../../api_docs/python/framework.md#Tensor.eval)
and [`Operation.run()`](../../api_docs/python/framework.md#Operation.run)
will use that session to run ops.
This is convenient in interactive shells and [IPython
notebooks](http://ipython.org), as it avoids having to pass an explicit
`Session` object to run ops.
For example:
```python
sess = tf.InteractiveSession()
a = tf.constant(5.0)
b = tf.constant(6.0)
c = a * b
# We can just use 'c.eval()' without passing 'sess'
print(c.eval())
sess.close()
```
Note that a regular session installs itself as the default session when it
is created in a `with` statement. The common usage in non-interactive
programs is to follow that pattern:
```python
a = tf.constant(5.0)
b = tf.constant(6.0)
c = a * b
with tf.Session():
# We can also use 'c.eval()' here.
print(c.eval())
```
@@__init__
@@close
"""
def __init__(self, target='', graph=None, config=None):
"""Creates a new interactive TensorFlow session.
If no `graph` argument is specified when constructing the session,
the default graph will be launched in the session. If you are
    using more than one graph (created with `tf.Graph()`) in the same
process, you will have to use different sessions for each graph,
but each graph can be used in multiple sessions. In this case, it
is often clearer to pass the graph to be launched explicitly to
the session constructor.
Args:
target: (Optional.) The execution engine to connect to.
Defaults to using an in-process engine.
graph: (Optional.) The `Graph` to be launched (described above).
config: (Optional) `ConfigProto` proto used to configure the session.
"""
if not config:
config = config_pb2.ConfigProto()
# Interactive sessions always place pruned graphs.
config.graph_options.place_pruned_graph = True
super(InteractiveSession, self).__init__(target, graph, config)
self._default_session = self.as_default()
self._default_session.enforce_nesting = False
self._default_session.__enter__()
self._explicit_graph = graph
if self._explicit_graph is not None:
self._default_graph = graph.as_default()
self._default_graph.enforce_nesting = False
self._default_graph.__enter__()
def close(self):
"""Closes an `InteractiveSession`."""
super(InteractiveSession, self).close()
if self._explicit_graph is not None:
self._default_graph.__exit__(None, None, None)
self._default_session.__exit__(None, None, None)
| mit | -141,476,206,813,532,000 | 36.278922 | 114 | 0.655911 | false |
nischalsheth/contrail-controller | src/config/schema-transformer/logger.py | 3 | 5204 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2016 Juniper Networks, Inc. All rights reserved.
#
"""
Schema Transformer monitor logger
"""
from sandesh_common.vns.ttypes import Module
from cfgm_common.vnc_logger import ConfigServiceLogger
from schema_transformer.config_db import DBBaseST, VirtualNetworkST,\
RoutingInstanceST, ServiceChain
from schema_transformer.sandesh.st_introspect import ttypes as sandesh
class SchemaTransformerLogger(ConfigServiceLogger):
def __init__(self, args=None, http_server_port=None):
module = Module.SCHEMA_TRANSFORMER
module_pkg = "schema_transformer"
self.context = "to_bgp"
super(SchemaTransformerLogger, self).__init__(
module, module_pkg, args, http_server_port)
def sandesh_init(self, http_server_port=None):
super(SchemaTransformerLogger, self).sandesh_init(http_server_port)
self._sandesh.trace_buffer_create(name="MessageBusNotifyTraceBuf",
size=1000)
def redefine_sandesh_handles(self):
sandesh.VnList.handle_request = self.sandesh_vn_handle_request
sandesh.RoutingInstanceList.handle_request = \
self.sandesh_ri_handle_request
sandesh.ServiceChainList.handle_request = \
self.sandesh_sc_handle_request
sandesh.StObjectReq.handle_request = \
self.sandesh_st_object_handle_request
def sandesh_ri_build(self, vn_name, ri_name):
vn = VirtualNetworkST.get(vn_name)
sandesh_ri_list = []
for riname in vn.routing_instances:
ri = RoutingInstanceST.get(riname)
if ri is None:
continue
sandesh_ri = sandesh.RoutingInstance(name=ri.obj.get_fq_name_str())
sandesh_ri.service_chain = ri.service_chain
sandesh_ri.connections = list(ri.connections)
sandesh_ri_list.append(sandesh_ri)
return sandesh_ri_list
# end sandesh_ri_build
def sandesh_ri_handle_request(self, req):
# Return the list of VNs
ri_resp = sandesh.RoutingInstanceListResp(routing_instances=[])
if req.vn_name is None:
for vn in VirtualNetworkST:
sandesh_ri = self.sandesh_ri_build(vn, req.ri_name)
ri_resp.routing_instances.extend(sandesh_ri)
elif req.vn_name in VirtualNetworkST:
sandesh_ri = self.sandesh_ri_build(req.vn_name, req.ri_name)
ri_resp.routing_instances.extend(sandesh_ri)
ri_resp.response(req.context())
# end sandesh_ri_handle_request
def sandesh_vn_build(self, vn_name):
vn = VirtualNetworkST.get(vn_name)
sandesh_vn = sandesh.VirtualNetwork(name=vn_name)
sandesh_vn.policies = vn.network_policys.keys()
sandesh_vn.connections = list(vn.connections)
sandesh_vn.routing_instances = vn.routing_instances
if vn.acl:
sandesh_vn.acl = vn.acl.get_fq_name_str()
if vn.dynamic_acl:
sandesh_vn.dynamic_acl = vn.dynamic_acl.get_fq_name_str()
return sandesh_vn
# end sandesh_vn_build
def sandesh_vn_handle_request(self, req):
# Return the list of VNs
vn_resp = sandesh.VnListResp(vn_names=[])
if req.vn_name is None:
for vn in VirtualNetworkST:
sandesh_vn = self.sandesh_vn_build(vn)
vn_resp.vn_names.append(sandesh_vn)
elif req.vn_name in VirtualNetworkST:
sandesh_vn = self.sandesh_vn_build(req.vn_name)
vn_resp.vn_names.append(sandesh_vn)
vn_resp.response(req.context())
# end sandesh_vn_handle_request
def sandesh_sc_handle_request(self, req):
sc_resp = sandesh.ServiceChainListResp(service_chains=[])
if req.sc_name is None:
for sc in ServiceChain.values():
sandesh_sc = sc.build_introspect()
sc_resp.service_chains.append(sandesh_sc)
elif req.sc_name in ServiceChain:
sandesh_sc = ServiceChain.get(req.sc_name).build_introspect()
sc_resp.service_chains.append(sandesh_sc)
sc_resp.response(req.context())
# end sandesh_sc_handle_request
def sandesh_st_object_handle_request(self, req):
st_resp = sandesh.StObjectListResp(objects=[])
obj_type_map = DBBaseST.get_obj_type_map()
if req.object_type is not None:
if req.object_type not in obj_type_map:
return st_resp
obj_cls_list = [obj_type_map[req.object_type]]
else:
obj_cls_list = obj_type_map.values()
for obj_cls in obj_cls_list:
id_or_name = req.object_id_or_fq_name
if id_or_name:
obj = obj_cls.get(id_or_name) or \
obj_cls.get_by_uuid(id_or_name)
if obj is None:
continue
st_resp.objects.append(obj.handle_st_object_req())
else:
for obj in obj_cls.values():
st_resp.objects.append(obj.handle_st_object_req())
st_resp.response(req.context())
# end sandesh_st_object_handle_request
| apache-2.0 | 3,715,075,726,233,246,700 | 39.65625 | 79 | 0.619331 | false |
promptworks/keystone | keystone/cli.py | 2 | 22753 | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from __future__ import print_function
import os
from oslo_config import cfg
from oslo_log import log
import pbr.version
from keystone import assignment
from keystone.common import driver_hints
from keystone.common import openssl
from keystone.common import sql
from keystone.common.sql import migration_helpers
from keystone.common import utils
from keystone import config
from keystone import exception
from keystone.i18n import _, _LW
from keystone import identity
from keystone import resource
from keystone import token
from keystone.token.providers.fernet import utils as fernet
CONF = cfg.CONF
LOG = log.getLogger(__name__)
class BaseApp(object):
name = None
@classmethod
def add_argument_parser(cls, subparsers):
parser = subparsers.add_parser(cls.name, help=cls.__doc__)
parser.set_defaults(cmd_class=cls)
return parser
class DbSync(BaseApp):
"""Sync the database."""
name = 'db_sync'
@classmethod
def add_argument_parser(cls, subparsers):
parser = super(DbSync, cls).add_argument_parser(subparsers)
parser.add_argument('version', default=None, nargs='?',
help=('Migrate the database up to a specified '
'version. If not provided, db_sync will '
'migrate the database to the latest known '
'version.'))
parser.add_argument('--extension', default=None,
help=('Migrate the database for the specified '
'extension. If not provided, db_sync will '
'migrate the common repository.'))
return parser
@staticmethod
def main():
version = CONF.command.version
extension = CONF.command.extension
migration_helpers.sync_database_to_version(extension, version)
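# Example invocations (illustrative; the extension name below is an assumption):
#   keystone-manage db_sync # migrate the common repository to the latest version
#   keystone-manage db_sync --extension federation # migrate one extension's repository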
class DbVersion(BaseApp):
"""Print the current migration version of the database."""
name = 'db_version'
@classmethod
def add_argument_parser(cls, subparsers):
parser = super(DbVersion, cls).add_argument_parser(subparsers)
parser.add_argument('--extension', default=None,
help=('Print the migration version of the '
'database for the specified extension. If '
'not provided, print it for the common '
'repository.'))
@staticmethod
def main():
extension = CONF.command.extension
migration_helpers.print_db_version(extension)
class BasePermissionsSetup(BaseApp):
"""Common user/group setup for file permissions."""
@classmethod
def add_argument_parser(cls, subparsers):
parser = super(BasePermissionsSetup,
cls).add_argument_parser(subparsers)
running_as_root = (os.geteuid() == 0)
parser.add_argument('--keystone-user', required=running_as_root)
parser.add_argument('--keystone-group', required=running_as_root)
return parser
@staticmethod
def get_user_group():
keystone_user_id = None
keystone_group_id = None
try:
a = CONF.command.keystone_user
if a:
keystone_user_id = utils.get_unix_user(a)[0]
except KeyError:
raise ValueError("Unknown user '%s' in --keystone-user" % a)
try:
a = CONF.command.keystone_group
if a:
keystone_group_id = utils.get_unix_group(a)[0]
except KeyError:
raise ValueError("Unknown group '%s' in --keystone-group" % a)
return keystone_user_id, keystone_group_id
class BaseCertificateSetup(BasePermissionsSetup):
"""Provides common options for certificate setup."""
@classmethod
def add_argument_parser(cls, subparsers):
parser = super(BaseCertificateSetup,
cls).add_argument_parser(subparsers)
parser.add_argument('--rebuild', default=False, action='store_true',
help=('Rebuild certificate files: erase previous '
'files and regenerate them.'))
return parser
class PKISetup(BaseCertificateSetup):
"""Set up Key pairs and certificates for token signing and verification.
This is NOT intended for production use, see Keystone Configuration
documentation for details.
"""
name = 'pki_setup'
@classmethod
def main(cls):
LOG.warn(_LW('keystone-manage pki_setup is not recommended for '
'production use.'))
keystone_user_id, keystone_group_id = cls.get_user_group()
conf_pki = openssl.ConfigurePKI(keystone_user_id, keystone_group_id,
rebuild=CONF.command.rebuild)
conf_pki.run()
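# Typical invocation (illustrative only; assumes keystone runs as the 'keystone'
# system user and group):
#   keystone-manage pki_setup --keystone-user keystone --keystone-group keystone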
class SSLSetup(BaseCertificateSetup):
"""Create key pairs and certificates for HTTPS connections.
This is NOT intended for production use, see Keystone Configuration
documentation for details.
"""
name = 'ssl_setup'
@classmethod
def main(cls):
LOG.warn(_LW('keystone-manage ssl_setup is not recommended for '
'production use.'))
keystone_user_id, keystone_group_id = cls.get_user_group()
conf_ssl = openssl.ConfigureSSL(keystone_user_id, keystone_group_id,
rebuild=CONF.command.rebuild)
conf_ssl.run()
class FernetSetup(BasePermissionsSetup):
"""Setup a key repository for Fernet tokens.
This also creates a primary key used for both creating and validating
Keystone Lightweight tokens. To improve security, you should rotate your
keys (using keystone-manage fernet_rotate, for example).
"""
name = 'fernet_setup'
@classmethod
def main(cls):
keystone_user_id, keystone_group_id = cls.get_user_group()
fernet.create_key_directory(keystone_user_id, keystone_group_id)
if fernet.validate_key_repository():
fernet.initialize_key_repository(
keystone_user_id, keystone_group_id)
class FernetRotate(BasePermissionsSetup):
"""Rotate Fernet encryption keys.
This assumes you have already run keystone-manage fernet_setup.
A new primary key is placed into rotation, which is used for new tokens.
The old primary key is demoted to secondary, which can then still be used
for validating tokens. Excess secondary keys (beyond [fernet_tokens]
max_active_keys) are revoked. Revoked keys are permanently deleted. A new
staged key will be created and used to validate tokens. The next time key
rotation takes place, the staged key will be put into rotation as the
primary key.
Rotating keys too frequently, or with [fernet_tokens] max_active_keys set
too low, will cause tokens to become invalid prior to their expiration.
"""
name = 'fernet_rotate'
@classmethod
def main(cls):
keystone_user_id, keystone_group_id = cls.get_user_group()
if fernet.validate_key_repository():
fernet.rotate_keys(keystone_user_id, keystone_group_id)
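# Typical key-management sequence (illustrative only; assumes keystone runs as
# the 'keystone' system user and group):
#   keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
#   keystone-manage fernet_rotate --keystone-user keystone --keystone-group keystone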
class TokenFlush(BaseApp):
"""Flush expired tokens from the backend."""
name = 'token_flush'
@classmethod
def main(cls):
token_manager = token.persistence.PersistenceManager()
token_manager.driver.flush_expired_tokens()
class MappingPurge(BaseApp):
"""Purge the mapping table."""
name = 'mapping_purge'
@classmethod
def add_argument_parser(cls, subparsers):
parser = super(MappingPurge, cls).add_argument_parser(subparsers)
parser.add_argument('--all', default=False, action='store_true',
help=('Purge all mappings.'))
parser.add_argument('--domain-name', default=None,
help=('Purge any mappings for the domain '
'specified.'))
parser.add_argument('--public-id', default=None,
help=('Purge the mapping for the Public ID '
'specified.'))
parser.add_argument('--local-id', default=None,
help=('Purge the mappings for the Local ID '
'specified.'))
parser.add_argument('--type', default=None, choices=['user', 'group'],
help=('Purge any mappings for the type '
'specified.'))
return parser
@staticmethod
def main():
def validate_options():
            # NOTE(henry-nash): It would be nice to use the argparse automated
# checking for this validation, but the only way I can see doing
# that is to make the default (i.e. if no optional parameters
# are specified) to purge all mappings - and that sounds too
# dangerous as a default. So we use it in a slightly
# unconventional way, where all parameters are optional, but you
# must specify at least one.
if (CONF.command.all is False and
CONF.command.domain_name is None and
CONF.command.public_id is None and
CONF.command.local_id is None and
CONF.command.type is None):
raise ValueError(_('At least one option must be provided'))
if (CONF.command.all is True and
(CONF.command.domain_name is not None or
CONF.command.public_id is not None or
CONF.command.local_id is not None or
CONF.command.type is not None)):
raise ValueError(_('--all option cannot be mixed with '
'other options'))
def get_domain_id(name):
try:
identity.Manager()
# init assignment manager to avoid KeyError in resource.core
assignment.Manager()
resource_manager = resource.Manager()
return resource_manager.driver.get_domain_by_name(name)['id']
except KeyError:
raise ValueError(_("Unknown domain '%(name)s' specified by "
"--domain-name") % {'name': name})
validate_options()
# Now that we have validated the options, we know that at least one
# option has been specified, and if it was the --all option then this
# was the only option specified.
#
# The mapping dict is used to filter which mappings are purged, so
# leaving it empty means purge them all
mapping = {}
if CONF.command.domain_name is not None:
mapping['domain_id'] = get_domain_id(CONF.command.domain_name)
if CONF.command.public_id is not None:
mapping['public_id'] = CONF.command.public_id
if CONF.command.local_id is not None:
mapping['local_id'] = CONF.command.local_id
if CONF.command.type is not None:
mapping['type'] = CONF.command.type
mapping_manager = identity.MappingManager()
mapping_manager.driver.purge_mappings(mapping)
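# Example invocations (illustrative; the domain name below is an assumption):
#   keystone-manage mapping_purge --all
#   keystone-manage mapping_purge --domain-name Default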
DOMAIN_CONF_FHEAD = 'keystone.'
DOMAIN_CONF_FTAIL = '.conf'
class DomainConfigUploadFiles(object):
def __init__(self):
super(DomainConfigUploadFiles, self).__init__()
self.load_backends()
def load_backends(self):
"""Load the backends needed for uploading domain configs.
We only need the resource and domain_config managers, but there are
some dependencies which mean we have to load the assignment and
identity managers as well.
The order of loading the backends is important, since the resource
manager depends on the assignment manager, which in turn depends on
the identity manager.
"""
identity.Manager()
assignment.Manager()
self.resource_manager = resource.Manager()
self.domain_config_manager = resource.DomainConfigManager()
def valid_options(self):
"""Validate the options, returning True if they are indeed valid.
It would be nice to use the argparse automated checking for this
validation, but the only way I can see doing that is to make the
default (i.e. if no optional parameters are specified) to upload
all configuration files - and that sounds too dangerous as a
default. So we use it in a slightly unconventional way, where all
parameters are optional, but you must specify at least one.
"""
if (CONF.command.all is False and
CONF.command.domain_name is None):
print(_('At least one option must be provided, use either '
'--all or --domain-name'))
raise ValueError
if (CONF.command.all is True and
CONF.command.domain_name is not None):
print(_('The --all option cannot be used with '
'the --domain-name option'))
raise ValueError
def upload_config_to_database(self, file_name, domain_name):
"""Upload a single config file to the database.
:param file_name: the file containing the config options
:param domain_name: the domain name
:raises: ValueError: the domain does not exist or already has domain
specific configurations defined
:raises: Exceptions from oslo config: there is an issue with options
defined in the config file or its
format
The caller of this method should catch the errors raised and handle
        appropriately so that the best user experience can be provided, both
        when a user has asked for a specific config file to be uploaded and
        when uploading all config files in a directory.
"""
try:
domain_ref = (
self.resource_manager.driver.get_domain_by_name(domain_name))
except exception.DomainNotFound:
print(_('Invalid domain name: %(domain)s found in config file '
'name: %(file)s - ignoring this file.') % {
'domain': domain_name,
'file': file_name})
raise ValueError
if self.domain_config_manager.get_config_with_sensitive_info(
domain_ref['id']):
print(_('Domain: %(domain)s already has a configuration '
'defined - ignoring file: %(file)s.') % {
'domain': domain_name,
'file': file_name})
raise ValueError
sections = {}
try:
parser = cfg.ConfigParser(file_name, sections)
parser.parse()
except Exception:
# We explicitly don't try and differentiate the error cases, in
# order to keep the code in this tool more robust as oslo.config
# changes.
print(_('Error parsing configuration file for domain: %(domain)s, '
'file: %(file)s.') % {
'domain': domain_name,
'file': file_name})
raise
for group in sections:
for option in sections[group]:
sections[group][option] = sections[group][option][0]
self.domain_config_manager.create_config(domain_ref['id'], sections)
def upload_configs_to_database(self, file_name, domain_name):
"""Upload configs from file and load into database.
This method will be called repeatedly for all the config files in the
config directory. To provide a better UX, we differentiate the error
handling in this case (versus when the user has asked for a single
config file to be uploaded).
"""
try:
self.upload_config_to_database(file_name, domain_name)
except ValueError:
# We've already given all the info we can in a message, so carry
# on to the next one
pass
except Exception:
# Some other error occurred relating to this specific config file
# or domain. Since we are trying to upload all the config files,
# we'll continue and hide this exception. However, we tell the
# user how to get more info about this error by re-running with
# just the domain at fault. When we run in single-domain mode we
# will NOT hide the exception.
            print(_('To get more detailed information on this error, re-run '
'this command for the specific domain, i.e.: '
'keystone-manage domain_config_upload --domain-name %s') %
domain_name)
pass
def read_domain_configs_from_files(self):
"""Read configs from file(s) and load into database.
The command line parameters have already been parsed and the CONF
command option will have been set. It is either set to the name of an
explicit domain, or it's None to indicate that we want all domain
config files.
"""
domain_name = CONF.command.domain_name
conf_dir = CONF.identity.domain_config_dir
if not os.path.exists(conf_dir):
print(_('Unable to locate domain config directory: %s') % conf_dir)
raise ValueError
if domain_name:
# Request is to upload the configs for just one domain
fname = DOMAIN_CONF_FHEAD + domain_name + DOMAIN_CONF_FTAIL
self.upload_config_to_database(
os.path.join(conf_dir, fname), domain_name)
return
# Request is to transfer all config files, so let's read all the
# files in the config directory, and transfer those that match the
# filename pattern of 'keystone.<domain_name>.conf'
for r, d, f in os.walk(conf_dir):
for fname in f:
if (fname.startswith(DOMAIN_CONF_FHEAD) and
fname.endswith(DOMAIN_CONF_FTAIL)):
if fname.count('.') >= 2:
self.upload_configs_to_database(
os.path.join(r, fname),
fname[len(DOMAIN_CONF_FHEAD):
-len(DOMAIN_CONF_FTAIL)])
else:
LOG.warn(_LW('Ignoring file (%s) while scanning '
'domain config directory'), fname)
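    # Illustrative example (not part of the original module): with the default
    # domain_config_dir of /etc/keystone/domains, a file named
    # 'keystone.Default.conf' would be uploaded as the domain-specific
    # configuration for the domain named 'Default'.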
def run(self):
# First off, let's just check we can talk to the domain database
try:
self.resource_manager.driver.list_domains(driver_hints.Hints())
except Exception:
# It is likely that there is some SQL or other backend error
# related to set up
print(_('Unable to access the keystone database, please check it '
'is configured correctly.'))
raise
try:
self.valid_options()
self.read_domain_configs_from_files()
except ValueError:
# We will already have printed out a nice message, so indicate
# to caller the non-success error code to be used.
return 1
class DomainConfigUpload(BaseApp):
"""Upload the domain specific configuration files to the database."""
name = 'domain_config_upload'
@classmethod
def add_argument_parser(cls, subparsers):
parser = super(DomainConfigUpload, cls).add_argument_parser(subparsers)
parser.add_argument('--all', default=False, action='store_true',
help='Upload contents of all domain specific '
'configuration files. Either use this option '
'or use the --domain-name option to choose a '
'specific domain.')
parser.add_argument('--domain-name', default=None,
help='Upload contents of the specific '
'configuration file for the given domain. '
'Either use this option or use the --all '
'option to upload contents for all domains.')
return parser
@staticmethod
def main():
dcu = DomainConfigUploadFiles()
status = dcu.run()
if status is not None:
exit(status)
class SamlIdentityProviderMetadata(BaseApp):
"""Generate Identity Provider metadata."""
name = 'saml_idp_metadata'
@staticmethod
def main():
        # NOTE(marek-denis): Since federation is currently an extension, import
# corresponding modules only when they are really going to be used.
from keystone.contrib.federation import idp
metadata = idp.MetadataGenerator().generate_metadata()
print(metadata.to_string())
CMDS = [
DbSync,
DbVersion,
DomainConfigUpload,
FernetRotate,
FernetSetup,
MappingPurge,
PKISetup,
SamlIdentityProviderMetadata,
SSLSetup,
TokenFlush,
]
def add_command_parsers(subparsers):
for cmd in CMDS:
cmd.add_argument_parser(subparsers)
command_opt = cfg.SubCommandOpt('command',
title='Commands',
help='Available commands',
handler=add_command_parsers)
def main(argv=None, config_files=None):
CONF.register_cli_opt(command_opt)
config.configure()
sql.initialize()
config.set_default_for_default_log_levels()
CONF(args=argv[1:],
project='keystone',
version=pbr.version.VersionInfo('keystone').version_string(),
usage='%(prog)s [' + '|'.join([cmd.name for cmd in CMDS]) + ']',
default_config_files=config_files)
config.setup_logging()
CONF.command.cmd_class.main()
| apache-2.0 | 6,530,400,440,548,993,000 | 37.176174 | 79 | 0.598646 | false |
phalt/django | tests/forms_tests/tests/tests.py | 6 | 16659 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
from django.core.files.uploadedfile import SimpleUploadedFile
from django.db import models
from django.forms import (
CharField, FileField, Form, ModelChoiceField, ModelForm,
)
from django.forms.models import ModelFormMetaclass
from django.test import SimpleTestCase, TestCase
from django.utils import six
from ..models import (
BoundaryModel, ChoiceFieldModel, ChoiceModel, ChoiceOptionModel, Defaults,
FileModel, Group, OptionalMultiChoiceModel,
)
class ChoiceFieldForm(ModelForm):
class Meta:
model = ChoiceFieldModel
fields = '__all__'
class OptionalMultiChoiceModelForm(ModelForm):
class Meta:
model = OptionalMultiChoiceModel
fields = '__all__'
class ChoiceFieldExclusionForm(ModelForm):
multi_choice = CharField(max_length=50)
class Meta:
exclude = ['multi_choice']
model = ChoiceFieldModel
class EmptyCharLabelChoiceForm(ModelForm):
class Meta:
model = ChoiceModel
fields = ['name', 'choice']
class EmptyIntegerLabelChoiceForm(ModelForm):
class Meta:
model = ChoiceModel
fields = ['name', 'choice_integer']
class EmptyCharLabelNoneChoiceForm(ModelForm):
class Meta:
model = ChoiceModel
fields = ['name', 'choice_string_w_none']
class FileForm(Form):
file1 = FileField()
class TestModelChoiceField(TestCase):
def test_choices_not_fetched_when_not_rendering(self):
"""
Generating choices for ModelChoiceField should require 1 query (#12510).
"""
self.groups = [Group.objects.create(name=name) for name in 'abc']
# only one query is required to pull the model from DB
with self.assertNumQueries(1):
field = ModelChoiceField(Group.objects.order_by('-name'))
self.assertEqual('a', field.clean(self.groups[0].pk).name)
def test_queryset_manager(self):
f = ModelChoiceField(ChoiceOptionModel.objects)
choice = ChoiceOptionModel.objects.create(name="choice 1")
self.assertEqual(list(f.choices), [('', '---------'), (choice.pk, str(choice))])
class TestTicket14567(TestCase):
"""
The return values of ModelMultipleChoiceFields are QuerySets
"""
def test_empty_queryset_return(self):
"If a model's ManyToManyField has blank=True and is saved with no data, a queryset is returned."
option = ChoiceOptionModel.objects.create(name='default')
form = OptionalMultiChoiceModelForm({'multi_choice_optional': '', 'multi_choice': [option.pk]})
self.assertTrue(form.is_valid())
# The empty value is a QuerySet
self.assertIsInstance(form.cleaned_data['multi_choice_optional'], models.query.QuerySet)
# While we're at it, test whether a QuerySet is returned if there *is* a value.
self.assertIsInstance(form.cleaned_data['multi_choice'], models.query.QuerySet)
class ModelFormCallableModelDefault(TestCase):
def test_no_empty_option(self):
"If a model's ForeignKey has blank=False and a default, no empty option is created (Refs #10792)."
option = ChoiceOptionModel.objects.create(name='default')
choices = list(ChoiceFieldForm().fields['choice'].choices)
self.assertEqual(len(choices), 1)
self.assertEqual(choices[0], (option.pk, six.text_type(option)))
def test_callable_initial_value(self):
"The initial value for a callable default returning a queryset is the pk (refs #13769)"
ChoiceOptionModel.objects.create(id=1, name='default')
ChoiceOptionModel.objects.create(id=2, name='option 2')
ChoiceOptionModel.objects.create(id=3, name='option 3')
self.assertHTMLEqual(
ChoiceFieldForm().as_p(),
"""<p><label for="id_choice">Choice:</label> <select name="choice" id="id_choice" required>
<option value="1" selected>ChoiceOption 1</option>
<option value="2">ChoiceOption 2</option>
<option value="3">ChoiceOption 3</option>
</select><input type="hidden" name="initial-choice" value="1" id="initial-id_choice" /></p>
<p><label for="id_choice_int">Choice int:</label> <select name="choice_int" id="id_choice_int" required>
<option value="1" selected>ChoiceOption 1</option>
<option value="2">ChoiceOption 2</option>
<option value="3">ChoiceOption 3</option>
</select><input type="hidden" name="initial-choice_int" value="1" id="initial-id_choice_int" /></p>
<p><label for="id_multi_choice">Multi choice:</label>
<select multiple="multiple" name="multi_choice" id="id_multi_choice" required>
<option value="1" selected>ChoiceOption 1</option>
<option value="2">ChoiceOption 2</option>
<option value="3">ChoiceOption 3</option>
</select><input type="hidden" name="initial-multi_choice" value="1" id="initial-id_multi_choice_0" /></p>
<p><label for="id_multi_choice_int">Multi choice int:</label>
<select multiple="multiple" name="multi_choice_int" id="id_multi_choice_int" required>
<option value="1" selected>ChoiceOption 1</option>
<option value="2">ChoiceOption 2</option>
<option value="3">ChoiceOption 3</option>
</select><input type="hidden" name="initial-multi_choice_int" value="1" id="initial-id_multi_choice_int_0" /></p>"""
)
def test_initial_instance_value(self):
"Initial instances for model fields may also be instances (refs #7287)"
ChoiceOptionModel.objects.create(id=1, name='default')
obj2 = ChoiceOptionModel.objects.create(id=2, name='option 2')
obj3 = ChoiceOptionModel.objects.create(id=3, name='option 3')
self.assertHTMLEqual(
ChoiceFieldForm(initial={
'choice': obj2,
'choice_int': obj2,
'multi_choice': [obj2, obj3],
'multi_choice_int': ChoiceOptionModel.objects.exclude(name="default"),
}).as_p(),
"""<p><label for="id_choice">Choice:</label> <select name="choice" id="id_choice" required>
<option value="1">ChoiceOption 1</option>
<option value="2" selected>ChoiceOption 2</option>
<option value="3">ChoiceOption 3</option>
</select><input type="hidden" name="initial-choice" value="2" id="initial-id_choice" /></p>
<p><label for="id_choice_int">Choice int:</label> <select name="choice_int" id="id_choice_int" required>
<option value="1">ChoiceOption 1</option>
<option value="2" selected>ChoiceOption 2</option>
<option value="3">ChoiceOption 3</option>
</select><input type="hidden" name="initial-choice_int" value="2" id="initial-id_choice_int" /></p>
<p><label for="id_multi_choice">Multi choice:</label>
<select multiple="multiple" name="multi_choice" id="id_multi_choice" required>
<option value="1">ChoiceOption 1</option>
<option value="2" selected>ChoiceOption 2</option>
<option value="3" selected>ChoiceOption 3</option>
</select><input type="hidden" name="initial-multi_choice" value="2" id="initial-id_multi_choice_0" />
<input type="hidden" name="initial-multi_choice" value="3" id="initial-id_multi_choice_1" /></p>
<p><label for="id_multi_choice_int">Multi choice int:</label>
<select multiple="multiple" name="multi_choice_int" id="id_multi_choice_int" required>
<option value="1">ChoiceOption 1</option>
<option value="2" selected>ChoiceOption 2</option>
<option value="3" selected>ChoiceOption 3</option>
</select><input type="hidden" name="initial-multi_choice_int" value="2" id="initial-id_multi_choice_int_0" />
<input type="hidden" name="initial-multi_choice_int" value="3" id="initial-id_multi_choice_int_1" /></p>"""
)
class FormsModelTestCase(TestCase):
def test_unicode_filename(self):
# FileModel with unicode filename and data #########################
file1 = SimpleUploadedFile('我隻氣墊船裝滿晒鱔.txt', 'मेरी मँडराने वाली नाव सर्पमीनों से भरी ह'.encode('utf-8'))
f = FileForm(data={}, files={'file1': file1}, auto_id=False)
self.assertTrue(f.is_valid())
self.assertIn('file1', f.cleaned_data)
m = FileModel.objects.create(file=f.cleaned_data['file1'])
self.assertEqual(m.file.name, 'tests/\u6211\u96bb\u6c23\u588a\u8239\u88dd\u6eff\u6652\u9c54.txt')
m.delete()
def test_boundary_conditions(self):
# Boundary conditions on a PositiveIntegerField #########################
class BoundaryForm(ModelForm):
class Meta:
model = BoundaryModel
fields = '__all__'
f = BoundaryForm({'positive_integer': 100})
self.assertTrue(f.is_valid())
f = BoundaryForm({'positive_integer': 0})
self.assertTrue(f.is_valid())
f = BoundaryForm({'positive_integer': -100})
self.assertFalse(f.is_valid())
def test_formfield_initial(self):
# Formfield initial values ########
# If the model has default values for some fields, they are used as the formfield
# initial values.
class DefaultsForm(ModelForm):
class Meta:
model = Defaults
fields = '__all__'
self.assertEqual(DefaultsForm().fields['name'].initial, 'class default value')
self.assertEqual(DefaultsForm().fields['def_date'].initial, datetime.date(1980, 1, 1))
self.assertEqual(DefaultsForm().fields['value'].initial, 42)
r1 = DefaultsForm()['callable_default'].as_widget()
r2 = DefaultsForm()['callable_default'].as_widget()
self.assertNotEqual(r1, r2)
# In a ModelForm that is passed an instance, the initial values come from the
# instance's values, not the model's defaults.
foo_instance = Defaults(name='instance value', def_date=datetime.date(1969, 4, 4), value=12)
instance_form = DefaultsForm(instance=foo_instance)
self.assertEqual(instance_form.initial['name'], 'instance value')
self.assertEqual(instance_form.initial['def_date'], datetime.date(1969, 4, 4))
self.assertEqual(instance_form.initial['value'], 12)
from django.forms import CharField
class ExcludingForm(ModelForm):
name = CharField(max_length=255)
class Meta:
model = Defaults
exclude = ['name', 'callable_default']
f = ExcludingForm({'name': 'Hello', 'value': 99, 'def_date': datetime.date(1999, 3, 2)})
self.assertTrue(f.is_valid())
self.assertEqual(f.cleaned_data['name'], 'Hello')
obj = f.save()
self.assertEqual(obj.name, 'class default value')
self.assertEqual(obj.value, 99)
self.assertEqual(obj.def_date, datetime.date(1999, 3, 2))
class RelatedModelFormTests(SimpleTestCase):
def test_invalid_loading_order(self):
"""
        A ModelForm whose model has a ForeignKey to a model that is not yet
        defined raises ValueError (issue #10405).
"""
class A(models.Model):
ref = models.ForeignKey("B", models.CASCADE)
class Meta:
model = A
fields = '__all__'
with self.assertRaises(ValueError):
ModelFormMetaclass(str('Form'), (ModelForm,), {'Meta': Meta})
class B(models.Model):
pass
def test_valid_loading_order(self):
"""
        Once the ForeignKey's target model has been defined, the ModelForm
        class can be created successfully (issue #10405).
"""
class C(models.Model):
ref = models.ForeignKey("D", models.CASCADE)
class D(models.Model):
pass
class Meta:
model = C
fields = '__all__'
self.assertTrue(issubclass(ModelFormMetaclass(str('Form'), (ModelForm,), {'Meta': Meta}), ModelForm))
class ManyToManyExclusionTestCase(TestCase):
def test_m2m_field_exclusion(self):
# Issue 12337. save_instance should honor the passed-in exclude keyword.
opt1 = ChoiceOptionModel.objects.create(id=1, name='default')
opt2 = ChoiceOptionModel.objects.create(id=2, name='option 2')
opt3 = ChoiceOptionModel.objects.create(id=3, name='option 3')
initial = {
'choice': opt1,
'choice_int': opt1,
}
data = {
'choice': opt2.pk,
'choice_int': opt2.pk,
'multi_choice': 'string data!',
'multi_choice_int': [opt1.pk],
}
instance = ChoiceFieldModel.objects.create(**initial)
instance.multi_choice.set([opt2, opt3])
instance.multi_choice_int.set([opt2, opt3])
form = ChoiceFieldExclusionForm(data=data, instance=instance)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['multi_choice'], data['multi_choice'])
form.save()
self.assertEqual(form.instance.choice.pk, data['choice'])
self.assertEqual(form.instance.choice_int.pk, data['choice_int'])
self.assertEqual(list(form.instance.multi_choice.all()), [opt2, opt3])
self.assertEqual([obj.pk for obj in form.instance.multi_choice_int.all()], data['multi_choice_int'])
class EmptyLabelTestCase(TestCase):
def test_empty_field_char(self):
f = EmptyCharLabelChoiceForm()
self.assertHTMLEqual(
f.as_p(),
"""<p><label for="id_name">Name:</label> <input id="id_name" maxlength="10" name="name" type="text" required /></p>
<p><label for="id_choice">Choice:</label> <select id="id_choice" name="choice">
<option value="" selected>No Preference</option>
<option value="f">Foo</option>
<option value="b">Bar</option>
</select></p>"""
)
def test_empty_field_char_none(self):
f = EmptyCharLabelNoneChoiceForm()
self.assertHTMLEqual(
f.as_p(),
"""<p><label for="id_name">Name:</label> <input id="id_name" maxlength="10" name="name" type="text" required /></p>
<p><label for="id_choice_string_w_none">Choice string w none:</label>
<select id="id_choice_string_w_none" name="choice_string_w_none">
<option value="" selected>No Preference</option>
<option value="f">Foo</option>
<option value="b">Bar</option>
</select></p>"""
)
def test_save_empty_label_forms(self):
# Saving a form with a blank choice results in the expected
# value being stored in the database.
tests = [
(EmptyCharLabelNoneChoiceForm, 'choice_string_w_none', None),
(EmptyIntegerLabelChoiceForm, 'choice_integer', None),
(EmptyCharLabelChoiceForm, 'choice', ''),
]
for form, key, expected in tests:
f = form({'name': 'some-key', key: ''})
self.assertTrue(f.is_valid())
m = f.save()
self.assertEqual(expected, getattr(m, key))
self.assertEqual('No Preference',
getattr(m, 'get_{}_display'.format(key))())
def test_empty_field_integer(self):
f = EmptyIntegerLabelChoiceForm()
self.assertHTMLEqual(
f.as_p(),
"""<p><label for="id_name">Name:</label> <input id="id_name" maxlength="10" name="name" type="text" required /></p>
<p><label for="id_choice_integer">Choice integer:</label>
<select id="id_choice_integer" name="choice_integer">
<option value="" selected>No Preference</option>
<option value="1">Foo</option>
<option value="2">Bar</option>
</select></p>"""
)
def test_get_display_value_on_none(self):
m = ChoiceModel.objects.create(name='test', choice='', choice_integer=None)
self.assertIsNone(m.choice_integer)
self.assertEqual('No Preference', m.get_choice_integer_display())
def test_html_rendering_of_prepopulated_models(self):
none_model = ChoiceModel(name='none-test', choice_integer=None)
f = EmptyIntegerLabelChoiceForm(instance=none_model)
self.assertHTMLEqual(
f.as_p(),
"""<p><label for="id_name">Name:</label>
<input id="id_name" maxlength="10" name="name" type="text" value="none-test" required /></p>
<p><label for="id_choice_integer">Choice integer:</label>
<select id="id_choice_integer" name="choice_integer">
<option value="" selected>No Preference</option>
<option value="1">Foo</option>
<option value="2">Bar</option>
</select></p>"""
)
foo_model = ChoiceModel(name='foo-test', choice_integer=1)
f = EmptyIntegerLabelChoiceForm(instance=foo_model)
self.assertHTMLEqual(
f.as_p(),
"""<p><label for="id_name">Name:</label>
<input id="id_name" maxlength="10" name="name" type="text" value="foo-test" required /></p>
<p><label for="id_choice_integer">Choice integer:</label>
<select id="id_choice_integer" name="choice_integer">
<option value="">No Preference</option>
<option value="1" selected>Foo</option>
<option value="2">Bar</option>
</select></p>"""
)
| bsd-3-clause | -8,607,105,577,402,918,000 | 41.068528 | 127 | 0.643379 | false |
femtotrader/rabbit4mt4 | receive/Python/receive_logs.py | 1 | 2274 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
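"""Consume log messages from the "topic_logs" RabbitMQ exchange and re-log them locally.

Each message's routing key identifies the originating terminal and the log level.
"""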
import logging
import traceback
import logging.config
import pika
import sys
import argparse
import datetime
def get_logging_level_from_name(name):
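    """Map a level name such as "info" or "ERROR" to a ``logging`` level constant."""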
    try:
        name = name.upper()
    except AttributeError:
        # Non-string values (e.g. None) have no upper(); fall back to CRITICAL.
        name = "CRITICAL"
    level = logging.getLevelName(name)
    if isinstance(level, int):
        return level
    return logging.CRITICAL
def callback(ch, method, properties, body):
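    """Re-log a consumed message using the level encoded in its routing key.

    Routing keys have the form "<terminal_id>.events.logs.<logger>.<level>",
    e.g. "mt4_demo01_123456.events.logs.main.info" (see the --binding_keys help).
    """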
try:
routing_key = method.routing_key
t_routing_key = routing_key.split(".")
terminal_id = t_routing_key[0]
level = get_logging_level_from_name(t_routing_key[-1])
#logging.info("%s - %s" % (routing_key, body))
logging.log(level, "%s - %s" % (terminal_id, body))
except:
logging.error(traceback.format_exc())
def main(args):
connection = pika.BlockingConnection(pika.ConnectionParameters(
host='localhost'))
channel = connection.channel()
exchange = 'topic_logs'
channel.exchange_declare(exchange=exchange,
type='topic')
result = channel.queue_declare(exclusive=True)
queue_name = result.method.queue
binding_keys = args.binding_keys.split(',')
for binding_key in binding_keys:
channel.queue_bind(exchange=exchange,
queue=queue_name,
routing_key=binding_key)
logging.info(' [*] Waiting for logs. To exit press CTRL+C')
channel.basic_consume(callback,
queue=queue_name,
no_ack=True)
channel.start_consuming()
if __name__ == '__main__':
logging.config.fileConfig("logging.conf")
logger = logging.getLogger("simpleExample")
parser = argparse.ArgumentParser()
parser.add_argument("--binding_keys", help="binding keys (use comma ',' to split several binding keys), default is '#' to receive any message, binding_key can be 'mt4_demo01_123456.events.logs.*.*' or 'mt4_demo01_123456.events.logs.main.debug', 'mt4_demo01_123456.events.logs.main.info', 'mt4_demo01_123456.events.logs.main.warning', 'mt4_demo01_123456.events.logs.main.error' or 'mt4_demo01_123456.events.logs.main.critical'", default="#")
args = parser.parse_args()
main(args) | gpl-2.0 | 6,788,723,334,755,035,000 | 30.164384 | 444 | 0.633685 | false |
pganssle/bdateutil | tests.py | 1 | 17387 | # bdateutil
# ---------
# Adds business day logic and improved data type flexibility to
# python-dateutil. 100% backwards compatible with python-dateutil,
# simply replace dateutil imports with bdateutil.
#
# Author: ryanss <[email protected]>
# Website: https://github.com/ryanss/bdateutil
# License: MIT (see LICENSE file)
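#
# Illustrative note (not part of the original header): the tests below show
# drop-in usage such as ``date(2014, 1, 3) + relativedelta(bdays=+2)``, i.e.
# ordinary dateutil-style arithmetic extended with business-day keywords.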
import datetime as dt
import unittest
import holidays
from bdateutil import isbday
from bdateutil import relativedelta
from bdateutil import parse
from bdateutil.rrule import *
from bdateutil import date, datetime, time
from testdateutil import *
class TestIsBday(unittest.TestCase):
def test_isbday(self):
self.assertFalse(isbday(date(2014, 1, 4)))
self.assertFalse(isbday("2014-01-04"))
self.assertTrue(isbday(date(2014, 1, 1)))
self.assertTrue(isbday("2014-01-01"))
self.assertFalse(isbday(date(2014, 1, 1), holidays=holidays.US()))
self.assertTrue(isbday(datetime(2014, 1, 1, 16, 30)))
self.assertTrue(isbday(datetime(2014, 1, 1, 17, 30)))
self.assertFalse(isbday(datetime(2014, 1, 1, 16, 30),
holidays=holidays.US()))
self.assertFalse(isbday(datetime(2014, 1, 1, 17, 30),
holidays=holidays.US()))
isbday.holidays = holidays.US()
self.assertFalse(isbday(date(2014, 1, 1)))
self.assertFalse(isbday(date(2014, 7, 4)))
self.assertTrue(isbday(date(2014, 7, 4), holidays=holidays.CA()))
class TestRelativeDelta(unittest.TestCase):
def test_init(self):
self.assertEqual(relativedelta(date(2014, 1, 7), date(2014, 1, 3)),
relativedelta(days=4, bdays=2))
self.assertEqual(relativedelta(date(2014, 1, 31), date(2014, 1, 1)),
relativedelta(days=30, bdays=22))
self.assertEqual(relativedelta(date(2014, 2, 1), date(2014, 1, 1)),
relativedelta(months=1, bdays=23))
self.assertEqual(relativedelta(date(2014, 2, 2), date(2014, 1, 1)),
relativedelta(months=1, days=1, bdays=23))
self.assertEqual(relativedelta(date(2014, 1, 1), date(2014, 2, 2)),
relativedelta(months=-1, days=-1, bdays=-23))
def test_init_time(self):
self.assertEqual(relativedelta(datetime(2015, 1, 5, 9, 15),
datetime(2015, 1, 2, 16, 45)),
relativedelta(days=2, hours=16, minutes=30,
bminutes=30))
self.assertEqual(relativedelta(datetime(2015, 1, 20, 21, 22),
datetime(2015, 1, 9, 3, 0)),
relativedelta(days=11, hours=18, minutes=22,
bdays=7, bhours=8, bminutes=0))
self.assertEqual(relativedelta(datetime(2015, 1, 20, 21, 22),
datetime(2015, 1, 9, 3, 0),
holidays=holidays.US()),
relativedelta(days=11, hours=18, minutes=22,
bdays=6, bhours=8, bminutes=0))
relativedelta.holidays = holidays.US()
self.assertEqual(relativedelta(datetime(2015, 1, 20, 21, 22),
datetime(2015, 1, 9, 3, 0)),
relativedelta(days=11, hours=18, minutes=22,
bdays=6, bhours=8, bminutes=0))
del relativedelta.holidays
self.assertEqual(relativedelta(time(3, 40), time(2, 37)),
relativedelta(hours=1, minutes=3))
def test_add(self):
rd1 = relativedelta(years=+1, months=+2, bdays=+3, days=+4,
bhours=+5, bminutes=+6, bseconds=+7,
hours=+8, minutes=+9, seconds=+10)
rd2 = relativedelta(years=+10, months=-9, bdays=+8, days=-7,
bhours=+6, bminutes=-5, bseconds=+4,
hours=-3, minutes=+2, seconds=-1)
rd3 = relativedelta(years=+11, months=-7, bdays=+11, days=-3,
bhours=+11, bminutes=+1, bseconds=+11,
hours=+5, minutes=+11, seconds=+9)
self.assertEqual(rd1 + rd2, rd3)
self.assertEqual(relativedelta(bdays=3) + date(2014, 1, 3),
date(2014, 1, 8))
rd4 = relativedelta(years=+1, months=+2, days=+1)
rd5 = relativedelta(years=+12, months=-5, bdays=+11, days=-2,
bhours=+11, bminutes=+1, bseconds=+11,
hours=+5, minutes=+11, seconds=+9)
self.assertEqual(rd3 + rd4, rd5)
self.assertEqual("2014-01-01" + relativedelta(weekday=FR),
datetime(2014, 1, 3))
self.assertEqual("2014-11-15" + relativedelta(bdays=1),
datetime(2014, 11, 18))
def test_bdays_zero(self):
self.assertEqual("2014-11-15" + relativedelta(bdays=0),
datetime(2014, 11, 17))
self.assertEqual("2014-11-17" + relativedelta(bdays=0),
datetime(2014, 11, 17))
self.assertEqual("2014-11-15" - relativedelta(bdays=0),
datetime(2014, 11, 14))
self.assertEqual("2014-11-14" - relativedelta(bdays=0),
datetime(2014, 11, 14))
def test_radd(self):
self.assertEqual(date(2014, 1, 3) + relativedelta(bdays=2),
date(2014, 1, 7))
self.assertEqual(date(2014, 1, 7) + relativedelta(bdays=-2),
date(2014, 1, 3))
self.assertEqual(date(2014, 2, 3) + relativedelta(bdays=-19),
date(2014, 1, 7))
self.assertEqual(date(2014, 1, 3) + relativedelta(bdays=1.5),
datetime(2014, 1, 6, 13, 0))
def test_radd_time(self):
self.assertEqual("2015-01-02 16:45" + relativedelta(bminutes=+30),
datetime(2015, 1, 5, 9, 15))
self.assertEqual(date(2015, 1, 2) + relativedelta(bminutes=+30),
datetime(2015, 1, 2, 9, 30))
self.assertEqual(date(2014, 1, 3) + relativedelta(bdays=1, bhours=4),
datetime(2014, 1, 6, 13, 0))
relativedelta.btstart = time(7, 30)
self.assertEqual("2015-01-02 16:45" + relativedelta(bminutes=+30),
datetime(2015, 1, 5, 7, 45))
self.assertEqual("2015-01-02 16:45" + relativedelta(bhours=+0.5),
datetime(2015, 1, 5, 7, 45))
del relativedelta.btstart
def test_sub(self):
rd1 = relativedelta(years=+1, months=+2, bdays=+3, days=+4,
bhours=+5, bminutes=+6, bseconds=+7,
hours=+8, minutes=+9, seconds=+10)
rd2 = relativedelta(years=+10, months=-9, bdays=+8, days=-7,
bhours=+6, bminutes=-5, bseconds=+4,
hours=-3, minutes=+2, seconds=-1)
rd3 = relativedelta(years=-9, months=+11, bdays=-5, days=+11,
bhours=-1, bminutes=+11, bseconds=+3,
hours=+11, minutes=+7, seconds=+11)
self.assertEqual(rd1 - rd2, rd3)
def test_rsub(self):
self.assertEqual(date(2014, 1, 7) - relativedelta(bdays=2),
date(2014, 1, 3))
self.assertEqual(date(2014, 1, 3) - relativedelta(bdays=-2),
date(2014, 1, 7))
self.assertEqual(date(2014, 2, 3) - relativedelta(bdays=19),
date(2014, 1, 7))
self.assertEqual("2014-11-15" - relativedelta(bdays=1),
datetime(2014, 11, 14))
self.assertEqual(date.today() - relativedelta(bdays=+45),
date.today() + relativedelta(bdays=-45))
def test_neg(self):
self.assertEqual(-relativedelta(years=+1, bdays=-3),
relativedelta(years=-1, bdays=+3))
def test_bool(self):
self.assertTrue(relativedelta(bdays=1))
self.assertTrue(relativedelta(days=1))
self.assertFalse(relativedelta())
def test_mul(self):
self.assertEqual(relativedelta(years=+1, bdays=-3) * 3,
relativedelta(years=+3, bdays=-9))
self.assertEqual(relativedelta(years=+1, bdays=-3) * -3,
relativedelta(years=-3, bdays=+9))
self.assertEqual(relativedelta(years=+1, bdays=-3) * 0,
relativedelta(years=0, bdays=0))
def test_rmul(self):
self.assertEqual(3 * relativedelta(years=+1, bdays=-3),
relativedelta(years=+3, bdays=-9))
self.assertEqual(-3 * relativedelta(years=+1, bdays=-3),
relativedelta(years=-3, bdays=+9))
self.assertEqual(0 * relativedelta(years=+1, bdays=-3),
relativedelta(years=0, bdays=0))
def test_eq(self):
r1 = relativedelta(years=1, months=2, days=3, bdays=1,
hours=4, minutes=5, seconds=6, microseconds=7)
r2 = relativedelta(years=1, months=2, days=3, bdays=1,
hours=4, minutes=5, seconds=6, microseconds=7)
self.assertEqual(r1, r2)
self.assertTrue(r1 == r2)
r2.days = 4
self.assertNotEqual(r1, r2)
self.assertFalse(r1 == r2)
r2.days = 3
r2.bdays = 0
self.assertNotEqual(r1, r2)
self.assertFalse(r1 == r2)
self.assertEqual(relativedelta(), relativedelta())
self.assertTrue(relativedelta() == relativedelta())
self.assertNotEqual(relativedelta(days=1), relativedelta(bdays=1))
self.assertFalse(relativedelta() == relativedelta(months=1))
self.assertNotEqual(relativedelta(days=1), relativedelta(bdays=1))
self.assertFalse(relativedelta() == relativedelta(months=1))
def test_ne(self):
r1 = relativedelta(years=1, months=2, days=3, bdays=1,
hours=4, minutes=5, seconds=6, microseconds=7)
r2 = relativedelta(years=1, months=2, days=3, bdays=1,
hours=4, minutes=5, seconds=6, microseconds=7)
self.assertFalse(r1 != r2)
r2.days = 4
self.assertTrue(r1 != r2)
r2.days = 3
r2.bdays = 0
self.assertTrue(r1 != r2)
self.assertFalse(relativedelta() != relativedelta())
self.assertTrue(relativedelta() != relativedelta(months=1))
self.assertTrue(relativedelta() != relativedelta(months=1))
def test_div(self):
self.assertEqual(relativedelta(years=+3, bdays=-9) / 3,
relativedelta(years=+1, bdays=-3))
self.assertEqual(relativedelta(years=+3, bdays=-9) / -3,
relativedelta(years=-1, bdays=+3))
self.assertRaises(ZeroDivisionError,
lambda: relativedelta(bdays=-3) / 0)
def test_truediv(self):
self.assertEqual(relativedelta(years=+4, bdays=-10) / 3.0,
relativedelta(years=+1, bdays=-3))
def test_repr(self):
rd1 = relativedelta(years=+1, months=+2, days=-3)
self.assertEqual(str(rd1),
"relativedelta(years=+1, months=+2, days=-3)")
rd2 = relativedelta(years=+1, months=+2, bdays=-7)
self.assertEqual(str(rd2),
"relativedelta(years=+1, months=+2, bdays=-7)")
rd3 = relativedelta(years=-1, months=-2, bdays=+7)
self.assertEqual(str(rd3),
"relativedelta(years=-1, months=-2, bdays=+7)")
rd4 = relativedelta(year=2014, month=1, day=2)
self.assertEqual(str(rd4),
"relativedelta(year=2014, month=1, day=2)")
class TestParser(unittest.TestCase):
def test_timestamp(self):
self.assertEqual(parse(1388577600).date(), date(2014, 1, 1))
def test_parserinfo(self):
self.assertEqual(parse("1/2/2014"), datetime(2014, 1, 2))
self.assertEqual(parse(b"1/2/2014"), datetime(2014, 1, 2))
self.assertEqual(parse("1/2/2014", dayfirst=True),
datetime(2014, 2, 1))
self.assertEqual(parse("1/2/2014", parserinfo(dayfirst=True)),
datetime(2014, 2, 1))
def test_exceptions(self):
self.assertRaises(ValueError, lambda: parse("abc"))
self.assertRaises(TypeError, lambda: parse(['a', 'b', 'c']))
class TestRRule(unittest.TestCase):
def test_bdaily(self):
start = parse("2014-01-01")
self.assertEqual(list(rrule(BDAILY, count=4, dtstart=start)),
[datetime(2014, 1, 1, 0, 0),
datetime(2014, 1, 2, 0, 0),
datetime(2014, 1, 3, 0, 0),
datetime(2014, 1, 6, 0, 0)])
until = parse("2014-01-09")
self.assertEqual(list(rrule(BDAILY, dtstart=start, until=until)),
[datetime(2014, 1, 1, 0, 0),
datetime(2014, 1, 2, 0, 0),
datetime(2014, 1, 3, 0, 0),
datetime(2014, 1, 6, 0, 0),
datetime(2014, 1, 7, 0, 0),
datetime(2014, 1, 8, 0, 0),
datetime(2014, 1, 9, 0, 0)])
def test_parse(self):
self.assertEqual(list(rrule(BDAILY, count=4, dtstart="2014-01-01")),
[datetime(2014, 1, 1, 0, 0),
datetime(2014, 1, 2, 0, 0),
datetime(2014, 1, 3, 0, 0),
datetime(2014, 1, 6, 0, 0)])
self.assertEqual(list(rrule(BDAILY, count=4, dtstart="2014-01-01",
until="01/04/2014")),
[datetime(2014, 1, 1, 0, 0),
datetime(2014, 1, 2, 0, 0),
datetime(2014, 1, 3, 0, 0)])
def test_holidays(self):
self.assertEqual(list(rrule(BDAILY, count=4, dtstart="2015-07-01")),
[datetime(2015, 7, 1, 0, 0),
datetime(2015, 7, 2, 0, 0),
datetime(2015, 7, 3, 0, 0),
datetime(2015, 7, 6, 0, 0)])
rrule.holidays = holidays.US()
self.assertEqual(list(rrule(BDAILY, count=4, dtstart="2015-07-01")),
[datetime(2015, 7, 1, 0, 0),
datetime(2015, 7, 2, 0, 0),
datetime(2015, 7, 6, 0, 0),
datetime(2015, 7, 7, 0, 0)])
self.assertEqual(list(rrule(BDAILY, count=4, dtstart="2015-07-01",
holidays=holidays.CA())),
[datetime(2015, 7, 2, 0, 0),
datetime(2015, 7, 3, 0, 0),
datetime(2015, 7, 6, 0, 0),
datetime(2015, 7, 7, 0, 0)])
del rrule.holidays
class TestDateTime(unittest.TestCase):
def test_date(self):
self.assertEqual(date("2015-03-25"), dt.date(2015, 3, 25))
self.assertEqual(date("1/2/2014"), dt.date(2014, 1, 2))
self.assertEqual(date(1388577600), dt.date(2014, 1, 1))
self.assertRaises(ValueError, lambda: date("abc"))
self.assertRaises(TypeError, lambda: date(['a', 'b', 'c']))
self.assertEqual(date(2015, 2, 99), date(2015, 2, 28))
self.assertEqual(date.today(), dt.date.today())
self.assertEqual(date.today(days=+1),
dt.date.today() + relativedelta(days=+1))
self.assertEqual(date.today(bdays=+200, holidays=holidays.US()),
dt.date.today()
+ relativedelta(bdays=+200, holidays=holidays.US()))
relativedelta.holidays = holidays.US()
self.assertEqual(date.today(bdays=+200),
dt.date.today() + relativedelta(bdays=+200))
del relativedelta.holidays
def test_datetime(self):
self.assertEqual(datetime("2015-03-25 12:34"),
dt.datetime(2015, 3, 25, 12, 34))
self.assertEqual(datetime(2015, 3, 99, 23, 45),
datetime(2015, 3, 31, 23, 45))
self.assertEqual(datetime.now().date(), dt.datetime.now().date())
self.assertEqual(datetime.now(bdays=-45).date(),
(dt.datetime.now() - relativedelta(bdays=45)).date())
def test_time(self):
self.assertEqual(time("12:45:54"), time(12, 45, 54))
self.assertEqual(time("2:30 PM"), time(14, 30))
self.assertEqual(relativedelta(time("3:40"), time(2, 30)),
relativedelta(hours=1, minutes=10))
self.assertEqual(relativedelta("3:40", time(2, 30)),
relativedelta(hours=1, minutes=10))
self.assertEqual(relativedelta(time(2, 30), time(3, 40)),
relativedelta(hours=-1, minutes=-10))
def test_eomday(self):
self.assertEqual(date("2015-02-15").eomday, dt.date(2015, 2, 28))
self.assertEqual(datetime("2015-03-01 12:34").eomday,
dt.datetime(2015, 3, 31, 12, 34))
if __name__ == "__main__":
unittest.main()
| mit | -2,911,864,845,392,492,500 | 46.247283 | 78 | 0.525047 | false |
ernw/dizzy | dizzy/interaction_state.py | 1 | 2155 | # interaction_state.py
#
# Copyright 2018 Daniel Mende <[email protected]>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from dizzy.state import State
class InteractionState(State):
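    """
    State wrapper used for interactions: next() mutates the dizz object
    before evaluating its functions, and reset() reverts the current state
    to the value captured right after the last mutation.
    """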
def __init__(self, obj):
State.__init__(self, obj)
def next(self):
        # Mutate the dizz object before the dizz functions are called
        self.bak = self.iter.mutate()
        # Call the dizz functions and store the result as the current state
self.cur = self.iter.call_functions()
def reset(self):
self.iter.reset()
self.cur = self.bak
| bsd-3-clause | -2,738,729,564,241,336,000 | 45.847826 | 80 | 0.685847 | false |
xuxiao19910803/edx | lms/djangoapps/branding/__init__.py | 45 | 2858 | from xmodule.modulestore.django import modulestore
from xmodule.course_module import CourseDescriptor
from django.conf import settings
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from microsite_configuration import microsite
def get_visible_courses():
"""
Return the set of CourseDescriptors that should be visible in this branded instance
"""
filtered_by_org = microsite.get_value('course_org_filter')
_courses = modulestore().get_courses(org=filtered_by_org)
courses = [c for c in _courses
if isinstance(c, CourseDescriptor)]
courses = sorted(courses, key=lambda course: course.number)
subdomain = microsite.get_value('subdomain', 'default')
# See if we have filtered course listings in this domain
filtered_visible_ids = None
# this is legacy format which is outside of the microsite feature -- also handle dev case, which should not filter
if hasattr(settings, 'COURSE_LISTINGS') and subdomain in settings.COURSE_LISTINGS and not settings.DEBUG:
filtered_visible_ids = frozenset([SlashSeparatedCourseKey.from_deprecated_string(c) for c in settings.COURSE_LISTINGS[subdomain]])
if filtered_by_org:
return [course for course in courses if course.location.org == filtered_by_org]
if filtered_visible_ids:
return [course for course in courses if course.id in filtered_visible_ids]
else:
# Let's filter out any courses in an "org" that has been declared to be
# in a Microsite
org_filter_out_set = microsite.get_all_orgs()
return [course for course in courses if course.location.org not in org_filter_out_set]
def get_university_for_request():
"""
Return the university name specified for the domain, or None
if no university was specified
"""
return microsite.get_value('university')
def get_logo_url():
"""
Return the url for the branded logo image to be used
"""
# if the MicrositeConfiguration has a value for the logo_image_url
# let's use that
image_url = microsite.get_value('logo_image_url')
if image_url:
return '{static_url}{image_url}'.format(
static_url=settings.STATIC_URL,
image_url=image_url
)
# otherwise, use the legacy means to configure this
university = microsite.get_value('university')
if university is None and settings.FEATURES.get('IS_EDX_DOMAIN', False):
return '{static_url}images/edx-theme/edx-logo-77x36.png'.format(
static_url=settings.STATIC_URL
)
elif university:
return '{static_url}images/{uni}-on-edx-logo.png'.format(
static_url=settings.STATIC_URL, uni=university
)
else:
return '{static_url}images/default-theme/logo.png'.format(
static_url=settings.STATIC_URL
)
| agpl-3.0 | 6,574,672,148,467,007,000 | 35.641026 | 138 | 0.688244 | false |
vitaly4uk/django | tests/aggregation_regress/tests.py | 66 | 53789 | from __future__ import unicode_literals
import datetime
import pickle
from decimal import Decimal
from operator import attrgetter
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import FieldError
from django.db import connection
from django.db.models import (
F, Q, Avg, Count, Max, StdDev, Sum, Value, Variance,
)
from django.test import TestCase, skipUnlessAnyDBFeature, skipUnlessDBFeature
from django.test.utils import Approximate
from django.utils import six
from .models import (
Alfa, Author, Book, Bravo, Charlie, Clues, Entries, HardbackBook, ItemTag,
Publisher, SelfRefFK, Store, WithManualPK,
)
class AggregationTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.a1 = Author.objects.create(name='Adrian Holovaty', age=34)
cls.a2 = Author.objects.create(name='Jacob Kaplan-Moss', age=35)
cls.a3 = Author.objects.create(name='Brad Dayley', age=45)
cls.a4 = Author.objects.create(name='James Bennett', age=29)
cls.a5 = Author.objects.create(name='Jeffrey Forcier', age=37)
cls.a6 = Author.objects.create(name='Paul Bissex', age=29)
cls.a7 = Author.objects.create(name='Wesley J. Chun', age=25)
cls.a8 = Author.objects.create(name='Peter Norvig', age=57)
cls.a9 = Author.objects.create(name='Stuart Russell', age=46)
cls.a1.friends.add(cls.a2, cls.a4)
cls.a2.friends.add(cls.a1, cls.a7)
cls.a4.friends.add(cls.a1)
cls.a5.friends.add(cls.a6, cls.a7)
cls.a6.friends.add(cls.a5, cls.a7)
cls.a7.friends.add(cls.a2, cls.a5, cls.a6)
cls.a8.friends.add(cls.a9)
cls.a9.friends.add(cls.a8)
cls.p1 = Publisher.objects.create(name='Apress', num_awards=3)
cls.p2 = Publisher.objects.create(name='Sams', num_awards=1)
cls.p3 = Publisher.objects.create(name='Prentice Hall', num_awards=7)
cls.p4 = Publisher.objects.create(name='Morgan Kaufmann', num_awards=9)
cls.p5 = Publisher.objects.create(name="Jonno's House of Books", num_awards=0)
cls.b1 = Book.objects.create(
isbn='159059725', name='The Definitive Guide to Django: Web Development Done Right',
pages=447, rating=4.5, price=Decimal('30.00'), contact=cls.a1, publisher=cls.p1,
pubdate=datetime.date(2007, 12, 6)
)
cls.b2 = Book.objects.create(
isbn='067232959', name='Sams Teach Yourself Django in 24 Hours',
pages=528, rating=3.0, price=Decimal('23.09'), contact=cls.a3, publisher=cls.p2,
pubdate=datetime.date(2008, 3, 3)
)
cls.b3 = Book.objects.create(
isbn='159059996', name='Practical Django Projects',
pages=300, rating=4.0, price=Decimal('29.69'), contact=cls.a4, publisher=cls.p1,
pubdate=datetime.date(2008, 6, 23)
)
cls.b4 = Book.objects.create(
isbn='013235613', name='Python Web Development with Django',
pages=350, rating=4.0, price=Decimal('29.69'), contact=cls.a5, publisher=cls.p3,
pubdate=datetime.date(2008, 11, 3)
)
cls.b5 = HardbackBook.objects.create(
isbn='013790395', name='Artificial Intelligence: A Modern Approach',
pages=1132, rating=4.0, price=Decimal('82.80'), contact=cls.a8, publisher=cls.p3,
pubdate=datetime.date(1995, 1, 15), weight=4.5)
cls.b6 = HardbackBook.objects.create(
isbn='155860191', name='Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp',
pages=946, rating=5.0, price=Decimal('75.00'), contact=cls.a8, publisher=cls.p4,
pubdate=datetime.date(1991, 10, 15), weight=3.7)
cls.b1.authors.add(cls.a1, cls.a2)
cls.b2.authors.add(cls.a3)
cls.b3.authors.add(cls.a4)
cls.b4.authors.add(cls.a5, cls.a6, cls.a7)
cls.b5.authors.add(cls.a8, cls.a9)
cls.b6.authors.add(cls.a8)
s1 = Store.objects.create(
name='Amazon.com',
original_opening=datetime.datetime(1994, 4, 23, 9, 17, 42),
friday_night_closing=datetime.time(23, 59, 59)
)
s2 = Store.objects.create(
name='Books.com',
original_opening=datetime.datetime(2001, 3, 15, 11, 23, 37),
friday_night_closing=datetime.time(23, 59, 59)
)
s3 = Store.objects.create(
name="Mamma and Pappa's Books",
original_opening=datetime.datetime(1945, 4, 25, 16, 24, 14),
friday_night_closing=datetime.time(21, 30)
)
s1.books.add(cls.b1, cls.b2, cls.b3, cls.b4, cls.b5, cls.b6)
s2.books.add(cls.b1, cls.b3, cls.b5, cls.b6)
s3.books.add(cls.b3, cls.b4, cls.b6)
def assertObjectAttrs(self, obj, **kwargs):
for attr, value in six.iteritems(kwargs):
self.assertEqual(getattr(obj, attr), value)
def test_aggregates_in_where_clause(self):
"""
Regression test for #12822: DatabaseError: aggregates not allowed in
WHERE clause
Tests that the subselect works and returns results equivalent to a
query with the IDs listed.
Before the corresponding fix for this bug, this test passed in 1.1 and
failed in 1.2-beta (trunk).
"""
qs = Book.objects.values('contact').annotate(Max('id'))
qs = qs.order_by('contact').values_list('id__max', flat=True)
# don't do anything with the queryset (qs) before including it as a
# subquery
books = Book.objects.order_by('id')
qs1 = books.filter(id__in=qs)
qs2 = books.filter(id__in=list(qs))
self.assertEqual(list(qs1), list(qs2))
def test_aggregates_in_where_clause_pre_eval(self):
"""
Regression test for #12822: DatabaseError: aggregates not allowed in
WHERE clause
Same as the above test, but evaluates the queryset for the subquery
before it's used as a subquery.
Before the corresponding fix for this bug, this test failed in both
1.1 and 1.2-beta (trunk).
"""
qs = Book.objects.values('contact').annotate(Max('id'))
qs = qs.order_by('contact').values_list('id__max', flat=True)
# force the queryset (qs) for the subquery to be evaluated in its
# current state
list(qs)
books = Book.objects.order_by('id')
qs1 = books.filter(id__in=qs)
qs2 = books.filter(id__in=list(qs))
self.assertEqual(list(qs1), list(qs2))
@skipUnlessDBFeature('supports_subqueries_in_group_by')
def test_annotate_with_extra(self):
"""
Regression test for #11916: Extra params + aggregation creates
incorrect SQL.
"""
# Oracle doesn't support subqueries in group by clause
shortest_book_sql = """
SELECT name
FROM aggregation_regress_book b
WHERE b.publisher_id = aggregation_regress_publisher.id
ORDER BY b.pages
LIMIT 1
"""
# tests that this query does not raise a DatabaseError due to the full
# subselect being (erroneously) added to the GROUP BY parameters
qs = Publisher.objects.extra(select={
'name_of_shortest_book': shortest_book_sql,
}).annotate(total_books=Count('book'))
# force execution of the query
list(qs)
def test_aggregate(self):
# Ordering requests are ignored
self.assertEqual(
Author.objects.order_by("name").aggregate(Avg("age")),
{"age__avg": Approximate(37.444, places=1)}
)
# Implicit ordering is also ignored
self.assertEqual(
Book.objects.aggregate(Sum("pages")),
{"pages__sum": 3703},
)
# Baseline results
self.assertEqual(
Book.objects.aggregate(Sum('pages'), Avg('pages')),
{'pages__sum': 3703, 'pages__avg': Approximate(617.166, places=2)}
)
# Empty values query doesn't affect grouping or results
self.assertEqual(
Book.objects.values().aggregate(Sum('pages'), Avg('pages')),
{'pages__sum': 3703, 'pages__avg': Approximate(617.166, places=2)}
)
# Aggregate overrides extra selected column
self.assertEqual(
Book.objects.extra(select={'price_per_page': 'price / pages'}).aggregate(Sum('pages')),
{'pages__sum': 3703}
)
def test_annotation(self):
# Annotations get combined with extra select clauses
obj = Book.objects.annotate(mean_auth_age=Avg("authors__age")).extra(
select={"manufacture_cost": "price * .5"}).get(pk=self.b2.pk)
self.assertObjectAttrs(
obj,
contact_id=self.a3.id,
isbn='067232959',
mean_auth_age=45.0,
name='Sams Teach Yourself Django in 24 Hours',
pages=528,
price=Decimal("23.09"),
pubdate=datetime.date(2008, 3, 3),
publisher_id=self.p2.id,
rating=3.0
)
# Different DB backends return different types for the extra select computation
self.assertIn(obj.manufacture_cost, (11.545, Decimal('11.545')))
# Order of the annotate/extra in the query doesn't matter
obj = Book.objects.extra(select={'manufacture_cost': 'price * .5'}).annotate(
mean_auth_age=Avg('authors__age')).get(pk=self.b2.pk)
self.assertObjectAttrs(
obj,
contact_id=self.a3.id,
isbn='067232959',
mean_auth_age=45.0,
name='Sams Teach Yourself Django in 24 Hours',
pages=528,
price=Decimal("23.09"),
pubdate=datetime.date(2008, 3, 3),
publisher_id=self.p2.id,
rating=3.0
)
# Different DB backends return different types for the extra select computation
self.assertIn(obj.manufacture_cost, (11.545, Decimal('11.545')))
# Values queries can be combined with annotate and extra
obj = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra(
select={'manufacture_cost': 'price * .5'}).values().get(pk=self.b2.pk)
manufacture_cost = obj['manufacture_cost']
self.assertIn(manufacture_cost, (11.545, Decimal('11.545')))
del obj['manufacture_cost']
self.assertEqual(obj, {
'id': self.b2.id,
'contact_id': self.a3.id,
'isbn': '067232959',
'mean_auth_age': 45.0,
'name': 'Sams Teach Yourself Django in 24 Hours',
'pages': 528,
'price': Decimal('23.09'),
'pubdate': datetime.date(2008, 3, 3),
'publisher_id': self.p2.id,
'rating': 3.0,
})
# The order of the (empty) values, annotate and extra clauses doesn't
# matter
obj = Book.objects.values().annotate(mean_auth_age=Avg('authors__age')).extra(
select={'manufacture_cost': 'price * .5'}).get(pk=self.b2.pk)
manufacture_cost = obj['manufacture_cost']
self.assertIn(manufacture_cost, (11.545, Decimal('11.545')))
del obj['manufacture_cost']
self.assertEqual(obj, {
'id': self.b2.id,
'contact_id': self.a3.id,
'isbn': '067232959',
'mean_auth_age': 45.0,
'name': 'Sams Teach Yourself Django in 24 Hours',
'pages': 528,
'price': Decimal('23.09'),
'pubdate': datetime.date(2008, 3, 3),
'publisher_id': self.p2.id,
'rating': 3.0
})
# If the annotation precedes the values clause, it won't be included
# unless it is explicitly named
obj = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra(
select={'price_per_page': 'price / pages'}).values('name').get(pk=self.b1.pk)
self.assertEqual(obj, {
"name": 'The Definitive Guide to Django: Web Development Done Right',
})
obj = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra(
select={'price_per_page': 'price / pages'}).values('name', 'mean_auth_age').get(pk=self.b1.pk)
self.assertEqual(obj, {
'mean_auth_age': 34.5,
'name': 'The Definitive Guide to Django: Web Development Done Right',
})
# If an annotation isn't included in the values, it can still be used
# in a filter
qs = Book.objects.annotate(n_authors=Count('authors')).values('name').filter(n_authors__gt=2)
self.assertQuerysetEqual(
qs, [
{"name": 'Python Web Development with Django'}
],
lambda b: b,
)
# The annotations are added to values output if values() precedes
# annotate()
obj = Book.objects.values('name').annotate(mean_auth_age=Avg('authors__age')).extra(
select={'price_per_page': 'price / pages'}).get(pk=self.b1.pk)
self.assertEqual(obj, {
'mean_auth_age': 34.5,
'name': 'The Definitive Guide to Django: Web Development Done Right',
})
# Check that all of the objects are getting counted (allow_nulls) and
# that values respects the amount of objects
self.assertEqual(
len(Author.objects.annotate(Avg('friends__age')).values()),
9
)
# Check that consecutive calls to annotate accumulate in the query
qs = Book.objects.values('price').annotate(oldest=Max('authors__age')).order_by('oldest', 'price').annotate(Max('publisher__num_awards'))
self.assertQuerysetEqual(
qs, [
{'price': Decimal("30"), 'oldest': 35, 'publisher__num_awards__max': 3},
{'price': Decimal("29.69"), 'oldest': 37, 'publisher__num_awards__max': 7},
{'price': Decimal("23.09"), 'oldest': 45, 'publisher__num_awards__max': 1},
{'price': Decimal("75"), 'oldest': 57, 'publisher__num_awards__max': 9},
{'price': Decimal("82.8"), 'oldest': 57, 'publisher__num_awards__max': 7}
],
lambda b: b,
)
    def test_aggregate_annotation(self):
# Aggregates can be composed over annotations.
# The return type is derived from the composed aggregate
vals = Book.objects.all().annotate(num_authors=Count('authors__id')).aggregate(Max('pages'), Max('price'), Sum('num_authors'), Avg('num_authors'))
self.assertEqual(vals, {
'num_authors__sum': 10,
'num_authors__avg': Approximate(1.666, places=2),
'pages__max': 1132,
'price__max': Decimal("82.80")
})
# Regression for #15624 - Missing SELECT columns when using values, annotate
# and aggregate in a single query
self.assertEqual(
Book.objects.annotate(c=Count('authors')).values('c').aggregate(Max('c')),
{'c__max': 3}
)
def test_decimal_aggregate_annotation_filter(self):
"""
Filtering on an aggregate annotation with Decimal values should work.
Requires special handling on SQLite (#18247).
"""
self.assertEqual(
len(Author.objects.annotate(sum=Sum('book_contact_set__price')).filter(sum__gt=Decimal(40))),
1
)
self.assertEqual(
len(Author.objects.annotate(sum=Sum('book_contact_set__price')).filter(sum__lte=Decimal(40))),
4
)
def test_field_error(self):
# Bad field requests in aggregates are caught and reported
self.assertRaises(
FieldError,
lambda: Book.objects.all().aggregate(num_authors=Count('foo'))
)
self.assertRaises(
FieldError,
lambda: Book.objects.all().annotate(num_authors=Count('foo'))
)
self.assertRaises(
FieldError,
lambda: Book.objects.all().annotate(num_authors=Count('authors__id')).aggregate(Max('foo'))
)
def test_more(self):
# Old-style count aggregations can be mixed with new-style
self.assertEqual(
Book.objects.annotate(num_authors=Count('authors')).count(),
6
)
# Non-ordinal, non-computed Aggregates over annotations correctly
# inherit the annotation's internal type if the annotation is ordinal
# or computed
vals = Book.objects.annotate(num_authors=Count('authors')).aggregate(Max('num_authors'))
self.assertEqual(
vals,
{'num_authors__max': 3}
)
vals = Publisher.objects.annotate(avg_price=Avg('book__price')).aggregate(Max('avg_price'))
self.assertEqual(
vals,
{'avg_price__max': 75.0}
)
        # Aliases are quoted to protect aliases that might be reserved names
vals = Book.objects.aggregate(number=Max('pages'), select=Max('pages'))
self.assertEqual(
vals,
{'number': 1132, 'select': 1132}
)
# Regression for #10064: select_related() plays nice with aggregates
obj = Book.objects.select_related('publisher').annotate(
num_authors=Count('authors')).values().get(isbn='013790395')
self.assertEqual(obj, {
'contact_id': self.a8.id,
'id': self.b5.id,
'isbn': '013790395',
'name': 'Artificial Intelligence: A Modern Approach',
'num_authors': 2,
'pages': 1132,
'price': Decimal("82.8"),
'pubdate': datetime.date(1995, 1, 15),
'publisher_id': self.p3.id,
'rating': 4.0,
})
# Regression for #10010: exclude on an aggregate field is correctly
# negated
self.assertEqual(
len(Book.objects.annotate(num_authors=Count('authors'))),
6
)
self.assertEqual(
len(Book.objects.annotate(num_authors=Count('authors')).filter(num_authors__gt=2)),
1
)
self.assertEqual(
len(Book.objects.annotate(num_authors=Count('authors')).exclude(num_authors__gt=2)),
5
)
self.assertEqual(
len(Book.objects.annotate(num_authors=Count('authors')).filter(num_authors__lt=3).exclude(num_authors__lt=2)),
2
)
self.assertEqual(
len(Book.objects.annotate(num_authors=Count('authors')).exclude(num_authors__lt=2).filter(num_authors__lt=3)),
2
)
def test_aggregate_fexpr(self):
# Aggregates can be used with F() expressions
# ... where the F() is pushed into the HAVING clause
qs = Publisher.objects.annotate(num_books=Count('book')).filter(num_books__lt=F('num_awards') / 2).order_by('name').values('name', 'num_books', 'num_awards')
self.assertQuerysetEqual(
qs, [
{'num_books': 1, 'name': 'Morgan Kaufmann', 'num_awards': 9},
{'num_books': 2, 'name': 'Prentice Hall', 'num_awards': 7}
],
lambda p: p,
)
qs = Publisher.objects.annotate(num_books=Count('book')).exclude(num_books__lt=F('num_awards') / 2).order_by('name').values('name', 'num_books', 'num_awards')
self.assertQuerysetEqual(
qs, [
{'num_books': 2, 'name': 'Apress', 'num_awards': 3},
{'num_books': 0, 'name': "Jonno's House of Books", 'num_awards': 0},
{'num_books': 1, 'name': 'Sams', 'num_awards': 1}
],
lambda p: p,
)
# ... and where the F() references an aggregate
qs = Publisher.objects.annotate(num_books=Count('book')).filter(num_awards__gt=2 * F('num_books')).order_by('name').values('name', 'num_books', 'num_awards')
self.assertQuerysetEqual(
qs, [
{'num_books': 1, 'name': 'Morgan Kaufmann', 'num_awards': 9},
{'num_books': 2, 'name': 'Prentice Hall', 'num_awards': 7}
],
lambda p: p,
)
qs = Publisher.objects.annotate(num_books=Count('book')).exclude(num_books__lt=F('num_awards') / 2).order_by('name').values('name', 'num_books', 'num_awards')
self.assertQuerysetEqual(
qs, [
{'num_books': 2, 'name': 'Apress', 'num_awards': 3},
{'num_books': 0, 'name': "Jonno's House of Books", 'num_awards': 0},
{'num_books': 1, 'name': 'Sams', 'num_awards': 1}
],
lambda p: p,
)
def test_db_col_table(self):
# Tests on fields with non-default table and column names.
qs = Clues.objects.values('EntryID__Entry').annotate(Appearances=Count('EntryID'), Distinct_Clues=Count('Clue', distinct=True))
self.assertQuerysetEqual(qs, [])
qs = Entries.objects.annotate(clue_count=Count('clues__ID'))
self.assertQuerysetEqual(qs, [])
def test_boolean_conversion(self):
# Aggregates mixed up ordering of columns for backend's convert_values
# method. Refs #21126.
e = Entries.objects.create(Entry='foo')
c = Clues.objects.create(EntryID=e, Clue='bar')
qs = Clues.objects.select_related('EntryID').annotate(Count('ID'))
self.assertQuerysetEqual(
qs, [c], lambda x: x)
self.assertEqual(qs[0].EntryID, e)
self.assertIs(qs[0].EntryID.Exclude, False)
def test_empty(self):
# Regression for #10089: Check handling of empty result sets with
# aggregates
self.assertEqual(
Book.objects.filter(id__in=[]).count(),
0
)
vals = Book.objects.filter(id__in=[]).aggregate(num_authors=Count('authors'), avg_authors=Avg('authors'), max_authors=Max('authors'), max_price=Max('price'), max_rating=Max('rating'))
self.assertEqual(
vals,
{'max_authors': None, 'max_rating': None, 'num_authors': 0, 'avg_authors': None, 'max_price': None}
)
qs = Publisher.objects.filter(name="Jonno's House of Books").annotate(num_authors=Count('book__authors'), avg_authors=Avg('book__authors'), max_authors=Max('book__authors'), max_price=Max('book__price'), max_rating=Max('book__rating')).values()
self.assertQuerysetEqual(
qs, [
{'max_authors': None, 'name': "Jonno's House of Books", 'num_awards': 0, 'max_price': None, 'num_authors': 0, 'max_rating': None, 'id': self.p5.id, 'avg_authors': None}
],
lambda p: p
)
def test_more_more(self):
# Regression for #10113 - Fields mentioned in order_by() must be
# included in the GROUP BY. This only becomes a problem when the
# order_by introduces a new join.
self.assertQuerysetEqual(
Book.objects.annotate(num_authors=Count('authors')).order_by('publisher__name', 'name'), [
"Practical Django Projects",
"The Definitive Guide to Django: Web Development Done Right",
"Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp",
"Artificial Intelligence: A Modern Approach",
"Python Web Development with Django",
"Sams Teach Yourself Django in 24 Hours",
],
lambda b: b.name
)
# Regression for #10127 - Empty select_related() works with annotate
qs = Book.objects.filter(rating__lt=4.5).select_related().annotate(Avg('authors__age'))
self.assertQuerysetEqual(
qs, [
('Artificial Intelligence: A Modern Approach', 51.5, 'Prentice Hall', 'Peter Norvig'),
('Practical Django Projects', 29.0, 'Apress', 'James Bennett'),
('Python Web Development with Django', Approximate(30.333, places=2), 'Prentice Hall', 'Jeffrey Forcier'),
('Sams Teach Yourself Django in 24 Hours', 45.0, 'Sams', 'Brad Dayley')
],
lambda b: (b.name, b.authors__age__avg, b.publisher.name, b.contact.name)
)
# Regression for #10132 - If the values() clause only mentioned extra
# (select=) columns, those columns are used for grouping
qs = Book.objects.extra(select={'pub': 'publisher_id'}).values('pub').annotate(Count('id')).order_by('pub')
self.assertQuerysetEqual(
qs, [
{'pub': self.b1.id, 'id__count': 2},
{'pub': self.b2.id, 'id__count': 1},
{'pub': self.b3.id, 'id__count': 2},
{'pub': self.b4.id, 'id__count': 1}
],
lambda b: b
)
qs = Book.objects.extra(select={'pub': 'publisher_id', 'foo': 'pages'}).values('pub').annotate(Count('id')).order_by('pub')
self.assertQuerysetEqual(
qs, [
{'pub': self.p1.id, 'id__count': 2},
{'pub': self.p2.id, 'id__count': 1},
{'pub': self.p3.id, 'id__count': 2},
{'pub': self.p4.id, 'id__count': 1}
],
lambda b: b
)
# Regression for #10182 - Queries with aggregate calls are correctly
# realiased when used in a subquery
ids = Book.objects.filter(pages__gt=100).annotate(n_authors=Count('authors')).filter(n_authors__gt=2).order_by('n_authors')
self.assertQuerysetEqual(
Book.objects.filter(id__in=ids), [
"Python Web Development with Django",
],
lambda b: b.name
)
# Regression for #15709 - Ensure each group_by field only exists once
# per query
qstr = str(Book.objects.values('publisher').annotate(max_pages=Max('pages')).order_by().query)
# Check that there is just one GROUP BY clause (zero commas means at
# most one clause)
self.assertEqual(qstr[qstr.index('GROUP BY'):].count(', '), 0)
def test_duplicate_alias(self):
# Regression for #11256 - duplicating a default alias raises ValueError.
self.assertRaises(ValueError, Book.objects.all().annotate, Avg('authors__age'), authors__age__avg=Avg('authors__age'))
def test_field_name_conflict(self):
# Regression for #11256 - providing an aggregate name that conflicts with a field name on the model raises ValueError
self.assertRaises(ValueError, Author.objects.annotate, age=Avg('friends__age'))
def test_m2m_name_conflict(self):
# Regression for #11256 - providing an aggregate name that conflicts with an m2m name on the model raises ValueError
self.assertRaises(ValueError, Author.objects.annotate, friends=Count('friends'))
def test_values_queryset_non_conflict(self):
# Regression for #14707 -- If you're using a values query set, some potential conflicts are avoided.
# age is a field on Author, so it shouldn't be allowed as an aggregate.
# But age isn't included in values(), so it is.
results = Author.objects.values('name').annotate(age=Count('book_contact_set')).order_by('name')
self.assertEqual(len(results), 9)
self.assertEqual(results[0]['name'], 'Adrian Holovaty')
self.assertEqual(results[0]['age'], 1)
# Same problem, but aggregating over m2m fields
results = Author.objects.values('name').annotate(age=Avg('friends__age')).order_by('name')
self.assertEqual(len(results), 9)
self.assertEqual(results[0]['name'], 'Adrian Holovaty')
self.assertEqual(results[0]['age'], 32.0)
# Same problem, but colliding with an m2m field
results = Author.objects.values('name').annotate(friends=Count('friends')).order_by('name')
self.assertEqual(len(results), 9)
self.assertEqual(results[0]['name'], 'Adrian Holovaty')
self.assertEqual(results[0]['friends'], 2)
def test_reverse_relation_name_conflict(self):
# Regression for #11256 - providing an aggregate name that conflicts with a reverse-related name on the model raises ValueError
self.assertRaises(ValueError, Author.objects.annotate, book_contact_set=Avg('friends__age'))
def test_pickle(self):
# Regression for #10197 -- Queries with aggregates can be pickled.
# First check that pickling is possible at all. No crash = success
qs = Book.objects.annotate(num_authors=Count('authors'))
pickle.dumps(qs)
# Then check that the round trip works.
query = qs.query.get_compiler(qs.db).as_sql()[0]
qs2 = pickle.loads(pickle.dumps(qs))
self.assertEqual(
qs2.query.get_compiler(qs2.db).as_sql()[0],
query,
)
def test_more_more_more(self):
# Regression for #10199 - Aggregate calls clone the original query so
# the original query can still be used
books = Book.objects.all()
books.aggregate(Avg("authors__age"))
self.assertQuerysetEqual(
books.all(), [
'Artificial Intelligence: A Modern Approach',
'Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp',
'Practical Django Projects',
'Python Web Development with Django',
'Sams Teach Yourself Django in 24 Hours',
'The Definitive Guide to Django: Web Development Done Right'
],
lambda b: b.name
)
# Regression for #10248 - Annotations work with DateQuerySets
qs = Book.objects.annotate(num_authors=Count('authors')).filter(num_authors=2).dates('pubdate', 'day')
self.assertQuerysetEqual(
qs, [
datetime.date(1995, 1, 15),
datetime.date(2007, 12, 6),
],
lambda b: b
)
# Regression for #10290 - extra selects with parameters can be used for
# grouping.
qs = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra(select={'sheets': '(pages + %s) / %s'}, select_params=[1, 2]).order_by('sheets').values('sheets')
self.assertQuerysetEqual(
qs, [
150,
175,
224,
264,
473,
566
],
lambda b: int(b["sheets"])
)
# Regression for 10425 - annotations don't get in the way of a count()
# clause
self.assertEqual(
Book.objects.values('publisher').annotate(Count('publisher')).count(),
4
)
self.assertEqual(
Book.objects.annotate(Count('publisher')).values('publisher').count(),
6
)
# Note: intentionally no order_by(), that case needs tests, too.
publishers = Publisher.objects.filter(id__in=[1, 2])
self.assertEqual(
sorted(p.name for p in publishers),
[
"Apress",
"Sams"
]
)
publishers = publishers.annotate(n_books=Count("book"))
sorted_publishers = sorted(publishers, key=lambda x: x.name)
self.assertEqual(
sorted_publishers[0].n_books,
2
)
self.assertEqual(
sorted_publishers[1].n_books,
1
)
self.assertEqual(
sorted(p.name for p in publishers),
[
"Apress",
"Sams"
]
)
books = Book.objects.filter(publisher__in=publishers)
self.assertQuerysetEqual(
books, [
"Practical Django Projects",
"Sams Teach Yourself Django in 24 Hours",
"The Definitive Guide to Django: Web Development Done Right",
],
lambda b: b.name
)
self.assertEqual(
sorted(p.name for p in publishers),
[
"Apress",
"Sams"
]
)
# Regression for 10666 - inherited fields work with annotations and
# aggregations
self.assertEqual(
HardbackBook.objects.aggregate(n_pages=Sum('book_ptr__pages')),
{'n_pages': 2078}
)
self.assertEqual(
HardbackBook.objects.aggregate(n_pages=Sum('pages')),
{'n_pages': 2078},
)
qs = HardbackBook.objects.annotate(n_authors=Count('book_ptr__authors')).values('name', 'n_authors')
self.assertQuerysetEqual(
qs, [
{'n_authors': 2, 'name': 'Artificial Intelligence: A Modern Approach'},
{'n_authors': 1, 'name': 'Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp'}
],
lambda h: h
)
qs = HardbackBook.objects.annotate(n_authors=Count('authors')).values('name', 'n_authors')
self.assertQuerysetEqual(
qs, [
{'n_authors': 2, 'name': 'Artificial Intelligence: A Modern Approach'},
{'n_authors': 1, 'name': 'Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp'}
],
lambda h: h,
)
        # Regression for #10766 - Shouldn't be able to reference aggregate
        # fields in an aggregate() call.
self.assertRaises(
FieldError,
lambda: Book.objects.annotate(mean_age=Avg('authors__age')).annotate(Avg('mean_age'))
)
def test_empty_filter_count(self):
self.assertEqual(
Author.objects.filter(id__in=[]).annotate(Count("friends")).count(),
0
)
def test_empty_filter_aggregate(self):
self.assertEqual(
Author.objects.filter(id__in=[]).annotate(Count("friends")).aggregate(Count("pk")),
{"pk__count": None}
)
def test_none_call_before_aggregate(self):
# Regression for #11789
self.assertEqual(
Author.objects.none().aggregate(Avg('age')),
{'age__avg': None}
)
def test_annotate_and_join(self):
self.assertEqual(
Author.objects.annotate(c=Count("friends__name")).exclude(friends__name="Joe").count(),
Author.objects.count()
)
def test_f_expression_annotation(self):
# Books with less than 200 pages per author.
qs = Book.objects.values("name").annotate(
n_authors=Count("authors")
).filter(
pages__lt=F("n_authors") * 200
).values_list("pk")
self.assertQuerysetEqual(
Book.objects.filter(pk__in=qs), [
"Python Web Development with Django"
],
attrgetter("name")
)
def test_values_annotate_values(self):
qs = Book.objects.values("name").annotate(
n_authors=Count("authors")
).values_list("pk", flat=True)
self.assertEqual(list(qs), list(Book.objects.values_list("pk", flat=True)))
def test_having_group_by(self):
        # Test that when a field occurs on the LHS of a HAVING clause it
        # appears correctly in the GROUP BY clause.
qs = Book.objects.values_list("name").annotate(
n_authors=Count("authors")
).filter(
pages__gt=F("n_authors")
).values_list("name", flat=True)
# Results should be the same, all Books have more pages than authors
self.assertEqual(
list(qs), list(Book.objects.values_list("name", flat=True))
)
def test_values_list_annotation_args_ordering(self):
"""
Annotate *args ordering should be preserved in values_list results.
**kwargs comes after *args.
Regression test for #23659.
"""
books = Book.objects.values_list("publisher__name").annotate(
Count("id"), Avg("price"), Avg("authors__age"), avg_pgs=Avg("pages")
).order_by("-publisher__name")
self.assertEqual(books[0], ('Sams', 1, 23.09, 45.0, 528.0))
def test_annotation_disjunction(self):
qs = Book.objects.annotate(n_authors=Count("authors")).filter(
Q(n_authors=2) | Q(name="Python Web Development with Django")
)
self.assertQuerysetEqual(
qs, [
"Artificial Intelligence: A Modern Approach",
"Python Web Development with Django",
"The Definitive Guide to Django: Web Development Done Right",
],
attrgetter("name")
)
qs = Book.objects.annotate(n_authors=Count("authors")).filter(
Q(name="The Definitive Guide to Django: Web Development Done Right") | (Q(name="Artificial Intelligence: A Modern Approach") & Q(n_authors=3))
)
self.assertQuerysetEqual(
qs, [
"The Definitive Guide to Django: Web Development Done Right",
],
attrgetter("name")
)
qs = Publisher.objects.annotate(
rating_sum=Sum("book__rating"),
book_count=Count("book")
).filter(
Q(rating_sum__gt=5.5) | Q(rating_sum__isnull=True)
).order_by('pk')
self.assertQuerysetEqual(
qs, [
"Apress",
"Prentice Hall",
"Jonno's House of Books",
],
attrgetter("name")
)
qs = Publisher.objects.annotate(
rating_sum=Sum("book__rating"),
book_count=Count("book")
).filter(
Q(rating_sum__gt=F("book_count")) | Q(rating_sum=None)
).order_by("num_awards")
self.assertQuerysetEqual(
qs, [
"Jonno's House of Books",
"Sams",
"Apress",
"Prentice Hall",
"Morgan Kaufmann"
],
attrgetter("name")
)
def test_quoting_aggregate_order_by(self):
qs = Book.objects.filter(
name="Python Web Development with Django"
).annotate(
authorCount=Count("authors")
).order_by("authorCount")
self.assertQuerysetEqual(
qs, [
("Python Web Development with Django", 3),
],
lambda b: (b.name, b.authorCount)
)
@skipUnlessDBFeature('supports_stddev')
def test_stddev(self):
self.assertEqual(
Book.objects.aggregate(StdDev('pages')),
{'pages__stddev': Approximate(311.46, 1)}
)
self.assertEqual(
Book.objects.aggregate(StdDev('rating')),
{'rating__stddev': Approximate(0.60, 1)}
)
self.assertEqual(
Book.objects.aggregate(StdDev('price')),
{'price__stddev': Approximate(24.16, 2)}
)
self.assertEqual(
Book.objects.aggregate(StdDev('pages', sample=True)),
{'pages__stddev': Approximate(341.19, 2)}
)
self.assertEqual(
Book.objects.aggregate(StdDev('rating', sample=True)),
{'rating__stddev': Approximate(0.66, 2)}
)
self.assertEqual(
Book.objects.aggregate(StdDev('price', sample=True)),
{'price__stddev': Approximate(26.46, 1)}
)
self.assertEqual(
Book.objects.aggregate(Variance('pages')),
{'pages__variance': Approximate(97010.80, 1)}
)
self.assertEqual(
Book.objects.aggregate(Variance('rating')),
{'rating__variance': Approximate(0.36, 1)}
)
self.assertEqual(
Book.objects.aggregate(Variance('price')),
{'price__variance': Approximate(583.77, 1)}
)
self.assertEqual(
Book.objects.aggregate(Variance('pages', sample=True)),
{'pages__variance': Approximate(116412.96, 1)}
)
self.assertEqual(
Book.objects.aggregate(Variance('rating', sample=True)),
{'rating__variance': Approximate(0.44, 2)}
)
self.assertEqual(
Book.objects.aggregate(Variance('price', sample=True)),
{'price__variance': Approximate(700.53, 2)}
)
def test_filtering_by_annotation_name(self):
# Regression test for #14476
        # The explicitly provided annotation name in this case
        # poses no problem
qs = Author.objects.annotate(book_cnt=Count('book')).filter(book_cnt=2).order_by('name')
self.assertQuerysetEqual(
qs,
['Peter Norvig'],
lambda b: b.name
)
# Neither in this case
qs = Author.objects.annotate(book_count=Count('book')).filter(book_count=2).order_by('name')
self.assertQuerysetEqual(
qs,
['Peter Norvig'],
lambda b: b.name
)
# This case used to fail because the ORM couldn't resolve the
# automatically generated annotation name `book__count`
qs = Author.objects.annotate(Count('book')).filter(book__count=2).order_by('name')
self.assertQuerysetEqual(
qs,
['Peter Norvig'],
lambda b: b.name
)
def test_annotate_joins(self):
"""
Test that the base table's join isn't promoted to LOUTER. This could
        cause the query generation to fail if there is also an exclude() for an
        fk field in the query. Refs #19087.
"""
qs = Book.objects.annotate(n=Count('pk'))
self.assertIs(qs.query.alias_map['aggregation_regress_book'].join_type, None)
# Check that the query executes without problems.
self.assertEqual(len(qs.exclude(publisher=-1)), 6)
@skipUnlessAnyDBFeature('allows_group_by_pk', 'allows_group_by_selected_pks')
def test_aggregate_duplicate_columns(self):
# Regression test for #17144
results = Author.objects.annotate(num_contacts=Count('book_contact_set'))
# There should only be one GROUP BY clause, for the `id` column.
# `name` and `age` should not be grouped on.
_, _, group_by = results.query.get_compiler(using='default').pre_sql_setup()
self.assertEqual(len(group_by), 1)
self.assertIn('id', group_by[0][0])
self.assertNotIn('name', group_by[0][0])
self.assertNotIn('age', group_by[0][0])
# Ensure that we get correct results.
self.assertEqual(
[(a.name, a.num_contacts) for a in results.order_by('name')],
[
('Adrian Holovaty', 1),
('Brad Dayley', 1),
('Jacob Kaplan-Moss', 0),
('James Bennett', 1),
('Jeffrey Forcier', 1),
('Paul Bissex', 0),
('Peter Norvig', 2),
('Stuart Russell', 0),
('Wesley J. Chun', 0),
]
)
@skipUnlessAnyDBFeature('allows_group_by_pk', 'allows_group_by_selected_pks')
def test_aggregate_duplicate_columns_only(self):
# Works with only() too.
results = Author.objects.only('id', 'name').annotate(num_contacts=Count('book_contact_set'))
_, _, grouping = results.query.get_compiler(using='default').pre_sql_setup()
self.assertEqual(len(grouping), 1)
self.assertIn('id', grouping[0][0])
self.assertNotIn('name', grouping[0][0])
self.assertNotIn('age', grouping[0][0])
# Ensure that we get correct results.
self.assertEqual(
[(a.name, a.num_contacts) for a in results.order_by('name')],
[
('Adrian Holovaty', 1),
('Brad Dayley', 1),
('Jacob Kaplan-Moss', 0),
('James Bennett', 1),
('Jeffrey Forcier', 1),
('Paul Bissex', 0),
('Peter Norvig', 2),
('Stuart Russell', 0),
('Wesley J. Chun', 0),
]
)
@skipUnlessAnyDBFeature('allows_group_by_pk', 'allows_group_by_selected_pks')
def test_aggregate_duplicate_columns_select_related(self):
# And select_related()
results = Book.objects.select_related('contact').annotate(
num_authors=Count('authors'))
_, _, grouping = results.query.get_compiler(using='default').pre_sql_setup()
# In the case of `group_by_selected_pks` we also group by contact.id because of the select_related.
self.assertEqual(len(grouping), 1 if connection.features.allows_group_by_pk else 2)
self.assertIn('id', grouping[0][0])
self.assertNotIn('name', grouping[0][0])
self.assertNotIn('contact', grouping[0][0])
# Ensure that we get correct results.
self.assertEqual(
[(b.name, b.num_authors) for b in results.order_by('name')],
[
('Artificial Intelligence: A Modern Approach', 2),
('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 1),
('Practical Django Projects', 1),
('Python Web Development with Django', 3),
('Sams Teach Yourself Django in 24 Hours', 1),
('The Definitive Guide to Django: Web Development Done Right', 2)
]
)
def test_reverse_join_trimming(self):
qs = Author.objects.annotate(Count('book_contact_set__contact'))
self.assertIn(' JOIN ', str(qs.query))
def test_aggregation_with_generic_reverse_relation(self):
"""
        Regression test for #10870: aggregates with joins ignore extra
        filters provided by setup_joins.
        Tests aggregations with generic reverse relations.
"""
django_book = Book.objects.get(name='Practical Django Projects')
ItemTag.objects.create(object_id=django_book.id, tag='intermediate',
content_type=ContentType.objects.get_for_model(django_book))
ItemTag.objects.create(object_id=django_book.id, tag='django',
content_type=ContentType.objects.get_for_model(django_book))
        # Assign a tag to a model with the same PK as the book above. If the JOIN
        # used in aggregation doesn't have content type as part of the
        # condition, the annotation will also count the 'hi mom' tag for b.
wmpk = WithManualPK.objects.create(id=django_book.pk)
ItemTag.objects.create(object_id=wmpk.id, tag='hi mom',
content_type=ContentType.objects.get_for_model(wmpk))
ai_book = Book.objects.get(name__startswith='Paradigms of Artificial Intelligence')
ItemTag.objects.create(object_id=ai_book.id, tag='intermediate',
content_type=ContentType.objects.get_for_model(ai_book))
self.assertEqual(Book.objects.aggregate(Count('tags')), {'tags__count': 3})
results = Book.objects.annotate(Count('tags')).order_by('-tags__count', 'name')
self.assertEqual(
[(b.name, b.tags__count) for b in results],
[
('Practical Django Projects', 2),
('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 1),
('Artificial Intelligence: A Modern Approach', 0),
('Python Web Development with Django', 0),
('Sams Teach Yourself Django in 24 Hours', 0),
('The Definitive Guide to Django: Web Development Done Right', 0)
]
)
def test_negated_aggregation(self):
expected_results = Author.objects.exclude(
pk__in=Author.objects.annotate(book_cnt=Count('book')).filter(book_cnt=2)
).order_by('name')
expected_results = [a.name for a in expected_results]
qs = Author.objects.annotate(book_cnt=Count('book')).exclude(
Q(book_cnt=2), Q(book_cnt=2)).order_by('name')
self.assertQuerysetEqual(
qs,
expected_results,
lambda b: b.name
)
expected_results = Author.objects.exclude(
pk__in=Author.objects.annotate(book_cnt=Count('book')).filter(book_cnt=2)
).order_by('name')
expected_results = [a.name for a in expected_results]
qs = Author.objects.annotate(book_cnt=Count('book')).exclude(Q(book_cnt=2) | Q(book_cnt=2)).order_by('name')
self.assertQuerysetEqual(
qs,
expected_results,
lambda b: b.name
)
def test_name_filters(self):
qs = Author.objects.annotate(Count('book')).filter(
Q(book__count__exact=2) | Q(name='Adrian Holovaty')
).order_by('name')
self.assertQuerysetEqual(
qs,
['Adrian Holovaty', 'Peter Norvig'],
lambda b: b.name
)
def test_name_expressions(self):
# Test that aggregates are spotted correctly from F objects.
# Note that Adrian's age is 34 in the fixtures, and he has one book
# so both conditions match one author.
qs = Author.objects.annotate(Count('book')).filter(
Q(name='Peter Norvig') | Q(age=F('book__count') + 33)
).order_by('name')
self.assertQuerysetEqual(
qs,
['Adrian Holovaty', 'Peter Norvig'],
lambda b: b.name
)
def test_ticket_11293(self):
q1 = Q(price__gt=50)
q2 = Q(authors__count__gt=1)
query = Book.objects.annotate(Count('authors')).filter(
q1 | q2).order_by('pk')
self.assertQuerysetEqual(
query, [1, 4, 5, 6],
lambda b: b.pk)
def test_ticket_11293_q_immutable(self):
"""
        Check that splitting a q object into parts for where/having doesn't alter
the original q-object.
"""
q1 = Q(isbn='')
q2 = Q(authors__count__gt=1)
query = Book.objects.annotate(Count('authors'))
query.filter(q1 | q2)
self.assertEqual(len(q2.children), 1)
def test_fobj_group_by(self):
"""
        Check that an F() object referring to a related column works correctly
in group by.
"""
qs = Book.objects.annotate(
acount=Count('authors')
).filter(
acount=F('publisher__num_awards')
)
self.assertQuerysetEqual(
qs, ['Sams Teach Yourself Django in 24 Hours'],
lambda b: b.name)
def test_annotate_reserved_word(self):
"""
Regression #18333 - Ensure annotated column name is properly quoted.
"""
vals = Book.objects.annotate(select=Count('authors__id')).aggregate(Sum('select'), Avg('select'))
self.assertEqual(vals, {
'select__sum': 10,
'select__avg': Approximate(1.666, places=2),
})
def test_annotate_on_relation(self):
book = Book.objects.annotate(avg_price=Avg('price'), publisher_name=F('publisher__name')).get(pk=self.b1.pk)
self.assertEqual(book.avg_price, 30.00)
self.assertEqual(book.publisher_name, "Apress")
def test_aggregate_on_relation(self):
# A query with an existing annotation aggregation on a relation should
# succeed.
qs = Book.objects.annotate(avg_price=Avg('price')).aggregate(
publisher_awards=Sum('publisher__num_awards')
)
self.assertEqual(qs['publisher_awards'], 30)
def test_annotate_distinct_aggregate(self):
# There are three books with rating of 4.0 and two of the books have
# the same price. Hence, the distinct removes one rating of 4.0
# from the results.
vals1 = Book.objects.values('rating', 'price').distinct().aggregate(result=Sum('rating'))
vals2 = Book.objects.aggregate(result=Sum('rating') - Value(4.0))
self.assertEqual(vals1, vals2)
class JoinPromotionTests(TestCase):
def test_ticket_21150(self):
b = Bravo.objects.create()
c = Charlie.objects.create(bravo=b)
qs = Charlie.objects.select_related('alfa').annotate(Count('bravo__charlie'))
self.assertQuerysetEqual(
qs, [c], lambda x: x)
self.assertIs(qs[0].alfa, None)
a = Alfa.objects.create()
c.alfa = a
c.save()
# Force re-evaluation
qs = qs.all()
self.assertQuerysetEqual(
qs, [c], lambda x: x)
self.assertEqual(qs[0].alfa, a)
def test_existing_join_not_promoted(self):
# No promotion for existing joins
qs = Charlie.objects.filter(alfa__name__isnull=False).annotate(Count('alfa__name'))
self.assertIn(' INNER JOIN ', str(qs.query))
        # Also, the existing join is unpromoted when doing filtering for an
        # already promoted join.
qs = Charlie.objects.annotate(Count('alfa__name')).filter(alfa__name__isnull=False)
self.assertIn(' INNER JOIN ', str(qs.query))
        # But, as the join is nullable, the first use by annotate will be LOUTER
qs = Charlie.objects.annotate(Count('alfa__name'))
self.assertIn(' LEFT OUTER JOIN ', str(qs.query))
def test_non_nullable_fk_not_promoted(self):
qs = Book.objects.annotate(Count('contact__name'))
self.assertIn(' INNER JOIN ', str(qs.query))
class SelfReferentialFKTests(TestCase):
def test_ticket_24748(self):
t1 = SelfRefFK.objects.create(name='t1')
SelfRefFK.objects.create(name='t2', parent=t1)
SelfRefFK.objects.create(name='t3', parent=t1)
self.assertQuerysetEqual(
SelfRefFK.objects.annotate(num_children=Count('children')).order_by('name'),
[('t1', 2), ('t2', 0), ('t3', 0)],
lambda x: (x.name, x.num_children)
)
| bsd-3-clause | 7,692,754,123,880,174,000 | 40.217625 | 252 | 0.574393 | false |
M3nin0/supreme-broccoli | Web/Flask/site_/lib/python3.5/site-packages/sqlalchemy/ext/declarative/clsregistry.py | 55 | 10817 | # ext/declarative/clsregistry.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Routines to handle the string class registry used by declarative.
This system allows specification of classes and expressions used in
:func:`.relationship` using strings.
"""
from ...orm.properties import ColumnProperty, RelationshipProperty, \
SynonymProperty
from ...schema import _get_table_key
from ...orm import class_mapper, interfaces
from ... import util
from ... import inspection
from ... import exc
import weakref
# strong references to registries which we place in
# the _decl_class_registry, which is usually weak referencing.
# the internal registries here link to classes with weakrefs and remove
# themselves when all references to contained classes are removed.
_registries = set()
def add_class(classname, cls):
"""Add a class to the _decl_class_registry associated with the
given declarative class.
"""
if classname in cls._decl_class_registry:
# class already exists.
existing = cls._decl_class_registry[classname]
if not isinstance(existing, _MultipleClassMarker):
existing = \
cls._decl_class_registry[classname] = \
_MultipleClassMarker([cls, existing])
else:
cls._decl_class_registry[classname] = cls
try:
root_module = cls._decl_class_registry['_sa_module_registry']
except KeyError:
cls._decl_class_registry['_sa_module_registry'] = \
root_module = _ModuleMarker('_sa_module_registry', None)
tokens = cls.__module__.split(".")
# build up a tree like this:
# modulename: myapp.snacks.nuts
#
# myapp->snack->nuts->(classes)
# snack->nuts->(classes)
# nuts->(classes)
#
# this allows partial token paths to be used.
while tokens:
token = tokens.pop(0)
module = root_module.get_module(token)
for token in tokens:
module = module.get_module(token)
module.add_class(classname, cls)
class _MultipleClassMarker(object):
"""refers to multiple classes of the same name
within _decl_class_registry.
"""
__slots__ = 'on_remove', 'contents', '__weakref__'
def __init__(self, classes, on_remove=None):
self.on_remove = on_remove
self.contents = set([
weakref.ref(item, self._remove_item) for item in classes])
_registries.add(self)
def __iter__(self):
return (ref() for ref in self.contents)
def attempt_get(self, path, key):
if len(self.contents) > 1:
raise exc.InvalidRequestError(
"Multiple classes found for path \"%s\" "
"in the registry of this declarative "
"base. Please use a fully module-qualified path." %
(".".join(path + [key]))
)
else:
ref = list(self.contents)[0]
cls = ref()
if cls is None:
raise NameError(key)
return cls
def _remove_item(self, ref):
self.contents.remove(ref)
if not self.contents:
_registries.discard(self)
if self.on_remove:
self.on_remove()
def add_item(self, item):
# protect against class registration race condition against
# asynchronous garbage collection calling _remove_item,
# [ticket:3208]
modules = set([
cls.__module__ for cls in
[ref() for ref in self.contents] if cls is not None])
if item.__module__ in modules:
util.warn(
"This declarative base already contains a class with the "
"same class name and module name as %s.%s, and will "
"be replaced in the string-lookup table." % (
item.__module__,
item.__name__
)
)
self.contents.add(weakref.ref(item, self._remove_item))
class _ModuleMarker(object):
""""refers to a module name within
_decl_class_registry.
"""
__slots__ = 'parent', 'name', 'contents', 'mod_ns', 'path', '__weakref__'
def __init__(self, name, parent):
self.parent = parent
self.name = name
self.contents = {}
self.mod_ns = _ModNS(self)
if self.parent:
self.path = self.parent.path + [self.name]
else:
self.path = []
_registries.add(self)
def __contains__(self, name):
return name in self.contents
def __getitem__(self, name):
return self.contents[name]
def _remove_item(self, name):
self.contents.pop(name, None)
if not self.contents and self.parent is not None:
self.parent._remove_item(self.name)
_registries.discard(self)
def resolve_attr(self, key):
return getattr(self.mod_ns, key)
def get_module(self, name):
if name not in self.contents:
marker = _ModuleMarker(name, self)
self.contents[name] = marker
else:
marker = self.contents[name]
return marker
def add_class(self, name, cls):
if name in self.contents:
existing = self.contents[name]
existing.add_item(cls)
else:
existing = self.contents[name] = \
_MultipleClassMarker([cls],
on_remove=lambda: self._remove_item(name))
class _ModNS(object):
__slots__ = '__parent',
def __init__(self, parent):
self.__parent = parent
def __getattr__(self, key):
try:
value = self.__parent.contents[key]
except KeyError:
pass
else:
if value is not None:
if isinstance(value, _ModuleMarker):
return value.mod_ns
else:
assert isinstance(value, _MultipleClassMarker)
return value.attempt_get(self.__parent.path, key)
raise AttributeError("Module %r has no mapped classes "
"registered under the name %r" % (
self.__parent.name, key))
class _GetColumns(object):
__slots__ = 'cls',
def __init__(self, cls):
self.cls = cls
def __getattr__(self, key):
mp = class_mapper(self.cls, configure=False)
if mp:
if key not in mp.all_orm_descriptors:
raise exc.InvalidRequestError(
"Class %r does not have a mapped column named %r"
% (self.cls, key))
desc = mp.all_orm_descriptors[key]
if desc.extension_type is interfaces.NOT_EXTENSION:
prop = desc.property
if isinstance(prop, SynonymProperty):
key = prop.name
elif not isinstance(prop, ColumnProperty):
raise exc.InvalidRequestError(
"Property %r is not an instance of"
" ColumnProperty (i.e. does not correspond"
" directly to a Column)." % key)
return getattr(self.cls, key)
inspection._inspects(_GetColumns)(
lambda target: inspection.inspect(target.cls))
class _GetTable(object):
__slots__ = 'key', 'metadata'
def __init__(self, key, metadata):
self.key = key
self.metadata = metadata
def __getattr__(self, key):
return self.metadata.tables[
_get_table_key(key, self.key)
]
def _determine_container(key, value):
if isinstance(value, _MultipleClassMarker):
value = value.attempt_get([], key)
return _GetColumns(value)
class _class_resolver(object):
def __init__(self, cls, prop, fallback, arg):
self.cls = cls
self.prop = prop
self.arg = self._declarative_arg = arg
self.fallback = fallback
self._dict = util.PopulateDict(self._access_cls)
self._resolvers = ()
def _access_cls(self, key):
cls = self.cls
if key in cls._decl_class_registry:
return _determine_container(key, cls._decl_class_registry[key])
elif key in cls.metadata.tables:
return cls.metadata.tables[key]
elif key in cls.metadata._schemas:
return _GetTable(key, cls.metadata)
elif '_sa_module_registry' in cls._decl_class_registry and \
key in cls._decl_class_registry['_sa_module_registry']:
registry = cls._decl_class_registry['_sa_module_registry']
return registry.resolve_attr(key)
elif self._resolvers:
for resolv in self._resolvers:
value = resolv(key)
if value is not None:
return value
return self.fallback[key]
def __call__(self):
try:
x = eval(self.arg, globals(), self._dict)
if isinstance(x, _GetColumns):
return x.cls
else:
return x
except NameError as n:
raise exc.InvalidRequestError(
"When initializing mapper %s, expression %r failed to "
"locate a name (%r). If this is a class name, consider "
"adding this relationship() to the %r class after "
"both dependent classes have been defined." %
(self.prop.parent, self.arg, n.args[0], self.cls)
)
def _resolver(cls, prop):
import sqlalchemy
from sqlalchemy.orm import foreign, remote
fallback = sqlalchemy.__dict__.copy()
fallback.update({'foreign': foreign, 'remote': remote})
def resolve_arg(arg):
return _class_resolver(cls, prop, fallback, arg)
return resolve_arg
def _deferred_relationship(cls, prop):
if isinstance(prop, RelationshipProperty):
resolve_arg = _resolver(cls, prop)
for attr in ('argument', 'order_by', 'primaryjoin', 'secondaryjoin',
'secondary', '_user_defined_foreign_keys', 'remote_side'):
v = getattr(prop, attr)
if isinstance(v, util.string_types):
setattr(prop, attr, resolve_arg(v))
if prop.backref and isinstance(prop.backref, tuple):
key, kwargs = prop.backref
for attr in ('primaryjoin', 'secondaryjoin', 'secondary',
'foreign_keys', 'remote_side', 'order_by'):
if attr in kwargs and isinstance(kwargs[attr],
util.string_types):
kwargs[attr] = resolve_arg(kwargs[attr])
return prop
| apache-2.0 | 7,615,088,361,561,003,000 | 31.978659 | 79 | 0.567902 | false |
poo12138/gem5-stable | src/sim/probe/Probe.py | 62 | 2370 | # -*- mode:python -*-
# Copyright (c) 2013 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Matt Horsnell
from m5.SimObject import SimObject
from m5.params import *
from m5.proxy import *
class ProbeListenerObject(SimObject):
type = 'ProbeListenerObject'
cxx_header = 'sim/probe/probe.hh'
manager = Param.SimObject(Parent.any, "ProbeManager")
| bsd-3-clause | 8,322,683,299,318,439,000 | 49.425532 | 72 | 0.788186 | false |
mattesno1/CouchPotatoServer | couchpotato/core/settings.py | 42 | 8457 | from __future__ import with_statement
import ConfigParser
from hashlib import md5
from CodernityDB.hash_index import HashIndex
from couchpotato.api import addApiView
from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.variable import mergeDicts, tryInt, tryFloat
class Settings(object):
options = {}
types = {}
def __init__(self):
addApiView('settings', self.view, docs = {
'desc': 'Return the options and its values of settings.conf. Including the default values and group ordering used on the settings page.',
'return': {'type': 'object', 'example': """{
// objects like in __init__.py of plugin
"options": {
"moovee" : {
"groups" : [{
"description" : "SD movies only",
"name" : "#alt.binaries.moovee",
"options" : [{
"default" : false,
"name" : "enabled",
"type" : "enabler"
}],
"tab" : "providers"
}],
"name" : "moovee"
}
},
// object structured like settings.conf
"values": {
"moovee": {
"enabled": false
}
}
}"""}
})
addApiView('settings.save', self.saveView, docs = {
'desc': 'Save setting to config file (settings.conf)',
'params': {
'section': {'desc': 'The section name in settings.conf'},
'name': {'desc': 'The option name'},
'value': {'desc': 'The value you want to save'},
}
})
addEvent('database.setup', self.databaseSetup)
self.file = None
self.p = None
self.log = None
def setFile(self, config_file):
self.file = config_file
self.p = ConfigParser.RawConfigParser()
self.p.read(config_file)
from couchpotato.core.logger import CPLog
self.log = CPLog(__name__)
self.connectEvents()
def databaseSetup(self):
fireEvent('database.setup_index', 'property', PropertyIndex)
def parser(self):
return self.p
def sections(self):
return self.p.sections()
def connectEvents(self):
addEvent('settings.options', self.addOptions)
addEvent('settings.register', self.registerDefaults)
addEvent('settings.save', self.save)
def registerDefaults(self, section_name, options = None, save = True):
if not options: options = {}
self.addSection(section_name)
for option_name, option in options.items():
self.setDefault(section_name, option_name, option.get('default', ''))
# Migrate old settings from old location to the new location
if option.get('migrate_from'):
if self.p.has_option(option.get('migrate_from'), option_name):
previous_value = self.p.get(option.get('migrate_from'), option_name)
self.p.set(section_name, option_name, previous_value)
self.p.remove_option(option.get('migrate_from'), option_name)
if option.get('type'):
self.setType(section_name, option_name, option.get('type'))
if save:
self.save()
def set(self, section, option, value):
return self.p.set(section, option, value)
def get(self, option = '', section = 'core', default = None, type = None):
try:
try: type = self.types[section][option]
except: type = 'unicode' if not type else type
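            # Dispatch to a type-specific getter (getBool, getInt, getFloat,
            # getEnabler, getUnicode) when one is registered for this option.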
if hasattr(self, 'get%s' % type.capitalize()):
return getattr(self, 'get%s' % type.capitalize())(section, option)
else:
return self.getUnicode(section, option)
except:
return default
def delete(self, option = '', section = 'core'):
self.p.remove_option(section, option)
self.save()
def getEnabler(self, section, option):
return self.getBool(section, option)
def getBool(self, section, option):
try:
return self.p.getboolean(section, option)
except:
return self.p.get(section, option) == 1
def getInt(self, section, option):
try:
return self.p.getint(section, option)
except:
return tryInt(self.p.get(section, option))
def getFloat(self, section, option):
try:
return self.p.getfloat(section, option)
except:
return tryFloat(self.p.get(section, option))
def getUnicode(self, section, option):
value = self.p.get(section, option).decode('unicode_escape')
return toUnicode(value).strip()
def getValues(self):
values = {}
for section in self.sections():
values[section] = {}
for option in self.p.items(section):
(option_name, option_value) = option
is_password = False
try: is_password = self.types[section][option_name] == 'password'
except: pass
values[section][option_name] = self.get(option_name, section)
if is_password and values[section][option_name]:
values[section][option_name] = len(values[section][option_name]) * '*'
return values
def save(self):
with open(self.file, 'wb') as configfile:
self.p.write(configfile)
self.log.debug('Saved settings')
def addSection(self, section):
if not self.p.has_section(section):
self.p.add_section(section)
def setDefault(self, section, option, value):
if not self.p.has_option(section, option):
self.p.set(section, option, value)
def setType(self, section, option, type):
if not self.types.get(section):
self.types[section] = {}
self.types[section][option] = type
def addOptions(self, section_name, options):
if not self.options.get(section_name):
self.options[section_name] = options
else:
self.options[section_name] = mergeDicts(self.options[section_name], options)
def getOptions(self):
return self.options
def view(self, **kwargs):
return {
'options': self.getOptions(),
'values': self.getValues()
}
def saveView(self, **kwargs):
section = kwargs.get('section')
option = kwargs.get('name')
value = kwargs.get('value')
# See if a value handler is attached, use that as value
new_value = fireEvent('setting.save.%s.%s' % (section, option), value, single = True)
self.set(section, option, (new_value if new_value else value).encode('unicode_escape'))
self.save()
# After save (for re-interval etc)
fireEvent('setting.save.%s.%s.after' % (section, option), single = True)
fireEvent('setting.save.%s.*.after' % section, single = True)
return {
'success': True,
}
def getProperty(self, identifier):
from couchpotato import get_db
db = get_db()
prop = None
try:
propert = db.get('property', identifier, with_doc = True)
prop = propert['doc']['value']
except:
pass # self.log.debug('Property "%s" doesn\'t exist: %s', (identifier, traceback.format_exc(0)))
return prop
def setProperty(self, identifier, value = ''):
from couchpotato import get_db
db = get_db()
try:
p = db.get('property', identifier, with_doc = True)
p['doc'].update({
'identifier': identifier,
'value': toUnicode(value),
})
db.update(p['doc'])
except:
db.insert({
'_t': 'property',
'identifier': identifier,
'value': toUnicode(value),
})
class PropertyIndex(HashIndex):
_version = 1
def __init__(self, *args, **kwargs):
kwargs['key_format'] = '32s'
super(PropertyIndex, self).__init__(*args, **kwargs)
def make_key(self, key):
return md5(key).hexdigest()
def make_key_value(self, data):
if data.get('_t') == 'property':
return md5(data['identifier']).hexdigest(), None
| gpl-3.0 | -8,622,863,118,209,742,000 | 30.091912 | 149 | 0.55729 | false |
Kazade/NeHe-Website | google_appengine/lib/cherrypy/cherrypy/process/win32.py | 93 | 5870 | """Windows service. Requires pywin32."""
import os
import win32api
import win32con
import win32event
import win32service
import win32serviceutil
from cherrypy.process import wspbus, plugins
class ConsoleCtrlHandler(plugins.SimplePlugin):
"""A WSPBus plugin for handling Win32 console events (like Ctrl-C)."""
def __init__(self, bus):
self.is_set = False
plugins.SimplePlugin.__init__(self, bus)
def start(self):
if self.is_set:
self.bus.log('Handler for console events already set.', level=40)
return
result = win32api.SetConsoleCtrlHandler(self.handle, 1)
if result == 0:
self.bus.log('Could not SetConsoleCtrlHandler (error %r)' %
win32api.GetLastError(), level=40)
else:
self.bus.log('Set handler for console events.', level=40)
self.is_set = True
def stop(self):
if not self.is_set:
self.bus.log('Handler for console events already off.', level=40)
return
try:
result = win32api.SetConsoleCtrlHandler(self.handle, 0)
except ValueError:
# "ValueError: The object has not been registered"
result = 1
if result == 0:
self.bus.log('Could not remove SetConsoleCtrlHandler (error %r)' %
win32api.GetLastError(), level=40)
else:
self.bus.log('Removed handler for console events.', level=40)
self.is_set = False
def handle(self, event):
"""Handle console control events (like Ctrl-C)."""
if event in (win32con.CTRL_C_EVENT, win32con.CTRL_LOGOFF_EVENT,
win32con.CTRL_BREAK_EVENT, win32con.CTRL_SHUTDOWN_EVENT,
win32con.CTRL_CLOSE_EVENT):
self.bus.log('Console event %s: shutting down bus' % event)
# Remove self immediately so repeated Ctrl-C doesn't re-call it.
try:
self.stop()
except ValueError:
pass
self.bus.exit()
# 'First to return True stops the calls'
return 1
return 0
class Win32Bus(wspbus.Bus):
"""A Web Site Process Bus implementation for Win32.
Instead of time.sleep, this bus blocks using native win32event objects.
"""
def __init__(self):
self.events = {}
wspbus.Bus.__init__(self)
def _get_state_event(self, state):
"""Return a win32event for the given state (creating it if needed)."""
try:
return self.events[state]
except KeyError:
event = win32event.CreateEvent(None, 0, 0,
"WSPBus %s Event (pid=%r)" %
(state.name, os.getpid()))
self.events[state] = event
return event
def _get_state(self):
return self._state
def _set_state(self, value):
self._state = value
event = self._get_state_event(value)
win32event.PulseEvent(event)
state = property(_get_state, _set_state)
def wait(self, state, interval=0.1, channel=None):
"""Wait for the given state(s), KeyboardInterrupt or SystemExit.
Since this class uses native win32event objects, the interval
argument is ignored.
"""
if isinstance(state, (tuple, list)):
# Don't wait for an event that beat us to the punch ;)
if self.state not in state:
events = tuple([self._get_state_event(s) for s in state])
win32event.WaitForMultipleObjects(events, 0, win32event.INFINITE)
else:
# Don't wait for an event that beat us to the punch ;)
if self.state != state:
event = self._get_state_event(state)
win32event.WaitForSingleObject(event, win32event.INFINITE)
class _ControlCodes(dict):
"""Control codes used to "signal" a service via ControlService.
User-defined control codes are in the range 128-255. We generally use
the standard Python value for the Linux signal and add 128. Example:
>>> signal.SIGUSR1
10
control_codes['graceful'] = 128 + 10
"""
def key_for(self, obj):
"""For the given value, return its corresponding key."""
for key, val in self.items():
if val is obj:
return key
raise ValueError("The given object could not be found: %r" % obj)
control_codes = _ControlCodes({'graceful': 138})
def signal_child(service, command):
if command == 'stop':
win32serviceutil.StopService(service)
elif command == 'restart':
win32serviceutil.RestartService(service)
else:
win32serviceutil.ControlService(service, control_codes[command])
class PyWebService(win32serviceutil.ServiceFramework):
"""Python Web Service."""
_svc_name_ = "Python Web Service"
_svc_display_name_ = "Python Web Service"
_svc_deps_ = None # sequence of service names on which this depends
_exe_name_ = "pywebsvc"
_exe_args_ = None # Default to no arguments
# Only exists on Windows 2000 or later, ignored on windows NT
_svc_description_ = "Python Web Service"
def SvcDoRun(self):
from cherrypy import process
process.bus.start()
process.bus.block()
def SvcStop(self):
from cherrypy import process
self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
process.bus.exit()
    def SvcOther(self, control):
        # 'process' is imported locally here, as in SvcDoRun/SvcStop above.
        from cherrypy import process
        process.bus.publish(control_codes.key_for(control))
if __name__ == '__main__':
win32serviceutil.HandleCommandLine(PyWebService)
| bsd-3-clause | 1,845,480,469,559,153,000 | 32.735632 | 81 | 0.58569 | false |
Cuuuurzel/KiPyCalc | sympy/diffgeom/tests/test_hyperbolic_space.py | 74 | 2447 | '''
Unit test describing the hyperbolic half-plane with the Poincare metric. This
is a basic model of hyperbolic geometry on the (upper) half-plane
{(x, y) in R^2 | y > 0}
with the Riemannian metric
ds^2 = (dx^2 + dy^2)/y^2
It has constant negative scalar curvature = -2
https://en.wikipedia.org/wiki/Poincare_half-plane_model
'''
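# Reference for the assertions in test_H2 below: with g = diag(y**-2, y**-2),
# the only nonzero Christoffel symbols of the second kind are
# Gamma^x_xy = Gamma^x_yx = Gamma^y_yy = -1/y and Gamma^y_xx = 1/y.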
from sympy import diag
from sympy.diffgeom import (twoform_to_matrix,
metric_to_Christoffel_1st, metric_to_Christoffel_2nd,
metric_to_Riemann_components, metric_to_Ricci_components)
import sympy.diffgeom.rn
def test_H2():
TP = sympy.diffgeom.TensorProduct
R2 = sympy.diffgeom.rn.R2
y = R2.y
dy = R2.dy
dx = R2.dx
g = (TP(dx, dx) + TP(dy, dy))*y**(-2)
automat = twoform_to_matrix(g)
mat = diag(y**(-2), y**(-2))
assert mat == automat
gamma1 = metric_to_Christoffel_1st(g)
assert gamma1[0][0][0] == 0
assert gamma1[0][0][1] == -y**(-3)
assert gamma1[0][1][0] == -y**(-3)
assert gamma1[0][1][1] == 0
assert gamma1[1][1][1] == -y**(-3)
assert gamma1[1][1][0] == 0
assert gamma1[1][0][1] == 0
assert gamma1[1][0][0] == y**(-3)
gamma2 = metric_to_Christoffel_2nd(g)
assert gamma2[0][0][0] == 0
assert gamma2[0][0][1] == -y**(-1)
assert gamma2[0][1][0] == -y**(-1)
assert gamma2[0][1][1] == 0
assert gamma2[1][1][1] == -y**(-1)
assert gamma2[1][1][0] == 0
assert gamma2[1][0][1] == 0
assert gamma2[1][0][0] == y**(-1)
Rm = metric_to_Riemann_components(g)
assert Rm[0][0][0][0] == 0
assert Rm[0][0][0][1] == 0
assert Rm[0][0][1][0] == 0
assert Rm[0][0][1][1] == 0
assert Rm[0][1][0][0] == 0
assert Rm[0][1][0][1] == -y**(-2)
assert Rm[0][1][1][0] == y**(-2)
assert Rm[0][1][1][1] == 0
assert Rm[1][0][0][0] == 0
assert Rm[1][0][0][1] == y**(-2)
assert Rm[1][0][1][0] == -y**(-2)
assert Rm[1][0][1][1] == 0
assert Rm[1][1][0][0] == 0
assert Rm[1][1][0][1] == 0
assert Rm[1][1][1][0] == 0
assert Rm[1][1][1][1] == 0
Ric = metric_to_Ricci_components(g)
assert Ric[0][0] == -y**(-2)
assert Ric[0][1] == 0
assert Ric[1][0] == 0
assert Ric[0][0] == -y**(-2)
## scalar curvature is -2
#TODO - it would be nice to have index contraction built-in
R = (Ric[0][0] + Ric[1][1])*y**2
assert R == -2
## Gauss curvature is -1
assert R/2 == -1
| mit | 4,280,191,580,177,619,500 | 27.126437 | 85 | 0.531671 | false |
ak2703/edx-platform | common/lib/symmath/symmath/symmath_check.py | 126 | 12542 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# File: symmath_check.py
# Date: 02-May-12 (creation)
#
# Symbolic mathematical expression checker for edX. Uses sympy to check for expression equality.
#
# Takes in math expressions given as Presentation MathML (from ASCIIMathML), converts to Content MathML using SnuggleTeX
import traceback
from .formula import *
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# check function interface
#
# This is one of the main entry points to call.
def symmath_check_simple(expect, ans, adict={}, symtab=None, extra_options=None):
"""
Check a symbolic mathematical expression using sympy.
The input is an ascii string (not MathML) converted to math using sympy.sympify.
"""
options = {'__MATRIX__': False, '__ABC__': False, '__LOWER__': False}
if extra_options:
options.update(extra_options)
for op in options: # find options in expect string
if op in expect:
expect = expect.replace(op, '')
options[op] = True
expect = expect.replace('__OR__', '__or__') # backwards compatibility
if options['__LOWER__']:
expect = expect.lower()
ans = ans.lower()
try:
ret = check(expect, ans,
matrix=options['__MATRIX__'],
abcsym=options['__ABC__'],
symtab=symtab,
)
except Exception, err:
return {'ok': False,
'msg': 'Error %s<br/>Failed in evaluating check(%s,%s)' % (err, expect, ans)
}
return ret
#-----------------------------------------------------------------------------
# pretty generic checking function
def check(expect, given, numerical=False, matrix=False, normphase=False, abcsym=False, do_qubit=True, symtab=None, dosimplify=False):
"""
Returns dict with
'ok': True if check is good, False otherwise
'msg': response message (in HTML)
"expect" may have multiple possible acceptable answers, separated by "__OR__"
"""
if "__or__" in expect: # if multiple acceptable answers
eset = expect.split('__or__') # then see if any match
for eone in eset:
ret = check(eone, given, numerical, matrix, normphase, abcsym, do_qubit, symtab, dosimplify)
if ret['ok']:
return ret
return ret
flags = {}
if "__autonorm__" in expect:
flags['autonorm'] = True
expect = expect.replace('__autonorm__', '')
matrix = True
threshold = 1.0e-3
if "__threshold__" in expect:
(expect, st) = expect.split('__threshold__')
threshold = float(st)
numerical = True
if str(given) == '' and not str(expect) == '':
return {'ok': False, 'msg': ''}
try:
xgiven = my_sympify(given, normphase, matrix, do_qubit=do_qubit, abcsym=abcsym, symtab=symtab)
except Exception, err:
return {'ok': False, 'msg': 'Error %s<br/> in evaluating your expression "%s"' % (err, given)}
try:
xexpect = my_sympify(expect, normphase, matrix, do_qubit=do_qubit, abcsym=abcsym, symtab=symtab)
except Exception, err:
return {'ok': False, 'msg': 'Error %s<br/> in evaluating OUR expression "%s"' % (err, expect)}
if 'autonorm' in flags: # normalize trace of matrices
try:
xgiven /= xgiven.trace()
except Exception, err:
return {'ok': False, 'msg': 'Error %s<br/> in normalizing trace of your expression %s' % (err, to_latex(xgiven))}
try:
xexpect /= xexpect.trace()
except Exception, err:
return {'ok': False, 'msg': 'Error %s<br/> in normalizing trace of OUR expression %s' % (err, to_latex(xexpect))}
msg = 'Your expression was evaluated as ' + to_latex(xgiven)
# msg += '<br/>Expected ' + to_latex(xexpect)
# msg += "<br/>flags=%s" % flags
if matrix and numerical:
xgiven = my_evalf(xgiven, chop=True)
dm = my_evalf(sympy.Matrix(xexpect) - sympy.Matrix(xgiven), chop=True)
msg += " = " + to_latex(xgiven)
if abs(dm.vec().norm().evalf()) < threshold:
return {'ok': True, 'msg': msg}
else:
pass
#msg += "dm = " + to_latex(dm) + " diff = " + str(abs(dm.vec().norm().evalf()))
#msg += "expect = " + to_latex(xexpect)
elif dosimplify:
if sympy.simplify(xexpect) == sympy.simplify(xgiven):
return {'ok': True, 'msg': msg}
elif numerical:
if abs((xexpect - xgiven).evalf(chop=True)) < threshold:
return {'ok': True, 'msg': msg}
elif xexpect == xgiven:
return {'ok': True, 'msg': msg}
#msg += "<p/>expect='%s', given='%s'" % (expect,given) # debugging
# msg += "<p/> dot test " + to_latex(dot(sympy.Symbol('x'),sympy.Symbol('y')))
return {'ok': False, 'msg': msg}
#-----------------------------------------------------------------------------
# helper function to convert all <p> to <span class='inline-error'>
def make_error_message(msg):
# msg = msg.replace('<p>','<p><span class="inline-error">').replace('</p>','</span></p>')
msg = '<div class="capa_alert">%s</div>' % msg
return msg
def is_within_tolerance(expected, actual, tolerance):
if expected == 0:
return abs(actual) < tolerance
else:
return abs(abs(actual - expected) / expected) < tolerance
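# Note: the comparison is absolute when expected == 0 and relative otherwise,
# e.g. is_within_tolerance(100.0, 100.05, 1e-3) is True while
# is_within_tolerance(0, 0.05, 1e-3) is False.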
#-----------------------------------------------------------------------------
# Check function interface, which takes pmathml input
#
# This is one of the main entry points to call.
def symmath_check(expect, ans, dynamath=None, options=None, debug=None, xml=None):
"""
Check a symbolic mathematical expression using sympy.
The input may be presentation MathML. Uses formula.
This is the default Symbolic Response checking function
Desc of args:
expect is a sympy string representing the correct answer. It is interpreted
using my_sympify (from formula.py), which reads strings as sympy input
    (e.g. 'integrate(x^2, (x,1,2))' would be valid, and evaluate to give 7/3)
    ans is the student-typed answer. It is expected to be ascii math, but the code
below would support a sympy string.
dynamath is the PMathML string converted by MathJax. It is used if
evaluation with ans is not sufficient.
options is a string with these possible substrings, set as an xml property
of the problem:
-matrix - make a sympy matrix, rather than a list of lists, if possible
-qubit - passed to my_sympify
    -imaginary - used in formula, presumably to signal to use i as sqrt(-1)?
-numerical - force numerical comparison.
"""
msg = ''
# msg += '<p/>abname=%s' % abname
# msg += '<p/>adict=%s' % (repr(adict).replace('<','<'))
threshold = 1.0e-3 # for numerical comparison (also with matrices)
DEBUG = debug
if xml is not None:
DEBUG = xml.get('debug', False) # override debug flag using attribute in symbolicmath xml
if DEBUG in ['0', 'False']:
DEBUG = False
# options
if options is None:
options = ''
do_matrix = 'matrix' in options
do_qubit = 'qubit' in options
do_numerical = 'numerical' in options
# parse expected answer
try:
fexpect = my_sympify(str(expect), matrix=do_matrix, do_qubit=do_qubit)
except Exception, err:
msg += '<p>Error %s in parsing OUR expected answer "%s"</p>' % (err, expect)
return {'ok': False, 'msg': make_error_message(msg)}
###### Sympy input #######
# if expected answer is a number, try parsing provided answer as a number also
try:
fans = my_sympify(str(ans), matrix=do_matrix, do_qubit=do_qubit)
except Exception, err:
fans = None
# do a numerical comparison if both expected and answer are numbers
if hasattr(fexpect, 'is_number') and fexpect.is_number \
and hasattr(fans, 'is_number') and fans.is_number:
if is_within_tolerance(fexpect, fans, threshold):
return {'ok': True, 'msg': msg}
else:
msg += '<p>You entered: %s</p>' % to_latex(fans)
return {'ok': False, 'msg': msg}
if do_numerical: # numerical answer expected - force numerical comparison
if is_within_tolerance(fexpect, fans, threshold):
return {'ok': True, 'msg': msg}
else:
msg += '<p>You entered: %s (note that a numerical answer is expected)</p>' % to_latex(fans)
return {'ok': False, 'msg': msg}
if fexpect == fans:
msg += '<p>You entered: %s</p>' % to_latex(fans)
return {'ok': True, 'msg': msg}
###### PMathML input ######
# convert mathml answer to formula
try:
mmlans = dynamath[0] if dynamath else None
except Exception, err:
mmlans = None
if not mmlans:
return {'ok': False, 'msg': '[symmath_check] failed to get MathML for input; dynamath=%s' % dynamath}
f = formula(mmlans, options=options)
# get sympy representation of the formula
# if DEBUG: msg += '<p/> mmlans=%s' % repr(mmlans).replace('<','<')
try:
fsym = f.sympy
msg += '<p>You entered: %s</p>' % to_latex(f.sympy)
except Exception, err:
log.exception("Error evaluating expression '%s' as a valid equation", ans)
msg += "<p>Error in evaluating your expression '%s' as a valid equation</p>" % (ans)
if "Illegal math" in str(err):
msg += "<p>Illegal math expression</p>"
if DEBUG:
msg += 'Error: %s' % str(err).replace('<', '<')
msg += '<hr>'
msg += '<p><font color="blue">DEBUG messages:</p>'
msg += "<p><pre>%s</pre></p>" % traceback.format_exc()
msg += '<p>cmathml=<pre>%s</pre></p>' % f.cmathml.replace('<', '<')
msg += '<p>pmathml=<pre>%s</pre></p>' % mmlans.replace('<', '<')
msg += '<hr>'
return {'ok': False, 'msg': make_error_message(msg)}
# do numerical comparison with expected
if hasattr(fexpect, 'is_number') and fexpect.is_number:
if hasattr(fsym, 'is_number') and fsym.is_number:
if abs(abs(fsym - fexpect) / fexpect) < threshold:
return {'ok': True, 'msg': msg}
return {'ok': False, 'msg': msg}
msg += "<p>Expecting a numerical answer!</p>"
msg += "<p>given = %s</p>" % repr(ans)
msg += "<p>fsym = %s</p>" % repr(fsym)
# msg += "<p>cmathml = <pre>%s</pre></p>" % str(f.cmathml).replace('<','<')
return {'ok': False, 'msg': make_error_message(msg)}
# Here is a good spot for adding calls to X.simplify() or X.expand(),
# allowing equivalence over binomial expansion or trig identities
# exactly the same?
if fexpect == fsym:
return {'ok': True, 'msg': msg}
if isinstance(fexpect, list):
try:
xgiven = my_evalf(fsym, chop=True)
dm = my_evalf(sympy.Matrix(fexpect) - sympy.Matrix(xgiven), chop=True)
if abs(dm.vec().norm().evalf()) < threshold:
return {'ok': True, 'msg': msg}
except sympy.ShapeError:
msg += "<p>Error - your input vector or matrix has the wrong dimensions"
return {'ok': False, 'msg': make_error_message(msg)}
except Exception, err:
msg += "<p>Error %s in comparing expected (a list) and your answer</p>" % str(err).replace('<', '<')
if DEBUG:
msg += "<p/><pre>%s</pre>" % traceback.format_exc()
return {'ok': False, 'msg': make_error_message(msg)}
#diff = (fexpect-fsym).simplify()
#fsym = fsym.simplify()
#fexpect = fexpect.simplify()
try:
diff = (fexpect - fsym)
except Exception, err:
diff = None
if DEBUG:
msg += '<hr>'
msg += '<p><font color="blue">DEBUG messages:</p>'
msg += "<p>Got: %s</p>" % repr(fsym)
# msg += "<p/>Got: %s" % str([type(x) for x in fsym.atoms()]).replace('<','<')
msg += "<p>Expecting: %s</p>" % repr(fexpect).replace('**', '^').replace('hat(I)', 'hat(i)')
# msg += "<p/>Expecting: %s" % str([type(x) for x in fexpect.atoms()]).replace('<','<')
if diff:
msg += "<p>Difference: %s</p>" % to_latex(diff)
msg += '<hr>'
# Used to return more keys: 'ex': fexpect, 'got': fsym
return {'ok': False, 'msg': msg}
| agpl-3.0 | -6,169,996,041,442,095,000 | 37.472393 | 133 | 0.560995 | false |
blaggacao/OpenUpgrade | addons/mrp_byproduct/__openerp__.py | 259 | 1819 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'MRP Byproducts',
'version': '1.0',
'category': 'Manufacturing',
'description': """
This module allows you to produce several products from one production order.
=============================================================================
You can configure by-products in the bill of material.
Without this module:
--------------------
A + B + C -> D
With this module:
-----------------
A + B + C -> D + E
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/manufacturing',
'depends': ['base', 'mrp'],
'data': [
'security/ir.model.access.csv',
'mrp_byproduct_view.xml'
],
'demo': [],
'test': ['test/mrp_byproduct.yml'],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 402,619,816,081,690,400 | 33.320755 | 78 | 0.560198 | false |
yqm/sl4a | python/src/Lib/plat-sunos5/SUNAUDIODEV.py | 66 | 1578 | # Symbolic constants for use with sunaudiodev module
# The names are the same as in audioio.h with the leading AUDIO_
# removed.
from warnings import warnpy3k
warnpy3k("the SUNAUDIODEV module has been removed in Python 3.0", stacklevel=2)
del warnpy3k
# Not all values are supported on all releases of SunOS.
# Encoding types, for fields i_encoding and o_encoding
ENCODING_NONE = 0 # no encoding assigned
ENCODING_ULAW = 1 # u-law encoding
ENCODING_ALAW = 2 # A-law encoding
ENCODING_LINEAR = 3 # Linear PCM encoding
# Gain ranges for i_gain, o_gain and monitor_gain
MIN_GAIN = 0 # minimum gain value
MAX_GAIN = 255 # maximum gain value
# Balance values for i_balance and o_balance
LEFT_BALANCE = 0 # left channel only
MID_BALANCE = 32 # equal left/right channel
RIGHT_BALANCE = 64 # right channel only
BALANCE_SHIFT = 3
# Port names for i_port and o_port
PORT_A = 1
PORT_B = 2
PORT_C = 3
PORT_D = 4
SPEAKER = 0x01 # output to built-in speaker
HEADPHONE = 0x02 # output to headphone jack
LINE_OUT = 0x04 # output to line out
MICROPHONE = 0x01 # input from microphone
LINE_IN = 0x02 # input from line in
CD = 0x04 # input from on-board CD inputs
INTERNAL_CD_IN = CD # input from internal CDROM
| apache-2.0 | -6,704,747,245,499,558,000 | 35.697674 | 79 | 0.564639 | false |
ericgarrigues/ansible-modules-extras | system/locale_gen.py | 20 | 6736 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import os.path
from subprocess import Popen, PIPE, call
import re
DOCUMENTATION = '''
---
module: locale_gen
short_description: Creates or removes locales.
description:
- Manages locales by editing /etc/locale.gen and invoking locale-gen.
version_added: "1.6"
author: "Augustus Kling (@AugustusKling)"
options:
name:
description:
- Name and encoding of the locale, such as "en_GB.UTF-8".
required: true
default: null
aliases: []
state:
description:
- Whether the locale shall be present.
required: false
choices: ["present", "absent"]
default: "present"
'''
EXAMPLES = '''
# Ensure a locale exists.
- locale_gen: name=de_CH.UTF-8 state=present
'''
LOCALE_NORMALIZATION = {
".utf8": ".UTF-8",
".eucjp": ".EUC-JP",
}
# ===========================================
# location module specific support methods.
#
def is_available(name, ubuntuMode):
"""Check if the given locale is available on the system. This is done by
    checking either:
    * if the locale is present in /etc/locale.gen
* or if the locale is present in /usr/share/i18n/SUPPORTED"""
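    # Lines being matched look like, e.g. (the first form is a commented-out
    # entry in /etc/locale.gen):
    #   # de_CH.UTF-8 UTF-8
    #   en_GB.UTF-8 UTF-8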
if ubuntuMode:
__regexp = '^(?P<locale>\S+_\S+) (?P<charset>\S+)\s*$'
__locales_available = '/usr/share/i18n/SUPPORTED'
else:
__regexp = '^#{0,1}\s*(?P<locale>\S+_\S+) (?P<charset>\S+)\s*$'
__locales_available = '/etc/locale.gen'
re_compiled = re.compile(__regexp)
fd = open(__locales_available, 'r')
for line in fd:
result = re_compiled.match(line)
if result and result.group('locale') == name:
return True
fd.close()
return False
def is_present(name):
"""Checks if the given locale is currently installed."""
output = Popen(["locale", "-a"], stdout=PIPE).communicate()[0]
return any(fix_case(name) == fix_case(line) for line in output.splitlines())
def fix_case(name):
"""locale -a might return the encoding in either lower or upper case.
Passing through this function makes them uniform for comparisons."""
for s, r in LOCALE_NORMALIZATION.iteritems():
name = name.replace(s, r)
return name
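# For example, fix_case('de_CH.utf8') and fix_case('de_CH.UTF-8') both yield
# 'de_CH.UTF-8', so the two spellings compare equal in is_present().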
def replace_line(existing_line, new_line):
"""Replaces lines in /etc/locale.gen"""
try:
f = open("/etc/locale.gen", "r")
lines = [line.replace(existing_line, new_line) for line in f]
finally:
f.close()
try:
f = open("/etc/locale.gen", "w")
f.write("".join(lines))
finally:
f.close()
def set_locale(name, enabled=True):
""" Sets the state of the locale. Defaults to enabled. """
search_string = '#{0,1}\s*%s (?P<charset>.+)' % name
if enabled:
new_string = '%s \g<charset>' % (name)
else:
new_string = '# %s \g<charset>' % (name)
try:
f = open("/etc/locale.gen", "r")
lines = [re.sub(search_string, new_string, line) for line in f]
finally:
f.close()
try:
f = open("/etc/locale.gen", "w")
f.write("".join(lines))
finally:
f.close()
def apply_change(targetState, name):
"""Create or remove locale.
Keyword arguments:
targetState -- Desired state, either present or absent.
name -- Name including encoding such as de_CH.UTF-8.
"""
if targetState=="present":
# Create locale.
set_locale(name, enabled=True)
else:
# Delete locale.
set_locale(name, enabled=False)
localeGenExitValue = call("locale-gen")
if localeGenExitValue!=0:
        raise EnvironmentError(localeGenExitValue, "locale-gen failed to execute, it returned "+str(localeGenExitValue))
def apply_change_ubuntu(targetState, name):
"""Create or remove locale.
Keyword arguments:
targetState -- Desired state, either present or absent.
name -- Name including encoding such as de_CH.UTF-8.
"""
if targetState=="present":
# Create locale.
# Ubuntu's patched locale-gen automatically adds the new locale to /var/lib/locales/supported.d/local
localeGenExitValue = call(["locale-gen", name])
else:
# Delete locale involves discarding the locale from /var/lib/locales/supported.d/local and regenerating all locales.
try:
f = open("/var/lib/locales/supported.d/local", "r")
content = f.readlines()
finally:
f.close()
try:
f = open("/var/lib/locales/supported.d/local", "w")
for line in content:
locale, charset = line.split(' ')
if locale != name:
f.write(line)
finally:
f.close()
# Purge locales and regenerate.
# Please provide a patch if you know how to avoid regenerating the locales to keep!
localeGenExitValue = call(["locale-gen", "--purge"])
if localeGenExitValue!=0:
        raise EnvironmentError(localeGenExitValue, "locale-gen failed to execute, it returned "+str(localeGenExitValue))
# ==============================================================
# main
def main():
module = AnsibleModule(
argument_spec = dict(
name = dict(required=True),
state = dict(choices=['present','absent'], default='present'),
),
supports_check_mode=True
)
name = module.params['name']
state = module.params['state']
if not os.path.exists("/etc/locale.gen"):
if os.path.exists("/var/lib/locales/supported.d/local"):
# Ubuntu created its own system to manage locales.
ubuntuMode = True
else:
module.fail_json(msg="/etc/locale.gen and /var/lib/locales/supported.d/local are missing. Is the package \"locales\" installed?")
else:
# We found the common way to manage locales.
ubuntuMode = False
if not is_available(name, ubuntuMode):
module.fail_json(msg="The locales you've entered is not available "
"on your system.")
if is_present(name):
prev_state = "present"
else:
prev_state = "absent"
changed = (prev_state!=state)
if module.check_mode:
module.exit_json(changed=changed)
else:
if changed:
try:
if ubuntuMode==False:
apply_change(state, name)
else:
apply_change_ubuntu(state, name)
except EnvironmentError, e:
module.fail_json(msg=e.strerror, exitValue=e.errno)
module.exit_json(name=name, changed=changed, msg="OK")
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 | 2,743,328,600,333,634,600 | 30.476636 | 141 | 0.588331 | false |
jegger/kivy | kivy/uix/bubble.py | 16 | 12684 | '''
Bubble
======
.. versionadded:: 1.1.0
.. image:: images/bubble.jpg
:align: right
The Bubble widget is a form of menu or a small popup where the menu options
are stacked either vertically or horizontally.
The :class:`Bubble` contains an arrow pointing in the direction you
choose.
Simple example
--------------
.. include:: ../../examples/widgets/bubble_test.py
:literal:
Customize the Bubble
--------------------
You can choose the direction in which the arrow points::
Bubble(arrow_pos='top_mid')
The widgets added to the Bubble are ordered horizontally by default, like a
Boxlayout. You can change that by::
orientation = 'vertical'
To add items to the bubble::
bubble = Bubble(orientation = 'vertical')
bubble.add_widget(your_widget_instance)
To remove items::
bubble.remove_widget(widget)
or
bubble.clear_widgets()
To access the list of children, use content.children::
bubble.content.children
.. warning::
This is important! Do not use bubble.children
To change the appearance of the bubble::
bubble.background_color = (1, 0, 0, .5) #50% translucent red
bubble.border = [0, 0, 0, 0]
background_image = 'path/to/background/image'
arrow_image = 'path/to/arrow/image'
'''
__all__ = ('Bubble', 'BubbleButton', 'BubbleContent')
from kivy.uix.image import Image
from kivy.uix.widget import Widget
from kivy.uix.scatter import Scatter
from kivy.uix.gridlayout import GridLayout
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.button import Button
from kivy.properties import ObjectProperty, StringProperty, OptionProperty, \
ListProperty, BooleanProperty
from kivy.clock import Clock
from kivy.base import EventLoop
from kivy.metrics import dp
class BubbleButton(Button):
'''A button intended for use in a Bubble widget.
You can use a "normal" button class, but it will not look good unless
the background is changed.
Rather use this BubbleButton widget that is already defined and provides a
suitable background for you.
'''
pass
class BubbleContent(GridLayout):
pass
class Bubble(GridLayout):
'''Bubble class. See module documentation for more information.
'''
background_color = ListProperty([1, 1, 1, 1])
'''Background color, in the format (r, g, b, a). To use it you have to set
either :attr:`background_image` or :attr:`arrow_image` first.
:attr:`background_color` is a :class:`~kivy.properties.ListProperty` and
defaults to [1, 1, 1, 1].
'''
border = ListProperty([16, 16, 16, 16])
'''Border used for :class:`~kivy.graphics.vertex_instructions.BorderImage`
graphics instruction. Used with the :attr:`background_image`.
It should be used when using custom backgrounds.
It must be a list of 4 values: (bottom, right, top, left). Read the
BorderImage instructions for more information about how to use it.
:attr:`border` is a :class:`~kivy.properties.ListProperty` and defaults to
(16, 16, 16, 16)
'''
background_image = StringProperty(
'atlas://data/images/defaulttheme/bubble')
'''Background image of the bubble.
:attr:`background_image` is a :class:`~kivy.properties.StringProperty` and
defaults to 'atlas://data/images/defaulttheme/bubble'.
'''
arrow_image = StringProperty(
'atlas://data/images/defaulttheme/bubble_arrow')
''' Image of the arrow pointing to the bubble.
:attr:`arrow_image` is a :class:`~kivy.properties.StringProperty` and
defaults to 'atlas://data/images/defaulttheme/bubble_arrow'.
'''
show_arrow = BooleanProperty(True)
''' Indicates whether to show arrow.
.. versionadded:: 1.8.0
:attr:`show_arrow` is a :class:`~kivy.properties.BooleanProperty` and
defaults to `True`.
'''
arrow_pos = OptionProperty('bottom_mid', options=(
'left_top', 'left_mid', 'left_bottom', 'top_left', 'top_mid',
'top_right', 'right_top', 'right_mid', 'right_bottom',
'bottom_left', 'bottom_mid', 'bottom_right'))
'''Specifies the position of the arrow relative to the bubble.
    Can be one of: left_top, left_mid, left_bottom, top_left, top_mid,
    top_right, right_top, right_mid, right_bottom, bottom_left, bottom_mid,
    bottom_right.
defaults to 'bottom_mid'.
'''
content = ObjectProperty(None)
'''This is the object where the main content of the bubble is held.
:attr:`content` is a :class:`~kivy.properties.ObjectProperty` and
defaults to 'None'.
'''
orientation = OptionProperty('horizontal',
options=('horizontal', 'vertical'))
'''This specifies the manner in which the children inside bubble
are arranged. Can be one of 'vertical' or 'horizontal'.
:attr:`orientation` is a :class:`~kivy.properties.OptionProperty` and
defaults to 'horizontal'.
'''
limit_to = ObjectProperty(None, allownone=True)
'''Specifies the widget to which the bubbles position is restricted.
.. versionadded:: 1.6.0
:attr:`limit_to` is a :class:`~kivy.properties.ObjectProperty` and
defaults to 'None'.
'''
def __init__(self, **kwargs):
self._prev_arrow_pos = None
self._arrow_layout = BoxLayout()
self._bk_img = Image(
source=self.background_image, allow_stretch=True,
keep_ratio=False, color=self.background_color)
self.background_texture = self._bk_img.texture
self._arrow_img = Image(source=self.arrow_image,
allow_stretch=True,
color=self.background_color)
self.content = content = BubbleContent(parent=self)
super(Bubble, self).__init__(**kwargs)
content.parent = None
self.add_widget(content)
self.on_arrow_pos()
def add_widget(self, *l):
content = self.content
if content is None:
return
if l[0] == content or l[0] == self._arrow_img\
or l[0] == self._arrow_layout:
super(Bubble, self).add_widget(*l)
else:
content.add_widget(*l)
def remove_widget(self, *l):
content = self.content
if not content:
return
if l[0] == content or l[0] == self._arrow_img\
or l[0] == self._arrow_layout:
super(Bubble, self).remove_widget(*l)
else:
content.remove_widget(l[0])
def clear_widgets(self, **kwargs):
content = self.content
if not content:
return
if kwargs.get('do_super', False):
super(Bubble, self).clear_widgets()
else:
content.clear_widgets()
def on_show_arrow(self, instance, value):
self._arrow_img.opacity = int(value)
def on_parent(self, instance, value):
Clock.schedule_once(self._update_arrow)
def on_pos(self, instance, pos):
lt = self.limit_to
if lt:
self.limit_to = None
if lt is EventLoop.window:
x = y = 0
top = lt.height
right = lt.width
else:
x, y = lt.x, lt.y
top, right = lt.top, lt.right
self.x = max(self.x, x)
self.right = min(self.right, right)
self.top = min(self.top, top)
self.y = max(self.y, y)
self.limit_to = lt
def on_background_image(self, *l):
self._bk_img.source = self.background_image
def on_background_color(self, *l):
if self.content is None:
return
self._arrow_img.color = self._bk_img.color = self.background_color
def on_orientation(self, *l):
content = self.content
if not content:
return
if self.orientation[0] == 'v':
content.cols = 1
content.rows = 99
else:
content.cols = 99
content.rows = 1
def on_arrow_image(self, *l):
self._arrow_img.source = self.arrow_image
def on_arrow_pos(self, *l):
self_content = self.content
if not self_content:
Clock.schedule_once(self.on_arrow_pos)
return
if self_content not in self.children:
Clock.schedule_once(self.on_arrow_pos)
return
self_arrow_pos = self.arrow_pos
if self._prev_arrow_pos == self_arrow_pos:
return
self._prev_arrow_pos = self_arrow_pos
self_arrow_layout = self._arrow_layout
self_arrow_layout.clear_widgets()
self_arrow_img = self._arrow_img
self._sctr = self._arrow_img
self.clear_widgets(do_super=True)
self_content.parent = None
self_arrow_img.size_hint = (1, None)
self_arrow_img.height = dp(self_arrow_img.texture_size[1])
self_arrow_img.pos = 0, 0
widget_list = []
arrow_list = []
parent = self_arrow_img.parent
if parent:
parent.remove_widget(self_arrow_img)
if self_arrow_pos[0] == 'b' or self_arrow_pos[0] == 't':
self.cols = 1
self.rows = 3
self_arrow_layout.orientation = 'horizontal'
self_arrow_img.width = self.width / 3
self_arrow_layout.size_hint = (1, None)
self_arrow_layout.height = self_arrow_img.height
if self_arrow_pos[0] == 'b':
if self_arrow_pos == 'bottom_mid':
widget_list = (self_content, self_arrow_img)
else:
if self_arrow_pos == 'bottom_left':
arrow_list = (self_arrow_img, Widget(), Widget())
elif self_arrow_pos == 'bottom_right':
# add two dummy widgets
arrow_list = (Widget(), Widget(), self_arrow_img)
widget_list = (self_content, self_arrow_layout)
else:
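                # Top positions reuse the same arrow image, flipped 180
                # degrees by wrapping it in a non-interactive Scatter.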
sctr = Scatter(do_translation=False,
rotation=180,
do_rotation=False,
do_scale=False,
size_hint=(None, None),
size=self_arrow_img.size)
sctr.add_widget(self_arrow_img)
if self_arrow_pos == 'top_mid':
# add two dummy widgets
arrow_list = (Widget(), sctr, Widget())
elif self_arrow_pos == 'top_left':
arrow_list = (sctr, Widget(), Widget())
elif self_arrow_pos == 'top_right':
arrow_list = (Widget(), Widget(), sctr)
widget_list = (self_arrow_layout, self_content)
elif self_arrow_pos[0] == 'l' or self_arrow_pos[0] == 'r':
self.cols = 3
self.rows = 1
self_arrow_img.width = self.height / 3
self_arrow_layout.orientation = 'vertical'
self_arrow_layout.cols = 1
self_arrow_layout.size_hint = (None, 1)
self_arrow_layout.width = self_arrow_img.height
rotation = -90 if self_arrow_pos[0] == 'l' else 90
self._sctr = sctr = Scatter(do_translation=False,
rotation=rotation,
do_rotation=False,
do_scale=False,
size_hint=(None, None),
size=(self_arrow_img.size))
sctr.add_widget(self_arrow_img)
if self_arrow_pos[-4:] == '_top':
arrow_list = (Widget(size_hint=(1, .07)),
sctr, Widget(size_hint=(1, .3)))
elif self_arrow_pos[-4:] == '_mid':
arrow_list = (Widget(), sctr, Widget())
Clock.schedule_once(self._update_arrow)
elif self_arrow_pos[-7:] == '_bottom':
arrow_list = (Widget(), Widget(), sctr)
if self_arrow_pos[0] == 'l':
widget_list = (self_arrow_layout, self_content)
else:
widget_list = (self_content, self_arrow_layout)
# add widgets to arrow_layout
add = self_arrow_layout.add_widget
for widg in arrow_list:
add(widg)
# add widgets to self
add = self.add_widget
for widg in widget_list:
add(widg)
def _update_arrow(self, *dt):
if self.arrow_pos in ('left_mid', 'right_mid'):
self._sctr.center_y = self._arrow_layout.center_y
| mit | 7,869,353,859,527,945,000 | 32.914439 | 79 | 0.570956 | false |
wfxiang08/django185 | django/contrib/auth/middleware.py | 172 | 5116 | from django.contrib import auth
from django.contrib.auth import load_backend
from django.contrib.auth.backends import RemoteUserBackend
from django.core.exceptions import ImproperlyConfigured
from django.utils.functional import SimpleLazyObject
def get_user(request):
if not hasattr(request, '_cached_user'):
request._cached_user = auth.get_user(request)
return request._cached_user
class AuthenticationMiddleware(object):
def process_request(self, request):
assert hasattr(request, 'session'), (
"The Django authentication middleware requires session middleware "
"to be installed. Edit your MIDDLEWARE_CLASSES setting to insert "
"'django.contrib.sessions.middleware.SessionMiddleware' before "
"'django.contrib.auth.middleware.AuthenticationMiddleware'."
)
request.user = SimpleLazyObject(lambda: get_user(request))
class SessionAuthenticationMiddleware(object):
"""
Formerly, a middleware for invalidating a user's sessions that don't
correspond to the user's current session authentication hash. However, it
caused the "Vary: Cookie" header on all responses.
Now a backwards compatibility shim that enables session verification in
auth.get_user() if this middleware is in MIDDLEWARE_CLASSES.
"""
def process_request(self, request):
pass
class RemoteUserMiddleware(object):
"""
Middleware for utilizing Web-server-provided authentication.
If request.user is not authenticated, then this middleware attempts to
authenticate the username passed in the ``REMOTE_USER`` request header.
If authentication is successful, the user is automatically logged in to
persist the user in the session.
The header used is configurable and defaults to ``REMOTE_USER``. Subclass
this class and change the ``header`` attribute if you need to use a
different header.
"""
# Name of request header to grab username from. This will be the key as
# used in the request.META dictionary, i.e. the normalization of headers to
# all uppercase and the addition of "HTTP_" prefix apply.
header = "REMOTE_USER"
def process_request(self, request):
# AuthenticationMiddleware is required so that request.user exists.
if not hasattr(request, 'user'):
raise ImproperlyConfigured(
"The Django remote user auth middleware requires the"
" authentication middleware to be installed. Edit your"
" MIDDLEWARE_CLASSES setting to insert"
" 'django.contrib.auth.middleware.AuthenticationMiddleware'"
" before the RemoteUserMiddleware class.")
try:
username = request.META[self.header]
except KeyError:
# If specified header doesn't exist then remove any existing
# authenticated remote-user, or return (leaving request.user set to
# AnonymousUser by the AuthenticationMiddleware).
if request.user.is_authenticated():
self._remove_invalid_user(request)
return
# If the user is already authenticated and that user is the user we are
# getting passed in the headers, then the correct user is already
# persisted in the session and we don't need to continue.
if request.user.is_authenticated():
if request.user.get_username() == self.clean_username(username, request):
return
else:
# An authenticated user is associated with the request, but
# it does not match the authorized user in the header.
self._remove_invalid_user(request)
# We are seeing this user for the first time in this session, attempt
# to authenticate the user.
user = auth.authenticate(remote_user=username)
if user:
# User is valid. Set request.user and persist user in the session
# by logging the user in.
request.user = user
auth.login(request, user)
def clean_username(self, username, request):
"""
Allows the backend to clean the username, if the backend defines a
clean_username method.
"""
backend_str = request.session[auth.BACKEND_SESSION_KEY]
backend = auth.load_backend(backend_str)
try:
username = backend.clean_username(username)
except AttributeError: # Backend has no clean_username method.
pass
return username
def _remove_invalid_user(self, request):
"""
Removes the current authenticated user in the request which is invalid
but only if the user is authenticated via the RemoteUserBackend.
"""
try:
stored_backend = load_backend(request.session.get(auth.BACKEND_SESSION_KEY, ''))
except ImportError:
# backend failed to load
auth.logout(request)
else:
if isinstance(stored_backend, RemoteUserBackend):
auth.logout(request)
| bsd-3-clause | 8,544,740,072,199,391,000 | 41.633333 | 92 | 0.663018 | false |
ojengwa/talk | venv/lib/python2.7/site-packages/django/utils/translation/trans_null.py | 84 | 1536 | # These are versions of the functions in django.utils.translation.trans_real
# that don't actually do anything. This is purely for performance, so that
# settings.USE_I18N = False can use this module rather than trans_real.py.
from django.conf import settings
from django.utils.encoding import force_text
from django.utils.safestring import mark_safe, SafeData
def ngettext(singular, plural, number):
if number == 1:
return singular
return plural
ngettext_lazy = ngettext
def ungettext(singular, plural, number):
return force_text(ngettext(singular, plural, number))
def pgettext(context, message):
return ugettext(message)
def npgettext(context, singular, plural, number):
return ungettext(singular, plural, number)
activate = lambda x: None
deactivate = deactivate_all = lambda: None
get_language = lambda: settings.LANGUAGE_CODE
get_language_bidi = lambda: settings.LANGUAGE_CODE in settings.LANGUAGES_BIDI
check_for_language = lambda x: True
def gettext(message):
if isinstance(message, SafeData):
return mark_safe(message)
return message
def ugettext(message):
return force_text(gettext(message))
gettext_noop = gettext_lazy = _ = gettext
def to_locale(language):
p = language.find('-')
if p >= 0:
return language[:p].lower() + '_' + language[p + 1:].upper()
else:
return language.lower()
def get_language_from_request(request, check_path=False):
return settings.LANGUAGE_CODE
def get_language_from_path(request):
return None
| mit | 3,185,623,647,926,365,000 | 24.6 | 77 | 0.723307 | false |
camptocamp/QGIS | python/plugins/processing/lidar/lastools/lassplit.py | 1 | 2355 | # -*- coding: utf-8 -*-
"""
***************************************************************************
lassplit.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
---------------------
Date : September 2013
Copyright : (C) 2013 by Martin Isenburg
Email : martin near rapidlasso point com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from processing.lidar.lastools.LAStoolsUtils import LAStoolsUtils
from processing.lidar.lastools.LAStoolsAlgorithm import LAStoolsAlgorithm
from processing.parameters.ParameterNumber import ParameterNumber
class lassplit(LAStoolsAlgorithm):
NUM_POINTS = "NUM_POINTS"
def defineCharacteristics(self):
self.name = "lassplit"
self.group = "LAStools"
self.addParametersVerboseGUI()
self.addParametersPointInputGUI()
self.addParameter(ParameterNumber(lassplit.NUM_POINTS, "number of points in output files", 1, None, 1000000))
self.addParametersPointOutputGUI()
def processAlgorithm(self, progress):
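        # Build the lassplit.exe command line from the dialog parameters and
        # run it through the LAStools wrapper.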
commands = [os.path.join(LAStoolsUtils.LAStoolsPath(), "bin", "lassplit.exe")]
self.addParametersVerboseCommands(commands)
self.addParametersPointInputCommands(commands)
commands.append("-split")
commands.append(self.getParameterValue(lassplit.NUM_POINTS))
self.addParametersPointOutputCommands(commands)
LAStoolsUtils.runLAStools(commands, progress)
| gpl-2.0 | -8,099,768,237,118,897,000 | 41.053571 | 117 | 0.545223 | false |
sunyihuan326/DeltaLab | shuwei_fengge/practice_two/load_data/knn_outline.py | 1 | 1415 | # coding:utf-8
'''
Created on 2017/12/29.
@author: chk01
'''
from practice_one.Company.load_material.utils import *
from practice_one.model.utils import *
from imblearn.over_sampling import RandomOverSampler
def preprocessing(trX, teX, trY, teY):
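    # Balance the training set: flatten each sample to 1-D, oversample every
    # class to 700 examples with RandomOverSampler, then restore the original
    # (width, height) shape. The test set is passed through unchanged.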
res = RandomOverSampler(ratio={0: 700, 1: 700, 2: 700})
m, w, h = trX.shape
trX = np.reshape(trX, [m, -1])
# print(trX.shape, np.squeeze(trY).shape)
trX, trY = res.fit_sample(trX, np.squeeze(trY))
new_m = trX.shape[0]
trX = trX.reshape(new_m, w, h)
trY = trY.reshape([-1, 1])
return trX, teX, trY, teY
if __name__ == '__main__':
file = 'knn-outline.mat'
X_train, X_test, Y_train, Y_test = load_data(file)
print(X_train.shape)
print(X_test.shape)
print(Y_train.shape)
X_train, X_test, Y_train, Y_test = preprocessing(X_train, X_test, Y_train, Y_test)
print(X_train.shape)
print(X_test.shape)
print(Y_train.shape)
test_num = Y_test.shape[0]
outline = NearestNeighbor()
outline.train(X_train, Y_train)
# distinces = np.zeros([376, ])
error = 0
Error = [[2], [], [0]]
k = 100
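    # For each test sample: compute the L2 distance to every training sample,
    # average the labels of the k nearest neighbours, and count a prediction
    # as an error only when it falls in Error[true_label] (the confusable
    # classes listed above).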
    for i in range(test_num):
distinces = np.linalg.norm(outline.trX - X_test[i], axis=(1, 2))
preY = Y_train[np.argsort(distinces)[:k]]
preY = np.sum(preY) / k
if int(np.squeeze(preY)) in Error[int(Y_test[i])]:
error += 1
print(error / test_num)
| mit | -4,662,336,195,998,147,000 | 28.479167 | 86 | 0.601413 | false |
oandrew/home-assistant | tests/components/device_tracker/test_owntracks.py | 10 | 28145 | """The tests for the Owntracks device tracker."""
import json
import os
import unittest
from collections import defaultdict
from unittest.mock import patch
from tests.common import (assert_setup_component, fire_mqtt_message,
get_test_home_assistant, mock_mqtt_component)
import homeassistant.components.device_tracker.owntracks as owntracks
from homeassistant.bootstrap import setup_component
from homeassistant.components import device_tracker
from homeassistant.const import CONF_PLATFORM, STATE_NOT_HOME
USER = 'greg'
DEVICE = 'phone'
LOCATION_TOPIC = 'owntracks/{}/{}'.format(USER, DEVICE)
EVENT_TOPIC = 'owntracks/{}/{}/event'.format(USER, DEVICE)
WAYPOINT_TOPIC = owntracks.WAYPOINT_TOPIC.format(USER, DEVICE)
USER_BLACKLIST = 'ram'
WAYPOINT_TOPIC_BLOCKED = owntracks.WAYPOINT_TOPIC.format(
USER_BLACKLIST, DEVICE)
DEVICE_TRACKER_STATE = 'device_tracker.{}_{}'.format(USER, DEVICE)
IBEACON_DEVICE = 'keys'
REGION_TRACKER_STATE = 'device_tracker.beacon_{}'.format(IBEACON_DEVICE)
CONF_MAX_GPS_ACCURACY = 'max_gps_accuracy'
CONF_WAYPOINT_IMPORT = owntracks.CONF_WAYPOINT_IMPORT
CONF_WAYPOINT_WHITELIST = owntracks.CONF_WAYPOINT_WHITELIST
CONF_SECRET = owntracks.CONF_SECRET
LOCATION_MESSAGE = {
'batt': 92,
'cog': 248,
'tid': 'user',
'lon': 1.0,
't': 'u',
'alt': 27,
'acc': 60,
'p': 101.3977584838867,
'vac': 4,
'lat': 2.0,
'_type': 'location',
'tst': 1,
'vel': 0}
LOCATION_MESSAGE_INACCURATE = {
'batt': 92,
'cog': 248,
'tid': 'user',
'lon': 2.0,
't': 'u',
'alt': 27,
'acc': 2000,
'p': 101.3977584838867,
'vac': 4,
'lat': 6.0,
'_type': 'location',
'tst': 1,
'vel': 0}
LOCATION_MESSAGE_ZERO_ACCURACY = {
'batt': 92,
'cog': 248,
'tid': 'user',
'lon': 2.0,
't': 'u',
'alt': 27,
'acc': 0,
'p': 101.3977584838867,
'vac': 4,
'lat': 6.0,
'_type': 'location',
'tst': 1,
'vel': 0}
REGION_ENTER_MESSAGE = {
'lon': 1.0,
'event': 'enter',
'tid': 'user',
'desc': 'inner',
'wtst': 1,
't': 'b',
'acc': 60,
'tst': 2,
'lat': 2.0,
'_type': 'transition'}
REGION_LEAVE_MESSAGE = {
'lon': 1.0,
'event': 'leave',
'tid': 'user',
'desc': 'inner',
'wtst': 1,
't': 'b',
'acc': 60,
'tst': 2,
'lat': 2.0,
'_type': 'transition'}
REGION_LEAVE_INACCURATE_MESSAGE = {
'lon': 10.0,
'event': 'leave',
'tid': 'user',
'desc': 'inner',
'wtst': 1,
't': 'b',
'acc': 2000,
'tst': 2,
'lat': 20.0,
'_type': 'transition'}
WAYPOINTS_EXPORTED_MESSAGE = {
"_type": "waypoints",
"_creator": "test",
"waypoints": [
{
"_type": "waypoint",
"tst": 3,
"lat": 47,
"lon": 9,
"rad": 10,
"desc": "exp_wayp1"
},
{
"_type": "waypoint",
"tst": 4,
"lat": 3,
"lon": 9,
"rad": 500,
"desc": "exp_wayp2"
}
]
}
WAYPOINTS_UPDATED_MESSAGE = {
"_type": "waypoints",
"_creator": "test",
"waypoints": [
{
"_type": "waypoint",
"tst": 4,
"lat": 9,
"lon": 47,
"rad": 50,
"desc": "exp_wayp1"
},
]
}
WAYPOINT_ENTITY_NAMES = ['zone.greg_phone__exp_wayp1',
'zone.greg_phone__exp_wayp2',
'zone.ram_phone__exp_wayp1',
'zone.ram_phone__exp_wayp2']
REGION_ENTER_ZERO_MESSAGE = {
'lon': 1.0,
'event': 'enter',
'tid': 'user',
'desc': 'inner',
'wtst': 1,
't': 'b',
'acc': 0,
'tst': 2,
'lat': 2.0,
'_type': 'transition'}
REGION_LEAVE_ZERO_MESSAGE = {
'lon': 10.0,
'event': 'leave',
'tid': 'user',
'desc': 'inner',
'wtst': 1,
't': 'b',
'acc': 0,
'tst': 2,
'lat': 20.0,
'_type': 'transition'}
BAD_JSON_PREFIX = '--$this is bad json#--'
BAD_JSON_SUFFIX = '** and it ends here ^^'
SECRET_KEY = 's3cretkey'
ENCRYPTED_LOCATION_MESSAGE = {
# Encrypted version of LOCATION_MESSAGE using libsodium and SECRET_KEY
'_type': 'encrypted',
'data': ('qm1A83I6TVFRmH5343xy+cbex8jBBxDFkHRuJhELVKVRA/DgXcyKtghw'
'9pOw75Lo4gHcyy2wV5CmkjrpKEBR7Qhye4AR0y7hOvlx6U/a3GuY1+W8'
'I4smrLkwMvGgBOzXSNdVTzbFTHDvG3gRRaNHFkt2+5MsbH2Dd6CXmpzq'
'DIfSN7QzwOevuvNIElii5MlFxI6ZnYIDYA/ZdnAXHEVsNIbyT2N0CXt3'
'fTPzgGtFzsufx40EEUkC06J7QTJl7lLG6qaLW1cCWp86Vp0eL3vtZ6xq')}
MOCK_ENCRYPTED_LOCATION_MESSAGE = {
# Mock-encrypted version of LOCATION_MESSAGE using pickle
'_type': 'encrypted',
'data': ('gANDCXMzY3JldGtleXEAQ6p7ImxvbiI6IDEuMCwgInQiOiAidSIsICJi'
'YXR0IjogOTIsICJhY2MiOiA2MCwgInZlbCI6IDAsICJfdHlwZSI6ICJs'
'b2NhdGlvbiIsICJ2YWMiOiA0LCAicCI6IDEwMS4zOTc3NTg0ODM4ODY3'
'LCAidHN0IjogMSwgImxhdCI6IDIuMCwgImFsdCI6IDI3LCAiY29nIjog'
'MjQ4LCAidGlkIjogInVzZXIifXEBhnECLg==')
}
class BaseMQTT(unittest.TestCase):
"""Base MQTT assert functions."""
hass = None
def send_message(self, topic, message, corrupt=False):
"""Test the sending of a message."""
str_message = json.dumps(message)
if corrupt:
mod_message = BAD_JSON_PREFIX + str_message + BAD_JSON_SUFFIX
else:
mod_message = str_message
fire_mqtt_message(self.hass, topic, mod_message)
self.hass.block_till_done()
def assert_location_state(self, location):
"""Test the assertion of a location state."""
state = self.hass.states.get(DEVICE_TRACKER_STATE)
self.assertEqual(state.state, location)
def assert_location_latitude(self, latitude):
"""Test the assertion of a location latitude."""
state = self.hass.states.get(DEVICE_TRACKER_STATE)
self.assertEqual(state.attributes.get('latitude'), latitude)
def assert_location_longitude(self, longitude):
"""Test the assertion of a location longitude."""
state = self.hass.states.get(DEVICE_TRACKER_STATE)
self.assertEqual(state.attributes.get('longitude'), longitude)
def assert_location_accuracy(self, accuracy):
"""Test the assertion of a location accuracy."""
state = self.hass.states.get(DEVICE_TRACKER_STATE)
self.assertEqual(state.attributes.get('gps_accuracy'), accuracy)
class TestDeviceTrackerOwnTracks(BaseMQTT):
"""Test the OwnTrack sensor."""
# pylint: disable=invalid-name
def setup_method(self, _):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
mock_mqtt_component(self.hass)
with assert_setup_component(1, device_tracker.DOMAIN):
assert setup_component(self.hass, device_tracker.DOMAIN, {
device_tracker.DOMAIN: {
CONF_PLATFORM: 'owntracks',
CONF_MAX_GPS_ACCURACY: 200,
CONF_WAYPOINT_IMPORT: True,
CONF_WAYPOINT_WHITELIST: ['jon', 'greg']
}})
self.hass.states.set(
'zone.inner', 'zoning',
{
'name': 'zone',
'latitude': 2.1,
'longitude': 1.1,
'radius': 10
})
self.hass.states.set(
'zone.inner_2', 'zoning',
{
'name': 'zone',
'latitude': 2.1,
'longitude': 1.1,
'radius': 10
})
self.hass.states.set(
'zone.outer', 'zoning',
{
'name': 'zone',
'latitude': 2.0,
'longitude': 1.0,
'radius': 100000
})
        # Clear state between tests
self.hass.states.set(DEVICE_TRACKER_STATE, None)
owntracks.REGIONS_ENTERED = defaultdict(list)
owntracks.MOBILE_BEACONS_ACTIVE = defaultdict(list)
def teardown_method(self, _):
"""Stop everything that was started."""
self.hass.stop()
try:
os.remove(self.hass.config.path(device_tracker.YAML_DEVICES))
except FileNotFoundError:
pass
def assert_tracker_state(self, location):
"""Test the assertion of a tracker state."""
state = self.hass.states.get(REGION_TRACKER_STATE)
self.assertEqual(state.state, location)
def assert_tracker_latitude(self, latitude):
"""Test the assertion of a tracker latitude."""
state = self.hass.states.get(REGION_TRACKER_STATE)
self.assertEqual(state.attributes.get('latitude'), latitude)
def assert_tracker_accuracy(self, accuracy):
"""Test the assertion of a tracker accuracy."""
state = self.hass.states.get(REGION_TRACKER_STATE)
self.assertEqual(state.attributes.get('gps_accuracy'), accuracy)
def test_location_invalid_devid(self): # pylint: disable=invalid-name
"""Test the update of a location."""
self.send_message('owntracks/paulus/nexus-5x', LOCATION_MESSAGE)
state = self.hass.states.get('device_tracker.paulus_nexus5x')
assert state.state == 'outer'
def test_location_update(self):
"""Test the update of a location."""
self.send_message(LOCATION_TOPIC, LOCATION_MESSAGE)
self.assert_location_latitude(2.0)
self.assert_location_accuracy(60.0)
self.assert_location_state('outer')
def test_location_inaccurate_gps(self):
"""Test the location for inaccurate GPS information."""
self.send_message(LOCATION_TOPIC, LOCATION_MESSAGE)
self.send_message(LOCATION_TOPIC, LOCATION_MESSAGE_INACCURATE)
self.assert_location_latitude(2.0)
self.assert_location_longitude(1.0)
def test_location_zero_accuracy_gps(self):
"""Ignore the location for zero accuracy GPS information."""
self.send_message(LOCATION_TOPIC, LOCATION_MESSAGE)
self.send_message(LOCATION_TOPIC, LOCATION_MESSAGE_ZERO_ACCURACY)
self.assert_location_latitude(2.0)
self.assert_location_longitude(1.0)
def test_event_entry_exit(self):
"""Test the entry event."""
self.send_message(EVENT_TOPIC, REGION_ENTER_MESSAGE)
# Enter uses the zone's gps co-ords
self.assert_location_latitude(2.1)
self.assert_location_accuracy(10.0)
self.assert_location_state('inner')
self.send_message(LOCATION_TOPIC, LOCATION_MESSAGE)
# Updates ignored when in a zone
self.assert_location_latitude(2.1)
self.assert_location_accuracy(10.0)
self.assert_location_state('inner')
self.send_message(EVENT_TOPIC, REGION_LEAVE_MESSAGE)
# Exit switches back to GPS
self.assert_location_latitude(2.0)
self.assert_location_accuracy(60.0)
self.assert_location_state('outer')
# Left clean zone state
self.assertFalse(owntracks.REGIONS_ENTERED[USER])
def test_event_with_spaces(self):
"""Test the entry event."""
message = REGION_ENTER_MESSAGE.copy()
message['desc'] = "inner 2"
self.send_message(EVENT_TOPIC, message)
self.assert_location_state('inner_2')
message = REGION_LEAVE_MESSAGE.copy()
message['desc'] = "inner 2"
self.send_message(EVENT_TOPIC, message)
# Left clean zone state
self.assertFalse(owntracks.REGIONS_ENTERED[USER])
def test_event_entry_exit_inaccurate(self):
"""Test the event for inaccurate exit."""
self.send_message(EVENT_TOPIC, REGION_ENTER_MESSAGE)
# Enter uses the zone's gps co-ords
self.assert_location_latitude(2.1)
self.assert_location_accuracy(10.0)
self.assert_location_state('inner')
self.send_message(EVENT_TOPIC, REGION_LEAVE_INACCURATE_MESSAGE)
# Exit doesn't use inaccurate gps
self.assert_location_latitude(2.1)
self.assert_location_accuracy(10.0)
self.assert_location_state('inner')
# But does exit region correctly
self.assertFalse(owntracks.REGIONS_ENTERED[USER])
def test_event_entry_exit_zero_accuracy(self):
"""Test entry/exit events with accuracy zero."""
self.send_message(EVENT_TOPIC, REGION_ENTER_ZERO_MESSAGE)
# Enter uses the zone's gps co-ords
self.assert_location_latitude(2.1)
self.assert_location_accuracy(10.0)
self.assert_location_state('inner')
self.send_message(EVENT_TOPIC, REGION_LEAVE_ZERO_MESSAGE)
# Exit doesn't use zero gps
self.assert_location_latitude(2.1)
self.assert_location_accuracy(10.0)
self.assert_location_state('inner')
# But does exit region correctly
self.assertFalse(owntracks.REGIONS_ENTERED[USER])
def test_event_exit_outside_zone_sets_away(self):
"""Test the event for exit zone."""
self.send_message(EVENT_TOPIC, REGION_ENTER_MESSAGE)
self.assert_location_state('inner')
# Exit message far away GPS location
message = REGION_LEAVE_MESSAGE.copy()
message['lon'] = 90.1
message['lat'] = 90.1
self.send_message(EVENT_TOPIC, message)
# Exit forces zone change to away
self.assert_location_state(STATE_NOT_HOME)
def test_event_entry_exit_right_order(self):
"""Test the event for ordering."""
# Enter inner zone
self.send_message(EVENT_TOPIC, REGION_ENTER_MESSAGE)
self.assert_location_state('inner')
self.assert_location_latitude(2.1)
self.assert_location_accuracy(10.0)
# Enter inner2 zone
message = REGION_ENTER_MESSAGE.copy()
message['desc'] = "inner_2"
self.send_message(EVENT_TOPIC, message)
self.assert_location_state('inner_2')
self.assert_location_latitude(2.1)
self.assert_location_accuracy(10.0)
# Exit inner_2 - should be in 'inner'
message = REGION_LEAVE_MESSAGE.copy()
message['desc'] = "inner_2"
self.send_message(EVENT_TOPIC, message)
self.assert_location_state('inner')
self.assert_location_latitude(2.1)
self.assert_location_accuracy(10.0)
# Exit inner - should be in 'outer'
self.send_message(EVENT_TOPIC, REGION_LEAVE_MESSAGE)
self.assert_location_state('outer')
self.assert_location_latitude(2.0)
self.assert_location_accuracy(60.0)
def test_event_entry_exit_wrong_order(self):
"""Test the event for wrong order."""
# Enter inner zone
self.send_message(EVENT_TOPIC, REGION_ENTER_MESSAGE)
self.assert_location_state('inner')
# Enter inner2 zone
message = REGION_ENTER_MESSAGE.copy()
message['desc'] = "inner_2"
self.send_message(EVENT_TOPIC, message)
self.assert_location_state('inner_2')
# Exit inner - should still be in 'inner_2'
self.send_message(EVENT_TOPIC, REGION_LEAVE_MESSAGE)
self.assert_location_state('inner_2')
# Exit inner_2 - should be in 'outer'
message = REGION_LEAVE_MESSAGE.copy()
message['desc'] = "inner_2"
self.send_message(EVENT_TOPIC, message)
self.assert_location_state('outer')
def test_event_entry_unknown_zone(self):
"""Test the event for unknown zone."""
# Just treat as location update
message = REGION_ENTER_MESSAGE.copy()
message['desc'] = "unknown"
self.send_message(EVENT_TOPIC, message)
self.assert_location_latitude(2.0)
self.assert_location_state('outer')
def test_event_exit_unknown_zone(self):
"""Test the event for unknown zone."""
# Just treat as location update
message = REGION_LEAVE_MESSAGE.copy()
message['desc'] = "unknown"
self.send_message(EVENT_TOPIC, message)
self.assert_location_latitude(2.0)
self.assert_location_state('outer')
def test_event_entry_zone_loading_dash(self):
"""Test the event for zone landing."""
# Make sure the leading - is ignored
        # OwnTracks uses this to switch on hold
message = REGION_ENTER_MESSAGE.copy()
message['desc'] = "-inner"
        self.send_message(EVENT_TOPIC, message)
self.assert_location_state('inner')
def test_mobile_enter_move_beacon(self):
"""Test the movement of a beacon."""
# Enter mobile beacon, should set location
message = REGION_ENTER_MESSAGE.copy()
message['desc'] = IBEACON_DEVICE
self.send_message(EVENT_TOPIC, message)
self.assert_tracker_latitude(2.0)
self.assert_tracker_state('outer')
# Move should move beacon
message = LOCATION_MESSAGE.copy()
message['lat'] = "3.0"
self.send_message(LOCATION_TOPIC, message)
self.assert_tracker_latitude(3.0)
self.assert_tracker_state(STATE_NOT_HOME)
def test_mobile_enter_exit_region_beacon(self):
"""Test the enter and the exit of a region beacon."""
# Start tracking beacon
message = REGION_ENTER_MESSAGE.copy()
message['desc'] = IBEACON_DEVICE
self.send_message(EVENT_TOPIC, message)
self.assert_tracker_latitude(2.0)
self.assert_tracker_state('outer')
# Enter location should move beacon
message = REGION_ENTER_MESSAGE.copy()
message['desc'] = "inner_2"
self.send_message(EVENT_TOPIC, message)
self.assert_tracker_latitude(2.1)
self.assert_tracker_state('inner_2')
# Exit location should switch to gps
message = REGION_LEAVE_MESSAGE.copy()
message['desc'] = "inner_2"
self.send_message(EVENT_TOPIC, message)
self.assert_tracker_latitude(2.0)
def test_mobile_exit_move_beacon(self):
"""Test the exit move of a beacon."""
# Start tracking beacon
message = REGION_ENTER_MESSAGE.copy()
message['desc'] = IBEACON_DEVICE
self.send_message(EVENT_TOPIC, message)
self.assert_tracker_latitude(2.0)
self.assert_tracker_state('outer')
# Exit mobile beacon, should set location
message = REGION_LEAVE_MESSAGE.copy()
message['desc'] = IBEACON_DEVICE
message['lat'] = "3.0"
self.send_message(EVENT_TOPIC, message)
self.assert_tracker_latitude(3.0)
# Move after exit should do nothing
message = LOCATION_MESSAGE.copy()
message['lat'] = "4.0"
        self.send_message(LOCATION_TOPIC, message)
self.assert_tracker_latitude(3.0)
def test_mobile_multiple_async_enter_exit(self):
"""Test the multiple entering."""
# Test race condition
enter_message = REGION_ENTER_MESSAGE.copy()
enter_message['desc'] = IBEACON_DEVICE
exit_message = REGION_LEAVE_MESSAGE.copy()
exit_message['desc'] = IBEACON_DEVICE
for _ in range(0, 20):
fire_mqtt_message(
self.hass, EVENT_TOPIC, json.dumps(enter_message))
fire_mqtt_message(
self.hass, EVENT_TOPIC, json.dumps(exit_message))
fire_mqtt_message(
self.hass, EVENT_TOPIC, json.dumps(enter_message))
self.hass.block_till_done()
self.send_message(EVENT_TOPIC, exit_message)
self.assertEqual(owntracks.MOBILE_BEACONS_ACTIVE['greg_phone'], [])
def test_mobile_multiple_enter_exit(self):
"""Test the multiple entering."""
# Should only happen if the iphone dies
enter_message = REGION_ENTER_MESSAGE.copy()
enter_message['desc'] = IBEACON_DEVICE
exit_message = REGION_LEAVE_MESSAGE.copy()
exit_message['desc'] = IBEACON_DEVICE
self.send_message(EVENT_TOPIC, enter_message)
self.send_message(EVENT_TOPIC, enter_message)
self.send_message(EVENT_TOPIC, exit_message)
self.assertEqual(owntracks.MOBILE_BEACONS_ACTIVE['greg_phone'], [])
def test_waypoint_import_simple(self):
"""Test a simple import of list of waypoints."""
waypoints_message = WAYPOINTS_EXPORTED_MESSAGE.copy()
self.send_message(WAYPOINT_TOPIC, waypoints_message)
# Check if it made it into states
wayp = self.hass.states.get(WAYPOINT_ENTITY_NAMES[0])
self.assertTrue(wayp is not None)
wayp = self.hass.states.get(WAYPOINT_ENTITY_NAMES[1])
self.assertTrue(wayp is not None)
def test_waypoint_import_blacklist(self):
"""Test import of list of waypoints for blacklisted user."""
waypoints_message = WAYPOINTS_EXPORTED_MESSAGE.copy()
self.send_message(WAYPOINT_TOPIC_BLOCKED, waypoints_message)
# Check if it made it into states
wayp = self.hass.states.get(WAYPOINT_ENTITY_NAMES[2])
self.assertTrue(wayp is None)
wayp = self.hass.states.get(WAYPOINT_ENTITY_NAMES[3])
self.assertTrue(wayp is None)
def test_waypoint_import_no_whitelist(self):
"""Test import of list of waypoints with no whitelist set."""
def mock_see(**kwargs):
"""Fake see method for owntracks."""
return
test_config = {
CONF_PLATFORM: 'owntracks',
CONF_MAX_GPS_ACCURACY: 200,
CONF_WAYPOINT_IMPORT: True
}
owntracks.setup_scanner(self.hass, test_config, mock_see)
waypoints_message = WAYPOINTS_EXPORTED_MESSAGE.copy()
self.send_message(WAYPOINT_TOPIC_BLOCKED, waypoints_message)
# Check if it made it into states
wayp = self.hass.states.get(WAYPOINT_ENTITY_NAMES[2])
self.assertTrue(wayp is not None)
wayp = self.hass.states.get(WAYPOINT_ENTITY_NAMES[3])
self.assertTrue(wayp is not None)
def test_waypoint_import_bad_json(self):
"""Test importing a bad JSON payload."""
waypoints_message = WAYPOINTS_EXPORTED_MESSAGE.copy()
self.send_message(WAYPOINT_TOPIC, waypoints_message, True)
# Check if it made it into states
wayp = self.hass.states.get(WAYPOINT_ENTITY_NAMES[2])
self.assertTrue(wayp is None)
wayp = self.hass.states.get(WAYPOINT_ENTITY_NAMES[3])
self.assertTrue(wayp is None)
def test_waypoint_import_existing(self):
"""Test importing a zone that exists."""
waypoints_message = WAYPOINTS_EXPORTED_MESSAGE.copy()
self.send_message(WAYPOINT_TOPIC, waypoints_message)
# Get the first waypoint exported
wayp = self.hass.states.get(WAYPOINT_ENTITY_NAMES[0])
# Send an update
waypoints_message = WAYPOINTS_UPDATED_MESSAGE.copy()
self.send_message(WAYPOINT_TOPIC, waypoints_message)
new_wayp = self.hass.states.get(WAYPOINT_ENTITY_NAMES[0])
self.assertTrue(wayp == new_wayp)
class TestDeviceTrackerOwnTrackConfigs(BaseMQTT):
"""Test the OwnTrack sensor."""
# pylint: disable=invalid-name
def setup_method(self, method):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
mock_mqtt_component(self.hass)
def mock_cipher(): # pylint: disable=no-method-argument
"""Return a dummy pickle-based cipher."""
def mock_decrypt(ciphertext, key):
"""Decrypt/unpickle."""
import pickle
(mkey, plaintext) = pickle.loads(ciphertext)
if key != mkey:
raise ValueError()
return plaintext
return (len(SECRET_KEY), mock_decrypt)
@patch('homeassistant.components.device_tracker.owntracks.get_cipher',
mock_cipher)
def test_encrypted_payload(self):
"""Test encrypted payload."""
with assert_setup_component(1, device_tracker.DOMAIN):
assert setup_component(self.hass, device_tracker.DOMAIN, {
device_tracker.DOMAIN: {
CONF_PLATFORM: 'owntracks',
CONF_SECRET: SECRET_KEY,
}})
self.send_message(LOCATION_TOPIC, MOCK_ENCRYPTED_LOCATION_MESSAGE)
self.assert_location_latitude(2.0)
@patch('homeassistant.components.device_tracker.owntracks.get_cipher',
mock_cipher)
def test_encrypted_payload_topic_key(self):
"""Test encrypted payload with a topic key."""
with assert_setup_component(1, device_tracker.DOMAIN):
assert setup_component(self.hass, device_tracker.DOMAIN, {
device_tracker.DOMAIN: {
CONF_PLATFORM: 'owntracks',
CONF_SECRET: {
LOCATION_TOPIC: SECRET_KEY,
}}})
self.send_message(LOCATION_TOPIC, MOCK_ENCRYPTED_LOCATION_MESSAGE)
self.assert_location_latitude(2.0)
@patch('homeassistant.components.device_tracker.owntracks.get_cipher',
mock_cipher)
def test_encrypted_payload_no_key(self):
"""Test encrypted payload with no key, ."""
with assert_setup_component(1, device_tracker.DOMAIN):
assert setup_component(self.hass, device_tracker.DOMAIN, {
device_tracker.DOMAIN: {
CONF_PLATFORM: 'owntracks',
# key missing
}})
self.send_message(LOCATION_TOPIC, MOCK_ENCRYPTED_LOCATION_MESSAGE)
self.assert_location_latitude(None)
@patch('homeassistant.components.device_tracker.owntracks.get_cipher',
mock_cipher)
def test_encrypted_payload_wrong_key(self):
"""Test encrypted payload with wrong key."""
with assert_setup_component(1, device_tracker.DOMAIN):
assert setup_component(self.hass, device_tracker.DOMAIN, {
device_tracker.DOMAIN: {
CONF_PLATFORM: 'owntracks',
CONF_SECRET: 'wrong key',
}})
self.send_message(LOCATION_TOPIC, MOCK_ENCRYPTED_LOCATION_MESSAGE)
self.assert_location_latitude(None)
@patch('homeassistant.components.device_tracker.owntracks.get_cipher',
mock_cipher)
def test_encrypted_payload_wrong_topic_key(self):
"""Test encrypted payload with wrong topic key."""
with assert_setup_component(1, device_tracker.DOMAIN):
assert setup_component(self.hass, device_tracker.DOMAIN, {
device_tracker.DOMAIN: {
CONF_PLATFORM: 'owntracks',
CONF_SECRET: {
LOCATION_TOPIC: 'wrong key'
}}})
self.send_message(LOCATION_TOPIC, MOCK_ENCRYPTED_LOCATION_MESSAGE)
self.assert_location_latitude(None)
@patch('homeassistant.components.device_tracker.owntracks.get_cipher',
mock_cipher)
def test_encrypted_payload_no_topic_key(self):
"""Test encrypted payload with no topic key."""
with assert_setup_component(1, device_tracker.DOMAIN):
assert setup_component(self.hass, device_tracker.DOMAIN, {
device_tracker.DOMAIN: {
CONF_PLATFORM: 'owntracks',
CONF_SECRET: {
'owntracks/{}/{}'.format(USER, 'otherdevice'): 'foobar'
}}})
self.send_message(LOCATION_TOPIC, MOCK_ENCRYPTED_LOCATION_MESSAGE)
self.assert_location_latitude(None)
try:
import libnacl
except (ImportError, OSError):
libnacl = None
@unittest.skipUnless(libnacl, "libnacl/libsodium is not installed")
def test_encrypted_payload_libsodium(self):
"""Test sending encrypted message payload."""
with assert_setup_component(1, device_tracker.DOMAIN):
assert setup_component(self.hass, device_tracker.DOMAIN, {
device_tracker.DOMAIN: {
CONF_PLATFORM: 'owntracks',
CONF_SECRET: SECRET_KEY,
}})
self.send_message(LOCATION_TOPIC, ENCRYPTED_LOCATION_MESSAGE)
self.assert_location_latitude(2.0)
| mit | 6,048,500,633,906,802,000 | 34.093516 | 79 | 0.607177 | false |
beni55/june | june/utils/mail.py | 11 | 1651 | # coding: utf-8
from flask import current_app, url_for, render_template
from flask.ext.babel import gettext as _
from flask_mail import Message
from .user import create_auth_token
def send_mail(app, msg):
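    # Sends through Flask-Mail; silently skips when no default sender is
    # configured.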
mail = app.extensions['mail']
if not mail.default_sender:
return
mail.send(msg)
def signup_mail(user, path=None):
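    # Builds a signup confirmation e-mail whose link carries a fresh auth
    # token; `path` overrides the default signup URL when given.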
config = current_app.config
msg = Message(
_("Signup for %(site)s", site=config['SITE_TITLE']),
recipients=[user.email],
)
reply_to = config.get('MAIL_REPLY_TO', None)
if reply_to:
msg.reply_to = reply_to
host = config.get('SITE_URL', '')
dct = {
'host': host.rstrip('/'),
'token': create_auth_token(user)
}
if path:
dct['path'] = path
else:
dct['path'] = url_for('account.signup')
link = '%(host)s%(path)s?token=%(token)s' % dct
html = render_template('email/signup.html', user=user, link=link)
msg.html = html
send_mail(current_app, msg)
return msg
def find_mail(user):
config = current_app.config
msg = Message(
_("Find password for %(site)s", site=config['SITE_TITLE']),
recipients=[user.email],
)
reply_to = config.get('MAIL_REPLY_TO', None)
if reply_to:
msg.reply_to = reply_to
host = config.get('SITE_URL', '')
dct = {
'host': host.rstrip('/'),
'path': url_for('account.reset'),
'token': create_auth_token(user)
}
link = '%(host)s%(path)s?token=%(token)s' % dct
html = render_template('email/find.html', user=user, link=link)
msg.html = html
send_mail(current_app, msg)
return msg
| bsd-3-clause | -7,252,105,222,293,648,000 | 26.065574 | 69 | 0.588734 | false |
Rafiot/PyCIRCLean | filecheck/filecheck.py | 1 | 35947 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import mimetypes
import shlex
import subprocess
import zipfile
import argparse
import random
import shutil
import time
import hashlib
import oletools.oleid
import olefile
import officedissector
import warnings
import exifread
from PIL import Image
from pdfid import PDFiD, cPDFiD
from kittengroomer import FileBase, KittenGroomerBase, Logging
class Config:
"""Configuration information for filecheck.py."""
# MIMES
# Application subtypes (mimetype: 'application/<subtype>')
mimes_ooxml = ('vnd.openxmlformats-officedocument.',)
mimes_office = ('msword', 'vnd.ms-',)
mimes_libreoffice = ('vnd.oasis.opendocument',)
mimes_rtf = ('rtf', 'richtext',)
mimes_pdf = ('pdf', 'postscript',)
mimes_xml = ('xml',)
mimes_ms = ('dosexec',)
mimes_compressed = ('zip', 'rar', 'x-rar', 'bzip2', 'lzip', 'lzma', 'lzop',
'xz', 'compress', 'gzip', 'tar',)
mimes_data = ('octet-stream',)
mimes_audio = ('ogg',)
# Image subtypes
mimes_exif = ('image/jpeg', 'image/tiff',)
mimes_png = ('image/png',)
# Mimetypes with metadata
mimes_metadata = ('image/jpeg', 'image/tiff', 'image/png',)
# Mimetype aliases
aliases = {
# Win executables
'application/x-msdos-program': 'application/x-dosexec',
'application/x-dosexec': 'application/x-msdos-program',
# Other apps with confusing mimetypes
'application/rtf': 'text/rtf',
'application/rar': 'application/x-rar',
'application/ogg': 'audio/ogg',
'audio/ogg': 'application/ogg'
}
# EXTS
# Commonly used malicious extensions
# Sources: http://www.howtogeek.com/137270/50-file-extensions-that-are-potentially-dangerous-on-windows/
# https://github.com/wiregit/wirecode/blob/master/components/core-settings/src/main/java/org/limewire/core/settings/FilterSettings.java
malicious_exts = (
# Applications
".exe", ".pif", ".application", ".gadget", ".msi", ".msp", ".com", ".scr",
".hta", ".cpl", ".msc", ".jar",
# Scripts
".bat", ".cmd", ".vb", ".vbs", ".vbe", ".js", ".jse", ".ws", ".wsf",
".wsc", ".wsh", ".ps1", ".ps1xml", ".ps2", ".ps2xml", ".psc1", ".psc2",
".msh", ".msh1", ".msh2", ".mshxml", ".msh1xml", ".msh2xml",
# Shortcuts
".scf", ".lnk", ".inf",
# Other
".reg", ".dll",
# Office macro (OOXML with macro enabled)
".docm", ".dotm", ".xlsm", ".xltm", ".xlam", ".pptm", ".potm", ".ppam",
".ppsm", ".sldm",
# banned from wirecode
".asf", ".asx", ".au", ".htm", ".html", ".mht", ".vbs",
".wax", ".wm", ".wma", ".wmd", ".wmv", ".wmx", ".wmz", ".wvx",
# Google chrome malicious extensions
".ad", ".ade", ".adp", ".ah", ".apk", ".app", ".application", ".asp",
".asx", ".bas", ".bash", ".bat", ".cfg", ".chi", ".chm", ".class",
".cmd", ".com", ".command", ".crt", ".crx", ".csh", ".deb", ".dex",
".dll", ".drv", ".exe", ".fxp", ".grp", ".hlp", ".hta", ".htm", ".html",
".htt", ".inf", ".ini", ".ins", ".isp", ".jar", ".jnlp", ".user.js",
".js", ".jse", ".ksh", ".lnk", ".local", ".mad", ".maf", ".mag", ".mam",
".manifest", ".maq", ".mar", ".mas", ".mat", ".mau", ".mav", ".maw",
".mda", ".mdb", ".mde", ".mdt", ".mdw", ".mdz", ".mht", ".mhtml", ".mmc",
".mof", ".msc", ".msh", ".mshxml", ".msi", ".msp", ".mst", ".ocx", ".ops",
".pcd", ".pif", ".pkg", ".pl", ".plg", ".prf", ".prg", ".pst", ".py",
".pyc", ".pyw", ".rb", ".reg", ".rpm", ".scf", ".scr", ".sct", ".sh",
".shar", ".shb", ".shs", ".shtm", ".shtml", ".spl", ".svg", ".swf", ".sys",
".tcsh", ".url", ".vb", ".vbe", ".vbs", ".vsd", ".vsmacros", ".vss",
".vst", ".vsw", ".ws", ".wsc", ".wsf", ".wsh", ".xbap", ".xht", ".xhtm",
".xhtml", ".xml", ".xsl", ".xslt", ".website", ".msh1", ".msh2", ".msh1xml",
".msh2xml", ".ps1", ".ps1xml", ".ps2", ".ps2xml", ".psc1", ".psc2", ".xnk",
".appref-ms", ".gadget", ".efi", ".fon", ".partial", ".svg", ".xml",
".xrm_ms", ".xsl", ".action", ".bin", ".inx", ".ipa", ".isu", ".job",
".out", ".pad", ".paf", ".rgs", ".u3p", ".vbscript", ".workflow", ".001",
".ace", ".arc", ".arj", ".b64", ".balz", ".bhx", ".cab", ".cpio", ".fat",
".hfs", ".hqx", ".iso", ".lha", ".lpaq1", ".lpaq5", ".lpaq8", ".lzh",
".mim", ".ntfs", ".paq8f", ".paq8jd", ".paq8l", ".paq8o", ".pea", ".quad",
".r00", ".r01", ".r02", ".r03", ".r04", ".r05", ".r06", ".r07", ".r08",
".r09", ".r10", ".r11", ".r12", ".r13", ".r14", ".r15", ".r16", ".r17",
".r18", ".r19", ".r20", ".r21", ".r22", ".r23", ".r24", ".r25", ".r26",
".r27", ".r28", ".r29", ".squashfs", ".swm", ".tpz", ".txz", ".tz", ".udf",
".uu", ".uue", ".vhd", ".vmdk", ".wim", ".wrc", ".xar", ".xxe", ".z",
".zipx", ".zpaq", ".cdr", ".dart", ".dc42", ".diskcopy42", ".dmg",
".dmgpart", ".dvdr", ".img", ".imgpart", ".ndif", ".smi", ".sparsebundle",
".sparseimage", ".toast", ".udif",
)
# Sometimes, mimetypes.guess_type gives unexpected results, such as for .tar.gz files:
# In [12]: mimetypes.guess_type('toot.tar.gz', strict=False)
# Out[12]: ('application/x-tar', 'gzip')
# It works as expected if you do mimetypes.guess_type('application/gzip', strict=False)
override_ext = {'.gz': 'application/gzip'}
SEVENZ_PATH = '/usr/bin/7z'
class File(FileBase):
"""
Main file object
Created for each file that is processed by KittenGroomer. Contains all
filetype-specific processing methods.
"""
def __init__(self, src_path, dst_path):
super(File, self).__init__(src_path, dst_path)
self.is_archive = False
self.tempdir_path = self.dst_path + '_temp'
subtypes_apps = (
(Config.mimes_office, self._winoffice),
(Config.mimes_ooxml, self._ooxml),
(Config.mimes_rtf, self.text),
(Config.mimes_libreoffice, self._libreoffice),
(Config.mimes_pdf, self._pdf),
(Config.mimes_xml, self.text),
(Config.mimes_ms, self._executables),
(Config.mimes_compressed, self._archive),
(Config.mimes_data, self._binary_app),
(Config.mimes_audio, self.audio)
)
self.app_subtype_methods = self._make_method_dict(subtypes_apps)
types_metadata = (
(Config.mimes_exif, self._metadata_exif),
(Config.mimes_png, self._metadata_png),
)
self.metadata_mimetype_methods = self._make_method_dict(types_metadata)
self.mime_processing_options = {
'text': self.text,
'audio': self.audio,
'image': self.image,
'video': self.video,
'application': self.application,
'example': self.example,
'message': self.message,
'model': self.model,
'multipart': self.multipart,
'inode': self.inode,
}
def __repr__(self):
return "<filecheck.File object: {{{}}}>".format(self.filename)
def _check_extension(self):
"""
Guess the file's mimetype based on its extension.
If the file's mimetype (as determined by libmagic) is contained in
the `mimetype` module's list of valid mimetypes and the expected
mimetype based on its extension differs from the mimetype determined
by libmagic, then mark the file as dangerous.
"""
if not self.has_extension:
self.make_dangerous('File has no extension')
else:
if self.extension in Config.override_ext:
expected_mimetype = Config.override_ext[self.extension]
else:
expected_mimetype, encoding = mimetypes.guess_type(self.src_path,
strict=False)
if expected_mimetype in Config.aliases:
expected_mimetype = Config.aliases[expected_mimetype]
is_known_extension = self.extension in mimetypes.types_map.keys()
if is_known_extension and expected_mimetype != self.mimetype:
self.make_dangerous('Mimetype does not match expected mimetype ({}) for this extension'.format(expected_mimetype))
def _check_mimetype(self):
"""
Compare mimetype (as determined by libmagic) to extension.
Determine whether the extension that are normally associated with
the mimetype include the file's actual extension.
"""
if not self.has_mimetype:
self.make_dangerous('File has no mimetype')
else:
if self.mimetype in Config.aliases:
mimetype = Config.aliases[self.mimetype]
else:
mimetype = self.mimetype
expected_extensions = mimetypes.guess_all_extensions(mimetype,
strict=False)
if expected_extensions:
if self.has_extension and self.extension not in expected_extensions:
self.make_dangerous('Extension does not match expected extensions ({}) for this mimetype'.format(expected_extensions))
def _check_filename(self):
"""
Verify the filename
If the filename contains any dangerous or specific characters, handle
them appropriately.
"""
if self.filename.startswith('.'):
            macos_hidden_files = {
                '.Trashes', '._.Trashes', '.DS_Store', '.fseventsd', '.Spotlight-V100'
            }
if self.filename in macos_hidden_files:
self.add_description('MacOS metadata file, added by MacOS to USB drives and some directories')
self.should_copy = False
right_to_left_override = u"\u202E"
if right_to_left_override in self.filename:
self.make_dangerous('Filename contains dangerous character')
new_filename = self.filename.replace(right_to_left_override, '')
self.set_property('filename', new_filename)
def _check_malicious_exts(self):
"""Check that the file's extension isn't contained in a blacklist"""
if self.extension in Config.malicious_exts:
self.make_dangerous('Extension identifies file as potentially dangerous')
def _compute_random_hashes(self):
"""Compute a random amount of hashes at random positions in the file to ensure integrity after the copy (mitigate TOCTOU attacks)"""
if not os.path.exists(self.src_path) or os.path.isdir(self.src_path) or self.maintype == 'image':
# Images are converted, no need to compute the hashes
return
self.random_hashes = []
if self.size < 64:
# hash the whole file
self.block_length = self.size
else:
if self.size < 128:
# Get a random length between 16 and the size of the file
self.block_length = random.randint(16, self.size)
else:
# Get a random length between 16 and 128
self.block_length = random.randint(16, 128)
        for i in range(random.randint(3, 6)):  # Do a random number of reads on the file (between 3 and 6)
            start_pos = random.randint(0, self.size - self.block_length)  # Pick a random start position for the block to hash
with open(self.src_path, 'rb') as f:
f.seek(start_pos)
hashed = hashlib.sha256(f.read(self.block_length)).hexdigest()
self.random_hashes.append((start_pos, hashed))
time.sleep(random.uniform(0.1, 0.5)) # Add a random sleep length
def _validate_random_hashes(self):
"""Validate hashes computed by _compute_random_hashes"""
if not os.path.exists(self.src_path) or os.path.isdir(self.src_path) or self.maintype == 'image':
# Images are converted, we don't have to fear TOCTOU
return True
for start_pos, hashed_src in self.random_hashes:
with open(self.dst_path, 'rb') as f:
f.seek(start_pos)
hashed = hashlib.sha256(f.read(self.block_length)).hexdigest()
if hashed != hashed_src:
                # The copied file differs from the source that was checked
return False
return True
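    # Illustrative note (not part of the original module): the two hashing helpers
    # above are meant to bracket the copy step, e.g. (hypothetical driver code):
    #
    #   f = File(src_path, dst_path)
    #   f._compute_random_hashes()          # hash random blocks of the source
    #   f.safe_copy()                       # copy src -> dst
    #   assert f._validate_random_hashes()  # re-hash the same blocks on the copy
    #
    # If any block hashes differently on the destination, the file changed between
    # the check and the copy (a TOCTOU attack) and the copy should be discarded.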
def check(self):
"""
Main file processing method.
First, checks for basic properties that might indicate a dangerous file.
If the file isn't dangerous, then delegates to various helper methods
for filetype-specific checks based on the file's mimetype.
"""
# Any of these methods can call make_dangerous():
self._check_malicious_exts()
self._check_mimetype()
self._check_extension()
self._check_filename() # can mutate self.filename
self._compute_random_hashes()
if not self.is_dangerous:
self.mime_processing_options.get(self.maintype, self.unknown)()
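    # Note on the dispatch above (mime_processing_options is populated elsewhere in
    # this class): the last line looks up a handler by the mimetype's main type, so
    # e.g. maintype 'text' calls self.text(), 'application' calls self.application(),
    # and any unrecognized maintype falls back to self.unknown().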
# ##### Helper functions #####
def _make_method_dict(self, list_of_tuples):
"""Returns a dictionary with mimetype: method pairs."""
dict_to_return = {}
for list_of_subtypes, method in list_of_tuples:
for subtype in list_of_subtypes:
dict_to_return[subtype] = method
return dict_to_return
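    # Example (hypothetical arguments) of what _make_method_dict builds:
    #   self._make_method_dict([(['msword', 'vnd.ms-excel'], self._winoffice),
    #                           (['x-dosexec'], self._executables)])
    #   -> {'msword': self._winoffice, 'vnd.ms-excel': self._winoffice,
    #       'x-dosexec': self._executables}
    # i.e. every subtype in each list is mapped to the same handler method.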
@property
def has_metadata(self):
"""True if filetype typically contains metadata, else False."""
if self.mimetype in Config.mimes_metadata:
return True
return False
def make_tempdir(self):
"""Make a temporary directory at self.tempdir_path."""
if not os.path.exists(self.tempdir_path):
os.makedirs(self.tempdir_path)
return self.tempdir_path
#######################
# ##### Discarded mimetypes, reason in the docstring ######
def inode(self):
"""Empty file or symlink."""
if self.is_symlink:
symlink_path = self.get_property('symlink')
self.add_description('File is a symlink to {}'.format(symlink_path))
else:
self.add_description('File is an inode (empty file)')
self.should_copy = False
def unknown(self):
"""Main type should never be unknown."""
self.add_description('Unknown mimetype')
self.should_copy = False
def example(self):
"""Used in examples, should never be returned by libmagic."""
self.add_description('Example file')
self.should_copy = False
def multipart(self):
"""Used in web apps, should never be returned by libmagic"""
self.add_description('Multipart file - usually found in web apps')
self.should_copy = False
# ##### Treated as malicious, no reason to have it on a USB key ######
def message(self):
"""Process a message file."""
self.make_dangerous('Message file - should not be found on USB key')
def model(self):
"""Process a model file."""
self.make_dangerous('Model file - should not be found on USB key')
# ##### Files that will be converted ######
def text(self):
"""Process an rtf, ooxml, or plaintext file."""
for mt in Config.mimes_rtf:
if mt in self.subtype:
self.add_description('Rich Text (rtf) file')
self.force_ext('.txt')
return
for mt in Config.mimes_ooxml:
if mt in self.subtype:
self._ooxml()
return
self.add_description('Plain text file')
self.force_ext('.txt')
def application(self):
"""Process an application specific file according to its subtype."""
for subtype, method in self.app_subtype_methods.items():
if subtype in self.subtype: # checking for partial matches
method()
return
self._unknown_app() # if none of the methods match
def _executables(self):
"""Process an executable file."""
self.make_dangerous('Executable file')
def _winoffice(self):
"""Process a winoffice file using olefile/oletools."""
oid = oletools.oleid.OleID(self.src_path) # First assume a valid file
if not olefile.isOleFile(self.src_path):
# Manual processing, may already count as suspicious
try:
ole = olefile.OleFileIO(self.src_path, raise_defects=olefile.DEFECT_INCORRECT)
except Exception:
                self.make_dangerous('Unparsable WinOffice file')
                return  # ole is undefined past this point, so stop before it is used below
if ole.parsing_issues:
self.make_dangerous('Parsing issues with WinOffice file')
else:
if ole.exists('macros/vba') or ole.exists('Macros') \
or ole.exists('_VBA_PROJECT_CUR') or ole.exists('VBA'):
self.make_dangerous('WinOffice file containing a macro')
else:
indicators = oid.check()
# Encrypted can be set by multiple checks on the script
if oid.encrypted.value:
self.make_dangerous('Encrypted WinOffice file')
if oid.macros.value or oid.ole.exists('macros/vba') or oid.ole.exists('Macros') \
or oid.ole.exists('_VBA_PROJECT_CUR') or oid.ole.exists('VBA'):
self.make_dangerous('WinOffice file containing a macro')
for i in indicators:
if i.id == 'ObjectPool' and i.value:
self.make_dangerous('WinOffice file containing an object pool')
elif i.id == 'flash' and i.value:
self.make_dangerous('WinOffice file with embedded flash')
self.add_description('WinOffice file')
def _ooxml(self):
"""Process an ooxml file."""
self.add_description('OOXML (openoffice) file')
try:
doc = officedissector.doc.Document(self.src_path)
except Exception:
self.make_dangerous('Invalid ooxml file')
return
# There are probably other potentially malicious features:
# fonts, custom props, custom XML
if doc.is_macro_enabled or len(doc.features.macros) > 0:
self.make_dangerous('Ooxml file containing macro')
if len(doc.features.embedded_controls) > 0:
self.make_dangerous('Ooxml file with activex')
if len(doc.features.embedded_objects) > 0:
# Exploited by CVE-2014-4114 (OLE)
self.make_dangerous('Ooxml file with embedded objects')
if len(doc.features.embedded_packages) > 0:
self.make_dangerous('Ooxml file with embedded packages')
def _libreoffice(self):
"""Process a libreoffice file."""
# As long as there is no way to do a sanity check on the files => dangerous
try:
lodoc = zipfile.ZipFile(self.src_path, 'r')
except Exception:
# TODO: are there specific exceptions we should catch here? Or should it be everything
            self.make_dangerous('Invalid libreoffice file')
            return  # lodoc is undefined past this point, so stop before iterating over it
for f in lodoc.infolist():
fname = f.filename.lower()
if fname.startswith('script') or fname.startswith('basic') or \
fname.startswith('object') or fname.endswith('.bin'):
self.make_dangerous('Libreoffice file containing executable code')
if not self.is_dangerous:
self.add_description('Libreoffice file')
def _pdf(self):
"""Process a PDF file."""
xmlDoc = PDFiD(self.src_path)
oPDFiD = cPDFiD(xmlDoc, True)
if oPDFiD.encrypt.count > 0:
self.make_dangerous('Encrypted pdf')
if oPDFiD.js.count > 0 or oPDFiD.javascript.count > 0:
self.make_dangerous('Pdf with embedded javascript')
if oPDFiD.aa.count > 0 or oPDFiD.openaction.count > 0:
self.make_dangerous('Pdf with openaction(s)')
if oPDFiD.richmedia.count > 0:
self.make_dangerous('Pdf containing flash')
if oPDFiD.launch.count > 0:
self.make_dangerous('Pdf with launch action(s)')
if oPDFiD.xfa.count > 0:
self.make_dangerous('Pdf with XFA structures')
if oPDFiD.objstm.count > 0:
self.make_dangerous('Pdf with ObjectStream structures')
if not self.is_dangerous:
self.add_description('Pdf file')
def _archive(self):
"""
Process an archive using 7zip.
The archive is extracted to a temporary directory and self.process_dir
is called on that directory. The recursive archive depth is increased
to protect against archive bombs.
"""
# TODO: change this to something archive type specific instead of generic 'Archive'
self.add_description('Archive')
self.should_copy = False
self.is_archive = True
def _unknown_app(self):
"""Process an unknown file."""
self.make_dangerous('Unknown application file')
def _binary_app(self):
"""Process an unknown binary file."""
self.make_dangerous('Unknown binary file')
#######################
# Metadata extractors
def _metadata_exif(self, metadata_file_path):
"""Read exif metadata from a jpg or tiff file using exifread."""
# TODO: can we shorten this method somehow?
with open(self.src_path, 'rb') as img:
tags = None
try:
tags = exifread.process_file(img, debug=True)
except Exception as e:
self.add_error(e, "Error while trying to grab full metadata for file {}; retrying for partial data.".format(self.src_path))
if tags is None:
try:
tags = exifread.process_file(img, debug=True)
except Exception as e:
self.add_error(e, "Failed to get any metadata for file {}.".format(self.src_path))
return False
for tag in sorted(tags.keys()):
# These tags are long and obnoxious/binary so we don't add them
if tag not in ('JPEGThumbnail', 'TIFFThumbnail'):
tag_string = str(tags[tag])
# Exifreader truncates data.
if len(tag_string) > 25 and tag_string.endswith(", ... ]"):
tag_value = tags[tag].values
tag_string = str(tag_value)
                    with open(metadata_file_path, 'a') as metadata_file:  # append so earlier tags are kept
metadata_file.write("Key: {}\tValue: {}\n".format(tag, tag_string))
# TODO: how do we want to log metadata?
self.set_property('metadata', 'exif')
return True
def _metadata_png(self, metadata_file_path):
"""Extract metadata from a png file using PIL/Pillow."""
warnings.simplefilter('error', Image.DecompressionBombWarning)
try:
with Image.open(self.src_path) as img:
for tag in sorted(img.info.keys()):
# These are long and obnoxious/binary
                    if tag not in ('icc_profile',):  # one-element tuple, so this is a membership test
                        with open(metadata_file_path, 'a') as metadata_file:  # append so earlier tags are kept
metadata_file.write("Key: {}\tValue: {}\n".format(tag, img.info[tag]))
# LOG: handle metadata
self.set_property('metadata', 'png')
except Exception as e: # Catch decompression bombs
# TODO: only catch DecompressionBombWarnings here?
self.add_error(e, "Caught exception processing metadata for {}".format(self.src_path))
self.make_dangerous('exception processing metadata')
return False
def extract_metadata(self):
"""Create metadata file and call correct metadata extraction method."""
metadata_file_path = self.create_metadata_file(".metadata.txt")
mt = self.mimetype
metadata_processing_method = self.metadata_mimetype_methods.get(mt)
if metadata_processing_method:
# TODO: should we return metadata and write it here instead of in processing method?
metadata_processing_method(metadata_file_path)
#######################
# ##### Media - audio and video aren't converted ######
def audio(self):
"""Process an audio file."""
self.add_description('Audio file')
self._media_processing()
def video(self):
"""Process a video."""
self.add_description('Video file')
self._media_processing()
def _media_processing(self):
"""Generic way to process all media files."""
self.add_description('Media file')
def image(self):
"""
Process an image.
        Extracts metadata to dest key using self.extract_metadata() if metadata
is present. Creates a temporary directory on dest key, opens the image
using PIL.Image, saves it to the temporary directory, and copies it to
the destination.
"""
if self.has_metadata:
self.extract_metadata()
tempdir_path = self.make_tempdir()
tempfile_path = os.path.join(tempdir_path, self.filename)
warnings.simplefilter('error', Image.DecompressionBombWarning)
try: # Do image conversions
with Image.open(self.src_path) as img_in:
with Image.frombytes(img_in.mode, img_in.size, img_in.tobytes()) as img_out:
img_out.save(tempfile_path)
self.src_path = tempfile_path
except Exception as e: # Catch decompression bombs
# TODO: change this from all Exceptions to specific DecompressionBombWarning
self.add_error(e, "Caught exception (possible decompression bomb?) while translating file {}.".format(self.src_path))
self.make_dangerous('Image file containing decompression bomb')
if not self.is_dangerous:
self.add_description('Image file')
class GroomerLogger(object):
"""Groomer logging interface."""
def __init__(self, src_root_path, dst_root_path, debug=False):
self._src_root_path = src_root_path
self._dst_root_path = dst_root_path
self._log_dir_path = self._make_log_dir(dst_root_path)
self.log_path = os.path.join(self._log_dir_path, 'circlean_log.txt')
self._add_root_dir(src_root_path)
if debug:
self.log_debug_err = os.path.join(self._log_dir_path, 'debug_stderr.log')
self.log_debug_out = os.path.join(self._log_dir_path, 'debug_stdout.log')
else:
self.log_debug_err = os.devnull
self.log_debug_out = os.devnull
def _make_log_dir(self, root_dir_path):
"""Create the directory in the dest dir that will hold the logs"""
log_dir_path = os.path.join(root_dir_path, 'logs')
if os.path.exists(log_dir_path):
shutil.rmtree(log_dir_path)
os.makedirs(log_dir_path)
return log_dir_path
def _add_root_dir(self, root_path):
"""Add the root directory to the log"""
dirname = os.path.split(root_path)[1] + '/'
with open(self.log_path, mode='ab') as lf:
lf.write(bytes(dirname, 'utf-8'))
lf.write(b'\n')
def add_file(self, file_path, file_props, in_tempdir=False):
"""Add a file to the log. Takes a path and a dict of file properties."""
depth = self._get_path_depth(file_path)
try:
file_hash = Logging.computehash(file_path)[:6]
except IsADirectoryError:
file_hash = 'directory'
except FileNotFoundError:
file_hash = '------'
if file_props['is_symlink']:
symlink_template = "+- NOT COPIED: symbolic link to {name} ({sha_hash})"
log_string = symlink_template.format(
name=file_props['symlink_path'],
sha_hash=file_hash
)
else:
if file_props['is_dangerous']:
category = "Dangerous"
else:
category = "Normal"
size_string = self._format_file_size(file_props['file_size'])
if not file_props['copied']:
copied_string = 'NOT COPIED: '
else:
copied_string = ''
file_template = "+- {copied}{name} ({sha_hash}): {size}, type: {mt}/{st}. {cat}: {desc_str}"
log_string = file_template.format(
copied=copied_string,
name=file_props['filename'],
sha_hash=file_hash,
size=size_string,
mt=file_props['maintype'],
st=file_props['subtype'],
cat=category,
desc_str=file_props['description_string'],
)
if file_props['errors']:
error_string = ', '.join([str(key) for key in file_props['errors']])
log_string += (' Errors: ' + error_string)
if in_tempdir:
depth -= 1
self._write_line_to_log(log_string, depth)
def add_dir(self, dir_path):
"""Add a directory to the log"""
path_depth = self._get_path_depth(dir_path)
dirname = os.path.split(dir_path)[1] + '/'
log_line = '+- ' + dirname
self._write_line_to_log(log_line, path_depth)
def _format_file_size(self, size):
"""Returns a string with the file size and appropriate unit"""
file_size = size
for unit in ('B', 'KB', 'MB', 'GB'):
if file_size < 1024:
return str(int(file_size)) + unit
else:
file_size = file_size / 1024
        return str(int(file_size)) + 'TB'  # anything that survives the loop has been divided down to terabytes
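    # Worked examples (values chosen for illustration): 900 -> '900B';
    # 1536 -> '1KB' because 1536/1024 = 1.5 is truncated by int();
    # 5 * 1024**2 -> '5MB'. Each division moves the value to the next unit.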
def _get_path_depth(self, path):
"""Returns the relative path depth compared to root directory"""
if self._dst_root_path in path:
base_path = self._dst_root_path
elif self._src_root_path in path:
base_path = self._src_root_path
relpath = os.path.relpath(path, base_path)
path_depth = relpath.count(os.path.sep)
return path_depth
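    # Example (hypothetical paths): with _dst_root_path == '/mnt/dst', the path
    # '/mnt/dst/photos/2017/img.jpg' has relpath 'photos/2017/img.jpg', which
    # contains two separators, so the returned depth is 2.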
def _write_line_to_log(self, line, indentation_depth):
"""
Write a line to the log
Pad the line according to the `indentation_depth`.
"""
padding = b' '
padding += b'| ' * indentation_depth
line_bytes = os.fsencode(line)
with open(self.log_path, mode='ab') as lf:
lf.write(padding)
lf.write(line_bytes)
lf.write(b'\n')
class KittenGroomerFileCheck(KittenGroomerBase):
def __init__(self, root_src, root_dst, max_recursive_depth=2, debug=False):
super(KittenGroomerFileCheck, self).__init__(root_src, root_dst)
self.recursive_archive_depth = 0
self.max_recursive_depth = max_recursive_depth
self.logger = GroomerLogger(root_src, root_dst, debug)
def __repr__(self):
return "filecheck.KittenGroomerFileCheck object: {{{}}}".format(
os.path.basename(self.src_root_path)
)
def process_dir(self, src_dir, dst_dir):
"""Process a directory on the source key."""
for srcpath in self.list_files_dirs(src_dir):
if not os.path.islink(srcpath) and os.path.isdir(srcpath):
self.logger.add_dir(srcpath)
else:
dstpath = os.path.join(dst_dir, os.path.basename(srcpath))
cur_file = File(srcpath, dstpath)
self.process_file(cur_file)
def process_file(self, file):
"""
Process an individual file.
Check the file, handle archives using self.process_archive, copy
        the file to the destination key, and clean up the temporary directory.
"""
file.check()
if file.is_archive:
self.process_archive(file)
else:
if file.should_copy:
file.safe_copy()
file.set_property('copied', True)
if not file._validate_random_hashes():
                # The copied file does not match the file that was checked
file.make_dangerous('The copied file is different from the one checked, removing.')
os.remove(file.dst_path)
self.write_file_to_log(file)
# TODO: Can probably handle cleaning up the tempdir better
if hasattr(file, 'tempdir_path'):
self.safe_rmtree(file.tempdir_path)
def process_archive(self, file):
"""
Unpack an archive using 7zip and process contents using process_dir.
Should be given a Kittengroomer file object whose src_path points
to an archive.
"""
self.recursive_archive_depth += 1
if self.recursive_archive_depth >= self.max_recursive_depth:
file.make_dangerous('Archive bomb')
else:
tempdir_path = file.make_tempdir()
command_str = '{} -p1 x "{}" -o"{}" -bd -aoa'
# -p1=password, x=extract, -o=output location, -bd=no % indicator, -aoa=overwrite existing files
unpack_command = command_str.format(SEVENZ_PATH,
file.src_path, tempdir_path)
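            # Example of the expanded command (hypothetical paths; SEVENZ_PATH is
            # defined elsewhere in this module):
            #   /usr/bin/7z -p1 x "/media/src/docs.zip" -o"/tmp/tmpXYZ" -bd -aoa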
self._run_process(unpack_command)
self.write_file_to_log(file)
self.process_dir(tempdir_path, file.dst_path)
self.safe_rmtree(tempdir_path)
self.recursive_archive_depth -= 1
def _run_process(self, command_string, timeout=None):
"""Run command_string in a subprocess, wait until it finishes."""
args = shlex.split(command_string)
with open(self.logger.log_debug_err, 'ab') as stderr, open(self.logger.log_debug_out, 'ab') as stdout:
try:
subprocess.check_call(args, stdout=stdout, stderr=stderr, timeout=timeout)
except (subprocess.TimeoutExpired, subprocess.CalledProcessError):
return
return True
def write_file_to_log(self, file):
"""Pass information about `file` to self.logger."""
props = file.get_all_props()
if not file.is_archive:
# FIXME: in_tempdir is a hack to make image files appear at the correct tree depth in log
in_tempdir = os.path.exists(file.tempdir_path)
self.logger.add_file(file.src_path, props, in_tempdir)
def list_files_dirs(self, root_dir_path):
"""
Returns a list of all files and directories
Performs a depth-first traversal of the file tree.
"""
queue = []
for path in sorted(os.listdir(root_dir_path), key=lambda x: str.lower(x)):
full_path = os.path.join(root_dir_path, path)
# check for symlinks first to prevent getting trapped in infinite symlink recursion
if os.path.islink(full_path):
queue.append(full_path)
elif os.path.isdir(full_path):
queue.append(full_path)
queue += self.list_files_dirs(full_path)
elif os.path.isfile(full_path):
queue.append(full_path)
return queue
def run(self):
self.process_dir(self.src_root_path, self.dst_root_path)
def main(kg_implementation, description):
parser = argparse.ArgumentParser(prog='KittenGroomer', description=description)
parser.add_argument('-s', '--source', type=str, help='Source directory')
parser.add_argument('-d', '--destination', type=str, help='Destination directory')
args = parser.parse_args()
kg = kg_implementation(args.source, args.destination)
kg.run()
if __name__ == '__main__':
main(KittenGroomerFileCheck, 'File sanitizer used in CIRCLean. Renames potentially dangerous files.')
| bsd-3-clause | 5,973,277,264,772,506,000 | 42.466747 | 140 | 0.56995 | false |
xingjian-f/Leetcode-solution | 304. Range Sum Query 2D - Immutable.py | 1 | 1592 | class NumMatrix(object):
def __init__(self, matrix):
"""
initialize your data structure here.
:type matrix: List[List[int]]
"""
self.dp = [range(len(matrix[0])) for i in range(len(matrix))]
for i in range(len(matrix)):
for j in range(len(matrix[i])):
self.dp[i][j] = matrix[i][j]
if i > 0:
self.dp[i][j] += self.dp[i-1][j]
if j > 0:
self.dp[i][j] += self.dp[i][j-1]
if i>0 and j>0:
self.dp[i][j] -= self.dp[i-1][j-1]
def sumRegion(self, row1, col1, row2, col2):
"""
sum of elements matrix[(row1,col1)..(row2,col2)], inclusive.
:type row1: int
:type col1: int
:type row2: int
:type col2: int
:rtype: int
"""
ret = self.dp[row2][col2]
if row1>0:
ret -= self.dp[row1-1][col2]
if col1>0:
ret -= self.dp[row2][col1-1]
if row1>0 and col1>0:
ret += self.dp[row1-1][col1-1]
return ret
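# Illustrative note (not part of the original solution): self.dp[i][j] holds the sum
# of the sub-matrix matrix[0..i][0..j], so a region sum follows by inclusion-exclusion:
#   sumRegion(r1, c1, r2, c2) = dp[r2][c2] - dp[r1-1][c2] - dp[r2][c1-1] + dp[r1-1][c1-1]
# For example, with matrix = [[1, 2], [3, 4]] the dp table is [[1, 3], [4, 10]], and
# sumRegion(1, 1, 1, 1) = 10 - 3 - 4 + 1 = 4, the bottom-right element.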
# Your NumMatrix object will be instantiated and called as such:
# numMatrix = NumMatrix([ [3, 0, 1, 4, 2],
# [5, 6, 3, 2, 1],
# [1, 2, 0, 1, 5],
# [4, 1, 0, 1, 7],
# [1, 0, 3, 0, 5]])
numMatrix = NumMatrix([[-2]])
print numMatrix.sumRegion(0,0,0,0)
# print numMatrix.sumRegion(0, 1, 2, 3)
# print numMatrix.sumRegion(1, 2, 3, 4)
# print numMatrix.sumRegion(2, 1, 4, 3) #-> 8
# print numMatrix.sumRegion(1, 1, 2, 2) #-> 11
# print numMatrix.sumRegion(1, 2, 2, 4) #-> 12 | mit | -3,337,564,328,645,928,400 | 30.235294 | 69 | 0.486809 | false |
gavinelliott/patsi | node_modules/grunt-sass/node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/generator/cmake.py | 148 | 44560 | # Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""cmake output module
This module is under development and should be considered experimental.
This module produces cmake (2.8.8+) input as its output. One CMakeLists.txt is
created for each configuration.
This module's original purpose was to support editing in IDEs like KDevelop
which use CMake for project management. It is also possible to use CMake to
generate projects for other IDEs such as eclipse cdt and code::blocks. QtCreator
will convert the CMakeLists.txt to a code::blocks cbp for the editor to read,
but build using CMake. As a result QtCreator editor is unaware of compiler
defines. The generated CMakeLists.txt can also be used to build on Linux. There
is currently no support for building on platforms other than Linux.
The generated CMakeLists.txt should properly compile all projects. However,
there is a mismatch between gyp and cmake with regard to linking. All attempts
are made to work around this, but CMake sometimes sees -Wl,--start-group as a
library and incorrectly repeats it. As a result the output of this generator
should not be relied on for building.
When using with kdevelop, use version 4.4+. Previous versions of kdevelop will
not be able to find the header file directories described in the generated
CMakeLists.txt file.
"""
import multiprocessing
import os
import signal
import string
import subprocess
import gyp.common
generator_default_variables = {
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '',
'STATIC_LIB_PREFIX': 'lib',
'STATIC_LIB_SUFFIX': '.a',
'SHARED_LIB_PREFIX': 'lib',
'SHARED_LIB_SUFFIX': '.so',
'SHARED_LIB_DIR': '${builddir}/lib.${TOOLSET}',
'LIB_DIR': '${obj}.${TOOLSET}',
'INTERMEDIATE_DIR': '${obj}.${TOOLSET}/${TARGET}/geni',
'SHARED_INTERMEDIATE_DIR': '${obj}/gen',
'PRODUCT_DIR': '${builddir}',
'RULE_INPUT_PATH': '${RULE_INPUT_PATH}',
'RULE_INPUT_DIRNAME': '${RULE_INPUT_DIRNAME}',
'RULE_INPUT_NAME': '${RULE_INPUT_NAME}',
'RULE_INPUT_ROOT': '${RULE_INPUT_ROOT}',
'RULE_INPUT_EXT': '${RULE_INPUT_EXT}',
'CONFIGURATION_NAME': '${configuration}',
}
FULL_PATH_VARS = ('${CMAKE_SOURCE_DIR}', '${builddir}', '${obj}')
generator_supports_multiple_toolsets = True
generator_wants_static_library_dependencies_adjusted = True
COMPILABLE_EXTENSIONS = {
'.c': 'cc',
'.cc': 'cxx',
'.cpp': 'cxx',
'.cxx': 'cxx',
'.s': 's', # cc
'.S': 's', # cc
}
def RemovePrefix(a, prefix):
"""Returns 'a' without 'prefix' if it starts with 'prefix'."""
return a[len(prefix):] if a.startswith(prefix) else a
def CalculateVariables(default_variables, params):
"""Calculate additional variables for use in the build (called by gyp)."""
default_variables.setdefault('OS', gyp.common.GetFlavor(params))
def Compilable(filename):
"""Return true if the file is compilable (should be in OBJS)."""
return any(filename.endswith(e) for e in COMPILABLE_EXTENSIONS)
def Linkable(filename):
"""Return true if the file is linkable (should be on the link line)."""
return filename.endswith('.o')
def NormjoinPathForceCMakeSource(base_path, rel_path):
"""Resolves rel_path against base_path and returns the result.
If rel_path is an absolute path it is returned unchanged.
Otherwise it is resolved against base_path and normalized.
If the result is a relative path, it is forced to be relative to the
CMakeLists.txt.
"""
if os.path.isabs(rel_path):
return rel_path
if any([rel_path.startswith(var) for var in FULL_PATH_VARS]):
return rel_path
# TODO: do we need to check base_path for absolute variables as well?
return os.path.join('${CMAKE_SOURCE_DIR}',
os.path.normpath(os.path.join(base_path, rel_path)))
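# Examples (hypothetical arguments):
#   NormjoinPathForceCMakeSource('../..', 'foo/bar.c')    -> '${CMAKE_SOURCE_DIR}/../../foo/bar.c'
#   NormjoinPathForceCMakeSource('sub', '/abs/path.c')    -> '/abs/path.c'     (absolute, unchanged)
#   NormjoinPathForceCMakeSource('sub', '${obj}/gen/x.c') -> '${obj}/gen/x.c'  (already rooted at a full-path variable)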
def NormjoinPath(base_path, rel_path):
"""Resolves rel_path against base_path and returns the result.
TODO: what is this really used for?
If rel_path begins with '$' it is returned unchanged.
Otherwise it is resolved against base_path if relative, then normalized.
"""
if rel_path.startswith('$') and not rel_path.startswith('${configuration}'):
return rel_path
return os.path.normpath(os.path.join(base_path, rel_path))
def CMakeStringEscape(a):
"""Escapes the string 'a' for use inside a CMake string.
This means escaping
'\' otherwise it may be seen as modifying the next character
'"' otherwise it will end the string
';' otherwise the string becomes a list
The following do not need to be escaped
'#' when the lexer is in string state, this does not start a comment
The following are yet unknown
'$' generator variables (like ${obj}) must not be escaped,
but text $ should be escaped
what is wanted is to know which $ come from generator variables
"""
return a.replace('\\', '\\\\').replace(';', '\\;').replace('"', '\\"')
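# Example (hypothetical input): for the literal characters  a"b;c\d  this returns
# a\"b\;c\\d  - the backslash is doubled and the quote and semicolon are prefixed
# with a backslash, so the result is safe inside a double-quoted CMake string.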
def SetFileProperty(output, source_name, property_name, values, sep):
"""Given a set of source file, sets the given property on them."""
output.write('set_source_files_properties(')
output.write(source_name)
output.write(' PROPERTIES ')
output.write(property_name)
output.write(' "')
for value in values:
output.write(CMakeStringEscape(value))
output.write(sep)
output.write('")\n')
def SetFilesProperty(output, variable, property_name, values, sep):
"""Given a set of source files, sets the given property on them."""
output.write('set_source_files_properties(')
WriteVariable(output, variable)
output.write(' PROPERTIES ')
output.write(property_name)
output.write(' "')
for value in values:
output.write(CMakeStringEscape(value))
output.write(sep)
output.write('")\n')
def SetTargetProperty(output, target_name, property_name, values, sep=''):
"""Given a target, sets the given property."""
output.write('set_target_properties(')
output.write(target_name)
output.write(' PROPERTIES ')
output.write(property_name)
output.write(' "')
for value in values:
output.write(CMakeStringEscape(value))
output.write(sep)
output.write('")\n')
def SetVariable(output, variable_name, value):
"""Sets a CMake variable."""
output.write('set(')
output.write(variable_name)
output.write(' "')
output.write(CMakeStringEscape(value))
output.write('")\n')
def SetVariableList(output, variable_name, values):
"""Sets a CMake variable to a list."""
if not values:
return SetVariable(output, variable_name, "")
if len(values) == 1:
return SetVariable(output, variable_name, values[0])
output.write('list(APPEND ')
output.write(variable_name)
output.write('\n "')
output.write('"\n "'.join([CMakeStringEscape(value) for value in values]))
output.write('")\n')
def UnsetVariable(output, variable_name):
"""Unsets a CMake variable."""
output.write('unset(')
output.write(variable_name)
output.write(')\n')
def WriteVariable(output, variable_name, prepend=None):
if prepend:
output.write(prepend)
output.write('${')
output.write(variable_name)
output.write('}')
class CMakeTargetType(object):
def __init__(self, command, modifier, property_modifier):
self.command = command
self.modifier = modifier
self.property_modifier = property_modifier
cmake_target_type_from_gyp_target_type = {
'executable': CMakeTargetType('add_executable', None, 'RUNTIME'),
'static_library': CMakeTargetType('add_library', 'STATIC', 'ARCHIVE'),
'shared_library': CMakeTargetType('add_library', 'SHARED', 'LIBRARY'),
'loadable_module': CMakeTargetType('add_library', 'MODULE', 'LIBRARY'),
'none': CMakeTargetType('add_custom_target', 'SOURCES', None),
}
def StringToCMakeTargetName(a):
"""Converts the given string 'a' to a valid CMake target name.
All invalid characters are replaced by '_'.
Invalid for cmake: ' ', '/', '(', ')', '"'
Invalid for make: ':'
Invalid for unknown reasons but cause failures: '.'
"""
return a.translate(string.maketrans(' /():."', '_______'))
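# Example (hypothetical input):
#   StringToCMakeTargetName('base/base.gyp:base unittests') -> 'base_base_gyp_base_unittests'
# since every character in ' /():."' is replaced by '_'.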
def WriteActions(target_name, actions, extra_sources, extra_deps,
path_to_gyp, output):
"""Write CMake for the 'actions' in the target.
Args:
target_name: the name of the CMake target being generated.
actions: the Gyp 'actions' dict for this target.
extra_sources: [(<cmake_src>, <src>)] to append with generated source files.
    extra_deps: [<cmake_target>] to append with generated targets.
path_to_gyp: relative path from CMakeLists.txt being generated to
the Gyp file in which the target being generated is defined.
"""
for action in actions:
action_name = StringToCMakeTargetName(action['action_name'])
action_target_name = '%s__%s' % (target_name, action_name)
inputs = action['inputs']
inputs_name = action_target_name + '__input'
SetVariableList(output, inputs_name,
[NormjoinPathForceCMakeSource(path_to_gyp, dep) for dep in inputs])
outputs = action['outputs']
cmake_outputs = [NormjoinPathForceCMakeSource(path_to_gyp, out)
for out in outputs]
outputs_name = action_target_name + '__output'
SetVariableList(output, outputs_name, cmake_outputs)
# Build up a list of outputs.
# Collect the output dirs we'll need.
dirs = set(dir for dir in (os.path.dirname(o) for o in outputs) if dir)
if int(action.get('process_outputs_as_sources', False)):
extra_sources.extend(zip(cmake_outputs, outputs))
# add_custom_command
output.write('add_custom_command(OUTPUT ')
WriteVariable(output, outputs_name)
output.write('\n')
if len(dirs) > 0:
for directory in dirs:
output.write(' COMMAND ${CMAKE_COMMAND} -E make_directory ')
output.write(directory)
output.write('\n')
output.write(' COMMAND ')
output.write(gyp.common.EncodePOSIXShellList(action['action']))
output.write('\n')
output.write(' DEPENDS ')
WriteVariable(output, inputs_name)
output.write('\n')
output.write(' WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/')
output.write(path_to_gyp)
output.write('\n')
output.write(' COMMENT ')
if 'message' in action:
output.write(action['message'])
else:
output.write(action_target_name)
output.write('\n')
output.write(' VERBATIM\n')
output.write(')\n')
# add_custom_target
output.write('add_custom_target(')
output.write(action_target_name)
output.write('\n DEPENDS ')
WriteVariable(output, outputs_name)
output.write('\n SOURCES ')
WriteVariable(output, inputs_name)
output.write('\n)\n')
extra_deps.append(action_target_name)
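# Illustrative note (not part of the original generator): for a gyp action named
# 'gen_version' on target 'foo', the loop above emits roughly
#   add_custom_command(OUTPUT ${foo__gen_version__output}
#     COMMAND <the action's command line>
#     DEPENDS ${foo__gen_version__input}
#     WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/<path to the .gyp file>
#     VERBATIM)
#   add_custom_target(foo__gen_version
#     DEPENDS ${foo__gen_version__output}
#     SOURCES ${foo__gen_version__input})
# and appends 'foo__gen_version' to extra_deps so the owning target depends on it.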
def NormjoinRulePathForceCMakeSource(base_path, rel_path, rule_source):
if rel_path.startswith(("${RULE_INPUT_PATH}","${RULE_INPUT_DIRNAME}")):
if any([rule_source.startswith(var) for var in FULL_PATH_VARS]):
return rel_path
return NormjoinPathForceCMakeSource(base_path, rel_path)
def WriteRules(target_name, rules, extra_sources, extra_deps,
path_to_gyp, output):
"""Write CMake for the 'rules' in the target.
Args:
target_name: the name of the CMake target being generated.
    rules: the Gyp 'rules' dict for this target.
extra_sources: [(<cmake_src>, <src>)] to append with generated source files.
    extra_deps: [<cmake_target>] to append with generated targets.
path_to_gyp: relative path from CMakeLists.txt being generated to
the Gyp file in which the target being generated is defined.
"""
for rule in rules:
rule_name = StringToCMakeTargetName(target_name + '__' + rule['rule_name'])
inputs = rule.get('inputs', [])
inputs_name = rule_name + '__input'
SetVariableList(output, inputs_name,
[NormjoinPathForceCMakeSource(path_to_gyp, dep) for dep in inputs])
outputs = rule['outputs']
var_outputs = []
for count, rule_source in enumerate(rule.get('rule_sources', [])):
action_name = rule_name + '_' + str(count)
rule_source_dirname, rule_source_basename = os.path.split(rule_source)
rule_source_root, rule_source_ext = os.path.splitext(rule_source_basename)
SetVariable(output, 'RULE_INPUT_PATH', rule_source)
SetVariable(output, 'RULE_INPUT_DIRNAME', rule_source_dirname)
SetVariable(output, 'RULE_INPUT_NAME', rule_source_basename)
SetVariable(output, 'RULE_INPUT_ROOT', rule_source_root)
SetVariable(output, 'RULE_INPUT_EXT', rule_source_ext)
# Build up a list of outputs.
# Collect the output dirs we'll need.
dirs = set(dir for dir in (os.path.dirname(o) for o in outputs) if dir)
# Create variables for the output, as 'local' variable will be unset.
these_outputs = []
for output_index, out in enumerate(outputs):
output_name = action_name + '_' + str(output_index)
SetVariable(output, output_name,
NormjoinRulePathForceCMakeSource(path_to_gyp, out,
rule_source))
if int(rule.get('process_outputs_as_sources', False)):
extra_sources.append(('${' + output_name + '}', out))
these_outputs.append('${' + output_name + '}')
var_outputs.append('${' + output_name + '}')
# add_custom_command
output.write('add_custom_command(OUTPUT\n')
for out in these_outputs:
output.write(' ')
output.write(out)
output.write('\n')
for directory in dirs:
output.write(' COMMAND ${CMAKE_COMMAND} -E make_directory ')
output.write(directory)
output.write('\n')
output.write(' COMMAND ')
output.write(gyp.common.EncodePOSIXShellList(rule['action']))
output.write('\n')
output.write(' DEPENDS ')
WriteVariable(output, inputs_name)
output.write(' ')
output.write(NormjoinPath(path_to_gyp, rule_source))
output.write('\n')
# CMAKE_SOURCE_DIR is where the CMakeLists.txt lives.
# The cwd is the current build directory.
output.write(' WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/')
output.write(path_to_gyp)
output.write('\n')
output.write(' COMMENT ')
if 'message' in rule:
output.write(rule['message'])
else:
output.write(action_name)
output.write('\n')
output.write(' VERBATIM\n')
output.write(')\n')
UnsetVariable(output, 'RULE_INPUT_PATH')
UnsetVariable(output, 'RULE_INPUT_DIRNAME')
UnsetVariable(output, 'RULE_INPUT_NAME')
UnsetVariable(output, 'RULE_INPUT_ROOT')
UnsetVariable(output, 'RULE_INPUT_EXT')
# add_custom_target
output.write('add_custom_target(')
output.write(rule_name)
output.write(' DEPENDS\n')
for out in var_outputs:
output.write(' ')
output.write(out)
output.write('\n')
output.write('SOURCES ')
WriteVariable(output, inputs_name)
output.write('\n')
for rule_source in rule.get('rule_sources', []):
output.write(' ')
output.write(NormjoinPath(path_to_gyp, rule_source))
output.write('\n')
output.write(')\n')
extra_deps.append(rule_name)
def WriteCopies(target_name, copies, extra_deps, path_to_gyp, output):
"""Write CMake for the 'copies' in the target.
Args:
target_name: the name of the CMake target being generated.
    copies: the Gyp 'copies' list for this target.
    extra_deps: [<cmake_target>] to append with generated targets.
path_to_gyp: relative path from CMakeLists.txt being generated to
the Gyp file in which the target being generated is defined.
"""
copy_name = target_name + '__copies'
# CMake gets upset with custom targets with OUTPUT which specify no output.
have_copies = any(copy['files'] for copy in copies)
if not have_copies:
output.write('add_custom_target(')
output.write(copy_name)
output.write(')\n')
extra_deps.append(copy_name)
return
class Copy(object):
def __init__(self, ext, command):
self.cmake_inputs = []
self.cmake_outputs = []
self.gyp_inputs = []
self.gyp_outputs = []
self.ext = ext
self.inputs_name = None
self.outputs_name = None
self.command = command
file_copy = Copy('', 'copy')
dir_copy = Copy('_dirs', 'copy_directory')
for copy in copies:
files = copy['files']
destination = copy['destination']
for src in files:
path = os.path.normpath(src)
basename = os.path.split(path)[1]
dst = os.path.join(destination, basename)
copy = file_copy if os.path.basename(src) else dir_copy
copy.cmake_inputs.append(NormjoinPathForceCMakeSource(path_to_gyp, src))
copy.cmake_outputs.append(NormjoinPathForceCMakeSource(path_to_gyp, dst))
copy.gyp_inputs.append(src)
copy.gyp_outputs.append(dst)
for copy in (file_copy, dir_copy):
if copy.cmake_inputs:
copy.inputs_name = copy_name + '__input' + copy.ext
SetVariableList(output, copy.inputs_name, copy.cmake_inputs)
copy.outputs_name = copy_name + '__output' + copy.ext
SetVariableList(output, copy.outputs_name, copy.cmake_outputs)
# add_custom_command
output.write('add_custom_command(\n')
output.write('OUTPUT')
for copy in (file_copy, dir_copy):
if copy.outputs_name:
WriteVariable(output, copy.outputs_name, ' ')
output.write('\n')
for copy in (file_copy, dir_copy):
for src, dst in zip(copy.gyp_inputs, copy.gyp_outputs):
# 'cmake -E copy src dst' will create the 'dst' directory if needed.
output.write('COMMAND ${CMAKE_COMMAND} -E %s ' % copy.command)
output.write(src)
output.write(' ')
output.write(dst)
output.write("\n")
output.write('DEPENDS')
for copy in (file_copy, dir_copy):
if copy.inputs_name:
WriteVariable(output, copy.inputs_name, ' ')
output.write('\n')
output.write('WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/')
output.write(path_to_gyp)
output.write('\n')
output.write('COMMENT Copying for ')
output.write(target_name)
output.write('\n')
output.write('VERBATIM\n')
output.write(')\n')
# add_custom_target
output.write('add_custom_target(')
output.write(copy_name)
output.write('\n DEPENDS')
for copy in (file_copy, dir_copy):
if copy.outputs_name:
WriteVariable(output, copy.outputs_name, ' ')
output.write('\n SOURCES')
if file_copy.inputs_name:
WriteVariable(output, file_copy.inputs_name, ' ')
output.write('\n)\n')
extra_deps.append(copy_name)
def CreateCMakeTargetBaseName(qualified_target):
"""This is the name we would like the target to have."""
_, gyp_target_name, gyp_target_toolset = (
gyp.common.ParseQualifiedTarget(qualified_target))
cmake_target_base_name = gyp_target_name
if gyp_target_toolset and gyp_target_toolset != 'target':
cmake_target_base_name += '_' + gyp_target_toolset
return StringToCMakeTargetName(cmake_target_base_name)
def CreateCMakeTargetFullName(qualified_target):
"""An unambiguous name for the target."""
gyp_file, gyp_target_name, gyp_target_toolset = (
gyp.common.ParseQualifiedTarget(qualified_target))
cmake_target_full_name = gyp_file + ':' + gyp_target_name
if gyp_target_toolset and gyp_target_toolset != 'target':
cmake_target_full_name += '_' + gyp_target_toolset
return StringToCMakeTargetName(cmake_target_full_name)
class CMakeNamer(object):
"""Converts Gyp target names into CMake target names.
CMake requires that target names be globally unique. One way to ensure
  this is to fully qualify the names of the targets. Unfortunately, this
ends up with all targets looking like "chrome_chrome_gyp_chrome" instead
of just "chrome". If this generator were only interested in building, it
would be possible to fully qualify all target names, then create
unqualified target names which depend on all qualified targets which
should have had that name. This is more or less what the 'make' generator
does with aliases. However, one goal of this generator is to create CMake
files for use with IDEs, and fully qualified names are not as user
friendly.
Since target name collision is rare, we do the above only when required.
Toolset variants are always qualified from the base, as this is required for
building. However, it also makes sense for an IDE, as it is possible for
defines to be different.
"""
def __init__(self, target_list):
    self.cmake_target_base_names_conflicting = set()
cmake_target_base_names_seen = set()
for qualified_target in target_list:
cmake_target_base_name = CreateCMakeTargetBaseName(qualified_target)
if cmake_target_base_name not in cmake_target_base_names_seen:
cmake_target_base_names_seen.add(cmake_target_base_name)
else:
        self.cmake_target_base_names_conflicting.add(cmake_target_base_name)
def CreateCMakeTargetName(self, qualified_target):
base_name = CreateCMakeTargetBaseName(qualified_target)
    if base_name in self.cmake_target_base_names_conflicting:
return CreateCMakeTargetFullName(qualified_target)
return base_name
def WriteTarget(namer, qualified_target, target_dicts, build_dir, config_to_use,
options, generator_flags, all_qualified_targets, output):
# The make generator does this always.
# TODO: It would be nice to be able to tell CMake all dependencies.
circular_libs = generator_flags.get('circular', True)
if not generator_flags.get('standalone', False):
output.write('\n#')
output.write(qualified_target)
output.write('\n')
gyp_file, _, _ = gyp.common.ParseQualifiedTarget(qualified_target)
rel_gyp_file = gyp.common.RelativePath(gyp_file, options.toplevel_dir)
rel_gyp_dir = os.path.dirname(rel_gyp_file)
# Relative path from build dir to top dir.
build_to_top = gyp.common.InvertRelativePath(build_dir, options.toplevel_dir)
# Relative path from build dir to gyp dir.
build_to_gyp = os.path.join(build_to_top, rel_gyp_dir)
path_from_cmakelists_to_gyp = build_to_gyp
spec = target_dicts.get(qualified_target, {})
config = spec.get('configurations', {}).get(config_to_use, {})
target_name = spec.get('target_name', '<missing target name>')
target_type = spec.get('type', '<missing target type>')
target_toolset = spec.get('toolset')
cmake_target_type = cmake_target_type_from_gyp_target_type.get(target_type)
if cmake_target_type is None:
print ('Target %s has unknown target type %s, skipping.' %
( target_name, target_type ) )
return
SetVariable(output, 'TARGET', target_name)
SetVariable(output, 'TOOLSET', target_toolset)
cmake_target_name = namer.CreateCMakeTargetName(qualified_target)
extra_sources = []
extra_deps = []
# Actions must come first, since they can generate more OBJs for use below.
if 'actions' in spec:
WriteActions(cmake_target_name, spec['actions'], extra_sources, extra_deps,
path_from_cmakelists_to_gyp, output)
# Rules must be early like actions.
if 'rules' in spec:
WriteRules(cmake_target_name, spec['rules'], extra_sources, extra_deps,
path_from_cmakelists_to_gyp, output)
# Copies
if 'copies' in spec:
WriteCopies(cmake_target_name, spec['copies'], extra_deps,
path_from_cmakelists_to_gyp, output)
# Target and sources
srcs = spec.get('sources', [])
# Gyp separates the sheep from the goats based on file extensions.
# A full separation is done here because of flag handing (see below).
s_sources = []
c_sources = []
cxx_sources = []
linkable_sources = []
other_sources = []
for src in srcs:
_, ext = os.path.splitext(src)
src_type = COMPILABLE_EXTENSIONS.get(ext, None)
    src_norm_path = NormjoinPath(path_from_cmakelists_to_gyp, src)
if src_type == 's':
s_sources.append(src_norm_path)
elif src_type == 'cc':
c_sources.append(src_norm_path)
elif src_type == 'cxx':
cxx_sources.append(src_norm_path)
elif Linkable(ext):
linkable_sources.append(src_norm_path)
else:
other_sources.append(src_norm_path)
for extra_source in extra_sources:
src, real_source = extra_source
_, ext = os.path.splitext(real_source)
src_type = COMPILABLE_EXTENSIONS.get(ext, None)
if src_type == 's':
s_sources.append(src)
elif src_type == 'cc':
c_sources.append(src)
elif src_type == 'cxx':
cxx_sources.append(src)
elif Linkable(ext):
linkable_sources.append(src)
else:
other_sources.append(src)
s_sources_name = None
if s_sources:
s_sources_name = cmake_target_name + '__asm_srcs'
SetVariableList(output, s_sources_name, s_sources)
c_sources_name = None
if c_sources:
c_sources_name = cmake_target_name + '__c_srcs'
SetVariableList(output, c_sources_name, c_sources)
cxx_sources_name = None
if cxx_sources:
cxx_sources_name = cmake_target_name + '__cxx_srcs'
SetVariableList(output, cxx_sources_name, cxx_sources)
linkable_sources_name = None
if linkable_sources:
linkable_sources_name = cmake_target_name + '__linkable_srcs'
SetVariableList(output, linkable_sources_name, linkable_sources)
other_sources_name = None
if other_sources:
other_sources_name = cmake_target_name + '__other_srcs'
SetVariableList(output, other_sources_name, other_sources)
# CMake gets upset when executable targets provide no sources.
# http://www.cmake.org/pipermail/cmake/2010-July/038461.html
dummy_sources_name = None
has_sources = (s_sources_name or
c_sources_name or
cxx_sources_name or
linkable_sources_name or
other_sources_name)
if target_type == 'executable' and not has_sources:
dummy_sources_name = cmake_target_name + '__dummy_srcs'
SetVariable(output, dummy_sources_name,
"${obj}.${TOOLSET}/${TARGET}/genc/dummy.c")
output.write('if(NOT EXISTS "')
WriteVariable(output, dummy_sources_name)
output.write('")\n')
output.write(' file(WRITE "')
WriteVariable(output, dummy_sources_name)
output.write('" "")\n')
output.write("endif()\n")
# CMake is opposed to setting linker directories and considers the practice
# of setting linker directories dangerous. Instead, it favors the use of
# find_library and passing absolute paths to target_link_libraries.
# However, CMake does provide the command link_directories, which adds
# link directories to targets defined after it is called.
# As a result, link_directories must come before the target definition.
# CMake unfortunately has no means of removing entries from LINK_DIRECTORIES.
library_dirs = config.get('library_dirs')
if library_dirs is not None:
output.write('link_directories(')
for library_dir in library_dirs:
output.write(' ')
output.write(NormjoinPath(path_from_cmakelists_to_gyp, library_dir))
output.write('\n')
output.write(')\n')
output.write(cmake_target_type.command)
output.write('(')
output.write(cmake_target_name)
if cmake_target_type.modifier is not None:
output.write(' ')
output.write(cmake_target_type.modifier)
if s_sources_name:
WriteVariable(output, s_sources_name, ' ')
if c_sources_name:
WriteVariable(output, c_sources_name, ' ')
if cxx_sources_name:
WriteVariable(output, cxx_sources_name, ' ')
if linkable_sources_name:
WriteVariable(output, linkable_sources_name, ' ')
if other_sources_name:
WriteVariable(output, other_sources_name, ' ')
if dummy_sources_name:
WriteVariable(output, dummy_sources_name, ' ')
output.write(')\n')
# Let CMake know if the 'all' target should depend on this target.
exclude_from_all = ('TRUE' if qualified_target not in all_qualified_targets
else 'FALSE')
SetTargetProperty(output, cmake_target_name,
'EXCLUDE_FROM_ALL', exclude_from_all)
for extra_target_name in extra_deps:
SetTargetProperty(output, extra_target_name,
'EXCLUDE_FROM_ALL', exclude_from_all)
# Output name and location.
if target_type != 'none':
# Link as 'C' if there are no other files
if not c_sources and not cxx_sources:
SetTargetProperty(output, cmake_target_name, 'LINKER_LANGUAGE', ['C'])
# Mark uncompiled sources as uncompiled.
if other_sources_name:
output.write('set_source_files_properties(')
WriteVariable(output, other_sources_name, '')
output.write(' PROPERTIES HEADER_FILE_ONLY "TRUE")\n')
# Mark object sources as linkable.
if linkable_sources_name:
output.write('set_source_files_properties(')
      WriteVariable(output, linkable_sources_name, '')  # use the linkable sources, matching the guard above
output.write(' PROPERTIES EXTERNAL_OBJECT "TRUE")\n')
# Output directory
target_output_directory = spec.get('product_dir')
if target_output_directory is None:
if target_type in ('executable', 'loadable_module'):
target_output_directory = generator_default_variables['PRODUCT_DIR']
elif target_type == 'shared_library':
target_output_directory = '${builddir}/lib.${TOOLSET}'
elif spec.get('standalone_static_library', False):
target_output_directory = generator_default_variables['PRODUCT_DIR']
else:
base_path = gyp.common.RelativePath(os.path.dirname(gyp_file),
options.toplevel_dir)
target_output_directory = '${obj}.${TOOLSET}'
target_output_directory = (
os.path.join(target_output_directory, base_path))
cmake_target_output_directory = NormjoinPathForceCMakeSource(
path_from_cmakelists_to_gyp,
target_output_directory)
SetTargetProperty(output,
cmake_target_name,
cmake_target_type.property_modifier + '_OUTPUT_DIRECTORY',
cmake_target_output_directory)
# Output name
default_product_prefix = ''
default_product_name = target_name
default_product_ext = ''
if target_type == 'static_library':
static_library_prefix = generator_default_variables['STATIC_LIB_PREFIX']
default_product_name = RemovePrefix(default_product_name,
static_library_prefix)
default_product_prefix = static_library_prefix
default_product_ext = generator_default_variables['STATIC_LIB_SUFFIX']
elif target_type in ('loadable_module', 'shared_library'):
shared_library_prefix = generator_default_variables['SHARED_LIB_PREFIX']
default_product_name = RemovePrefix(default_product_name,
shared_library_prefix)
default_product_prefix = shared_library_prefix
default_product_ext = generator_default_variables['SHARED_LIB_SUFFIX']
elif target_type != 'executable':
print ('ERROR: What output file should be generated?',
'type', target_type, 'target', target_name)
product_prefix = spec.get('product_prefix', default_product_prefix)
product_name = spec.get('product_name', default_product_name)
product_ext = spec.get('product_extension')
if product_ext:
product_ext = '.' + product_ext
else:
product_ext = default_product_ext
SetTargetProperty(output, cmake_target_name, 'PREFIX', product_prefix)
SetTargetProperty(output, cmake_target_name,
cmake_target_type.property_modifier + '_OUTPUT_NAME',
product_name)
SetTargetProperty(output, cmake_target_name, 'SUFFIX', product_ext)
# Make the output of this target referenceable as a source.
cmake_target_output_basename = product_prefix + product_name + product_ext
cmake_target_output = os.path.join(cmake_target_output_directory,
cmake_target_output_basename)
SetFileProperty(output, cmake_target_output, 'GENERATED', ['TRUE'], '')
# Includes
includes = config.get('include_dirs')
if includes:
# This (target include directories) is what requires CMake 2.8.8
includes_name = cmake_target_name + '__include_dirs'
SetVariableList(output, includes_name,
[NormjoinPathForceCMakeSource(path_from_cmakelists_to_gyp, include)
for include in includes])
output.write('set_property(TARGET ')
output.write(cmake_target_name)
output.write(' APPEND PROPERTY INCLUDE_DIRECTORIES ')
WriteVariable(output, includes_name, '')
output.write(')\n')
# Defines
defines = config.get('defines')
if defines is not None:
SetTargetProperty(output,
cmake_target_name,
'COMPILE_DEFINITIONS',
defines,
';')
# Compile Flags - http://www.cmake.org/Bug/view.php?id=6493
# CMake currently does not have target C and CXX flags.
# So, instead of doing...
# cflags_c = config.get('cflags_c')
# if cflags_c is not None:
# SetTargetProperty(output, cmake_target_name,
# 'C_COMPILE_FLAGS', cflags_c, ' ')
# cflags_cc = config.get('cflags_cc')
# if cflags_cc is not None:
# SetTargetProperty(output, cmake_target_name,
# 'CXX_COMPILE_FLAGS', cflags_cc, ' ')
# Instead we must...
cflags = config.get('cflags', [])
cflags_c = config.get('cflags_c', [])
cflags_cxx = config.get('cflags_cc', [])
if (not cflags_c or not c_sources) and (not cflags_cxx or not cxx_sources):
SetTargetProperty(output, cmake_target_name, 'COMPILE_FLAGS', cflags, ' ')
elif c_sources and not (s_sources or cxx_sources):
flags = []
flags.extend(cflags)
flags.extend(cflags_c)
SetTargetProperty(output, cmake_target_name, 'COMPILE_FLAGS', flags, ' ')
elif cxx_sources and not (s_sources or c_sources):
flags = []
flags.extend(cflags)
flags.extend(cflags_cxx)
SetTargetProperty(output, cmake_target_name, 'COMPILE_FLAGS', flags, ' ')
else:
# TODO: This is broken, one cannot generally set properties on files,
# as other targets may require different properties on the same files.
if s_sources and cflags:
SetFilesProperty(output, s_sources_name, 'COMPILE_FLAGS', cflags, ' ')
if c_sources and (cflags or cflags_c):
flags = []
flags.extend(cflags)
flags.extend(cflags_c)
SetFilesProperty(output, c_sources_name, 'COMPILE_FLAGS', flags, ' ')
if cxx_sources and (cflags or cflags_cxx):
flags = []
flags.extend(cflags)
flags.extend(cflags_cxx)
SetFilesProperty(output, cxx_sources_name, 'COMPILE_FLAGS', flags, ' ')
# Linker flags
ldflags = config.get('ldflags')
if ldflags is not None:
SetTargetProperty(output, cmake_target_name, 'LINK_FLAGS', ldflags, ' ')
# Note on Dependencies and Libraries:
# CMake wants to handle link order, resolving the link line up front.
# Gyp does not retain or enforce specifying enough information to do so.
# So do as other gyp generators and use --start-group and --end-group.
# Give CMake as little information as possible so that it doesn't mess it up.
# Dependencies
rawDeps = spec.get('dependencies', [])
static_deps = []
shared_deps = []
other_deps = []
for rawDep in rawDeps:
dep_cmake_name = namer.CreateCMakeTargetName(rawDep)
dep_spec = target_dicts.get(rawDep, {})
dep_target_type = dep_spec.get('type', None)
if dep_target_type == 'static_library':
static_deps.append(dep_cmake_name)
elif dep_target_type == 'shared_library':
shared_deps.append(dep_cmake_name)
else:
other_deps.append(dep_cmake_name)
# ensure all external dependencies are complete before internal dependencies
# extra_deps currently only depend on their own deps, so otherwise run early
if static_deps or shared_deps or other_deps:
for extra_dep in extra_deps:
output.write('add_dependencies(')
output.write(extra_dep)
output.write('\n')
for deps in (static_deps, shared_deps, other_deps):
for dep in gyp.common.uniquer(deps):
output.write(' ')
output.write(dep)
output.write('\n')
output.write(')\n')
linkable = target_type in ('executable', 'loadable_module', 'shared_library')
other_deps.extend(extra_deps)
if other_deps or (not linkable and (static_deps or shared_deps)):
output.write('add_dependencies(')
output.write(cmake_target_name)
output.write('\n')
for dep in gyp.common.uniquer(other_deps):
output.write(' ')
output.write(dep)
output.write('\n')
if not linkable:
for deps in (static_deps, shared_deps):
for lib_dep in gyp.common.uniquer(deps):
output.write(' ')
output.write(lib_dep)
output.write('\n')
output.write(')\n')
# Libraries
if linkable:
external_libs = [lib for lib in spec.get('libraries', []) if len(lib) > 0]
if external_libs or static_deps or shared_deps:
output.write('target_link_libraries(')
output.write(cmake_target_name)
output.write('\n')
if static_deps:
write_group = circular_libs and len(static_deps) > 1
if write_group:
output.write('-Wl,--start-group\n')
for dep in gyp.common.uniquer(static_deps):
output.write(' ')
output.write(dep)
output.write('\n')
if write_group:
output.write('-Wl,--end-group\n')
if shared_deps:
for dep in gyp.common.uniquer(shared_deps):
output.write(' ')
output.write(dep)
output.write('\n')
if external_libs:
for lib in gyp.common.uniquer(external_libs):
output.write(' ')
output.write(lib)
output.write('\n')
output.write(')\n')
UnsetVariable(output, 'TOOLSET')
UnsetVariable(output, 'TARGET')
def GenerateOutputForConfig(target_list, target_dicts, data,
params, config_to_use):
options = params['options']
generator_flags = params['generator_flags']
# generator_dir: relative path from pwd to where make puts build files.
# Makes migrating from make to cmake easier, cmake doesn't put anything here.
# Each Gyp configuration creates a different CMakeLists.txt file
# to avoid incompatibilities between Gyp and CMake configurations.
generator_dir = os.path.relpath(options.generator_output or '.')
# output_dir: relative path from generator_dir to the build directory.
output_dir = generator_flags.get('output_dir', 'out')
# build_dir: relative path from source root to our output files.
# e.g. "out/Debug"
build_dir = os.path.normpath(os.path.join(generator_dir,
output_dir,
config_to_use))
toplevel_build = os.path.join(options.toplevel_dir, build_dir)
output_file = os.path.join(toplevel_build, 'CMakeLists.txt')
gyp.common.EnsureDirExists(output_file)
output = open(output_file, 'w')
output.write('cmake_minimum_required(VERSION 2.8.8 FATAL_ERROR)\n')
output.write('cmake_policy(VERSION 2.8.8)\n')
gyp_file, project_target, _ = gyp.common.ParseQualifiedTarget(target_list[-1])
output.write('project(')
output.write(project_target)
output.write(')\n')
SetVariable(output, 'configuration', config_to_use)
ar = None
cc = None
cxx = None
make_global_settings = data[gyp_file].get('make_global_settings', [])
build_to_top = gyp.common.InvertRelativePath(build_dir,
options.toplevel_dir)
for key, value in make_global_settings:
if key == 'AR':
ar = os.path.join(build_to_top, value)
if key == 'CC':
cc = os.path.join(build_to_top, value)
if key == 'CXX':
cxx = os.path.join(build_to_top, value)
ar = gyp.common.GetEnvironFallback(['AR_target', 'AR'], ar)
cc = gyp.common.GetEnvironFallback(['CC_target', 'CC'], cc)
cxx = gyp.common.GetEnvironFallback(['CXX_target', 'CXX'], cxx)
if ar:
SetVariable(output, 'CMAKE_AR', ar)
if cc:
SetVariable(output, 'CMAKE_C_COMPILER', cc)
if cxx:
SetVariable(output, 'CMAKE_CXX_COMPILER', cxx)
# The following appears to be as-yet undocumented.
# http://public.kitware.com/Bug/view.php?id=8392
output.write('enable_language(ASM)\n')
# ASM-ATT does not support .S files.
# output.write('enable_language(ASM-ATT)\n')
if cc:
SetVariable(output, 'CMAKE_ASM_COMPILER', cc)
SetVariable(output, 'builddir', '${CMAKE_BINARY_DIR}')
SetVariable(output, 'obj', '${builddir}/obj')
output.write('\n')
# TODO: Undocumented/unsupported (the CMake Java generator depends on it).
# CMake by default names the object resulting from foo.c to be foo.c.o.
# Gyp traditionally names the object resulting from foo.c foo.o.
# This should be irrelevant, but some targets extract .o files from .a
# and depend on the name of the extracted .o files.
output.write('set(CMAKE_C_OUTPUT_EXTENSION_REPLACE 1)\n')
output.write('set(CMAKE_CXX_OUTPUT_EXTENSION_REPLACE 1)\n')
output.write('\n')
# Force ninja to use rsp files. Otherwise link and ar lines can get too long,
# resulting in 'Argument list too long' errors.
output.write('set(CMAKE_NINJA_FORCE_RESPONSE_FILE 1)\n')
output.write('\n')
namer = CMakeNamer(target_list)
# The list of targets upon which the 'all' target should depend.
  # CMake has its own implicit 'all' target; one is not created explicitly.
all_qualified_targets = set()
for build_file in params['build_files']:
for qualified_target in gyp.common.AllTargets(target_list,
target_dicts,
os.path.normpath(build_file)):
all_qualified_targets.add(qualified_target)
for qualified_target in target_list:
WriteTarget(namer, qualified_target, target_dicts, build_dir, config_to_use,
options, generator_flags, all_qualified_targets, output)
output.close()
def PerformBuild(data, configurations, params):
options = params['options']
generator_flags = params['generator_flags']
# generator_dir: relative path from pwd to where make puts build files.
# Makes migrating from make to cmake easier, cmake doesn't put anything here.
generator_dir = os.path.relpath(options.generator_output or '.')
# output_dir: relative path from generator_dir to the build directory.
output_dir = generator_flags.get('output_dir', 'out')
for config_name in configurations:
# build_dir: relative path from source root to our output files.
# e.g. "out/Debug"
build_dir = os.path.normpath(os.path.join(generator_dir,
output_dir,
config_name))
arguments = ['cmake', '-G', 'Ninja']
print 'Generating [%s]: %s' % (config_name, arguments)
subprocess.check_call(arguments, cwd=build_dir)
arguments = ['ninja', '-C', build_dir]
print 'Building [%s]: %s' % (config_name, arguments)
subprocess.check_call(arguments)
def CallGenerateOutputForConfig(arglist):
# Ignore the interrupt signal so that the parent process catches it and
# kills all multiprocessing children.
signal.signal(signal.SIGINT, signal.SIG_IGN)
target_list, target_dicts, data, params, config_name = arglist
GenerateOutputForConfig(target_list, target_dicts, data, params, config_name)
def GenerateOutput(target_list, target_dicts, data, params):
user_config = params.get('generator_flags', {}).get('config', None)
if user_config:
GenerateOutputForConfig(target_list, target_dicts, data,
params, user_config)
else:
config_names = target_dicts[target_list[0]]['configurations'].keys()
if params['parallel']:
try:
pool = multiprocessing.Pool(len(config_names))
arglists = []
for config_name in config_names:
arglists.append((target_list, target_dicts, data,
params, config_name))
pool.map(CallGenerateOutputForConfig, arglists)
except KeyboardInterrupt, e:
pool.terminate()
raise e
else:
for config_name in config_names:
GenerateOutputForConfig(target_list, target_dicts, data,
params, config_name)
| mit | -7,218,082,783,665,534,000 | 35.494676 | 80 | 0.662545 | false |
ronuchit/GroundHog | groundhog/layers/ff_layers.py | 16 | 18887 | """
Feedforward layers.
TODO: write more documentation
"""
__docformat__ = 'restructedtext en'
__authors__ = ("Razvan Pascanu "
"KyungHyun Cho "
"Caglar Gulcehre ")
__contact__ = "Razvan Pascanu <r.pascanu@gmail>"
import numpy
import copy
import theano
import theano.tensor as TT
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from groundhog import utils
from groundhog.utils import sample_weights, \
sample_weights_classic,\
init_bias, \
constant_shape, \
sample_zeros
from basic import Layer
class MultiLayer(Layer):
"""
Implementing a standard feed forward MLP
"""
def __init__(self,
rng,
n_in,
n_hids=[500,500],
activation='TT.tanh',
scale=0.01,
sparsity=-1,
rank_n_approx=0,
rank_n_activ='lambda x: x',
weight_noise=False,
dropout = 1.,
init_fn='sample_weights_classic',
bias_fn='init_bias',
bias_scale = 0.,
learn_bias = True,
grad_scale = 1.,
name=None):
"""
:type rng: numpy random generator
:param rng: numpy random generator
:type n_in: int
:param n_in: number of inputs units
:type n_hids: list of ints
:param n_hids: Number of hidden units on each layer of the MLP
:type activation: string/function or list of
:param activation: Activation function for the embedding layers. If
a list it needs to have a value for each layer. If not, the same
activation will be applied to all layers
:type scale: float or list of
:param scale: depending on the initialization function, it can be
the standard deviation of the Gaussian from which the weights
are sampled or the largest singular value. If a single value it
will be used for each layer, otherwise it has to have one value
for each layer
:type sparsity: int or list of
:param sparsity: if a single value, it will be used for each layer,
otherwise it has to be a list with as many values as layers. If
negative, it means the weight matrix is dense. Otherwise it
means this many randomly selected input units are connected to
an output unit
:type rank_n_approx: int
:param rank_n_approx: It applies to the first layer only. If
positive and larger than 0, the first weight matrix is
factorized into two matrices. The first one goes from input to
`rank_n_approx` hidden units, the second from `rank_n_approx` to
the number of units on the second layer
:type rank_n_activ: string or function
            :param rank_n_activ: Function that is applied on the intermediary
layer formed from factorizing the first weight matrix (Q: do we
need this?)
:type weight_noise: bool
:param weight_noise: If true, the model is used with weight noise
(and the right shared variable are constructed, to keep track of the
noise)
:type dropout: float
:param dropout: the probability with which hidden units are dropped
from the hidden layer. If set to 1, dropout is not used
:type init_fn: string or function
:param init_fn: function used to initialize the weights of the
layer. We recommend using either `sample_weights_classic` or
`sample_weights` defined in the utils
:type bias_fn: string or function
:param bias_fn: function used to initialize the biases. We recommend
using `init_bias` defined in the utils
:type bias_scale: float
:param bias_scale: argument passed to `bias_fn`, depicting the scale
of the initial bias
:type learn_bias: bool
:param learn_bias: flag, saying if we should learn the bias or keep
it constant
:type grad_scale: float or theano scalar
:param grad_scale: factor with which the gradients with respect to
the parameters of this layer are scaled. It is used for
differentiating between the different parameters of a model.
:type name: string
:param name: name of the layer (used to name parameters). NB: in
this library names are very important because certain parts of the
            code rely on names to disambiguate between variables, therefore
each layer should have a unique name.
"""
assert rank_n_approx >= 0, "Please enter a valid rank_n_approx"
self.rank_n_approx = rank_n_approx
if isinstance(rank_n_activ, (str, unicode)):
rank_n_activ = eval(rank_n_activ)
self.rank_n_activ = rank_n_activ
if type(n_hids) not in (list, tuple):
n_hids = [n_hids]
n_layers = len(n_hids)
self.n_layers = n_layers
if type(scale) not in (list, tuple):
scale = [scale] * n_layers
if type(sparsity) not in (list, tuple):
sparsity = [sparsity] * n_layers
for idx, sp in enumerate(sparsity):
if sp < 0: sparsity[idx] = n_hids[idx]
if type(activation) not in (list, tuple):
activation = [activation] * n_layers
if type(bias_scale) not in (list, tuple):
bias_scale = [bias_scale] * n_layers
        if type(bias_fn) not in (list, tuple):
bias_fn = [bias_fn] * n_layers
        if type(init_fn) not in (list, tuple):
init_fn = [init_fn] * n_layers
for dx in xrange(n_layers):
if isinstance(bias_fn[dx], (str, unicode)):
bias_fn[dx] = eval(bias_fn[dx])
if isinstance(init_fn[dx], (str, unicode)):
init_fn[dx] = eval(init_fn[dx])
if isinstance(activation[dx], (str, unicode)):
activation[dx] = eval(activation[dx])
super(MultiLayer, self).__init__(n_in, n_hids[-1], rng, name)
self.trng = RandomStreams(self.rng.randint(int(1e6)))
self.activation = activation
self.scale = scale
self.sparsity = sparsity
self.bias_scale = bias_scale
self.bias_fn = bias_fn
self.init_fn = init_fn
self._grad_scale = grad_scale
self.weight_noise = weight_noise
self.dropout = dropout
self.n_hids = n_hids
self.learn_bias = learn_bias
self._init_params()
def _init_params(self):
"""
Initialize the parameters of the layer, either by using sparse initialization or small
isotropic noise.
"""
self.W_ems = []
self.b_ems = []
if self.rank_n_approx:
W_em1 = self.init_fn[0](self.n_in,
self.rank_n_approx,
self.sparsity[0],
self.scale[0],
self.rng)
W_em2 = self.init_fn[0](self.rank_n_approx,
self.n_hids[0],
self.sparsity[0],
self.scale[0],
self.rng)
self.W_em1 = theano.shared(W_em1,
name='W1_0_%s'%self.name)
self.W_em2 = theano.shared(W_em2,
name='W2_0_%s'%self.name)
self.W_ems = [self.W_em1, self.W_em2]
else:
W_em = self.init_fn[0](self.n_in,
self.n_hids[0],
self.sparsity[0],
self.scale[0],
self.rng)
self.W_em = theano.shared(W_em,
name='W_0_%s'%self.name)
self.W_ems = [self.W_em]
self.b_em = theano.shared(
self.bias_fn[0](self.n_hids[0], self.bias_scale[0],self.rng),
name='b_0_%s'%self.name)
self.b_ems = [self.b_em]
for dx in xrange(1, self.n_layers):
            W_em = self.init_fn[dx](self.n_hids[dx-1],
self.n_hids[dx],
self.sparsity[dx],
self.scale[dx],
self.rng)
W_em = theano.shared(W_em,
name='W_%d_%s'%(dx,self.name))
self.W_ems += [W_em]
b_em = theano.shared(
self.bias_fn[dx](self.n_hids[dx], self.bias_scale[dx],self.rng),
name='b_%d_%s'%(dx,self.name))
self.b_ems += [b_em]
self.params = [x for x in self.W_ems]
if self.learn_bias and self.learn_bias!='last':
self.params = [x for x in self.W_ems] + [x for x in self.b_ems]
elif self.learn_bias == 'last':
self.params = [x for x in self.W_ems] + [x for x in
self.b_ems][:-1]
self.params_grad_scale = [self._grad_scale for x in self.params]
if self.weight_noise:
self.nW_ems = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_ems]
self.nb_ems = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.b_ems]
self.noise_params = [x for x in self.nW_ems] + [x for x in self.nb_ems]
self.noise_params_shape_fn = [constant_shape(x.get_value().shape)
for x in self.noise_params]
def fprop(self, state_below, use_noise=True, no_noise_bias=False,
first_only = False):
"""
Constructs the computational graph of this layer.
        If the input is ints, we assume it is an index, otherwise we assume it
        is a set of floats.
"""
if self.weight_noise and use_noise and self.noise_params:
W_ems = [(x+y) for x, y in zip(self.W_ems, self.nW_ems)]
if not no_noise_bias:
b_ems = [(x+y) for x, y in zip(self.b_ems, self.nb_ems)]
else:
b_ems = self.b_ems
else:
W_ems = self.W_ems
b_ems = self.b_ems
if self.rank_n_approx:
if first_only:
emb_val = self.rank_n_activ(utils.dot(state_below, W_ems[0]))
self.out = emb_val
return emb_val
emb_val = TT.dot(
self.rank_n_activ(utils.dot(state_below, W_ems[0])),
W_ems[1])
if b_ems:
emb_val += b_ems[0]
st_pos = 1
else:
emb_val = utils.dot(state_below, W_ems[0])
if b_ems:
emb_val += b_ems[0]
st_pos = 0
emb_val = self.activation[0](emb_val)
if self.dropout < 1.:
if use_noise:
emb_val = emb_val * self.trng.binomial(emb_val.shape, n=1, p=self.dropout, dtype=emb_val.dtype)
else:
emb_val = emb_val * self.dropout
for dx in xrange(1, self.n_layers):
emb_val = utils.dot(emb_val, W_ems[st_pos+dx])
if b_ems:
emb_val = self.activation[dx](emb_val+ b_ems[dx])
else:
emb_val = self.activation[dx](emb_val)
if self.dropout < 1.:
if use_noise:
emb_val = emb_val * self.trng.binomial(emb_val.shape, n=1, p=self.dropout, dtype=emb_val.dtype)
else:
emb_val = emb_val * self.dropout
self.out = emb_val
return emb_val
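# Illustrative usage sketch (not part of the original module; the names below
# are hypothetical):
#   rng = numpy.random.RandomState(123)
#   emb = MultiLayer(rng, n_in=100, n_hids=[500, 500], name='emb')
#   h = emb.fprop(x)   # x: a Theano matrix of shape (batch, 100)
# The constructor arguments mirror the docstring above; parameters are created
# in _init_params() and exposed through emb.params for the optimizer.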
class LastState(Layer):
"""
This layer is used to construct the embedding of the encoder by taking
the last state of the recurrent model
"""
def __init__(self, ntimes = False, n = TT.constant(0)):
"""
:type ntimes: bool
:param ntimes: If the last state needs to be repeated `n` times
:type n: int, theano constant, None
:param n: how many times the last state is repeated
"""
self.ntimes = ntimes
self.n = n
super(LastState, self).__init__(0, 0, None)
def fprop(self, all_states):
if self.ntimes:
stateshape0 = all_states.shape[0]
shape0 = TT.switch(TT.gt(self.n, 0), self.n, all_states.shape[0])
single_frame = TT.shape_padleft(all_states[stateshape0-1])
mask = TT.alloc(numpy.float32(1), shape0, *[1 for k in xrange(all_states.ndim-1)])
rval = single_frame * mask
self.out = rval
return rval
single_frame = all_states[all_states.shape[0]-1]
self.out = single_frame
return single_frame
last = LastState()
last_ntimes = LastState(ntimes=True)
class GaussianNoise(Layer):
"""
    Adds Gaussian noise (mean `avg`, standard deviation `std`) to its input,
    either sampled on the fly or taken from a fixed-shape shared noise term
"""
def __init__(self, rng, std = 0.1, ndim=0, avg =0, shape_fn=None):
"""
"""
assert rng is not None, "random number generator should not be empty!"
super(GaussianNoise, self).__init__(0, 0, rng)
        self.std = std
        self.avg = avg
self.ndim = ndim
self.shape_fn = shape_fn
if self.shape_fn:
# Name is not important as it is not a parameter of the model
self.noise_term = theano.shared(numpy.zeros((2,)*ndim,
dtype=theano.config.floatX),
name='ndata')
self.noise_params += [self.noise_term]
self.noise_params_shape_fn += [shape_fn]
self.trng = RandomStreams(rng.randint(1e5))
def fprop(self, x):
self.out = x
        if self.std:
if self.shape_fn:
self.out += self.noise_term
else:
self.out += self.trng.normal(self.out.shape, std=self.std,
avg = self.avg,
dtype=self.out.dtype)
return self.out
class BinaryOp(Layer):
"""
    Applies an elementwise binary operation (addition by default) to its two
    inputs
"""
def __init__(self, op = 'lambda x,y: x+y', name=None):
if type(op) is str:
op = eval(op)
self.op = op
super(BinaryOp, self).__init__(0, 0, None, name)
def fprop(self, x, y):
self.out = self.op(x, y)
return self.out
class DropOp(Layer):
"""
This layers randomly drops elements of the input by multiplying with a
mask sampled from a binomial distribution
"""
def __init__(self, rng = None, name=None, dropout=1.):
super(DropOp, self).__init__(0, 0, None, name)
self.dropout = dropout
if dropout < 1.:
self.trng = RandomStreams(rng.randint(1e5))
def fprop(self, state_below, use_noise = True):
self.out = state_below
if self.dropout < 1.:
if use_noise:
self.out = self.out * self.trng.binomial(self.out.shape,
n=1,
p=self.dropout,
dtype=self.out.dtype)
else:
self.out = self.out * self.dropout
return self.out
class UnaryOp(Layer):
"""
    Applies an elementwise unary operation (typically an activation function)
    to its input
"""
def __init__(self, activation = 'lambda x: x', name=None):
if type(activation) is str:
activation = eval(activation)
self.activation = activation
super(UnaryOp, self).__init__(0, 0, None, name)
def fprop(self, state_below):
self.out = self.activation(state_below)
return self.out
tanh = UnaryOp('lambda x: TT.tanh(x)')
sigmoid = UnaryOp('lambda x: TT.nnet.sigmoid(x)')
rectifier = UnaryOp('lambda x: x*(x>0)')
hard_sigmoid = UnaryOp('lambda x: x*(x>0)*(x<1)')
hard_tanh = UnaryOp('lambda x: x*(x>-1)*(x<1)')
class Shift(Layer):
"""
    Shifts its input along the first axis by `n` positions, filling the
    vacated entries with zeros
"""
def __init__(self, n=1, name=None):
self.n = n
super(Shift, self).__init__(0, 0, None, name)
def fprop(self, var):
rval = TT.zeros_like(var)
if self.n >0:
rval = TT.set_subtensor(rval[self.n:], var[:-self.n])
elif self.n<0:
rval = TT.set_subtensor(rval[:self.n], var[-self.n:])
self.out = rval
return rval
class MinPooling(Layer):
"""
    This layer is used to construct an embedding of the encoder by doing a
    min pooling over the hidden state
"""
def __init__(self, ntimes=False, name=None):
self.ntimes = ntimes
super(MinPooling, self).__init__(0, 0, None, name)
def fprop(self, all_states):
shape0 = all_states.shape[0]
single_frame = all_states.min(0)
if self.ntimes:
            single_frame = TT.shape_padleft(all_states.min(0))
mask = TT.alloc(numpy.float32(1),
shape0, *[1 for k in xrange(all_states.ndim-1)])
rval = single_frame * mask
self.out = rval
return rval
self.out = single_frame
return single_frame
minpool = MinPooling()
minpool_ntimes = MinPooling(ntimes=True)
class MaxPooling(Layer):
"""
This layer is used to construct an embedding of the encoder by doing a
max pooling over the hidden state
"""
def __init__(self, ntimes=False, name=None):
self.ntimes = ntimes
super(MaxPooling, self).__init__(0, 0, None, name)
def fprop(self, all_states):
shape0 = all_states.shape[0]
single_frame = all_states.max(0)
if self.ntimes:
single_frame = TT.shape_padleft(all_states.max(0))
mask = TT.alloc(numpy.float32(1),
shape0, *[1 for k in xrange(all_states.ndim-1)])
rval = single_frame * mask
self.out = rval
return rval
self.out = single_frame
return single_frame
maxpool = MaxPooling()
maxpool_ntimes = MaxPooling(ntimes=True)
class Concatenate(Layer):
def __init__(self, axis):
self.axis = axis
Layer.__init__(self, 0, 0, None)
def fprop(self, *args):
self.out = TT.concatenate(args, axis=self.axis)
return self.out
| bsd-3-clause | -8,832,714,422,617,155,000 | 35.960861 | 115 | 0.533753 | false |
megies/numpy | numpy/ma/timer_comparison.py | 76 | 17500 | from __future__ import division, absolute_import, print_function
import timeit
from functools import reduce
import numpy as np
from numpy import float_
import numpy.core.fromnumeric as fromnumeric
from numpy.testing.utils import build_err_msg
# Fixme: this does not look right.
np.seterr(all='ignore')
pi = np.pi
class moduletester(object):
def __init__(self, module):
self.module = module
self.allequal = module.allequal
self.arange = module.arange
self.array = module.array
# self.average = module.average
self.concatenate = module.concatenate
self.count = module.count
self.equal = module.equal
self.filled = module.filled
self.getmask = module.getmask
self.getmaskarray = module.getmaskarray
self.id = id
self.inner = module.inner
self.make_mask = module.make_mask
self.masked = module.masked
self.masked_array = module.masked_array
self.masked_values = module.masked_values
self.mask_or = module.mask_or
self.nomask = module.nomask
self.ones = module.ones
self.outer = module.outer
self.repeat = module.repeat
self.resize = module.resize
self.sort = module.sort
self.take = module.take
self.transpose = module.transpose
self.zeros = module.zeros
self.MaskType = module.MaskType
try:
self.umath = module.umath
except AttributeError:
self.umath = module.core.umath
self.testnames = []
def assert_array_compare(self, comparison, x, y, err_msg='', header='',
fill_value=True):
"""Asserts that a comparison relation between two masked arrays is satisfied
elementwise."""
xf = self.filled(x)
yf = self.filled(y)
m = self.mask_or(self.getmask(x), self.getmask(y))
x = self.filled(self.masked_array(xf, mask=m), fill_value)
y = self.filled(self.masked_array(yf, mask=m), fill_value)
if (x.dtype.char != "O"):
x = x.astype(float_)
if isinstance(x, np.ndarray) and x.size > 1:
x[np.isnan(x)] = 0
elif np.isnan(x):
x = 0
if (y.dtype.char != "O"):
y = y.astype(float_)
if isinstance(y, np.ndarray) and y.size > 1:
y[np.isnan(y)] = 0
elif np.isnan(y):
y = 0
try:
cond = (x.shape==() or y.shape==()) or x.shape == y.shape
if not cond:
msg = build_err_msg([x, y],
err_msg
+ '\n(shapes %s, %s mismatch)' % (x.shape,
y.shape),
header=header,
names=('x', 'y'))
assert cond, msg
val = comparison(x, y)
if m is not self.nomask and fill_value:
val = self.masked_array(val, mask=m)
if isinstance(val, bool):
cond = val
reduced = [0]
else:
reduced = val.ravel()
cond = reduced.all()
reduced = reduced.tolist()
if not cond:
match = 100-100.0*reduced.count(1)/len(reduced)
msg = build_err_msg([x, y],
err_msg
+ '\n(mismatch %s%%)' % (match,),
header=header,
names=('x', 'y'))
assert cond, msg
except ValueError:
msg = build_err_msg([x, y], err_msg, header=header, names=('x', 'y'))
raise ValueError(msg)
def assert_array_equal(self, x, y, err_msg=''):
"""Checks the elementwise equality of two masked arrays."""
self.assert_array_compare(self.equal, x, y, err_msg=err_msg,
header='Arrays are not equal')
def test_0(self):
"Tests creation"
x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
m = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
xm = self.masked_array(x, mask=m)
xm[0]
def test_1(self):
"Tests creation"
x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
a10 = 10.
m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
xm = self.masked_array(x, mask=m1)
ym = self.masked_array(y, mask=m2)
z = np.array([-.5, 0., .5, .8])
zm = self.masked_array(z, mask=[0, 1, 0, 0])
xf = np.where(m1, 1.e+20, x)
xm.set_fill_value(1.e+20)
assert((xm-ym).filled(0).any())
#fail_if_equal(xm.mask.astype(int_), ym.mask.astype(int_))
s = x.shape
assert(xm.size == reduce(lambda x, y:x*y, s))
assert(self.count(xm) == len(m1) - reduce(lambda x, y:x+y, m1))
for s in [(4, 3), (6, 2)]:
x.shape = s
y.shape = s
xm.shape = s
ym.shape = s
xf.shape = s
assert(self.count(xm) == len(m1) - reduce(lambda x, y:x+y, m1))
def test_2(self):
"Tests conversions and indexing"
x1 = np.array([1, 2, 4, 3])
x2 = self.array(x1, mask=[1, 0, 0, 0])
x3 = self.array(x1, mask=[0, 1, 0, 1])
x4 = self.array(x1)
# test conversion to strings
junk, garbage = str(x2), repr(x2)
# assert_equal(np.sort(x1), self.sort(x2, fill_value=0))
# tests of indexing
assert type(x2[1]) is type(x1[1])
assert x1[1] == x2[1]
# assert self.allequal(x1[2],x2[2])
# assert self.allequal(x1[2:5],x2[2:5])
# assert self.allequal(x1[:],x2[:])
# assert self.allequal(x1[1:], x3[1:])
x1[2] = 9
x2[2] = 9
self.assert_array_equal(x1, x2)
x1[1:3] = 99
x2[1:3] = 99
# assert self.allequal(x1,x2)
x2[1] = self.masked
# assert self.allequal(x1,x2)
x2[1:3] = self.masked
# assert self.allequal(x1,x2)
x2[:] = x1
x2[1] = self.masked
# assert self.allequal(self.getmask(x2),self.array([0,1,0,0]))
x3[:] = self.masked_array([1, 2, 3, 4], [0, 1, 1, 0])
# assert self.allequal(self.getmask(x3), self.array([0,1,1,0]))
x4[:] = self.masked_array([1, 2, 3, 4], [0, 1, 1, 0])
# assert self.allequal(self.getmask(x4), self.array([0,1,1,0]))
# assert self.allequal(x4, self.array([1,2,3,4]))
x1 = np.arange(5)*1.0
x2 = self.masked_values(x1, 3.0)
# assert self.allequal(x1,x2)
# assert self.allequal(self.array([0,0,0,1,0], self.MaskType), x2.mask)
x1 = self.array([1, 'hello', 2, 3], object)
x2 = np.array([1, 'hello', 2, 3], object)
s1 = x1[1]
s2 = x2[1]
assert x1[1:1].shape == (0,)
# Tests copy-size
n = [0, 0, 1, 0, 0]
m = self.make_mask(n)
m2 = self.make_mask(m)
assert(m is m2)
m3 = self.make_mask(m, copy=1)
assert(m is not m3)
def test_3(self):
"Tests resize/repeat"
x4 = self.arange(4)
x4[2] = self.masked
y4 = self.resize(x4, (8,))
assert self.allequal(self.concatenate([x4, x4]), y4)
assert self.allequal(self.getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0])
y5 = self.repeat(x4, (2, 2, 2, 2), axis=0)
self.assert_array_equal(y5, [0, 0, 1, 1, 2, 2, 3, 3])
y6 = self.repeat(x4, 2, axis=0)
assert self.allequal(y5, y6)
y7 = x4.repeat((2, 2, 2, 2), axis=0)
assert self.allequal(y5, y7)
y8 = x4.repeat(2, 0)
assert self.allequal(y5, y8)
#----------------------------------
def test_4(self):
"Test of take, transpose, inner, outer products"
x = self.arange(24)
y = np.arange(24)
x[5:6] = self.masked
x = x.reshape(2, 3, 4)
y = y.reshape(2, 3, 4)
assert self.allequal(np.transpose(y, (2, 0, 1)), self.transpose(x, (2, 0, 1)))
assert self.allequal(np.take(y, (2, 0, 1), 1), self.take(x, (2, 0, 1), 1))
assert self.allequal(np.inner(self.filled(x, 0), self.filled(y, 0)),
self.inner(x, y))
assert self.allequal(np.outer(self.filled(x, 0), self.filled(y, 0)),
self.outer(x, y))
y = self.array(['abc', 1, 'def', 2, 3], object)
y[2] = self.masked
t = self.take(y, [0, 3, 4])
assert t[0] == 'abc'
assert t[1] == 2
assert t[2] == 3
#----------------------------------
def test_5(self):
"Tests inplace w/ scalar"
x = self.arange(10)
y = self.arange(10)
xm = self.arange(10)
xm[2] = self.masked
x += 1
assert self.allequal(x, y+1)
xm += 1
assert self.allequal(xm, y+1)
x = self.arange(10)
xm = self.arange(10)
xm[2] = self.masked
x -= 1
assert self.allequal(x, y-1)
xm -= 1
assert self.allequal(xm, y-1)
x = self.arange(10)*1.0
xm = self.arange(10)*1.0
xm[2] = self.masked
x *= 2.0
assert self.allequal(x, y*2)
xm *= 2.0
assert self.allequal(xm, y*2)
x = self.arange(10)*2
xm = self.arange(10)*2
xm[2] = self.masked
x /= 2
assert self.allequal(x, y)
xm /= 2
assert self.allequal(xm, y)
x = self.arange(10)*1.0
xm = self.arange(10)*1.0
xm[2] = self.masked
x /= 2.0
assert self.allequal(x, y/2.0)
xm /= self.arange(10)
self.assert_array_equal(xm, self.ones((10,)))
x = self.arange(10).astype(float_)
xm = self.arange(10)
xm[2] = self.masked
id1 = self.id(x.raw_data())
x += 1.
#assert id1 == self.id(x.raw_data())
assert self.allequal(x, y+1.)
def test_6(self):
"Tests inplace w/ array"
x = self.arange(10, dtype=float_)
y = self.arange(10)
xm = self.arange(10, dtype=float_)
xm[2] = self.masked
m = xm.mask
a = self.arange(10, dtype=float_)
a[-1] = self.masked
x += a
xm += a
assert self.allequal(x, y+a)
assert self.allequal(xm, y+a)
assert self.allequal(xm.mask, self.mask_or(m, a.mask))
x = self.arange(10, dtype=float_)
xm = self.arange(10, dtype=float_)
xm[2] = self.masked
m = xm.mask
a = self.arange(10, dtype=float_)
a[-1] = self.masked
x -= a
xm -= a
assert self.allequal(x, y-a)
assert self.allequal(xm, y-a)
assert self.allequal(xm.mask, self.mask_or(m, a.mask))
x = self.arange(10, dtype=float_)
xm = self.arange(10, dtype=float_)
xm[2] = self.masked
m = xm.mask
a = self.arange(10, dtype=float_)
a[-1] = self.masked
x *= a
xm *= a
assert self.allequal(x, y*a)
assert self.allequal(xm, y*a)
assert self.allequal(xm.mask, self.mask_or(m, a.mask))
x = self.arange(10, dtype=float_)
xm = self.arange(10, dtype=float_)
xm[2] = self.masked
m = xm.mask
a = self.arange(10, dtype=float_)
a[-1] = self.masked
x /= a
xm /= a
#----------------------------------
def test_7(self):
"Tests ufunc"
d = (self.array([1.0, 0, -1, pi/2]*2, mask=[0, 1]+[0]*6),
self.array([1.0, 0, -1, pi/2]*2, mask=[1, 0]+[0]*6),)
for f in ['sqrt', 'log', 'log10', 'exp', 'conjugate',
# 'sin', 'cos', 'tan',
# 'arcsin', 'arccos', 'arctan',
# 'sinh', 'cosh', 'tanh',
# 'arcsinh',
# 'arccosh',
# 'arctanh',
# 'absolute', 'fabs', 'negative',
# # 'nonzero', 'around',
# 'floor', 'ceil',
# # 'sometrue', 'alltrue',
# 'logical_not',
# 'add', 'subtract', 'multiply',
# 'divide', 'true_divide', 'floor_divide',
# 'remainder', 'fmod', 'hypot', 'arctan2',
# 'equal', 'not_equal', 'less_equal', 'greater_equal',
# 'less', 'greater',
# 'logical_and', 'logical_or', 'logical_xor',
]:
#print f
try:
uf = getattr(self.umath, f)
except AttributeError:
uf = getattr(fromnumeric, f)
mf = getattr(self.module, f)
args = d[:uf.nin]
ur = uf(*args)
mr = mf(*args)
self.assert_array_equal(ur.filled(0), mr.filled(0), f)
self.assert_array_equal(ur._mask, mr._mask)
#----------------------------------
def test_99(self):
# test average
ott = self.array([0., 1., 2., 3.], mask=[1, 0, 0, 0])
self.assert_array_equal(2.0, self.average(ott, axis=0))
self.assert_array_equal(2.0, self.average(ott, weights=[1., 1., 2., 1.]))
result, wts = self.average(ott, weights=[1., 1., 2., 1.], returned=1)
self.assert_array_equal(2.0, result)
assert(wts == 4.0)
ott[:] = self.masked
assert(self.average(ott, axis=0) is self.masked)
ott = self.array([0., 1., 2., 3.], mask=[1, 0, 0, 0])
ott = ott.reshape(2, 2)
ott[:, 1] = self.masked
self.assert_array_equal(self.average(ott, axis=0), [2.0, 0.0])
assert(self.average(ott, axis=1)[0] is self.masked)
self.assert_array_equal([2., 0.], self.average(ott, axis=0))
result, wts = self.average(ott, axis=0, returned=1)
self.assert_array_equal(wts, [1., 0.])
w1 = [0, 1, 1, 1, 1, 0]
w2 = [[0, 1, 1, 1, 1, 0], [1, 0, 0, 0, 0, 1]]
x = self.arange(6)
self.assert_array_equal(self.average(x, axis=0), 2.5)
self.assert_array_equal(self.average(x, axis=0, weights=w1), 2.5)
y = self.array([self.arange(6), 2.0*self.arange(6)])
self.assert_array_equal(self.average(y, None), np.add.reduce(np.arange(6))*3./12.)
self.assert_array_equal(self.average(y, axis=0), np.arange(6) * 3./2.)
self.assert_array_equal(self.average(y, axis=1), [self.average(x, axis=0), self.average(x, axis=0) * 2.0])
self.assert_array_equal(self.average(y, None, weights=w2), 20./6.)
self.assert_array_equal(self.average(y, axis=0, weights=w2), [0., 1., 2., 3., 4., 10.])
self.assert_array_equal(self.average(y, axis=1), [self.average(x, axis=0), self.average(x, axis=0) * 2.0])
m1 = self.zeros(6)
m2 = [0, 0, 1, 1, 0, 0]
m3 = [[0, 0, 1, 1, 0, 0], [0, 1, 1, 1, 1, 0]]
m4 = self.ones(6)
m5 = [0, 1, 1, 1, 1, 1]
self.assert_array_equal(self.average(self.masked_array(x, m1), axis=0), 2.5)
self.assert_array_equal(self.average(self.masked_array(x, m2), axis=0), 2.5)
# assert(self.average(masked_array(x, m4),axis=0) is masked)
self.assert_array_equal(self.average(self.masked_array(x, m5), axis=0), 0.0)
self.assert_array_equal(self.count(self.average(self.masked_array(x, m4), axis=0)), 0)
z = self.masked_array(y, m3)
self.assert_array_equal(self.average(z, None), 20./6.)
self.assert_array_equal(self.average(z, axis=0), [0., 1., 99., 99., 4.0, 7.5])
self.assert_array_equal(self.average(z, axis=1), [2.5, 5.0])
self.assert_array_equal(self.average(z, axis=0, weights=w2), [0., 1., 99., 99., 4.0, 10.0])
#------------------------
def test_A(self):
x = self.arange(24)
y = np.arange(24)
x[5:6] = self.masked
x = x.reshape(2, 3, 4)
################################################################################
if __name__ == '__main__':
setup_base = "from __main__ import moduletester \n"\
"import numpy\n" \
"tester = moduletester(module)\n"
# setup_new = "import np.ma.core_ini as module\n"+setup_base
setup_cur = "import np.ma.core as module\n"+setup_base
# setup_alt = "import np.ma.core_alt as module\n"+setup_base
# setup_tmp = "import np.ma.core_tmp as module\n"+setup_base
(nrepeat, nloop) = (10, 10)
if 1:
for i in range(1, 8):
func = 'tester.test_%i()' % i
# new = timeit.Timer(func, setup_new).repeat(nrepeat, nloop*10)
cur = timeit.Timer(func, setup_cur).repeat(nrepeat, nloop*10)
# alt = timeit.Timer(func, setup_alt).repeat(nrepeat, nloop*10)
# tmp = timeit.Timer(func, setup_tmp).repeat(nrepeat, nloop*10)
# new = np.sort(new)
cur = np.sort(cur)
# alt = np.sort(alt)
# tmp = np.sort(tmp)
print("#%i" % i +50*'.')
print(eval("moduletester.test_%i.__doc__" % i))
# print "core_ini : %.3f - %.3f" % (new[0], new[1])
print("core_current : %.3f - %.3f" % (cur[0], cur[1]))
# print "core_alt : %.3f - %.3f" % (alt[0], alt[1])
# print "core_tmp : %.3f - %.3f" % (tmp[0], tmp[1])
| bsd-3-clause | -7,649,316,451,598,109,000 | 37.126362 | 114 | 0.484971 | false |
capturePointer/vigra | vigranumpy/examples/grid_graph_shortestpath.py | 8 | 3978 | import vigra
import vigra.graphs as vigraph
import pylab
import numpy
np=numpy
import sys
import matplotlib
import pylab as plt
import math
from matplotlib.widgets import Slider, Button, RadioButtons
def makeWeights(gamma):
global hessian,gradmag,gridGraph
print "hessian",hessian.min(),hessian.max()
print "raw ",raw.min(),raw.max()
wImg= numpy.exp((gradmag**0.5)*gamma*-1.0)#**0.5
wImg = numpy.array(wImg).astype(numpy.float32)
w=vigra.graphs.implicitMeanEdgeMap(gridGraph,wImg)
return w
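# Note (illustrative): larger gamma values make the edge weights decay faster
# with gradient magnitude, so the minimal-weight path snaps more strongly to
# high-gradient (boundary) pixels.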
def makeVisuImage(path,img):
coords = (path[:,0],path[:,1])
visuimg =img.copy()
iR=visuimg[:,:,0]
iG=visuimg[:,:,1]
iB=visuimg[:,:,2]
iR[coords]=255
iG[coords]=0
iB[coords]=0
visuimg-=visuimg.min()
visuimg/=visuimg.max()
return visuimg
f = '100075.jpg'
f = '69015.jpg'
#f = "/media/tbeier/GSP1RMCPRFR/iso.03530.png"
img = vigra.impex.readImage(f)
print img.shape
if(img.shape[2]==1):
img = numpy.concatenate([img]*3,axis=2)
imgLab = img
imgLab = vigra.taggedView(imgLab,'xyc')
else:
imgLab = vigra.colors.transform_RGB2Lab(img)
sigma = 1.0
imgLab-=imgLab.min()
imgLab/=imgLab.max()
imgLab*=255
img-=img.min()
img/=img.max()
img*=255
print imgLab.shape
print "interpolate image"
imgLabSmall = imgLab
# make a few edge weights
gradmag = numpy.squeeze(vigra.filters.gaussianGradientMagnitude(imgLabSmall,sigma))
hessian = numpy.squeeze(vigra.filters.hessianOfGaussianEigenvalues(imgLabSmall[:,:,0],sigma))[:,:,0]
hessian-=hessian.min()
raw = 256-imgLabSmall[:,:,0].copy()
gridGraph = vigraph.gridGraph(imgLab.shape[:2],False)
weights = makeWeights(3.0)
pathFinder = vigraph.ShortestPathPathDijkstra(gridGraph)
visuimg =img.copy()
ax = plt.gca()
fig = plt.gcf()
visuimg-=visuimg.min()
visuimg/=visuimg.max()
implot = ax.imshow(numpy.swapaxes(visuimg,0,1),cmap='gray')
clickList=[]
frozen = False
axslider = plt.axes([0.0, 0.00, 0.4, 0.075])
axfreeze = plt.axes([0.6, 0.00, 0.1, 0.075])
axunfreeze = plt.axes([0.8, 0.00, 0.1, 0.075])
bfreeze = Button(axfreeze, 'freeze')
bunfreeze = Button(axunfreeze, 'unfreeze and clear')
sgamma = Slider(axslider, 'gamma', 0.01, 5.0, valinit=1.0)
def onclick(event):
global clickList
global weights
global img
if event.xdata != None and event.ydata != None:
xRaw,yRaw = event.xdata,event.ydata
if not frozen and xRaw >=0.0 and yRaw>=0.0 and xRaw<img.shape[0] and yRaw<img.shape[1]:
x,y = long(math.floor(event.xdata)),long(math.floor(event.ydata))
clickList.append((x,y))
if len(clickList)==2:
source = gridGraph.coordinateToNode(clickList[0])
target = gridGraph.coordinateToNode(clickList[1])
weights = makeWeights(sgamma.val)
#path = pathFinder.run(weights, source,target).path(pathType='coordinates')
path = pathFinder.run(weights, source).path(pathType='coordinates',target=target)
visuimg = makeVisuImage(path,img)
implot.set_data(numpy.swapaxes(visuimg,0,1))
plt.draw()
def freeze(event):
global frozen
frozen=True
def unfreeze(event):
global frozen,clickList
frozen=False
clickList = []
def onslide(event):
global img,gradmag,weights,clickList,sgamma
weights = makeWeights(sgamma.val)
print "onslide",clickList
if len(clickList)>=2:
print "we have path"
source = gridGraph.coordinateToNode(clickList[0])
target = gridGraph.coordinateToNode(clickList[1])
path = pathFinder.run(weights, source,target).path(pathType='coordinates')
visuimg = makeVisuImage(path,img)
implot.set_data(numpy.swapaxes(visuimg,0,1))
plt.draw()
bfreeze.on_clicked(freeze)
bunfreeze.on_clicked(unfreeze)
sgamma.on_changed(onslide)
cid = fig.canvas.mpl_connect('button_press_event', onclick)
plt.show()
| mit | 1,526,639,020,373,556,000 | 25.52 | 100 | 0.659377 | false |
chiefspace/udemy-rest-api | udemy_rest_flask1/env/lib/python3.4/site-packages/markupsafe/_constants.py | 1535 | 4795 | # -*- coding: utf-8 -*-
"""
markupsafe._constants
~~~~~~~~~~~~~~~~~~~~~
    Mapping of HTML entity names to their Unicode codepoints.
:copyright: (c) 2010 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
HTML_ENTITIES = {
'AElig': 198,
'Aacute': 193,
'Acirc': 194,
'Agrave': 192,
'Alpha': 913,
'Aring': 197,
'Atilde': 195,
'Auml': 196,
'Beta': 914,
'Ccedil': 199,
'Chi': 935,
'Dagger': 8225,
'Delta': 916,
'ETH': 208,
'Eacute': 201,
'Ecirc': 202,
'Egrave': 200,
'Epsilon': 917,
'Eta': 919,
'Euml': 203,
'Gamma': 915,
'Iacute': 205,
'Icirc': 206,
'Igrave': 204,
'Iota': 921,
'Iuml': 207,
'Kappa': 922,
'Lambda': 923,
'Mu': 924,
'Ntilde': 209,
'Nu': 925,
'OElig': 338,
'Oacute': 211,
'Ocirc': 212,
'Ograve': 210,
'Omega': 937,
'Omicron': 927,
'Oslash': 216,
'Otilde': 213,
'Ouml': 214,
'Phi': 934,
'Pi': 928,
'Prime': 8243,
'Psi': 936,
'Rho': 929,
'Scaron': 352,
'Sigma': 931,
'THORN': 222,
'Tau': 932,
'Theta': 920,
'Uacute': 218,
'Ucirc': 219,
'Ugrave': 217,
'Upsilon': 933,
'Uuml': 220,
'Xi': 926,
'Yacute': 221,
'Yuml': 376,
'Zeta': 918,
'aacute': 225,
'acirc': 226,
'acute': 180,
'aelig': 230,
'agrave': 224,
'alefsym': 8501,
'alpha': 945,
'amp': 38,
'and': 8743,
'ang': 8736,
'apos': 39,
'aring': 229,
'asymp': 8776,
'atilde': 227,
'auml': 228,
'bdquo': 8222,
'beta': 946,
'brvbar': 166,
'bull': 8226,
'cap': 8745,
'ccedil': 231,
'cedil': 184,
'cent': 162,
'chi': 967,
'circ': 710,
'clubs': 9827,
'cong': 8773,
'copy': 169,
'crarr': 8629,
'cup': 8746,
'curren': 164,
'dArr': 8659,
'dagger': 8224,
'darr': 8595,
'deg': 176,
'delta': 948,
'diams': 9830,
'divide': 247,
'eacute': 233,
'ecirc': 234,
'egrave': 232,
'empty': 8709,
'emsp': 8195,
'ensp': 8194,
'epsilon': 949,
'equiv': 8801,
'eta': 951,
'eth': 240,
'euml': 235,
'euro': 8364,
'exist': 8707,
'fnof': 402,
'forall': 8704,
'frac12': 189,
'frac14': 188,
'frac34': 190,
'frasl': 8260,
'gamma': 947,
'ge': 8805,
'gt': 62,
'hArr': 8660,
'harr': 8596,
'hearts': 9829,
'hellip': 8230,
'iacute': 237,
'icirc': 238,
'iexcl': 161,
'igrave': 236,
'image': 8465,
'infin': 8734,
'int': 8747,
'iota': 953,
'iquest': 191,
'isin': 8712,
'iuml': 239,
'kappa': 954,
'lArr': 8656,
'lambda': 955,
'lang': 9001,
'laquo': 171,
'larr': 8592,
'lceil': 8968,
'ldquo': 8220,
'le': 8804,
'lfloor': 8970,
'lowast': 8727,
'loz': 9674,
'lrm': 8206,
'lsaquo': 8249,
'lsquo': 8216,
'lt': 60,
'macr': 175,
'mdash': 8212,
'micro': 181,
'middot': 183,
'minus': 8722,
'mu': 956,
'nabla': 8711,
'nbsp': 160,
'ndash': 8211,
'ne': 8800,
'ni': 8715,
'not': 172,
'notin': 8713,
'nsub': 8836,
'ntilde': 241,
'nu': 957,
'oacute': 243,
'ocirc': 244,
'oelig': 339,
'ograve': 242,
'oline': 8254,
'omega': 969,
'omicron': 959,
'oplus': 8853,
'or': 8744,
'ordf': 170,
'ordm': 186,
'oslash': 248,
'otilde': 245,
'otimes': 8855,
'ouml': 246,
'para': 182,
'part': 8706,
'permil': 8240,
'perp': 8869,
'phi': 966,
'pi': 960,
'piv': 982,
'plusmn': 177,
'pound': 163,
'prime': 8242,
'prod': 8719,
'prop': 8733,
'psi': 968,
'quot': 34,
'rArr': 8658,
'radic': 8730,
'rang': 9002,
'raquo': 187,
'rarr': 8594,
'rceil': 8969,
'rdquo': 8221,
'real': 8476,
'reg': 174,
'rfloor': 8971,
'rho': 961,
'rlm': 8207,
'rsaquo': 8250,
'rsquo': 8217,
'sbquo': 8218,
'scaron': 353,
'sdot': 8901,
'sect': 167,
'shy': 173,
'sigma': 963,
'sigmaf': 962,
'sim': 8764,
'spades': 9824,
'sub': 8834,
'sube': 8838,
'sum': 8721,
'sup': 8835,
'sup1': 185,
'sup2': 178,
'sup3': 179,
'supe': 8839,
'szlig': 223,
'tau': 964,
'there4': 8756,
'theta': 952,
'thetasym': 977,
'thinsp': 8201,
'thorn': 254,
'tilde': 732,
'times': 215,
'trade': 8482,
'uArr': 8657,
'uacute': 250,
'uarr': 8593,
'ucirc': 251,
'ugrave': 249,
'uml': 168,
'upsih': 978,
'upsilon': 965,
'uuml': 252,
'weierp': 8472,
'xi': 958,
'yacute': 253,
'yen': 165,
'yuml': 255,
'zeta': 950,
'zwj': 8205,
'zwnj': 8204
}
| gpl-2.0 | -6,452,721,923,217,905,000 | 16.958801 | 50 | 0.450052 | false |
azlanismail/prismgames | examples/games/car/networkx/tests/test.py | 1 | 1289 | #!/usr/bin/env python
import sys
from os import path,getcwd
def run(verbosity=1,doctest=False,numpy=True):
"""Run NetworkX tests.
Parameters
----------
verbosity: integer, optional
Level of detail in test reports. Higher numbers provide more detail.
doctest: bool, optional
True to run doctests in code modules
numpy: bool, optional
True to test modules dependent on numpy
"""
try:
import nose
except ImportError:
raise ImportError(\
"The nose package is needed to run the NetworkX tests.")
sys.stderr.write("Running NetworkX tests:")
nx_install_dir=path.join(path.dirname(__file__), path.pardir)
# stop if running from source directory
if getcwd() == path.abspath(path.join(nx_install_dir,path.pardir)):
raise RuntimeError("Can't run tests from source directory.\n"
"Run 'nosetests' from the command line.")
argv=[' ','--verbosity=%d'%verbosity,
'-w',nx_install_dir,
'-exe']
if doctest:
argv.extend(['--with-doctest','--doctest-extension=txt'])
if not numpy:
argv.extend(['-A not numpy'])
nose.run(argv=argv)
if __name__=="__main__":
run()
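# Typical invocation (illustrative): this run() is normally reached through the
# package-level test hook, e.g. ``networkx.test(verbosity=2, doctest=True)``,
# or by executing this file directly as above.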
| gpl-2.0 | 1,359,636,146,898,896,000 | 26.644444 | 78 | 0.59038 | false |
xiandiancloud/ji | common/lib/xmodule/xmodule/tests/xml/test_policy.py | 248 | 1262 | """
Tests that policy json files import correctly when loading XML
"""
from nose.tools import assert_equals, assert_raises # pylint: disable=no-name-in-module
from xmodule.tests.xml.factories import CourseFactory
from xmodule.tests.xml import XModuleXmlImportTest
class TestPolicy(XModuleXmlImportTest):
"""
Tests that policy json files import correctly when loading xml
"""
def test_no_attribute_mapping(self):
# Policy files are json, and thus the values aren't passed through 'deserialize_field'
# Therefor, the string 'null' is passed unchanged to the Float field, which will trigger
# a ValueError
with assert_raises(ValueError):
course = self.process_xml(CourseFactory.build(policy={'days_early_for_beta': 'null'}))
# Trigger the exception by looking at the imported data
course.days_early_for_beta # pylint: disable=pointless-statement
def test_course_policy(self):
course = self.process_xml(CourseFactory.build(policy={'days_early_for_beta': None}))
assert_equals(None, course.days_early_for_beta)
course = self.process_xml(CourseFactory.build(policy={'days_early_for_beta': 9}))
assert_equals(9, course.days_early_for_beta)
| agpl-3.0 | -4,800,155,057,017,082,000 | 41.066667 | 98 | 0.704437 | false |
tersmitten/ansible | lib/ansible/modules/packaging/os/homebrew_tap.py | 87 | 6806 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Daniel Jaouen <[email protected]>
# (c) 2016, Indrajit Raychaudhuri <[email protected]>
#
# Based on homebrew (Andrew Dunham <[email protected]>)
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: homebrew_tap
author:
- "Indrajit Raychaudhuri (@indrajitr)"
- "Daniel Jaouen (@danieljaouen)"
short_description: Tap a Homebrew repository.
description:
- Tap external Homebrew repositories.
version_added: "1.6"
options:
name:
description:
- The GitHub user/organization repository to tap.
required: true
aliases: ['tap']
url:
description:
- The optional git URL of the repository to tap. The URL is not
assumed to be on GitHub, and the protocol doesn't have to be HTTP.
Any location and protocol that git can handle is fine.
- I(name) option may not be a list of multiple taps (but a single
tap instead) when this option is provided.
required: false
version_added: "2.2"
state:
description:
- state of the repository.
choices: [ 'present', 'absent' ]
required: false
default: 'present'
requirements: [ homebrew ]
'''
EXAMPLES = '''
- homebrew_tap:
name: homebrew/dupes
- homebrew_tap:
name: homebrew/dupes
state: absent
- homebrew_tap:
name: homebrew/dupes,homebrew/science
state: present
- homebrew_tap:
name: telemachus/brew
url: 'https://bitbucket.org/telemachus/brew'
'''
import re
from ansible.module_utils.basic import AnsibleModule
def a_valid_tap(tap):
'''Returns True if the tap is valid.'''
regex = re.compile(r'^([\w-]+)/(homebrew-)?([\w-]+)$')
return regex.match(tap)
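# Illustrative examples (not exercised by this module): 'homebrew/science' and
# 'user/homebrew-foo' match the pattern above, while a bare name such as
# 'dupes' or a full URL does not.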
def already_tapped(module, brew_path, tap):
'''Returns True if already tapped.'''
rc, out, err = module.run_command([
brew_path,
'tap',
])
taps = [tap_.strip().lower() for tap_ in out.split('\n') if tap_]
tap_name = re.sub('homebrew-', '', tap.lower())
return tap_name in taps
def add_tap(module, brew_path, tap, url=None):
'''Adds a single tap.'''
failed, changed, msg = False, False, ''
if not a_valid_tap(tap):
failed = True
msg = 'not a valid tap: %s' % tap
elif not already_tapped(module, brew_path, tap):
if module.check_mode:
module.exit_json(changed=True)
rc, out, err = module.run_command([
brew_path,
'tap',
tap,
url,
])
if rc == 0:
changed = True
msg = 'successfully tapped: %s' % tap
else:
failed = True
msg = 'failed to tap: %s' % tap
else:
msg = 'already tapped: %s' % tap
return (failed, changed, msg)
def add_taps(module, brew_path, taps):
'''Adds one or more taps.'''
failed, unchanged, added, msg = False, 0, 0, ''
for tap in taps:
(failed, changed, msg) = add_tap(module, brew_path, tap)
if failed:
break
if changed:
added += 1
else:
unchanged += 1
if failed:
msg = 'added: %d, unchanged: %d, error: ' + msg
msg = msg % (added, unchanged)
elif added:
changed = True
msg = 'added: %d, unchanged: %d' % (added, unchanged)
else:
msg = 'added: %d, unchanged: %d' % (added, unchanged)
return (failed, changed, msg)
def remove_tap(module, brew_path, tap):
'''Removes a single tap.'''
failed, changed, msg = False, False, ''
if not a_valid_tap(tap):
failed = True
msg = 'not a valid tap: %s' % tap
elif already_tapped(module, brew_path, tap):
if module.check_mode:
module.exit_json(changed=True)
rc, out, err = module.run_command([
brew_path,
'untap',
tap,
])
if not already_tapped(module, brew_path, tap):
changed = True
msg = 'successfully untapped: %s' % tap
else:
failed = True
msg = 'failed to untap: %s' % tap
else:
msg = 'already untapped: %s' % tap
return (failed, changed, msg)
def remove_taps(module, brew_path, taps):
'''Removes one or more taps.'''
failed, unchanged, removed, msg = False, 0, 0, ''
for tap in taps:
(failed, changed, msg) = remove_tap(module, brew_path, tap)
if failed:
break
if changed:
removed += 1
else:
unchanged += 1
if failed:
msg = 'removed: %d, unchanged: %d, error: ' + msg
msg = msg % (removed, unchanged)
elif removed:
changed = True
msg = 'removed: %d, unchanged: %d' % (removed, unchanged)
else:
msg = 'removed: %d, unchanged: %d' % (removed, unchanged)
return (failed, changed, msg)
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(aliases=['tap'], type='list', required=True),
url=dict(default=None, required=False),
state=dict(default='present', choices=['present', 'absent']),
),
supports_check_mode=True,
)
brew_path = module.get_bin_path(
'brew',
required=True,
opt_dirs=['/usr/local/bin']
)
taps = module.params['name']
url = module.params['url']
if module.params['state'] == 'present':
if url is None:
# No tap URL provided explicitly, continue with bulk addition
# of all the taps.
failed, changed, msg = add_taps(module, brew_path, taps)
else:
# When an tap URL is provided explicitly, we allow adding
# *single* tap only. Validate and proceed to add single tap.
if len(taps) > 1:
msg = "List of multiple taps may not be provided with 'url' option."
module.fail_json(msg=msg)
else:
failed, changed, msg = add_tap(module, brew_path, taps[0], url)
if failed:
module.fail_json(msg=msg)
else:
module.exit_json(changed=changed, msg=msg)
elif module.params['state'] == 'absent':
failed, changed, msg = remove_taps(module, brew_path, taps)
if failed:
module.fail_json(msg=msg)
else:
module.exit_json(changed=changed, msg=msg)
if __name__ == '__main__':
main()
| gpl-3.0 | -7,140,108,822,832,994,000 | 25.690196 | 92 | 0.554952 | false |
baidu/Paddle | python/paddle/fluid/tests/unittests/test_sequence_expand.py | 1 | 4061 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest
class TestSequenceExpand(OpTest):
def set_data(self):
x_data = np.random.uniform(0.1, 1, [3, 1]).astype('float32')
y_data = np.random.uniform(0.1, 1, [8, 1]).astype('float32')
y_lod = [[1, 3, 4]]
self.inputs = {'X': x_data, 'Y': (y_data, y_lod)}
def compute(self):
x = self.inputs['X']
x_data, x_lod = x if type(x) == tuple else (x, None)
y_data, y_lod = self.inputs['Y']
if hasattr(self, 'attrs'):
ref_level = self.attrs['ref_level']
else:
ref_level = len(y_lod) - 1
out = np.zeros(shape=((0, ) + x_data.shape[1:]), dtype=x_data.dtype)
if x_lod is None:
# x_idx = [i for i in xrange(x_data.shape[0] + 1)]
x_idx = [1] * x_data.shape[0]
else:
x_idx = x_lod[0]
out_lod = [[]]
offset = 0
for i in range(len(y_lod[ref_level])):
repeat_num = y_lod[ref_level][i]
x_len = x_idx[i]
if repeat_num > 0:
x_sub = x_data[offset:(offset + x_len), :]
stacked_x_sub = x_sub
for r in range(repeat_num - 1):
stacked_x_sub = np.vstack((stacked_x_sub, x_sub))
out = np.vstack((out, stacked_x_sub))
if x_lod is not None:
for j in range(repeat_num):
out_lod[0].append(x_len)
offset += x_len
if x_lod is None:
self.outputs = {'Out': out}
else:
self.outputs = {'Out': (out, out_lod)}
def setUp(self):
self.op_type = 'sequence_expand'
self.set_data()
self.compute()
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(["X"], "Out")
class TestSequenceExpandCase1(TestSequenceExpand):
def set_data(self):
x_data = np.random.uniform(0.1, 1, [5, 1]).astype('float32')
y_data = np.random.uniform(0.1, 1, [13, 1]).astype('float32')
y_lod = [[2, 3], [2, 2, 3, 3, 3]]
self.inputs = {'X': x_data, 'Y': (y_data, y_lod)}
self.attrs = {'ref_level': 1}
class TestSequenceExpandCase2(TestSequenceExpand):
def set_data(self):
x_data = np.random.uniform(0.1, 1, [1, 2, 2]).astype('float32')
x_lod = [[1]]
y_data = np.random.uniform(0.1, 1, [2, 2, 2]).astype('float32')
y_lod = [[2], [1, 1]]
self.inputs = {'X': (x_data, x_lod), 'Y': (y_data, y_lod)}
self.attrs = {'ref_level': 0}
class TestSequenceExpandCase3(TestSequenceExpand):
def set_data(self):
x_data = np.random.uniform(0.1, 1, [4, 1]).astype('float32')
x_lod = [[1, 1, 1, 1]]
y_data = np.random.uniform(0.1, 1, [8, 1]).astype('float32')
y_lod = [[2, 2, 2, 2]]
self.inputs = {'X': (x_data, x_lod), 'Y': (y_data, y_lod)}
class TestSequenceExpandCase4(TestSequenceExpand):
def set_data(self):
data = np.random.uniform(0.1, 1, [5 * 2, 1])
x_data = np.array(data).reshape([5, 2]).astype('float32')
x_lod = [[2, 3]]
y_data = np.random.uniform(0.1, 1, [5, 1]).astype('float32')
y_lod = [[2], [2, 3]]
self.inputs = {'X': (x_data, x_lod), 'Y': (y_data, y_lod)}
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 4,225,064,777,958,325,000 | 32.841667 | 76 | 0.541738 | false |
invisiblek/python-for-android | python-modules/twisted/twisted/protocols/gps/nmea.py | 59 | 7970 | # -*- test-case-name: twisted.test.test_nmea -*-
# Copyright (c) 2001-2009 Twisted Matrix Laboratories.
# See LICENSE for details.
"""NMEA 0183 implementation
Maintainer: Bob Ippolito
The following NMEA 0183 sentences are currently understood::
GPGGA (fix)
GPGLL (position)
GPRMC (position and time)
GPGSA (active satellites)
The following NMEA 0183 sentences require implementation::
None really, the others aren't generally useful or implemented in most devices anyhow
Other desired features::
- A NMEA 0183 producer to emulate GPS devices (?)
"""
import operator
from twisted.protocols import basic
from twisted.python.compat import reduce
POSFIX_INVALID, POSFIX_SPS, POSFIX_DGPS, POSFIX_PPS = 0, 1, 2, 3
MODE_AUTO, MODE_FORCED = 'A', 'M'
MODE_NOFIX, MODE_2D, MODE_3D = 1, 2, 3
class InvalidSentence(Exception):
pass
class InvalidChecksum(Exception):
pass
class NMEAReceiver(basic.LineReceiver):
"""This parses most common NMEA-0183 messages, presumably from a serial GPS device at 4800 bps
"""
delimiter = '\r\n'
dispatch = {
'GPGGA': 'fix',
'GPGLL': 'position',
'GPGSA': 'activesatellites',
'GPRMC': 'positiontime',
'GPGSV': 'viewsatellites', # not implemented
'GPVTG': 'course', # not implemented
'GPALM': 'almanac', # not implemented
'GPGRS': 'range', # not implemented
'GPGST': 'noise', # not implemented
'GPMSS': 'beacon', # not implemented
'GPZDA': 'time', # not implemented
}
# generally you may miss the beginning of the first message
ignore_invalid_sentence = 1
# checksums shouldn't be invalid
ignore_checksum_mismatch = 0
# ignore unknown sentence types
ignore_unknown_sentencetypes = 0
# do we want to even bother checking to see if it's from the 20th century?
convert_dates_before_y2k = 1
def lineReceived(self, line):
if not line.startswith('$'):
if self.ignore_invalid_sentence:
return
raise InvalidSentence("%r does not begin with $" % (line,))
# message is everything between $ and *, checksum is xor of all ASCII values of the message
strmessage, checksum = line[1:].strip().split('*')
message = strmessage.split(',')
sentencetype, message = message[0], message[1:]
dispatch = self.dispatch.get(sentencetype, None)
if (not dispatch) and (not self.ignore_unknown_sentencetypes):
raise InvalidSentence("sentencetype %r" % (sentencetype,))
if not self.ignore_checksum_mismatch:
checksum, calculated_checksum = int(checksum, 16), reduce(operator.xor, map(ord, strmessage))
if checksum != calculated_checksum:
raise InvalidChecksum("Given 0x%02X != 0x%02X" % (checksum, calculated_checksum))
handler = getattr(self, "handle_%s" % dispatch, None)
decoder = getattr(self, "decode_%s" % dispatch, None)
if not (dispatch and handler and decoder):
# missing dispatch, handler, or decoder
return
# return handler(*decoder(*message))
try:
decoded = decoder(*message)
except Exception, e:
raise InvalidSentence("%r is not a valid %s (%s) sentence" % (line, sentencetype, dispatch))
return handler(*decoded)
def decode_position(self, latitude, ns, longitude, ew, utc, status):
latitude, longitude = self._decode_latlon(latitude, ns, longitude, ew)
utc = self._decode_utc(utc)
if status == 'A':
status = 1
else:
status = 0
return (
latitude,
longitude,
utc,
status,
)
def decode_positiontime(self, utc, status, latitude, ns, longitude, ew, speed, course, utcdate, magvar, magdir):
utc = self._decode_utc(utc)
latitude, longitude = self._decode_latlon(latitude, ns, longitude, ew)
if speed != '':
speed = float(speed)
else:
speed = None
if course != '':
course = float(course)
else:
course = None
utcdate = 2000+int(utcdate[4:6]), int(utcdate[2:4]), int(utcdate[0:2])
if self.convert_dates_before_y2k and utcdate[0] > 2073:
# GPS was invented by the US DoD in 1973, but NMEA uses 2 digit year.
# Highly unlikely that we'll be using NMEA or this twisted module in 70 years,
# but remotely possible that you'll be using it to play back data from the 20th century.
utcdate = (utcdate[0] - 100, utcdate[1], utcdate[2])
if magvar != '':
magvar = float(magvar)
if magdir == 'W':
magvar = -magvar
else:
magvar = None
return (
latitude,
longitude,
speed,
course,
# UTC seconds past utcdate
utc,
# UTC (year, month, day)
utcdate,
# None or magnetic variation in degrees (west is negative)
magvar,
)
def _decode_utc(self, utc):
utc_hh, utc_mm, utc_ss = map(float, (utc[:2], utc[2:4], utc[4:]))
return utc_hh * 3600.0 + utc_mm * 60.0 + utc_ss
def _decode_latlon(self, latitude, ns, longitude, ew):
latitude = float(latitude[:2]) + float(latitude[2:])/60.0
if ns == 'S':
latitude = -latitude
longitude = float(longitude[:3]) + float(longitude[3:])/60.0
if ew == 'W':
longitude = -longitude
return (latitude, longitude)
def decode_activesatellites(self, mode1, mode2, *args):
satellites, (pdop, hdop, vdop) = args[:12], map(float, args[12:])
satlist = []
for n in satellites:
if n:
satlist.append(int(n))
else:
satlist.append(None)
mode = (mode1, int(mode2))
return (
# satellite list by channel
tuple(satlist),
# (MODE_AUTO/MODE_FORCED, MODE_NOFIX/MODE_2DFIX/MODE_3DFIX)
mode,
# position dilution of precision
pdop,
# horizontal dilution of precision
hdop,
# vertical dilution of precision
vdop,
)
def decode_fix(self, utc, latitude, ns, longitude, ew, posfix, satellites, hdop, altitude, altitude_units, geoid_separation, geoid_separation_units, dgps_age, dgps_station_id):
latitude, longitude = self._decode_latlon(latitude, ns, longitude, ew)
utc = self._decode_utc(utc)
posfix = int(posfix)
satellites = int(satellites)
hdop = float(hdop)
altitude = (float(altitude), altitude_units)
if geoid_separation != '':
geoid = (float(geoid_separation), geoid_separation_units)
else:
geoid = None
if dgps_age != '':
dgps = (float(dgps_age), dgps_station_id)
else:
dgps = None
return (
# seconds since 00:00 UTC
utc,
# latitude (degrees)
latitude,
# longitude (degrees)
longitude,
# position fix status (POSFIX_INVALID, POSFIX_SPS, POSFIX_DGPS, POSFIX_PPS)
posfix,
# number of satellites used for fix 0 <= satellites <= 12
satellites,
# horizontal dilution of precision
hdop,
# None or (altitude according to WGS-84 ellipsoid, units (typically 'M' for meters))
altitude,
# None or (geoid separation according to WGS-84 ellipsoid, units (typically 'M' for meters))
geoid,
# (age of dgps data in seconds, dgps station id)
dgps,
)
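# Illustrative usage sketch (assumption: wiring to the serial GPS device is
# done elsewhere, e.g. with twisted.internet.serialport.SerialPort at 4800
# baud):
#
#   class PrintingGPS(NMEAReceiver):
#       def handle_position(self, latitude, longitude, utc, status):
#           print latitude, longitude
#
# Only the handle_* methods for the sentence types of interest need to be
# implemented; PrintingGPS and its body are hypothetical, not part of this
# module.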
| apache-2.0 | 7,266,965,071,144,821,000 | 37.133971 | 180 | 0.569385 | false |
cav71/osc | tests/test_setlinkrev.py | 14 | 4652 | import osc.core
import osc.oscerr
import os
from common import GET, PUT, OscTestCase
FIXTURES_DIR = os.path.join(os.getcwd(), 'setlinkrev_fixtures')
def suite():
import unittest
return unittest.makeSuite(TestSetLinkRev)
class TestSetLinkRev(OscTestCase):
def setUp(self):
OscTestCase.setUp(self, copytree=False)
def _get_fixtures_dir(self):
return FIXTURES_DIR
@GET('http://localhost/source/osctest/simple/_link', file='simple_link')
@GET('http://localhost/source/srcprj/srcpkg?rev=latest', file='simple_filesremote')
@PUT('http://localhost/source/osctest/simple/_link',
exp='<link package="srcpkg" project="srcprj" rev="42" />', text='dummytext')
def test_simple1(self):
"""a simple set_link_rev call without revision"""
osc.core.set_link_rev('http://localhost', 'osctest', 'simple')
@GET('http://localhost/source/osctest/simple/_link', file='simple_link')
@PUT('http://localhost/source/osctest/simple/_link',
exp='<link package="srcpkg" project="srcprj" rev="42" />', text='dummytext')
def test_simple2(self):
"""a simple set_link_rev call with revision"""
osc.core.set_link_rev('http://localhost', 'osctest', 'simple', '42')
@GET('http://localhost/source/osctest/simple/_link', file='noproject_link')
@GET('http://localhost/source/osctest/srcpkg?rev=latest&expand=1', file='expandedsrc_filesremote')
@PUT('http://localhost/source/osctest/simple/_link',
exp='<link package="srcpkg" rev="eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee" vrev="1" />', text='dummytext')
def test_expandedsrc(self):
"""expand src package"""
osc.core.set_link_rev('http://localhost', 'osctest', 'simple', expand=True)
@GET('http://localhost/source/osctest/simple/_link', file='link_with_rev')
@GET('http://localhost/source/srcprj/srcpkg?rev=latest', file='simple_filesremote')
@PUT('http://localhost/source/osctest/simple/_link',
exp='<link package="srcpkg" project="srcprj" rev="42" />', text='dummytext')
def test_existingrev(self):
"""link already has a rev attribute, update it to current version"""
# we could also avoid the superfluous PUT
osc.core.set_link_rev('http://localhost', 'osctest', 'simple')
@GET('http://localhost/source/osctest/simple/_link', file='link_with_rev')
@GET('http://localhost/source/srcprj/srcpkg?rev=latest&expand=1', file='expandedsrc_filesremote')
@PUT('http://localhost/source/osctest/simple/_link',
exp='<link package="srcpkg" project="srcprj" rev="eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee" vrev="1" />',
text='dummytext')
def test_expandexistingrev(self):
"""link already has a rev attribute, update it to current version"""
osc.core.set_link_rev('http://localhost', 'osctest', 'simple', expand=True)
@GET('http://localhost/source/osctest/simple/_link', file='simple_link')
@GET('http://localhost/source/srcprj/srcpkg?rev=latest&expand=1', text='conflict in file merge', code=400)
def test_linkerror(self):
"""link is broken"""
try:
from urllib.error import HTTPError
except ImportError:
from urllib2 import HTTPError
# the backend returns status 400 if we try to expand a broken _link
self.assertRaises(HTTPError, osc.core.set_link_rev, 'http://localhost', 'osctest', 'simple', expand=True)
@GET('http://localhost/source/osctest/simple/_link', file='rev_link')
@PUT('http://localhost/source/osctest/simple/_link',
exp='<link package="srcpkg" project="srcprj" />', text='dummytext')
def test_deleterev(self):
"""delete rev attribute from link xml"""
osc.core.set_link_rev('http://localhost', 'osctest', 'simple', revision=None)
@GET('http://localhost/source/osctest/simple/_link', file='md5_rev_link')
@PUT('http://localhost/source/osctest/simple/_link',
exp='<link package="srcpkg" project="srcprj" />', text='dummytext')
    def test_deleterevvrev(self):
"""delete rev and vrev attribute from link xml"""
osc.core.set_link_rev('http://localhost', 'osctest', 'simple', revision=None)
@GET('http://localhost/source/osctest/simple/_link', file='simple_link')
@PUT('http://localhost/source/osctest/simple/_link',
exp='<link package="srcpkg" project="srcprj" />', text='dummytext')
def test_deleterevnonexistent(self):
"""delete non existent rev attribute from link xml"""
osc.core.set_link_rev('http://localhost', 'osctest', 'simple', revision=None)
if __name__ == '__main__':
import unittest
unittest.main()
| gpl-2.0 | -1,359,176,530,188,534,300 | 49.021505 | 113 | 0.659931 | false |
twlizer/plugin.video.Pseudonymous | chardet/gb2312prober.py | 231 | 1722 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import GB2312DistributionAnalysis
from .mbcssm import GB2312SMModel
class GB2312Prober(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(GB2312SMModel)
self._mDistributionAnalyzer = GB2312DistributionAnalysis()
self.reset()
def get_charset_name(self):
return "GB2312"
| gpl-2.0 | 1,626,406,253,262,029,800 | 40 | 69 | 0.700929 | false |
gfreed/android_external_chromium-org | third_party/jinja2/compiler.py | 121 | 61899 | # -*- coding: utf-8 -*-
"""
jinja2.compiler
~~~~~~~~~~~~~~~
Compiles nodes into python code.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
from cStringIO import StringIO
from itertools import chain
from copy import deepcopy
from jinja2 import nodes
from jinja2.nodes import EvalContext
from jinja2.visitor import NodeVisitor
from jinja2.exceptions import TemplateAssertionError
from jinja2.utils import Markup, concat, escape, is_python_keyword, next
operators = {
'eq': '==',
'ne': '!=',
'gt': '>',
'gteq': '>=',
'lt': '<',
'lteq': '<=',
'in': 'in',
'notin': 'not in'
}
try:
exec '(0 if 0 else 0)'
except SyntaxError:
have_condexpr = False
else:
have_condexpr = True
# what method to iterate over items do we want to use for dict iteration
# in generated code? on 2.x let's go with iteritems, on 3.x with items
if hasattr(dict, 'iteritems'):
dict_item_iter = 'iteritems'
else:
dict_item_iter = 'items'
# does if 0: dummy(x) get us x into the scope?
def unoptimize_before_dead_code():
x = 42
def f():
if 0: dummy(x)
return f
unoptimize_before_dead_code = bool(unoptimize_before_dead_code().func_closure)
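# Illustrative note (not part of the original source): the probe above builds a
# closure whose only reference to ``x`` sits in dead code (``if 0: dummy(x)``).
# If the running Python still records ``x`` in ``func_closure`` despite the dead
# branch, the generated code can keep using ``if 0: dummy(...)`` statements to
# make template locals visible to nested scopes; otherwise a real ``dummy()``
# call (and a no-op ``dummy`` lambda) is emitted instead, as seen below in
# ``visit_Template`` and ``unoptimize_scope``.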
def generate(node, environment, name, filename, stream=None,
defer_init=False):
"""Generate the python source for a node tree."""
if not isinstance(node, nodes.Template):
raise TypeError('Can\'t compile non template nodes')
generator = CodeGenerator(environment, name, filename, stream, defer_init)
generator.visit(node)
if stream is None:
return generator.stream.getvalue()
def has_safe_repr(value):
"""Does the node have a safe representation?"""
if value is None or value is NotImplemented or value is Ellipsis:
return True
if isinstance(value, (bool, int, long, float, complex, basestring,
xrange, Markup)):
return True
if isinstance(value, (tuple, list, set, frozenset)):
for item in value:
if not has_safe_repr(item):
return False
return True
elif isinstance(value, dict):
for key, value in value.iteritems():
if not has_safe_repr(key):
return False
if not has_safe_repr(value):
return False
return True
return False
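# Illustrative note (not part of the original source): has_safe_repr() returns
# True for plain literal structures such as {'answer': (1, 2.5, u'text')} and
# False as soon as an arbitrary object is nested inside, e.g. {'obj': object()}.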
def find_undeclared(nodes, names):
"""Check if the names passed are accessed undeclared. The return value
is a set of all the undeclared names from the sequence of names found.
"""
visitor = UndeclaredNameVisitor(names)
try:
for node in nodes:
visitor.visit(node)
except VisitorExit:
pass
return visitor.undeclared
class Identifiers(object):
"""Tracks the status of identifiers in frames."""
def __init__(self):
# variables that are known to be declared (probably from outer
# frames or because they are special for the frame)
self.declared = set()
# undeclared variables from outer scopes
self.outer_undeclared = set()
# names that are accessed without being explicitly declared by
# this one or any of the outer scopes. Names can appear both in
# declared and undeclared.
self.undeclared = set()
# names that are declared locally
self.declared_locally = set()
# names that are declared by parameters
self.declared_parameter = set()
def add_special(self, name):
"""Register a special name like `loop`."""
self.undeclared.discard(name)
self.declared.add(name)
def is_declared(self, name):
"""Check if a name is declared in this or an outer scope."""
if name in self.declared_locally or name in self.declared_parameter:
return True
return name in self.declared
def copy(self):
return deepcopy(self)
class Frame(object):
"""Holds compile time information for us."""
def __init__(self, eval_ctx, parent=None):
self.eval_ctx = eval_ctx
self.identifiers = Identifiers()
# a toplevel frame is the root + soft frames such as if conditions.
self.toplevel = False
# the root frame is basically just the outermost frame, so no if
# conditions. This information is used to optimize inheritance
# situations.
self.rootlevel = False
# in some dynamic inheritance situations the compiler needs to add
# write tests around output statements.
self.require_output_check = parent and parent.require_output_check
# inside some tags we are using a buffer rather than yield statements.
# this for example affects {% filter %} or {% macro %}. If a frame
# is buffered this variable points to the name of the list used as
# buffer.
self.buffer = None
# the name of the block we're in, otherwise None.
self.block = parent and parent.block or None
# a set of actually assigned names
self.assigned_names = set()
# the parent of this frame
self.parent = parent
if parent is not None:
self.identifiers.declared.update(
parent.identifiers.declared |
parent.identifiers.declared_parameter |
parent.assigned_names
)
self.identifiers.outer_undeclared.update(
parent.identifiers.undeclared -
self.identifiers.declared
)
self.buffer = parent.buffer
def copy(self):
"""Create a copy of the current one."""
rv = object.__new__(self.__class__)
rv.__dict__.update(self.__dict__)
rv.identifiers = object.__new__(self.identifiers.__class__)
rv.identifiers.__dict__.update(self.identifiers.__dict__)
return rv
def inspect(self, nodes):
"""Walk the node and check for identifiers. If the scope is hard (eg:
enforce on a python level) overrides from outer scopes are tracked
differently.
"""
visitor = FrameIdentifierVisitor(self.identifiers)
for node in nodes:
visitor.visit(node)
def find_shadowed(self, extra=()):
"""Find all the shadowed names. extra is an iterable of variables
        that may be defined with `add_special` which may occur scoped.
"""
i = self.identifiers
return (i.declared | i.outer_undeclared) & \
(i.declared_locally | i.declared_parameter) | \
set(x for x in extra if i.is_declared(x))
def inner(self):
"""Return an inner frame."""
return Frame(self.eval_ctx, self)
def soft(self):
"""Return a soft frame. A soft frame may not be modified as
standalone thing as it shares the resources with the frame it
was created of, but it's not a rootlevel frame any longer.
"""
rv = self.copy()
rv.rootlevel = False
return rv
__copy__ = copy
class VisitorExit(RuntimeError):
"""Exception used by the `UndeclaredNameVisitor` to signal a stop."""
class DependencyFinderVisitor(NodeVisitor):
"""A visitor that collects filter and test calls."""
def __init__(self):
self.filters = set()
self.tests = set()
def visit_Filter(self, node):
self.generic_visit(node)
self.filters.add(node.name)
def visit_Test(self, node):
self.generic_visit(node)
self.tests.add(node.name)
def visit_Block(self, node):
"""Stop visiting at blocks."""
class UndeclaredNameVisitor(NodeVisitor):
"""A visitor that checks if a name is accessed without being
declared. This is different from the frame visitor as it will
not stop at closure frames.
"""
def __init__(self, names):
self.names = set(names)
self.undeclared = set()
def visit_Name(self, node):
if node.ctx == 'load' and node.name in self.names:
self.undeclared.add(node.name)
if self.undeclared == self.names:
raise VisitorExit()
else:
self.names.discard(node.name)
def visit_Block(self, node):
"""Stop visiting a blocks."""
class FrameIdentifierVisitor(NodeVisitor):
"""A visitor for `Frame.inspect`."""
def __init__(self, identifiers):
self.identifiers = identifiers
def visit_Name(self, node):
"""All assignments to names go through this function."""
if node.ctx == 'store':
self.identifiers.declared_locally.add(node.name)
elif node.ctx == 'param':
self.identifiers.declared_parameter.add(node.name)
elif node.ctx == 'load' and not \
self.identifiers.is_declared(node.name):
self.identifiers.undeclared.add(node.name)
def visit_If(self, node):
self.visit(node.test)
real_identifiers = self.identifiers
old_names = real_identifiers.declared_locally | \
real_identifiers.declared_parameter
def inner_visit(nodes):
if not nodes:
return set()
self.identifiers = real_identifiers.copy()
for subnode in nodes:
self.visit(subnode)
rv = self.identifiers.declared_locally - old_names
# we have to remember the undeclared variables of this branch
# because we will have to pull them.
real_identifiers.undeclared.update(self.identifiers.undeclared)
self.identifiers = real_identifiers
return rv
body = inner_visit(node.body)
else_ = inner_visit(node.else_ or ())
# the differences between the two branches are also pulled as
# undeclared variables
real_identifiers.undeclared.update(body.symmetric_difference(else_) -
real_identifiers.declared)
# remember those that are declared.
real_identifiers.declared_locally.update(body | else_)
def visit_Macro(self, node):
self.identifiers.declared_locally.add(node.name)
def visit_Import(self, node):
self.generic_visit(node)
self.identifiers.declared_locally.add(node.target)
def visit_FromImport(self, node):
self.generic_visit(node)
for name in node.names:
if isinstance(name, tuple):
self.identifiers.declared_locally.add(name[1])
else:
self.identifiers.declared_locally.add(name)
def visit_Assign(self, node):
"""Visit assignments in the correct order."""
self.visit(node.node)
self.visit(node.target)
def visit_For(self, node):
"""Visiting stops at for blocks. However the block sequence
is visited as part of the outer scope.
"""
self.visit(node.iter)
def visit_CallBlock(self, node):
self.visit(node.call)
def visit_FilterBlock(self, node):
self.visit(node.filter)
def visit_Scope(self, node):
"""Stop visiting at scopes."""
def visit_Block(self, node):
"""Stop visiting at blocks."""
class CompilerExit(Exception):
"""Raised if the compiler encountered a situation where it just
doesn't make sense to further process the code. Any block that
raises such an exception is not further processed.
"""
class CodeGenerator(NodeVisitor):
def __init__(self, environment, name, filename, stream=None,
defer_init=False):
if stream is None:
stream = StringIO()
self.environment = environment
self.name = name
self.filename = filename
self.stream = stream
self.created_block_context = False
self.defer_init = defer_init
# aliases for imports
self.import_aliases = {}
# a registry for all blocks. Because blocks are moved out
# into the global python scope they are registered here
self.blocks = {}
# the number of extends statements so far
self.extends_so_far = 0
# some templates have a rootlevel extends. In this case we
# can safely assume that we're a child template and do some
# more optimizations.
self.has_known_extends = False
# the current line number
self.code_lineno = 1
# registry of all filters and tests (global, not block local)
self.tests = {}
self.filters = {}
# the debug information
self.debug_info = []
self._write_debug_info = None
# the number of new lines before the next write()
self._new_lines = 0
# the line number of the last written statement
self._last_line = 0
# true if nothing was written so far.
self._first_write = True
# used by the `temporary_identifier` method to get new
# unique, temporary identifier
self._last_identifier = 0
# the current indentation
self._indentation = 0
# -- Various compilation helpers
def fail(self, msg, lineno):
"""Fail with a :exc:`TemplateAssertionError`."""
raise TemplateAssertionError(msg, lineno, self.name, self.filename)
def temporary_identifier(self):
"""Get a new unique identifier."""
self._last_identifier += 1
return 't_%d' % self._last_identifier
def buffer(self, frame):
"""Enable buffering for the frame from that point onwards."""
frame.buffer = self.temporary_identifier()
self.writeline('%s = []' % frame.buffer)
def return_buffer_contents(self, frame):
"""Return the buffer contents of the frame."""
if frame.eval_ctx.volatile:
self.writeline('if context.eval_ctx.autoescape:')
self.indent()
self.writeline('return Markup(concat(%s))' % frame.buffer)
self.outdent()
self.writeline('else:')
self.indent()
self.writeline('return concat(%s)' % frame.buffer)
self.outdent()
elif frame.eval_ctx.autoescape:
self.writeline('return Markup(concat(%s))' % frame.buffer)
else:
self.writeline('return concat(%s)' % frame.buffer)
def indent(self):
"""Indent by one."""
self._indentation += 1
def outdent(self, step=1):
"""Outdent by step."""
self._indentation -= step
def start_write(self, frame, node=None):
"""Yield or write into the frame buffer."""
if frame.buffer is None:
self.writeline('yield ', node)
else:
self.writeline('%s.append(' % frame.buffer, node)
def end_write(self, frame):
"""End the writing process started by `start_write`."""
if frame.buffer is not None:
self.write(')')
def simple_write(self, s, frame, node=None):
"""Simple shortcut for start_write + write + end_write."""
self.start_write(frame, node)
self.write(s)
self.end_write(frame)
def blockvisit(self, nodes, frame):
"""Visit a list of nodes as block in a frame. If the current frame
is no buffer a dummy ``if 0: yield None`` is written automatically
unless the force_generator parameter is set to False.
"""
if frame.buffer is None:
self.writeline('if 0: yield None')
else:
self.writeline('pass')
try:
for node in nodes:
self.visit(node, frame)
except CompilerExit:
pass
def write(self, x):
"""Write a string into the output stream."""
if self._new_lines:
if not self._first_write:
self.stream.write('\n' * self._new_lines)
self.code_lineno += self._new_lines
if self._write_debug_info is not None:
self.debug_info.append((self._write_debug_info,
self.code_lineno))
self._write_debug_info = None
self._first_write = False
self.stream.write(' ' * self._indentation)
self._new_lines = 0
self.stream.write(x)
def writeline(self, x, node=None, extra=0):
"""Combination of newline and write."""
self.newline(node, extra)
self.write(x)
def newline(self, node=None, extra=0):
"""Add one or more newlines before the next write."""
self._new_lines = max(self._new_lines, 1 + extra)
if node is not None and node.lineno != self._last_line:
self._write_debug_info = node.lineno
self._last_line = node.lineno
def signature(self, node, frame, extra_kwargs=None):
"""Writes a function call to the stream for the current node.
A leading comma is added automatically. The extra keyword
arguments may not include python keywords otherwise a syntax
        error could occur.  The extra keyword arguments should be given
as python dict.
"""
# if any of the given keyword arguments is a python keyword
# we have to make sure that no invalid call is created.
kwarg_workaround = False
for kwarg in chain((x.key for x in node.kwargs), extra_kwargs or ()):
if is_python_keyword(kwarg):
kwarg_workaround = True
break
for arg in node.args:
self.write(', ')
self.visit(arg, frame)
if not kwarg_workaround:
for kwarg in node.kwargs:
self.write(', ')
self.visit(kwarg, frame)
if extra_kwargs is not None:
for key, value in extra_kwargs.iteritems():
self.write(', %s=%s' % (key, value))
if node.dyn_args:
self.write(', *')
self.visit(node.dyn_args, frame)
if kwarg_workaround:
if node.dyn_kwargs is not None:
self.write(', **dict({')
else:
self.write(', **{')
for kwarg in node.kwargs:
self.write('%r: ' % kwarg.key)
self.visit(kwarg.value, frame)
self.write(', ')
if extra_kwargs is not None:
for key, value in extra_kwargs.iteritems():
self.write('%r: %s, ' % (key, value))
if node.dyn_kwargs is not None:
self.write('}, **')
self.visit(node.dyn_kwargs, frame)
self.write(')')
else:
self.write('}')
elif node.dyn_kwargs is not None:
self.write(', **')
self.visit(node.dyn_kwargs, frame)
def pull_locals(self, frame):
"""Pull all the references identifiers into the local scope."""
for name in frame.identifiers.undeclared:
self.writeline('l_%s = context.resolve(%r)' % (name, name))
def pull_dependencies(self, nodes):
"""Pull all the dependencies."""
visitor = DependencyFinderVisitor()
for node in nodes:
visitor.visit(node)
for dependency in 'filters', 'tests':
mapping = getattr(self, dependency)
for name in getattr(visitor, dependency):
if name not in mapping:
mapping[name] = self.temporary_identifier()
self.writeline('%s = environment.%s[%r]' %
(mapping[name], dependency, name))
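    # Illustrative note (not part of the original source): for a template using
    # ``{{ name|upper }}`` the visitor above records the filter "upper" and
    # pull_dependencies() emits something like ``t_1 = environment.filters['upper']``
    # so the compiled render function avoids a dict lookup on every use.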
def unoptimize_scope(self, frame):
"""Disable Python optimizations for the frame."""
# XXX: this is not that nice but it has no real overhead. It
# mainly works because python finds the locals before dead code
# is removed. If that breaks we have to add a dummy function
# that just accepts the arguments and does nothing.
if frame.identifiers.declared:
self.writeline('%sdummy(%s)' % (
unoptimize_before_dead_code and 'if 0: ' or '',
', '.join('l_' + name for name in frame.identifiers.declared)
))
def push_scope(self, frame, extra_vars=()):
"""This function returns all the shadowed variables in a dict
in the form name: alias and will write the required assignments
into the current scope. No indentation takes place.
This also predefines locally declared variables from the loop
body because under some circumstances it may be the case that
`extra_vars` is passed to `Frame.find_shadowed`.
"""
aliases = {}
for name in frame.find_shadowed(extra_vars):
aliases[name] = ident = self.temporary_identifier()
self.writeline('%s = l_%s' % (ident, name))
to_declare = set()
for name in frame.identifiers.declared_locally:
if name not in aliases:
to_declare.add('l_' + name)
if to_declare:
self.writeline(' = '.join(to_declare) + ' = missing')
return aliases
def pop_scope(self, aliases, frame):
"""Restore all aliases and delete unused variables."""
for name, alias in aliases.iteritems():
self.writeline('l_%s = %s' % (name, alias))
to_delete = set()
for name in frame.identifiers.declared_locally:
if name not in aliases:
to_delete.add('l_' + name)
if to_delete:
# we cannot use the del statement here because enclosed
# scopes can trigger a SyntaxError:
# a = 42; b = lambda: a; del a
self.writeline(' = '.join(to_delete) + ' = missing')
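    # Illustrative note (not part of the original source): together push_scope()
    # and pop_scope() emit code such as ``t_2 = l_item`` before a block and
    # ``l_item = t_2`` after it, so names shadowed inside {% for %} or
    # {% filter %} blocks are restored without a ``del`` statement (which, as
    # the comment above explains, can break enclosed scopes).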
def function_scoping(self, node, frame, children=None,
find_special=True):
"""In Jinja a few statements require the help of anonymous
functions. Those are currently macros and call blocks and in
        the future also recursive loops.  As there is currently a
        technical limitation that doesn't allow reading and writing a
        variable in a scope where the initial value is coming from an
        outer scope, this function tries to fall back with a common
        error message.  Additionally the frame passed is modified so
        that the arguments are collected and callers are looked up.
This will return the modified frame.
"""
# we have to iterate twice over it, make sure that works
if children is None:
children = node.iter_child_nodes()
children = list(children)
func_frame = frame.inner()
func_frame.inspect(children)
# variables that are undeclared (accessed before declaration) and
# declared locally *and* part of an outside scope raise a template
# assertion error. Reason: we can't generate reasonable code from
# it without aliasing all the variables.
# this could be fixed in Python 3 where we have the nonlocal
# keyword or if we switch to bytecode generation
overriden_closure_vars = (
func_frame.identifiers.undeclared &
func_frame.identifiers.declared &
(func_frame.identifiers.declared_locally |
func_frame.identifiers.declared_parameter)
)
if overriden_closure_vars:
self.fail('It\'s not possible to set and access variables '
'derived from an outer scope! (affects: %s)' %
', '.join(sorted(overriden_closure_vars)), node.lineno)
# remove variables from a closure from the frame's undeclared
# identifiers.
func_frame.identifiers.undeclared -= (
func_frame.identifiers.undeclared &
func_frame.identifiers.declared
)
# no special variables for this scope, abort early
if not find_special:
return func_frame
func_frame.accesses_kwargs = False
func_frame.accesses_varargs = False
func_frame.accesses_caller = False
func_frame.arguments = args = ['l_' + x.name for x in node.args]
undeclared = find_undeclared(children, ('caller', 'kwargs', 'varargs'))
if 'caller' in undeclared:
func_frame.accesses_caller = True
func_frame.identifiers.add_special('caller')
args.append('l_caller')
if 'kwargs' in undeclared:
func_frame.accesses_kwargs = True
func_frame.identifiers.add_special('kwargs')
args.append('l_kwargs')
if 'varargs' in undeclared:
func_frame.accesses_varargs = True
func_frame.identifiers.add_special('varargs')
args.append('l_varargs')
return func_frame
def macro_body(self, node, frame, children=None):
"""Dump the function def of a macro or call block."""
frame = self.function_scoping(node, frame, children)
# macros are delayed, they never require output checks
frame.require_output_check = False
args = frame.arguments
# XXX: this is an ugly fix for the loop nesting bug
# (tests.test_old_bugs.test_loop_call_bug). This works around
        # an identifier nesting problem we have in general. It's just more
# likely to happen in loops which is why we work around it. The
# real solution would be "nonlocal" all the identifiers that are
# leaking into a new python frame and might be used both unassigned
# and assigned.
if 'loop' in frame.identifiers.declared:
args = args + ['l_loop=l_loop']
self.writeline('def macro(%s):' % ', '.join(args), node)
self.indent()
self.buffer(frame)
self.pull_locals(frame)
self.blockvisit(node.body, frame)
self.return_buffer_contents(frame)
self.outdent()
return frame
def macro_def(self, node, frame):
"""Dump the macro definition for the def created by macro_body."""
arg_tuple = ', '.join(repr(x.name) for x in node.args)
name = getattr(node, 'name', None)
if len(node.args) == 1:
arg_tuple += ','
self.write('Macro(environment, macro, %r, (%s), (' %
(name, arg_tuple))
for arg in node.defaults:
self.visit(arg, frame)
self.write(', ')
self.write('), %r, %r, %r)' % (
bool(frame.accesses_kwargs),
bool(frame.accesses_varargs),
bool(frame.accesses_caller)
))
def position(self, node):
"""Return a human readable position for the node."""
rv = 'line %d' % node.lineno
if self.name is not None:
rv += ' in ' + repr(self.name)
return rv
# -- Statement Visitors
def visit_Template(self, node, frame=None):
assert frame is None, 'no root frame allowed'
eval_ctx = EvalContext(self.environment, self.name)
from jinja2.runtime import __all__ as exported
self.writeline('from __future__ import division')
self.writeline('from jinja2.runtime import ' + ', '.join(exported))
if not unoptimize_before_dead_code:
self.writeline('dummy = lambda *x: None')
# if we want a deferred initialization we cannot move the
# environment into a local name
envenv = not self.defer_init and ', environment=environment' or ''
# do we have an extends tag at all? If not, we can save some
# overhead by just not processing any inheritance code.
have_extends = node.find(nodes.Extends) is not None
# find all blocks
for block in node.find_all(nodes.Block):
if block.name in self.blocks:
self.fail('block %r defined twice' % block.name, block.lineno)
self.blocks[block.name] = block
# find all imports and import them
for import_ in node.find_all(nodes.ImportedName):
if import_.importname not in self.import_aliases:
imp = import_.importname
self.import_aliases[imp] = alias = self.temporary_identifier()
if '.' in imp:
module, obj = imp.rsplit('.', 1)
self.writeline('from %s import %s as %s' %
(module, obj, alias))
else:
self.writeline('import %s as %s' % (imp, alias))
# add the load name
self.writeline('name = %r' % self.name)
# generate the root render function.
self.writeline('def root(context%s):' % envenv, extra=1)
# process the root
frame = Frame(eval_ctx)
frame.inspect(node.body)
frame.toplevel = frame.rootlevel = True
frame.require_output_check = have_extends and not self.has_known_extends
self.indent()
if have_extends:
self.writeline('parent_template = None')
if 'self' in find_undeclared(node.body, ('self',)):
frame.identifiers.add_special('self')
self.writeline('l_self = TemplateReference(context)')
self.pull_locals(frame)
self.pull_dependencies(node.body)
self.blockvisit(node.body, frame)
self.outdent()
# make sure that the parent root is called.
if have_extends:
if not self.has_known_extends:
self.indent()
self.writeline('if parent_template is not None:')
self.indent()
self.writeline('for event in parent_template.'
'root_render_func(context):')
self.indent()
self.writeline('yield event')
self.outdent(2 + (not self.has_known_extends))
# at this point we now have the blocks collected and can visit them too.
for name, block in self.blocks.iteritems():
block_frame = Frame(eval_ctx)
block_frame.inspect(block.body)
block_frame.block = name
self.writeline('def block_%s(context%s):' % (name, envenv),
block, 1)
self.indent()
undeclared = find_undeclared(block.body, ('self', 'super'))
if 'self' in undeclared:
block_frame.identifiers.add_special('self')
self.writeline('l_self = TemplateReference(context)')
if 'super' in undeclared:
block_frame.identifiers.add_special('super')
self.writeline('l_super = context.super(%r, '
'block_%s)' % (name, name))
self.pull_locals(block_frame)
self.pull_dependencies(block.body)
self.blockvisit(block.body, block_frame)
self.outdent()
self.writeline('blocks = {%s}' % ', '.join('%r: block_%s' % (x, x)
for x in self.blocks),
extra=1)
# add a function that returns the debug info
self.writeline('debug_info = %r' % '&'.join('%s=%s' % x for x
in self.debug_info))
def visit_Block(self, node, frame):
"""Call a block and register it for the template."""
level = 1
if frame.toplevel:
# if we know that we are a child template, there is no need to
# check if we are one
if self.has_known_extends:
return
if self.extends_so_far > 0:
self.writeline('if parent_template is None:')
self.indent()
level += 1
context = node.scoped and 'context.derived(locals())' or 'context'
self.writeline('for event in context.blocks[%r][0](%s):' % (
node.name, context), node)
self.indent()
self.simple_write('event', frame)
self.outdent(level)
def visit_Extends(self, node, frame):
"""Calls the extender."""
if not frame.toplevel:
self.fail('cannot use extend from a non top-level scope',
node.lineno)
# if the number of extends statements in general is zero so
# far, we don't have to add a check if something extended
# the template before this one.
if self.extends_so_far > 0:
# if we have a known extends we just add a template runtime
# error into the generated code. We could catch that at compile
# time too, but i welcome it not to confuse users by throwing the
# same error at different times just "because we can".
if not self.has_known_extends:
self.writeline('if parent_template is not None:')
self.indent()
self.writeline('raise TemplateRuntimeError(%r)' %
'extended multiple times')
self.outdent()
# if we have a known extends already we don't need that code here
# as we know that the template execution will end here.
if self.has_known_extends:
raise CompilerExit()
self.writeline('parent_template = environment.get_template(', node)
self.visit(node.template, frame)
self.write(', %r)' % self.name)
self.writeline('for name, parent_block in parent_template.'
'blocks.%s():' % dict_item_iter)
self.indent()
self.writeline('context.blocks.setdefault(name, []).'
'append(parent_block)')
self.outdent()
# if this extends statement was in the root level we can take
# advantage of that information and simplify the generated code
# in the top level from this point onwards
if frame.rootlevel:
self.has_known_extends = True
# and now we have one more
self.extends_so_far += 1
def visit_Include(self, node, frame):
"""Handles includes."""
if node.with_context:
self.unoptimize_scope(frame)
if node.ignore_missing:
self.writeline('try:')
self.indent()
func_name = 'get_or_select_template'
if isinstance(node.template, nodes.Const):
if isinstance(node.template.value, basestring):
func_name = 'get_template'
elif isinstance(node.template.value, (tuple, list)):
func_name = 'select_template'
elif isinstance(node.template, (nodes.Tuple, nodes.List)):
func_name = 'select_template'
self.writeline('template = environment.%s(' % func_name, node)
self.visit(node.template, frame)
self.write(', %r)' % self.name)
if node.ignore_missing:
self.outdent()
self.writeline('except TemplateNotFound:')
self.indent()
self.writeline('pass')
self.outdent()
self.writeline('else:')
self.indent()
if node.with_context:
self.writeline('for event in template.root_render_func('
'template.new_context(context.parent, True, '
'locals())):')
else:
self.writeline('for event in template.module._body_stream:')
self.indent()
self.simple_write('event', frame)
self.outdent()
if node.ignore_missing:
self.outdent()
def visit_Import(self, node, frame):
"""Visit regular imports."""
if node.with_context:
self.unoptimize_scope(frame)
self.writeline('l_%s = ' % node.target, node)
if frame.toplevel:
self.write('context.vars[%r] = ' % node.target)
self.write('environment.get_template(')
self.visit(node.template, frame)
self.write(', %r).' % self.name)
if node.with_context:
self.write('make_module(context.parent, True, locals())')
else:
self.write('module')
if frame.toplevel and not node.target.startswith('_'):
self.writeline('context.exported_vars.discard(%r)' % node.target)
frame.assigned_names.add(node.target)
def visit_FromImport(self, node, frame):
"""Visit named imports."""
self.newline(node)
self.write('included_template = environment.get_template(')
self.visit(node.template, frame)
self.write(', %r).' % self.name)
if node.with_context:
self.write('make_module(context.parent, True)')
else:
self.write('module')
var_names = []
discarded_names = []
for name in node.names:
if isinstance(name, tuple):
name, alias = name
else:
alias = name
self.writeline('l_%s = getattr(included_template, '
'%r, missing)' % (alias, name))
self.writeline('if l_%s is missing:' % alias)
self.indent()
self.writeline('l_%s = environment.undefined(%r %% '
'included_template.__name__, '
'name=%r)' %
(alias, 'the template %%r (imported on %s) does '
'not export the requested name %s' % (
self.position(node),
repr(name)
), name))
self.outdent()
if frame.toplevel:
var_names.append(alias)
if not alias.startswith('_'):
discarded_names.append(alias)
frame.assigned_names.add(alias)
if var_names:
if len(var_names) == 1:
name = var_names[0]
self.writeline('context.vars[%r] = l_%s' % (name, name))
else:
self.writeline('context.vars.update({%s})' % ', '.join(
'%r: l_%s' % (name, name) for name in var_names
))
if discarded_names:
if len(discarded_names) == 1:
self.writeline('context.exported_vars.discard(%r)' %
discarded_names[0])
else:
self.writeline('context.exported_vars.difference_'
'update((%s))' % ', '.join(map(repr, discarded_names)))
def visit_For(self, node, frame):
# when calculating the nodes for the inner frame we have to exclude
# the iterator contents from it
children = node.iter_child_nodes(exclude=('iter',))
if node.recursive:
loop_frame = self.function_scoping(node, frame, children,
find_special=False)
else:
loop_frame = frame.inner()
loop_frame.inspect(children)
# try to figure out if we have an extended loop. An extended loop
# is necessary if the loop is in recursive mode if the special loop
# variable is accessed in the body.
extended_loop = node.recursive or 'loop' in \
find_undeclared(node.iter_child_nodes(
only=('body',)), ('loop',))
        # if we don't have a recursive loop we have to find the shadowed
# variables at that point. Because loops can be nested but the loop
# variable is a special one we have to enforce aliasing for it.
if not node.recursive:
aliases = self.push_scope(loop_frame, ('loop',))
# otherwise we set up a buffer and add a function def
else:
self.writeline('def loop(reciter, loop_render_func):', node)
self.indent()
self.buffer(loop_frame)
aliases = {}
# make sure the loop variable is a special one and raise a template
# assertion error if a loop tries to write to loop
if extended_loop:
loop_frame.identifiers.add_special('loop')
for name in node.find_all(nodes.Name):
if name.ctx == 'store' and name.name == 'loop':
self.fail('Can\'t assign to special loop variable '
'in for-loop target', name.lineno)
self.pull_locals(loop_frame)
if node.else_:
iteration_indicator = self.temporary_identifier()
self.writeline('%s = 1' % iteration_indicator)
# Create a fake parent loop if the else or test section of a
# loop is accessing the special loop variable and no parent loop
# exists.
if 'loop' not in aliases and 'loop' in find_undeclared(
node.iter_child_nodes(only=('else_', 'test')), ('loop',)):
self.writeline("l_loop = environment.undefined(%r, name='loop')" %
("'loop' is undefined. the filter section of a loop as well "
"as the else block don't have access to the special 'loop'"
" variable of the current loop. Because there is no parent "
"loop it's undefined. Happened in loop on %s" %
self.position(node)))
self.writeline('for ', node)
self.visit(node.target, loop_frame)
self.write(extended_loop and ', l_loop in LoopContext(' or ' in ')
        # if we have an extended loop and a node test, we filter in the
# "outer frame".
if extended_loop and node.test is not None:
self.write('(')
self.visit(node.target, loop_frame)
self.write(' for ')
self.visit(node.target, loop_frame)
self.write(' in ')
if node.recursive:
self.write('reciter')
else:
self.visit(node.iter, loop_frame)
self.write(' if (')
test_frame = loop_frame.copy()
self.visit(node.test, test_frame)
self.write('))')
elif node.recursive:
self.write('reciter')
else:
self.visit(node.iter, loop_frame)
if node.recursive:
self.write(', recurse=loop_render_func):')
else:
self.write(extended_loop and '):' or ':')
# tests in not extended loops become a continue
if not extended_loop and node.test is not None:
self.indent()
self.writeline('if not ')
self.visit(node.test, loop_frame)
self.write(':')
self.indent()
self.writeline('continue')
self.outdent(2)
self.indent()
self.blockvisit(node.body, loop_frame)
if node.else_:
self.writeline('%s = 0' % iteration_indicator)
self.outdent()
if node.else_:
self.writeline('if %s:' % iteration_indicator)
self.indent()
self.blockvisit(node.else_, loop_frame)
self.outdent()
# reset the aliases if there are any.
if not node.recursive:
self.pop_scope(aliases, loop_frame)
# if the node was recursive we have to return the buffer contents
# and start the iteration code
if node.recursive:
self.return_buffer_contents(loop_frame)
self.outdent()
self.start_write(frame, node)
self.write('loop(')
self.visit(node.iter, frame)
self.write(', loop)')
self.end_write(frame)
def visit_If(self, node, frame):
if_frame = frame.soft()
self.writeline('if ', node)
self.visit(node.test, if_frame)
self.write(':')
self.indent()
self.blockvisit(node.body, if_frame)
self.outdent()
if node.else_:
self.writeline('else:')
self.indent()
self.blockvisit(node.else_, if_frame)
self.outdent()
def visit_Macro(self, node, frame):
macro_frame = self.macro_body(node, frame)
self.newline()
if frame.toplevel:
if not node.name.startswith('_'):
self.write('context.exported_vars.add(%r)' % node.name)
self.writeline('context.vars[%r] = ' % node.name)
self.write('l_%s = ' % node.name)
self.macro_def(node, macro_frame)
frame.assigned_names.add(node.name)
def visit_CallBlock(self, node, frame):
children = node.iter_child_nodes(exclude=('call',))
call_frame = self.macro_body(node, frame, children)
self.writeline('caller = ')
self.macro_def(node, call_frame)
self.start_write(frame, node)
self.visit_Call(node.call, call_frame, forward_caller=True)
self.end_write(frame)
def visit_FilterBlock(self, node, frame):
filter_frame = frame.inner()
filter_frame.inspect(node.iter_child_nodes())
aliases = self.push_scope(filter_frame)
self.pull_locals(filter_frame)
self.buffer(filter_frame)
self.blockvisit(node.body, filter_frame)
self.start_write(frame, node)
self.visit_Filter(node.filter, filter_frame)
self.end_write(frame)
self.pop_scope(aliases, filter_frame)
def visit_ExprStmt(self, node, frame):
self.newline(node)
self.visit(node.node, frame)
def visit_Output(self, node, frame):
# if we have a known extends statement, we don't output anything
# if we are in a require_output_check section
if self.has_known_extends and frame.require_output_check:
return
if self.environment.finalize:
finalize = lambda x: unicode(self.environment.finalize(x))
else:
finalize = unicode
# if we are inside a frame that requires output checking, we do so
outdent_later = False
if frame.require_output_check:
self.writeline('if parent_template is None:')
self.indent()
outdent_later = True
# try to evaluate as many chunks as possible into a static
# string at compile time.
body = []
for child in node.nodes:
try:
const = child.as_const(frame.eval_ctx)
except nodes.Impossible:
body.append(child)
continue
            # the frame can't be volatile here, because otherwise the
# as_const() function would raise an Impossible exception
# at that point.
try:
if frame.eval_ctx.autoescape:
if hasattr(const, '__html__'):
const = const.__html__()
else:
const = escape(const)
const = finalize(const)
except Exception:
# if something goes wrong here we evaluate the node
# at runtime for easier debugging
body.append(child)
continue
if body and isinstance(body[-1], list):
body[-1].append(const)
else:
body.append([const])
# if we have less than 3 nodes or a buffer we yield or extend/append
if len(body) < 3 or frame.buffer is not None:
if frame.buffer is not None:
# for one item we append, for more we extend
if len(body) == 1:
self.writeline('%s.append(' % frame.buffer)
else:
self.writeline('%s.extend((' % frame.buffer)
self.indent()
for item in body:
if isinstance(item, list):
val = repr(concat(item))
if frame.buffer is None:
self.writeline('yield ' + val)
else:
self.writeline(val + ', ')
else:
if frame.buffer is None:
self.writeline('yield ', item)
else:
self.newline(item)
close = 1
if frame.eval_ctx.volatile:
self.write('(context.eval_ctx.autoescape and'
' escape or to_string)(')
elif frame.eval_ctx.autoescape:
self.write('escape(')
else:
self.write('to_string(')
if self.environment.finalize is not None:
self.write('environment.finalize(')
close += 1
self.visit(item, frame)
self.write(')' * close)
if frame.buffer is not None:
self.write(', ')
if frame.buffer is not None:
# close the open parentheses
self.outdent()
self.writeline(len(body) == 1 and ')' or '))')
# otherwise we create a format string as this is faster in that case
else:
format = []
arguments = []
for item in body:
if isinstance(item, list):
format.append(concat(item).replace('%', '%%'))
else:
format.append('%s')
arguments.append(item)
self.writeline('yield ')
self.write(repr(concat(format)) + ' % (')
idx = -1
self.indent()
for argument in arguments:
self.newline(argument)
close = 0
if frame.eval_ctx.volatile:
self.write('(context.eval_ctx.autoescape and'
' escape or to_string)(')
close += 1
elif frame.eval_ctx.autoescape:
self.write('escape(')
close += 1
if self.environment.finalize is not None:
self.write('environment.finalize(')
close += 1
self.visit(argument, frame)
self.write(')' * close + ', ')
self.outdent()
self.writeline(')')
if outdent_later:
self.outdent()
def visit_Assign(self, node, frame):
self.newline(node)
# toplevel assignments however go into the local namespace and
# the current template's context. We create a copy of the frame
# here and add a set so that the Name visitor can add the assigned
# names here.
if frame.toplevel:
assignment_frame = frame.copy()
assignment_frame.toplevel_assignments = set()
else:
assignment_frame = frame
self.visit(node.target, assignment_frame)
self.write(' = ')
self.visit(node.node, frame)
# make sure toplevel assignments are added to the context.
if frame.toplevel:
public_names = [x for x in assignment_frame.toplevel_assignments
if not x.startswith('_')]
if len(assignment_frame.toplevel_assignments) == 1:
name = next(iter(assignment_frame.toplevel_assignments))
self.writeline('context.vars[%r] = l_%s' % (name, name))
else:
self.writeline('context.vars.update({')
for idx, name in enumerate(assignment_frame.toplevel_assignments):
if idx:
self.write(', ')
self.write('%r: l_%s' % (name, name))
self.write('})')
if public_names:
if len(public_names) == 1:
self.writeline('context.exported_vars.add(%r)' %
public_names[0])
else:
self.writeline('context.exported_vars.update((%s))' %
', '.join(map(repr, public_names)))
# -- Expression Visitors
def visit_Name(self, node, frame):
if node.ctx == 'store' and frame.toplevel:
frame.toplevel_assignments.add(node.name)
self.write('l_' + node.name)
frame.assigned_names.add(node.name)
def visit_Const(self, node, frame):
val = node.value
if isinstance(val, float):
self.write(str(val))
else:
self.write(repr(val))
def visit_TemplateData(self, node, frame):
try:
self.write(repr(node.as_const(frame.eval_ctx)))
except nodes.Impossible:
self.write('(context.eval_ctx.autoescape and Markup or identity)(%r)'
% node.data)
def visit_Tuple(self, node, frame):
self.write('(')
idx = -1
for idx, item in enumerate(node.items):
if idx:
self.write(', ')
self.visit(item, frame)
self.write(idx == 0 and ',)' or ')')
def visit_List(self, node, frame):
self.write('[')
for idx, item in enumerate(node.items):
if idx:
self.write(', ')
self.visit(item, frame)
self.write(']')
def visit_Dict(self, node, frame):
self.write('{')
for idx, item in enumerate(node.items):
if idx:
self.write(', ')
self.visit(item.key, frame)
self.write(': ')
self.visit(item.value, frame)
self.write('}')
def binop(operator, interceptable=True):
def visitor(self, node, frame):
if self.environment.sandboxed and \
operator in self.environment.intercepted_binops:
self.write('environment.call_binop(context, %r, ' % operator)
self.visit(node.left, frame)
self.write(', ')
self.visit(node.right, frame)
else:
self.write('(')
self.visit(node.left, frame)
self.write(' %s ' % operator)
self.visit(node.right, frame)
self.write(')')
return visitor
def uaop(operator, interceptable=True):
def visitor(self, node, frame):
if self.environment.sandboxed and \
operator in self.environment.intercepted_unops:
self.write('environment.call_unop(context, %r, ' % operator)
self.visit(node.node, frame)
else:
self.write('(' + operator)
self.visit(node.node, frame)
self.write(')')
return visitor
visit_Add = binop('+')
visit_Sub = binop('-')
visit_Mul = binop('*')
visit_Div = binop('/')
visit_FloorDiv = binop('//')
visit_Pow = binop('**')
visit_Mod = binop('%')
visit_And = binop('and', interceptable=False)
visit_Or = binop('or', interceptable=False)
visit_Pos = uaop('+')
visit_Neg = uaop('-')
visit_Not = uaop('not ', interceptable=False)
del binop, uaop
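    # Illustrative note (not part of the original source): the binop/uaop
    # factories above generate these visitor methods, so visit_Add normally
    # writes ``(left + right)`` into the stream and switches to
    # ``environment.call_binop(context, '+', left, right)`` when a sandboxed
    # environment intercepts that operator.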
def visit_Concat(self, node, frame):
if frame.eval_ctx.volatile:
func_name = '(context.eval_ctx.volatile and' \
' markup_join or unicode_join)'
elif frame.eval_ctx.autoescape:
func_name = 'markup_join'
else:
func_name = 'unicode_join'
self.write('%s((' % func_name)
for arg in node.nodes:
self.visit(arg, frame)
self.write(', ')
self.write('))')
def visit_Compare(self, node, frame):
self.visit(node.expr, frame)
for op in node.ops:
self.visit(op, frame)
def visit_Operand(self, node, frame):
self.write(' %s ' % operators[node.op])
self.visit(node.expr, frame)
def visit_Getattr(self, node, frame):
self.write('environment.getattr(')
self.visit(node.node, frame)
self.write(', %r)' % node.attr)
def visit_Getitem(self, node, frame):
# slices bypass the environment getitem method.
if isinstance(node.arg, nodes.Slice):
self.visit(node.node, frame)
self.write('[')
self.visit(node.arg, frame)
self.write(']')
else:
self.write('environment.getitem(')
self.visit(node.node, frame)
self.write(', ')
self.visit(node.arg, frame)
self.write(')')
def visit_Slice(self, node, frame):
if node.start is not None:
self.visit(node.start, frame)
self.write(':')
if node.stop is not None:
self.visit(node.stop, frame)
if node.step is not None:
self.write(':')
self.visit(node.step, frame)
def visit_Filter(self, node, frame):
self.write(self.filters[node.name] + '(')
func = self.environment.filters.get(node.name)
if func is None:
self.fail('no filter named %r' % node.name, node.lineno)
if getattr(func, 'contextfilter', False):
self.write('context, ')
elif getattr(func, 'evalcontextfilter', False):
self.write('context.eval_ctx, ')
elif getattr(func, 'environmentfilter', False):
self.write('environment, ')
# if the filter node is None we are inside a filter block
# and want to write to the current buffer
if node.node is not None:
self.visit(node.node, frame)
elif frame.eval_ctx.volatile:
self.write('(context.eval_ctx.autoescape and'
' Markup(concat(%s)) or concat(%s))' %
(frame.buffer, frame.buffer))
elif frame.eval_ctx.autoescape:
self.write('Markup(concat(%s))' % frame.buffer)
else:
self.write('concat(%s)' % frame.buffer)
self.signature(node, frame)
self.write(')')
def visit_Test(self, node, frame):
self.write(self.tests[node.name] + '(')
if node.name not in self.environment.tests:
self.fail('no test named %r' % node.name, node.lineno)
self.visit(node.node, frame)
self.signature(node, frame)
self.write(')')
def visit_CondExpr(self, node, frame):
def write_expr2():
if node.expr2 is not None:
return self.visit(node.expr2, frame)
self.write('environment.undefined(%r)' % ('the inline if-'
'expression on %s evaluated to false and '
'no else section was defined.' % self.position(node)))
if not have_condexpr:
self.write('((')
self.visit(node.test, frame)
self.write(') and (')
self.visit(node.expr1, frame)
self.write(',) or (')
write_expr2()
self.write(',))[0]')
else:
self.write('(')
self.visit(node.expr1, frame)
self.write(' if ')
self.visit(node.test, frame)
self.write(' else ')
write_expr2()
self.write(')')
def visit_Call(self, node, frame, forward_caller=False):
if self.environment.sandboxed:
self.write('environment.call(context, ')
else:
self.write('context.call(')
self.visit(node.node, frame)
extra_kwargs = forward_caller and {'caller': 'caller'} or None
self.signature(node, frame, extra_kwargs)
self.write(')')
def visit_Keyword(self, node, frame):
self.write(node.key + '=')
self.visit(node.value, frame)
# -- Unused nodes for extensions
def visit_MarkSafe(self, node, frame):
self.write('Markup(')
self.visit(node.expr, frame)
self.write(')')
def visit_MarkSafeIfAutoescape(self, node, frame):
self.write('(context.eval_ctx.autoescape and Markup or identity)(')
self.visit(node.expr, frame)
self.write(')')
def visit_EnvironmentAttribute(self, node, frame):
self.write('environment.' + node.name)
def visit_ExtensionAttribute(self, node, frame):
self.write('environment.extensions[%r].%s' % (node.identifier, node.name))
def visit_ImportedName(self, node, frame):
self.write(self.import_aliases[node.importname])
def visit_InternalName(self, node, frame):
self.write(node.name)
def visit_ContextReference(self, node, frame):
self.write('context')
def visit_Continue(self, node, frame):
self.writeline('continue', node)
def visit_Break(self, node, frame):
self.writeline('break', node)
def visit_Scope(self, node, frame):
scope_frame = frame.inner()
scope_frame.inspect(node.iter_child_nodes())
aliases = self.push_scope(scope_frame)
self.pull_locals(scope_frame)
self.blockvisit(node.body, scope_frame)
self.pop_scope(aliases, scope_frame)
def visit_EvalContextModifier(self, node, frame):
for keyword in node.options:
self.writeline('context.eval_ctx.%s = ' % keyword.key)
self.visit(keyword.value, frame)
try:
val = keyword.value.as_const(frame.eval_ctx)
except nodes.Impossible:
frame.eval_ctx.volatile = True
else:
setattr(frame.eval_ctx, keyword.key, val)
def visit_ScopedEvalContextModifier(self, node, frame):
old_ctx_name = self.temporary_identifier()
safed_ctx = frame.eval_ctx.save()
self.writeline('%s = context.eval_ctx.save()' % old_ctx_name)
self.visit_EvalContextModifier(node, frame)
for child in node.body:
self.visit(child, frame)
frame.eval_ctx.revert(safed_ctx)
self.writeline('context.eval_ctx.revert(%s)' % old_ctx_name)
| bsd-3-clause | -4,756,405,537,260,084,000 | 36.537295 | 86 | 0.558474 | false |
0k/OpenUpgrade | addons/l10n_ar/__init__.py | 2120 | 1456 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2011 Cubic ERP - Teradata SAC. (http://cubicerp.com).
#
# WARNING: This program as such is intended to be used by professional
#    programmers who take the whole responsibility of assessing all potential
#    consequences resulting from its eventual inadequacies and bugs.
#    End users who are looking for a ready-to-use solution with commercial
#    guarantees and support are strongly advised to contract a Free Software
#    Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -6,307,122,977,793,663,000 | 44.5 | 80 | 0.684753 | false |
Nick-OpusVL/odoo | openerp/tools/translate.py | 62 | 44976 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import codecs
import csv
import fnmatch
import inspect
import locale
import os
import openerp.sql_db as sql_db
import re
import logging
import tarfile
import tempfile
import threading
from babel.messages import extract
from collections import defaultdict
from datetime import datetime
from lxml import etree
from os.path import join
import config
import misc
from misc import SKIPPED_ELEMENT_TYPES
import osutil
import openerp
from openerp import SUPERUSER_ID
_logger = logging.getLogger(__name__)
# used to notify web client that these translations should be loaded in the UI
WEB_TRANSLATION_COMMENT = "openerp-web"
SKIPPED_ELEMENTS = ('script', 'style')
_LOCALE2WIN32 = {
'af_ZA': 'Afrikaans_South Africa',
'sq_AL': 'Albanian_Albania',
'ar_SA': 'Arabic_Saudi Arabia',
'eu_ES': 'Basque_Spain',
'be_BY': 'Belarusian_Belarus',
'bs_BA': 'Bosnian_Bosnia and Herzegovina',
'bg_BG': 'Bulgarian_Bulgaria',
'ca_ES': 'Catalan_Spain',
'hr_HR': 'Croatian_Croatia',
'zh_CN': 'Chinese_China',
'zh_TW': 'Chinese_Taiwan',
'cs_CZ': 'Czech_Czech Republic',
'da_DK': 'Danish_Denmark',
'nl_NL': 'Dutch_Netherlands',
'et_EE': 'Estonian_Estonia',
'fa_IR': 'Farsi_Iran',
'ph_PH': 'Filipino_Philippines',
'fi_FI': 'Finnish_Finland',
'fr_FR': 'French_France',
'fr_BE': 'French_France',
'fr_CH': 'French_France',
'fr_CA': 'French_France',
'ga': 'Scottish Gaelic',
'gl_ES': 'Galician_Spain',
'ka_GE': 'Georgian_Georgia',
'de_DE': 'German_Germany',
'el_GR': 'Greek_Greece',
'gu': 'Gujarati_India',
'he_IL': 'Hebrew_Israel',
'hi_IN': 'Hindi',
'hu': 'Hungarian_Hungary',
'is_IS': 'Icelandic_Iceland',
'id_ID': 'Indonesian_indonesia',
'it_IT': 'Italian_Italy',
'ja_JP': 'Japanese_Japan',
'kn_IN': 'Kannada',
'km_KH': 'Khmer',
'ko_KR': 'Korean_Korea',
'lo_LA': 'Lao_Laos',
'lt_LT': 'Lithuanian_Lithuania',
'lat': 'Latvian_Latvia',
'ml_IN': 'Malayalam_India',
'mi_NZ': 'Maori',
'mn': 'Cyrillic_Mongolian',
'no_NO': 'Norwegian_Norway',
'nn_NO': 'Norwegian-Nynorsk_Norway',
'pl': 'Polish_Poland',
'pt_PT': 'Portuguese_Portugal',
'pt_BR': 'Portuguese_Brazil',
'ro_RO': 'Romanian_Romania',
'ru_RU': 'Russian_Russia',
'sr_CS': 'Serbian (Cyrillic)_Serbia and Montenegro',
'sk_SK': 'Slovak_Slovakia',
'sl_SI': 'Slovenian_Slovenia',
    # should find more specific locales for Spanish countries,
    # but better than nothing
'es_AR': 'Spanish_Spain',
'es_BO': 'Spanish_Spain',
'es_CL': 'Spanish_Spain',
'es_CO': 'Spanish_Spain',
'es_CR': 'Spanish_Spain',
'es_DO': 'Spanish_Spain',
'es_EC': 'Spanish_Spain',
'es_ES': 'Spanish_Spain',
'es_GT': 'Spanish_Spain',
'es_HN': 'Spanish_Spain',
'es_MX': 'Spanish_Spain',
'es_NI': 'Spanish_Spain',
'es_PA': 'Spanish_Spain',
'es_PE': 'Spanish_Spain',
'es_PR': 'Spanish_Spain',
'es_PY': 'Spanish_Spain',
'es_SV': 'Spanish_Spain',
'es_UY': 'Spanish_Spain',
'es_VE': 'Spanish_Spain',
'sv_SE': 'Swedish_Sweden',
'ta_IN': 'English_Australia',
'th_TH': 'Thai_Thailand',
'tr_TR': 'Turkish_Turkey',
'uk_UA': 'Ukrainian_Ukraine',
'vi_VN': 'Vietnamese_Viet Nam',
'tlh_TLH': 'Klingon',
}
# These are not all English small words, just those that could potentially be isolated within views
ENGLISH_SMALL_WORDS = set("as at by do go if in me no of ok on or to up us we".split())
class UNIX_LINE_TERMINATOR(csv.excel):
lineterminator = '\n'
csv.register_dialect("UNIX", UNIX_LINE_TERMINATOR)
#
# Warning: better use self.pool.get('ir.translation')._get_source if you can
#
def translate(cr, name, source_type, lang, source=None):
if source and name:
cr.execute('select value from ir_translation where lang=%s and type=%s and name=%s and src=%s', (lang, source_type, str(name), source))
elif name:
cr.execute('select value from ir_translation where lang=%s and type=%s and name=%s', (lang, source_type, str(name)))
elif source:
cr.execute('select value from ir_translation where lang=%s and type=%s and src=%s', (lang, source_type, source))
res_trans = cr.fetchone()
res = res_trans and res_trans[0] or False
return res
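# Illustrative sketch (an assumption, not part of the original module): a direct
# lookup of a stored field-label translation; the record name and language below are
# example values only.
def _example_translate_lookup(cr):
    # Returns the French label of res.partner's "Name" field, or False if missing.
    return translate(cr, 'res.partner,name', 'field', 'fr_FR', source='Name')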
class GettextAlias(object):
def _get_db(self):
# find current DB based on thread/worker db name (see netsvc)
db_name = getattr(threading.currentThread(), 'dbname', None)
if db_name:
return sql_db.db_connect(db_name)
def _get_cr(self, frame, allow_create=True):
# try, in order: cr, cursor, self.env.cr, self.cr,
# request.env.cr
if 'cr' in frame.f_locals:
return frame.f_locals['cr'], False
if 'cursor' in frame.f_locals:
return frame.f_locals['cursor'], False
s = frame.f_locals.get('self')
if hasattr(s, 'env'):
return s.env.cr, False
if hasattr(s, 'cr'):
return s.cr, False
try:
from openerp.http import request
return request.env.cr, False
except RuntimeError:
pass
if allow_create:
# create a new cursor
db = self._get_db()
if db is not None:
return db.cursor(), True
return None, False
def _get_uid(self, frame):
# try, in order: uid, user, self.env.uid
if 'uid' in frame.f_locals:
return frame.f_locals['uid']
if 'user' in frame.f_locals:
return int(frame.f_locals['user']) # user may be a record
s = frame.f_locals.get('self')
return s.env.uid
def _get_lang(self, frame):
# try, in order: context.get('lang'), kwargs['context'].get('lang'),
# self.env.lang, self.localcontext.get('lang'), request.env.lang
lang = None
if frame.f_locals.get('context'):
lang = frame.f_locals['context'].get('lang')
if not lang:
kwargs = frame.f_locals.get('kwargs', {})
if kwargs.get('context'):
lang = kwargs['context'].get('lang')
if not lang:
s = frame.f_locals.get('self')
if hasattr(s, 'env'):
lang = s.env.lang
if not lang:
if hasattr(s, 'localcontext'):
lang = s.localcontext.get('lang')
if not lang:
try:
from openerp.http import request
lang = request.env.lang
except RuntimeError:
pass
if not lang:
# Last resort: attempt to guess the language of the user
# Pitfall: some operations are performed in sudo mode, and we
            #          don't know the original uid, so the language may
# be wrong when the admin language differs.
pool = getattr(s, 'pool', None)
(cr, dummy) = self._get_cr(frame, allow_create=False)
uid = self._get_uid(frame)
if pool and cr and uid:
lang = pool['res.users'].context_get(cr, uid)['lang']
return lang
def __call__(self, source):
res = source
cr = None
is_new_cr = False
try:
frame = inspect.currentframe()
if frame is None:
return source
frame = frame.f_back
if not frame:
return source
lang = self._get_lang(frame)
if lang:
cr, is_new_cr = self._get_cr(frame)
if cr:
# Try to use ir.translation to benefit from global cache if possible
registry = openerp.registry(cr.dbname)
res = registry['ir.translation']._get_source(cr, SUPERUSER_ID, None, ('code','sql_constraint'), lang, source)
else:
_logger.debug('no context cursor detected, skipping translation for "%r"', source)
else:
_logger.debug('no translation language detected, skipping translation for "%r" ', source)
except Exception:
_logger.debug('translation went wrong for "%r", skipped', source)
# if so, double-check the root/base translations filenames
finally:
if cr and is_new_cr:
cr.close()
return res
_ = GettextAlias()
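# Illustrative note (an assumption, not part of the original module): `_` is meant to
# be called from model methods or report code, where a cursor and the user's language
# can be found on the calling frame, e.g. `message = _('Too many arguments')`; when no
# cursor or language can be located it simply returns the source string unchanged.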
def quote(s):
"""Returns quoted PO term string, with special PO characters escaped"""
assert r"\n" not in s, "Translation terms may not include escaped newlines ('\\n'), please use only literal newlines! (in '%s')" % s
return '"%s"' % s.replace('\\','\\\\') \
.replace('"','\\"') \
.replace('\n', '\\n"\n"')
re_escaped_char = re.compile(r"(\\.)")
re_escaped_replacements = {'n': '\n', }
def _sub_replacement(match_obj):
return re_escaped_replacements.get(match_obj.group(1)[1], match_obj.group(1)[1])
def unquote(str):
"""Returns unquoted PO term string, with special PO characters unescaped"""
return re_escaped_char.sub(_sub_replacement, str[1:-1])
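# Illustrative sketch (an assumption, not part of the original module): quote() spreads
# embedded newlines over PO continuation lines, and the PO reader below unquotes each
# line separately before concatenating the pieces.
def _example_po_quoting_roundtrip():
    quoted = quote('Hello\nworld')          # '"Hello\\n"\n"world"'
    return ''.join(unquote(l) for l in quoted.split('\n'))   # back to 'Hello\nworld'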
# class to handle po files
class TinyPoFile(object):
def __init__(self, buffer):
self.buffer = buffer
def warn(self, msg, *args):
_logger.warning(msg, *args)
def __iter__(self):
self.buffer.seek(0)
self.lines = self._get_lines()
self.lines_count = len(self.lines)
self.first = True
self.extra_lines= []
return self
def _get_lines(self):
lines = self.buffer.readlines()
# remove the BOM (Byte Order Mark):
if len(lines):
lines[0] = unicode(lines[0], 'utf8').lstrip(unicode( codecs.BOM_UTF8, "utf8"))
lines.append('') # ensure that the file ends with at least an empty line
return lines
def cur_line(self):
return self.lines_count - len(self.lines)
def next(self):
trans_type = name = res_id = source = trad = None
if self.extra_lines:
trans_type, name, res_id, source, trad, comments = self.extra_lines.pop(0)
if not res_id:
res_id = '0'
else:
comments = []
targets = []
line = None
fuzzy = False
while not line:
if 0 == len(self.lines):
raise StopIteration()
line = self.lines.pop(0).strip()
while line.startswith('#'):
if line.startswith('#~ '):
break
if line.startswith('#.'):
line = line[2:].strip()
if not line.startswith('module:'):
comments.append(line)
elif line.startswith('#:'):
# Process the `reference` comments. Each line can specify
# multiple targets (e.g. model, view, code, selection,
# ...). For each target, we will return an additional
# entry.
for lpart in line[2:].strip().split(' '):
trans_info = lpart.strip().split(':',2)
if trans_info and len(trans_info) == 2:
# looks like the translation trans_type is missing, which is not
# unexpected because it is not a GetText standard. Default: 'code'
trans_info[:0] = ['code']
if trans_info and len(trans_info) == 3:
# this is a ref line holding the destination info (model, field, record)
targets.append(trans_info)
elif line.startswith('#,') and (line[2:].strip() == 'fuzzy'):
fuzzy = True
line = self.lines.pop(0).strip()
if not self.lines:
raise StopIteration()
while not line:
# allow empty lines between comments and msgid
line = self.lines.pop(0).strip()
if line.startswith('#~ '):
while line.startswith('#~ ') or not line.strip():
if 0 == len(self.lines):
raise StopIteration()
line = self.lines.pop(0)
# This has been a deprecated entry, don't return anything
return self.next()
if not line.startswith('msgid'):
raise Exception("malformed file: bad line: %s" % line)
source = unquote(line[6:])
line = self.lines.pop(0).strip()
if not source and self.first:
self.first = False
            # if the source is "" and it's the first msgid, it's the special
            # msgstr with the information about the translation and the
            # translator; we skip it
self.extra_lines = []
while line:
line = self.lines.pop(0).strip()
return self.next()
while not line.startswith('msgstr'):
if not line:
raise Exception('malformed file at %d'% self.cur_line())
source += unquote(line)
line = self.lines.pop(0).strip()
trad = unquote(line[7:])
line = self.lines.pop(0).strip()
while line:
trad += unquote(line)
line = self.lines.pop(0).strip()
if targets and not fuzzy:
# Use the first target for the current entry (returned at the
# end of this next() call), and keep the others to generate
# additional entries (returned the next next() calls).
trans_type, name, res_id = targets.pop(0)
for t, n, r in targets:
if t == trans_type == 'code': continue
self.extra_lines.append((t, n, r, source, trad, comments))
if name is None:
if not fuzzy:
self.warn('Missing "#:" formated comment at line %d for the following source:\n\t%s',
self.cur_line(), source[:30])
return self.next()
return trans_type, name, res_id, source, trad, '\n'.join(comments)
def write_infos(self, modules):
import openerp.release as release
self.buffer.write("# Translation of %(project)s.\n" \
"# This file contains the translation of the following modules:\n" \
"%(modules)s" \
"#\n" \
"msgid \"\"\n" \
"msgstr \"\"\n" \
'''"Project-Id-Version: %(project)s %(version)s\\n"\n''' \
'''"Report-Msgid-Bugs-To: \\n"\n''' \
'''"POT-Creation-Date: %(now)s\\n"\n''' \
'''"PO-Revision-Date: %(now)s\\n"\n''' \
'''"Last-Translator: <>\\n"\n''' \
'''"Language-Team: \\n"\n''' \
'''"MIME-Version: 1.0\\n"\n''' \
'''"Content-Type: text/plain; charset=UTF-8\\n"\n''' \
'''"Content-Transfer-Encoding: \\n"\n''' \
'''"Plural-Forms: \\n"\n''' \
"\n"
% { 'project': release.description,
'version': release.version,
'modules': reduce(lambda s, m: s + "#\t* %s\n" % m, modules, ""),
'now': datetime.utcnow().strftime('%Y-%m-%d %H:%M')+"+0000",
}
)
def write(self, modules, tnrs, source, trad, comments=None):
        plural = len(modules) > 1 and 's' or ''
        self.buffer.write("#. module%s: %s\n" % (plural, ', '.join(modules)))
if comments:
self.buffer.write(''.join(('#. %s\n' % c for c in comments)))
code = False
for typy, name, res_id in tnrs:
self.buffer.write("#: %s:%s:%s\n" % (typy, name, res_id))
if typy == 'code':
code = True
if code:
            # only strings in python code are python formatted
self.buffer.write("#, python-format\n")
if not isinstance(trad, unicode):
trad = unicode(trad, 'utf8')
if not isinstance(source, unicode):
source = unicode(source, 'utf8')
msg = "msgid %s\n" \
"msgstr %s\n\n" \
% (quote(source), quote(trad))
self.buffer.write(msg.encode('utf8'))
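# Illustrative sketch (an assumption, not part of the original module): iterating the
# entries of an existing PO file with the reader above; the path is a placeholder.
def _example_read_po_entries(path):
    with open(path) as handle:
        return [(src, value) for trans_type, name, res_id, src, value, comments
                in TinyPoFile(handle)]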
# Methods to export the translation file
def trans_export(lang, modules, buffer, format, cr):
def _process(format, modules, rows, buffer, lang):
if format == 'csv':
writer = csv.writer(buffer, 'UNIX')
# write header first
writer.writerow(("module","type","name","res_id","src","value"))
for module, type, name, res_id, src, trad, comments in rows:
# Comments are ignored by the CSV writer
writer.writerow((module, type, name, res_id, src, trad))
elif format == 'po':
writer = TinyPoFile(buffer)
writer.write_infos(modules)
# we now group the translations by source. That means one translation per source.
grouped_rows = {}
for module, type, name, res_id, src, trad, comments in rows:
row = grouped_rows.setdefault(src, {})
row.setdefault('modules', set()).add(module)
if not row.get('translation') and trad != src:
row['translation'] = trad
row.setdefault('tnrs', []).append((type, name, res_id))
row.setdefault('comments', set()).update(comments)
for src, row in sorted(grouped_rows.items()):
if not lang:
# translation template, so no translation value
row['translation'] = ''
elif not row.get('translation'):
row['translation'] = src
writer.write(row['modules'], row['tnrs'], src, row['translation'], row['comments'])
elif format == 'tgz':
rows_by_module = {}
for row in rows:
module = row[0]
rows_by_module.setdefault(module, []).append(row)
tmpdir = tempfile.mkdtemp()
for mod, modrows in rows_by_module.items():
tmpmoddir = join(tmpdir, mod, 'i18n')
os.makedirs(tmpmoddir)
pofilename = (lang if lang else mod) + ".po" + ('t' if not lang else '')
buf = file(join(tmpmoddir, pofilename), 'w')
_process('po', [mod], modrows, buf, lang)
buf.close()
tar = tarfile.open(fileobj=buffer, mode='w|gz')
tar.add(tmpdir, '')
tar.close()
else:
raise Exception(_('Unrecognized extension: must be one of '
'.csv, .po, or .tgz (received .%s).' % format))
translations = trans_generate(lang, modules, cr)
modules = set(t[0] for t in translations)
_process(format, modules, translations, buffer, lang)
del translations
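# Illustrative sketch (an assumption, not part of the original module): exporting the
# French terms of a single module to an in-memory PO buffer; the module name is an
# example value only.
def _example_export_module_po(cr):
    import cStringIO
    buf = cStringIO.StringIO()
    trans_export('fr_FR', ['sale'], buf, 'po', cr)
    return buf.getvalue()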
def trans_parse_xsl(de):
return list(set(trans_parse_xsl_aux(de, False)))
def trans_parse_xsl_aux(de, t):
res = []
for n in de:
t = t or n.get("t")
if t:
if isinstance(n, SKIPPED_ELEMENT_TYPES) or n.tag.startswith('{http://www.w3.org/1999/XSL/Transform}'):
continue
if n.text:
l = n.text.strip().replace('\n',' ')
if len(l):
res.append(l.encode("utf8"))
if n.tail:
l = n.tail.strip().replace('\n',' ')
if len(l):
res.append(l.encode("utf8"))
res.extend(trans_parse_xsl_aux(n, t))
return res
def trans_parse_rml(de):
res = []
for n in de:
for m in n:
if isinstance(m, SKIPPED_ELEMENT_TYPES) or not m.text:
continue
string_list = [s.replace('\n', ' ').strip() for s in re.split('\[\[.+?\]\]', m.text)]
for s in string_list:
if s:
res.append(s.encode("utf8"))
res.extend(trans_parse_rml(n))
return res
def _push(callback, term, source_line):
""" Sanity check before pushing translation terms """
term = (term or "").strip().encode('utf8')
# Avoid non-char tokens like ':' '...' '.00' etc.
if len(term) > 8 or any(x.isalpha() for x in term):
callback(term, source_line)
def trans_parse_view(element, callback):
""" Helper method to recursively walk an etree document representing a
regular view and call ``callback(term)`` for each translatable term
that is found in the document.
:param ElementTree element: root of etree document to extract terms from
:param callable callback: a callable in the form ``f(term, source_line)``,
that will be called for each extracted term.
"""
for el in element.iter():
if (not isinstance(el, SKIPPED_ELEMENT_TYPES)
and el.tag.lower() not in SKIPPED_ELEMENTS
and el.get("translation", '').strip() != "off"
and el.text):
_push(callback, el.text, el.sourceline)
if el.tail:
_push(callback, el.tail, el.sourceline)
for attr in ('string', 'help', 'sum', 'confirm', 'placeholder'):
value = el.get(attr)
if value:
_push(callback, value, el.sourceline)
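# Illustrative sketch (an assumption, not part of the original module): collecting the
# translatable terms of a tiny form view architecture with the helper above.
def _example_parse_view_terms():
    arch = etree.fromstring('<form string="Contact details"><field name="name"/></form>')
    terms = []
    trans_parse_view(arch, lambda term, lineno: terms.append(term))
    return terms    # ['Contact details']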
# tests whether an object is in a list of modules
def in_modules(object_name, modules):
if 'all' in modules:
return True
module_dict = {
'ir': 'base',
'res': 'base',
'workflow': 'base',
}
module = object_name.split('.')[0]
module = module_dict.get(module, module)
return module in modules
def _extract_translatable_qweb_terms(element, callback):
""" Helper method to walk an etree document representing
a QWeb template, and call ``callback(term)`` for each
translatable term that is found in the document.
:param etree._Element element: root of etree document to extract terms from
:param Callable callback: a callable in the form ``f(term, source_line)``,
that will be called for each extracted term.
"""
# not using elementTree.iterparse because we need to skip sub-trees in case
# the ancestor element had a reason to be skipped
for el in element:
if isinstance(el, SKIPPED_ELEMENT_TYPES): continue
if (el.tag.lower() not in SKIPPED_ELEMENTS
and "t-js" not in el.attrib
and not ("t-jquery" in el.attrib and "t-operation" not in el.attrib)
and el.get("t-translation", '').strip() != "off"):
_push(callback, el.text, el.sourceline)
for att in ('title', 'alt', 'label', 'placeholder'):
if att in el.attrib:
_push(callback, el.attrib[att], el.sourceline)
_extract_translatable_qweb_terms(el, callback)
_push(callback, el.tail, el.sourceline)
def babel_extract_qweb(fileobj, keywords, comment_tags, options):
"""Babel message extractor for qweb template files.
:param fileobj: the file-like object the messages should be extracted from
:param keywords: a list of keywords (i.e. function names) that should
be recognized as translation functions
:param comment_tags: a list of translator tags to search for and
include in the results
:param options: a dictionary of additional options (optional)
:return: an iterator over ``(lineno, funcname, message, comments)``
tuples
:rtype: Iterable
"""
result = []
def handle_text(text, lineno):
result.append((lineno, None, text, []))
tree = etree.parse(fileobj)
_extract_translatable_qweb_terms(tree.getroot(), handle_text)
return result
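# Illustrative sketch (an assumption, not part of the original module): running the
# extractor over a QWeb template held in memory.
def _example_extract_qweb_terms():
    from StringIO import StringIO
    template = StringIO('<templates><t t-name="hello"><p>Good morning</p></t></templates>')
    return [message for lineno, funcname, message, comments
            in babel_extract_qweb(template, [], [], {})]   # ['Good morning']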
def trans_generate(lang, modules, cr):
dbname = cr.dbname
registry = openerp.registry(dbname)
trans_obj = registry['ir.translation']
model_data_obj = registry['ir.model.data']
uid = 1
query = 'SELECT name, model, res_id, module' \
' FROM ir_model_data'
query_models = """SELECT m.id, m.model, imd.module
FROM ir_model AS m, ir_model_data AS imd
WHERE m.id = imd.res_id AND imd.model = 'ir.model' """
if 'all_installed' in modules:
query += ' WHERE module IN ( SELECT name FROM ir_module_module WHERE state = \'installed\') '
query_models += " AND imd.module in ( SELECT name FROM ir_module_module WHERE state = 'installed') "
query_param = None
if 'all' not in modules:
query += ' WHERE module IN %s'
query_models += ' AND imd.module in %s'
query_param = (tuple(modules),)
query += ' ORDER BY module, model, name'
query_models += ' ORDER BY module, model'
cr.execute(query, query_param)
_to_translate = set()
def push_translation(module, type, name, id, source, comments=None):
# empty and one-letter terms are ignored, they probably are not meant to be
# translated, and would be very hard to translate anyway.
if not source or len(source.strip()) <= 1:
return
tnx = (module, source, name, id, type, tuple(comments or ()))
_to_translate.add(tnx)
def encode(s):
if isinstance(s, unicode):
return s.encode('utf8')
return s
def push(mod, type, name, res_id, term):
term = (term or '').strip()
if len(term) > 2 or term in ENGLISH_SMALL_WORDS:
push_translation(mod, type, name, res_id, term)
def get_root_view(xml_id):
view = model_data_obj.xmlid_to_object(cr, uid, xml_id)
if view:
while view.mode != 'primary':
view = view.inherit_id
xml_id = view.get_external_id(cr, uid).get(view.id, xml_id)
return xml_id
for (xml_name,model,res_id,module) in cr.fetchall():
module = encode(module)
model = encode(model)
xml_name = "%s.%s" % (module, encode(xml_name))
if model not in registry:
_logger.error("Unable to find object %r", model)
continue
Model = registry[model]
if not Model._translate:
# explicitly disabled
continue
obj = Model.browse(cr, uid, res_id)
if not obj.exists():
_logger.warning("Unable to find object %r with id %d", model, res_id)
continue
if model=='ir.ui.view':
d = etree.XML(encode(obj.arch))
if obj.type == 'qweb':
view_id = get_root_view(xml_name)
push_qweb = lambda t,l: push(module, 'view', 'website', view_id, t)
_extract_translatable_qweb_terms(d, push_qweb)
else:
push_view = lambda t,l: push(module, 'view', obj.model, xml_name, t)
trans_parse_view(d, push_view)
elif model=='ir.actions.wizard':
pass # TODO Can model really be 'ir.actions.wizard' ?
elif model=='ir.model.fields':
try:
field_name = encode(obj.name)
except AttributeError, exc:
_logger.error("name error in %s: %s", xml_name, str(exc))
continue
objmodel = registry.get(obj.model)
if (objmodel is None or field_name not in objmodel._columns
or not objmodel._translate):
continue
field_def = objmodel._columns[field_name]
name = "%s,%s" % (encode(obj.model), field_name)
push_translation(module, 'field', name, 0, encode(field_def.string))
if field_def.help:
push_translation(module, 'help', name, 0, encode(field_def.help))
if field_def.translate:
ids = objmodel.search(cr, uid, [])
obj_values = objmodel.read(cr, uid, ids, [field_name])
for obj_value in obj_values:
res_id = obj_value['id']
if obj.name in ('ir.model', 'ir.ui.menu'):
res_id = 0
model_data_ids = model_data_obj.search(cr, uid, [
('model', '=', model),
('res_id', '=', res_id),
])
if not model_data_ids:
push_translation(module, 'model', name, 0, encode(obj_value[field_name]))
if hasattr(field_def, 'selection') and isinstance(field_def.selection, (list, tuple)):
for dummy, val in field_def.selection:
push_translation(module, 'selection', name, 0, encode(val))
elif model=='ir.actions.report.xml':
name = encode(obj.report_name)
fname = ""
if obj.report_rml:
fname = obj.report_rml
parse_func = trans_parse_rml
report_type = "report"
elif obj.report_xsl:
fname = obj.report_xsl
parse_func = trans_parse_xsl
report_type = "xsl"
if fname and obj.report_type in ('pdf', 'xsl'):
try:
report_file = misc.file_open(fname)
try:
d = etree.parse(report_file)
for t in parse_func(d.iter()):
push_translation(module, report_type, name, 0, t)
finally:
report_file.close()
except (IOError, etree.XMLSyntaxError):
_logger.exception("couldn't export translation for report %s %s %s", name, report_type, fname)
for field_name, field_def in obj._columns.items():
if model == 'ir.model' and field_name == 'name' and obj.name == obj.model:
# ignore model name if it is the technical one, nothing to translate
continue
if field_def.translate:
name = model + "," + field_name
try:
term = obj[field_name] or ''
except:
term = ''
push_translation(module, 'model', name, xml_name, encode(term))
# End of data for ir.model.data query results
cr.execute(query_models, query_param)
def push_constraint_msg(module, term_type, model, msg):
if not hasattr(msg, '__call__'):
push_translation(encode(module), term_type, encode(model), 0, encode(msg))
def push_local_constraints(module, model, cons_type='sql_constraints'):
"""Climb up the class hierarchy and ignore inherited constraints
from other modules"""
term_type = 'sql_constraint' if cons_type == 'sql_constraints' else 'constraint'
msg_pos = 2 if cons_type == 'sql_constraints' else 1
for cls in model.__class__.__mro__:
if getattr(cls, '_module', None) != module:
continue
constraints = getattr(cls, '_local_' + cons_type, [])
for constraint in constraints:
push_constraint_msg(module, term_type, model._name, constraint[msg_pos])
for (_, model, module) in cr.fetchall():
if model not in registry:
_logger.error("Unable to find object %r", model)
continue
model_obj = registry[model]
if model_obj._constraints:
push_local_constraints(module, model_obj, 'constraints')
if model_obj._sql_constraints:
push_local_constraints(module, model_obj, 'sql_constraints')
installed_modules = map(
lambda m: m['name'],
registry['ir.module.module'].search_read(cr, uid, [('state', '=', 'installed')], fields=['name']))
path_list = list(openerp.modules.module.ad_paths)
# Also scan these non-addon paths
for bin_path in ['osv', 'report' ]:
path_list.append(os.path.join(config.config['root_path'], bin_path))
_logger.debug("Scanning modules at paths: %s", path_list)
def get_module_from_path(path):
for mp in path_list:
if path.startswith(mp) and os.path.dirname(path) != mp:
path = path[len(mp)+1:]
return path.split(os.path.sep)[0]
return 'base' # files that are not in a module are considered as being in 'base' module
def verified_module_filepaths(fname, path, root):
fabsolutepath = join(root, fname)
frelativepath = fabsolutepath[len(path):]
display_path = "addons%s" % frelativepath
module = get_module_from_path(fabsolutepath)
if ('all' in modules or module in modules) and module in installed_modules:
if os.path.sep != '/':
display_path = display_path.replace(os.path.sep, '/')
return module, fabsolutepath, frelativepath, display_path
return None, None, None, None
def babel_extract_terms(fname, path, root, extract_method="python", trans_type='code',
extra_comments=None, extract_keywords={'_': None}):
module, fabsolutepath, _, display_path = verified_module_filepaths(fname, path, root)
extra_comments = extra_comments or []
if not module: return
src_file = open(fabsolutepath, 'r')
try:
for extracted in extract.extract(extract_method, src_file,
keywords=extract_keywords):
# Babel 0.9.6 yields lineno, message, comments
# Babel 1.3 yields lineno, message, comments, context
lineno, message, comments = extracted[:3]
push_translation(module, trans_type, display_path, lineno,
encode(message), comments + extra_comments)
except Exception:
_logger.exception("Failed to extract terms from %s", fabsolutepath)
finally:
src_file.close()
for path in path_list:
_logger.debug("Scanning files of modules at %s", path)
for root, dummy, files in osutil.walksymlinks(path):
for fname in fnmatch.filter(files, '*.py'):
babel_extract_terms(fname, path, root)
# mako provides a babel extractor: http://docs.makotemplates.org/en/latest/usage.html#babel
for fname in fnmatch.filter(files, '*.mako'):
babel_extract_terms(fname, path, root, 'mako', trans_type='report')
# Javascript source files in the static/src/js directory, rest is ignored (libs)
if fnmatch.fnmatch(root, '*/static/src/js*'):
for fname in fnmatch.filter(files, '*.js'):
babel_extract_terms(fname, path, root, 'javascript',
extra_comments=[WEB_TRANSLATION_COMMENT],
extract_keywords={'_t': None, '_lt': None})
# QWeb template files
if fnmatch.fnmatch(root, '*/static/src/xml*'):
for fname in fnmatch.filter(files, '*.xml'):
babel_extract_terms(fname, path, root, 'openerp.tools.translate:babel_extract_qweb',
extra_comments=[WEB_TRANSLATION_COMMENT])
out = []
# translate strings marked as to be translated
for module, source, name, id, type, comments in sorted(_to_translate):
trans = '' if not lang else trans_obj._get_source(cr, uid, name, type, lang, source)
out.append((module, type, name, id, source, encode(trans) or '', comments))
return out
def trans_load(cr, filename, lang, verbose=True, module_name=None, context=None):
try:
fileobj = misc.file_open(filename)
_logger.info("loading %s", filename)
fileformat = os.path.splitext(filename)[-1][1:].lower()
result = trans_load_data(cr, fileobj, fileformat, lang, verbose=verbose, module_name=module_name, context=context)
fileobj.close()
return result
except IOError:
if verbose:
_logger.error("couldn't read translation file %s", filename)
return None
def trans_load_data(cr, fileobj, fileformat, lang, lang_name=None, verbose=True, module_name=None, context=None):
"""Populates the ir_translation table."""
if verbose:
_logger.info('loading translation file for language %s', lang)
if context is None:
context = {}
db_name = cr.dbname
registry = openerp.registry(db_name)
lang_obj = registry.get('res.lang')
trans_obj = registry.get('ir.translation')
iso_lang = misc.get_iso_codes(lang)
try:
ids = lang_obj.search(cr, SUPERUSER_ID, [('code','=', lang)])
if not ids:
# lets create the language with locale information
lang_obj.load_lang(cr, SUPERUSER_ID, lang=lang, lang_name=lang_name)
# Parse also the POT: it will possibly provide additional targets.
# (Because the POT comments are correct on Launchpad but not the
# PO comments due to a Launchpad limitation. See LP bug 933496.)
pot_reader = []
# now, the serious things: we read the language file
fileobj.seek(0)
if fileformat == 'csv':
reader = csv.reader(fileobj, quotechar='"', delimiter=',')
# read the first line of the file (it contains columns titles)
for row in reader:
fields = row
break
elif fileformat == 'po':
reader = TinyPoFile(fileobj)
fields = ['type', 'name', 'res_id', 'src', 'value', 'comments']
# Make a reader for the POT file and be somewhat defensive for the
# stable branch.
if fileobj.name.endswith('.po'):
try:
# Normally the path looks like /path/to/xxx/i18n/lang.po
# and we try to find the corresponding
# /path/to/xxx/i18n/xxx.pot file.
# (Sometimes we have 'i18n_extra' instead of just 'i18n')
addons_module_i18n, _ = os.path.split(fileobj.name)
addons_module, i18n_dir = os.path.split(addons_module_i18n)
addons, module = os.path.split(addons_module)
pot_handle = misc.file_open(os.path.join(
addons, module, i18n_dir, module + '.pot'))
pot_reader = TinyPoFile(pot_handle)
except:
pass
else:
_logger.error('Bad file format: %s', fileformat)
raise Exception(_('Bad file format'))
# Read the POT references, and keep them indexed by source string.
class Target(object):
def __init__(self):
self.value = None
self.targets = set() # set of (type, name, res_id)
self.comments = None
pot_targets = defaultdict(Target)
for type, name, res_id, src, _, comments in pot_reader:
if type is not None:
target = pot_targets[src]
target.targets.add((type, name, res_id))
target.comments = comments
# read the rest of the file
irt_cursor = trans_obj._get_import_cursor(cr, SUPERUSER_ID, context=context)
def process_row(row):
"""Process a single PO (or POT) entry."""
# dictionary which holds values for this line of the csv file
# {'lang': ..., 'type': ..., 'name': ..., 'res_id': ...,
# 'src': ..., 'value': ..., 'module':...}
dic = dict.fromkeys(('type', 'name', 'res_id', 'src', 'value',
'comments', 'imd_model', 'imd_name', 'module'))
dic['lang'] = lang
dic.update(zip(fields, row))
# discard the target from the POT targets.
src = dic['src']
if src in pot_targets:
target = pot_targets[src]
target.value = dic['value']
target.targets.discard((dic['type'], dic['name'], dic['res_id']))
# This would skip terms that fail to specify a res_id
res_id = dic['res_id']
if not res_id:
return
if isinstance(res_id, (int, long)) or \
(isinstance(res_id, basestring) and res_id.isdigit()):
dic['res_id'] = int(res_id)
dic['module'] = module_name
else:
# res_id is an xml id
dic['res_id'] = None
dic['imd_model'] = dic['name'].split(',')[0]
if '.' in res_id:
dic['module'], dic['imd_name'] = res_id.split('.', 1)
else:
dic['module'], dic['imd_name'] = False, res_id
irt_cursor.push(dic)
# First process the entries from the PO file (doing so also fills/removes
# the entries from the POT file).
for row in reader:
process_row(row)
# Then process the entries implied by the POT file (which is more
# correct w.r.t. the targets) if some of them remain.
pot_rows = []
for src, target in pot_targets.iteritems():
if target.value:
for type, name, res_id in target.targets:
pot_rows.append((type, name, res_id, src, target.value, target.comments))
pot_targets.clear()
for row in pot_rows:
process_row(row)
irt_cursor.finish()
trans_obj.clear_caches()
if verbose:
_logger.info("translation file loaded succesfully")
except IOError:
filename = '[lang: %s][format: %s]' % (iso_lang or 'new', fileformat)
_logger.exception("couldn't read translation file %s", filename)
def get_locales(lang=None):
if lang is None:
lang = locale.getdefaultlocale()[0]
if os.name == 'nt':
lang = _LOCALE2WIN32.get(lang, lang)
def process(enc):
ln = locale._build_localename((lang, enc))
yield ln
nln = locale.normalize(ln)
if nln != ln:
yield nln
for x in process('utf8'): yield x
prefenc = locale.getpreferredencoding()
if prefenc:
for x in process(prefenc): yield x
prefenc = {
'latin1': 'latin9',
'iso-8859-1': 'iso8859-15',
'cp1252': '1252',
}.get(prefenc.lower())
if prefenc:
for x in process(prefenc): yield x
yield lang
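# Illustrative sketch (an assumption, not part of the original module): listing the
# candidate locale names that would be tried for French on the current platform.
def _example_locale_candidates():
    return list(get_locales('fr_FR'))   # e.g. ['fr_FR.utf8', 'fr_FR.UTF-8', ..., 'fr_FR']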
def resetlocale():
# locale.resetlocale is bugged with some locales.
for ln in get_locales():
try:
return locale.setlocale(locale.LC_ALL, ln)
except locale.Error:
continue
def load_language(cr, lang):
"""Loads a translation terms for a language.
Used mainly to automate language loading at db initialization.
:param lang: language ISO code with optional _underscore_ and l10n flavor (ex: 'fr', 'fr_BE', but not 'fr-BE')
:type lang: str
"""
registry = openerp.registry(cr.dbname)
language_installer = registry['base.language.install']
oid = language_installer.create(cr, SUPERUSER_ID, {'lang': lang})
language_installer.lang_install(cr, SUPERUSER_ID, [oid], context=None)
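# Illustrative sketch (an assumption, not part of the original module): installing
# Belgian French terms right after a new database has been initialised.
def _example_install_language(cr):
    load_language(cr, 'fr_BE')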
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -3,913,251,370,468,359,000 | 39.22898 | 143 | 0.541578 | false |
aniruddhkanojia/qtile | test/test_bar.py | 14 | 11959 | # Copyright (c) 2011 Florian Mounier
# Copyright (c) 2012-2013 Craig Barnes
# Copyright (c) 2012 roger
# Copyright (c) 2012, 2014-2015 Tycho Andersen
# Copyright (c) 2014 Sean Vig
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import time
import six
import libqtile.layout
import libqtile.bar
import libqtile.widget
import libqtile.manager
import libqtile.config
import libqtile.confreader
from .utils import Xephyr
class GBConfig:
auto_fullscreen = True
keys = []
mouse = []
groups = [
libqtile.config.Group("a"),
libqtile.config.Group("bb"),
libqtile.config.Group("ccc"),
libqtile.config.Group("dddd"),
libqtile.config.Group("Pppy")
]
layouts = [libqtile.layout.stack.Stack(num_stacks=1)]
floating_layout = libqtile.layout.floating.Floating()
screens = [
libqtile.config.Screen(
top=libqtile.bar.Bar(
[
libqtile.widget.CPUGraph(
width=libqtile.bar.STRETCH,
type="linefill",
border_width=20,
margin_x=1,
margin_y=1
),
libqtile.widget.MemoryGraph(type="line"),
libqtile.widget.SwapGraph(type="box"),
libqtile.widget.TextBox(name="text",
background="333333"),
],
50,
),
bottom=libqtile.bar.Bar(
[
libqtile.widget.GroupBox(),
libqtile.widget.AGroupBox(),
libqtile.widget.Prompt(),
libqtile.widget.WindowName(),
libqtile.widget.Sep(),
libqtile.widget.Clock(),
],
50
),
# TODO: Add vertical bars and test widgets that support them
)
]
main = None
def test_completion():
c = libqtile.widget.prompt.CommandCompleter(None, True)
c.reset()
c.lookup = [
("a", "x/a"),
("aa", "x/aa"),
]
assert c.complete("a") == "a"
assert c.actual() == "x/a"
assert c.complete("a") == "aa"
assert c.complete("a") == "a"
c = libqtile.widget.prompt.CommandCompleter(None)
r = c.complete("l")
assert c.actual().endswith(r)
c.reset()
assert c.complete("/bi") == "/bin/"
c.reset()
assert c.complete("/bin") != "/bin/"
c.reset()
assert c.complete("~") != "~"
c.reset()
s = "thisisatotallynonexistantpathforsure"
assert c.complete(s) == s
assert c.actual() == s
@Xephyr(True, GBConfig())
def test_draw(self):
self.testWindow("one")
b = self.c.bar["bottom"].info()
assert b["widgets"][0]["name"] == "groupbox"
@Xephyr(True, GBConfig())
def test_prompt(self):
assert self.c.widget["prompt"].info()["width"] == 0
self.c.spawncmd(":")
self.c.widget["prompt"].fake_keypress("a")
self.c.widget["prompt"].fake_keypress("Tab")
self.c.spawncmd(":")
self.c.widget["prompt"].fake_keypress("slash")
self.c.widget["prompt"].fake_keypress("Tab")
@Xephyr(True, GBConfig())
def test_event(self):
self.c.group["bb"].toscreen()
@Xephyr(True, GBConfig())
def test_textbox(self):
assert "text" in self.c.list_widgets()
s = "some text"
self.c.widget["text"].update(s)
assert self.c.widget["text"].get() == s
s = "Aye, much longer string than the initial one"
self.c.widget["text"].update(s)
assert self.c.widget["text"].get() == s
self.c.group["Pppy"].toscreen()
self.c.widget["text"].set_font(fontsize=12)
time.sleep(3)
@Xephyr(True, GBConfig())
def test_textbox_errors(self):
self.c.widget["text"].update(None)
self.c.widget["text"].update("".join(chr(i) for i in range(255)))
self.c.widget["text"].update("V\xE2r\xE2na\xE7\xEE")
self.c.widget["text"].update(six.u("\ua000"))
@Xephyr(True, GBConfig())
def test_groupbox_button_press(self):
self.c.group["ccc"].toscreen()
assert self.c.groups()["a"]["screen"] == None
self.c.bar["bottom"].fake_button_press(0, "bottom", 10, 10, 1)
assert self.c.groups()["a"]["screen"] == 0
class GeomConf:
auto_fullscreen = False
main = None
keys = []
mouse = []
groups = [
libqtile.config.Group("a"),
libqtile.config.Group("b"),
libqtile.config.Group("c"),
libqtile.config.Group("d")
]
layouts = [libqtile.layout.stack.Stack(num_stacks=1)]
floating_layout = libqtile.layout.floating.Floating()
screens = [
libqtile.config.Screen(
top=libqtile.bar.Bar([], 10),
bottom=libqtile.bar.Bar([], 10),
left=libqtile.bar.Bar([], 10),
right=libqtile.bar.Bar([], 10),
)
]
class DBarH(libqtile.bar.Bar):
def __init__(self, widgets, size):
libqtile.bar.Bar.__init__(self, widgets, size)
self.horizontal = True
class DBarV(libqtile.bar.Bar):
def __init__(self, widgets, size):
libqtile.bar.Bar.__init__(self, widgets, size)
self.horizontal = False
class DWidget:
def __init__(self, length, length_type):
self.length, self.length_type = length, length_type
@Xephyr(True, GeomConf())
def test_geometry(self):
self.testXeyes()
g = self.c.screens()[0]["gaps"]
assert g["top"] == (0, 0, 800, 10)
assert g["bottom"] == (0, 590, 800, 10)
assert g["left"] == (0, 10, 10, 580)
assert g["right"] == (790, 10, 10, 580)
assert len(self.c.windows()) == 1
geom = self.c.windows()[0]
assert geom["x"] == 10
assert geom["y"] == 10
assert geom["width"] == 778
assert geom["height"] == 578
internal = self.c.internal_windows()
assert len(internal) == 4
wid = self.c.bar["bottom"].info()["window"]
assert self.c.window[wid].inspect()
@Xephyr(True, GeomConf())
def test_resize(self):
def wd(l):
return [i.length for i in l]
def offx(l):
return [i.offsetx for i in l]
def offy(l):
return [i.offsety for i in l]
for DBar, off in ((DBarH, offx), (DBarV, offy)):
b = DBar([], 100)
l = [
DWidget(10, libqtile.bar.CALCULATED),
DWidget(None, libqtile.bar.STRETCH),
DWidget(None, libqtile.bar.STRETCH),
DWidget(10, libqtile.bar.CALCULATED),
]
b._resize(100, l)
assert wd(l) == [10, 40, 40, 10]
assert off(l) == [0, 10, 50, 90]
b._resize(101, l)
assert wd(l) == [10, 40, 41, 10]
assert off(l) == [0, 10, 50, 91]
l = [
DWidget(10, libqtile.bar.CALCULATED)
]
b._resize(100, l)
assert wd(l) == [10]
assert off(l) == [0]
l = [
DWidget(10, libqtile.bar.CALCULATED),
DWidget(None, libqtile.bar.STRETCH)
]
b._resize(100, l)
assert wd(l) == [10, 90]
assert off(l) == [0, 10]
l = [
DWidget(None, libqtile.bar.STRETCH),
DWidget(10, libqtile.bar.CALCULATED),
]
b._resize(100, l)
assert wd(l) == [90, 10]
assert off(l) == [0, 90]
l = [
DWidget(10, libqtile.bar.CALCULATED),
DWidget(None, libqtile.bar.STRETCH),
DWidget(10, libqtile.bar.CALCULATED),
]
b._resize(100, l)
assert wd(l) == [10, 80, 10]
assert off(l) == [0, 10, 90]
class TestWidget(libqtile.widget.base._Widget):
orientations = libqtile.widget.base.ORIENTATION_HORIZONTAL
def __init__(self):
libqtile.widget.base._Widget.__init__(self, 10)
def draw(self):
pass
class IncompatibleWidgetConf:
main = None
keys = []
mouse = []
groups = [libqtile.config.Group("a")]
layouts = [libqtile.layout.stack.Stack(num_stacks=1)]
floating_layout = libqtile.layout.floating.Floating()
screens = [
libqtile.config.Screen(
left=libqtile.bar.Bar(
[
# This widget doesn't support vertical orientation
TestWidget(),
],
10
),
)
]
@Xephyr(True, IncompatibleWidgetConf(), False)
def test_incompatible_widget(self):
# Ensure that adding a widget that doesn't support the orientation of the
# bar raises ConfigError
self.qtileRaises(libqtile.confreader.ConfigError, IncompatibleWidgetConf())
class MultiStretchConf:
main = None
keys = []
mouse = []
groups = [libqtile.config.Group("a")]
layouts = [libqtile.layout.stack.Stack(num_stacks=1)]
floating_layout = libqtile.layout.floating.Floating()
screens = [
libqtile.config.Screen(
top=libqtile.bar.Bar(
[
libqtile.widget.Spacer(libqtile.bar.STRETCH),
libqtile.widget.Spacer(libqtile.bar.STRETCH),
],
10
),
)
]
@Xephyr(True, MultiStretchConf(), False)
def test_multiple_stretches(self):
# Ensure that adding two STRETCH widgets to the same bar raises ConfigError
self.qtileRaises(libqtile.confreader.ConfigError, MultiStretchConf())
@Xephyr(True, GeomConf(), False)
def test_basic(self):
self.config.screens = [
libqtile.config.Screen(
bottom=libqtile.bar.Bar(
[
TestWidget(),
libqtile.widget.Spacer(libqtile.bar.STRETCH),
TestWidget()
],
10
)
)
]
self.startQtile(self.config)
i = self.c.bar["bottom"].info()
assert i["widgets"][0]["offset"] == 0
assert i["widgets"][1]["offset"] == 10
assert i["widgets"][1]["width"] == 780
assert i["widgets"][2]["offset"] == 790
libqtile.hook.clear()
self.stopQtile()
@Xephyr(True, GeomConf(), False)
def test_singlespacer(self):
self.config.screens = [
libqtile.config.Screen(
bottom=libqtile.bar.Bar(
[
libqtile.widget.Spacer(libqtile.bar.STRETCH),
],
10
)
)
]
self.startQtile(self.config)
i = self.c.bar["bottom"].info()
assert i["widgets"][0]["offset"] == 0
assert i["widgets"][0]["width"] == 800
libqtile.hook.clear()
self.stopQtile()
@Xephyr(True, GeomConf(), False)
def test_nospacer(self):
self.config.screens = [
libqtile.config.Screen(
bottom=libqtile.bar.Bar(
[
TestWidget(),
TestWidget()
],
10
)
)
]
self.startQtile(self.config)
i = self.c.bar["bottom"].info()
assert i["widgets"][0]["offset"] == 0
assert i["widgets"][1]["offset"] == 10
libqtile.hook.clear()
self.stopQtile()
| mit | 5,393,738,090,292,947,000 | 28.026699 | 79 | 0.564512 | false |
vebin/Wox | PythonHome/Lib/site-packages/setuptools/command/test.py | 55 | 6481 | from distutils.errors import DistutilsOptionError
from unittest import TestLoader
import unittest
import sys
from pkg_resources import (resource_listdir, resource_exists, normalize_path,
working_set, _namespace_packages,
add_activation_listener, require, EntryPoint)
from setuptools import Command
from setuptools.compat import PY3
from setuptools.py31compat import unittest_main
class ScanningLoader(TestLoader):
def loadTestsFromModule(self, module):
"""Return a suite of all tests cases contained in the given module
If the module is a package, load tests from all the modules in it.
If the module has an ``additional_tests`` function, call it and add
the return value to the tests.
"""
tests = []
tests.append(TestLoader.loadTestsFromModule(self, module))
if hasattr(module, "additional_tests"):
tests.append(module.additional_tests())
if hasattr(module, '__path__'):
for file in resource_listdir(module.__name__, ''):
if file.endswith('.py') and file != '__init__.py':
submodule = module.__name__ + '.' + file[:-3]
else:
if resource_exists(module.__name__, file + '/__init__.py'):
submodule = module.__name__ + '.' + file
else:
continue
tests.append(self.loadTestsFromName(submodule))
if len(tests) != 1:
return self.suiteClass(tests)
else:
return tests[0] # don't create a nested suite for only one return
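# Illustrative sketch (an assumption, not part of this module): ScanningLoader is the
# default loader used by `python setup.py test`; a project could also request it
# explicitly from its setup.py, e.g.
#
#   setup(
#       ...,
#       test_suite='mypackage.tests',
#       test_loader='setuptools.command.test:ScanningLoader',
#   )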
class test(Command):
"""Command to run unit tests after in-place build"""
description = "run unit tests after in-place build"
user_options = [
('test-module=', 'm', "Run 'test_suite' in specified module"),
('test-suite=', 's',
"Test suite to run (e.g. 'some_module.test_suite')"),
('test-runner=', 'r', "Test runner to use"),
]
def initialize_options(self):
self.test_suite = None
self.test_module = None
self.test_loader = None
self.test_runner = None
def finalize_options(self):
if self.test_suite is None:
if self.test_module is None:
self.test_suite = self.distribution.test_suite
else:
self.test_suite = self.test_module + ".test_suite"
elif self.test_module:
raise DistutilsOptionError(
"You may specify a module or a suite, but not both"
)
self.test_args = [self.test_suite]
if self.verbose:
self.test_args.insert(0, '--verbose')
if self.test_loader is None:
self.test_loader = getattr(self.distribution, 'test_loader', None)
if self.test_loader is None:
self.test_loader = "setuptools.command.test:ScanningLoader"
if self.test_runner is None:
self.test_runner = getattr(self.distribution, 'test_runner', None)
def with_project_on_sys_path(self, func):
with_2to3 = PY3 and getattr(self.distribution, 'use_2to3', False)
if with_2to3:
# If we run 2to3 we can not do this inplace:
# Ensure metadata is up-to-date
self.reinitialize_command('build_py', inplace=0)
self.run_command('build_py')
bpy_cmd = self.get_finalized_command("build_py")
build_path = normalize_path(bpy_cmd.build_lib)
# Build extensions
self.reinitialize_command('egg_info', egg_base=build_path)
self.run_command('egg_info')
self.reinitialize_command('build_ext', inplace=0)
self.run_command('build_ext')
else:
# Without 2to3 inplace works fine:
self.run_command('egg_info')
# Build extensions in-place
self.reinitialize_command('build_ext', inplace=1)
self.run_command('build_ext')
ei_cmd = self.get_finalized_command("egg_info")
old_path = sys.path[:]
old_modules = sys.modules.copy()
try:
sys.path.insert(0, normalize_path(ei_cmd.egg_base))
working_set.__init__()
add_activation_listener(lambda dist: dist.activate())
require('%s==%s' % (ei_cmd.egg_name, ei_cmd.egg_version))
func()
finally:
sys.path[:] = old_path
sys.modules.clear()
sys.modules.update(old_modules)
working_set.__init__()
def run(self):
if self.distribution.install_requires:
self.distribution.fetch_build_eggs(
self.distribution.install_requires)
if self.distribution.tests_require:
self.distribution.fetch_build_eggs(self.distribution.tests_require)
if self.test_suite:
cmd = ' '.join(self.test_args)
if self.dry_run:
self.announce('skipping "unittest %s" (dry run)' % cmd)
else:
self.announce('running "unittest %s"' % cmd)
self.with_project_on_sys_path(self.run_tests)
def run_tests(self):
# Purge modules under test from sys.modules. The test loader will
# re-import them from the build location. Required when 2to3 is used
# with namespace packages.
if PY3 and getattr(self.distribution, 'use_2to3', False):
module = self.test_args[-1].split('.')[0]
if module in _namespace_packages:
del_modules = []
if module in sys.modules:
del_modules.append(module)
module += '.'
for name in sys.modules:
if name.startswith(module):
del_modules.append(name)
list(map(sys.modules.__delitem__, del_modules))
unittest_main(
None, None, [unittest.__file__] + self.test_args,
testLoader=self._resolve_as_ep(self.test_loader),
testRunner=self._resolve_as_ep(self.test_runner),
)
@staticmethod
def _resolve_as_ep(val):
"""
        Load the indicated attribute value, called, as if it were
specified as an entry point.
"""
if val is None:
return
parsed = EntryPoint.parse("x=" + val)
return parsed.load(require=False)()
| mit | -1,090,664,959,929,145,200 | 36.034286 | 79 | 0.567814 | false |