| text | repo_name | path | language | license | size | score |
---|---|---|---|---|---|---|
| stringlengths 6-947k | stringlengths 5-100 | stringlengths 4-231 | stringclasses 1 value | stringclasses 15 values | int64 6-947k | float64 0-0.34 |
from django.contrib.auth.models import User
from django.views.generic.edit import CreateView, FormView
from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
from django.utils.decorators import method_decorator
from django.core.context_processors import csrf
from django.http import HttpResponse, JsonResponse
from django.contrib.auth.decorators import login_required
def test_page(request):
    return render(request, "test.html")
| godlike64/typhon | typhon/views.py | Python | gpl-3.0 | 468 | 0.004274 |
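# Hypothetical urls.py wiring for the typhon/views.py entry above (sketch only:
# the URL regex, route name, and import path are assumptions, not taken from
# the godlike64/typhon repository).
from django.conf.urls import url

from typhon import views

urlpatterns = [
    url(r'^test/$', views.test_page, name='test_page'),
]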
#!/usr/bin/python
def message(to, text):
print "this is ", to, ":\n", text
def add(a, b):
return a + b;
message('xichen', 'eyu')
print add(1,2);
def mul(a, b):
return a * b;
print mul(2, 3);
print mul('a', 3);
print mul(b=2, a='dd');
print 2 ** 100;
print message;
func = add;
print func(1, 2);
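# The same script rendered in Python 3 syntax (sketch): print becomes a
# function and the redundant trailing semicolons are dropped; the output is
# essentially unchanged.
def message(to, text):
    print("this is ", to, ":\n", text)

def add(a, b):
    return a + b

def mul(a, b):
    return a * b

message('xichen', 'eyu')
print(add(1, 2))
print(mul(2, 3))
print(mul('a', 3))
print(mul(b=2, a='dd'))
print(2 ** 100)
print(message)
func = add
print(func(1, 2))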
| cxsjabc/basic | python/message.py | Python | agpl-3.0 | 308 | 0.061688 |
# coding: utf-8
"""
EVE Swagger Interface
An OpenAPI for EVE Online
OpenAPI spec version: 0.4.6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class GetUniverseMoonsMoonIdPosition(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, x=None, y=None, z=None):
"""
GetUniverseMoonsMoonIdPosition - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'x': 'float',
'y': 'float',
'z': 'float'
}
self.attribute_map = {
'x': 'x',
'y': 'y',
'z': 'z'
}
self._x = x
self._y = y
self._z = z
@property
def x(self):
"""
Gets the x of this GetUniverseMoonsMoonIdPosition.
x number
:return: The x of this GetUniverseMoonsMoonIdPosition.
:rtype: float
"""
return self._x
@x.setter
def x(self, x):
"""
Sets the x of this GetUniverseMoonsMoonIdPosition.
x number
:param x: The x of this GetUniverseMoonsMoonIdPosition.
:type: float
"""
if x is None:
raise ValueError("Invalid value for `x`, must not be `None`")
self._x = x
@property
def y(self):
"""
Gets the y of this GetUniverseMoonsMoonIdPosition.
y number
:return: The y of this GetUniverseMoonsMoonIdPosition.
:rtype: float
"""
return self._y
@y.setter
def y(self, y):
"""
Sets the y of this GetUniverseMoonsMoonIdPosition.
y number
:param y: The y of this GetUniverseMoonsMoonIdPosition.
:type: float
"""
if y is None:
raise ValueError("Invalid value for `y`, must not be `None`")
self._y = y
@property
def z(self):
"""
Gets the z of this GetUniverseMoonsMoonIdPosition.
z number
:return: The z of this GetUniverseMoonsMoonIdPosition.
:rtype: float
"""
return self._z
@z.setter
def z(self, z):
"""
Sets the z of this GetUniverseMoonsMoonIdPosition.
z number
:param z: The z of this GetUniverseMoonsMoonIdPosition.
:type: float
"""
if z is None:
raise ValueError("Invalid value for `z`, must not be `None`")
self._z = z
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, GetUniverseMoonsMoonIdPosition):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
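# A short usage sketch for the generated model above (not part of the generated
# client; the coordinate values are arbitrary examples).
if __name__ == "__main__":
    position = GetUniverseMoonsMoonIdPosition(x=1.0, y=2.5, z=-3.0)
    print(position.to_dict())    # e.g. {'x': 1.0, 'y': 2.5, 'z': -3.0}
    position.x = 4.2             # goes through the validating setter; None would raise ValueError
    print(position == GetUniverseMoonsMoonIdPosition(x=4.2, y=2.5, z=-3.0))  # True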
| minlexx/pyevemon | esi_client/models/get_universe_moons_moon_id_position.py | Python | gpl-3.0 | 4,328 | 0.000231 |
from __future__ import annotations
import logging
from os.path import exists
from typing import TYPE_CHECKING
from .helpers import BumprError, execute
if TYPE_CHECKING:
from typing import Optional
logger = logging.getLogger(__name__)
__all__ = (
"Hook",
"ReadTheDocHook",
"ChangelogHook",
"CommandHook",
"ReplaceHook",
"HOOKS",
)
class Hook:
key: str = ""
defaults: dict[str, Optional[str]] = {}
def __init__(self, releaser):
self.releaser = releaser
self.verbose = releaser.config.verbose
self.dryrun = releaser.config.dryrun
self.config = releaser.config[self.key]
self.validate()
def validate(self):
"""Override this method to implement initial validation"""
def bump(self, replacements):
pass
def prepare(self, replacements):
pass
class ReadTheDocHook(Hook):
"""
    This hook sets the readthedocs URL corresponding to the version
"""
key = "readthedoc"
defaults = {
"id": None,
"url": "https://{id}.readthedocs.io/en/{tag}",
"badge": "https://readthedocs.org/projects/{id}/badge/?version={tag}",
"bump": "{version}",
"prepare": "latest",
}
def url(self, tag):
return self.config.url.format(id=self.config.id, tag=tag)
def badge(self, tag):
return self.config.badge.format(id=self.config.id, tag=tag)
def bump(self, replacements):
replacements.insert(0, (self.badge("latest"), self.badge(self.releaser.tag_label)))
replacements.insert(0, (self.url("latest"), self.url(self.releaser.tag_label)))
def prepare(self, replacements):
replacements.insert(0, (self.badge(self.releaser.tag_label), self.badge("latest")))
replacements.insert(0, (self.url(self.releaser.tag_label), self.url("latest")))
class ChangelogHook(Hook):
"""
    This hook bumps the changelog version header and prepares a new section for the next release.
"""
key = "changelog"
defaults = {
"file": None,
"separator": "-",
"bump": "{version} ({date:%Y-%m-%d})",
"prepare": "Current",
"empty": "Nothing yet",
}
def validate(self):
if not self.config.get("file"):
raise BumprError("Changelog file has not been specified")
elif not exists(self.config.file):
raise BumprError("Changelog file does not exists")
def bump(self, replacements):
with open(self.config.file, "r", encoding=self.releaser.config.encoding) as changelog_file:
before = changelog_file.read()
after = before.replace(self.dev_header(), self.bumped_header())
self.releaser.perform(self.config.file, before, after)
def prepare(self, replacements):
next_header = "\n".join(
(
self.dev_header(),
"",
"- {0}".format(self.config.empty),
"",
self.bumped_header(),
)
)
with open(self.config.file, "r", encoding=self.releaser.config.encoding) as changelog_file:
before = changelog_file.read()
after = before.replace(self.bumped_header(), next_header)
self.releaser.perform(self.config.file, before, after)
def dev_header(self):
return self.underline(self.config.prepare)
def bumped_header(self):
title = self.config.bump.format(
version=self.releaser.version,
date=self.releaser.timestamp,
**self.releaser.version.__dict__,
)
return self.underline(title)
def underline(self, text):
if self.config.separator:
return "\n".join((text, len(text) * self.config.separator))
else:
return text
class CommandsHook(Hook):
"""
    This hook executes commands
"""
key = "commands"
defaults = {
"bump": None,
"prepare": None,
}
def bump(self, replacements):
if self.config.bump:
replacements = dict(
version=self.releaser.version,
tag=self.releaser.tag_label,
date=self.releaser.timestamp,
**self.releaser.version.__dict__,
)
execute(
self.config.bump,
replacements=replacements,
verbose=self.verbose,
dryrun=self.dryrun,
)
def prepare(self, replacements):
if self.config.prepare:
replacements = dict(
version=self.releaser.next_version,
tag=self.releaser.tag_label,
date=self.releaser.timestamp,
**self.releaser.next_version.__dict__,
)
execute(
self.config.prepare,
replacements=replacements,
verbose=self.verbose,
dryrun=self.dryrun,
)
class ReplaceHook(Hook):
"""
    This hook performs replacements in files
"""
key = "replace"
defaults: dict[str, Optional[str]] = {}
def bump(self, replacements):
replacements.insert(
0,
(
self.config.dev.format(
version=self.releaser.prev_version,
tag=self.releaser.tag_label,
date=self.releaser.timestamp,
**self.releaser.prev_version.__dict__,
),
self.config.stable.format(
version=self.releaser.version,
tag=self.releaser.tag_label,
date=self.releaser.timestamp,
**self.releaser.version.__dict__,
),
),
)
def prepare(self, replacements):
replacements.insert(
0,
(
self.config.stable.format(
version=self.releaser.version,
tag=self.releaser.tag_label,
date=self.releaser.timestamp,
**self.releaser.version.__dict__,
),
self.config.dev.format(
version=self.releaser.next_version,
tag=self.releaser.tag_label,
date=self.releaser.timestamp,
**self.releaser.next_version.__dict__,
),
),
)
HOOKS = (ReadTheDocHook, ChangelogHook, CommandsHook, ReplaceHook)
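# A minimal custom hook following the pattern above (illustrative sketch only,
# not part of bumpr, and deliberately not added to HOOKS). The releaser/config
# attributes it uses are the ones exposed by the Hook base class and the other
# hooks in this module; the "versionfile" key and its 'file' setting are made up.
class VersionFileHook(Hook):
    key = "versionfile"
    defaults = {"file": None}

    def validate(self):
        if not self.config.get("file"):
            raise BumprError("The versionfile hook requires a 'file' setting")

    def bump(self, replacements):
        # Replace the previous development version string with the released one.
        replacements.insert(0, (str(self.releaser.prev_version), str(self.releaser.version)))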
| noirbizarre/bumpr | bumpr/hooks.py | Python | lgpl-3.0 | 6,499 | 0.001077 |
import abc
import subprocess
import logging
from observables import BLOperator, MCObservable
from data import BLDataChannel, GIDataChannel
import util
class Channel(metaclass=abc.ABCMeta):
ISOSPIN_MAP = {
'singlet': "0",
'doublet': "1h",
'triplet': "1",
'quartet': "3h",
'quintet': "2",
'sextet': "5h"
}
def __init__(self, *, particle_type=None, isospin, strangeness=None, laph_query="laph_query",
sigmond_query="sigmond_query"):
self.particle_type = particle_type
self.strangeness = strangeness
self.isospin = isospin
self.laph_query = laph_query
self.sigmond_query = sigmond_query
# @ADH - I think I am going to have the DataHandler deal with these in the future
self.raw_data_channels = list()
@staticmethod
def initialize(*, data_file, laph_query="laph_query", sigmond_query="sigmond_query",
is_basic_laph=True):
if is_basic_laph:
query_result = subprocess.check_output([laph_query, '-i', data_file]).decode()
laph_xml = util.queryToXML(query_result)
operator = BLOperator.createFromXML(laph_xml.find(".//Operator"))
if 'special' in data_file.split('/'):
return SpecialChannel(particle_type=operator.particle_type, isospin=operator.isospin,
strangeness=operator.strangeness, flavor=operator.flavor,
laph_query=laph_query, sigmond_query=sigmond_query)
elif operator.psq > 0:
return MovingChannel(particle_type=operator.particle_type, isospin=operator.isospin,
strangeness=operator.strangeness, psq=operator.psq,
lg_irrep=operator.lg_irrep, laph_query=laph_query,
sigmond_query=sigmond_query)
else:
return AtRestChannel(particle_type=operator.particle_type, isospin=operator.isospin,
strangeness=operator.strangeness, lg_irrep=operator.lg_irrep,
laph_query=laph_query, sigmond_query=sigmond_query)
else:
query_result = subprocess.check_output([sigmond_query, '-k', data_file]).decode()
try:
records = query_result.split('Record')
observable = MCObservable.createFromXML(util.queryToXML(records[1]))
if observable.psq > 0:
return MovingChannel(isospin=observable.isospin, psq=observable.psq,
lg_irrep=observable.lg_irrep, laph_query=laph_query,
sigmond_query=sigmond_query)
else:
return AtRestChannel(isospin=observable.isospin, lg_irrep=observable.lg_irrep,
laph_query=laph_query, sigmond_query=sigmond_query)
except IndexError:
logging.warning("%s contains no records", data_file)
except AttributeError:
logging.warning("%s contains Observables", data_file)
return None
def addRawDataChannel(self, path, is_basic_laph=True):
if is_basic_laph:
self.raw_data_channels.append(BLDataChannel(path, self.laph_query))
else:
self.raw_data_channels.append(GIDataChannel(path, self.sigmond_query))
@property
@abc.abstractmethod
def channel_string(self):
pass
@property
def is_special(self):
return isinstance(self, SpecialChannel)
@property
def is_atrest(self):
return isinstance(self, AtRestChannel)
@property
def is_moving(self):
return isinstance(self, MovingChannel)
def __hash__(self):
return hash(self.__repr__())
def __str__(self):
return self.channel_string
# @ADH - Should be checking that 'other' is an instance of an object
# derived from Channel. I'm not sure how to best do that right now.
# So, this will suffice for the moment.
def __eq__(self, other):
return self.__repr__() == other.__repr__()
def __ne__(self, other):
return self.__repr__() != other.__repr__()
def __lt__(self, other):
return self.__repr__() < other.__repr__()
def __gt__(self, other):
return self.__repr__() > other.__repr__()
def __le__(self, other):
return self.__repr__() <= other.__repr__()
def __ge__(self, other):
return self.__repr__() >= other.__repr__()
class SpecialChannel(Channel):
def __init__(self, *, particle_type, isospin, strangeness, flavor, laph_query="laph_query",
sigmond_query="sigmond_query"):
super().__init__(particle_type=particle_type, isospin=isospin, strangeness=strangeness,
laph_query=laph_query, sigmond_query=sigmond_query)
self.flavor = flavor
@property
def channel_string(self):
if self.particle_type == "boson":
particle_type = "B"
elif self.particle_type == "fermion":
particle_type = "F"
strangeness = str(self.strangeness).replace('-', 'm')
return "{p_type}_{flavor}_I{isospin}_S{strangeness}_special".format(
p_type=particle_type, flavor=self.flavor, isospin=self.ISOSPIN_MAP[self.isospin],
strangeness=strangeness)
def __repr__(self):
return "SP_{}".format(self.channel_string)
class AtRestChannel(Channel):
def __init__(self, *, particle_type=None, isospin, strangeness=None, lg_irrep,
laph_query="laph_query", sigmond_query="sigmond_query"):
super().__init__(particle_type=particle_type, isospin=isospin, strangeness=strangeness,
laph_query=laph_query, sigmond_query=sigmond_query)
self.psq = 0
self.lg_irrep = lg_irrep
@property
def channel_string(self):
if self.particle_type == "boson":
particle_type = "B_"
elif self.particle_type == "fermion":
particle_type = "F_"
else:
particle_type = ""
if self.strangeness is not None:
strangeness = "S{}_".format(self.strangeness).replace('-', 'm')
else:
strangeness = ""
return "{p_type}I{isospin}_{strangeness}P0_{irrep}".format(
p_type=particle_type, isospin=self.ISOSPIN_MAP[self.isospin], strangeness=strangeness,
irrep=self.lg_irrep)
def __repr__(self):
return "AR_{}".format(self.channel_string)
class MovingChannel(Channel):
def __init__(self, *, particle_type=None, isospin, strangeness=None, psq, lg_irrep,
laph_query="laph_query", sigmond_query="sigmond_query"):
super().__init__(particle_type=particle_type, isospin=isospin, strangeness=strangeness,
laph_query=laph_query, sigmond_query=sigmond_query)
self.psq = psq
self.lg_irrep = lg_irrep
@property
def channel_string(self):
if self.particle_type == "boson":
particle_type = "B_"
elif self.particle_type == "fermion":
particle_type = "F_"
else:
particle_type = ""
if self.strangeness is not None:
strangeness = "S{}_".format(self.strangeness).replace('-', 'm')
else:
strangeness = ""
return "{p_type}I{isospin}_{strangeness}PSQ{psq}_{irrep}".format(
p_type=particle_type, isospin=self.ISOSPIN_MAP[self.isospin], strangeness=strangeness,
psq=self.psq, irrep=self.lg_irrep)
def __repr__(self):
return "MV_{}".format(self.channel_string)
| andrewhanlon/QCD_scripts | sigmond/channel.py | Python | gpl-3.0 | 7,175 | 0.012683 |
import os
from jenkins_jobs import cmd
from tests.base import mock
from tests.cmd.test_cmd import CmdTestsBase
@mock.patch('jenkins_jobs.builder.Jenkins.get_plugins_info', mock.MagicMock)
class DeleteTests(CmdTestsBase):
@mock.patch('jenkins_jobs.cmd.Builder.delete_job')
def test_delete_single_job(self, delete_job_mock):
"""
Test handling the deletion of a single Jenkins job.
"""
args = self.parser.parse_args(['delete', 'test_job'])
cmd.execute(args, self.config) # passes if executed without error
@mock.patch('jenkins_jobs.cmd.Builder.delete_job')
def test_delete_multiple_jobs(self, delete_job_mock):
"""
Test handling the deletion of multiple Jenkins jobs.
"""
args = self.parser.parse_args(['delete', 'test_job1', 'test_job2'])
cmd.execute(args, self.config) # passes if executed without error
@mock.patch('jenkins_jobs.builder.Jenkins.delete_job')
def test_delete_using_glob_params(self, delete_job_mock):
"""
Test handling the deletion of multiple Jenkins jobs using the glob
parameters feature.
"""
args = self.parser.parse_args(['delete',
'--path',
os.path.join(self.fixtures_path,
'cmd-002.yaml'),
'*bar*'])
cmd.execute(args, self.config)
calls = [mock.call('bar001'), mock.call('bar002')]
delete_job_mock.assert_has_calls(calls, any_order=True)
self.assertEqual(delete_job_mock.call_count, len(calls),
"Jenkins.delete_job() was called '%s' times when "
"expected '%s'" % (delete_job_mock.call_count,
len(calls)))
| lukas-bednar/jenkins-job-builder | tests/cmd/subcommands/test_delete.py | Python | apache-2.0 | 1,878 | 0 |
# -*- coding: utf-8 -*-
import copy
from functools import wraps
import json
import sys
import django
from django.contrib.admin.helpers import AdminForm
from django.conf import settings
from django.conf.urls import url
from django.contrib import admin, messages
from django.contrib.admin.models import LogEntry, CHANGE
from django.contrib.admin.options import IncorrectLookupParameters
from django.contrib.admin.util import get_deleted_objects
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site, get_current_site
from django.core.exceptions import PermissionDenied, ObjectDoesNotExist, ValidationError
from django.db import router, transaction
from django.db.models import Q
from django.http import HttpResponseRedirect, HttpResponse, Http404, HttpResponseBadRequest, HttpResponseForbidden
from django.shortcuts import render_to_response, get_object_or_404
from django.template.context import RequestContext
from django.template.defaultfilters import escape
from django.utils.encoding import force_text
from django.utils.six.moves.urllib.parse import unquote
from django.utils.translation import ugettext_lazy as _, get_language
from django.utils.decorators import method_decorator
from django.views.decorators.http import require_POST
from cms.admin.change_list import CMSChangeList
from cms.admin.dialog.views import get_copy_dialog
from cms.admin.forms import (PageForm, AdvancedSettingsForm, PagePermissionForm,
PublicationDatesForm)
from cms.admin.permissionadmin import (PERMISSION_ADMIN_INLINES, PagePermissionInlineAdmin, ViewRestrictionInlineAdmin)
from cms.admin.placeholderadmin import PlaceholderAdminMixin
from cms.admin.views import revert_plugins
from cms.constants import PAGE_TYPES_ID, PUBLISHER_STATE_PENDING
from cms.models import Page, Title, CMSPlugin, PagePermission, GlobalPagePermission, StaticPlaceholder
from cms.models.managers import PagePermissionsPermissionManager
from cms.plugin_pool import plugin_pool
from cms.toolbar_pool import toolbar_pool
from cms.utils import helpers, permissions, get_language_from_request, admin as admin_utils, copy_plugins
from cms.utils.i18n import get_language_list, get_language_tuple, get_language_object, force_language
from cms.utils.admin import jsonify_request
from cms.utils.compat.dj import is_installed
from cms.utils.conf import get_cms_setting
from cms.utils.helpers import find_placeholder_relation, current_site
from cms.utils.permissions import has_global_page_permission, has_generic_permission
from cms.utils.urlutils import add_url_parameters, admin_reverse
require_POST = method_decorator(require_POST)
if is_installed('reversion'):
from reversion.admin import VersionAdmin as ModelAdmin
from reversion import create_revision
else: # pragma: no cover
from django.contrib.admin import ModelAdmin
class ReversionContext(object):
def __enter__(self):
yield
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def __call__(self, func):
"""Allows this revision context to be used as a decorator."""
@wraps(func)
def do_revision_context(*args, **kwargs):
self.__enter__()
exception = False
try:
try:
return func(*args, **kwargs)
except:
exception = True
if not self.__exit__(*sys.exc_info()):
raise
finally:
if not exception:
self.__exit__(None, None, None)
return do_revision_context
def create_revision():
return ReversionContext()
PUBLISH_COMMENT = "Publish"
INITIAL_COMMENT = "Initial version."
class PageAdmin(PlaceholderAdminMixin, ModelAdmin):
form = PageForm
search_fields = ('=id', 'title_set__slug', 'title_set__title', 'reverse_id')
revision_form_template = "admin/cms/page/history/revision_header.html"
recover_form_template = "admin/cms/page/history/recover_header.html"
add_general_fields = ['title', 'slug', 'language', 'template']
change_list_template = "admin/cms/page/tree/base.html"
list_filter = ['in_navigation', 'template', 'changed_by', 'soft_root']
title_frontend_editable_fields = ['title', 'menu_title', 'page_title']
inlines = PERMISSION_ADMIN_INLINES
def get_urls(self):
"""Get the admin urls
"""
info = "%s_%s" % (self.model._meta.app_label, self.model._meta.model_name)
pat = lambda regex, fn: url(regex, self.admin_site.admin_view(fn), name='%s_%s' % (info, fn.__name__))
url_patterns = [
pat(r'^([0-9]+)/advanced-settings/$', self.advanced),
pat(r'^([0-9]+)/dates/$', self.dates),
pat(r'^([0-9]+)/permission-settings/$', self.permissions),
pat(r'^([0-9]+)/delete-translation/$', self.delete_translation),
pat(r'^([0-9]+)/move-page/$', self.move_page),
pat(r'^([0-9]+)/copy-page/$', self.copy_page),
pat(r'^([0-9]+)/copy-language/$', self.copy_language),
pat(r'^([0-9]+)/dialog/copy/$', get_copy_dialog), # copy dialog
pat(r'^([0-9]+)/change-navigation/$', self.change_innavigation),
pat(r'^([0-9]+)/permissions/$', self.get_permissions),
pat(r'^([0-9]+)/undo/$', self.undo),
pat(r'^([0-9]+)/redo/$', self.redo),
pat(r'^([0-9]+)/change_template/$', self.change_template),
pat(r'^([0-9]+)/([a-z\-]+)/descendants/$', self.descendants), # menu html for page descendants
pat(r'^([0-9]+)/([a-z\-]+)/edit-field/$', self.edit_title_fields),
pat(r'^([0-9]+)/([a-z\-]+)/publish/$', self.publish_page),
pat(r'^([0-9]+)/([a-z\-]+)/unpublish/$', self.unpublish),
pat(r'^([0-9]+)/([a-z\-]+)/revert/$', self.revert_page),
pat(r'^([0-9]+)/([a-z\-]+)/preview/$', self.preview_page),
pat(r'^add-page-type/$', self.add_page_type),
pat(r'^published-pages/$', self.get_published_pagelist),
url(r'^resolve/$', self.resolve, name="cms_page_resolve"),
]
if plugin_pool.get_all_plugins():
url_patterns += plugin_pool.get_patterns()
url_patterns += super(PageAdmin, self).get_urls()
return url_patterns
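    # Note on naming (editorial, not part of the original module): ``pat`` above
    # registers each custom view under '<app_label>_<model_name>_<function name>'
    # via ``info``, so e.g. the publish view becomes 'cms_page_publish_page' and
    # the template-switch view 'cms_page_change_template'. These names can then
    # be reversed like any other admin URL name.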
def get_revision_instances(self, request, object):
"""Returns all the instances to be used in the object's revision."""
if isinstance(object, Title):
object = object.page
if isinstance(object, Page) and not object.publisher_is_draft:
object = object.publisher_public
placeholder_relation = find_placeholder_relation(object)
data = [object]
filters = {'placeholder__%s' % placeholder_relation: object}
for plugin in CMSPlugin.objects.filter(**filters):
data.append(plugin)
plugin_instance, admin = plugin.get_plugin_instance()
if plugin_instance:
data.append(plugin_instance)
if isinstance(object, Page):
titles = object.title_set.all()
for title in titles:
title.publisher_public = None
data.append(title)
return data
def save_model(self, request, obj, form, change):
"""
Move the page in the tree if necessary and save every placeholder
Content object.
"""
target = request.GET.get('target', None)
position = request.GET.get('position', None)
if 'recover' in request.path_info:
pk = obj.pk
if obj.parent_id:
try:
parent = Page.objects.get(pk=obj.parent_id)
except Page.DoesNotExist:
parent = None
else:
parent = None
obj.pk = None
obj.path = None
obj.numchild = 0
obj.depth = 0
if parent:
saved_obj = parent.add_child(instance=obj)
else:
saved_obj = obj.add_root(instance=obj)
tmp_pk = saved_obj.pk
saved_obj.pk = pk
Page.objects.get(pk=tmp_pk).delete()
saved_obj.save(no_signals=True)
else:
if 'history' in request.path_info:
old_obj = Page.objects.get(pk=obj.pk)
obj.depth = old_obj.depth
obj.parent_id = old_obj.parent_id
obj.path = old_obj.path
obj.numchild = old_obj.numchild
new = False
if not obj.pk:
new = True
obj.save()
if 'recover' in request.path_info or 'history' in request.path_info:
revert_plugins(request, obj.version.pk, obj)
if target is not None and position is not None:
try:
target = self.model.objects.get(pk=target)
except self.model.DoesNotExist:
pass
else:
if position == 'last-child' or position == 'first-child':
obj.parent_id = target.pk
else:
obj.parent_id = target.parent_id
obj.save()
obj = obj.move(target, pos=position)
page_type_id = form.cleaned_data.get('page_type')
copy_target_id = request.GET.get('copy_target')
if copy_target_id or page_type_id:
if page_type_id:
copy_target_id = page_type_id
copy_target = Page.objects.get(pk=copy_target_id)
if not copy_target.has_view_permission(request):
raise PermissionDenied()
obj = Page.objects.get(pk=obj.pk) #mptt reload
copy_target._copy_attributes(obj, clean=True)
obj.save()
for lang in copy_target.languages.split(','):
copy_target._copy_contents(obj, lang)
if not 'permission' in request.path_info:
language = form.cleaned_data['language']
Title.objects.set_or_create(
request,
obj,
form,
language,
)
# is it home? publish it right away
if new and Page.objects.filter(site_id=obj.site_id).count() == 1:
obj.publish(language)
def get_fieldsets(self, request, obj=None):
form = self.get_form(request, obj, fields=None)
if getattr(form, 'fieldsets', None) is None:
fields = list(form.base_fields) + list(self.get_readonly_fields(request, obj))
return [(None, {'fields': fields})]
else:
return form.fieldsets
def get_inline_classes(self, request, obj=None, **kwargs):
if obj and 'permission' in request.path_info:
return PERMISSION_ADMIN_INLINES
return []
def get_form_class(self, request, obj=None, **kwargs):
if 'advanced' in request.path_info:
return AdvancedSettingsForm
elif 'permission' in request.path_info:
return PagePermissionForm
elif 'dates' in request.path_info:
return PublicationDatesForm
return self.form
def get_form(self, request, obj=None, **kwargs):
"""
Get PageForm for the Page model and modify its fields depending on
the request.
"""
language = get_language_from_request(request, obj)
form_cls = self.get_form_class(request, obj)
form = super(PageAdmin, self).get_form(request, obj, form=form_cls, **kwargs)
# get_form method operates by overriding initial fields value which
# may persist across invocation. Code below deepcopies fields definition
# to avoid leaks
for field in form.base_fields.keys():
form.base_fields[field] = copy.deepcopy(form.base_fields[field])
if 'language' in form.base_fields:
form.base_fields['language'].initial = language
if 'page_type' in form.base_fields:
if 'copy_target' in request.GET or 'add_page_type' in request.GET or obj:
del form.base_fields['page_type']
elif not Title.objects.filter(page__parent__reverse_id=PAGE_TYPES_ID, language=language).exists():
del form.base_fields['page_type']
if 'add_page_type' in request.GET:
del form.base_fields['menu_title']
del form.base_fields['meta_description']
del form.base_fields['page_title']
self.inlines = self.get_inline_classes(request, obj, **kwargs)
if obj:
if 'history' in request.path_info or 'recover' in request.path_info:
version_id = request.path_info.split('/')[-2]
else:
version_id = None
title_obj = obj.get_title_obj(language=language, fallback=False, version_id=version_id, force_reload=True)
if 'site' in form.base_fields and form.base_fields['site'].initial is None:
form.base_fields['site'].initial = obj.site
for name in ('slug', 'title', 'meta_description', 'menu_title', 'page_title', 'redirect'):
if name in form.base_fields:
form.base_fields[name].initial = getattr(title_obj, name)
if 'overwrite_url' in form.base_fields:
if title_obj.has_url_overwrite:
form.base_fields['overwrite_url'].initial = title_obj.path
else:
form.base_fields['overwrite_url'].initial = ''
else:
for name in ('slug', 'title'):
form.base_fields[name].initial = u''
if 'target' in request.GET or 'copy_target' in request.GET:
target = request.GET.get('copy_target') or request.GET.get('target')
if 'position' in request.GET:
position = request.GET['position']
if position == 'last-child' or position == 'first-child':
form.base_fields['parent'].initial = request.GET.get('target', None)
else:
sibling = Page.objects.get(pk=target)
form.base_fields['parent'].initial = sibling.parent_id
else:
form.base_fields['parent'].initial = request.GET.get('target', None)
form.base_fields['site'].initial = request.session.get('cms_admin_site', None)
return form
def advanced(self, request, object_id):
page = get_object_or_404(Page, pk=object_id)
if not page.has_advanced_settings_permission(request):
raise PermissionDenied("No permission for editing advanced settings")
return self.change_view(request, object_id, extra_context={'advanced_settings': True, 'title': _("Advanced Settings")})
def dates(self, request, object_id):
return self.change_view(request, object_id, extra_context={'publishing_dates': True, 'title': _("Publishing dates")})
def permissions(self, request, object_id):
page = get_object_or_404(Page, pk=object_id)
if not page.has_change_permissions_permission(request):
raise PermissionDenied("No permission for editing advanced settings")
return self.change_view(request, object_id, extra_context={'show_permissions': True, 'title': _("Change Permissions")})
def get_inline_instances(self, request, obj=None):
inlines = super(PageAdmin, self).get_inline_instances(request, obj)
if get_cms_setting('PERMISSION') and obj:
filtered_inlines = []
for inline in inlines:
if (isinstance(inline, PagePermissionInlineAdmin)
and not isinstance(inline, ViewRestrictionInlineAdmin)):
if "recover" in request.path or "history" in request.path:
# do not display permissions in recover mode
continue
if not obj.has_change_permissions_permission(request):
continue
filtered_inlines.append(inline)
inlines = filtered_inlines
return inlines
def get_unihandecode_context(self, language):
if language[:2] in get_cms_setting('UNIHANDECODE_DECODERS'):
uhd_lang = language[:2]
else:
uhd_lang = get_cms_setting('UNIHANDECODE_DEFAULT_DECODER')
uhd_host = get_cms_setting('UNIHANDECODE_HOST')
uhd_version = get_cms_setting('UNIHANDECODE_VERSION')
if uhd_lang and uhd_host and uhd_version:
uhd_urls = [
'%sunihandecode-%s.core.min.js' % (uhd_host, uhd_version),
'%sunihandecode-%s.%s.min.js' % (uhd_host, uhd_version, uhd_lang),
]
else:
uhd_urls = []
return {'unihandecode_lang': uhd_lang, 'unihandecode_urls': uhd_urls}
def add_view(self, request, form_url='', extra_context=None):
extra_context = extra_context or {}
language = get_language_from_request(request)
extra_context.update({
'language': language,
})
if not request.GET.get('add_page_type') is None:
extra_context.update({
'add_page_type': True,
'title': _("Add Page Type"),
})
elif 'copy_target' in request.GET:
extra_context.update({
'title': _("Add Page Copy"),
})
else:
extra_context = self.update_language_tab_context(request, context=extra_context)
extra_context.update(self.get_unihandecode_context(language))
return super(PageAdmin, self).add_view(request, form_url, extra_context=extra_context)
def change_view(self, request, object_id, form_url='', extra_context=None):
"""
The 'change' admin view for the Page model.
"""
if extra_context is None:
extra_context = {'basic_info': True}
try:
obj = self.model.objects.get(pk=object_id)
except self.model.DoesNotExist:
# Don't raise Http404 just yet, because we haven't checked
# permissions yet. We don't want an unauthenticated user to be able
# to determine whether a given object exists.
obj = None
else:
#activate(user_lang_set)
context = {
'page': obj,
'CMS_PERMISSION': get_cms_setting('PERMISSION'),
'ADMIN_MEDIA_URL': settings.STATIC_URL,
'can_change': obj.has_change_permission(request),
'can_change_permissions': obj.has_change_permissions_permission(request),
'current_site_id': settings.SITE_ID,
}
context.update(extra_context or {})
extra_context = self.update_language_tab_context(request, obj, context)
tab_language = get_language_from_request(request)
extra_context.update(self.get_unihandecode_context(tab_language))
response = super(PageAdmin, self).change_view(
request, object_id, form_url=form_url, extra_context=extra_context)
if tab_language and response.status_code == 302 and response._headers['location'][1] == request.path_info:
location = response._headers['location']
response._headers['location'] = (location[0], "%s?language=%s" % (location[1], tab_language))
return response
def render_change_form(self, request, context, add=False, change=False, form_url='', obj=None):
# add context variables
filled_languages = []
if obj:
filled_languages = [t[0] for t in obj.title_set.filter(title__isnull=False).values_list('language')]
allowed_languages = [lang[0] for lang in self._get_site_languages(obj)]
context.update({
'filled_languages': [lang for lang in filled_languages if lang in allowed_languages],
})
return super(PageAdmin, self).render_change_form(request, context, add, change, form_url, obj)
def _get_site_languages(self, obj=None):
site_id = None
if obj:
site_id = obj.site_id
else:
site_id = Site.objects.get_current().pk
return get_language_tuple(site_id)
def update_language_tab_context(self, request, obj=None, context=None):
if not context:
context = {}
language = get_language_from_request(request, obj)
languages = self._get_site_languages(obj)
context.update({
'language': language,
'language_tabs': languages,
# Dates are not language dependent, thus we hide the language
# selection bar: the language is forced through the form class
'show_language_tabs': len(list(languages)) > 1 and not context.get('publishing_dates', False),
})
return context
def response_change(self, request, obj):
"""Called always when page gets changed, call save on page, there may be
some new stuff, which should be published after all other objects on page
are collected.
"""
# save the object again, so all the related changes to page model
# can be published if required
obj.save()
return super(PageAdmin, self).response_change(request, obj)
def has_add_permission(self, request):
"""
Return true if the current user has permission to add a new page.
"""
if get_cms_setting('PERMISSION'):
return permissions.has_page_add_permission(request)
return super(PageAdmin, self).has_add_permission(request)
def has_change_permission(self, request, obj=None):
"""
Return true if the current user has permission on the page.
Return the string 'All' if the user has all rights.
"""
if get_cms_setting('PERMISSION'):
if obj:
return obj.has_change_permission(request)
else:
return permissions.has_page_change_permission(request)
return super(PageAdmin, self).has_change_permission(request, obj)
def has_delete_permission(self, request, obj=None):
"""
Returns True if the given request has permission to change the given
        Django model instance. If CMS_PERMISSION is in use, object-level
        permissions are also taken into account.
"""
if get_cms_setting('PERMISSION') and obj is not None:
return obj.has_delete_permission(request)
return super(PageAdmin, self).has_delete_permission(request, obj)
def has_recover_permission(self, request):
"""
        Returns True if the user has the right to recover pages
"""
if not is_installed('reversion'):
return False
user = request.user
if user.is_superuser:
return True
try:
if has_global_page_permission(request, can_recover_page=True):
return True
except:
pass
return False
def has_add_plugin_permission(self, request, placeholder, plugin_type):
if not permissions.has_plugin_permission(request.user, plugin_type, "add"):
return False
page = placeholder.page
if page and not page.has_change_permission(request):
return False
if page and not page.publisher_is_draft:
return False
return True
def has_copy_plugin_permission(self, request, source_placeholder, target_placeholder, plugins):
source_page = source_placeholder.page
if source_page and not source_page.has_change_permission(request):
return False
target_page = target_placeholder.page
if target_page and not target_page.has_change_permission(request):
return False
if target_page and not target_page.publisher_is_draft:
return False
for plugin in plugins:
if not permissions.has_plugin_permission(request.user, plugin.plugin_type, "add"):
return False
return True
def has_change_plugin_permission(self, request, plugin):
page = plugin.placeholder.page if plugin.placeholder else None
if page and not page.has_change_permission(request):
return False
if page and not page.publisher_is_draft:
return False
if not permissions.has_plugin_permission(request.user, plugin.plugin_type, "change"):
return False
return True
def has_move_plugin_permission(self, request, plugin, target_placeholder):
if not permissions.has_plugin_permission(request.user, plugin.plugin_type, "change"):
return False
page = plugin.placeholder.page
if page and not page.has_change_permission(request):
return False
if page and not page.publisher_is_draft:
return False
return True
def has_delete_plugin_permission(self, request, plugin):
if not permissions.has_plugin_permission(request.user, plugin.plugin_type, "delete"):
return False
page = plugin.placeholder.page
if page:
if not page.publisher_is_draft:
return False
if not page.has_change_permission(request):
return False
return True
def has_clear_placeholder_permission(self, request, placeholder):
page = placeholder.page if placeholder else None
if page:
if not page.publisher_is_draft:
return False
if not page.has_change_permission(request):
return False
return True
def post_add_plugin(self, request, placeholder, plugin):
if is_installed('reversion') and placeholder.page:
plugin_name = force_text(plugin_pool.get_plugin(plugin.plugin_type).name)
message = _(u"%(plugin_name)s plugin added to %(placeholder)s") % {
'plugin_name': plugin_name, 'placeholder': placeholder}
self.cleanup_history(placeholder.page)
helpers.make_revision_with_plugins(placeholder.page, request.user, message)
def post_copy_plugins(self, request, source_placeholder, target_placeholder, plugins):
page = target_placeholder.page
if page and is_installed('reversion'):
message = _(u"Copied plugins to %(placeholder)s") % {'placeholder': target_placeholder}
self.cleanup_history(page)
helpers.make_revision_with_plugins(page, request.user, message)
def post_edit_plugin(self, request, plugin):
page = plugin.placeholder.page
if page:
# if reversion is installed, save version of the page plugins
if is_installed('reversion') and page:
plugin_name = force_text(plugin_pool.get_plugin(plugin.plugin_type).name)
message = _(
u"%(plugin_name)s plugin edited at position %(position)s in %(placeholder)s") % {
'plugin_name': plugin_name,
'position': plugin.position,
'placeholder': plugin.placeholder.slot
}
self.cleanup_history(page)
helpers.make_revision_with_plugins(page, request.user, message)
def post_move_plugin(self, request, source_placeholder, target_placeholder, plugin):
page = target_placeholder.page
if page and is_installed('reversion'):
self.cleanup_history(page)
helpers.make_revision_with_plugins(page, request.user, _(u"Plugins were moved"))
def post_delete_plugin(self, request, plugin):
plugin_name = force_text(plugin_pool.get_plugin(plugin.plugin_type).name)
page = plugin.placeholder.page
if page:
page.save()
comment = _("%(plugin_name)s plugin at position %(position)s in %(placeholder)s was deleted.") % {
'plugin_name': plugin_name,
'position': plugin.position,
'placeholder': plugin.placeholder,
}
if is_installed('reversion'):
self.cleanup_history(page)
helpers.make_revision_with_plugins(page, request.user, comment)
def post_clear_placeholder(self, request, placeholder):
page = placeholder.page
if page:
page.save()
comment = _('All plugins in the placeholder "%(name)s" were deleted.') % {
'name': force_text(placeholder)
}
if is_installed('reversion'):
self.cleanup_history(page)
helpers.make_revision_with_plugins(page, request.user, comment)
def get_placeholder_template(self, request, placeholder):
page = placeholder.page
if page:
return page.get_template()
def changelist_view(self, request, extra_context=None):
"The 'change list' admin view for this model."
from django.contrib.admin.views.main import ERROR_FLAG
opts = self.model._meta
app_label = opts.app_label
if not self.has_change_permission(request, None):
return HttpResponseForbidden(force_text(_("You do not have permission to change pages.")))
try:
cl = CMSChangeList(request, self.model, self.list_display, self.list_display_links, self.list_filter,
self.date_hierarchy, self.search_fields, self.list_select_related, self.list_per_page,
self.list_max_show_all, self.list_editable, self)
except IncorrectLookupParameters:
# Wacky lookup parameters were given, so redirect to the main
# changelist page, without parameters, and pass an 'invalid=1'
# parameter via the query string. If wacky parameters were given and
# the 'invalid=1' parameter was already in the query string, something
# is screwed up with the database, so display an error page.
if ERROR_FLAG in request.GET.keys():
return render_to_response('admin/invalid_setup.html', {'title': _('Database error')})
return HttpResponseRedirect(request.path_info + '?' + ERROR_FLAG + '=1')
cl.set_items(request)
site_id = request.GET.get('site__exact', None)
if site_id is None:
site_id = current_site(request).pk
site_id = int(site_id)
# languages
languages = get_language_list(site_id)
        # parse the cookie that stores which page trees have
        # already been opened and extract the page IDs
djangocms_nodes_open = request.COOKIES.get('djangocms_nodes_open', '')
raw_nodes = unquote(djangocms_nodes_open).split(',')
try:
open_menu_trees = [int(c.split('page_', 1)[1]) for c in raw_nodes]
except IndexError:
open_menu_trees = []
# Language may be present in the GET dictionary but empty
language = request.GET.get('language', get_language())
if not language:
language = get_language()
context = {
'title': cl.title,
'is_popup': cl.is_popup,
'cl': cl,
'opts': opts,
'has_add_permission': self.has_add_permission(request),
'root_path': admin_reverse('index'),
'app_label': app_label,
'preview_language': language,
'CMS_MEDIA_URL': get_cms_setting('MEDIA_URL'),
'CMS_PERMISSION': get_cms_setting('PERMISSION'),
'DEBUG': settings.DEBUG,
'site_languages': languages,
'open_menu_trees': open_menu_trees,
}
if is_installed('reversion'):
context['has_recover_permission'] = self.has_recover_permission(request)
context['has_change_permission'] = self.has_change_permission(request)
context.update(extra_context or {})
return render_to_response(self.change_list_template or [
'admin/%s/%s/change_list.html' % (app_label, opts.object_name.lower()),
'admin/%s/change_list.html' % app_label,
'admin/change_list.html'
], context, context_instance=RequestContext(request))
def recoverlist_view(self, request, extra_context=None):
if not self.has_recover_permission(request):
raise PermissionDenied
return super(PageAdmin, self).recoverlist_view(request, extra_context)
def recover_view(self, request, version_id, extra_context=None):
if not self.has_recover_permission(request):
raise PermissionDenied
extra_context = self.update_language_tab_context(request, None, extra_context)
return super(PageAdmin, self).recover_view(request, version_id, extra_context)
def revision_view(self, request, object_id, version_id, extra_context=None):
if not self.has_change_permission(request, Page.objects.get(pk=object_id)):
raise PermissionDenied
extra_context = self.update_language_tab_context(request, None, extra_context)
response = super(PageAdmin, self).revision_view(request, object_id, version_id, extra_context)
return response
def history_view(self, request, object_id, extra_context=None):
if not self.has_change_permission(request, Page.objects.get(pk=object_id)):
raise PermissionDenied
extra_context = self.update_language_tab_context(request, None, extra_context)
return super(PageAdmin, self).history_view(request, object_id, extra_context)
def render_revision_form(self, request, obj, version, context, revert=False, recover=False):
# reset parent to null if parent is not found
if version.field_dict['parent']:
try:
Page.objects.get(pk=version.field_dict['parent'])
except:
if revert and obj.parent_id != int(version.field_dict['parent']):
version.field_dict['parent'] = obj.parent_id
if recover:
obj.parent = None
obj.parent_id = None
version.field_dict['parent'] = None
obj.version = version
return super(PageAdmin, self).render_revision_form(request, obj, version, context, revert, recover)
@require_POST
def undo(self, request, object_id):
if not is_installed('reversion'):
return HttpResponseBadRequest('django reversion not installed')
from reversion.models import Revision
from cms.utils.page_resolver import is_valid_url
import reversion
page = get_object_or_404(Page, pk=object_id)
old_titles = list(page.title_set.all())
if not page.publisher_is_draft:
page = page.publisher_draft
if not page.has_change_permission(request):
return HttpResponseForbidden(force_text(_("You do not have permission to change this page")))
versions = reversion.get_for_object(page)
if page.revision_id:
current_revision = Revision.objects.get(pk=page.revision_id)
else:
try:
current_version = versions[0]
except IndexError:
return HttpResponseBadRequest("no current revision found")
current_revision = current_version.revision
try:
previous_version = versions.filter(revision__pk__lt=current_revision.pk)[0]
except IndexError:
return HttpResponseBadRequest("no previous revision found")
previous_revision = previous_version.revision
# clear all plugins
placeholders = page.placeholders.all()
placeholder_ids = []
for placeholder in placeholders:
placeholder_ids.append(placeholder.pk)
plugins = CMSPlugin.objects.filter(placeholder__in=placeholder_ids).order_by('-depth')
for plugin in plugins:
plugin._no_reorder = True
plugin.delete()
# TODO: delete placeholders instead of finding duplicates for 3.1
#page.placeholders.all().delete()
previous_revision.revert(True)
rev_page = get_object_or_404(Page, pk=page.pk)
rev_page.revision_id = previous_revision.pk
rev_page.publisher_public_id = page.publisher_public_id
rev_page.save()
new_placeholders = rev_page.placeholders.all()
slots = {}
for new_ph in new_placeholders:
if not new_ph.slot in slots:
slots[new_ph.slot] = new_ph
else:
if new_ph in placeholder_ids:
new_ph.delete()
elif slots[new_ph.slot] in placeholder_ids:
slots[new_ph.slot].delete()
new_titles = rev_page.title_set.all()
for title in new_titles:
try:
is_valid_url(title.path, rev_page)
except ValidationError:
for old_title in old_titles:
if old_title.language == title.language:
title.slug = old_title.slug
title.save()
messages.error(request, _("Page reverted but slug stays the same because of url collisions."))
return HttpResponse("ok")
@require_POST
def redo(self, request, object_id):
if not is_installed('reversion'):
return HttpResponseBadRequest('django reversion not installed')
from reversion.models import Revision
import reversion
from cms.utils.page_resolver import is_valid_url
page = get_object_or_404(Page, pk=object_id)
old_titles = list(page.title_set.all())
if not page.publisher_is_draft:
page = page.publisher_draft
if not page.has_change_permission(request):
return HttpResponseForbidden(force_text(_("You do not have permission to change this page")))
versions = reversion.get_for_object(page)
if page.revision_id:
current_revision = Revision.objects.get(pk=page.revision_id)
else:
try:
current_version = versions[0]
except IndexError:
return HttpResponseBadRequest("no current revision found")
current_revision = current_version.revision
try:
previous_version = versions.filter(revision__pk__gt=current_revision.pk).order_by('pk')[0]
except IndexError:
return HttpResponseBadRequest("no next revision found")
next_revision = previous_version.revision
# clear all plugins
placeholders = page.placeholders.all()
placeholder_ids = []
for placeholder in placeholders:
placeholder_ids.append(placeholder.pk)
plugins = CMSPlugin.objects.filter(placeholder__in=placeholder_ids).order_by('-depth')
for plugin in plugins:
plugin._no_reorder = True
plugin.delete()
# TODO: 3.1 remove the placeholder matching from below and just delete them
#page.placeholders.all().delete()
next_revision.revert(True)
rev_page = get_object_or_404(Page, pk=page.pk)
rev_page.revision_id = next_revision.pk
rev_page.publisher_public_id = page.publisher_public_id
rev_page.save()
new_placeholders = rev_page.placeholders.all()
slots = {}
for new_ph in new_placeholders:
if not new_ph.slot in slots:
slots[new_ph.slot] = new_ph
else:
if new_ph in placeholder_ids:
new_ph.delete()
elif slots[new_ph.slot] in placeholder_ids:
slots[new_ph.slot].delete()
new_titles = rev_page.title_set.all()
for title in new_titles:
try:
is_valid_url(title.path, rev_page)
except ValidationError:
for old_title in old_titles:
if old_title.language == title.language:
title.slug = old_title.slug
title.save()
messages.error(request, _("Page reverted but slug stays the same because of url collisions."))
return HttpResponse("ok")
@require_POST
@create_revision()
def change_template(self, request, object_id):
page = get_object_or_404(Page, pk=object_id)
if not page.has_change_permission(request):
return HttpResponseForbidden(force_text(_("You do not have permission to change the template")))
to_template = request.POST.get("template", None)
if to_template not in dict(get_cms_setting('TEMPLATES')):
return HttpResponseBadRequest(force_text(_("Template not valid")))
page.template = to_template
page.save()
if is_installed('reversion'):
message = _("Template changed to %s") % dict(get_cms_setting('TEMPLATES'))[to_template]
self.cleanup_history(page)
helpers.make_revision_with_plugins(page, request.user, message)
return HttpResponse(force_text(_("The template was successfully changed")))
@transaction.atomic
def move_page(self, request, page_id, extra_context=None):
"""
Move the page to the requested target, at the given position
"""
target = request.POST.get('target', None)
position = request.POST.get('position', None)
if target is None or position is None:
return HttpResponseRedirect('../../')
try:
page = self.model.objects.get(pk=page_id)
target = self.model.objects.get(pk=target)
except self.model.DoesNotExist:
return jsonify_request(HttpResponseBadRequest("error"))
        # does the user have permission to do this?
if not page.has_move_page_permission(request) or \
not target.has_add_permission(request):
return jsonify_request(
HttpResponseForbidden(force_text(_("Error! You don't have permissions to move this page. Please reload the page"))))
# move page
page.move_page(target, position)
if is_installed('reversion'):
self.cleanup_history(page)
helpers.make_revision_with_plugins(page, request.user, _("Page moved"))
return jsonify_request(HttpResponse(admin_utils.render_admin_menu_item(request, page).content))
def get_permissions(self, request, page_id):
page = get_object_or_404(Page, id=page_id)
can_change_list = Page.permissions.get_change_id_list(request.user, page.site_id)
global_page_permissions = GlobalPagePermission.objects.filter(sites__in=[page.site_id])
page_permissions = PagePermission.objects.for_page(page)
all_permissions = list(global_page_permissions) + list(page_permissions)
        # can the user change global permissions?
has_global = permissions.has_global_change_permissions_permission(request)
permission_set = []
for permission in all_permissions:
if isinstance(permission, GlobalPagePermission):
if has_global:
permission_set.append([(True, True), permission])
else:
permission_set.append([(True, False), permission])
else:
if can_change_list == PagePermissionsPermissionManager.GRANT_ALL:
can_change = True
else:
can_change = permission.page_id in can_change_list
permission_set.append([(False, can_change), permission])
context = {
'page': page,
'permission_set': permission_set,
}
return render_to_response('admin/cms/page/permissions.html', context)
@require_POST
@transaction.atomic
def copy_language(self, request, page_id):
with create_revision():
source_language = request.POST.get('source_language')
target_language = request.POST.get('target_language')
page = Page.objects.get(pk=page_id)
placeholders = page.get_placeholders()
if not target_language or not target_language in get_language_list():
return HttpResponseBadRequest(force_text(_("Language must be set to a supported language!")))
for placeholder in placeholders:
plugins = list(
placeholder.cmsplugin_set.filter(language=source_language).order_by('path'))
if not self.has_copy_plugin_permission(request, placeholder, placeholder, plugins):
return HttpResponseForbidden(force_text(_('You do not have permission to copy these plugins.')))
copy_plugins.copy_plugins_to(plugins, placeholder, target_language)
if page and is_installed('reversion'):
message = _(u"Copied plugins from %(source_language)s to %(target_language)s") % {
'source_language': source_language, 'target_language': target_language}
self.cleanup_history(page)
helpers.make_revision_with_plugins(page, request.user, message)
return HttpResponse("ok")
@transaction.atomic
def copy_page(self, request, page_id, extra_context=None):
"""
Copy the page and all its plugins and descendants to the requested target, at the given position
"""
context = {}
page = Page.objects.get(pk=page_id)
target = request.POST.get('target', None)
position = request.POST.get('position', None)
site = request.POST.get('site', None)
if target is not None and position is not None and site is not None:
try:
target = self.model.objects.get(pk=target)
                # does the user have permission to copy this page under target?
assert target.has_add_permission(request)
site = Site.objects.get(pk=site)
except (ObjectDoesNotExist, AssertionError):
return HttpResponse("error")
#context.update({'error': _('Page could not been moved.')})
else:
try:
kwargs = {
'copy_permissions': request.REQUEST.get('copy_permissions', False),
}
page.copy_page(target, site, position, **kwargs)
return jsonify_request(HttpResponse("ok"))
except ValidationError:
exc = sys.exc_info()[1]
return jsonify_request(HttpResponseBadRequest(exc.messages))
context.update(extra_context or {})
return HttpResponseRedirect('../../')
@transaction.atomic
@create_revision()
def publish_page(self, request, page_id, language):
try:
page = Page.objects.get(id=page_id, publisher_is_draft=True)
except Page.DoesNotExist:
page = None
# ensure user has permissions to publish this page
all_published = True
if page:
if not page.has_publish_permission(request):
return HttpResponseForbidden(force_text(_("You do not have permission to publish this page")))
published = page.publish(language)
if not published:
all_published = False
statics = request.GET.get('statics', '')
if not statics and not page:
return Http404("No page or stack found for publishing.")
if statics:
            static_ids = statics.split(',')
for pk in static_ids:
static_placeholder = StaticPlaceholder.objects.get(pk=pk)
published = static_placeholder.publish(request, language)
if not published:
all_published = False
if page:
if all_published:
if page.get_publisher_state(language) == PUBLISHER_STATE_PENDING:
messages.warning(request, _("Page not published! A parent page is not published yet."))
else:
messages.info(request, _('The content was successfully published.'))
LogEntry.objects.log_action(
user_id=request.user.id,
content_type_id=ContentType.objects.get_for_model(Page).pk,
object_id=page_id,
object_repr=page.get_title(language),
action_flag=CHANGE,
)
else:
if page.get_publisher_state(language) == PUBLISHER_STATE_PENDING:
messages.warning(request, _("Page not published! A parent page is not published yet."))
else:
messages.warning(request, _("There was a problem publishing your content"))
if is_installed('reversion') and page:
self.cleanup_history(page, publish=True)
helpers.make_revision_with_plugins(page, request.user, PUBLISH_COMMENT)
# create a new publish reversion
if 'node' in request.REQUEST:
# if request comes from tree..
return admin_utils.render_admin_menu_item(request, page)
if 'redirect' in request.GET:
return HttpResponseRedirect(request.GET['redirect'])
referrer = request.META.get('HTTP_REFERER', '')
path = admin_reverse("cms_page_changelist")
if request.GET.get('redirect_language'):
path = "%s?language=%s&page_id=%s" % (path, request.GET.get('redirect_language'), request.GET.get('redirect_page_id'))
if admin_reverse('index') not in referrer:
if all_published:
if page:
if page.get_publisher_state(language) == PUBLISHER_STATE_PENDING:
path = page.get_absolute_url(language, fallback=True)
else:
public_page = Page.objects.get(publisher_public=page.pk)
path = '%s?%s' % (public_page.get_absolute_url(language, fallback=True), get_cms_setting('CMS_TOOLBAR_URL__EDIT_OFF'))
else:
path = '%s?%s' % (referrer, get_cms_setting('CMS_TOOLBAR_URL__EDIT_OFF'))
else:
path = '/?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_OFF')
return HttpResponseRedirect(path)
def cleanup_history(self, page, publish=False):
if is_installed('reversion') and page:
# delete revisions that are not publish revisions
from reversion.models import Version
content_type = ContentType.objects.get_for_model(Page)
# reversion 1.8+ removes type field, revision filtering must be based on comments
versions_qs = Version.objects.filter(content_type=content_type, object_id_int=page.pk)
history_limit = get_cms_setting("MAX_PAGE_HISTORY_REVERSIONS")
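            # Keep only the newest (history_limit - 1) non-publish revisions;
            # everything older is deleted below.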
deleted = []
for version in versions_qs.exclude(revision__comment__in=(INITIAL_COMMENT, PUBLISH_COMMENT)).order_by(
'-revision__pk')[history_limit - 1:]:
                if version.revision_id not in deleted:
revision = version.revision
revision.delete()
deleted.append(revision.pk)
            # delete all publish revisions beyond MAX_PAGE_PUBLISH_REVERSIONS
publish_limit = get_cms_setting("MAX_PAGE_PUBLISH_REVERSIONS")
if publish_limit and publish:
deleted = []
for version in versions_qs.filter(revision__comment__exact=PUBLISH_COMMENT).order_by(
'-revision__pk')[publish_limit - 1:]:
                if version.revision_id not in deleted:
revision = version.revision
revision.delete()
deleted.append(revision.pk)
@transaction.atomic
def unpublish(self, request, page_id, language):
"""
        Unpublish a language of a page.
"""
site = Site.objects.get_current()
page = get_object_or_404(Page, pk=page_id)
if not page.has_publish_permission(request):
return HttpResponseForbidden(force_text(_("You do not have permission to unpublish this page")))
if not page.publisher_public_id:
return HttpResponseForbidden(force_text(_("This page was never published")))
try:
page.unpublish(language)
message = _('The %(language)s page "%(page)s" was successfully unpublished') % {
'language': get_language_object(language, site)['name'], 'page': page}
messages.info(request, message)
LogEntry.objects.log_action(
user_id=request.user.id,
content_type_id=ContentType.objects.get_for_model(Page).pk,
object_id=page_id,
object_repr=page.get_title(),
action_flag=CHANGE,
change_message=message,
)
except RuntimeError:
exc = sys.exc_info()[1]
messages.error(request, exc.message)
except ValidationError:
exc = sys.exc_info()[1]
messages.error(request, exc.message)
path = admin_reverse("cms_page_changelist")
if request.GET.get('redirect_language'):
path = "%s?language=%s&page_id=%s" % (path, request.GET.get('redirect_language'), request.GET.get('redirect_page_id'))
return HttpResponseRedirect(path)
@transaction.atomic
def revert_page(self, request, page_id, language):
page = get_object_or_404(Page, id=page_id)
        # ensure user has permissions to change this page
if not page.has_change_permission(request):
return HttpResponseForbidden(force_text(_("You do not have permission to change this page")))
page.revert(language)
messages.info(request, _('The page "%s" was successfully reverted.') % page)
if 'node' in request.REQUEST:
# if request comes from tree..
return admin_utils.render_admin_menu_item(request, page)
referer = request.META.get('HTTP_REFERER', '')
path = '../../'
if admin_reverse('index') not in referer:
path = '%s?%s' % (referer.split('?')[0], get_cms_setting('CMS_TOOLBAR_URL__EDIT_OFF'))
return HttpResponseRedirect(path)
@create_revision()
def delete_translation(self, request, object_id, extra_context=None):
if 'language' in request.GET:
language = request.GET['language']
else:
language = get_language_from_request(request)
opts = Page._meta
titleopts = Title._meta
app_label = titleopts.app_label
pluginopts = CMSPlugin._meta
try:
obj = self.get_queryset(request).get(pk=unquote(object_id))
except self.model.DoesNotExist:
# Don't raise Http404 just yet, because we haven't checked
# permissions yet. We don't want an unauthenticated user to be able
# to determine whether a given object exists.
obj = None
if not self.has_delete_permission(request, obj):
return HttpResponseForbidden(force_text(_("You do not have permission to change this page")))
if obj is None:
raise Http404(
_('%(name)s object with primary key %(key)r does not exist.') % {
'name': force_text(opts.verbose_name),
'key': escape(object_id)
})
        if len(list(obj.get_languages())) <= 1:
raise Http404(_('There only exists one translation for this page'))
titleobj = get_object_or_404(Title, page__id=object_id, language=language)
saved_plugins = CMSPlugin.objects.filter(placeholder__page__id=object_id, language=language)
using = router.db_for_read(self.model)
kwargs = {
'admin_site': self.admin_site,
'user': request.user,
'using': using
}
deleted_objects, perms_needed = get_deleted_objects(
[titleobj],
titleopts,
**kwargs
)[:2]
to_delete_plugins, perms_needed_plugins = get_deleted_objects(
saved_plugins,
pluginopts,
**kwargs
)[:2]
deleted_objects.append(to_delete_plugins)
perms_needed = set(list(perms_needed) + list(perms_needed_plugins))
if request.method == 'POST':
if perms_needed:
raise PermissionDenied
            message = _('Title and plugins with language %(language)s were deleted') % {
'language': force_text(get_language_object(language)['name'])
}
self.log_change(request, titleobj, message)
messages.info(request, message)
titleobj.delete()
for p in saved_plugins:
p.delete()
public = obj.publisher_public
if public:
public.save()
if is_installed('reversion'):
self.cleanup_history(obj)
helpers.make_revision_with_plugins(obj, request.user, message)
if not self.has_change_permission(request, None):
return HttpResponseRedirect("../../../../")
return HttpResponseRedirect("../../")
context = {
"title": _("Are you sure?"),
"object_name": force_text(titleopts.verbose_name),
"object": titleobj,
"deleted_objects": deleted_objects,
"perms_lacking": perms_needed,
"opts": opts,
"root_path": admin_reverse('index'),
"app_label": app_label,
}
context.update(extra_context or {})
context_instance = RequestContext(request, current_app=self.admin_site.name)
return render_to_response(self.delete_confirmation_template or [
"admin/%s/%s/delete_confirmation.html" % (app_label, titleopts.object_name.lower()),
"admin/%s/delete_confirmation.html" % app_label,
"admin/delete_confirmation.html"
], context, context_instance=context_instance)
def preview_page(self, request, object_id, language):
"""Redirecting preview function based on draft_id
"""
page = get_object_or_404(Page, id=object_id)
attrs = "?%s" % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')
attrs += "&language=" + language
with force_language(language):
url = page.get_absolute_url(language) + attrs
site = get_current_site(request)
if not site == page.site:
url = "http%s://%s%s" % ('s' if request.is_secure() else '',
page.site.domain, url)
return HttpResponseRedirect(url)
def change_innavigation(self, request, page_id):
"""
        Toggle the in_navigation flag of a page.
"""
page = get_object_or_404(Page, pk=page_id)
if page.has_change_permission(request):
page.toggle_in_navigation()
language = request.GET.get('language') or get_language_from_request(request)
return admin_utils.render_admin_menu_item(request, page, language=language)
return HttpResponseForbidden(force_text(_("You do not have permission to change this page's in_navigation status")))
def descendants(self, request, page_id, language):
"""
Get html for descendants of given page
Used for lazy loading pages in cms.changelist.js
        Permission checks are done in admin_utils.get_admin_menu_item_context
which is called by admin_utils.render_admin_menu_item.
"""
page = get_object_or_404(Page, pk=page_id)
return admin_utils.render_admin_menu_item(request, page,
template="admin/cms/page/tree/lazy_menu.html", language=language)
def add_page_type(self, request):
site = Site.objects.get_current()
language = request.GET.get('language') or get_language()
target = request.GET.get('copy_target')
type_root, created = Page.objects.get_or_create(reverse_id=PAGE_TYPES_ID, publisher_is_draft=True, site=site,
defaults={'in_navigation': False})
type_title, created = Title.objects.get_or_create(page=type_root, language=language, slug=PAGE_TYPES_ID,
defaults={'title': _('Page Types')})
url = add_url_parameters(admin_reverse('cms_page_add'), target=type_root.pk, position='first-child',
add_page_type=1, copy_target=target, language=language)
return HttpResponseRedirect(url)
def resolve(self, request):
if not request.user.is_staff:
return HttpResponse('', content_type='text/plain')
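        # First try to resolve the object recorded by the most recent admin
        # LogEntry whose primary key was stashed in the session.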
if request.session.get('cms_log_latest', False):
log = LogEntry.objects.get(pk=request.session['cms_log_latest'])
try:
obj = log.get_edited_object()
except (ObjectDoesNotExist, ValueError):
obj = None
del request.session['cms_log_latest']
if obj and obj.__class__ in toolbar_pool.get_watch_models() and hasattr(obj, 'get_absolute_url'):
try:
return HttpResponse(force_text(obj.get_absolute_url()), content_type='text/plain')
            except Exception:
pass
pk = request.REQUEST.get('pk')
full_model = request.REQUEST.get('model')
if pk and full_model:
app_label, model = full_model.split('.')
if pk and app_label:
ctype = ContentType.objects.get(app_label=app_label, model=model)
try:
instance = ctype.get_object_for_this_type(pk=pk)
except ctype.model_class().DoesNotExist:
return HttpResponse('/', content_type='text/plain')
return HttpResponse(force_text(instance.get_absolute_url()), content_type='text/plain')
return HttpResponse('', content_type='text/plain')
def lookup_allowed(self, key, *args, **kwargs):
if key == 'site__exact':
return True
return super(PageAdmin, self).lookup_allowed(key, *args, **kwargs)
def edit_title_fields(self, request, page_id, language):
title = Title.objects.get(page_id=page_id, language=language)
saved_successfully = False
raw_fields = request.GET.get("edit_fields", 'title')
edit_fields = [field for field in raw_fields.split(",") if field in self.title_frontend_editable_fields]
cancel_clicked = request.POST.get("_cancel", False)
opts = Title._meta
if not edit_fields:
# Defaults to title
edit_fields = ('title',)
if not has_generic_permission(title.page.pk, request.user, "change",
title.page.site.pk):
return HttpResponseForbidden(force_text(_("You do not have permission to edit this page")))
class PageTitleForm(django.forms.ModelForm):
"""
Dynamic form showing only the fields to be edited
"""
class Meta:
model = Title
fields = edit_fields
if not cancel_clicked and request.method == 'POST':
form = PageTitleForm(instance=title, data=request.POST)
if form.is_valid():
form.save()
saved_successfully = True
else:
form = PageTitleForm(instance=title)
admin_form = AdminForm(form, fieldsets=[(None, {'fields': edit_fields})], prepopulated_fields={},
model_admin=self)
media = self.media + admin_form.media
context = {
'CMS_MEDIA_URL': get_cms_setting('MEDIA_URL'),
'title': 'Title',
'plugin': title.page,
'plugin_id': title.page.id,
'adminform': admin_form,
'add': False,
'is_popup': True,
'media': media,
'opts': opts,
'change': True,
'save_as': False,
'has_add_permission': False,
'window_close_timeout': 10,
}
if cancel_clicked:
# cancel button was clicked
context.update({
'cancel': True,
})
return render_to_response('admin/cms/page/plugin/confirm_form.html', context, RequestContext(request))
if not cancel_clicked and request.method == 'POST' and saved_successfully:
return render_to_response('admin/cms/page/plugin/confirm_form.html', context, RequestContext(request))
return render_to_response('admin/cms/page/plugin/change_form.html', context, RequestContext(request))
def get_published_pagelist(self, *args, **kwargs):
"""
        This view is used by the PageSmartLinkWidget as the user types, to feed the autocomplete drop-down.
"""
request = args[0]
if request.is_ajax():
            query_term = request.GET.get('q', '').strip('/')
language_code = request.GET.get('language_code', settings.LANGUAGE_CODE)
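            # Match the query term against the title, path, menu title and
            # page title fields, restricted to the requested language.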
matching_published_pages = Page.objects.published().public().filter(
Q(title_set__title__icontains=query_term, title_set__language=language_code)
| Q(title_set__path__icontains=query_term, title_set__language=language_code)
| Q(title_set__menu_title__icontains=query_term, title_set__language=language_code)
| Q(title_set__page_title__icontains=query_term, title_set__language=language_code)
).distinct()
results = []
for page in matching_published_pages:
results.append(
{
'path': page.get_path(language=language_code),
'title': page.get_title(language=language_code),
'redirect_url': page.get_absolute_url(language=language_code)
}
)
return HttpResponse(json.dumps(results), content_type='application/json')
else:
return HttpResponseForbidden()
def add_plugin(self, *args, **kwargs):
with create_revision():
return super(PageAdmin, self).add_plugin(*args, **kwargs)
def copy_plugins(self, *args, **kwargs):
with create_revision():
return super(PageAdmin, self).copy_plugins(*args, **kwargs)
def edit_plugin(self, *args, **kwargs):
with create_revision():
return super(PageAdmin, self).edit_plugin(*args, **kwargs)
def move_plugin(self, *args, **kwargs):
with create_revision():
return super(PageAdmin, self).move_plugin(*args, **kwargs)
def delete_plugin(self, *args, **kwargs):
with create_revision():
return super(PageAdmin, self).delete_plugin(*args, **kwargs)
def clear_placeholder(self, *args, **kwargs):
with create_revision():
return super(PageAdmin, self).clear_placeholder(*args, **kwargs)
admin.site.register(Page, PageAdmin)
| donce/django-cms | cms/admin/pageadmin.py | Python | bsd-3-clause | 67,669 | 0.002941 |
#!/usr/bin/python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unittest for instance_setup.py module."""
import subprocess
from google_compute_engine.instance_setup import instance_setup
from google_compute_engine.test_compat import mock
from google_compute_engine.test_compat import unittest
class InstanceSetupTest(unittest.TestCase):
def setUp(self):
self.mock_instance_config = mock.Mock()
self.mock_logger = mock.Mock()
self.mock_setup = mock.create_autospec(instance_setup.InstanceSetup)
self.mock_setup.debug = False
self.mock_setup.instance_config = self.mock_instance_config
self.mock_setup.logger = self.mock_logger
@mock.patch('google_compute_engine.instance_setup.instance_setup.instance_config')
@mock.patch('google_compute_engine.instance_setup.instance_setup.metadata_watcher')
@mock.patch('google_compute_engine.instance_setup.instance_setup.logger')
def testInstanceSetup(self, mock_logger, mock_watcher, mock_config):
mock_setup = mock.create_autospec(instance_setup.InstanceSetup)
mocks = mock.Mock()
mocks.attach_mock(mock_logger, 'logger')
mocks.attach_mock(mock_watcher, 'watcher')
mocks.attach_mock(mock_config, 'config')
mocks.attach_mock(mock_setup, 'setup')
mock_logger_instance = mock.Mock()
mock_logger.Logger.return_value = mock_logger_instance
mock_watcher_instance = mock.Mock()
mock_watcher_instance.GetMetadata.return_value = {'hello': 'world'}
mock_watcher.MetadataWatcher.return_value = mock_watcher_instance
mock_config_instance = mock.Mock()
mock_config_instance.GetOptionBool.return_value = True
mock_config.InstanceConfig.return_value = mock_config_instance
instance_setup.InstanceSetup.__init__(mock_setup)
expected_calls = [
# Setup and reading the configuration file.
mock.call.logger.Logger(
name=mock.ANY, debug=False, facility=mock.ANY),
mock.call.watcher.MetadataWatcher(logger=mock_logger_instance),
mock.call.config.InstanceConfig(),
# Setup for local SSD.
mock.call.config.InstanceConfig().GetOptionBool(
'InstanceSetup', 'optimize_local_ssd'),
mock.call.setup._RunScript('optimize_local_ssd'),
# Setup for multiqueue virtio driver.
mock.call.config.InstanceConfig().GetOptionBool(
'InstanceSetup', 'set_multiqueue'),
mock.call.setup._RunScript('set_multiqueue'),
# Check network access for reaching the metadata server.
mock.call.config.InstanceConfig().GetOptionBool(
'InstanceSetup', 'network_enabled'),
mock.call.watcher.MetadataWatcher().GetMetadata(),
# Setup for SSH host keys if necessary.
mock.call.config.InstanceConfig().GetOptionBool(
'InstanceSetup', 'set_host_keys'),
mock.call.setup._SetSshHostKeys(),
# Setup for the boto config if necessary.
mock.call.config.InstanceConfig().GetOptionBool(
'InstanceSetup', 'set_boto_config'),
mock.call.setup._SetupBotoConfig(),
# Write the updated config file.
mock.call.config.InstanceConfig().WriteConfig(),
]
self.assertEqual(mocks.mock_calls, expected_calls)
self.assertEqual(mock_setup.metadata_dict, {'hello': 'world'})
@mock.patch('google_compute_engine.instance_setup.instance_setup.instance_config')
@mock.patch('google_compute_engine.instance_setup.instance_setup.metadata_watcher')
@mock.patch('google_compute_engine.instance_setup.instance_setup.logger')
def testInstanceSetupException(self, mock_logger, mock_watcher, mock_config):
mock_setup = mock.create_autospec(instance_setup.InstanceSetup)
mocks = mock.Mock()
mocks.attach_mock(mock_logger, 'logger')
mocks.attach_mock(mock_watcher, 'watcher')
mocks.attach_mock(mock_config, 'config')
mocks.attach_mock(mock_setup, 'setup')
mock_logger_instance = mock.Mock()
mock_logger.Logger.return_value = mock_logger_instance
mock_config_instance = mock.Mock()
mock_config_instance.GetOptionBool.return_value = False
mock_config_instance.WriteConfig.side_effect = IOError('Test Error')
mock_config.InstanceConfig.return_value = mock_config_instance
instance_setup.InstanceSetup.__init__(mock_setup)
expected_calls = [
mock.call.logger.Logger(
name=mock.ANY, debug=False, facility=mock.ANY),
mock.call.watcher.MetadataWatcher(logger=mock_logger_instance),
mock.call.config.InstanceConfig(),
mock.call.config.InstanceConfig().GetOptionBool(
'InstanceSetup', 'optimize_local_ssd'),
mock.call.config.InstanceConfig().GetOptionBool(
'InstanceSetup', 'set_multiqueue'),
mock.call.config.InstanceConfig().GetOptionBool(
'InstanceSetup', 'network_enabled'),
mock.call.config.InstanceConfig().WriteConfig(),
mock.call.logger.Logger().warning('Test Error'),
]
self.assertEqual(mocks.mock_calls, expected_calls)
self.assertIsNone(mock_setup.metadata_dict)
@mock.patch('google_compute_engine.instance_setup.instance_setup.subprocess')
def testRunScript(self, mock_subprocess):
mock_readline = mock.Mock()
mock_readline.side_effect = [bytes(b'a\n'), bytes(b'b\n'), bytes(b'')]
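    # The trailing empty byte string marks end-of-output, so the readline
    # loop inside _RunScript stops.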
mock_stdout = mock.Mock()
mock_stdout.readline = mock_readline
mock_process = mock.Mock()
mock_process.poll.return_value = 0
mock_process.stdout = mock_stdout
mock_subprocess.Popen.return_value = mock_process
script = '/tmp/script.py'
instance_setup.InstanceSetup._RunScript(self.mock_setup, script)
expected_calls = [mock.call('a'), mock.call('b')]
self.assertEqual(self.mock_logger.info.mock_calls, expected_calls)
mock_subprocess.Popen.assert_called_once_with(
script, shell=True, stderr=mock_subprocess.STDOUT,
stdout=mock_subprocess.PIPE)
mock_process.poll.assert_called_once_with()
def testGetInstanceId(self):
self.mock_setup.metadata_dict = {'instance': {'attributes': {}, 'id': 123}}
self.assertEqual(
instance_setup.InstanceSetup._GetInstanceId(self.mock_setup), '123')
self.mock_logger.warning.assert_not_called()
def testGetInstanceIdNotFound(self):
self.mock_setup.metadata_dict = {'instance': {'attributes': {}}}
self.assertIsNone(
instance_setup.InstanceSetup._GetInstanceId(self.mock_setup))
self.assertEqual(self.mock_logger.warning.call_count, 1)
@mock.patch('google_compute_engine.instance_setup.instance_setup.file_utils.SetPermissions')
@mock.patch('google_compute_engine.instance_setup.instance_setup.shutil.move')
@mock.patch('google_compute_engine.instance_setup.instance_setup.subprocess.check_call')
@mock.patch('google_compute_engine.instance_setup.instance_setup.tempfile.NamedTemporaryFile')
def testGenerateSshKey(
self, mock_tempfile, mock_call, mock_move, mock_permissions):
mocks = mock.Mock()
mocks.attach_mock(mock_tempfile, 'tempfile')
mocks.attach_mock(mock_call, 'call')
mocks.attach_mock(mock_move, 'move')
mocks.attach_mock(mock_permissions, 'permissions')
mocks.attach_mock(self.mock_logger, 'logger')
key_type = 'key-type'
key_dest = '/key/dest'
temp_dest = '/tmp/dest'
mock_tempfile.return_value = mock_tempfile
mock_tempfile.__enter__.return_value.name = temp_dest
instance_setup.InstanceSetup._GenerateSshKey(
self.mock_setup, key_type, key_dest)
expected_calls = [
mock.call.tempfile(prefix=key_type, delete=True),
mock.call.tempfile.__enter__(),
mock.call.tempfile.__exit__(None, None, None),
mock.call.logger.info(mock.ANY, key_dest),
mock.call.call(
['ssh-keygen', '-t', key_type, '-f', temp_dest, '-N', '', '-q']),
mock.call.move(temp_dest, key_dest),
mock.call.move('%s.pub' % temp_dest, '%s.pub' % key_dest),
mock.call.permissions(key_dest, mode=0o600),
mock.call.permissions('%s.pub' % key_dest, mode=0o644),
]
self.assertEqual(mocks.mock_calls, expected_calls)
@mock.patch('google_compute_engine.instance_setup.instance_setup.subprocess.check_call')
def testGenerateSshKeyProcessError(self, mock_call):
key_type = 'key-type'
key_dest = '/key/dest'
mock_call.side_effect = subprocess.CalledProcessError(1, 'Test')
instance_setup.InstanceSetup._GenerateSshKey(
self.mock_setup, key_type, key_dest)
self.mock_logger.info.assert_called_once_with(mock.ANY, key_dest)
self.mock_logger.warning.assert_called_once_with(mock.ANY, key_dest)
@mock.patch('google_compute_engine.instance_setup.instance_setup.subprocess.call')
@mock.patch('google_compute_engine.instance_setup.instance_setup.os.path.exists')
def testStartSshdSysVinit(self, mock_exists, mock_call):
mocks = mock.Mock()
mocks.attach_mock(mock_exists, 'exists')
mocks.attach_mock(mock_call, 'call')
mock_exists.side_effect = [False, False, True]
instance_setup.InstanceSetup._StartSshd(self.mock_setup)
expected_calls = [
mock.call.exists('/bin/systemctl'),
mock.call.exists('/etc/init.d/ssh'),
mock.call.exists('/etc/init/ssh.conf'),
mock.call.call(['service', 'ssh', 'start']),
mock.call.call(['service', 'ssh', 'reload']),
]
self.assertEqual(mocks.mock_calls, expected_calls)
@mock.patch('google_compute_engine.instance_setup.instance_setup.subprocess.call')
@mock.patch('google_compute_engine.instance_setup.instance_setup.os.path.exists')
def testStartSshdUpstart(self, mock_exists, mock_call):
mocks = mock.Mock()
mocks.attach_mock(mock_exists, 'exists')
mocks.attach_mock(mock_call, 'call')
mock_exists.side_effect = [False, False, False, False, True]
instance_setup.InstanceSetup._StartSshd(self.mock_setup)
expected_calls = [
mock.call.exists('/bin/systemctl'),
mock.call.exists('/etc/init.d/ssh'),
mock.call.exists('/etc/init/ssh.conf'),
mock.call.exists('/etc/init.d/sshd'),
mock.call.exists('/etc/init/sshd.conf'),
mock.call.call(['service', 'sshd', 'start']),
mock.call.call(['service', 'sshd', 'reload']),
]
self.assertEqual(mocks.mock_calls, expected_calls)
@mock.patch('google_compute_engine.instance_setup.instance_setup.subprocess.call')
@mock.patch('google_compute_engine.instance_setup.instance_setup.os.path.exists')
def testStartSshdSystemd(self, mock_exists, mock_call):
mocks = mock.Mock()
mocks.attach_mock(mock_exists, 'exists')
mocks.attach_mock(mock_call, 'call')
mock_exists.return_value = True
instance_setup.InstanceSetup._StartSshd(self.mock_setup)
expected_calls = [mock.call.exists('/bin/systemctl')]
self.assertEqual(mocks.mock_calls, expected_calls)
def testSetSshHostKeys(self):
self.mock_instance_config.GetOptionString.return_value = '123'
mock_instance_id = mock.Mock()
mock_instance_id.return_value = '123'
self.mock_setup._GetInstanceId = mock_instance_id
instance_setup.InstanceSetup._SetSshHostKeys(self.mock_setup)
self.mock_instance_config.SetOption.assert_not_called()
@mock.patch('google_compute_engine.instance_setup.instance_setup.os.listdir')
def testSetSshHostKeysFirstBoot(self, mock_listdir):
self.mock_instance_config.GetOptionString.return_value = None
mock_instance_id = mock.Mock()
mock_instance_id.return_value = '123'
self.mock_setup._GetInstanceId = mock_instance_id
mock_generate_key = mock.Mock()
self.mock_setup._GenerateSshKey = mock_generate_key
mock_listdir.return_value = [
'ssh_config',
'ssh_host_rsa_key',
'ssh_host_dsa_key.pub',
'ssh_host_ed25519_key',
'ssh_host_ed25519_key.pub',
'ssh_host_rsa_key',
'ssh_host_rsa_key.pub',
]
instance_setup.InstanceSetup._SetSshHostKeys(self.mock_setup)
expected_calls = [
mock.call('rsa', '/etc/ssh/ssh_host_rsa_key'),
mock.call('ed25519', '/etc/ssh/ssh_host_ed25519_key'),
mock.call('rsa', '/etc/ssh/ssh_host_rsa_key'),
]
self.assertEqual(mock_generate_key.mock_calls, expected_calls)
self.mock_instance_config.SetOption.assert_called_once_with(
'Instance', 'instance_id', '123')
def testGetNumericProjectId(self):
self.mock_setup.metadata_dict = {
'project': {
'attributes': {},
'numericProjectId': 123,
}
}
self.assertEqual(
instance_setup.InstanceSetup._GetNumericProjectId(self.mock_setup),
'123')
self.mock_logger.warning.assert_not_called()
def testGetNumericProjectIdNotFound(self):
self.mock_setup.metadata_dict = {'project': {'attributes': {}}}
self.assertIsNone(
instance_setup.InstanceSetup._GetNumericProjectId(self.mock_setup))
self.assertEqual(self.mock_logger.warning.call_count, 1)
@mock.patch('google_compute_engine.instance_setup.instance_setup.boto_config.BotoConfig')
def testSetupBotoConfig(self, mock_boto):
mock_project_id = mock.Mock()
mock_project_id.return_value = '123'
self.mock_setup._GetNumericProjectId = mock_project_id
instance_setup.InstanceSetup._SetupBotoConfig(self.mock_setup)
mock_boto.assert_called_once_with('123', debug=False)
@mock.patch('google_compute_engine.instance_setup.instance_setup.boto_config.BotoConfig')
def testSetupBotoConfigLocked(self, mock_boto):
mock_boto.side_effect = IOError('Test Error')
instance_setup.InstanceSetup._SetupBotoConfig(self.mock_setup)
self.mock_logger.warning.assert_called_once_with('Test Error')
if __name__ == '__main__':
unittest.main()
| Sarsate/compute-image-packages | google_compute_engine/instance_setup/tests/instance_setup_test.py | Python | apache-2.0 | 14,219 | 0.004009 |
#------------------------------------------------------------------------------
#
# Copyright (c) 2005, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
#
# Author: David C. Morrill
# Date: 06/21/2002
#
# Refactored into a separate module: 07/04/2003
#
#------------------------------------------------------------------------------
""" Defines common, low-level capabilities needed by the Traits package.
"""
#-------------------------------------------------------------------------------
# Imports:
#-------------------------------------------------------------------------------
from __future__ import absolute_import
import os
import sys
from os import getcwd
from os.path import dirname, exists, join
from string import lowercase, uppercase
from types import (ListType, TupleType, DictType, StringType, UnicodeType,
IntType, LongType, FloatType, ComplexType, ClassType, TypeType)
# Set the Python version being used:
vi = sys.version_info
python_version = vi[0] + (float( vi[1] ) / 10.0)
try:
from traits.etsconfig.api import ETSConfig
except:
# If the ETSConfig package is not available, fake it:
class ETSConfig ( object ):
#-----------------------------------------------------------------------
# 'object' interface:
#-----------------------------------------------------------------------
def __init__ ( self ):
""" Constructor.
Note that this constructor can only ever be called from within
this module, since we don't expose the class.
"""
# Shadow attributes for properties:
self._application_data = None
self._toolkit = None
return
#-----------------------------------------------------------------------
# 'ETSConfig' interface:
#-----------------------------------------------------------------------
#-- Property Implementations -------------------------------------------
def _get_application_data ( self ):
""" Property getter.
This is a directory that applications and packages can safely
write non-user accessible data to i.e. configuration
information, preferences etc.
Do not put anything in here that the user might want to navigate
to (e.g. projects, user data files, etc).
The actual location differs between operating systems.
"""
if self._application_data is None:
self._application_data = self._initialize_application_data()
return self._application_data
def _set_application_data ( self, application_data ):
""" Property setter.
"""
self._application_data = application_data
application_data = property( _get_application_data,
_set_application_data )
def _get_toolkit ( self ):
"""
Property getter for the GUI toolkit. The value returned is, in
order of preference: the value set by the application; the value
passed on the command line using the '-toolkit' option; the value
specified by the 'ETS_TOOLKIT' environment variable; otherwise the
empty string.
"""
if self._toolkit is None:
self._toolkit = self._initialize_toolkit()
return self._toolkit
def _set_toolkit ( self, toolkit ):
"""
Property setter for the GUI toolkit. The toolkit can be set more
than once, but only if it is the same one each time. An application
that is written for a particular toolkit can explicitly set it
before any other module that gets the value is imported.
"""
if self._toolkit and (self._toolkit != toolkit):
raise ValueError( 'Cannot set toolkit to %s because it has '
'already been set to %s' % ( toolkit, self._toolkit ) )
self._toolkit = toolkit
return
toolkit = property( _get_toolkit, _set_toolkit )
#-- Private Methods ----------------------------------------------------
def _initialize_application_data ( self ):
""" Initializes the (default) application data directory.
"""
if sys.platform == 'win32':
environment_variable = 'APPDATA'
directory_name = 'Enthought'
else:
environment_variable = 'HOME'
directory_name = '.enthought'
# Lookup the environment variable:
parent_directory = os.environ.get( environment_variable, None )
if parent_directory is None:
raise ValueError( 'Environment variable "%s" not set' %
environment_variable )
application_data = os.path.join( parent_directory, directory_name )
# If a file already exists with this name then make sure that it is
# a directory!
if os.path.exists( application_data ):
if not os.path.isdir( application_data ):
raise ValueError( 'File "%s" already exists' %
application_data )
# Otherwise, create the directory:
else:
os.makedirs( application_data )
return application_data
def _initialize_toolkit ( self ):
""" Initializes the toolkit.
"""
# We handle the command line option even though it doesn't have the
# highest precedence because we always want to remove it from the
# command line:
if '-toolkit' in sys.argv:
opt_idx = sys.argv.index( '-toolkit' )
try:
opt_toolkit = sys.argv[ opt_idx + 1 ]
except IndexError:
raise ValueError( 'The -toolkit command line argument must '
'be followed by a toolkit name' )
# Remove the option:
del sys.argv[ opt_idx: opt_idx + 1 ]
else:
opt_toolkit = None
if self._toolkit is not None:
toolkit = self._toolkit
elif opt_toolkit is not None:
toolkit = opt_toolkit
else:
toolkit = os.environ.get( 'ETS_TOOLKIT', '' )
return toolkit
ETSConfig = ETSConfig()
#-------------------------------------------------------------------------------
# Provide Python 2.3+ compatible definitions (if necessary):
#-------------------------------------------------------------------------------
try:
from types import BooleanType
except ImportError:
BooleanType = IntType
def _enumerate ( seq ):
for i in xrange( len( seq) ):
yield i, seq[i]
try:
enumerate = enumerate
except:
enumerate = _enumerate
del _enumerate
#-------------------------------------------------------------------------------
# Constants:
#-------------------------------------------------------------------------------
ClassTypes = ( ClassType, TypeType )
SequenceTypes = ( ListType, TupleType )
ComplexTypes = ( float, int )
TypeTypes = ( StringType, UnicodeType, IntType, LongType, FloatType,
ComplexType, ListType, TupleType, DictType, BooleanType )
TraitNotifier = '__trait_notifier__'
# The standard Traits property cache prefix:
TraitsCache = '_traits_cache_'
#-------------------------------------------------------------------------------
# Singleton 'Uninitialized' object:
#-------------------------------------------------------------------------------
Uninitialized = None
class _Uninitialized(object):
""" The singleton value of this class represents the uninitialized state
of a trait and is specified as the 'old' value in the trait change
notification that occurs when the value of a trait is read before being
set.
"""
def __new__(cls):
if Uninitialized is not None:
return Uninitialized
else:
self = object.__new__(cls)
return self
def __repr__(self):
return '<uninitialized>'
def __reduce_ex__(self, protocol):
return (_Uninitialized, ())
#: When the first reference to a trait is a 'get' reference, the default value of
#: the trait is implicitly assigned and returned as the value of the trait.
#: Because of this implicit assignment, a trait change notification is
#: generated with the Uninitialized object as the 'old' value of the trait, and
#: the default trait value as the 'new' value. This allows other parts of the
#: traits package to recognize the assignment as the implicit default value
#: assignment, and treat it specially.
Uninitialized = _Uninitialized()
#-------------------------------------------------------------------------------
# Singleton 'Undefined' object (used as undefined trait name and/or value):
#-------------------------------------------------------------------------------
Undefined = None
class _Undefined(object):
""" Singleton 'Undefined' object (used as undefined trait name and/or value)
"""
def __new__(cls):
if Undefined is not None:
return Undefined
else:
self = object.__new__(cls)
return self
def __repr__(self):
return '<undefined>'
def __reduce_ex__(self, protocol):
return (_Undefined, ())
def __eq__(self, other):
return type(self) is type(other)
def __ne__(self, other):
return type(self) is not type(other)
#: Singleton object that indicates that a trait attribute has not yet had a
#: value set (i.e., its value is undefined). This object is used instead of
#: None, because None often has other meanings, such as that a value is not
#: used. When a trait attribute is first assigned a value, and its associated
#: trait notification handlers are called, Undefined is passed as the *old*
#: parameter, to indicate that the attribute previously had no value.
Undefined = _Undefined()
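# Illustrative example (not part of the original module): a change handler can
# detect the first assignment to a trait by comparing the old value against
# Undefined::
#
#     def _value_changed(self, old, new):
#         if old is Undefined:
#             pass  # the attribute had no previous value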
# Tell the C-base code about singleton 'Undefined' and 'Uninitialized' objects:
from . import ctraits
ctraits._undefined( Undefined, Uninitialized )
#-------------------------------------------------------------------------------
# Singleton 'Missing' object (used as missing method argument marker):
#-------------------------------------------------------------------------------
class Missing ( object ):
""" Singleton 'Missing' object (used as missing method argument marker).
"""
def __repr__ ( self ):
return '<missing>'
#: Singleton object that indicates that a method argument is missing from a
#: type-checked method signature.
Missing = Missing()
#-------------------------------------------------------------------------------
# Singleton 'Self' object (used as object reference to current 'object'):
#-------------------------------------------------------------------------------
class Self ( object ):
""" Singleton 'Self' object (used as object reference to current 'object').
"""
def __repr__ ( self ):
return '<self>'
#: Singleton object that references the current 'object'.
Self = Self()
#-------------------------------------------------------------------------------
# Define a special 'string' coercion function:
#-------------------------------------------------------------------------------
def strx ( arg ):
""" Wraps the built-in str() function to raise a TypeError if the
argument is not of a type in StringTypes.
"""
if type( arg ) in StringTypes:
return str( arg )
raise TypeError
#-------------------------------------------------------------------------------
# Constants:
#-------------------------------------------------------------------------------
StringTypes = ( StringType, UnicodeType, IntType, LongType, FloatType,
ComplexType )
#-------------------------------------------------------------------------------
# Define a mapping of coercable types:
#-------------------------------------------------------------------------------
# Mapping of coercable types.
CoercableTypes = {
LongType: ( 11, long, int ),
FloatType: ( 11, float, int ),
ComplexType: ( 11, complex, float, int ),
UnicodeType: ( 11, unicode, str )
}
#-------------------------------------------------------------------------------
# Return a string containing the class name of an object with the correct
# article (a or an) preceding it (e.g. 'an Image', 'a PlotValue'):
#-------------------------------------------------------------------------------
def class_of ( object ):
""" Returns a string containing the class name of an object with the
correct indefinite article ('a' or 'an') preceding it (e.g., 'an Image',
'a PlotValue').
"""
if isinstance( object, basestring ):
return add_article( object )
return add_article( object.__class__.__name__ )
#-------------------------------------------------------------------------------
# Return a string containing the right article (i.e. 'a' or 'an') prefixed to
# a specified string:
#-------------------------------------------------------------------------------
def add_article ( name ):
""" Returns a string containing the correct indefinite article ('a' or 'an')
prefixed to the specified string.
"""
if name[:1].lower() in 'aeiou':
return 'an ' + name
return 'a ' + name
#----------------------------------------------------------------------------
# Return a 'user-friendly' name for a specified trait:
#----------------------------------------------------------------------------
def user_name_for ( name ):
""" Returns a "user-friendly" version of a string, with the first letter
capitalized and with underscore characters replaced by spaces. For example,
``user_name_for('user_name_for')`` returns ``'User name for'``.
"""
name = name.replace( '_', ' ' )
result = ''
last_lower = False
for c in name:
if (c in uppercase) and last_lower:
result += ' '
last_lower = (c in lowercase)
result += c
return result.capitalize()
#-------------------------------------------------------------------------------
# Gets the path to the traits home directory:
#-------------------------------------------------------------------------------
_traits_home = None
def traits_home ( ):
""" Gets the path to the Traits home directory.
"""
global _traits_home
if _traits_home is None:
_traits_home = verify_path( join( ETSConfig.application_data,
'traits' ) )
return _traits_home
#-------------------------------------------------------------------------------
# Verify that a specified path exists, and try to create it if it doesn't:
#-------------------------------------------------------------------------------
def verify_path ( path ):
""" Verify that a specified path exists, and try to create it if it
does not exist.
"""
if not exists( path ):
try:
os.mkdir( path )
except:
pass
return path
#-------------------------------------------------------------------------------
# Returns the name of the module the caller's caller is located in:
#-------------------------------------------------------------------------------
def get_module_name ( level = 2 ):
""" Returns the name of the module that the caller's caller is located in.
"""
return sys._getframe( level ).f_globals.get( '__name__', '__main__' )
#-------------------------------------------------------------------------------
# Returns a resource path calculated from the caller's stack:
#-------------------------------------------------------------------------------
def get_resource_path ( level = 2 ):
"""Returns a resource path calculated from the caller's stack.
"""
module = sys._getframe( level ).f_globals.get( '__name__', '__main__' )
if module != '__main__':
# Return the path to the module:
try:
return dirname( getattr( sys.modules.get( module ), '__file__' ) )
except:
# Apparently 'module' is not a registered module...treat it like
# '__main__':
pass
# '__main__' is not a real module, so we need a work around:
for path in [ dirname( sys.argv[0] ), getcwd() ]:
if exists( path ):
break
return path
#-------------------------------------------------------------------------------
# Returns the value of an extended object attribute name of the form:
# name[.name2[.name3...]]:
#-------------------------------------------------------------------------------
def xgetattr( object, xname, default = Undefined ):
""" Returns the value of an extended object attribute name of the form:
name[.name2[.name3...]].
"""
names = xname.split( '.' )
for name in names[:-1]:
if default is Undefined:
object = getattr( object, name )
else:
object = getattr( object, name, None )
if object is None:
return default
if default is Undefined:
return getattr( object, names[-1] )
return getattr( object, names[-1], default )
#-------------------------------------------------------------------------------
# Sets the value of an extended object attribute name of the form:
# name[.name2[.name3...]]:
#-------------------------------------------------------------------------------
def xsetattr( object, xname, value ):
""" Sets the value of an extended object attribute name of the form:
name[.name2[.name3...]].
"""
names = xname.split( '.' )
for name in names[:-1]:
object = getattr( object, name )
setattr( object, names[-1], value )
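# Illustrative sketch (not part of the original module): for an extended name
# such as 'child.name', the two helpers behave like chained attribute access:
#
#     xgetattr(obj, 'child.name')        # same as getattr(obj.child, 'name')
#     xsetattr(obj, 'child.name', 42)    # same as setattr(obj.child, 'name', 42)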
#-------------------------------------------------------------------------------
# Traits metadata selection functions:
#-------------------------------------------------------------------------------
def is_none ( value ):
return (value is None)
def not_none ( value ):
return (value is not None)
def not_false ( value ):
return (value is not False)
def not_event ( value ):
return (value != 'event')
def is_str ( value ):
return isinstance( value, basestring )
| HyperloopTeam/FullOpenMDAO | lib/python2.7/site-packages/traits-4.3.0-py2.7-macosx-10.10-x86_64.egg/traits/trait_base.py | Python | gpl-2.0 | 19,017 | 0.018089 |
###############################################################################
##
## Copyright (C) 2014-2015, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: [email protected]
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
"""basic_modules defines basic VisTrails Modules that are used in most
pipelines."""
from __future__ import division
import vistrails.core.cache.hasher
from vistrails.core.debug import format_exception
from vistrails.core.modules.module_registry import get_module_registry
from vistrails.core.modules.vistrails_module import Module, new_module, \
Converter, NotCacheable, ModuleError
from vistrails.core.modules.config import ConstantWidgetConfig, \
QueryWidgetConfig, ParamExpWidgetConfig, ModuleSettings, IPort, OPort, \
CIPort
import vistrails.core.system
from vistrails.core.utils import InstanceObject
from vistrails.core import debug
from abc import ABCMeta
from ast import literal_eval
from itertools import izip
import mimetypes
import os
import pickle
import re
import shutil
import zipfile
import urllib
try:
import hashlib
sha_hash = hashlib.sha1
except ImportError:
import sha
sha_hash = sha.new
###############################################################################
version = '2.1.1'
name = 'Basic Modules'
identifier = 'org.vistrails.vistrails.basic'
old_identifiers = ['edu.utah.sci.vistrails.basic']
constant_config_path = "vistrails.gui.modules.constant_configuration"
query_config_path = "vistrails.gui.modules.query_configuration"
paramexp_config_path = "vistrails.gui.modules.paramexplore"
def get_port_name(port):
if hasattr(port, 'name'):
return port.name
else:
return port[0]
class meta_add_value_ports(type):
def __new__(cls, name, bases, dct):
"""This metaclass adds the 'value' input and output ports.
"""
mod = type.__new__(cls, name, bases, dct)
if '_input_ports' in mod.__dict__:
input_ports = mod._input_ports
if not any(get_port_name(port_info) == 'value'
for port_info in input_ports):
mod._input_ports = [('value', mod)]
mod._input_ports.extend(input_ports)
else:
mod._input_ports = [('value', mod)]
if '_output_ports' in mod.__dict__:
output_ports = mod._output_ports
if not any(get_port_name(port_info) == 'value'
for port_info in output_ports):
mod._output_ports = [('value', mod)]
mod._output_ports.extend(output_ports)
else:
mod._output_ports = [('value', mod)]
return mod
class Constant(Module):
"""Base class for all Modules that represent a constant value of
some type.
When implementing your own constant, You have to adhere to the
following interface:
Implement the following methods:
translate_to_python(x): Given a string, translate_to_python
must return a python value that will be the value seen by the
execution modules.
For example, translate_to_python called on a float parameter
with value '3.15' will return float('3.15').
translate_to_string(): Return a string representation of the
current constant, which will eventually be passed to
translate_to_python.
validate(v): return True if given python value is a plausible
value for the constant. It should be implemented such that
validate(translate_to_python(x)) == True for all valid x
A constant must also expose its default value, through the field
default_value.
There are fields you are not allowed to use in your constant classes.
These are: 'id', 'interpreter', 'logging' and 'change_parameter'
You can also define the constant's own GUI widget.
See core/modules/constant_configuration.py for details.
"""
_settings = ModuleSettings(abstract=True)
_output_ports = [OPort("value_as_string", "String")]
__metaclass__ = meta_add_value_ports
@staticmethod
def validate(x):
raise NotImplementedError
@staticmethod
def translate_to_python(x):
raise NotImplementedError
def compute(self):
"""Constant.compute() only checks validity (and presence) of
input value."""
v = self.get_input("value")
b = self.validate(v)
if not b:
raise ModuleError(self, "Internal Error: Constant failed validation")
self.set_output("value", v)
self.set_output("value_as_string", self.translate_to_string(v))
def setValue(self, v):
self.set_output("value", self.translate_to_python(v))
self.upToDate = True
@staticmethod
def translate_to_string(v):
return str(v)
@staticmethod
def get_widget_class():
# return StandardConstantWidget
return None
@staticmethod
def query_compute(value_a, value_b, query_method):
if query_method == '==' or query_method is None:
return (value_a == value_b)
elif query_method == '!=':
return (value_a != value_b)
return False
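# Illustrative sketch (not part of the original module): a minimal custom
# constant following the interface documented on the Constant class.
#
#     class Percentage(Constant):
#         default_value = 0.0
#
#         @staticmethod
#         def translate_to_python(x):
#             return float(x) / 100.0
#
#         @staticmethod
#         def translate_to_string(v):
#             return str(v * 100.0)
#
#         @staticmethod
#         def validate(v):
#             return isinstance(v, float) and 0.0 <= v <= 1.0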
def new_constant(name, py_conversion=None, default_value=None, validation=None,
widget_type=None,
str_conversion=None, base_class=Constant,
compute=None, query_compute=None):
"""new_constant(name: str,
py_conversion: callable,
default_value: python_type,
validation: callable,
widget_type: (path, name) tuple or QWidget type,
str_conversion: callable,
base_class: class,
compute: callable,
query_compute: static callable) -> Module
new_constant dynamically creates a new Module derived from
Constant with given py_conversion and str_conversion functions, a
corresponding python type and a widget type. py_conversion is a
python callable that takes a string and returns a python value of
the type that the class should hold. str_conversion does the reverse.
This is the quickest way to create new Constant Modules."""
d = {}
if py_conversion is not None:
d["translate_to_python"] = py_conversion
elif base_class == Constant:
raise ValueError("Must specify translate_to_python for constant")
if validation is not None:
d["validate"] = validation
elif base_class == Constant:
raise ValueError("Must specify validation for constant")
if default_value is not None:
d["default_value"] = default_value
if str_conversion is not None:
d['translate_to_string'] = str_conversion
if compute is not None:
d['compute'] = compute
if query_compute is not None:
d['query_compute'] = query_compute
if widget_type is not None:
@staticmethod
def get_widget_class():
return widget_type
d['get_widget_class'] = get_widget_class
m = new_module(base_class, name, d)
m._input_ports = [('value', m)]
m._output_ports = [('value', m)]
return m
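# Illustrative usage (not part of the original module): new_constant can build
# a simple constant type from conversion and validation callables alone.
#
#     UpperString = new_constant('UpperString',
#                                py_conversion=lambda x: str(x).upper(),
#                                default_value='',
#                                validation=lambda v: isinstance(v, str),
#                                str_conversion=str)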
class Boolean(Constant):
_settings = ModuleSettings(
constant_widget='%s:BooleanWidget' % constant_config_path)
default_value = False
@staticmethod
def translate_to_python(x):
s = x.upper()
if s == 'TRUE':
return True
if s == 'FALSE':
return False
raise ValueError('Boolean from String in VisTrails should be either '
'"true" or "false", got "%s" instead' % x)
@staticmethod
def validate(x):
return isinstance(x, bool)
class Float(Constant):
_settings = ModuleSettings(constant_widgets=[
QueryWidgetConfig('%s:NumericQueryWidget' % query_config_path),
ParamExpWidgetConfig('%s:FloatExploreWidget' % paramexp_config_path)])
default_value = 0.0
@staticmethod
def translate_to_python(x):
return float(x)
@staticmethod
def validate(x):
return isinstance(x, (int, long, float))
@staticmethod
def query_compute(value_a, value_b, query_method):
value_a = float(value_a)
value_b = float(value_b)
if query_method == '==' or query_method is None:
return (value_a == value_b)
elif query_method == '<':
return (value_a < value_b)
elif query_method == '>':
return (value_a > value_b)
elif query_method == '<=':
return (value_a <= value_b)
elif query_method == '>=':
return (value_a >= value_b)
class Integer(Float):
_settings = ModuleSettings(constant_widgets=[
QueryWidgetConfig('%s:NumericQueryWidget' % query_config_path),
ParamExpWidgetConfig('%s:IntegerExploreWidget' % paramexp_config_path)])
default_value = 0
@staticmethod
def translate_to_python(x):
if x.startswith('0x'):
return int(x, 16)
else:
return int(x)
@staticmethod
def validate(x):
return isinstance(x, (int, long))
class String(Constant):
_settings = ModuleSettings(
configure_widget="vistrails.gui.modules.string_configure:TextConfigurationWidget",
constant_widgets=[
ConstantWidgetConfig('%s:MultiLineStringWidget' % constant_config_path,
widget_type='multiline'),
QueryWidgetConfig('%s:StringQueryWidget' % query_config_path)])
_output_ports = [OPort("value_as_string", "String", optional=True)]
default_value = ""
@staticmethod
def translate_to_python(x):
assert isinstance(x, (str, unicode))
return str(x)
@staticmethod
def validate(x):
return isinstance(x, str)
@staticmethod
def query_compute(value_a, value_b, query_method):
if query_method == '*[]*' or query_method is None:
return (value_b in value_a)
elif query_method == '==':
return (value_a == value_b)
elif query_method == '=~':
try:
m = re.match(value_b, value_a)
if m is not None:
                    return (m.end() == len(value_a))
except re.error:
pass
return False
##############################################################################
# Rich display for IPython
try:
from IPython import display
except ImportError:
display = None
class PathObject(object):
def __init__(self, name):
self.name = name
self._ipython_repr = None
def __repr__(self):
return "PathObject(%r)" % self.name
__str__ = __repr__
def __getattr__(self, name):
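        # Lazily build an IPython rich-display proxy the first time a
        # '_repr_*_' attribute is requested: image files delegate to
        # IPython.display.Image, anything else falls back to AttributeError.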
if name.startswith('_repr_') and name.endswith('_'):
if self._ipython_repr is None:
filetype, encoding = mimetypes.guess_type(self.name)
if filetype and filetype.startswith('image/'):
self._ipython_repr = display.Image(filename=self.name)
else:
self._ipython_repr = False
if self._ipython_repr is not False:
return getattr(self._ipython_repr, name)
raise AttributeError
class Path(Constant):
_settings = ModuleSettings(constant_widget=("%s:PathChooserWidget" %
constant_config_path))
_input_ports = [IPort("value", "Path"),
IPort("name", "String", optional=True)]
_output_ports = [OPort("value", "Path")]
@staticmethod
def translate_to_python(x):
return PathObject(x)
@staticmethod
def translate_to_string(x):
return str(x.name)
@staticmethod
def validate(v):
return isinstance(v, PathObject)
def get_name(self):
n = None
if self.has_input("value"):
n = self.get_input("value").name
if n is None:
self.check_input("name")
n = self.get_input("name")
return n
def set_results(self, n):
self.set_output("value", PathObject(n))
self.set_output("value_as_string", n)
def compute(self):
n = self.get_name()
self.set_results(n)
Path.default_value = PathObject('')
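# Cache-signature helper for Path-like parameters: the normal parameter
# signature is combined with the newest modification time found under the
# path, so cached results are invalidated when the file or directory changes.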
def path_parameter_hasher(p):
def get_mtime(path):
t = int(os.path.getmtime(path))
if os.path.isdir(path):
for subpath in os.listdir(path):
subpath = os.path.join(path, subpath)
if os.path.isdir(subpath):
t = max(t, get_mtime(subpath))
return t
h = vistrails.core.cache.hasher.Hasher.parameter_signature(p)
try:
# FIXME: This will break with aliases - I don't really care that much
t = get_mtime(p.strValue)
except OSError:
return h
hasher = sha_hash()
hasher.update(h)
hasher.update(str(t))
return hasher.digest()
class File(Path):
"""File is a VisTrails Module that represents a file stored on a
file system local to the machine where VisTrails is running."""
_settings = ModuleSettings(constant_signature=path_parameter_hasher,
constant_widget=("%s:FileChooserWidget" %
constant_config_path))
_input_ports = [IPort("value", "File"),
IPort("create_file", "Boolean", optional=True)]
_output_ports = [OPort("value", "File"),
OPort("local_filename", "String", optional=True)]
def compute(self):
n = self.get_name()
if (self.has_input("create_file") and self.get_input("create_file")):
vistrails.core.system.touch(n)
if not os.path.isfile(n):
raise ModuleError(self, 'File %r does not exist' % n)
self.set_results(n)
self.set_output("local_filename", n)
class Directory(Path):
_settings = ModuleSettings(constant_signature=path_parameter_hasher,
constant_widget=("%s:DirectoryChooserWidget" %
constant_config_path))
_input_ports = [IPort("value", "Directory"),
IPort("create_directory", "Boolean", optional=True)]
_output_ports = [OPort("value", "Directory"),
OPort("itemList", "List")]
def compute(self):
n = self.get_name()
if (self.has_input("create_directory") and
self.get_input("create_directory")):
try:
vistrails.core.system.mkdir(n)
except Exception, e:
raise ModuleError(self, 'mkdir: %s' % format_exception(e))
if not os.path.isdir(n):
raise ModuleError(self, 'Directory "%s" does not exist' % n)
self.set_results(n)
dir_list = os.listdir(n)
output_list = []
for item in dir_list:
full_path = os.path.join(n, item)
output_list.append(PathObject(full_path))
self.set_output('itemList', output_list)
##############################################################################
class OutputPath(Path):
_settings = ModuleSettings(constant_widget=("%s:OutputPathChooserWidget" %
constant_config_path))
_input_ports = [IPort("value", "OutputPath")]
_output_ports = [OPort("value", "OutputPath")]
def get_name(self):
n = None
if self.has_input("value"):
n = self.get_input("value").name
if n is None:
self.check_input("name")
n = self.get_input("name")
return n
def set_results(self, n):
self.set_output("value", PathObject(n))
self.set_output("value_as_string", n)
def compute(self):
n = self.get_name()
self.set_results(n)
class FileSink(NotCacheable, Module):
"""FileSink takes a file and writes it to a user-specified
location in the file system. The file is stored at location
specified by the outputPath. The overwrite flag allows users to
specify whether an existing path should be overwritten."""
_input_ports = [IPort("file", File),
IPort("outputPath", OutputPath),
IPort("overwrite", Boolean, optional=True,
default=True),
IPort("publishFile", Boolean, optional=True)]
def compute(self):
input_file = self.get_input("file")
output_path = self.get_input("outputPath")
full_path = output_path.name
if os.path.isfile(full_path):
if self.get_input('overwrite'):
try:
os.remove(full_path)
except OSError, e:
msg = ('Could not delete existing path "%s" '
'(overwrite on)' % full_path)
raise ModuleError(self, msg)
else:
                raise ModuleError(self,
                                  "Could not copy file to '%s': file already "
                                  "exists" % full_path)
try:
vistrails.core.system.link_or_copy(input_file.name, full_path)
except OSError, e:
msg = "Could not create file '%s': %s" % (full_path, e)
raise ModuleError(self, msg)
if (self.has_input("publishFile") and
self.get_input("publishFile") or
not self.has_input("publishFile")):
if self.moduleInfo.has_key('extra_info'):
if self.moduleInfo['extra_info'].has_key('pathDumpCells'):
folder = self.moduleInfo['extra_info']['pathDumpCells']
base_fname = os.path.basename(full_path)
(base_fname, file_extension) = os.path.splitext(base_fname)
base_fname = os.path.join(folder, base_fname)
# make a unique filename
filename = base_fname + file_extension
counter = 2
while os.path.exists(filename):
filename = base_fname + "_%d%s" % (counter,
file_extension)
counter += 1
try:
vistrails.core.system.link_or_copy(input_file.name, filename)
except OSError, e:
msg = "Could not publish file '%s' \n on '%s':" % (
full_path, filename)
# I am not sure whether we should raise an error
# I will just print a warning for now (Emanuele)
debug.warning("%s" % msg, e)
class DirectorySink(NotCacheable, Module):
"""DirectorySink takes a directory and writes it to a
user-specified location in the file system. The directory is
stored at location specified by the outputPath. The overwrite
flag allows users to specify whether an existing path should be
overwritten."""
_input_ports = [IPort("dir", Directory),
IPort("outputPath", OutputPath),
IPort("overwrite", Boolean, optional=True, default="True")]
def compute(self):
input_dir = self.get_input("dir")
output_path = self.get_input("outputPath")
full_path = output_path.name
if os.path.exists(full_path):
if self.get_input("overwrite"):
try:
if os.path.isfile(full_path):
os.remove(full_path)
else:
shutil.rmtree(full_path)
except OSError, e:
msg = ('Could not delete existing path "%s" '
'(overwrite on)' % full_path)
raise ModuleError(
self,
'%s\n%s' % (msg, format_exception(e)))
else:
msg = ('Could not write to existing path "%s" '
'(overwrite off)' % full_path)
raise ModuleError(self, msg)
try:
shutil.copytree(input_dir.name, full_path)
except OSError, e:
msg = 'Could not copy path from "%s" to "%s"' % \
(input_dir.name, full_path)
raise ModuleError(self, '%s\n%s' % (msg, format_exception(e)))
##############################################################################
class WriteFile(Converter):
"""Writes a String to a temporary File.
"""
_input_ports = [IPort('in_value', String),
IPort('suffix', String, optional=True, default=""),
IPort('encoding', String, optional=True)]
_output_ports = [OPort('out_value', File)]
def compute(self):
contents = self.get_input('in_value')
suffix = self.force_get_input('suffix', '')
result = self.interpreter.filePool.create_file(suffix=suffix)
if self.has_input('encoding'):
contents = contents.decode('utf-8') # VisTrails uses UTF-8
# internally (I hope)
contents = contents.encode(self.get_input('encoding'))
with open(result.name, 'wb') as fp:
fp.write(contents)
self.set_output('out_value', result)
class ReadFile(Converter):
"""Reads a File to a String.
"""
_input_ports = [IPort('in_value', File),
IPort('encoding', String, optional=True)]
_output_ports = [OPort('out_value', String)]
def compute(self):
filename = self.get_input('in_value').name
with open(filename, 'rb') as fp:
contents = fp.read()
if self.has_input('encoding'):
contents = contents.decode(self.get_input('encoding'))
contents = contents.encode('utf-8') # VisTrails uses UTF-8
# internally (for now)
self.set_output('out_value', contents)
##############################################################################
class Color(Constant):
    # We set the value of a color object to be an InstanceObject that
    # contains a tuple, because a bare tuple would be interpreted as
    # type(tuple), which confuses the interpreter.
_settings = ModuleSettings(constant_widgets=[
'%s:ColorWidget' % constant_config_path,
ConstantWidgetConfig('%s:ColorEnumWidget' % \
constant_config_path,
widget_type='enum'),
QueryWidgetConfig('%s:ColorQueryWidget' % \
query_config_path),
ParamExpWidgetConfig('%s:RGBExploreWidget' % \
paramexp_config_path,
widget_type='rgb'),
ParamExpWidgetConfig('%s:HSVExploreWidget' % \
paramexp_config_path,
widget_type='hsv')])
_input_ports = [IPort("value", "Color")]
_output_ports = [OPort("value", "Color")]
default_value = InstanceObject(tuple=(1,1,1))
@staticmethod
def translate_to_python(x):
return InstanceObject(
tuple=tuple([float(a) for a in x.split(',')]))
@staticmethod
def translate_to_string(v):
return ','.join('%f' % c for c in v.tuple)
@staticmethod
def validate(x):
return isinstance(x, InstanceObject) and hasattr(x, 'tuple')
@staticmethod
def to_string(r, g, b):
return "%s,%s,%s" % (r,g,b)
@staticmethod
def query_compute(value_a, value_b, query_method):
# SOURCE: http://www.easyrgb.com/index.php?X=MATH
def rgb_to_xyz(r, g, b):
# r,g,b \in [0,1]
if r > 0.04045:
r = ( ( r + 0.055 ) / 1.055 ) ** 2.4
else:
r = r / 12.92
if g > 0.04045:
g = ( ( g + 0.055 ) / 1.055 ) ** 2.4
else:
g = g / 12.92
if b > 0.04045:
b = ( ( b + 0.055 ) / 1.055 ) ** 2.4
else:
b = b / 12.92
r *= 100
g *= 100
b *= 100
# Observer. = 2 deg, Illuminant = D65
x = r * 0.4124 + g * 0.3576 + b * 0.1805
y = r * 0.2126 + g * 0.7152 + b * 0.0722
z = r * 0.0193 + g * 0.1192 + b * 0.9505
return (x,y,z)
def xyz_to_cielab(x,y,z):
# Observer= 2 deg, Illuminant= D65
ref_x, ref_y, ref_z = (95.047, 100.000, 108.883)
x /= ref_x
y /= ref_y
z /= ref_z
if x > 0.008856:
x = x ** ( 1/3.0 )
else:
x = ( 7.787 * x ) + ( 16 / 116.0 )
if y > 0.008856:
y = y ** ( 1/3.0 )
else:
y = ( 7.787 * y ) + ( 16 / 116.0 )
if z > 0.008856:
z = z ** ( 1/3.0 )
else:
z = ( 7.787 * z ) + ( 16 / 116.0 )
L = ( 116 * y ) - 16
a = 500 * ( x - y )
b = 200 * ( y - z )
return (L, a, b)
def rgb_to_cielab(r,g,b):
return xyz_to_cielab(*rgb_to_xyz(r,g,b))
value_a_rgb = (float(a) for a in value_a.split(','))
value_b_rgb = (float(b) for b in value_b.split(','))
value_a_lab = rgb_to_cielab(*value_a_rgb)
value_b_lab = rgb_to_cielab(*value_b_rgb)
# cie76 difference
diff = sum((v_1 - v_2) ** 2
for v_1, v_2 in izip(value_a_lab, value_b_lab)) ** (0.5)
# print "CIE 76 DIFFERENCE:", diff
if query_method is None:
query_method = '2.3'
return diff < float(query_method)
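# The query above is the CIE76 color difference: both RGB triples are
# converted to CIELAB and compared with
#     delta_E = sqrt((L1 - L2)**2 + (a1 - a2)**2 + (b1 - b2)**2)
# The default threshold of 2.3 is the conventional CIE76 "just noticeable
# difference". Illustrative values (names as defined inside query_compute):
#     rgb_to_cielab(1.0, 1.0, 1.0)  # -> roughly (100.0, 0.0, 0.0) for white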
##############################################################################
class StandardOutput(NotCacheable, Module):
"""StandardOutput is a VisTrails Module that simply prints the
value connected on its port to standard output. It is intended
mostly as a debugging device."""
_input_ports = [IPort("value", 'Variant')]
def compute(self):
v = self.get_input("value")
if isinstance(v, PathObject):
try:
fp = open(v.name, 'rb')
except IOError:
print v
else:
try:
CHUNKSIZE = 2048
chunk = fp.read(CHUNKSIZE)
if chunk:
sys.stdout.write(chunk)
while len(chunk) == CHUNKSIZE:
chunk = fp.read(CHUNKSIZE)
if chunk:
sys.stdout.write(chunk)
sys.stdout.write('\n')
finally:
fp.close()
else:
print v
##############################################################################
# Tuple will be reasonably magic right now. We'll integrate it better
# with vistrails later.
class Tuple(Module):
"""Tuple represents a tuple of values. Tuple might not be well
integrated with the rest of VisTrails, so don't use it unless
you know what you're doing."""
_settings = ModuleSettings(configure_widget=
"vistrails.gui.modules.tuple_configuration:TupleConfigurationWidget")
def __init__(self):
Module.__init__(self)
self.input_ports_order = []
self.values = tuple()
def transfer_attrs(self, module):
Module.transfer_attrs(self, module)
self.input_ports_order = [p.name for p in module.input_port_specs]
def compute(self):
values = tuple([self.get_input(p)
for p in self.input_ports_order])
self.values = values
self.set_output("value", values)
class Untuple(Module):
"""Untuple takes a tuple and returns the individual values. It
reverses the actions of Tuple.
"""
_settings = ModuleSettings(configure_widget=
"vistrails.gui.modules.tuple_configuration:UntupleConfigurationWidget")
def __init__(self):
Module.__init__(self)
self.output_ports_order = []
def transfer_attrs(self, module):
Module.transfer_attrs(self, module)
self.output_ports_order = [p.name for p in module.output_port_specs]
# output_ports are reversed for display purposes...
self.output_ports_order.reverse()
def compute(self):
if self.has_input("tuple"):
tuple = self.get_input("tuple")
values = tuple.values
else:
values = self.get_input("value")
for p, value in izip(self.output_ports_order, values):
self.set_output(p, value)
##############################################################################
class ConcatenateString(Module):
"""ConcatenateString takes many strings as input and produces the
concatenation as output. Useful for constructing filenames, for
example.
This class will probably be replaced with a better API in the
future."""
fieldCount = 4
_input_ports = [IPort("str%d" % i, "String")
for i in xrange(1, 1 + fieldCount)]
_output_ports = [OPort("value", "String")]
def compute(self):
result = "".join(self.force_get_input('str%d' % i, '')
for i in xrange(1, 1 + self.fieldCount))
self.set_output('value', result)
##############################################################################
class Not(Module):
"""Not inverts a Boolean.
"""
_input_ports = [IPort('input', 'Boolean')]
_output_ports = [OPort('value', 'Boolean')]
def compute(self):
value = self.get_input('input')
self.set_output('value', not value)
##############################################################################
# List
# If numpy is available, we consider numpy arrays to be lists as well
class ListType(object):
__metaclass__ = ABCMeta
ListType.register(list)
try:
import numpy
except ImportError:
numpy = None
else:
ListType.register(numpy.ndarray)
class List(Constant):
_settings = ModuleSettings(configure_widget=
"vistrails.gui.modules.list_configuration:ListConfigurationWidget")
_input_ports = [IPort("value", "List"),
IPort("head", "Variant", depth=1),
IPort("tail", "List")]
_output_ports = [OPort("value", "List")]
default_value = []
def __init__(self):
Constant.__init__(self)
self.input_ports_order = []
def transfer_attrs(self, module):
Module.transfer_attrs(self, module)
self.input_ports_order = [p.name for p in module.input_port_specs]
@staticmethod
def validate(x):
return isinstance(x, ListType)
@staticmethod
def translate_to_python(v):
return literal_eval(v)
@staticmethod
def translate_to_string(v, dims=None):
if dims is None:
if numpy is not None and isinstance(v, numpy.ndarray):
dims = v.ndim
else:
dims = 1
if dims == 1:
return '[%s]' % ', '.join(repr(c)
for c in v)
else:
return '[%s]' % ', '.join(List.translate_to_string(c, dims-1)
for c in v)
def compute(self):
head, middle, items, tail = [], [], [], []
got_value = False
if self.has_input('value'):
# run the regular compute here
Constant.compute(self)
middle = self.outputPorts['value']
got_value = True
if self.has_input('head'):
head = self.get_input('head')
got_value = True
if self.input_ports_order:
items = [self.get_input(p)
for p in self.input_ports_order]
got_value = True
if self.has_input('tail'):
tail = self.get_input('tail')
got_value = True
if not got_value:
self.get_input('value')
self.set_output('value', head + middle + items + tail)
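        # The result is the concatenation head + value + item ports + tail;
        # e.g. head=['d'], value=['a', 'b', 'c'], tail=['e', 'f'] yields
        # ['d', 'a', 'b', 'c', 'e', 'f'] (see TestList.test_simple below).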
##############################################################################
# Dictionary
class Dictionary(Constant):
default_value = {}
_input_ports = [CIPort("addPair", "Module, Module"),
IPort("addPairs", "List")]
@staticmethod
def translate_to_python(v):
return literal_eval(v)
@staticmethod
def validate(x):
return isinstance(x, dict)
def compute(self):
d = {}
if self.has_input('value'):
Constant.compute(self)
d.update(self.outputPorts['value'])
if self.has_input('addPair'):
pairs_list = self.get_input_list('addPair')
d.update(pairs_list)
if self.has_input('addPairs'):
d.update(self.get_input('addPairs'))
self.set_output("value", d)
##############################################################################
# TODO: Null should be a subclass of Constant?
class Null(Module):
"""Null is the class of None values."""
_settings = ModuleSettings(hide_descriptor=True)
def compute(self):
self.set_output("value", None)
##############################################################################
class Unpickle(Module):
"""Unpickles a string.
"""
_settings = ModuleSettings(hide_descriptor=True)
_input_ports = [IPort('input', 'String')]
_output_ports = [OPort('result', 'Variant')]
def compute(self):
value = self.get_input('input')
self.set_output('result', pickle.loads(value))
##############################################################################
class CodeRunnerMixin(object):
def __init__(self):
self.output_ports_order = []
super(CodeRunnerMixin, self).__init__()
def transfer_attrs(self, module):
Module.transfer_attrs(self, module)
self.output_ports_order = [p.name for p in module.output_port_specs]
# output_ports are reversed for display purposes...
self.output_ports_order.reverse()
def run_code(self, code_str,
use_input=False,
use_output=False):
"""run_code runs a piece of code as a VisTrails module.
use_input and use_output control whether to use the inputport
and output port dictionary as local variables inside the
execution."""
import vistrails.core.packagemanager
def fail(msg):
raise ModuleError(self, msg)
def cache_this():
self.is_cacheable = lambda *args, **kwargs: True
locals_ = locals()
if use_input:
for k in self.inputPorts:
locals_[k] = self.get_input(k)
if use_output:
for output_portname in self.output_ports_order:
if output_portname not in self.inputPorts:
locals_[output_portname] = None
_m = vistrails.core.packagemanager.get_package_manager()
reg = get_module_registry()
locals_.update({'fail': fail,
'package_manager': _m,
'cache_this': cache_this,
'registry': reg,
'self': self})
if 'source' in locals_:
del locals_['source']
# Python 2.6 needs code to end with newline
exec code_str + '\n' in locals_, locals_
if use_output:
for k in self.output_ports_order:
if locals_.get(k) is not None:
self.set_output(k, locals_[k])
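# Illustrative example of a code string executed through run_code with
# use_input=True and use_output=True (the port names 'a' and 'b' are
# hypothetical, not part of the basic modules):
#
#     if a < 0:
#         fail("a must be non-negative")
#     cache_this()
#     b = a * 2
#
# Input ports appear as local variables, and any non-None local whose name
# matches an output port is set on that port after execution.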
##############################################################################
class PythonSource(CodeRunnerMixin, NotCacheable, Module):
"""PythonSource is a Module that executes an arbitrary piece of
Python code.
It is especially useful for one-off pieces of 'glue' in a
pipeline.
If you want a PythonSource execution to fail, call
fail(error_message).
If you want a PythonSource execution to be cached, call
cache_this().
"""
_settings = ModuleSettings(
configure_widget=("vistrails.gui.modules.python_source_configure:"
"PythonSourceConfigurationWidget"))
_input_ports = [IPort('source', 'String', optional=True, default="")]
    _output_ports = [OPort('self', 'Module')]
def compute(self):
s = urllib.unquote(str(self.get_input('source')))
self.run_code(s, use_input=True, use_output=True)
##############################################################################
def zip_extract_file(archive, filename_in_archive, output_filename):
z = zipfile.ZipFile(archive)
try:
fileinfo = z.getinfo(filename_in_archive) # Might raise KeyError
output_dirname, output_filename = os.path.split(output_filename)
fileinfo.filename = output_filename
z.extract(fileinfo, output_dirname)
finally:
z.close()
def zip_extract_all_files(archive, output_path):
z = zipfile.ZipFile(archive)
try:
z.extractall(output_path)
finally:
z.close()
class Unzip(Module):
"""Unzip extracts a file from a ZIP archive."""
_input_ports = [IPort('archive_file', 'File'),
IPort('filename_in_archive', 'String')]
_output_ports = [OPort('file', 'File')]
def compute(self):
self.check_input("archive_file")
self.check_input("filename_in_archive")
filename_in_archive = self.get_input("filename_in_archive")
archive_file = self.get_input("archive_file")
if not os.path.isfile(archive_file.name):
raise ModuleError(self, "archive file does not exist")
suffix = self.interpreter.filePool.guess_suffix(filename_in_archive)
output = self.interpreter.filePool.create_file(suffix=suffix)
zip_extract_file(archive_file.name,
filename_in_archive,
output.name)
self.set_output("file", output)
class UnzipDirectory(Module):
"""UnzipDirectory extracts every file from a ZIP archive."""
_input_ports = [IPort('archive_file', 'File')]
_output_ports = [OPort('directory', 'Directory')]
def compute(self):
self.check_input("archive_file")
archive_file = self.get_input("archive_file")
if not os.path.isfile(archive_file.name):
raise ModuleError(self, "archive file does not exist")
output = self.interpreter.filePool.create_directory()
zip_extract_all_files(archive_file.name,
output.name)
self.set_output("directory", output)
##############################################################################
class Round(Converter):
"""Turns a Float into an Integer.
"""
_settings = ModuleSettings(hide_descriptor=True)
_input_ports = [IPort('in_value', 'Float'),
IPort('floor', 'Boolean', optional=True, default="True")]
_output_ports = [OPort('out_value', 'Integer')]
def compute(self):
fl = self.get_input('in_value')
floor = self.get_input('floor')
if floor:
integ = int(fl) # just strip the decimals
else:
integ = int(fl + 0.5) # nearest
self.set_output('out_value', integ)
class TupleToList(Converter):
"""Turns a Tuple into a List.
"""
_settings = ModuleSettings(hide_descriptor=True)
_input_ports = [IPort('in_value', 'Variant')]
_output_ports = [OPort('out_value', 'List')]
@classmethod
def can_convert(cls, sub_descs, super_descs):
if len(sub_descs) <= 1:
return False
reg = get_module_registry()
return super_descs == [reg.get_descriptor(List)]
def compute(self):
tu = self.get_input('in_value')
if not isinstance(tu, tuple):
raise ModuleError(self, "Input is not a tuple")
self.set_output('out_value', list(tu))
##############################################################################
class Variant(Module):
"""
Variant is tracked internally for outputing a variant type on
output port. For input port, Module type should be used
"""
_settings = ModuleSettings(abstract=True)
##############################################################################
class Generator(object):
"""
Used to keep track of list iteration, it will execute a module once for
each input in the list/generator.
"""
_settings = ModuleSettings(abstract=True)
generators = []
def __init__(self, size=None, module=None, generator=None, port=None,
accumulated=False):
self.module = module
self.generator = generator
self.port = port
self.size = size
self.accumulated = accumulated
if generator and module not in Generator.generators:
# add to global list of generators
# they will be topologically ordered
module.generator = generator
Generator.generators.append(module)
def next(self):
""" return next value - the generator """
value = self.module.get_output(self.port)
if isinstance(value, Generator):
value = value.all()
return value
def all(self):
""" exhausts next() for Streams
"""
items = []
item = self.next()
while item is not None:
items.append(item)
item = self.next()
return items
@staticmethod
def stream():
""" executes all generators until inputs are exhausted
this makes sure branching and multiple sinks are executed correctly
"""
result = True
if not Generator.generators:
return
while result is not None:
for g in Generator.generators:
result = g.next()
Generator.generators = []
##############################################################################
class Assert(Module):
"""
Assert is a simple module that conditionally stops the execution.
"""
_input_ports = [IPort('condition', 'Boolean')]
def compute(self):
condition = self.get_input('condition')
if not condition:
raise ModuleError(self, "Assert: condition is False",
abort=True)
class AssertEqual(Module):
"""
AssertEqual works like Assert but compares two values.
It is provided for convenience.
"""
_input_ports = [IPort('value1', 'Variant'),
IPort('value2', 'Variant')]
def compute(self):
values = (self.get_input('value1'),
self.get_input('value2'))
if values[0] != values[1]:
reprs = tuple(repr(v) for v in values)
reprs = tuple('%s...' % v[:17] if len(v) > 20 else v
for v in reprs)
raise ModuleError(self, "AssertEqual: values are different: "
"%r, %r" % reprs,
abort=True)
##############################################################################
class StringFormat(Module):
"""
Builds a string from objects using Python's str.format().
"""
_settings = ModuleSettings(configure_widget=
'vistrails.gui.modules.stringformat_configuration:'
'StringFormatConfigurationWidget')
_input_ports = [IPort('format', String)]
_output_ports = [OPort('value', String)]
@staticmethod
def list_placeholders(fmt):
placeholders = set()
nb = 0
i = 0
n = len(fmt)
while i < n:
if fmt[i] == '{':
i += 1
                if fmt[i] == '{':  # escaped '{{' literal
                    i += 1
                    continue
                e = fmt.index('}', i)  # raises ValueError if unmatched
f = e
for c in (':', '!', '[', '.'):
c = fmt.find(c, i)
if c != -1:
f = min(f, c)
if i == f:
nb += 1
else:
arg = fmt[i:f]
try:
arg = int(arg)
except ValueError:
placeholders.add(arg)
else:
nb = max(nb, arg + 1)
i = e
i += 1
return nb, placeholders
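    # For example (matching TestStringFormat below):
    #     list_placeholders('a {} b}} {c!s} {{d e}} {}f') == (2, set(['c']))
    # i.e. two positional placeholders and one named placeholder 'c'.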
def compute(self):
fmt = self.get_input('format')
args, kwargs = StringFormat.list_placeholders(fmt)
f_args = [self.get_input('_%d' % n)
for n in xrange(args)]
f_kwargs = dict((n, self.get_input(n))
for n in kwargs)
self.set_output('value', fmt.format(*f_args, **f_kwargs))
##############################################################################
def init_constant(m):
reg = get_module_registry()
reg.add_module(m)
reg.add_input_port(m, "value", m)
reg.add_output_port(m, "value", m)
_modules = [Module, Converter, Constant, Boolean, Float, Integer, String, List,
Path, File, Directory, OutputPath,
FileSink, DirectorySink, WriteFile, ReadFile, StandardOutput,
Tuple, Untuple, ConcatenateString, Not, Dictionary, Null, Variant,
Unpickle, PythonSource, Unzip, UnzipDirectory, Color,
Round, TupleToList, Assert, AssertEqual, StringFormat]
def initialize(*args, **kwargs):
# initialize the sub_module modules, too
import vistrails.core.modules.sub_module
import vistrails.core.modules.output_modules
_modules.extend(vistrails.core.modules.sub_module._modules)
_modules.extend(vistrails.core.modules.output_modules._modules)
def handle_module_upgrade_request(controller, module_id, pipeline):
from vistrails.core.upgradeworkflow import UpgradeWorkflowHandler
reg = get_module_registry()
def outputName_remap(old_conn, new_module):
ops = []
old_src_module = pipeline.modules[old_conn.source.moduleId]
op_desc = reg.get_descriptor(OutputPath)
new_x = (old_src_module.location.x + new_module.location.x) / 2.0
new_y = (old_src_module.location.y + new_module.location.y) / 2.0
op_module = \
controller.create_module_from_descriptor(op_desc, new_x, new_y)
ops.append(('add', op_module))
create_new_connection = UpgradeWorkflowHandler.create_new_connection
new_conn_1 = create_new_connection(controller,
old_src_module,
old_conn.source,
op_module,
"name")
ops.append(('add', new_conn_1))
new_conn_2 = create_new_connection(controller,
op_module,
"value",
new_module,
"outputPath")
ops.append(('add', new_conn_2))
return ops
module_remap = {'FileSink':
[(None, '1.6', None,
{'dst_port_remap':
{'overrideFile': 'overwrite',
'outputName': outputName_remap},
'function_remap':
{'overrideFile': 'overwrite',
'outputName': 'outputPath'}})],
'GetItemsFromDirectory':
[(None, '1.6', 'Directory',
{'dst_port_remap':
{'dir': 'value'},
'src_port_remap':
{'itemlist': 'itemList'},
})],
'InputPort':
[(None, '1.6', None,
{'dst_port_remap': {'old_name': None}})],
'OutputPort':
[(None, '1.6', None,
{'dst_port_remap': {'old_name': None}})],
'PythonSource':
[(None, '1.6', None, {})],
'Tuple':
[(None, '2.1.1', None, {})],
'StandardOutput':
[(None, '2.1.1', None, {})],
'List':
[(None, '2.1.1', None, {})],
'AssertEqual':
[(None, '2.1.1', None, {})],
'Converter':
[(None, '2.1.1', None, {})],
}
return UpgradeWorkflowHandler.remap_module(controller, module_id, pipeline,
module_remap)
###############################################################################
class NewConstant(Constant):
"""
A new Constant module to be used inside the FoldWithModule module.
"""
def setValue(self, v):
self.set_output("value", v)
self.upToDate = True
def create_constant(value):
"""
Creates a NewConstant module, to be used for the ModuleConnector.
"""
constant = NewConstant()
constant.setValue(value)
return constant
def get_module(value, signature=None):
"""
Creates a module for value, in order to do the type checking.
"""
if isinstance(value, Constant):
return type(value)
elif isinstance(value, bool):
return Boolean
elif isinstance(value, str):
return String
elif isinstance(value, int):
return Integer
elif isinstance(value, float):
return Float
if isinstance(value, list):
return List
elif isinstance(value, tuple):
# Variant supports signatures of any length
if signature is None or \
(len(signature) == 1 and signature[0][0] == Variant):
return (Variant,)*len(value)
v_modules = ()
for element in xrange(len(value)):
v_modules += (get_module(value[element], signature[element]),)
if None in v_modules: # Identification failed
return None
return v_modules
else: # pragma: no cover
debug.warning("Could not identify the type of the list element.")
debug.warning("Type checking is not going to be done inside "
"iterated module.")
return None
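# Roughly: get_module(True) -> Boolean, get_module('x') -> String,
# get_module(3) -> Integer, get_module(3.5) -> Float, get_module([1]) -> List;
# tuples are resolved element-wise against the given signature.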
###############################################################################
import sys
import unittest
class TestConcatenateString(unittest.TestCase):
@staticmethod
def concatenate(**kwargs):
from vistrails.tests.utils import execute, intercept_result
with intercept_result(ConcatenateString, 'value') as results:
errors = execute([
('ConcatenateString', 'org.vistrails.vistrails.basic', [
(name, [('String', value)])
for name, value in kwargs.iteritems()
]),
])
if errors:
return None
return results
def test_concatenate(self):
"""Concatenates strings"""
self.assertEqual(self.concatenate(
str1="hello ", str2="world"),
["hello world"])
self.assertEqual(self.concatenate(
str3="hello world"),
["hello world"])
self.assertEqual(self.concatenate(
str2="hello ", str4="world"),
["hello world"])
self.assertEqual(self.concatenate(
str1="hello", str3=" ", str4="world"),
["hello world"])
def test_empty(self):
"""Runs ConcatenateString with no input"""
self.assertEqual(self.concatenate(), [""])
class TestNot(unittest.TestCase):
def run_pipeline(self, functions):
from vistrails.tests.utils import execute, intercept_result
with intercept_result(Not, 'value') as results:
errors = execute([
('Not', 'org.vistrails.vistrails.basic',
functions),
])
return errors, results
def test_true(self):
errors, results = self.run_pipeline([
('input', [('Boolean', 'True')])])
self.assertFalse(errors)
self.assertEqual(len(results), 1)
self.assertIs(results[0], False)
def test_false(self):
errors, results = self.run_pipeline([
('input', [('Boolean', 'False')])])
self.assertFalse(errors)
self.assertEqual(len(results), 1)
self.assertIs(results[0], True)
def test_notset(self):
errors, results = self.run_pipeline([])
self.assertTrue(errors)
class TestList(unittest.TestCase):
@staticmethod
def build_list(value=None, head=None, tail=None):
from vistrails.tests.utils import execute, intercept_result
with intercept_result(List, 'value') as results:
functions = []
def add(n, v, t):
if v is not None:
for e in v:
functions.append(
(n, [(t, e)])
)
add('value', value, 'List')
add('head', head, 'String')
add('tail', tail, 'List')
errors = execute([
('List', 'org.vistrails.vistrails.basic', functions),
])
if errors:
return None
# List is a Constant, so the interpreter will set the result 'value'
# from the 'value' input port automatically
# Ignore these first results
return results[-1]
def test_simple(self):
"""Tests the default ports of the List module"""
self.assertEqual(self.build_list(
value=['["a", "b", "c"]']),
["a", "b", "c"])
self.assertEqual(self.build_list(
head=["d"],
value=['["a", "b", "c"]']),
["d", "a", "b", "c"])
self.assertEqual(self.build_list(
head=["d"],
value=['["a", "b", "c"]'],
tail=['["e", "f"]']),
["d", "a", "b", "c", "e", "f"])
self.assertEqual(self.build_list(
value=['[]'],
tail=['[]']),
[])
def test_multiple(self):
"""Tests setting multiple values on a port"""
# Multiple values on 'head'
self.assertEqual(self.build_list(
head=["a", "b"]),
["a", "b"])
self.assertEqual(self.build_list(
head=["a", "b"],
value=['["c", "d"]']),
["a", "b", "c", "d"])
# Multiple values on 'value'
res = self.build_list(value=['["a", "b"]', '["c", "d"]'])
# Connections of List type are merged
self.assertEqual(res, ["a", "b", "c", "d"])
def test_items(self):
"""Tests the multiple 'itemN' ports"""
from vistrails.tests.utils import execute, intercept_result
def list_with_items(nb_items, **kwargs):
with intercept_result(List, 'value') as results:
errors = execute([
('List', 'org.vistrails.vistrails.basic', [
(k, [('String', v)])
for k, v in kwargs.iteritems()
]),
],
add_port_specs=[
(0, 'input', 'item%d' % i,
'(org.vistrails.vistrails.basic:Module)')
for i in xrange(nb_items)
])
if errors:
return None
return results[-1]
self.assertEqual(
list_with_items(2, head="one", item0="two", item1="three"),
["one", "two", "three"])
# All 'itemN' ports have to be set
self.assertIsNone(
list_with_items(3, head="one", item0="two", item2="three"))
class TestPythonSource(unittest.TestCase):
def test_simple(self):
"""A simple PythonSource returning a string"""
import urllib2
from vistrails.tests.utils import execute, intercept_result
source = 'customout = "nb is %d" % customin'
source = urllib2.quote(source)
with intercept_result(PythonSource, 'customout') as results:
self.assertFalse(execute([
('PythonSource', 'org.vistrails.vistrails.basic', [
('source', [('String', source)]),
('customin', [('Integer', '42')])
]),
('String', 'org.vistrails.vistrails.basic', []),
],
[
(0, 'customout', 1, 'value'),
],
add_port_specs=[
(0, 'input', 'customin',
'org.vistrails.vistrails.basic:Integer'),
(0, 'output', 'customout',
'org.vistrails.vistrails.basic:String'),
]))
self.assertEqual(results[-1], "nb is 42")
class TestNumericConversions(unittest.TestCase):
def test_full(self):
from vistrails.tests.utils import execute, intercept_result
with intercept_result(Round, 'out_value') as results:
self.assertFalse(execute([
('Integer', 'org.vistrails.vistrails.basic', [
('value', [('Integer', '5')])
]),
('Float', 'org.vistrails.vistrails.basic', []),
('PythonCalc', 'org.vistrails.vistrails.pythoncalc', [
('value2', [('Float', '2.7')]),
('op', [('String', '+')]),
]),
('Round', 'org.vistrails.vistrails.basic', [
('floor', [('Boolean', 'True')]),
]),
],
[
(0, 'value', 1, 'value'),
(1, 'value', 2, 'value1'),
(2, 'value', 3, 'in_value'),
]))
self.assertEqual(results, [7])
class TestUnzip(unittest.TestCase):
def test_unzip_file(self):
from vistrails.tests.utils import execute, intercept_result
from vistrails.core.system import vistrails_root_directory
zipfile = os.path.join(vistrails_root_directory(),
'tests', 'resources',
'test_archive.zip')
with intercept_result(Unzip, 'file') as outfiles:
self.assertFalse(execute([
('Unzip', 'org.vistrails.vistrails.basic', [
('archive_file', [('File', zipfile)]),
('filename_in_archive', [('String', 'file1.txt')]),
]),
]))
self.assertEqual(len(outfiles), 1)
with open(outfiles[0].name, 'rb') as outfile:
self.assertEqual(outfile.read(), "some random\ncontent")
def test_unzip_all(self):
from vistrails.tests.utils import execute, intercept_result
from vistrails.core.system import vistrails_root_directory
zipfile = os.path.join(vistrails_root_directory(),
'tests', 'resources',
'test_archive.zip')
with intercept_result(UnzipDirectory, 'directory') as outdir:
self.assertFalse(execute([
('UnzipDirectory', 'org.vistrails.vistrails.basic', [
('archive_file', [('File', zipfile)]),
]),
]))
self.assertEqual(len(outdir), 1)
self.assertEqual(
[(d, f) for p, d, f in os.walk(outdir[0].name)],
[(['subdir'], ['file1.txt']),
([], ['file2.txt'])])
from vistrails.core.configuration import get_vistrails_configuration
class TestTypechecking(unittest.TestCase):
@classmethod
def setUpClass(cls):
conf = get_vistrails_configuration()
cls.error_all = conf.showConnectionErrors
cls.error_variant = conf.showVariantErrors
@classmethod
def tearDownClass(cls):
conf = get_vistrails_configuration()
conf.showConnectionErrors = cls.error_all
conf.showVariantErrors = cls.error_variant
@staticmethod
def set_settings(error_all, error_variant):
conf = get_vistrails_configuration()
conf.showConnectionErrors = error_all
conf.showVariantErrors = error_variant
def run_test_pipeline(self, result, expected_results, *args, **kwargs):
from vistrails.tests.utils import execute, intercept_result
for error_all, error_variant, expected in expected_results:
self.set_settings(error_all, error_variant)
with intercept_result(*result) as results:
error = execute(*args, **kwargs)
if not expected:
self.assertTrue(error)
else:
self.assertFalse(error)
self.assertEqual(results, expected)
def test_basic(self):
import urllib2
# Base case: no typing error
# This should succeed in every case
self.run_test_pipeline(
(PythonSource, 'r'),
[(False, False, ["test"]),
(True, True, ["test"])],
[
('PythonSource', 'org.vistrails.vistrails.basic', [
('source', [('String', urllib2.quote('o = "test"'))]),
]),
('PythonSource', 'org.vistrails.vistrails.basic', [
('source', [('String', urllib2.quote('r = i'))])
]),
],
[
(0, 'o', 1, 'i'),
],
add_port_specs=[
(0, 'output', 'o',
'org.vistrails.vistrails.basic:String'),
(1, 'input', 'i',
'org.vistrails.vistrails.basic:String'),
(1, 'output', 'r',
'org.vistrails.vistrails.basic:String')
])
def test_fake(self):
import urllib2
# A module is lying, declaring a String but returning an int
# This should fail with showConnectionErrors=True (not the
# default)
self.run_test_pipeline(
(PythonSource, 'r'),
[(False, False, [42]),
(False, True, [42]),
(True, True, False)],
[
('PythonSource', 'org.vistrails.vistrails.basic', [
('source', [('String', urllib2.quote('o = 42'))]),
]),
('PythonSource', 'org.vistrails.vistrails.basic', [
('source', [('String', urllib2.quote('r = i'))])
]),
],
[
(0, 'o', 1, 'i'),
],
add_port_specs=[
(0, 'output', 'o',
'org.vistrails.vistrails.basic:String'),
(1, 'input', 'i',
'org.vistrails.vistrails.basic:String'),
(1, 'output', 'r',
'org.vistrails.vistrails.basic:String')
])
def test_inputport(self):
import urllib2
# This test uses an InputPort module, whose output port should not be
# considered a Variant port (although it is)
self.run_test_pipeline(
(PythonSource, 'r'),
[(False, False, [42]),
(False, True, [42]),
(True, True, [42])],
[
('InputPort', 'org.vistrails.vistrails.basic', [
('ExternalPipe', [('Integer', '42')]),
]),
('PythonSource', 'org.vistrails.vistrails.basic', [
('source', [('String', urllib2.quote('r = i'))])
]),
],
[
(0, 'InternalPipe', 1, 'i'),
],
add_port_specs=[
(1, 'input', 'i',
'org.vistrails.vistrails.basic:String'),
(1, 'output', 'r',
'org.vistrails.vistrails.basic:String'),
])
class TestStringFormat(unittest.TestCase):
def test_list_placeholders(self):
fmt = 'a {} b}} {c!s} {{d e}} {}f'
self.assertEqual(StringFormat.list_placeholders(fmt),
(2, set(['c'])))
def run_format(self, fmt, expected, **kwargs):
from vistrails.tests.utils import execute, intercept_result
functions = [('format', [('String', fmt)])]
functions.extend((n, [(t, v)])
for n, (t, v) in kwargs.iteritems())
with intercept_result(StringFormat, 'value') as results:
self.assertFalse(execute([
('StringFormat', 'org.vistrails.vistrails.basic',
functions),
],
add_port_specs=[
(0, 'input', n, t)
for n, (t, v) in kwargs.iteritems()
]))
self.assertEqual(results, [expected])
def test_format(self):
self.run_format('{{ {a} }} b {c!s}', '{ 42 } b 12',
a=('Integer', '42'),
c=('Integer', '12'))
# Python 2.6 doesn't support {}
@unittest.skipIf(sys.version_info < (2, 7), "No {} support on 2.6")
def test_format_27(self):
self.run_format('{} {}', 'a b',
_0=('String', 'a'), _1=('String', 'b'))
self.run_format('{{ {a} {} {b!s}', '{ 42 b 12',
a=('Integer', '42'), _0=('String', 'b'),
b=('Integer', '12'))
self.run_format('{} {} {!r}{ponc} {:.2f}', "hello dear 'world'! 1.33",
_0=('String', 'hello'), _1=('String', 'dear'),
_2=('String', 'world'), _3=('Float', '1.333333333'),
ponc=('String', '!'))
class TestConstantMetaclass(unittest.TestCase):
def test_meta(self):
"""Tests the __metaclass__ for Constant.
"""
mod1_in = [('value', 'basic:String'), IPort('other', 'basic:Float')]
mod1_out = [('someport', 'basic:Integer')]
class Mod1(Constant):
_input_ports = mod1_in
_output_ports = mod1_out
self.assertEqual(Mod1._input_ports, mod1_in)
self.assertEqual(Mod1._output_ports, [('value', Mod1)] + mod1_out)
mod2_in = [('another', 'basic:String')]
class Mod2(Mod1):
_input_ports = mod2_in
self.assertEqual(Mod2._input_ports, [('value', Mod2)] + mod2_in)
self.assertEqual(Mod2._output_ports, [('value', Mod2)])
class Mod3(Mod1):
_output_ports = []
self.assertEqual(Mod3._input_ports, [('value', Mod3)])
self.assertEqual(Mod3._output_ports, [('value', Mod3)])
| hjanime/VisTrails | vistrails/core/modules/basic_modules.py | Python | bsd-3-clause | 69,112 | 0.002879 |
# Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementations for Im2Vox PTN (NIPS16) model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import tensorflow as tf
import losses
import metrics
import model_voxel_generation
import utils
from nets import im2vox_factory
slim = tf.contrib.slim
class model_PTN(model_voxel_generation.Im2Vox): # pylint:disable=invalid-name
"""Inherits the generic Im2Vox model class and implements the functions."""
def __init__(self, params):
super(model_PTN, self).__init__(params)
# For testing, this selects all views in input
def preprocess_with_all_views(self, raw_inputs):
(quantity, num_views) = raw_inputs['images'].get_shape().as_list()[:2]
inputs = dict()
inputs['voxels'] = []
inputs['images_1'] = []
for k in xrange(num_views):
inputs['matrix_%d' % (k + 1)] = []
inputs['matrix_1'] = []
for n in xrange(quantity):
for k in xrange(num_views):
inputs['images_1'].append(raw_inputs['images'][n, k, :, :, :])
inputs['voxels'].append(raw_inputs['voxels'][n, :, :, :, :])
tf_matrix = self.get_transform_matrix(k)
inputs['matrix_%d' % (k + 1)].append(tf_matrix)
inputs['images_1'] = tf.stack(inputs['images_1'])
inputs['voxels'] = tf.stack(inputs['voxels'])
for k in xrange(num_views):
inputs['matrix_%d' % (k + 1)] = tf.stack(inputs['matrix_%d' % (k + 1)])
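    # At this point every one of the `quantity` examples has been replicated
    # once per view: images_1 and voxels have quantity * num_views entries,
    # while each matrix_k list holds the view-k camera matrix repeated once
    # per example.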
return inputs
def get_model_fn(self, is_training=True, reuse=False, run_projection=True):
return im2vox_factory.get(self._params, is_training, reuse, run_projection)
def get_regularization_loss(self, scopes):
return losses.regularization_loss(scopes, self._params)
def get_loss(self, inputs, outputs):
"""Computes the loss used for PTN paper (projection + volume loss)."""
g_loss = tf.zeros(dtype=tf.float32, shape=[])
if self._params.proj_weight:
g_loss += losses.add_volume_proj_loss(
inputs, outputs, self._params.step_size, self._params.proj_weight)
if self._params.volume_weight:
g_loss += losses.add_volume_loss(inputs, outputs, 1,
self._params.volume_weight)
slim.summaries.add_scalar_summary(g_loss, 'im2vox_loss', prefix='losses')
return g_loss
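  # The training objective is therefore (schematically)
  #   g_loss = proj_weight * L_projection + volume_weight * L_volume
  # with either term dropped when its weight is zero/unset.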
def get_metrics(self, inputs, outputs):
"""Aggregate the metrics for voxel generation model.
Args:
inputs: Input dictionary of the voxel generation model.
outputs: Output dictionary returned by the voxel generation model.
Returns:
names_to_values: metrics->values (dict).
names_to_updates: metrics->ops (dict).
"""
names_to_values = dict()
names_to_updates = dict()
tmp_values, tmp_updates = metrics.add_volume_iou_metrics(inputs, outputs)
names_to_values.update(tmp_values)
names_to_updates.update(tmp_updates)
for name, value in names_to_values.iteritems():
slim.summaries.add_scalar_summary(
value, name, prefix='eval', print_summary=True)
return names_to_values, names_to_updates
def write_disk_grid(self,
global_step,
log_dir,
input_images,
gt_projs,
pred_projs,
input_voxels=None,
output_voxels=None):
"""Function called by TF to save the prediction periodically."""
summary_freq = self._params.save_every
def write_grid(input_images, gt_projs, pred_projs, global_step,
input_voxels, output_voxels):
"""Native python function to call for writing images to files."""
grid = _build_image_grid(
input_images,
gt_projs,
pred_projs,
input_voxels=input_voxels,
output_voxels=output_voxels)
if global_step % summary_freq == 0:
img_path = os.path.join(log_dir, '%s.jpg' % str(global_step))
utils.save_image(grid, img_path)
return grid
save_op = tf.py_func(write_grid, [
input_images, gt_projs, pred_projs, global_step, input_voxels,
output_voxels
], [tf.uint8], 'write_grid')[0]
slim.summaries.add_image_summary(
tf.expand_dims(save_op, axis=0), name='grid_vis')
return save_op
def get_transform_matrix(self, view_out):
"""Get the 4x4 Perspective Transfromation matrix used for PTN."""
num_views = self._params.num_views
focal_length = self._params.focal_length
focal_range = self._params.focal_range
phi = 30
theta_interval = 360.0 / num_views
theta = theta_interval * view_out
# pylint: disable=invalid-name
camera_matrix = np.zeros((4, 4), dtype=np.float32)
intrinsic_matrix = np.eye(4, dtype=np.float32)
extrinsic_matrix = np.eye(4, dtype=np.float32)
sin_phi = np.sin(float(phi) / 180.0 * np.pi)
cos_phi = np.cos(float(phi) / 180.0 * np.pi)
sin_theta = np.sin(float(-theta) / 180.0 * np.pi)
cos_theta = np.cos(float(-theta) / 180.0 * np.pi)
rotation_azimuth = np.zeros((3, 3), dtype=np.float32)
rotation_azimuth[0, 0] = cos_theta
rotation_azimuth[2, 2] = cos_theta
rotation_azimuth[0, 2] = -sin_theta
rotation_azimuth[2, 0] = sin_theta
rotation_azimuth[1, 1] = 1.0
## rotation axis -- x
rotation_elevation = np.zeros((3, 3), dtype=np.float32)
rotation_elevation[0, 0] = cos_phi
rotation_elevation[0, 1] = sin_phi
rotation_elevation[1, 0] = -sin_phi
rotation_elevation[1, 1] = cos_phi
rotation_elevation[2, 2] = 1.0
rotation_matrix = np.matmul(rotation_azimuth, rotation_elevation)
displacement = np.zeros((3, 1), dtype=np.float32)
displacement[0, 0] = float(focal_length) + float(focal_range) / 2.0
displacement = np.matmul(rotation_matrix, displacement)
extrinsic_matrix[0:3, 0:3] = rotation_matrix
extrinsic_matrix[0:3, 3:4] = -displacement
intrinsic_matrix[2, 2] = 1.0 / float(focal_length)
intrinsic_matrix[1, 1] = 1.0 / float(focal_length)
camera_matrix = np.matmul(extrinsic_matrix, intrinsic_matrix)
return camera_matrix
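  # Sketch of the composition above: R is the product of the azimuth rotation
  # (theta = view_out * 360 / num_views) and the 30-degree elevation rotation,
  # the extrinsic matrix is [R | -R*d] with the camera placed at distance
  # focal_length + focal_range / 2, the intrinsic matrix scales by
  # 1 / focal_length, and the returned matrix is extrinsic * intrinsic.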
def _build_image_grid(input_images,
gt_projs,
pred_projs,
input_voxels,
output_voxels,
vis_size=128):
"""Builds a grid image by concatenating the input images."""
quantity = input_images.shape[0]
for row in xrange(int(quantity / 3)):
for col in xrange(3):
index = row * 3 + col
input_img_ = utils.resize_image(input_images[index, :, :, :], vis_size,
vis_size)
gt_proj_ = utils.resize_image(gt_projs[index, :, :, :], vis_size,
vis_size)
pred_proj_ = utils.resize_image(pred_projs[index, :, :, :], vis_size,
vis_size)
gt_voxel_vis = utils.resize_image(
utils.display_voxel(input_voxels[index, :, :, :, 0]), vis_size,
vis_size)
pred_voxel_vis = utils.resize_image(
utils.display_voxel(output_voxels[index, :, :, :, 0]), vis_size,
vis_size)
if col == 0:
tmp_ = np.concatenate(
[input_img_, gt_proj_, pred_proj_, gt_voxel_vis, pred_voxel_vis], 1)
else:
tmp_ = np.concatenate([
tmp_, input_img_, gt_proj_, pred_proj_, gt_voxel_vis, pred_voxel_vis
], 1)
if row == 0:
out_grid = tmp_
else:
out_grid = np.concatenate([out_grid, tmp_], 0)
return out_grid
| xcyan/models | ptn/model_ptn.py | Python | apache-2.0 | 8,258 | 0.004481 |
from django.shortcuts import render_to_response
from django.template import RequestContext
from webinars_web.webinars.views import syncs
def show(request, sync_id):
from webinars_web.webinars import models as wm
return syncs._show(request, 'hub', wm.HubSync.objects.select_related('hub').get(pk=sync_id))
def new(request, hub_id):
from webinars_web.webinars import models as wm
return syncs._new(request, 'hub', wm.Hub.objects.get(pk=hub_id))
def interrupt(request, hub_id):
from webinars_web.webinars import models as wm
return syncs._interrupt(request, 'hub', wm.Hub.objects.get(pk=hub_id))
def list(request, hub_id):
from webinars_web.webinars import models as wm
hub = wm.Hub.objects.get(pk=hub_id)
hub_syncs = wm.HubSync.objects.filter(hub=hub).order_by('-started_at')
account_syncs = wm.AccountSync.objects.filter(account__hub=hub, parent__isnull=True).order_by('-started_at')
event_syncs = wm.EventSync.objects.filter(event__account__hub=hub, parent__isnull=True).order_by('-started_at')
return render_to_response('hub_syncs/list.djml', {'hub':hub, 'hub_syncs':hub_syncs, 'account_syncs':account_syncs, 'event_syncs':event_syncs}, context_instance=RequestContext(request))
| prior/webinars | webinars_web/webinars/views/hub_syncs.py | Python | apache-2.0 | 1,234 | 0.010535 |
"""Tutorial on how to create a convolutional autoencoder w/ Tensorflow.
Parag K. Mital, Jan 2016
"""
import tensorflow as tf
import numpy as np
import math
from libs.activations import lrelu
from libs.utils import corrupt
# %%
def autoencoder(input_shape=[None, 784],
n_filters=[1, 10, 10, 10],
filter_sizes=[3, 3, 3, 3],
corruption=False):
"""Build a deep denoising autoencoder w/ tied weights.
Parameters
----------
    input_shape : list, optional
        Shape of the input tensor, e.g. [None, 784] for flattened MNIST.
    n_filters : list, optional
        Number of filters per layer; the first entry is the number of
        input channels.
    filter_sizes : list, optional
        Convolution kernel size used at each layer.
    corruption : bool, optional
        Whether to corrupt the input (denoising autoencoder).
Returns
-------
x : Tensor
Input placeholder to the network
z : Tensor
Inner-most latent representation
y : Tensor
Output reconstruction of the input
cost : Tensor
Overall cost to use for training
Raises
------
    ValueError
        If the input is neither 2-D (with a square number of features) nor 4-D.
"""
# %%
# input to the network
x = tf.placeholder(
tf.float32, input_shape, name='x')
# %%
# ensure 2-d is converted to square tensor.
if len(x.get_shape()) == 2:
x_dim = np.sqrt(x.get_shape().as_list()[1])
if x_dim != int(x_dim):
raise ValueError('Unsupported input dimensions')
x_dim = int(x_dim)
x_tensor = tf.reshape(
x, [-1, x_dim, x_dim, n_filters[0]])
elif len(x.get_shape()) == 4:
x_tensor = x
else:
raise ValueError('Unsupported input dimensions')
current_input = x_tensor
# %%
# Optionally apply denoising autoencoder
if corruption:
current_input = corrupt(current_input)
# %%
# Build the encoder
encoder = []
shapes = []
for layer_i, n_output in enumerate(n_filters[1:]):
n_input = current_input.get_shape().as_list()[3]
shapes.append(current_input.get_shape().as_list())
W = tf.Variable(
tf.random_uniform([
filter_sizes[layer_i],
filter_sizes[layer_i],
n_input, n_output],
-1.0 / math.sqrt(n_input),
1.0 / math.sqrt(n_input)))
b = tf.Variable(tf.zeros([n_output]))
encoder.append(W)
output = lrelu(
tf.add(tf.nn.conv2d(
current_input, W, strides=[1, 2, 2, 1], padding='SAME'), b))
current_input = output
# %%
# store the latent representation
z = current_input
encoder.reverse()
shapes.reverse()
# %%
# Build the decoder using the same weights
for layer_i, shape in enumerate(shapes):
W = encoder[layer_i]
b = tf.Variable(tf.zeros([W.get_shape().as_list()[2]]))
output = lrelu(tf.add(
tf.nn.conv2d_transpose(
current_input, W,
tf.pack([tf.shape(x)[0], shape[1], shape[2], shape[3]]),
strides=[1, 2, 2, 1], padding='SAME'), b))
current_input = output
# %%
# now have the reconstruction through the network
y = current_input
# cost function measures pixel-wise difference
cost = tf.reduce_sum(tf.square(y - x_tensor))
# %%
return {'x': x, 'z': z, 'y': y, 'cost': cost}
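# Minimal usage sketch (MNIST-shaped input of 784 values per example; see
# test_mnist() below for the full version with real data and plotting):
#
#     ae = autoencoder()
#     optimizer = tf.train.AdamOptimizer(0.01).minimize(ae['cost'])
#     sess = tf.Session()
#     sess.run(tf.initialize_all_variables())
#     sess.run(optimizer, feed_dict={ae['x']: batch})  # batch: [N, 784] array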
# %%
def test_mnist():
"""Test the convolutional autoencder using MNIST."""
# %%
import tensorflow as tf
import tensorflow.examples.tutorials.mnist.input_data as input_data
import matplotlib.pyplot as plt
# %%
# load MNIST as before
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
mean_img = np.mean(mnist.train.images, axis=0)
ae = autoencoder()
# %%
learning_rate = 0.01
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(ae['cost'])
# %%
# We create a session to use the graph
sess = tf.Session()
sess.run(tf.initialize_all_variables())
# %%
# Fit all training data
batch_size = 100
n_epochs = 10
for epoch_i in range(n_epochs):
for batch_i in range(mnist.train.num_examples // batch_size):
batch_xs, _ = mnist.train.next_batch(batch_size)
train = np.array([img - mean_img for img in batch_xs])
sess.run(optimizer, feed_dict={ae['x']: train})
print(epoch_i, sess.run(ae['cost'], feed_dict={ae['x']: train}))
# %%
# Plot example reconstructions
n_examples = 10
test_xs, _ = mnist.test.next_batch(n_examples)
test_xs_norm = np.array([img - mean_img for img in test_xs])
recon = sess.run(ae['y'], feed_dict={ae['x']: test_xs_norm})
print(recon.shape)
fig, axs = plt.subplots(2, n_examples, figsize=(10, 2))
for example_i in range(n_examples):
axs[0][example_i].imshow(
np.reshape(test_xs[example_i, :], (28, 28)))
axs[1][example_i].imshow(
np.reshape(
np.reshape(recon[example_i, ...], (784,)) + mean_img,
(28, 28)))
fig.show()
plt.draw()
plt.waitforbuttonpress()
# %%
if __name__ == '__main__':
test_mnist()
| apoorva-sharma/deep-frame-interpolation | tensorflow_tutorials-master/python/09_convolutional_autoencoder.py | Python | mit | 5,033 | 0.000199 |
#!/bin/env python
"""
This file defines a set of system_info classes for getting
information about various resources (libraries, library directories,
include directories, etc.) in the system. Currently, the following
classes are available:
atlas_info
atlas_threads_info
atlas_blas_info
atlas_blas_threads_info
lapack_atlas_info
lapack_atlas_threads_info
atlas_3_10_info
atlas_3_10_threads_info
atlas_3_10_blas_info,
atlas_3_10_blas_threads_info,
lapack_atlas_3_10_info
lapack_atlas_3_10_threads_info
blas_info
lapack_info
openblas_info
blis_info
blas_opt_info # usage recommended
lapack_opt_info # usage recommended
fftw_info,dfftw_info,sfftw_info
fftw_threads_info,dfftw_threads_info,sfftw_threads_info
djbfft_info
x11_info
lapack_src_info
blas_src_info
numpy_info
numarray_info
boost_python_info
agg2_info
wx_info
gdk_pixbuf_xlib_2_info
gdk_pixbuf_2_info
gdk_x11_2_info
gtkp_x11_2_info
gtkp_2_info
xft_info
freetype2_info
umfpack_info
Usage:
info_dict = get_info(<name>)
where <name> is a string 'atlas','x11','fftw','lapack','blas',
'lapack_src', 'blas_src', etc. For a complete list of allowed names,
see the definition of get_info() function below.
Returned info_dict is a dictionary which is compatible with
distutils.setup keyword arguments. If info_dict == {}, then the
asked resource is not available (system_info could not find it).
Several *_info classes specify an environment variable to specify
the locations of software. When setting the corresponding environment
variable to 'None' then the software will be ignored, even when it
is available in system.
Global parameters:
system_info.search_static_first - search static libraries (.a)
in precedence to shared ones (.so, .sl) if enabled.
system_info.verbosity - output the results to stdout if enabled.
The file 'site.cfg' is looked for in
1) Directory of main setup.py file being run.
2) Home directory of user running the setup.py file as ~/.numpy-site.cfg
3) System wide directory (location of this file...)
The first one found is used to get system configuration options The
format is that used by ConfigParser (i.e., Windows .INI style). The
section ALL has options that are the default for each section. The
available sections are fftw, atlas, and x11. Appropriate defaults are
used if nothing is specified.
The order of finding the locations of resources is the following:
1. environment variable
2. section in site.cfg
3. ALL section in site.cfg
Only the first complete match is returned.
Example:
----------
[ALL]
library_dirs = /usr/lib:/usr/local/lib:/opt/lib
include_dirs = /usr/include:/usr/local/include:/opt/include
src_dirs = /usr/local/src:/opt/src
# search static libraries (.a) in preference to shared ones (.so)
search_static_first = 0
[fftw]
fftw_libs = rfftw, fftw
fftw_opt_libs = rfftw_threaded, fftw_threaded
# if the above aren't found, look for {s,d}fftw_libs and {s,d}fftw_opt_libs
[atlas]
library_dirs = /usr/lib/3dnow:/usr/lib/3dnow/atlas
# for overriding the names of the atlas libraries
atlas_libs = lapack, f77blas, cblas, atlas
[x11]
library_dirs = /usr/X11R6/lib
include_dirs = /usr/X11R6/include
----------
Authors:
Pearu Peterson <[email protected]>, February 2002
David M. Cooke <[email protected]>, April 2002
Copyright 2002 Pearu Peterson all rights reserved,
Pearu Peterson <[email protected]>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy (BSD style) license. See LICENSE.txt that came with
this distribution for specifics.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
"""
from __future__ import division, absolute_import, print_function
import sys
import os
import re
import copy
import warnings
from glob import glob
from functools import reduce
if sys.version_info[0] < 3:
from ConfigParser import NoOptionError
from ConfigParser import RawConfigParser as ConfigParser
else:
from configparser import NoOptionError
from configparser import RawConfigParser as ConfigParser
# It seems that some people are importing ConfigParser from here so is
# good to keep its class name. Use of RawConfigParser is needed in
# order to be able to load path names with percent in them, like
# `feature%2Fcool` which is common on git flow branch names.
from distutils.errors import DistutilsError
from distutils.dist import Distribution
import distutils.sysconfig
from distutils import log
from distutils.util import get_platform
from numpy.distutils.exec_command import \
find_executable, exec_command, get_pythonexe
from numpy.distutils.misc_util import is_sequence, is_string, \
get_shared_lib_extension
from numpy.distutils.command.config import config as cmd_config
from numpy.distutils.compat import get_exception
import distutils.ccompiler
import tempfile
import shutil
# Determine number of bits
import platform
_bits = {'32bit': 32, '64bit': 64}
platform_bits = _bits[platform.architecture()[0]]
def libpaths(paths, bits):
"""Return a list of library paths valid on 32 or 64 bit systems.
Inputs:
paths : sequence
A sequence of strings (typically paths)
bits : int
An integer, the only valid values are 32 or 64. A ValueError exception
is raised otherwise.
Examples:
Consider a list of directories
>>> paths = ['/usr/X11R6/lib','/usr/X11/lib','/usr/lib']
For a 32-bit platform, this is already valid:
>>> np.distutils.system_info.libpaths(paths,32)
['/usr/X11R6/lib', '/usr/X11/lib', '/usr/lib']
On 64 bits, we prepend the '64' postfix
>>> np.distutils.system_info.libpaths(paths,64)
['/usr/X11R6/lib64', '/usr/X11R6/lib', '/usr/X11/lib64', '/usr/X11/lib',
'/usr/lib64', '/usr/lib']
"""
if bits not in (32, 64):
raise ValueError("Invalid bit size in libpaths: 32 or 64 only")
# Handle 32bit case
if bits == 32:
return paths
# Handle 64bit case
out = []
for p in paths:
out.extend([p + '64', p])
return out
if sys.platform == 'win32':
default_lib_dirs = ['C:\\',
os.path.join(distutils.sysconfig.EXEC_PREFIX,
'libs')]
default_runtime_dirs = []
default_include_dirs = []
default_src_dirs = ['.']
default_x11_lib_dirs = []
default_x11_include_dirs = []
else:
default_lib_dirs = libpaths(['/usr/local/lib', '/opt/lib', '/usr/lib',
'/opt/local/lib', '/sw/lib'], platform_bits)
default_runtime_dirs = []
default_include_dirs = ['/usr/local/include',
'/opt/include', '/usr/include',
# path of umfpack under macports
'/opt/local/include/ufsparse',
'/opt/local/include', '/sw/include',
'/usr/include/suitesparse']
default_src_dirs = ['.', '/usr/local/src', '/opt/src', '/sw/src']
default_x11_lib_dirs = libpaths(['/usr/X11R6/lib', '/usr/X11/lib',
'/usr/lib'], platform_bits)
default_x11_include_dirs = ['/usr/X11R6/include', '/usr/X11/include',
'/usr/include']
if os.path.exists('/usr/lib/X11'):
globbed_x11_dir = glob('/usr/lib/*/libX11.so')
if globbed_x11_dir:
x11_so_dir = os.path.split(globbed_x11_dir[0])[0]
default_x11_lib_dirs.extend([x11_so_dir, '/usr/lib/X11'])
default_x11_include_dirs.extend(['/usr/lib/X11/include',
'/usr/include/X11'])
import subprocess as sp
tmp = None
try:
# Explicitly open/close file to avoid ResourceWarning when
    # tests are run in debug mode under Python 3.
tmp = open(os.devnull, 'w')
p = sp.Popen(["gcc", "-print-multiarch"], stdout=sp.PIPE,
stderr=tmp)
except (OSError, DistutilsError):
# OSError if gcc is not installed, or SandboxViolation (DistutilsError
# subclass) if an old setuptools bug is triggered (see gh-3160).
pass
else:
triplet = str(p.communicate()[0].decode().strip())
if p.returncode == 0:
# gcc supports the "-print-multiarch" option
default_x11_lib_dirs += [os.path.join("/usr/lib/", triplet)]
default_lib_dirs += [os.path.join("/usr/lib/", triplet)]
finally:
if tmp is not None:
tmp.close()
if os.path.join(sys.prefix, 'lib') not in default_lib_dirs:
default_lib_dirs.insert(0, os.path.join(sys.prefix, 'lib'))
default_include_dirs.append(os.path.join(sys.prefix, 'include'))
default_src_dirs.append(os.path.join(sys.prefix, 'src'))
default_lib_dirs = [_m for _m in default_lib_dirs if os.path.isdir(_m)]
default_runtime_dirs = [_m for _m in default_runtime_dirs if os.path.isdir(_m)]
default_include_dirs = [_m for _m in default_include_dirs if os.path.isdir(_m)]
default_src_dirs = [_m for _m in default_src_dirs if os.path.isdir(_m)]
so_ext = get_shared_lib_extension()
def get_standard_file(fname):
"""Returns a list of files named 'fname' from
1) System-wide directory (directory-location of this module)
    2) User's HOME directory (os.environ['HOME'])
3) Local directory
"""
# System-wide file
filenames = []
try:
f = __file__
except NameError:
f = sys.argv[0]
else:
sysfile = os.path.join(os.path.split(os.path.abspath(f))[0],
fname)
if os.path.isfile(sysfile):
filenames.append(sysfile)
# Home directory
# And look for the user config file
try:
f = os.path.expanduser('~')
except KeyError:
pass
else:
user_file = os.path.join(f, fname)
if os.path.isfile(user_file):
filenames.append(user_file)
# Local file
if os.path.isfile(fname):
filenames.append(os.path.abspath(fname))
return filenames
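# Illustrative sketch (not used by the build itself): the file names returned
# here are later handed to ConfigParser.read() in system_info.__init__, so
# settings from files found later in the list (the user and local copies)
# override the system-wide defaults.
#
#     for cfg in get_standard_file('site.cfg'):
#         print('configuration would be read from', cfg)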
def get_info(name, notfound_action=0):
"""
notfound_action:
0 - do nothing
1 - display warning message
2 - raise error
"""
cl = {'atlas': atlas_info, # use lapack_opt or blas_opt instead
'atlas_threads': atlas_threads_info, # ditto
'atlas_blas': atlas_blas_info,
'atlas_blas_threads': atlas_blas_threads_info,
'lapack_atlas': lapack_atlas_info, # use lapack_opt instead
'lapack_atlas_threads': lapack_atlas_threads_info, # ditto
'atlas_3_10': atlas_3_10_info, # use lapack_opt or blas_opt instead
'atlas_3_10_threads': atlas_3_10_threads_info, # ditto
'atlas_3_10_blas': atlas_3_10_blas_info,
'atlas_3_10_blas_threads': atlas_3_10_blas_threads_info,
'lapack_atlas_3_10': lapack_atlas_3_10_info, # use lapack_opt instead
'lapack_atlas_3_10_threads': lapack_atlas_3_10_threads_info, # ditto
'mkl': mkl_info,
# openblas which may or may not have embedded lapack
'openblas': openblas_info, # use blas_opt instead
# openblas with embedded lapack
'openblas_lapack': openblas_lapack_info, # use blas_opt instead
'blis': blis_info, # use blas_opt instead
'lapack_mkl': lapack_mkl_info, # use lapack_opt instead
'blas_mkl': blas_mkl_info, # use blas_opt instead
'x11': x11_info,
'fft_opt': fft_opt_info,
'fftw': fftw_info,
'fftw2': fftw2_info,
'fftw3': fftw3_info,
'dfftw': dfftw_info,
'sfftw': sfftw_info,
'fftw_threads': fftw_threads_info,
'dfftw_threads': dfftw_threads_info,
'sfftw_threads': sfftw_threads_info,
'djbfft': djbfft_info,
'blas': blas_info, # use blas_opt instead
'lapack': lapack_info, # use lapack_opt instead
'lapack_src': lapack_src_info,
'blas_src': blas_src_info,
'numpy': numpy_info,
'f2py': f2py_info,
'Numeric': Numeric_info,
'numeric': Numeric_info,
'numarray': numarray_info,
'numerix': numerix_info,
'lapack_opt': lapack_opt_info,
'blas_opt': blas_opt_info,
'boost_python': boost_python_info,
'agg2': agg2_info,
'wx': wx_info,
'gdk_pixbuf_xlib_2': gdk_pixbuf_xlib_2_info,
'gdk-pixbuf-xlib-2.0': gdk_pixbuf_xlib_2_info,
'gdk_pixbuf_2': gdk_pixbuf_2_info,
'gdk-pixbuf-2.0': gdk_pixbuf_2_info,
'gdk': gdk_info,
'gdk_2': gdk_2_info,
'gdk-2.0': gdk_2_info,
'gdk_x11_2': gdk_x11_2_info,
'gdk-x11-2.0': gdk_x11_2_info,
'gtkp_x11_2': gtkp_x11_2_info,
'gtk+-x11-2.0': gtkp_x11_2_info,
'gtkp_2': gtkp_2_info,
'gtk+-2.0': gtkp_2_info,
'xft': xft_info,
'freetype2': freetype2_info,
'umfpack': umfpack_info,
'amd': amd_info,
}.get(name.lower(), system_info)
return cl().get_info(notfound_action)
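# Hedged usage sketch: the returned dictionaries are intended to be passed on
# as numpy.distutils setup()/Extension keyword arguments; the snippet below is
# illustrative only and not part of this module's machinery.
#
#     from numpy.distutils.system_info import get_info
#     lapack_opt = get_info('lapack_opt', notfound_action=2)  # raise if absent
#     # typical keys: 'libraries', 'library_dirs', 'define_macros', ...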
class NotFoundError(DistutilsError):
"""Some third-party program or library is not found."""
class AtlasNotFoundError(NotFoundError):
"""
Atlas (http://math-atlas.sourceforge.net/) libraries not found.
Directories to search for the libraries can be specified in the
numpy/distutils/site.cfg file (section [atlas]) or by setting
the ATLAS environment variable."""
class LapackNotFoundError(NotFoundError):
"""
Lapack (http://www.netlib.org/lapack/) libraries not found.
Directories to search for the libraries can be specified in the
numpy/distutils/site.cfg file (section [lapack]) or by setting
the LAPACK environment variable."""
class LapackSrcNotFoundError(LapackNotFoundError):
"""
Lapack (http://www.netlib.org/lapack/) sources not found.
Directories to search for the sources can be specified in the
numpy/distutils/site.cfg file (section [lapack_src]) or by setting
the LAPACK_SRC environment variable."""
class BlasNotFoundError(NotFoundError):
"""
Blas (http://www.netlib.org/blas/) libraries not found.
Directories to search for the libraries can be specified in the
numpy/distutils/site.cfg file (section [blas]) or by setting
the BLAS environment variable."""
class BlasSrcNotFoundError(BlasNotFoundError):
"""
Blas (http://www.netlib.org/blas/) sources not found.
Directories to search for the sources can be specified in the
numpy/distutils/site.cfg file (section [blas_src]) or by setting
the BLAS_SRC environment variable."""
class FFTWNotFoundError(NotFoundError):
"""
FFTW (http://www.fftw.org/) libraries not found.
Directories to search for the libraries can be specified in the
numpy/distutils/site.cfg file (section [fftw]) or by setting
the FFTW environment variable."""
class DJBFFTNotFoundError(NotFoundError):
"""
DJBFFT (http://cr.yp.to/djbfft.html) libraries not found.
Directories to search for the libraries can be specified in the
numpy/distutils/site.cfg file (section [djbfft]) or by setting
the DJBFFT environment variable."""
class NumericNotFoundError(NotFoundError):
"""
Numeric (http://www.numpy.org/) module not found.
Get it from above location, install it, and retry setup.py."""
class X11NotFoundError(NotFoundError):
"""X11 libraries not found."""
class UmfpackNotFoundError(NotFoundError):
"""
UMFPACK sparse solver (http://www.cise.ufl.edu/research/sparse/umfpack/)
not found. Directories to search for the libraries can be specified in the
numpy/distutils/site.cfg file (section [umfpack]) or by setting
the UMFPACK environment variable."""
class system_info(object):
""" get_info() is the only public method. Don't use others.
"""
section = 'ALL'
dir_env_var = None
search_static_first = 0 # XXX: disabled by default, may disappear in
# future unless it is proved to be useful.
verbosity = 1
saved_results = {}
notfounderror = NotFoundError
def __init__(self,
default_lib_dirs=default_lib_dirs,
default_include_dirs=default_include_dirs,
verbosity=1,
):
self.__class__.info = {}
self.local_prefixes = []
defaults = {'library_dirs': os.pathsep.join(default_lib_dirs),
'include_dirs': os.pathsep.join(default_include_dirs),
'runtime_library_dirs': os.pathsep.join(default_runtime_dirs),
'rpath': '',
'src_dirs': os.pathsep.join(default_src_dirs),
'search_static_first': str(self.search_static_first),
'extra_compile_args': '', 'extra_link_args': ''}
self.cp = ConfigParser(defaults)
self.files = []
self.files.extend(get_standard_file('.numpy-site.cfg'))
self.files.extend(get_standard_file('site.cfg'))
self.parse_config_files()
if self.section is not None:
self.search_static_first = self.cp.getboolean(
self.section, 'search_static_first')
assert isinstance(self.search_static_first, int)
def parse_config_files(self):
self.cp.read(self.files)
if not self.cp.has_section(self.section):
if self.section is not None:
self.cp.add_section(self.section)
def calc_libraries_info(self):
libs = self.get_libraries()
dirs = self.get_lib_dirs()
# The extensions use runtime_library_dirs
r_dirs = self.get_runtime_lib_dirs()
# Intrinsic distutils use rpath, we simply append both entries
# as though they were one entry
r_dirs.extend(self.get_runtime_lib_dirs(key='rpath'))
info = {}
for lib in libs:
i = self.check_libs(dirs, [lib])
if i is not None:
dict_append(info, **i)
else:
log.info('Library %s was not found. Ignoring' % (lib))
if r_dirs:
i = self.check_libs(r_dirs, [lib])
if i is not None:
                    # Swap library keywords found to runtime_library_dirs:
                    # the libraries themselves are expected to have been found
                    # via library_dirs, so only the directories are kept here,
                    # recorded as runtime_library_dirs.
del i['libraries']
i['runtime_library_dirs'] = i.pop('library_dirs')
dict_append(info, **i)
else:
log.info('Runtime library %s was not found. Ignoring' % (lib))
return info
def set_info(self, **info):
if info:
lib_info = self.calc_libraries_info()
dict_append(info, **lib_info)
# Update extra information
extra_info = self.calc_extra_info()
dict_append(info, **extra_info)
self.saved_results[self.__class__.__name__] = info
def has_info(self):
return self.__class__.__name__ in self.saved_results
def calc_extra_info(self):
""" Updates the information in the current information with
respect to these flags:
extra_compile_args
extra_link_args
"""
info = {}
for key in ['extra_compile_args', 'extra_link_args']:
# Get values
opt = self.cp.get(self.section, key)
if opt:
tmp = {key : [opt]}
dict_append(info, **tmp)
return info
def get_info(self, notfound_action=0):
""" Return a dictonary with items that are compatible
with numpy.distutils.setup keyword arguments.
"""
flag = 0
if not self.has_info():
flag = 1
log.info(self.__class__.__name__ + ':')
if hasattr(self, 'calc_info'):
self.calc_info()
if notfound_action:
if not self.has_info():
if notfound_action == 1:
warnings.warn(self.notfounderror.__doc__, stacklevel=2)
elif notfound_action == 2:
raise self.notfounderror(self.notfounderror.__doc__)
else:
raise ValueError(repr(notfound_action))
if not self.has_info():
log.info(' NOT AVAILABLE')
self.set_info()
else:
log.info(' FOUND:')
res = self.saved_results.get(self.__class__.__name__)
if self.verbosity > 0 and flag:
for k, v in res.items():
v = str(v)
if k in ['sources', 'libraries'] and len(v) > 270:
v = v[:120] + '...\n...\n...' + v[-120:]
log.info(' %s = %s', k, v)
log.info('')
return copy.deepcopy(res)
def get_paths(self, section, key):
dirs = self.cp.get(section, key).split(os.pathsep)
env_var = self.dir_env_var
if env_var:
if is_sequence(env_var):
e0 = env_var[-1]
for e in env_var:
if e in os.environ:
e0 = e
break
if not env_var[0] == e0:
log.info('Setting %s=%s' % (env_var[0], e0))
env_var = e0
if env_var and env_var in os.environ:
d = os.environ[env_var]
if d == 'None':
log.info('Disabled %s: %s',
self.__class__.__name__, '(%s is None)'
% (env_var,))
return []
if os.path.isfile(d):
dirs = [os.path.dirname(d)] + dirs
l = getattr(self, '_lib_names', [])
if len(l) == 1:
b = os.path.basename(d)
b = os.path.splitext(b)[0]
if b[:3] == 'lib':
log.info('Replacing _lib_names[0]==%r with %r' \
% (self._lib_names[0], b[3:]))
self._lib_names[0] = b[3:]
else:
ds = d.split(os.pathsep)
ds2 = []
for d in ds:
if os.path.isdir(d):
ds2.append(d)
for dd in ['include', 'lib']:
d1 = os.path.join(d, dd)
if os.path.isdir(d1):
ds2.append(d1)
dirs = ds2 + dirs
default_dirs = self.cp.get(self.section, key).split(os.pathsep)
dirs.extend(default_dirs)
ret = []
for d in dirs:
if len(d) > 0 and not os.path.isdir(d):
warnings.warn('Specified path %s is invalid.' % d, stacklevel=2)
continue
if d not in ret:
ret.append(d)
log.debug('( %s = %s )', key, ':'.join(ret))
return ret
def get_lib_dirs(self, key='library_dirs'):
return self.get_paths(self.section, key)
def get_runtime_lib_dirs(self, key='runtime_library_dirs'):
path = self.get_paths(self.section, key)
if path == ['']:
path = []
return path
def get_include_dirs(self, key='include_dirs'):
return self.get_paths(self.section, key)
def get_src_dirs(self, key='src_dirs'):
return self.get_paths(self.section, key)
def get_libs(self, key, default):
try:
libs = self.cp.get(self.section, key)
except NoOptionError:
if not default:
return []
if is_string(default):
return [default]
return default
return [b for b in [a.strip() for a in libs.split(',')] if b]
def get_libraries(self, key='libraries'):
if hasattr(self, '_lib_names'):
return self.get_libs(key, default=self._lib_names)
else:
return self.get_libs(key, '')
def library_extensions(self):
static_exts = ['.a']
if sys.platform == 'win32':
static_exts.append('.lib') # .lib is used by MSVC
if self.search_static_first:
exts = static_exts + [so_ext]
else:
exts = [so_ext] + static_exts
if sys.platform == 'cygwin':
exts.append('.dll.a')
if sys.platform == 'darwin':
exts.append('.dylib')
return exts
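    # Worked example (illustrative): on a typical Linux build with
    # search_static_first left at its default of 0,
    #
    #     self.library_extensions()   # -> ['.so', '.a']
    #
    # i.e. shared libraries are searched before static ones; setting
    # search_static_first = 1 (e.g. in site.cfg) reverses that order.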
def check_libs(self, lib_dirs, libs, opt_libs=[]):
"""If static or shared libraries are available then return
their info dictionary.
Checks for all libraries as shared libraries first, then
static (or vice versa if self.search_static_first is True).
"""
exts = self.library_extensions()
info = None
for ext in exts:
info = self._check_libs(lib_dirs, libs, opt_libs, [ext])
if info is not None:
break
if not info:
log.info(' libraries %s not found in %s', ','.join(libs),
lib_dirs)
return info
def check_libs2(self, lib_dirs, libs, opt_libs=[]):
"""If static or shared libraries are available then return
their info dictionary.
Checks each library for shared or static.
"""
exts = self.library_extensions()
info = self._check_libs(lib_dirs, libs, opt_libs, exts)
if not info:
log.info(' libraries %s not found in %s', ','.join(libs),
lib_dirs)
return info
def _find_lib(self, lib_dir, lib, exts):
assert is_string(lib_dir)
# under windows first try without 'lib' prefix
if sys.platform == 'win32':
lib_prefixes = ['', 'lib']
else:
lib_prefixes = ['lib']
# for each library name, see if we can find a file for it.
for ext in exts:
for prefix in lib_prefixes:
p = self.combine_paths(lib_dir, prefix + lib + ext)
if p:
break
if p:
assert len(p) == 1
# ??? splitext on p[0] would do this for cygwin
# doesn't seem correct
if ext == '.dll.a':
lib += '.dll'
return lib
return False
def _find_libs(self, lib_dirs, libs, exts):
# make sure we preserve the order of libs, as it can be important
found_dirs, found_libs = [], []
for lib in libs:
for lib_dir in lib_dirs:
found_lib = self._find_lib(lib_dir, lib, exts)
if found_lib:
found_libs.append(found_lib)
if lib_dir not in found_dirs:
found_dirs.append(lib_dir)
break
return found_dirs, found_libs
def _check_libs(self, lib_dirs, libs, opt_libs, exts):
"""Find mandatory and optional libs in expected paths.
Missing optional libraries are silently forgotten.
"""
if not is_sequence(lib_dirs):
lib_dirs = [lib_dirs]
# First, try to find the mandatory libraries
found_dirs, found_libs = self._find_libs(lib_dirs, libs, exts)
if len(found_libs) > 0 and len(found_libs) == len(libs):
# Now, check for optional libraries
opt_found_dirs, opt_found_libs = self._find_libs(lib_dirs, opt_libs, exts)
found_libs.extend(opt_found_libs)
for lib_dir in opt_found_dirs:
if lib_dir not in found_dirs:
found_dirs.append(lib_dir)
info = {'libraries': found_libs, 'library_dirs': found_dirs}
return info
else:
return None
def combine_paths(self, *args):
"""Return a list of existing paths composed by all combinations
of items from the arguments.
"""
return combine_paths(*args, **{'verbosity': self.verbosity})
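# The concrete classes below all follow the same pattern, sketched here with a
# hypothetical dependency; the name 'foo', the [foo] section and the FOO
# environment variable are illustrative only and not handled by this module:
#
#     class foo_info(system_info):
#         section = 'foo'
#         dir_env_var = 'FOO'
#         _lib_names = ['foo']
#
#         def calc_info(self):
#             lib_dirs = self.get_lib_dirs()
#             libs = self.get_libs('foo_libs', self._lib_names)
#             info = self.check_libs(lib_dirs, libs)
#             if info is not None:
#                 self.set_info(**info)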
class fft_opt_info(system_info):
def calc_info(self):
info = {}
fftw_info = get_info('fftw3') or get_info('fftw2') or get_info('dfftw')
djbfft_info = get_info('djbfft')
if fftw_info:
dict_append(info, **fftw_info)
if djbfft_info:
dict_append(info, **djbfft_info)
self.set_info(**info)
return
class fftw_info(system_info):
#variables to override
section = 'fftw'
dir_env_var = 'FFTW'
notfounderror = FFTWNotFoundError
ver_info = [{'name':'fftw3',
'libs':['fftw3'],
'includes':['fftw3.h'],
'macros':[('SCIPY_FFTW3_H', None)]},
{'name':'fftw2',
'libs':['rfftw', 'fftw'],
'includes':['fftw.h', 'rfftw.h'],
'macros':[('SCIPY_FFTW_H', None)]}]
def calc_ver_info(self, ver_param):
"""Returns True on successful version detection, else False"""
lib_dirs = self.get_lib_dirs()
incl_dirs = self.get_include_dirs()
incl_dir = None
libs = self.get_libs(self.section + '_libs', ver_param['libs'])
info = self.check_libs(lib_dirs, libs)
if info is not None:
flag = 0
for d in incl_dirs:
if len(self.combine_paths(d, ver_param['includes'])) \
== len(ver_param['includes']):
dict_append(info, include_dirs=[d])
flag = 1
incl_dirs = [d]
break
if flag:
dict_append(info, define_macros=ver_param['macros'])
else:
info = None
if info is not None:
self.set_info(**info)
return True
else:
log.info(' %s not found' % (ver_param['name']))
return False
def calc_info(self):
for i in self.ver_info:
if self.calc_ver_info(i):
break
class fftw2_info(fftw_info):
#variables to override
section = 'fftw'
dir_env_var = 'FFTW'
notfounderror = FFTWNotFoundError
ver_info = [{'name':'fftw2',
'libs':['rfftw', 'fftw'],
'includes':['fftw.h', 'rfftw.h'],
'macros':[('SCIPY_FFTW_H', None)]}
]
class fftw3_info(fftw_info):
#variables to override
section = 'fftw3'
dir_env_var = 'FFTW3'
notfounderror = FFTWNotFoundError
ver_info = [{'name':'fftw3',
'libs':['fftw3'],
'includes':['fftw3.h'],
'macros':[('SCIPY_FFTW3_H', None)]},
]
class dfftw_info(fftw_info):
section = 'fftw'
dir_env_var = 'FFTW'
ver_info = [{'name':'dfftw',
'libs':['drfftw', 'dfftw'],
'includes':['dfftw.h', 'drfftw.h'],
'macros':[('SCIPY_DFFTW_H', None)]}]
class sfftw_info(fftw_info):
section = 'fftw'
dir_env_var = 'FFTW'
ver_info = [{'name':'sfftw',
'libs':['srfftw', 'sfftw'],
'includes':['sfftw.h', 'srfftw.h'],
'macros':[('SCIPY_SFFTW_H', None)]}]
class fftw_threads_info(fftw_info):
section = 'fftw'
dir_env_var = 'FFTW'
ver_info = [{'name':'fftw threads',
'libs':['rfftw_threads', 'fftw_threads'],
'includes':['fftw_threads.h', 'rfftw_threads.h'],
'macros':[('SCIPY_FFTW_THREADS_H', None)]}]
class dfftw_threads_info(fftw_info):
section = 'fftw'
dir_env_var = 'FFTW'
ver_info = [{'name':'dfftw threads',
'libs':['drfftw_threads', 'dfftw_threads'],
'includes':['dfftw_threads.h', 'drfftw_threads.h'],
'macros':[('SCIPY_DFFTW_THREADS_H', None)]}]
class sfftw_threads_info(fftw_info):
section = 'fftw'
dir_env_var = 'FFTW'
ver_info = [{'name':'sfftw threads',
'libs':['srfftw_threads', 'sfftw_threads'],
'includes':['sfftw_threads.h', 'srfftw_threads.h'],
'macros':[('SCIPY_SFFTW_THREADS_H', None)]}]
class djbfft_info(system_info):
section = 'djbfft'
dir_env_var = 'DJBFFT'
notfounderror = DJBFFTNotFoundError
def get_paths(self, section, key):
pre_dirs = system_info.get_paths(self, section, key)
dirs = []
for d in pre_dirs:
dirs.extend(self.combine_paths(d, ['djbfft']) + [d])
return [d for d in dirs if os.path.isdir(d)]
def calc_info(self):
lib_dirs = self.get_lib_dirs()
incl_dirs = self.get_include_dirs()
info = None
for d in lib_dirs:
p = self.combine_paths(d, ['djbfft.a'])
if p:
info = {'extra_objects': p}
break
p = self.combine_paths(d, ['libdjbfft.a', 'libdjbfft' + so_ext])
if p:
info = {'libraries': ['djbfft'], 'library_dirs': [d]}
break
if info is None:
return
for d in incl_dirs:
if len(self.combine_paths(d, ['fftc8.h', 'fftfreq.h'])) == 2:
dict_append(info, include_dirs=[d],
define_macros=[('SCIPY_DJBFFT_H', None)])
self.set_info(**info)
return
return
class mkl_info(system_info):
section = 'mkl'
dir_env_var = 'MKLROOT'
_lib_mkl = ['mkl_rt']
def get_mkl_rootdir(self):
mklroot = os.environ.get('MKLROOT', None)
if mklroot is not None:
return mklroot
paths = os.environ.get('LD_LIBRARY_PATH', '').split(os.pathsep)
ld_so_conf = '/etc/ld.so.conf'
if os.path.isfile(ld_so_conf):
with open(ld_so_conf, 'r') as f:
for d in f:
d = d.strip()
if d:
paths.append(d)
intel_mkl_dirs = []
for path in paths:
path_atoms = path.split(os.sep)
for m in path_atoms:
if m.startswith('mkl'):
d = os.sep.join(path_atoms[:path_atoms.index(m) + 2])
intel_mkl_dirs.append(d)
break
for d in paths:
dirs = glob(os.path.join(d, 'mkl', '*'))
dirs += glob(os.path.join(d, 'mkl*'))
for d in dirs:
if os.path.isdir(os.path.join(d, 'lib')):
return d
return None
def __init__(self):
mklroot = self.get_mkl_rootdir()
if mklroot is None:
system_info.__init__(self)
else:
from .cpuinfo import cpu
if cpu.is_Itanium():
plt = '64'
elif cpu.is_Intel() and cpu.is_64bit():
plt = 'intel64'
else:
plt = '32'
system_info.__init__(
self,
default_lib_dirs=[os.path.join(mklroot, 'lib', plt)],
default_include_dirs=[os.path.join(mklroot, 'include')])
def calc_info(self):
lib_dirs = self.get_lib_dirs()
incl_dirs = self.get_include_dirs()
mkl_libs = self.get_libs('mkl_libs', self._lib_mkl)
info = self.check_libs2(lib_dirs, mkl_libs)
if info is None:
return
dict_append(info,
define_macros=[('SCIPY_MKL_H', None),
('HAVE_CBLAS', None)],
include_dirs=incl_dirs)
if sys.platform == 'win32':
pass # win32 has no pthread library
else:
dict_append(info, libraries=['pthread'])
self.set_info(**info)
class lapack_mkl_info(mkl_info):
pass
class blas_mkl_info(mkl_info):
pass
class atlas_info(system_info):
section = 'atlas'
dir_env_var = 'ATLAS'
_lib_names = ['f77blas', 'cblas']
if sys.platform[:7] == 'freebsd':
_lib_atlas = ['atlas_r']
_lib_lapack = ['alapack_r']
else:
_lib_atlas = ['atlas']
_lib_lapack = ['lapack']
notfounderror = AtlasNotFoundError
def get_paths(self, section, key):
pre_dirs = system_info.get_paths(self, section, key)
dirs = []
for d in pre_dirs:
dirs.extend(self.combine_paths(d, ['atlas*', 'ATLAS*',
'sse', '3dnow', 'sse2']) + [d])
return [d for d in dirs if os.path.isdir(d)]
def calc_info(self):
lib_dirs = self.get_lib_dirs()
info = {}
atlas_libs = self.get_libs('atlas_libs',
self._lib_names + self._lib_atlas)
lapack_libs = self.get_libs('lapack_libs', self._lib_lapack)
atlas = None
lapack = None
atlas_1 = None
for d in lib_dirs:
atlas = self.check_libs2(d, atlas_libs, [])
lapack_atlas = self.check_libs2(d, ['lapack_atlas'], [])
if atlas is not None:
lib_dirs2 = [d] + self.combine_paths(d, ['atlas*', 'ATLAS*'])
lapack = self.check_libs2(lib_dirs2, lapack_libs, [])
if lapack is not None:
break
if atlas:
atlas_1 = atlas
log.info(self.__class__)
if atlas is None:
atlas = atlas_1
if atlas is None:
return
include_dirs = self.get_include_dirs()
h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None])
h = h[0]
if h:
h = os.path.dirname(h)
dict_append(info, include_dirs=[h])
info['language'] = 'c'
if lapack is not None:
dict_append(info, **lapack)
dict_append(info, **atlas)
elif 'lapack_atlas' in atlas['libraries']:
dict_append(info, **atlas)
dict_append(info,
define_macros=[('ATLAS_WITH_LAPACK_ATLAS', None)])
self.set_info(**info)
return
else:
dict_append(info, **atlas)
dict_append(info, define_macros=[('ATLAS_WITHOUT_LAPACK', None)])
message = """
*********************************************************************
Could not find lapack library within the ATLAS installation.
*********************************************************************
"""
warnings.warn(message, stacklevel=2)
self.set_info(**info)
return
# Check if lapack library is complete, only warn if it is not.
lapack_dir = lapack['library_dirs'][0]
lapack_name = lapack['libraries'][0]
lapack_lib = None
lib_prefixes = ['lib']
if sys.platform == 'win32':
lib_prefixes.append('')
for e in self.library_extensions():
for prefix in lib_prefixes:
fn = os.path.join(lapack_dir, prefix + lapack_name + e)
if os.path.exists(fn):
lapack_lib = fn
break
if lapack_lib:
break
if lapack_lib is not None:
sz = os.stat(lapack_lib)[6]
if sz <= 4000 * 1024:
message = """
*********************************************************************
Lapack library (from ATLAS) is probably incomplete:
size of %s is %sk (expected >4000k)
Follow the instructions in the KNOWN PROBLEMS section of the file
numpy/INSTALL.txt.
*********************************************************************
""" % (lapack_lib, sz / 1024)
warnings.warn(message, stacklevel=2)
else:
info['language'] = 'f77'
atlas_version, atlas_extra_info = get_atlas_version(**atlas)
dict_append(info, **atlas_extra_info)
self.set_info(**info)
class atlas_blas_info(atlas_info):
_lib_names = ['f77blas', 'cblas']
def calc_info(self):
lib_dirs = self.get_lib_dirs()
info = {}
atlas_libs = self.get_libs('atlas_libs',
self._lib_names + self._lib_atlas)
atlas = self.check_libs2(lib_dirs, atlas_libs, [])
if atlas is None:
return
include_dirs = self.get_include_dirs()
h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None])
h = h[0]
if h:
h = os.path.dirname(h)
dict_append(info, include_dirs=[h])
info['language'] = 'c'
info['define_macros'] = [('HAVE_CBLAS', None)]
atlas_version, atlas_extra_info = get_atlas_version(**atlas)
dict_append(atlas, **atlas_extra_info)
dict_append(info, **atlas)
self.set_info(**info)
return
class atlas_threads_info(atlas_info):
dir_env_var = ['PTATLAS', 'ATLAS']
_lib_names = ['ptf77blas', 'ptcblas']
class atlas_blas_threads_info(atlas_blas_info):
dir_env_var = ['PTATLAS', 'ATLAS']
_lib_names = ['ptf77blas', 'ptcblas']
class lapack_atlas_info(atlas_info):
_lib_names = ['lapack_atlas'] + atlas_info._lib_names
class lapack_atlas_threads_info(atlas_threads_info):
_lib_names = ['lapack_atlas'] + atlas_threads_info._lib_names
class atlas_3_10_info(atlas_info):
_lib_names = ['satlas']
_lib_atlas = _lib_names
_lib_lapack = _lib_names
class atlas_3_10_blas_info(atlas_3_10_info):
_lib_names = ['satlas']
def calc_info(self):
lib_dirs = self.get_lib_dirs()
info = {}
atlas_libs = self.get_libs('atlas_libs',
self._lib_names)
atlas = self.check_libs2(lib_dirs, atlas_libs, [])
if atlas is None:
return
include_dirs = self.get_include_dirs()
h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None])
h = h[0]
if h:
h = os.path.dirname(h)
dict_append(info, include_dirs=[h])
info['language'] = 'c'
info['define_macros'] = [('HAVE_CBLAS', None)]
atlas_version, atlas_extra_info = get_atlas_version(**atlas)
dict_append(atlas, **atlas_extra_info)
dict_append(info, **atlas)
self.set_info(**info)
return
class atlas_3_10_threads_info(atlas_3_10_info):
dir_env_var = ['PTATLAS', 'ATLAS']
_lib_names = ['tatlas']
_lib_atlas = _lib_names
_lib_lapack = _lib_names
class atlas_3_10_blas_threads_info(atlas_3_10_blas_info):
dir_env_var = ['PTATLAS', 'ATLAS']
_lib_names = ['tatlas']
class lapack_atlas_3_10_info(atlas_3_10_info):
pass
class lapack_atlas_3_10_threads_info(atlas_3_10_threads_info):
pass
class lapack_info(system_info):
section = 'lapack'
dir_env_var = 'LAPACK'
_lib_names = ['lapack']
notfounderror = LapackNotFoundError
def calc_info(self):
lib_dirs = self.get_lib_dirs()
lapack_libs = self.get_libs('lapack_libs', self._lib_names)
info = self.check_libs(lib_dirs, lapack_libs, [])
if info is None:
return
info['language'] = 'f77'
self.set_info(**info)
class lapack_src_info(system_info):
section = 'lapack_src'
dir_env_var = 'LAPACK_SRC'
notfounderror = LapackSrcNotFoundError
def get_paths(self, section, key):
pre_dirs = system_info.get_paths(self, section, key)
dirs = []
for d in pre_dirs:
dirs.extend([d] + self.combine_paths(d, ['LAPACK*/SRC', 'SRC']))
return [d for d in dirs if os.path.isdir(d)]
def calc_info(self):
src_dirs = self.get_src_dirs()
src_dir = ''
for d in src_dirs:
if os.path.isfile(os.path.join(d, 'dgesv.f')):
src_dir = d
break
if not src_dir:
            #XXX: Get sources from netlib. Maybe ask first.
return
# The following is extracted from LAPACK-3.0/SRC/Makefile.
# Added missing names from lapack-lite-3.1.1/SRC/Makefile
# while keeping removed names for Lapack-3.0 compatibility.
allaux = '''
ilaenv ieeeck lsame lsamen xerbla
iparmq
''' # *.f
laux = '''
bdsdc bdsqr disna labad lacpy ladiv lae2 laebz laed0 laed1
laed2 laed3 laed4 laed5 laed6 laed7 laed8 laed9 laeda laev2
lagtf lagts lamch lamrg lanst lapy2 lapy3 larnv larrb larre
larrf lartg laruv las2 lascl lasd0 lasd1 lasd2 lasd3 lasd4
lasd5 lasd6 lasd7 lasd8 lasd9 lasda lasdq lasdt laset lasq1
lasq2 lasq3 lasq4 lasq5 lasq6 lasr lasrt lassq lasv2 pttrf
stebz stedc steqr sterf
larra larrc larrd larr larrk larrj larrr laneg laisnan isnan
lazq3 lazq4
''' # [s|d]*.f
lasrc = '''
gbbrd gbcon gbequ gbrfs gbsv gbsvx gbtf2 gbtrf gbtrs gebak
gebal gebd2 gebrd gecon geequ gees geesx geev geevx gegs gegv
gehd2 gehrd gelq2 gelqf gels gelsd gelss gelsx gelsy geql2
geqlf geqp3 geqpf geqr2 geqrf gerfs gerq2 gerqf gesc2 gesdd
gesv gesvd gesvx getc2 getf2 getrf getri getrs ggbak ggbal
gges ggesx ggev ggevx ggglm gghrd gglse ggqrf ggrqf ggsvd
ggsvp gtcon gtrfs gtsv gtsvx gttrf gttrs gtts2 hgeqz hsein
hseqr labrd lacon laein lags2 lagtm lahqr lahrd laic1 lals0
lalsa lalsd langb lange langt lanhs lansb lansp lansy lantb
lantp lantr lapll lapmt laqgb laqge laqp2 laqps laqsb laqsp
laqsy lar1v lar2v larf larfb larfg larft larfx largv larrv
lartv larz larzb larzt laswp lasyf latbs latdf latps latrd
latrs latrz latzm lauu2 lauum pbcon pbequ pbrfs pbstf pbsv
pbsvx pbtf2 pbtrf pbtrs pocon poequ porfs posv posvx potf2
potrf potri potrs ppcon ppequ pprfs ppsv ppsvx pptrf pptri
pptrs ptcon pteqr ptrfs ptsv ptsvx pttrs ptts2 spcon sprfs
spsv spsvx sptrf sptri sptrs stegr stein sycon syrfs sysv
sysvx sytf2 sytrf sytri sytrs tbcon tbrfs tbtrs tgevc tgex2
tgexc tgsen tgsja tgsna tgsy2 tgsyl tpcon tprfs tptri tptrs
trcon trevc trexc trrfs trsen trsna trsyl trti2 trtri trtrs
tzrqf tzrzf
lacn2 lahr2 stemr laqr0 laqr1 laqr2 laqr3 laqr4 laqr5
''' # [s|c|d|z]*.f
sd_lasrc = '''
laexc lag2 lagv2 laln2 lanv2 laqtr lasy2 opgtr opmtr org2l
org2r orgbr orghr orgl2 orglq orgql orgqr orgr2 orgrq orgtr
orm2l orm2r ormbr ormhr orml2 ormlq ormql ormqr ormr2 ormr3
ormrq ormrz ormtr rscl sbev sbevd sbevx sbgst sbgv sbgvd sbgvx
sbtrd spev spevd spevx spgst spgv spgvd spgvx sptrd stev stevd
stevr stevx syev syevd syevr syevx sygs2 sygst sygv sygvd
sygvx sytd2 sytrd
''' # [s|d]*.f
cz_lasrc = '''
bdsqr hbev hbevd hbevx hbgst hbgv hbgvd hbgvx hbtrd hecon heev
heevd heevr heevx hegs2 hegst hegv hegvd hegvx herfs hesv
hesvx hetd2 hetf2 hetrd hetrf hetri hetrs hpcon hpev hpevd
hpevx hpgst hpgv hpgvd hpgvx hprfs hpsv hpsvx hptrd hptrf
hptri hptrs lacgv lacp2 lacpy lacrm lacrt ladiv laed0 laed7
laed8 laesy laev2 lahef lanhb lanhe lanhp lanht laqhb laqhe
laqhp larcm larnv lartg lascl laset lasr lassq pttrf rot spmv
spr stedc steqr symv syr ung2l ung2r ungbr unghr ungl2 unglq
ungql ungqr ungr2 ungrq ungtr unm2l unm2r unmbr unmhr unml2
unmlq unmql unmqr unmr2 unmr3 unmrq unmrz unmtr upgtr upmtr
''' # [c|z]*.f
#######
sclaux = laux + ' econd ' # s*.f
dzlaux = laux + ' secnd ' # d*.f
slasrc = lasrc + sd_lasrc # s*.f
dlasrc = lasrc + sd_lasrc # d*.f
clasrc = lasrc + cz_lasrc + ' srot srscl ' # c*.f
zlasrc = lasrc + cz_lasrc + ' drot drscl ' # z*.f
oclasrc = ' icmax1 scsum1 ' # *.f
ozlasrc = ' izmax1 dzsum1 ' # *.f
sources = ['s%s.f' % f for f in (sclaux + slasrc).split()] \
+ ['d%s.f' % f for f in (dzlaux + dlasrc).split()] \
+ ['c%s.f' % f for f in (clasrc).split()] \
+ ['z%s.f' % f for f in (zlasrc).split()] \
+ ['%s.f' % f for f in (allaux + oclasrc + ozlasrc).split()]
sources = [os.path.join(src_dir, f) for f in sources]
# Lapack 3.1:
src_dir2 = os.path.join(src_dir, '..', 'INSTALL')
sources += [os.path.join(src_dir2, p + 'lamch.f') for p in 'sdcz']
# Lapack 3.2.1:
sources += [os.path.join(src_dir, p + 'larfp.f') for p in 'sdcz']
sources += [os.path.join(src_dir, 'ila' + p + 'lr.f') for p in 'sdcz']
sources += [os.path.join(src_dir, 'ila' + p + 'lc.f') for p in 'sdcz']
# Should we check here actual existence of source files?
# Yes, the file listing is different between 3.0 and 3.1
# versions.
sources = [f for f in sources if os.path.isfile(f)]
info = {'sources': sources, 'language': 'f77'}
self.set_info(**info)
atlas_version_c_text = r'''
/* This file is generated from numpy/distutils/system_info.py */
void ATL_buildinfo(void);
int main(void) {
ATL_buildinfo();
return 0;
}
'''
_cached_atlas_version = {}
def get_atlas_version(**config):
libraries = config.get('libraries', [])
library_dirs = config.get('library_dirs', [])
key = (tuple(libraries), tuple(library_dirs))
if key in _cached_atlas_version:
return _cached_atlas_version[key]
c = cmd_config(Distribution())
atlas_version = None
info = {}
try:
s, o = c.get_output(atlas_version_c_text,
libraries=libraries, library_dirs=library_dirs,
use_tee=(system_info.verbosity > 0))
if s and re.search(r'undefined reference to `_gfortran', o, re.M):
s, o = c.get_output(atlas_version_c_text,
libraries=libraries + ['gfortran'],
library_dirs=library_dirs,
use_tee=(system_info.verbosity > 0))
if not s:
warnings.warn("""
*****************************************************
Linkage with ATLAS requires gfortran. Use
python setup.py config_fc --fcompiler=gnu95 ...
when building extension libraries that use ATLAS.
Make sure that -lgfortran is used for C++ extensions.
*****************************************************
""", stacklevel=2)
dict_append(info, language='f90',
define_macros=[('ATLAS_REQUIRES_GFORTRAN', None)])
except Exception: # failed to get version from file -- maybe on Windows
# look at directory name
for o in library_dirs:
m = re.search(r'ATLAS_(?P<version>\d+[.]\d+[.]\d+)_', o)
if m:
atlas_version = m.group('version')
if atlas_version is not None:
break
# final choice --- look at ATLAS_VERSION environment
# variable
if atlas_version is None:
atlas_version = os.environ.get('ATLAS_VERSION', None)
if atlas_version:
dict_append(info, define_macros=[(
'ATLAS_INFO', '"\\"%s\\""' % atlas_version)
])
else:
dict_append(info, define_macros=[('NO_ATLAS_INFO', -1)])
return atlas_version or '?.?.?', info
if not s:
m = re.search(r'ATLAS version (?P<version>\d+[.]\d+[.]\d+)', o)
if m:
atlas_version = m.group('version')
if atlas_version is None:
if re.search(r'undefined symbol: ATL_buildinfo', o, re.M):
atlas_version = '3.2.1_pre3.3.6'
else:
log.info('Status: %d', s)
log.info('Output: %s', o)
if atlas_version == '3.2.1_pre3.3.6':
dict_append(info, define_macros=[('NO_ATLAS_INFO', -2)])
else:
dict_append(info, define_macros=[(
'ATLAS_INFO', '"\\"%s\\""' % atlas_version)
])
result = _cached_atlas_version[key] = atlas_version, info
return result
class lapack_opt_info(system_info):
notfounderror = LapackNotFoundError
def calc_info(self):
lapack_mkl_info = get_info('lapack_mkl')
if lapack_mkl_info:
self.set_info(**lapack_mkl_info)
return
openblas_info = get_info('openblas_lapack')
if openblas_info:
self.set_info(**openblas_info)
return
atlas_info = get_info('atlas_3_10_threads')
if not atlas_info:
atlas_info = get_info('atlas_3_10')
if not atlas_info:
atlas_info = get_info('atlas_threads')
if not atlas_info:
atlas_info = get_info('atlas')
if sys.platform == 'darwin' and not atlas_info:
# Use the system lapack from Accelerate or vecLib under OSX
args = []
link_args = []
if get_platform()[-4:] == 'i386' or 'intel' in get_platform() or \
'x86_64' in get_platform() or \
'i386' in platform.platform():
intel = 1
else:
intel = 0
if os.path.exists('/System/Library/Frameworks'
'/Accelerate.framework/'):
if intel:
args.extend(['-msse3'])
else:
args.extend(['-faltivec'])
link_args.extend(['-Wl,-framework', '-Wl,Accelerate'])
elif os.path.exists('/System/Library/Frameworks'
'/vecLib.framework/'):
if intel:
args.extend(['-msse3'])
else:
args.extend(['-faltivec'])
link_args.extend(['-Wl,-framework', '-Wl,vecLib'])
if args:
self.set_info(extra_compile_args=args,
extra_link_args=link_args,
define_macros=[('NO_ATLAS_INFO', 3),
('HAVE_CBLAS', None)])
return
need_lapack = 0
need_blas = 0
info = {}
if atlas_info:
l = atlas_info.get('define_macros', [])
if ('ATLAS_WITH_LAPACK_ATLAS', None) in l \
or ('ATLAS_WITHOUT_LAPACK', None) in l:
need_lapack = 1
info = atlas_info
else:
warnings.warn(AtlasNotFoundError.__doc__, stacklevel=2)
need_blas = 1
need_lapack = 1
dict_append(info, define_macros=[('NO_ATLAS_INFO', 1)])
if need_lapack:
lapack_info = get_info('lapack')
#lapack_info = {} ## uncomment for testing
if lapack_info:
dict_append(info, **lapack_info)
else:
warnings.warn(LapackNotFoundError.__doc__, stacklevel=2)
lapack_src_info = get_info('lapack_src')
if not lapack_src_info:
warnings.warn(LapackSrcNotFoundError.__doc__, stacklevel=2)
return
dict_append(info, libraries=[('flapack_src', lapack_src_info)])
if need_blas:
blas_info = get_info('blas')
if blas_info:
dict_append(info, **blas_info)
else:
warnings.warn(BlasNotFoundError.__doc__, stacklevel=2)
blas_src_info = get_info('blas_src')
if not blas_src_info:
warnings.warn(BlasSrcNotFoundError.__doc__, stacklevel=2)
return
dict_append(info, libraries=[('fblas_src', blas_src_info)])
self.set_info(**info)
return
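# Note (illustrative): because get_paths() treats an environment variable set
# to the literal string 'None' as "disabled", individual backends can be
# skipped during the search above, e.g.
#
#     ATLAS=None python setup.py build
#
# makes the atlas_*_info classes report NOT AVAILABLE so that the search falls
# through to the plain LAPACK/BLAS (or source) fallbacks.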
class blas_opt_info(system_info):
notfounderror = BlasNotFoundError
def calc_info(self):
blas_mkl_info = get_info('blas_mkl')
if blas_mkl_info:
self.set_info(**blas_mkl_info)
return
blis_info = get_info('blis')
if blis_info:
self.set_info(**blis_info)
return
openblas_info = get_info('openblas')
if openblas_info:
self.set_info(**openblas_info)
return
atlas_info = get_info('atlas_3_10_blas_threads')
if not atlas_info:
atlas_info = get_info('atlas_3_10_blas')
if not atlas_info:
atlas_info = get_info('atlas_blas_threads')
if not atlas_info:
atlas_info = get_info('atlas_blas')
if sys.platform == 'darwin' and not atlas_info:
# Use the system BLAS from Accelerate or vecLib under OSX
args = []
link_args = []
if get_platform()[-4:] == 'i386' or 'intel' in get_platform() or \
'x86_64' in get_platform() or \
'i386' in platform.platform():
intel = 1
else:
intel = 0
if os.path.exists('/System/Library/Frameworks'
'/Accelerate.framework/'):
if intel:
args.extend(['-msse3'])
else:
args.extend(['-faltivec'])
args.extend([
'-I/System/Library/Frameworks/vecLib.framework/Headers'])
link_args.extend(['-Wl,-framework', '-Wl,Accelerate'])
elif os.path.exists('/System/Library/Frameworks'
'/vecLib.framework/'):
if intel:
args.extend(['-msse3'])
else:
args.extend(['-faltivec'])
args.extend([
'-I/System/Library/Frameworks/vecLib.framework/Headers'])
link_args.extend(['-Wl,-framework', '-Wl,vecLib'])
if args:
self.set_info(extra_compile_args=args,
extra_link_args=link_args,
define_macros=[('NO_ATLAS_INFO', 3),
('HAVE_CBLAS', None)])
return
need_blas = 0
info = {}
if atlas_info:
info = atlas_info
else:
warnings.warn(AtlasNotFoundError.__doc__, stacklevel=2)
need_blas = 1
dict_append(info, define_macros=[('NO_ATLAS_INFO', 1)])
if need_blas:
blas_info = get_info('blas')
if blas_info:
dict_append(info, **blas_info)
else:
warnings.warn(BlasNotFoundError.__doc__, stacklevel=2)
blas_src_info = get_info('blas_src')
if not blas_src_info:
warnings.warn(BlasSrcNotFoundError.__doc__, stacklevel=2)
return
dict_append(info, libraries=[('fblas_src', blas_src_info)])
self.set_info(**info)
return
class blas_info(system_info):
section = 'blas'
dir_env_var = 'BLAS'
_lib_names = ['blas']
notfounderror = BlasNotFoundError
def calc_info(self):
lib_dirs = self.get_lib_dirs()
blas_libs = self.get_libs('blas_libs', self._lib_names)
info = self.check_libs(lib_dirs, blas_libs, [])
if info is None:
return
if platform.system() == 'Windows':
# The check for windows is needed because has_cblas uses the
# same compiler that was used to compile Python and msvc is
# often not installed when mingw is being used. This rough
# treatment is not desirable, but windows is tricky.
info['language'] = 'f77' # XXX: is it generally true?
else:
lib = self.has_cblas(info)
if lib is not None:
info['language'] = 'c'
info['libraries'] = [lib]
info['define_macros'] = [('HAVE_CBLAS', None)]
self.set_info(**info)
def has_cblas(self, info):
# primitive cblas check by looking for the header and trying to link
# cblas or blas
res = False
c = distutils.ccompiler.new_compiler()
c.customize('')
tmpdir = tempfile.mkdtemp()
s = """#include <cblas.h>
int main(int argc, const char *argv[])
{
double a[4] = {1,2,3,4};
double b[4] = {5,6,7,8};
return cblas_ddot(4, a, 1, b, 1) > 10;
}"""
src = os.path.join(tmpdir, 'source.c')
try:
with open(src, 'wt') as f:
f.write(s)
try:
# check we can compile (find headers)
obj = c.compile([src], output_dir=tmpdir,
include_dirs=self.get_include_dirs())
# check we can link (find library)
# some systems have separate cblas and blas libs. First
# check for cblas lib, and if not present check for blas lib.
try:
c.link_executable(obj, os.path.join(tmpdir, "a.out"),
libraries=["cblas"],
library_dirs=info['library_dirs'],
extra_postargs=info.get('extra_link_args', []))
res = "cblas"
except distutils.ccompiler.LinkError:
c.link_executable(obj, os.path.join(tmpdir, "a.out"),
libraries=["blas"],
library_dirs=info['library_dirs'],
extra_postargs=info.get('extra_link_args', []))
res = "blas"
except distutils.ccompiler.CompileError:
res = None
finally:
shutil.rmtree(tmpdir)
return res
class openblas_info(blas_info):
section = 'openblas'
dir_env_var = 'OPENBLAS'
_lib_names = ['openblas']
notfounderror = BlasNotFoundError
def check_embedded_lapack(self, info):
return True
def calc_info(self):
lib_dirs = self.get_lib_dirs()
openblas_libs = self.get_libs('libraries', self._lib_names)
if openblas_libs == self._lib_names: # backward compat with 1.8.0
openblas_libs = self.get_libs('openblas_libs', self._lib_names)
info = self.check_libs(lib_dirs, openblas_libs, [])
if info is None:
return
# Add extra info for OpenBLAS
extra_info = self.calc_extra_info()
dict_append(info, **extra_info)
if not self.check_embedded_lapack(info):
return
info['language'] = 'c'
info['define_macros'] = [('HAVE_CBLAS', None)]
self.set_info(**info)
class openblas_lapack_info(openblas_info):
section = 'openblas'
dir_env_var = 'OPENBLAS'
_lib_names = ['openblas']
notfounderror = BlasNotFoundError
def check_embedded_lapack(self, info):
res = False
c = distutils.ccompiler.new_compiler()
c.customize('')
tmpdir = tempfile.mkdtemp()
s = """void zungqr();
int main(int argc, const char *argv[])
{
zungqr_();
return 0;
}"""
src = os.path.join(tmpdir, 'source.c')
out = os.path.join(tmpdir, 'a.out')
# Add the additional "extra" arguments
try:
extra_args = info['extra_link_args']
        except KeyError:
extra_args = []
try:
with open(src, 'wt') as f:
f.write(s)
obj = c.compile([src], output_dir=tmpdir)
try:
c.link_executable(obj, out, libraries=info['libraries'],
library_dirs=info['library_dirs'],
extra_postargs=extra_args)
res = True
except distutils.ccompiler.LinkError:
res = False
finally:
shutil.rmtree(tmpdir)
return res
class blis_info(blas_info):
section = 'blis'
dir_env_var = 'BLIS'
_lib_names = ['blis']
notfounderror = BlasNotFoundError
def calc_info(self):
lib_dirs = self.get_lib_dirs()
blis_libs = self.get_libs('libraries', self._lib_names)
if blis_libs == self._lib_names:
blis_libs = self.get_libs('blis_libs', self._lib_names)
info = self.check_libs2(lib_dirs, blis_libs, [])
if info is None:
return
# Add include dirs
incl_dirs = self.get_include_dirs()
dict_append(info,
language='c',
define_macros=[('HAVE_CBLAS', None)],
include_dirs=incl_dirs)
self.set_info(**info)
class blas_src_info(system_info):
section = 'blas_src'
dir_env_var = 'BLAS_SRC'
notfounderror = BlasSrcNotFoundError
def get_paths(self, section, key):
pre_dirs = system_info.get_paths(self, section, key)
dirs = []
for d in pre_dirs:
dirs.extend([d] + self.combine_paths(d, ['blas']))
return [d for d in dirs if os.path.isdir(d)]
def calc_info(self):
src_dirs = self.get_src_dirs()
src_dir = ''
for d in src_dirs:
if os.path.isfile(os.path.join(d, 'daxpy.f')):
src_dir = d
break
if not src_dir:
            #XXX: Get sources from netlib. Maybe ask first.
return
blas1 = '''
caxpy csscal dnrm2 dzasum saxpy srotg zdotc ccopy cswap drot
dznrm2 scasum srotm zdotu cdotc dasum drotg icamax scnrm2
srotmg zdrot cdotu daxpy drotm idamax scopy sscal zdscal crotg
dcabs1 drotmg isamax sdot sswap zrotg cscal dcopy dscal izamax
snrm2 zaxpy zscal csrot ddot dswap sasum srot zcopy zswap
scabs1
'''
blas2 = '''
cgbmv chpmv ctrsv dsymv dtrsv sspr2 strmv zhemv ztpmv cgemv
chpr dgbmv dsyr lsame ssymv strsv zher ztpsv cgerc chpr2 dgemv
dsyr2 sgbmv ssyr xerbla zher2 ztrmv cgeru ctbmv dger dtbmv
sgemv ssyr2 zgbmv zhpmv ztrsv chbmv ctbsv dsbmv dtbsv sger
stbmv zgemv zhpr chemv ctpmv dspmv dtpmv ssbmv stbsv zgerc
zhpr2 cher ctpsv dspr dtpsv sspmv stpmv zgeru ztbmv cher2
ctrmv dspr2 dtrmv sspr stpsv zhbmv ztbsv
'''
blas3 = '''
cgemm csymm ctrsm dsyrk sgemm strmm zhemm zsyr2k chemm csyr2k
dgemm dtrmm ssymm strsm zher2k zsyrk cher2k csyrk dsymm dtrsm
ssyr2k zherk ztrmm cherk ctrmm dsyr2k ssyrk zgemm zsymm ztrsm
'''
sources = [os.path.join(src_dir, f + '.f') \
for f in (blas1 + blas2 + blas3).split()]
#XXX: should we check here actual existence of source files?
sources = [f for f in sources if os.path.isfile(f)]
info = {'sources': sources, 'language': 'f77'}
self.set_info(**info)
class x11_info(system_info):
section = 'x11'
notfounderror = X11NotFoundError
def __init__(self):
system_info.__init__(self,
default_lib_dirs=default_x11_lib_dirs,
default_include_dirs=default_x11_include_dirs)
def calc_info(self):
if sys.platform in ['win32']:
return
lib_dirs = self.get_lib_dirs()
include_dirs = self.get_include_dirs()
x11_libs = self.get_libs('x11_libs', ['X11'])
info = self.check_libs(lib_dirs, x11_libs, [])
if info is None:
return
inc_dir = None
for d in include_dirs:
if self.combine_paths(d, 'X11/X.h'):
inc_dir = d
break
if inc_dir is not None:
dict_append(info, include_dirs=[inc_dir])
self.set_info(**info)
class _numpy_info(system_info):
section = 'Numeric'
modulename = 'Numeric'
notfounderror = NumericNotFoundError
def __init__(self):
include_dirs = []
try:
module = __import__(self.modulename)
prefix = []
for name in module.__file__.split(os.sep):
if name == 'lib':
break
prefix.append(name)
# Ask numpy for its own include path before attempting
# anything else
try:
include_dirs.append(getattr(module, 'get_include')())
except AttributeError:
pass
include_dirs.append(distutils.sysconfig.get_python_inc(
prefix=os.sep.join(prefix)))
except ImportError:
pass
py_incl_dir = distutils.sysconfig.get_python_inc()
include_dirs.append(py_incl_dir)
py_pincl_dir = distutils.sysconfig.get_python_inc(plat_specific=True)
if py_pincl_dir not in include_dirs:
include_dirs.append(py_pincl_dir)
for d in default_include_dirs:
d = os.path.join(d, os.path.basename(py_incl_dir))
if d not in include_dirs:
include_dirs.append(d)
system_info.__init__(self,
default_lib_dirs=[],
default_include_dirs=include_dirs)
def calc_info(self):
try:
module = __import__(self.modulename)
except ImportError:
return
info = {}
macros = []
for v in ['__version__', 'version']:
vrs = getattr(module, v, None)
if vrs is None:
continue
macros = [(self.modulename.upper() + '_VERSION',
'"\\"%s\\""' % (vrs)),
(self.modulename.upper(), None)]
break
dict_append(info, define_macros=macros)
include_dirs = self.get_include_dirs()
inc_dir = None
for d in include_dirs:
if self.combine_paths(d,
os.path.join(self.modulename,
'arrayobject.h')):
inc_dir = d
break
if inc_dir is not None:
dict_append(info, include_dirs=[inc_dir])
if info:
self.set_info(**info)
return
class numarray_info(_numpy_info):
section = 'numarray'
modulename = 'numarray'
class Numeric_info(_numpy_info):
section = 'Numeric'
modulename = 'Numeric'
class numpy_info(_numpy_info):
section = 'numpy'
modulename = 'numpy'
class numerix_info(system_info):
section = 'numerix'
def calc_info(self):
which = None, None
if os.getenv("NUMERIX"):
which = os.getenv("NUMERIX"), "environment var"
# If all the above fail, default to numpy.
if which[0] is None:
which = "numpy", "defaulted"
try:
import numpy
which = "numpy", "defaulted"
except ImportError:
msg1 = str(get_exception())
try:
import Numeric
which = "numeric", "defaulted"
except ImportError:
msg2 = str(get_exception())
try:
import numarray
which = "numarray", "defaulted"
except ImportError:
msg3 = str(get_exception())
log.info(msg1)
log.info(msg2)
log.info(msg3)
which = which[0].strip().lower(), which[1]
if which[0] not in ["numeric", "numarray", "numpy"]:
raise ValueError("numerix selector must be either 'Numeric' "
"or 'numarray' or 'numpy' but the value obtained"
" from the %s was '%s'." % (which[1], which[0]))
os.environ['NUMERIX'] = which[0]
self.set_info(**get_info(which[0]))
class f2py_info(system_info):
def calc_info(self):
try:
import numpy.f2py as f2py
except ImportError:
return
f2py_dir = os.path.join(os.path.dirname(f2py.__file__), 'src')
self.set_info(sources=[os.path.join(f2py_dir, 'fortranobject.c')],
include_dirs=[f2py_dir])
return
class boost_python_info(system_info):
section = 'boost_python'
dir_env_var = 'BOOST'
def get_paths(self, section, key):
pre_dirs = system_info.get_paths(self, section, key)
dirs = []
for d in pre_dirs:
dirs.extend([d] + self.combine_paths(d, ['boost*']))
return [d for d in dirs if os.path.isdir(d)]
def calc_info(self):
src_dirs = self.get_src_dirs()
src_dir = ''
for d in src_dirs:
if os.path.isfile(os.path.join(d, 'libs', 'python', 'src',
'module.cpp')):
src_dir = d
break
if not src_dir:
return
py_incl_dirs = [distutils.sysconfig.get_python_inc()]
py_pincl_dir = distutils.sysconfig.get_python_inc(plat_specific=True)
if py_pincl_dir not in py_incl_dirs:
py_incl_dirs.append(py_pincl_dir)
srcs_dir = os.path.join(src_dir, 'libs', 'python', 'src')
bpl_srcs = glob(os.path.join(srcs_dir, '*.cpp'))
bpl_srcs += glob(os.path.join(srcs_dir, '*', '*.cpp'))
info = {'libraries': [('boost_python_src',
{'include_dirs': [src_dir] + py_incl_dirs,
'sources':bpl_srcs}
)],
'include_dirs': [src_dir],
}
if info:
self.set_info(**info)
return
class agg2_info(system_info):
section = 'agg2'
dir_env_var = 'AGG2'
def get_paths(self, section, key):
pre_dirs = system_info.get_paths(self, section, key)
dirs = []
for d in pre_dirs:
dirs.extend([d] + self.combine_paths(d, ['agg2*']))
return [d for d in dirs if os.path.isdir(d)]
def calc_info(self):
src_dirs = self.get_src_dirs()
src_dir = ''
for d in src_dirs:
if os.path.isfile(os.path.join(d, 'src', 'agg_affine_matrix.cpp')):
src_dir = d
break
if not src_dir:
return
if sys.platform == 'win32':
agg2_srcs = glob(os.path.join(src_dir, 'src', 'platform',
'win32', 'agg_win32_bmp.cpp'))
else:
agg2_srcs = glob(os.path.join(src_dir, 'src', '*.cpp'))
agg2_srcs += [os.path.join(src_dir, 'src', 'platform',
'X11',
'agg_platform_support.cpp')]
info = {'libraries':
[('agg2_src',
{'sources': agg2_srcs,
'include_dirs': [os.path.join(src_dir, 'include')],
}
)],
'include_dirs': [os.path.join(src_dir, 'include')],
}
if info:
self.set_info(**info)
return
class _pkg_config_info(system_info):
section = None
config_env_var = 'PKG_CONFIG'
default_config_exe = 'pkg-config'
append_config_exe = ''
version_macro_name = None
release_macro_name = None
version_flag = '--modversion'
cflags_flag = '--cflags'
def get_config_exe(self):
if self.config_env_var in os.environ:
return os.environ[self.config_env_var]
return self.default_config_exe
def get_config_output(self, config_exe, option):
cmd = config_exe + ' ' + self.append_config_exe + ' ' + option
s, o = exec_command(cmd, use_tee=0)
if not s:
return o
def calc_info(self):
config_exe = find_executable(self.get_config_exe())
if not config_exe:
            log.warn('File not found: %s. Cannot determine %s info.' \
                  % (self.get_config_exe(), self.section))
return
info = {}
macros = []
libraries = []
library_dirs = []
include_dirs = []
extra_link_args = []
extra_compile_args = []
version = self.get_config_output(config_exe, self.version_flag)
if version:
macros.append((self.__class__.__name__.split('.')[-1].upper(),
'"\\"%s\\""' % (version)))
if self.version_macro_name:
macros.append((self.version_macro_name + '_%s'
% (version.replace('.', '_')), None))
if self.release_macro_name:
release = self.get_config_output(config_exe, '--release')
if release:
macros.append((self.release_macro_name + '_%s'
% (release.replace('.', '_')), None))
opts = self.get_config_output(config_exe, '--libs')
if opts:
for opt in opts.split():
if opt[:2] == '-l':
libraries.append(opt[2:])
elif opt[:2] == '-L':
library_dirs.append(opt[2:])
else:
extra_link_args.append(opt)
opts = self.get_config_output(config_exe, self.cflags_flag)
if opts:
for opt in opts.split():
if opt[:2] == '-I':
include_dirs.append(opt[2:])
elif opt[:2] == '-D':
if '=' in opt:
n, v = opt[2:].split('=')
macros.append((n, v))
else:
macros.append((opt[2:], None))
else:
extra_compile_args.append(opt)
if macros:
dict_append(info, define_macros=macros)
if libraries:
dict_append(info, libraries=libraries)
if library_dirs:
dict_append(info, library_dirs=library_dirs)
if include_dirs:
dict_append(info, include_dirs=include_dirs)
if extra_link_args:
dict_append(info, extra_link_args=extra_link_args)
if extra_compile_args:
dict_append(info, extra_compile_args=extra_compile_args)
if info:
self.set_info(**info)
return
class wx_info(_pkg_config_info):
section = 'wx'
config_env_var = 'WX_CONFIG'
default_config_exe = 'wx-config'
append_config_exe = ''
version_macro_name = 'WX_VERSION'
release_macro_name = 'WX_RELEASE'
version_flag = '--version'
cflags_flag = '--cxxflags'
class gdk_pixbuf_xlib_2_info(_pkg_config_info):
section = 'gdk_pixbuf_xlib_2'
append_config_exe = 'gdk-pixbuf-xlib-2.0'
version_macro_name = 'GDK_PIXBUF_XLIB_VERSION'
class gdk_pixbuf_2_info(_pkg_config_info):
section = 'gdk_pixbuf_2'
append_config_exe = 'gdk-pixbuf-2.0'
version_macro_name = 'GDK_PIXBUF_VERSION'
class gdk_x11_2_info(_pkg_config_info):
section = 'gdk_x11_2'
append_config_exe = 'gdk-x11-2.0'
version_macro_name = 'GDK_X11_VERSION'
class gdk_2_info(_pkg_config_info):
section = 'gdk_2'
append_config_exe = 'gdk-2.0'
version_macro_name = 'GDK_VERSION'
class gdk_info(_pkg_config_info):
section = 'gdk'
append_config_exe = 'gdk'
version_macro_name = 'GDK_VERSION'
class gtkp_x11_2_info(_pkg_config_info):
section = 'gtkp_x11_2'
append_config_exe = 'gtk+-x11-2.0'
version_macro_name = 'GTK_X11_VERSION'
class gtkp_2_info(_pkg_config_info):
section = 'gtkp_2'
append_config_exe = 'gtk+-2.0'
version_macro_name = 'GTK_VERSION'
class xft_info(_pkg_config_info):
section = 'xft'
append_config_exe = 'xft'
version_macro_name = 'XFT_VERSION'
class freetype2_info(_pkg_config_info):
section = 'freetype2'
append_config_exe = 'freetype2'
version_macro_name = 'FREETYPE2_VERSION'
class amd_info(system_info):
section = 'amd'
dir_env_var = 'AMD'
_lib_names = ['amd']
def calc_info(self):
lib_dirs = self.get_lib_dirs()
amd_libs = self.get_libs('amd_libs', self._lib_names)
info = self.check_libs(lib_dirs, amd_libs, [])
if info is None:
return
include_dirs = self.get_include_dirs()
inc_dir = None
for d in include_dirs:
p = self.combine_paths(d, 'amd.h')
if p:
inc_dir = os.path.dirname(p[0])
break
if inc_dir is not None:
dict_append(info, include_dirs=[inc_dir],
define_macros=[('SCIPY_AMD_H', None)],
swig_opts=['-I' + inc_dir])
self.set_info(**info)
return
class umfpack_info(system_info):
section = 'umfpack'
dir_env_var = 'UMFPACK'
notfounderror = UmfpackNotFoundError
_lib_names = ['umfpack']
def calc_info(self):
lib_dirs = self.get_lib_dirs()
umfpack_libs = self.get_libs('umfpack_libs', self._lib_names)
info = self.check_libs(lib_dirs, umfpack_libs, [])
if info is None:
return
include_dirs = self.get_include_dirs()
inc_dir = None
for d in include_dirs:
p = self.combine_paths(d, ['', 'umfpack'], 'umfpack.h')
if p:
inc_dir = os.path.dirname(p[0])
break
if inc_dir is not None:
dict_append(info, include_dirs=[inc_dir],
define_macros=[('SCIPY_UMFPACK_H', None)],
swig_opts=['-I' + inc_dir])
        amd = get_info('amd')
        dict_append(info, **amd)
self.set_info(**info)
return
def combine_paths(*args, **kws):
""" Return a list of existing paths composed by all combinations of
items from arguments.
"""
r = []
for a in args:
if not a:
continue
if is_string(a):
a = [a]
r.append(a)
args = r
if not args:
return []
if len(args) == 1:
result = reduce(lambda a, b: a + b, map(glob, args[0]), [])
elif len(args) == 2:
result = []
for a0 in args[0]:
for a1 in args[1]:
result.extend(glob(os.path.join(a0, a1)))
else:
result = combine_paths(*(combine_paths(args[0], args[1]) + args[2:]))
verbosity = kws.get('verbosity', 1)
log.debug('(paths: %s)', ','.join(result))
return result
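# Illustrative sketch of combine_paths(): every combination of the arguments is
# globbed and only paths that actually exist are returned.  The library name
# below is hypothetical:
#
#     combine_paths('/usr/lib', ['libfoo.a', 'libfoo.so'])
#     # -> ['/usr/lib/libfoo.so'] on a system that only ships the shared library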
language_map = {'c': 0, 'c++': 1, 'f77': 2, 'f90': 3}
inv_language_map = {0: 'c', 1: 'c++', 2: 'f77', 3: 'f90'}
def dict_append(d, **kws):
languages = []
for k, v in kws.items():
if k == 'language':
languages.append(v)
continue
if k in d:
if k in ['library_dirs', 'include_dirs',
'extra_compile_args', 'extra_link_args',
'runtime_library_dirs', 'define_macros']:
[d[k].append(vv) for vv in v if vv not in d[k]]
else:
d[k].extend(v)
else:
d[k] = v
if languages:
l = inv_language_map[max([language_map.get(l, 0) for l in languages])]
d['language'] = l
return
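# Usage sketch for dict_append (hypothetical values): list-valued keys are merged,
# the keys named above (library_dirs, include_dirs, ...) are de-duplicated, and
# 'language' is resolved to the "highest" language seen via language_map /
# inv_language_map:
#
#     d = {'libraries': ['amd'], 'library_dirs': ['/usr/lib']}
#     dict_append(d, libraries=['umfpack'], library_dirs=['/usr/lib'], language='c')
#     # d == {'libraries': ['amd', 'umfpack'],
#     #       'library_dirs': ['/usr/lib'],
#     #       'language': 'c'}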
def parseCmdLine(argv=(None,)):
import optparse
parser = optparse.OptionParser("usage: %prog [-v] [info objs]")
parser.add_option('-v', '--verbose', action='store_true', dest='verbose',
default=False,
help='be verbose and print more messages')
opts, args = parser.parse_args(args=argv[1:])
return opts, args
def show_all(argv=None):
import inspect
if argv is None:
argv = sys.argv
opts, args = parseCmdLine(argv)
if opts.verbose:
log.set_threshold(log.DEBUG)
else:
log.set_threshold(log.INFO)
show_only = []
for n in args:
if n[-5:] != '_info':
n = n + '_info'
show_only.append(n)
show_all = not show_only
_gdict_ = globals().copy()
for name, c in _gdict_.items():
if not inspect.isclass(c):
continue
if not issubclass(c, system_info) or c is system_info:
continue
if not show_all:
if name not in show_only:
continue
del show_only[show_only.index(name)]
conf = c()
conf.verbosity = 2
r = conf.get_info()
if show_only:
log.info('Info classes not defined: %s', ','.join(show_only))
if __name__ == "__main__":
show_all()
| dwillmer/numpy | numpy/distutils/system_info.py | Python | bsd-3-clause | 85,113 | 0.000881 |
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
"""
Tests for RecordView module and view
Note: this module tests for rendering specifically for RecordView values, using
view description sitedata files, and as such duplicates some tests covered by
module test_entitygenericedit.
"""
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2014, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import os
import json
import unittest
import logging
log = logging.getLogger(__name__)
from django.conf import settings
from django.db import models
from django.http import QueryDict
from django.contrib.auth.models import User
from django.test import TestCase # cf. https://docs.djangoproject.com/en/dev/topics/testing/tools/#assertions
from django.test.client import Client
from annalist.identifiers import RDF, RDFS, ANNAL
from annalist import layout
from annalist import message
from annalist.models.site import Site
from annalist.models.sitedata import SiteData
from annalist.models.collection import Collection
from annalist.models.recordview import RecordView
from annalist.models.recordfield import RecordField
from annalist.views.uri_builder import uri_with_params
from annalist.views.recordviewdelete import RecordViewDeleteConfirmedView
from annalist.views.form_utils.fieldchoice import FieldChoice
from .AnnalistTestCase import AnnalistTestCase
from .tests import (
TestHost, TestHostUri, TestBasePath, TestBaseUri, TestBaseDir
)
from .init_tests import (
init_annalist_test_site,
init_annalist_test_coll,
install_annalist_named_coll,
create_test_coll_inheriting,
init_annalist_named_test_coll,
resetSitedata
)
from .entity_testutils import (
make_message, make_quoted_message,
site_dir, collection_dir,
site_view_url, collection_edit_url,
collection_entity_view_url,
collection_create_values,
render_select_options,
create_test_user,
context_field_map,
context_view_field,
context_bind_fields,
check_context_field, check_context_field_value,
)
from .entity_testviewdata import (
recordview_dir,
recordview_coll_url, recordview_url, recordview_edit_url,
recordview_value_keys, recordview_load_keys,
recordview_create_values, recordview_values, recordview_read_values,
view_view_context_data,
default_view_fields_list, view_view_fields_list,
view_view_form_data,
recordview_delete_confirm_form_data
)
from .entity_testentitydata import (
entity_url, entitydata_edit_url, entitydata_list_type_url,
default_fields, default_label, default_comment, error_label,
layout_classes
)
from .entity_testsitedata import (
make_field_choices, no_selection,
get_site_default_entity_fields_sorted,
get_site_bibentry_fields_sorted
)
# -----------------------------------------------------------------------------
#
# RecordView tests
#
# -----------------------------------------------------------------------------
class RecordViewTest(AnnalistTestCase):
def setUp(self):
init_annalist_test_site()
self.testsite = Site(TestBaseUri, TestBaseDir)
self.sitedata = SiteData(self.testsite)
self.testcoll = Collection(self.testsite, "testcoll")
self.layout = (
{ 'enum_field_placement_id': layout.ENUM_FIELD_PLACEMENT_ID
, 'enum_list_type_id': layout.ENUM_LIST_TYPE_ID
, 'enum_render_type_id': layout.ENUM_RENDER_TYPE_ID
, 'enum_value_type_id': layout.ENUM_VALUE_TYPE_ID
, 'enum_value_mode_id': layout.ENUM_VALUE_MODE_ID
, 'field_typeid': layout.FIELD_TYPEID
, 'group_typeid': layout.GROUP_TYPEID
, 'list_typeid': layout.LIST_TYPEID
, 'type_typeid': layout.TYPE_TYPEID
, 'user_typeid': layout.USER_TYPEID
, 'view_typeid': layout.VIEW_TYPEID
, 'vocab_typeid': layout.VOCAB_TYPEID
, 'field_dir': layout.FIELD_DIR
, 'group_dir': layout.GROUP_DIR
, 'list_dir': layout.LIST_DIR
, 'type_dir': layout.TYPE_DIR
, 'user_dir': layout.USER_DIR
, 'view_dir': layout.VIEW_DIR
, 'vocab_dir': layout.VOCAB_DIR
})
return
def tearDown(self):
return
@classmethod
def setUpClass(cls):
super(RecordViewTest, cls).setUpClass()
return
@classmethod
def tearDownClass(cls):
super(RecordViewTest, cls).tearDownClass()
resetSitedata(scope="collections")
return
def test_RecordViewTest(self):
self.assertEqual(Collection.__name__, "Collection", "Check Collection class name")
return
def test_recordview_init(self):
t = RecordView(self.testcoll, "testview")
u = recordview_coll_url(self.testsite, coll_id="testcoll", view_id="testview")
self.assertEqual(t._entitytype, ANNAL.CURIE.View)
self.assertEqual(t._entityfile, layout.VIEW_META_FILE)
self.assertEqual(t._entityref, layout.COLL_BASE_VIEW_REF%{'id': "testview"})
self.assertEqual(t._entityid, "testview")
self.assertEqual(t._entityurl, u)
self.assertEqual(t._entitydir, recordview_dir(view_id="testview"))
self.assertEqual(t._values, None)
return
def test_recordview1_data(self):
t = RecordView(self.testcoll, "view1")
self.assertEqual(t.get_id(), "view1")
self.assertEqual(t.get_type_id(), layout.VIEW_TYPEID)
self.assertIn(
"/c/testcoll/d/%(view_dir)s/view1/"%self.layout,
t.get_url()
)
self.assertEqual(
TestBaseUri + "/c/testcoll/d/%(view_typeid)s/view1/"%self.layout,
t.get_view_url()
)
t.set_values(recordview_create_values(view_id="view1"))
td = t.get_values()
self.assertEqual(set(td.keys()), set(recordview_value_keys()))
v = recordview_values(view_id="view1")
self.assertDictionaryMatch(td, v)
return
def test_recordview2_data(self):
t = RecordView(self.testcoll, "view2")
self.assertEqual(t.get_id(), "view2")
self.assertEqual(t.get_type_id(), layout.VIEW_TYPEID)
self.assertIn(
"/c/testcoll/d/%(view_dir)s/view2/"%self.layout,
t.get_url()
)
self.assertEqual(
TestBaseUri + "/c/testcoll/d/%(view_typeid)s/view2/"%self.layout,
t.get_view_url()
)
t.set_values(recordview_create_values(view_id="view2"))
td = t.get_values()
self.assertEqual(set(td.keys()), set(recordview_value_keys()))
v = recordview_values(view_id="view2")
self.assertDictionaryMatch(td, v)
return
def test_recordview_create_load(self):
t = RecordView.create(self.testcoll, "view1", recordview_create_values(view_id="view1"))
td = RecordView.load(self.testcoll, "view1").get_values()
v = recordview_read_values(view_id="view1")
self.assertKeysMatch(td, v)
self.assertDictionaryMatch(td, v)
return
def test_recordview_default_data(self):
t = RecordView.load(self.testcoll, "Default_view", altscope="all")
self.assertEqual(t.get_id(), "Default_view")
self.assertIn(
"/c/_annalist_site/d/%(view_dir)s/Default_view"%self.layout,
t.get_url()
)
self.assertIn(
"/c/testcoll/d/%(view_typeid)s/Default_view"%self.layout,
t.get_view_url()
)
self.assertEqual(t.get_type_id(), layout.VIEW_TYPEID)
td = t.get_values()
self.assertEqual(
set(td.keys()),
set(recordview_load_keys(view_uri=True, view_entity_type=True))
)
v = recordview_read_values(view_id="Default_view")
v.update(
{ 'rdfs:label': 'Default record view'
, 'annal:uri': 'annal:display/Default_view'
})
v.pop('rdfs:comment', None)
v.pop('annal:view_entity_type', None)
self.assertDictionaryMatch(td, v) # actual, expect
return
# -----------------------------------------------------------------------------
#
# RecordView edit view tests
#
# -----------------------------------------------------------------------------
class RecordViewEditViewTest(AnnalistTestCase):
"""
Tests for record view edit views
"""
def setUp(self):
init_annalist_test_site()
self.testsite = Site(TestBaseUri, TestBaseDir)
self.testcoll = Collection.create(self.testsite, "testcoll", collection_create_values("testcoll"))
self.no_options = [ FieldChoice('', label="(no options)") ]
def special_field(fid):
return (
fid == "Entity_see_also" or
fid.startswith("Field_") or
fid.startswith("List_") or
fid.startswith("Type_") or
fid.startswith("View_") or
fid.startswith("User_") or
fid.startswith("Coll_") or
fid.startswith("Vocab_") or
fid.startswith("Enum_") or
fid.startswith("Group_") or
False
)
self.field_options = sorted(
[ fid for fid in self.testcoll.child_entity_ids(RecordField, altscope="all")
if fid != layout.INITIAL_VALUES_ID
])
self.field_options_no_bibentry = sorted(
[ fid for fid in self.testcoll.child_entity_ids(RecordField, altscope="all")
if fid != layout.INITIAL_VALUES_ID and not fid.startswith("Bib_")
])
self.field_options_bib_no_special = sorted(
[ fid for fid in self.testcoll.child_entity_ids(RecordField, altscope="all")
if fid != layout.INITIAL_VALUES_ID and not special_field(fid)
])
self.field_options_no_special = sorted(
[ fid for fid in self.testcoll.child_entity_ids(RecordField, altscope="all")
if fid != layout.INITIAL_VALUES_ID and
not ((fid.startswith("Bib_") or special_field(fid)))
])
# log.info(self.field_options_no_bibentry)
# For checking Location: header values...
self.continuation_path = entitydata_list_type_url(
coll_id="testcoll", type_id=layout.VIEW_TYPEID
)
self.continuation_url = self.continuation_path
create_test_user(self.testcoll, "testuser", "testpassword")
self.client = Client(HTTP_HOST=TestHost)
loggedin = self.client.login(username="testuser", password="testpassword")
self.assertTrue(loggedin)
return
def tearDown(self):
resetSitedata(scope="collections")
return
@classmethod
def setUpClass(cls):
super(RecordViewEditViewTest, cls).setUpClass()
return
@classmethod
def tearDownClass(cls):
super(RecordViewEditViewTest, cls).tearDownClass()
resetSitedata()
return
# -----------------------------------------------------------------------------
# Helpers
# -----------------------------------------------------------------------------
def _create_record_view(
self, view_id,
view_entity_type="annal:View",
extra_field=None, extra_field_uri=None
):
"Helper function creates record view entry with supplied view_id"
t = RecordView.create(
self.testcoll, view_id,
recordview_create_values(
view_id=view_id,
view_entity_type=view_entity_type,
extra_field=extra_field, extra_field_uri=extra_field_uri
)
)
return t
def _check_recordview_values(
self, view_id, view_uri=None,
view_entity_type="annal:View",
update="RecordView",
num_fields=4, field3_placement="small:0,12",
extra_field=None, extra_field_uri=None,
update_dict=None,
):
"Helper function checks content of record view entry with supplied view_id"
self.assertTrue(RecordView.exists(self.testcoll, view_id))
t = RecordView.load(self.testcoll, view_id)
self.assertEqual(t.get_id(), view_id)
self.assertEqual(t.get_view_url(), TestHostUri + recordview_url("testcoll", view_id))
v = recordview_values(
view_id=view_id, view_uri=view_uri, update=update,
view_entity_type=view_entity_type,
num_fields=num_fields, field3_placement=field3_placement,
extra_field=extra_field, extra_field_uri=extra_field_uri
)
if update_dict:
v.update(update_dict)
for k in update_dict:
if update_dict[k] is None:
v.pop(k, None)
# log.info("*** actual: %r"%(t.get_values(),))
# log.info("*** expect: %r"%(v,))
self.assertDictionaryMatch(t.get_values(), v)
return t
# Check context values common to all view fields
#@@TODO: remove when references below replaced
# see: _check_view_view_context_fields
def _check_common_view_context_fields(self, response,
action="",
view_id="(?view_id)", orig_view_id=None,
view_label="(?view_label)",
view_entity_type="(?view_entity_type)",
view_edit_view=True
):
self.assertEqual(response.context['entity_id'], view_id)
self.assertEqual(response.context['orig_id'], orig_view_id)
self.assertEqual(response.context['type_id'], '_view')
self.assertEqual(response.context['orig_type'], '_view')
self.assertEqual(response.context['coll_id'], 'testcoll')
self.assertEqual(response.context['action'], action)
self.assertEqual(response.context['view_id'], 'View_view')
# Fields
#
# NOTE: context['fields'][i]['field_id'] comes from FieldDescription instance via
# bound_field, so type prefix is stripped. This does not apply to the field
# ids actually coming from the view form.
#
self.assertEqual(len(response.context['fields']), 6)
f0 = context_view_field(response.context, 0, 0)
f1 = context_view_field(response.context, 1, 0)
f2 = context_view_field(response.context, 2, 0)
f3 = context_view_field(response.context, 3, 0)
f4 = context_view_field(response.context, 4, 0)
# 1st field - Id
check_context_field(self, f0,
field_id= "View_id",
field_name= "entity_id",
field_label= "View Id",
field_placeholder= "(view id)",
field_property_uri= "annal:id",
field_render_type= "EntityId",
field_value_mode= "Value_direct",
field_value_type= "annal:EntityRef",
field_placement= "small-12 medium-6 columns",
field_value= view_id,
options= self.no_options
)
# 2nd field - Label
check_context_field(self, f1,
field_id= "View_label",
field_name= "View_label",
field_label= "Label",
field_placeholder= "(view label)",
field_property_uri= "rdfs:label",
field_render_type= "Text",
field_value_mode= "Value_direct",
field_value_type= "annal:Text",
field_placement= "small-12 columns",
field_value= view_label,
options= self.no_options
)
# 3rd field - comment
check_context_field(self, f2,
field_id= "View_comment",
field_name= "View_comment",
field_label= "Help",
field_property_uri= "rdfs:comment",
field_render_type= "Markdown",
field_value_mode= "Value_direct",
field_value_type= "annal:Richtext",
field_placement= "small-12 columns",
options= self.no_options
)
# 4th field - type of entity for view
check_context_field(self, f3,
field_id= "View_entity_type",
field_name= "View_entity_type",
field_property_uri= "annal:view_entity_type",
field_render_type= "Identifier",
field_value_mode= "Value_direct",
field_value_type= "annal:Identifier",
field_value= view_entity_type,
options= self.no_options
)
# 5th field - editable view option
check_context_field(self, f4,
field_id= "View_edit_view",
field_name= "View_edit_view",
field_property_uri= "annal:open_view",
field_render_type= "CheckBox",
field_value_mode= "Value_direct",
field_value_type= "annal:Boolean",
field_value= view_edit_view,
options= self.no_options
)
return
# Check context values for view using default record view
def _check_default_view_context_fields(self, response,
action="",
view_id="(?view_id)", orig_view_id=None,
view_uri=None,
view_label="(?view_label)",
view_descr=None,
view_entity_type="(?view_entity_type)",
view_edit_view=True,
view_fields=None, field_choices=None,
add_field=None, remove_field=None,
move_up=None, move_down=None,
update="RecordView",
continuation_url=None
):
expect_context = view_view_context_data(
coll_id="testcoll", view_id=view_id, orig_id=orig_view_id,
action=action,
view_uri=view_uri,
view_label=view_label,
view_descr=view_descr,
view_entity_type=view_entity_type,
view_edit_view=view_edit_view,
view_fields=view_fields, field_choices=field_choices,
add_field=add_field, remove_field=remove_field,
move_up=move_up, move_down=move_down,
update=update,
continuation_url=continuation_url
)
actual_context = context_bind_fields(response.context)
self.assertEqual(len(response.context['fields']), 6)
self.assertDictionaryMatch(actual_context, expect_context)
return
# The View_view test case checks descriptions of repeat-field-groups that are not
    # covered by the Default_view case.
def _check_view_view_context_fields(self, response,
action="",
num_fields=6):
# Common fields
self._check_common_view_context_fields(response,
action=action,
view_id="View_view", orig_view_id="View_view",
view_label="View definition",
view_entity_type="annal:View",
view_edit_view=False
)
# 6th field - field list
f5 = context_view_field(response.context, 5, 0)
expect_field_data = (
[
{ 'annal:field_placement': 'small:0,12;medium:0,6'
, 'annal:field_id': layout.FIELD_TYPEID+'/View_id'
}
, { 'annal:field_placement': 'small:0,12'
, 'annal:field_id': layout.FIELD_TYPEID+'/View_label'
}
, { 'annal:field_placement': 'small:0,12'
, 'annal:field_id': layout.FIELD_TYPEID+'/View_comment'
}
, { 'annal:field_placement': 'small:0,12'
, 'annal:field_id': layout.FIELD_TYPEID+'/View_entity_type'
}
, { 'annal:field_placement': 'small:0,12;medium:0,6'
, 'annal:field_id': layout.FIELD_TYPEID+'/View_edit_view'
}
, { 'annal:field_placement': 'small:0,12'
, 'annal:field_id': layout.FIELD_TYPEID+'/View_fields'
}
])
if num_fields == 7:
# New blank field, if selected
expect_field_data.append(
{ 'annal:property_uri': None
, 'annal:field_placement': None
, 'annal:field_id': None
})
# log.info(repr(r.context['fields'][5]['field_value']))
check_context_field(self, f5,
field_id= "View_fields",
field_name= "View_fields",
field_label= "Fields",
field_property_uri= "annal:view_fields",
field_render_type= "Group_Seq_Row",
field_value_mode= "Value_direct",
field_value_type= "annal:View_field",
field_value= expect_field_data,
options= self.no_options
)
return
# -----------------------------------------------------------------------------
# Form rendering tests
# -----------------------------------------------------------------------------
def test_get_form_rendering(self):
u = entitydata_edit_url("new", "testcoll", layout.VIEW_TYPEID, view_id="View_view")
r = self.client.get(u+"?continuation_url=/xyzzy/")
self.assertEqual(r.status_code, 200)
self.assertEqual(r.reason_phrase, "OK")
field_vals = default_fields(
coll_id="testcoll", type_id=layout.VIEW_TYPEID, entity_id="00000001",
tooltip1=context_view_field(r.context, 0, 0)['field_tooltip'],
tooltip2=context_view_field(r.context, 1, 0)['field_tooltip'],
tooltip3=context_view_field(r.context, 2, 0)['field_tooltip'],
tooltip4=context_view_field(r.context, 3, 0)['field_tooltip'],
tooltip5=context_view_field(r.context, 4, 0)['field_tooltip'],
tooltip6f1=context_view_field(r.context, 5, 0).
_field_description['group_field_descs'][0]['field_tooltip_test']
)
formrow1 = """
<div class="small-12 medium-6 columns" title="%(tooltip1)s">
<div class="row view-value-row">
<div class="%(label_classes)s">
<span>View Id</span>
</div>
<div class="%(input_classes)s">
<input type="text" size="64" name="entity_id"
placeholder="(view id)" value="%(entity_id)s"/>
</div>
</div>
</div>
"""%field_vals(width=6)
formrow2 = """
<div class="small-12 columns" title="%(tooltip2)s">
<div class="row view-value-row">
<div class="%(label_classes)s">
<span>Label</span>
</div>
<div class="%(input_classes)s">
<input type="text" size="64" name="View_label"
placeholder="(view label)"
value="%(default_label_esc)s"/>
</div>
</div>
</div>
"""%field_vals(width=12)
formrow3 = """
<div class="small-12 columns" title="%(tooltip3)s">
<div class="row view-value-row">
<div class="%(label_classes)s">
<span>Help</span>
</div>
<div class="%(input_classes)s">
<textarea cols="64" rows="6" name="View_comment"
class="small-rows-4 medium-rows-8"
placeholder="(description of record view)">
%(default_comment_esc)s
</textarea>
</div>
</div>
</div>
"""%field_vals(width=12)
formrow4 = """
<div class="small-12 columns" title="%(tooltip4)s">
<div class="row view-value-row">
<div class="%(label_classes)s">
<span>View entity type</span>
</div>
<div class="%(input_classes)s">
<input type="text" size="64" name="View_entity_type"
placeholder="(Entity type URI/CURIE displayed by view)"
value=""/>
</div>
</div>
</div>
"""%field_vals(width=12)
formrow5 = """
<div class="small-12 medium-6 columns" title="%(tooltip5)s">
<div class="row view-value-row">
<div class="%(label_classes)s">
<span>Editable view?</span>
</div>
<div class="%(input_classes)s">
<input type="checkbox" name="View_edit_view" value="Yes" checked="checked" />
<span class="value-placeholder">(edit view from edit entity form)</span>
</div>
</div>
</div>
"""%field_vals(width=6)
formrow6 = """
<div class="small-1 columns checkbox-in-edit-padding">
<input type="checkbox" class="select-box right"
name="View_fields__select_fields"
value="0" />
</div>
"""
formrow6f1 = ("""
<div class="small-12 medium-4 columns" title="%(tooltip6f1)s">
<div class="row show-for-small-only">
<div class="view-label small-12 columns">
<span>Field ref</span>
</div>
</div>
<div class="row view-value-col">
<div class="view-value small-12 columns">
"""+
render_select_options(
"View_fields__0__View_field_sel", "Field ref",
no_selection("(field ref)") + get_site_default_entity_fields_sorted(),
layout.FIELD_TYPEID+"/Entity_id",
placeholder="(field reference)"
)+
"""
</div>
</div>
</div>
""")%field_vals(width=4)
# log.info("*** View content: "+r.content)
self.assertContains(r, formrow1, html=True)
self.assertContains(r, formrow2, html=True)
self.assertContains(r, formrow3, html=True)
self.assertContains(r, formrow4, html=True)
self.assertContains(r, formrow5, html=True)
self.assertContains(r, formrow6, html=True)
self.assertContains(r, formrow6f1, html=True)
return
def test_get_new(self):
u = entitydata_edit_url("new", "testcoll", layout.VIEW_TYPEID, view_id="View_view")
r = self.client.get(u+"?continuation_url=/xyzzy/")
self.assertEqual(r.status_code, 200)
self.assertEqual(r.reason_phrase, "OK")
# Test context
# view_url = collection_entity_view_url(
# coll_id="testcoll", type_id=layout.VIEW_TYPEID, entity_id="00000001"
# )
self.assertEqual(r.context['coll_id'], "testcoll")
self.assertEqual(r.context['type_id'], layout.VIEW_TYPEID)
self.assertEqual(r.context['entity_id'], "00000001")
self.assertEqual(r.context['orig_id'], None)
self.assertEqual(r.context['entity_uri'], None)
self.assertEqual(r.context['action'], "new")
self.assertEqual(r.context['edit_view_button'], False)
self.assertEqual(r.context['continuation_url'], "/xyzzy/")
# Fields initially created
self._check_default_view_context_fields(r,
action="new",
view_id="00000001", orig_view_id=None,
view_label="", # default_label("testcoll", layout.VIEW_TYPEID, "00000001"),
view_entity_type="",
# view_url=recordview_url("testcoll", "00000001"),
field_choices=self.field_options_no_special,
continuation_url="/xyzzy/"
)
return
def test_get_copy(self):
u = entitydata_edit_url(
"copy", "testcoll", layout.VIEW_TYPEID, entity_id="Default_view", view_id="View_view"
)
r = self.client.get(u)
self.assertEqual(r.status_code, 200)
self.assertEqual(r.reason_phrase, "OK")
# Test context (values read from test data fixture)
# view_url = collection_entity_view_url(
# coll_id="testcoll", type_id=layout.VIEW_TYPEID, entity_id="Default_view"
# )
self.assertEqual(r.context['coll_id'], "testcoll")
self.assertEqual(r.context['type_id'], layout.VIEW_TYPEID)
self.assertEqual(r.context['entity_id'], "Default_view_01")
self.assertEqual(r.context['orig_id'], "Default_view")
self.assertEqual(r.context['entity_uri'], None)
self.assertEqual(r.context['action'], "copy")
self.assertEqual(r.context['edit_view_button'], False)
self.assertEqual(r.context['continuation_url'], "")
# Fields
self._check_default_view_context_fields(r,
action="copy",
view_id="Default_view_01", orig_view_id="Default_view",
view_label="Default record view",
# view_url=view_url,
view_uri=None,
view_entity_type="",
field_choices=self.field_options_no_special,
continuation_url=""
)
return
def test_get_copy_not_exists(self):
u = entitydata_edit_url(
"copy", "testcoll", layout.VIEW_TYPEID,
entity_id="noview", view_id="View_view"
)
r = self.client.get(u)
# log.info(r.content)
self.check_entity_not_found_response(r,
err_msg=make_message(
message.ENTITY_DOES_NOT_EXIST,
type_id=layout.VIEW_TYPEID,
id="noview",
label=error_label("testcoll", layout.VIEW_TYPEID, "noview")
)
)
return
def test_get_edit(self):
u = entitydata_edit_url(
"edit", "testcoll", layout.VIEW_TYPEID, entity_id="Default_view", view_id="View_view"
)
r = self.client.get(u)
self.assertEqual(r.status_code, 200)
self.assertEqual(r.reason_phrase, "OK")
# Test context (values read from test data fixture)
# view_url = collection_entity_view_url(
# coll_id="testcoll", type_id=layout.VIEW_TYPEID, entity_id="Default_view"
# )
self.assertEqual(r.context['coll_id'], "testcoll")
self.assertEqual(r.context['type_id'], layout.VIEW_TYPEID)
self.assertEqual(r.context['entity_id'], "Default_view")
self.assertEqual(r.context['orig_id'], "Default_view")
self.assertEqual(r.context['entity_uri'], "annal:display/Default_view")
self.assertEqual(r.context['action'], "edit")
self.assertEqual(r.context['edit_view_button'], False)
self.assertEqual(r.context['continuation_url'], "")
# Fields
self._check_default_view_context_fields(r,
action="edit",
view_id="Default_view", orig_view_id="Default_view",
view_label="Default record view",
# view_url=view_url,
view_uri="annal:display/Default_view",
view_entity_type="",
field_choices=self.field_options_no_special,
continuation_url=""
)
return
def test_get_edit_not_exists(self):
u = entitydata_edit_url(
"edit", "testcoll", layout.VIEW_TYPEID,
entity_id="noview", view_id="View_view"
)
r = self.client.get(u)
# log.info(r.content)
self.check_entity_not_found_response(r,
err_msg=make_message(
message.ENTITY_DOES_NOT_EXIST,
type_id=layout.VIEW_TYPEID,
id="noview",
label=error_label("testcoll", layout.VIEW_TYPEID, "noview")
)
)
return
# Test rendering of view with repeated field structure - in this case, View_view
def test_get_recordview_edit(self):
u = entitydata_edit_url(
"edit", "testcoll", layout.VIEW_TYPEID, entity_id="View_view",
view_id="View_view"
)
r = self.client.get(u)
self.assertEqual(r.status_code, 200)
self.assertEqual(r.reason_phrase, "OK")
# Test context (values read from test data fixture)
# view_url = collection_entity_view_url(
# coll_id="testcoll", type_id=layout.VIEW_TYPEID, entity_id="View_view"
# )
self.assertEqual(r.context['coll_id'], "testcoll")
self.assertEqual(r.context['type_id'], layout.VIEW_TYPEID)
self.assertEqual(r.context['entity_id'], "View_view")
self.assertEqual(r.context['orig_id'], "View_view")
self.assertEqual(r.context['entity_uri'], "annal:display/View_view")
self.assertEqual(r.context['action'], "edit")
self.assertEqual(r.context['continuation_url'], "")
# Fields
self._check_view_view_context_fields(r, action="edit")
return
def test_get_recordview_edit_add_field(self):
u = entitydata_edit_url(
"edit", "testcoll", layout.VIEW_TYPEID, entity_id="View_view",
view_id="View_view"
)
u = uri_with_params(u, {'add_field': 'View_fields'})
r = self.client.get(u)
self.assertEqual(r.status_code, 200)
self.assertEqual(r.reason_phrase, "OK")
# Test context (values read from test data fixture)
# view_url = collection_entity_view_url(
# coll_id="testcoll", type_id=layout.VIEW_TYPEID, entity_id="View_view"
# )
self.assertEqual(r.context['coll_id'], "testcoll")
self.assertEqual(r.context['type_id'], layout.VIEW_TYPEID)
self.assertEqual(r.context['entity_id'], "View_view")
self.assertEqual(r.context['orig_id'], "View_view")
self.assertEqual(r.context['entity_uri'], "annal:display/View_view")
self.assertEqual(r.context['action'], "edit")
self.assertEqual(r.context['continuation_url'], "")
# View context
self._check_view_view_context_fields(r, action="edit", num_fields=7)
return
# -----------------------------------------------------------------------------
# Form response tests
# -----------------------------------------------------------------------------
# -------- new view --------
def test_post_new_view(self):
self.assertFalse(RecordView.exists(self.testcoll, "newview"))
f = view_view_form_data(view_id="newview", action="new", update="NewView")
u = entitydata_edit_url("new", "testcoll", layout.VIEW_TYPEID, view_id="View_view")
r = self.client.post(u, f)
# print r.content
self.assertEqual(r.status_code, 302)
self.assertEqual(r.reason_phrase, "Found")
self.assertEqual(r.content, b"")
self.assertEqual(r['location'], self.continuation_url)
        # Check that new record view exists
self._check_recordview_values("newview", update="NewView", num_fields=0)
return
def test_post_new_view_cancel(self):
self.assertFalse(RecordView.exists(self.testcoll, "newview"))
f = view_view_form_data(
view_id="newview",
action="new", cancel="Cancel", update="Updated RecordView"
)
u = entitydata_edit_url("new", "testcoll", layout.VIEW_TYPEID, view_id="View_view")
r = self.client.post(u, f)
self.assertEqual(r.status_code, 302)
self.assertEqual(r.reason_phrase, "Found")
self.assertEqual(r.content, b"")
self.assertEqual(r['location'], self.continuation_url)
        # Check that new record view still does not exist
self.assertFalse(RecordView.exists(self.testcoll, "newview"))
return
def test_post_new_view_missing_id(self):
f = view_view_form_data(
view_id="",
action="new", update="RecordView"
)
u = entitydata_edit_url("new", "testcoll", layout.VIEW_TYPEID, view_id="View_view")
# log.info("u %s, f %r"%(u,f))
r = self.client.post(u, f)
# print r.content
self.assertEqual(r.status_code, 200)
self.assertEqual(r.reason_phrase, "OK")
self.assertContains(r, "<h3>%s</h3>"%(message.RECORD_VIEW_ID,))
# Test context
self._check_default_view_context_fields(r,
action="new",
view_id="", orig_view_id="orig_view_id",
view_label=None,
view_entity_type="annal:View",
)
return
def test_post_new_view_invalid_id(self):
f = view_view_form_data(
view_id="!badview", orig_id="orig_view_id",
action="new", update="RecordView"
)
u = entitydata_edit_url("new", "testcoll", layout.VIEW_TYPEID, view_id="View_view")
# log.info("u %s, f %r"%(u,f))
r = self.client.post(u, f)
self.assertEqual(r.status_code, 200)
self.assertEqual(r.reason_phrase, "OK")
self.assertContains(r, "<h3>%s</h3>"%(message.RECORD_VIEW_ID,))
# Check context
self._check_default_view_context_fields(r,
action="new",
view_id="!badview", orig_view_id="orig_view_id",
view_label=None,
view_entity_type="annal:View",
)
return
# -------- copy view --------
def test_post_copy_view(self):
self.assertFalse(RecordView.exists(self.testcoll, "copyview"))
f = view_view_form_data(
view_id="copyview",
orig_id="Default_view", orig_coll="_annalist_site", action="copy",
update="RecordView"
)
u = entitydata_edit_url(
"copy", "testcoll", layout.VIEW_TYPEID, entity_id="Default_view", view_id="View_view"
)
r = self.client.post(u, f)
self.assertEqual(r.status_code, 302)
self.assertEqual(r.reason_phrase, "Found")
self.assertEqual(r.content, b"")
self.assertEqual(r['location'], self.continuation_url)
        # Check that new record view exists
self._check_recordview_values("copyview", update="RecordView")
return
def test_post_copy_view_cancel(self):
self.assertFalse(RecordView.exists(self.testcoll, "copyview"))
f = view_view_form_data(
view_id="copyview", orig_id="Default_view",
action="copy", cancel="Cancel", update="RecordView"
)
u = entitydata_edit_url(
"copy", "testcoll", layout.VIEW_TYPEID, entity_id="Default_view", view_id="View_view"
)
r = self.client.post(u, f)
self.assertEqual(r.status_code, 302)
self.assertEqual(r.reason_phrase, "Found")
self.assertEqual(r.content, b"")
self.assertEqual(r['location'], self.continuation_url)
# Check that target record view still does not exist
self.assertFalse(RecordView.exists(self.testcoll, "copyview"))
return
def test_post_copy_view_missing_id(self):
f = view_view_form_data(
view_id="", orig_id="Default_view",
action="copy", update="Updated RecordView"
)
u = entitydata_edit_url(
"copy", "testcoll", layout.VIEW_TYPEID, entity_id="Default_view", view_id="View_view"
)
r = self.client.post(u, f)
self.assertEqual(r.status_code, 200)
self.assertEqual(r.reason_phrase, "OK")
self.assertContains(r, "<h3>%s</h3>"%(message.RECORD_VIEW_ID,))
# Test context
self._check_default_view_context_fields(r,
action="copy",
view_id="", orig_view_id="Default_view",
view_label=None,
view_entity_type="annal:View",
update="Updated RecordView"
)
return
def test_post_copy_view_invalid_id(self):
f = view_view_form_data(
view_id="!badview", orig_id="Default_view", action="copy", update="Updated RecordView"
)
u = entitydata_edit_url(
"copy", "testcoll", layout.VIEW_TYPEID, entity_id="Default_view", view_id="View_view"
)
r = self.client.post(u, f)
self.assertEqual(r.status_code, 200)
self.assertEqual(r.reason_phrase, "OK")
self.assertContains(r, "<h3>%s</h3>"%(message.RECORD_VIEW_ID,))
# Test context
self._check_default_view_context_fields(r,
action="copy",
view_id="!badview", orig_view_id="Default_view",
view_label=None,
view_entity_type="annal:View",
update="Updated RecordView"
)
return
# -------- edit view --------
def test_post_edit_view(self):
self._create_record_view("editview")
self._check_recordview_values("editview")
f = view_view_form_data(
view_id="editview", orig_id="editview",
action="edit",
view_entity_type="annal:View",
update="Updated RecordView"
)
u = entitydata_edit_url(
"edit", "testcoll", layout.VIEW_TYPEID, entity_id="editview", view_id="View_view"
)
r = self.client.post(u, f)
self.assertEqual(r.status_code, 302)
self.assertEqual(r.reason_phrase, "Found")
self.assertEqual(r.content, b"")
self.assertEqual(r['location'], self.continuation_url)
# Check that new record view exists
self._check_recordview_values("editview", update="Updated RecordView")
return
def test_post_edit_view_new_id(self):
self._create_record_view("editview1")
self._check_recordview_values("editview1")
f = view_view_form_data(
view_id="editview2", orig_id="editview1",
action="edit",
view_entity_type="annal:View",
update="Updated RecordView"
)
u = entitydata_edit_url(
"edit", "testcoll", layout.VIEW_TYPEID, entity_id="editview1", view_id="View_view"
)
r = self.client.post(u, f)
self.assertEqual(r.status_code, 302)
self.assertEqual(r.reason_phrase, "Found")
self.assertEqual(r.content, b"")
self.assertEqual(r['location'], self.continuation_url)
# Check that new record view exists and old does not
self.assertFalse(RecordView.exists(self.testcoll, "editview1"))
self._check_recordview_values("editview2", update="Updated RecordView")
return
def test_post_edit_view_cancel(self):
self._create_record_view("editview")
self._check_recordview_values("editview")
f = view_view_form_data(
view_id="editview", orig_id="editview",
action="edit", cancel="Cancel",
view_entity_type="annal:View",
update="Updated RecordView"
)
u = entitydata_edit_url(
"edit", "testcoll", layout.VIEW_TYPEID, entity_id="editview", view_id="View_view"
)
r = self.client.post(u, f)
self.assertEqual(r.status_code, 302)
self.assertEqual(r.reason_phrase, "Found")
self.assertEqual(r.content, b"")
self.assertEqual(r['location'], self.continuation_url)
        # Check that the target record view is unchanged
self._check_recordview_values("editview")
return
def test_post_edit_view_missing_id(self):
self._create_record_view("editview")
self._check_recordview_values("editview")
# Form post with ID missing
f = view_view_form_data(
view_id="", orig_id="editview",
action="edit",
view_entity_type="annal:View",
update="Updated RecordView"
)
u = entitydata_edit_url(
"edit", "testcoll", layout.VIEW_TYPEID, entity_id="editview", view_id="View_view"
)
r = self.client.post(u, f)
self.assertEqual(r.status_code, 200)
self.assertEqual(r.reason_phrase, "OK")
self.assertContains(r, "<h3>%s</h3>"%(message.RECORD_VIEW_ID,))
# Test context
self._check_default_view_context_fields(r,
action="edit",
view_id="", orig_view_id="editview",
view_label=None,
view_entity_type="annal:View",
update="Updated RecordView"
)
# Check original data is unchanged
self._check_recordview_values("editview")
return
def test_post_edit_view_invalid_id(self):
self._create_record_view("editview")
self._check_recordview_values("editview")
# Form post with invalid ID
f = view_view_form_data(
view_id="!badview", orig_id="editview", action="edit", update="Updated RecordView"
)
u = entitydata_edit_url(
"edit", "testcoll", layout.VIEW_TYPEID, entity_id="editview", view_id="View_view"
)
r = self.client.post(u, f)
self.assertEqual(r.status_code, 200)
self.assertEqual(r.reason_phrase, "OK")
self.assertContains(r, "<h3>%s</h3>"%(message.RECORD_VIEW_ID,))
# Test context
self._check_default_view_context_fields(r,
action="edit",
view_id="!badview", orig_view_id="editview",
view_label=None,
view_entity_type="annal:View",
update="Updated RecordView"
)
# Check original data is unchanged
self._check_recordview_values("editview")
return
def test_post_edit_view_field_placement_missing(self):
self._create_record_view("editview")
self._check_recordview_values("editview")
f = view_view_form_data(
view_id="editview", orig_id="editview",
action="edit", update="Updated RecordView",
field3_placement=""
)
u = entitydata_edit_url(
"edit", "testcoll", layout.VIEW_TYPEID, entity_id="editview", view_id="View_view"
)
r = self.client.post(u, f)
self.assertEqual(r.status_code, 302)
self.assertEqual(r.reason_phrase, "Found")
self.assertEqual(r.content, b"")
self.assertEqual(r['location'], self.continuation_url)
# Check that new record view exists
self._check_recordview_values("editview", update="Updated RecordView", field3_placement="")
return
# -----------------------------------------------------------------------------
# Form response tests for view descriptions with repeating fields
# -----------------------------------------------------------------------------
def test_post_add_field(self):
self._create_record_view("addfieldview")
self._check_recordview_values("addfieldview")
f = view_view_form_data(
view_id="addfieldview", orig_id="addfieldview",
action="edit",
view_entity_type="annal:View",
add_field=True
)
u = entitydata_edit_url(
action="edit", coll_id="testcoll", type_id=layout.VIEW_TYPEID, entity_id="addfieldview",
view_id="View_view"
)
r = self.client.post(u, f)
self.assertEqual(r.status_code, 302)
self.assertEqual(r.reason_phrase, "Found")
self.assertEqual(r.content, b"")
v = u + "?continuation_url=" + self.continuation_path
self.assertEqual(v, r['location'])
# Retrieve from redirect location, and test result
r = self.client.get(v)
self.assertEqual(r.status_code, 200)
self.assertEqual(r.reason_phrase, "OK")
# Test context
self._check_default_view_context_fields(r,
action="edit",
add_field=True,
view_id="addfieldview", orig_view_id="addfieldview",
view_label=None,
view_entity_type="annal:View",
)
return
def test_post_remove_field(self):
self._create_record_view("removefieldview")
self._check_recordview_values("removefieldview")
f = view_view_form_data(
view_id="removefieldview", orig_id="removefieldview",
action="edit",
remove_fields=['3']
)
u = entitydata_edit_url(
action="edit", coll_id="testcoll", type_id=layout.VIEW_TYPEID, entity_id="removefieldview",
view_id="View_view"
)
r = self.client.post(u, f)
self.assertEqual(r.status_code, 302)
self.assertEqual(r.reason_phrase, "Found")
self.assertEqual(r.content, b"")
v = u + "?continuation_url=" + self.continuation_path
self.assertEqual(v, r['location'])
# Retrieve from redirect location, and test result
r = self.client.get(v)
self.assertEqual(r.status_code, 200)
self.assertEqual(r.reason_phrase, "OK")
# Test context
self._check_default_view_context_fields(r,
action="edit",
remove_field=True,
view_id="removefieldview", orig_view_id="removefieldview",
view_label=None,
view_entity_type="annal:View",
)
return
def test_post_remove_no_field_selected(self):
self._create_record_view("removefieldview")
self._check_recordview_values("removefieldview")
f = view_view_form_data(
view_id="removefieldview", orig_id="removefieldview",
action="edit",
remove_fields="no-selection"
)
u = entitydata_edit_url(
action="edit", coll_id="testcoll", type_id=layout.VIEW_TYPEID, entity_id="removefieldview",
view_id="View_view"
)
r = self.client.post(u, f)
self.assertEqual(r.status_code, 200)
self.assertEqual(r.reason_phrase, "OK")
self.assertContains(r, "<h3>%s</h3>"%(message.REMOVE_FIELD_ERROR,))
self.assertContains(r, """<p class="messages">%s</p>"""%(message.NO_FIELD_SELECTED,))
# Test context
self._check_default_view_context_fields(r,
action="edit",
view_id="removefieldview", orig_view_id="removefieldview",
view_label=None,
view_entity_type="annal:View",
)
return
def test_post_move_up_fields(self):
self._create_record_view("movefieldview")
self._check_recordview_values("movefieldview")
f = view_view_form_data(
view_id="movefieldview", orig_id="movefieldview",
action="edit",
view_entity_type="annal:View",
move_up_fields=["2","3"]
)
u = entitydata_edit_url(
action="edit", coll_id="testcoll", type_id=layout.VIEW_TYPEID, entity_id="movefieldview",
view_id="View_view"
)
r = self.client.post(u, f)
self.assertEqual(r.status_code, 302)
self.assertEqual(r.reason_phrase, "Found")
self.assertEqual(r.content, b"")
v = u + "?continuation_url=" + self.continuation_path
self.assertEqual(v, r['location'])
# Retrieve from redirect location, and test result
r = self.client.get(v)
self.assertEqual(r.status_code, 200)
self.assertEqual(r.reason_phrase, "OK")
# Test context
self._check_default_view_context_fields(r,
action="edit",
move_up=[2,3],
view_id="movefieldview", orig_view_id="movefieldview",
view_label=None,
view_entity_type="annal:View",
)
return
def test_post_move_down_fields(self):
self._create_record_view("movefieldview")
self._check_recordview_values("movefieldview")
f = view_view_form_data(
view_id="movefieldview", orig_id="movefieldview",
action="edit",
view_entity_type="annal:View",
move_down_fields=["1"]
)
u = entitydata_edit_url(
action="edit", coll_id="testcoll", type_id=layout.VIEW_TYPEID, entity_id="movefieldview",
view_id="View_view"
)
r = self.client.post(u, f)
self.assertEqual(r.status_code, 302)
self.assertEqual(r.reason_phrase, "Found")
self.assertEqual(r.content, b"")
v = u + "?continuation_url=" + self.continuation_path
self.assertEqual(v, r['location'])
# Retrieve from redirect location, and test result
r = self.client.get(v)
self.assertEqual(r.status_code, 200)
self.assertEqual(r.reason_phrase, "OK")
return
# -----------------------------------------------------------------------------
#
# ConfirmRecordViewDeleteTests tests for completion of record deletion
#
# -----------------------------------------------------------------------------
class ConfirmRecordViewDeleteTests(AnnalistTestCase):
"""
    Tests for record view deletion on response to confirmation form
"""
def setUp(self):
init_annalist_test_site()
self.testsite = Site(TestBaseUri, TestBaseDir)
self.testcoll = Collection.create(self.testsite, "testcoll", collection_create_values("testcoll"))
# Login and permissions
create_test_user(self.testcoll, "testuser", "testpassword")
self.client = Client(HTTP_HOST=TestHost)
loggedin = self.client.login(username="testuser", password="testpassword")
self.assertTrue(loggedin)
return
def tearDown(self):
return
def test_CollectionActionViewTest(self):
self.assertEqual(RecordViewDeleteConfirmedView.__name__, "RecordViewDeleteConfirmedView", "Check RecordViewDeleteConfirmedView class name")
return
# NOTE: test_collection checks the appropriate response from clicking the delete button,
# so here only need to test completion code.
def test_post_confirmed_remove_view(self):
t = RecordView.create(self.testcoll, "deleteview", recordview_create_values("deleteview"))
self.assertTrue(RecordView.exists(self.testcoll, "deleteview"))
# Submit positive confirmation
u = TestHostUri + recordview_edit_url("delete", "testcoll")
f = recordview_delete_confirm_form_data("deleteview")
r = self.client.post(u, f)
self.assertEqual(r.status_code, 302)
self.assertEqual(r.reason_phrase, "Found")
self.assertEqual(r.content, b"")
v = collection_edit_url("testcoll")
e1 = "info_head="
e2 = "info_message="
e3 = "deleteview"
e4 = "testcoll"
self.assertIn(v, r['location'])
self.assertIn(e1, r['location'])
self.assertIn(e2, r['location'])
        self.assertIn(e3, r['location'])
        self.assertIn(e4, r['location'])
# Confirm deletion
self.assertFalse(RecordView.exists(self.testcoll, "deleteview"))
return
# End.
#........1.........2.........3.........4.........5.........6.........7.........8
| gklyne/annalist | src/annalist_root/annalist/tests/test_recordview.py | Python | mit | 56,603 | 0.010918 |
'''
Copyright 2013 Cosnita Radu Viorel
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
.. codeauthor:: Radu Viorel Cosnita <[email protected]>
.. py:module:: fantastico.oauth2.models.tests
'''
| rcosnita/fantastico | fantastico/oauth2/models/tests/__init__.py | Python | mit | 1,175 | 0.00766 |
import re
from django.conf import settings
from django.http import HttpResponsePermanentRedirect
class SecurityMiddleware(object):
def __init__(self):
self.sts_seconds = settings.SECURE_HSTS_SECONDS
self.sts_include_subdomains = settings.SECURE_HSTS_INCLUDE_SUBDOMAINS
self.content_type_nosniff = settings.SECURE_CONTENT_TYPE_NOSNIFF
self.xss_filter = settings.SECURE_BROWSER_XSS_FILTER
self.redirect = settings.SECURE_SSL_REDIRECT
self.redirect_host = settings.SECURE_SSL_HOST
self.redirect_exempt = [re.compile(r) for r in settings.SECURE_REDIRECT_EXEMPT]
def process_request(self, request):
path = request.path.lstrip("/")
if (self.redirect and not request.is_secure() and
not any(pattern.search(path)
for pattern in self.redirect_exempt)):
host = self.redirect_host or request.get_host()
return HttpResponsePermanentRedirect(
"https://%s%s" % (host, request.get_full_path())
)
def process_response(self, request, response):
if (self.sts_seconds and request.is_secure() and
'strict-transport-security' not in response):
sts_header = "max-age=%s" % self.sts_seconds
if self.sts_include_subdomains:
sts_header = sts_header + "; includeSubDomains"
response["strict-transport-security"] = sts_header
if self.content_type_nosniff and 'x-content-type-options' not in response:
response["x-content-type-options"] = "nosniff"
if self.xss_filter and 'x-xss-protection' not in response:
response["x-xss-protection"] = "1; mode=block"
return response
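# Usage sketch (assumption: example values only; the setting names are exactly the
# ones read in __init__ above from django.conf.settings):
#
#     SECURE_HSTS_SECONDS = 31536000
#     SECURE_HSTS_INCLUDE_SUBDOMAINS = True
#     SECURE_CONTENT_TYPE_NOSNIFF = True
#     SECURE_BROWSER_XSS_FILTER = True
#     SECURE_SSL_REDIRECT = True
#     SECURE_SSL_HOST = None                    # fall back to the request's host
#     SECURE_REDIRECT_EXEMPT = [r"^health/$"]   # regexes matched against the path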
| BitWriters/Zenith_project | zango/lib/python3.5/site-packages/django/middleware/security.py | Python | mit | 1,753 | 0.001141 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'UserProject.drive_auth'
db.add_column(u'user_project', 'drive_auth',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
def backwards(self, orm):
# Deleting field 'UserProject.drive_auth'
db.delete_column(u'user_project', 'drive_auth')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'home.category': {
'Meta': {'object_name': 'Category', 'db_table': "u'category'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '150'})
},
'projects.project': {
'Meta': {'object_name': 'Project', 'db_table': "u'project'"},
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['home.Category']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 2, 27, 0, 0)', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'image_original_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'licence': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 2, 27, 0, 0)', 'null': 'True', 'blank': 'True'}),
'tags': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'type_field': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True', 'db_column': "'type'", 'blank': 'True'})
},
'projects.projectpart': {
'Meta': {'object_name': 'ProjectPart', 'db_table': "u'project_part'"},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 2, 27, 0, 0)', 'null': 'True', 'blank': 'True'}),
'created_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'projectpart_created_user'", 'to': "orm['auth.User']"}),
'drive_id': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 2, 27, 0, 0)', 'null': 'True', 'blank': 'True'}),
'modified_user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'projectpart_modified_user'", 'null': 'True', 'to': "orm['auth.User']"}),
'order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['projects.Project']"}),
'project_part': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['projects.ProjectPart']", 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'projects.userproject': {
'Meta': {'object_name': 'UserProject', 'db_table': "u'user_project'"},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 2, 27, 0, 0)', 'null': 'True', 'blank': 'True'}),
'created_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'userproject_created_user'", 'to': "orm['auth.User']"}),
'drive_auth': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'modified_user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'userproject_modified_user'", 'null': 'True', 'to': "orm['auth.User']"}),
'permission': ('django.db.models.fields.CharField', [], {'default': '0', 'max_length': '255'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['projects.Project']", 'db_column': "'project_id'"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['projects'] | taikoa/wevolver-server | wevolve/projects/migrations/0006_auto__add_field_userproject_drive_auth.py | Python | agpl-3.0 | 8,173 | 0.007708 |
#!/usr/bin/env python
import fnmatch
import glob
import os
import sys
from setuptools import setup
with open("requirements.txt") as f:
required = f.read().splitlines()
VERSION = "1.2.4"
setup(
name='shmooze',
version=VERSION,
description='Framework for processed-backed web applications',
author='Zach Banks',
author_email='[email protected]',
url='https://github.com/zbanks/shmooze',
packages=[
'shmooze',
'shmooze.wsgi',
'shmooze.modules',
'shmooze.lib',
],
download_url="https://github.com/zbanks/shmooze/tarball/{}".format(VERSION),
zip_safe=False,
install_requires=required,
scripts=[
"bin/shmooze",
"bin/shmz",
],
package_dir = {
},
package_data={
'musicazoo': [
"../supervisord.conf",
"../requirements.txt",
"../settings.json",
'../static/settings.json',
'../static/*.js',
'../static/*.html',
],
},
)
| zbanks/shmooze | setup.py | Python | mit | 1,029 | 0.013605 |
# Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: [email protected]
# Maintained By: [email protected]
from ggrc import db
from ggrc.login import get_current_user
from ggrc.models import all_models
from ggrc.models.object_person import ObjectPerson
from ggrc.models.object_owner import ObjectOwner
from ggrc.models.relationship import Relationship
from ggrc_basic_permissions.models import UserRole
from ggrc_basic_permissions import objects_via_assignable_query
from ggrc_basic_permissions import program_relationship_query
from ggrc.rbac import permissions, context_query_filter
from sqlalchemy import \
event, and_, or_, literal, union, alias, case, func, distinct
from sqlalchemy.sql import false
from sqlalchemy.schema import DDL
from sqlalchemy.ext.declarative import declared_attr
from .sql import SqlIndexer
class MysqlRecordProperty(db.Model):
__tablename__ = 'fulltext_record_properties'
key = db.Column(db.Integer, primary_key=True)
type = db.Column(db.String(64), primary_key=True)
context_id = db.Column(db.Integer)
tags = db.Column(db.String)
property = db.Column(db.String(64), primary_key=True)
content = db.Column(db.Text)
@declared_attr
def __table_args__(self):
return (
# NOTE
# This is here to prevent Alembic from wanting to drop the index, but
# the DDL below or a similar Alembic migration should be used to create
# the index.
db.Index('{}_text_idx'.format(self.__tablename__), 'content'),
# These are real indexes
db.Index('ix_{}_key'.format(self.__tablename__), 'key'),
db.Index('ix_{}_type'.format(self.__tablename__), 'type'),
db.Index('ix_{}_tags'.format(self.__tablename__), 'tags'),
db.Index('ix_{}_context_id'.format(self.__tablename__), 'context_id'),
# Only MyISAM supports fulltext indexes until newer MySQL/MariaDB
{'mysql_engine': 'myisam'},
)
event.listen(
MysqlRecordProperty.__table__,
'after_create',
DDL('ALTER TABLE {tablename} ADD FULLTEXT INDEX {tablename}_text_idx '
'(content)'.format(tablename=MysqlRecordProperty.__tablename__))
)
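# Illustrative only (not part of the original module): with the FULLTEXT
# index in place, a natural-language search could be issued directly as
#   SELECT `key`, `type` FROM fulltext_record_properties
#   WHERE MATCH (content) AGAINST ('some search terms');
# although the indexer below currently filters with LIKE via `contains`
# (see MysqlIndexer._get_filter_query).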
class MysqlIndexer(SqlIndexer):
record_type = MysqlRecordProperty
def _get_type_query(self, model_names, permission_type='read',
permission_model=None):
type_queries = []
for model_name in model_names:
type_query = None
if permission_type == 'read':
contexts = permissions.read_contexts_for(
permission_model or model_name)
resources = permissions.read_resources_for(
permission_model or model_name)
elif permission_type == 'create':
contexts = permissions.create_contexts_for(
permission_model or model_name)
resources = permissions.create_resources_for(
permission_model or model_name)
elif permission_type == 'update':
contexts = permissions.update_contexts_for(
permission_model or model_name)
resources = permissions.update_resources_for(
permission_model or model_name)
elif permission_type == 'delete':
contexts = permissions.delete_contexts_for(
permission_model or model_name)
resources = permissions.delete_resources_for(
permission_model or model_name)
if permission_model and contexts:
contexts = set(contexts) & set(
permissions.read_contexts_for(model_name))
if contexts is not None:
# Don't filter out None contexts here
if None not in contexts and permission_type == "read":
contexts.append(None)
if resources:
resource_sql = and_(
MysqlRecordProperty.type == model_name,
MysqlRecordProperty.key.in_(resources))
else:
resource_sql = false()
type_query = or_(
and_(
MysqlRecordProperty.type == model_name,
context_query_filter(MysqlRecordProperty.context_id, contexts)
),
resource_sql)
type_queries.append(type_query)
else:
type_queries.append(MysqlRecordProperty.type == model_name)
return and_(
MysqlRecordProperty.type.in_(model_names),
or_(*type_queries))
def _get_filter_query(self, terms):
whitelist = or_(
# Because property values for custom attributes are
# `attribute_value_<id>`
MysqlRecordProperty.property.contains('attribute_value'),
MysqlRecordProperty.property.in_(
['title', 'name', 'email', 'notes', 'description', 'slug'])
)
if not terms:
return whitelist
elif terms:
return and_(whitelist, MysqlRecordProperty.content.contains(terms))
# FIXME: Temporary (slow) fix for words shorter than MySQL default limit
# elif len(terms) < 4:
# return MysqlRecordProperty.content.contains(terms)
# else:
# return MysqlRecordProperty.content.match(terms)
def _get_type_select_column(self, model):
mapper = model._sa_class_manager.mapper
if mapper.polymorphic_on is None:
type_column = literal(mapper.class_.__name__)
else:
# Handle polymorphic types with CASE
type_column = case(
value=mapper.polymorphic_on,
whens={
val: m.class_.__name__
for val, m in mapper.polymorphic_map.items()
})
return type_column
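  # For example (values hypothetical): if the discriminator column maps
  # 'policy' -> Policy and 'contract' -> Contract, the CASE expression above
  # labels each row with its concrete class name rather than the base type.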
def _types_to_type_models(self, types):
if types is None:
return all_models.all_models
return [m for m in all_models.all_models if m.__name__ in types]
# filters by "myview" for a given person
def _add_owner_query(self, query, types=None, contact_id=None): # noqa
'''
Finds all objects which might appear on a user's Profile or Dashboard
pages, including:
Objects mapped via ObjectPerson
Objects owned via ObjectOwner
Objects in private contexts via UserRole (e.g. for Private Programs)
Objects for which the user is the "contact"
Objects for which the user is the "primary_assessor" or
"secondary_assessor"
Objects to which the user is mapped via a custom attribute
Assignable objects for which the user is an assignee
This method only *limits* the result set -- Contexts and Roles will still
filter out forbidden objects.
'''
# Check if the user has Creator role
current_user = get_current_user()
my_objects = contact_id is not None
if current_user.system_wide_role == "Creator":
contact_id = current_user.id
if not contact_id:
return query
type_models = self._types_to_type_models(types)
model_names = [model.__name__ for model in type_models]
models = []
for model in type_models:
base_model = model._sa_class_manager.mapper.primary_base_mapper.class_
if base_model not in models:
models.append(base_model)
models = [(model, self._get_type_select_column(model)) for model in models]
type_union_queries = []
all_people = db.session.query(
all_models.Person.id.label('id'),
literal(all_models.Person.__name__).label('type'),
literal(None).label('context_id')
)
type_union_queries.append(all_people)
# Objects to which the user is "mapped"
# We don't return mapped objects for the Creator because being mapped
# does not give the Creator necessary permissions to view the object.
if current_user.system_wide_role != "Creator":
object_people_query = db.session.query(
ObjectPerson.personable_id.label('id'),
ObjectPerson.personable_type.label('type'),
literal(None).label('context_id')
).filter(
and_(
ObjectPerson.person_id == contact_id,
ObjectPerson.personable_type.in_(model_names)
)
)
type_union_queries.append(object_people_query)
# Objects for which the user is an "owner"
object_owners_query = db.session.query(
ObjectOwner.ownable_id.label('id'),
ObjectOwner.ownable_type.label('type'),
literal(None).label('context_id')
).filter(
and_(
ObjectOwner.person_id == contact_id,
ObjectOwner.ownable_type.in_(model_names),
)
)
type_union_queries.append(object_owners_query)
# Objects to which the user is mapped via a custom attribute
ca_mapped_objects_query = db.session.query(
all_models.CustomAttributeValue.attributable_id.label('id'),
all_models.CustomAttributeValue.attributable_type.label('type'),
literal(None).label('context_id')
).filter(
and_(
all_models.CustomAttributeValue.attribute_value == "Person",
all_models.CustomAttributeValue.attribute_object_id == contact_id
)
)
type_union_queries.append(ca_mapped_objects_query)
# Objects for which the user is assigned
model_assignee_query = db.session.query(
Relationship.destination_id.label('id'),
Relationship.destination_type.label('type'),
literal(None).label('context_id'),
).filter(
and_(
Relationship.source_type == "Person",
Relationship.source_id == contact_id,
),
)
type_union_queries.append(model_assignee_query)
model_assignee_query = db.session.query(
Relationship.source_id.label('id'),
Relationship.source_type.label('type'),
literal(None).label('context_id'),
).filter(
and_(
Relationship.destination_type == "Person",
Relationship.destination_id == contact_id,
),
)
type_union_queries.append(model_assignee_query)
if not my_objects:
type_union_queries.append(
program_relationship_query(contact_id, True))
type_union_queries.append(
objects_via_assignable_query(contact_id)
)
# FIXME The following line crashes if the Workflow extension is not enabled
for model in [all_models.Program, all_models.Audit, all_models.Workflow]:
context_query = db.session.query(
model.id.label('id'),
literal(model.__name__).label('type'),
literal(None).label('context_id'),
).join(
UserRole,
and_(
UserRole.context_id == model.context_id,
UserRole.person_id == contact_id,
)
)
type_union_queries.append(context_query)
for model, type_column in models:
# Objects for which the user is the "contact" or "secondary contact"
if hasattr(model, 'contact_id'):
model_type_query = db.session.query(
model.id.label('id'),
type_column.label('type'),
literal(None).label('context_id')
).filter(
model.contact_id == contact_id
).distinct()
type_union_queries.append(model_type_query)
      # Objects for which the user is the "secondary contact"
if hasattr(model, 'secondary_contact_id'):
model_type_query = db.session.query(
model.id.label('id'),
type_column.label('type'),
literal(None).label('context_id')
).filter(
model.secondary_contact_id == contact_id
).distinct()
type_union_queries.append(model_type_query)
if model is all_models.Control:
# Control also has `principal_assessor` and `secondary_assessor`
assessor_queries = []
if hasattr(model, 'principal_assessor_id'):
assessor_queries.append(or_(
model.principal_assessor_id == contact_id))
if hasattr(model, 'secondary_assessor_id'):
assessor_queries.append(or_(
model.secondary_assessor_id == contact_id))
model_type_query = db.session.query(
model.id.label('id'),
type_column.label('type'),
literal(None).label('context_id')
).filter(
or_(*assessor_queries)
).distinct()
type_union_queries.append(model_type_query)
# Construct and JOIN to the UNIONed result set
type_union_query = alias(union(*type_union_queries))
query = query.join(
type_union_query,
and_(
type_union_query.c.id == MysqlRecordProperty.key,
type_union_query.c.type == MysqlRecordProperty.type),
)
return query
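  # In short: the query above UNIONs every (id, type) source of "my objects"
  # (direct mappings, ownership, custom attribute references, assignments and
  # private-context roles) and joins that set against the fulltext records,
  # so only rows relevant to the given person remain.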
def _add_extra_params_query(self, query, type, extra_param):
if not extra_param:
return query
models = [m for m in all_models.all_models if m.__name__ == type]
if len(models) == 0:
return query
model = models[0]
return query.filter(self.record_type.key.in_(
db.session.query(
model.id.label('id')
).filter_by(**extra_param)
))
def _get_grouped_types(self, types, extra_params=None):
model_names = [model.__name__ for model in all_models.all_models]
if types is not None:
model_names = [m for m in model_names if m in types]
if extra_params is not None:
model_names = [m for m in model_names if m not in extra_params]
return model_names
def search(self, terms, types=None, permission_type='read',
permission_model=None, contact_id=None, extra_params={}):
model_names = self._get_grouped_types(types, extra_params)
query = db.session.query(
self.record_type.key, self.record_type.type,
self.record_type.property, self.record_type.content)
query = query.filter(
self._get_type_query(model_names, permission_type, permission_model))
query = query.filter(self._get_filter_query(terms))
query = self._add_owner_query(query, types, contact_id)
model_names = [model.__name__ for model in all_models.all_models]
if types is not None:
model_names = [m for m in model_names if m in types]
unions = []
# Add extra_params and extra_colums:
for k, v in extra_params.iteritems():
if k not in model_names:
continue
q = db.session.query(
self.record_type.key, self.record_type.type,
self.record_type.property, self.record_type.content)
q = q.filter(
self._get_type_query([k], permission_type, permission_model))
q = q.filter(self._get_filter_query(terms))
q = self._add_owner_query(q, [k], contact_id)
q = self._add_extra_params_query(q, k, v)
unions.append(q)
# Sort by title:
# FIXME: This only orders by `title` if title was the matching property
query = query.union(*unions)
query = query.order_by(case(
[(self.record_type.property == "title", self.record_type.content)],
else_=literal("ZZZZZ")))
return query
def counts(self, terms, group_by_type=True, types=None, contact_id=None,
extra_params={}, extra_columns={}):
model_names = self._get_grouped_types(types, extra_params)
query = db.session.query(
self.record_type.type, func.count(distinct(
self.record_type.key)), literal(""))
query = query.filter(self._get_type_query(model_names))
query = query.filter(self._get_filter_query(terms))
query = self._add_owner_query(query, types, contact_id)
query = query.group_by(self.record_type.type)
all_extra_columns = dict(extra_columns.items() +
[(p, p) for p in extra_params
if p not in extra_columns])
if not all_extra_columns:
return query.all()
# Add extra_params and extra_colums:
for k, v in all_extra_columns.iteritems():
q = db.session.query(
self.record_type.type, func.count(
distinct(self.record_type.key)), literal(k))
q = q.filter(self._get_type_query([v]))
q = q.filter(self._get_filter_query(terms))
q = self._add_owner_query(q, [v], contact_id)
q = self._add_extra_params_query(q, v, extra_params.get(k, None))
q = q.group_by(self.record_type.type)
query = query.union(q)
return query.all()
Indexer = MysqlIndexer
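# The fulltext package appears to import `Indexer` from whichever backend
# module is configured; the alias above selects this MySQL implementation.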
| prasannav7/ggrc-core | src/ggrc/fulltext/mysql.py | Python | apache-2.0 | 16,137 | 0.00471 |
from functools import partial
import threading
from PIL import Image
from PyQt4.Qt import Qt
from PyQt4.Qt import QGridLayout, QInputDialog, QPushButton
from PyQt4.Qt import QVBoxLayout, QLabel, SIGNAL
from electrum_gui.qt.main_window import StatusBarButton
from electrum_gui.qt.password_dialog import PasswordDialog
from electrum_gui.qt.util import *
from .plugin import TrezorCompatiblePlugin, TIM_NEW, TIM_RECOVER, TIM_MNEMONIC
from electrum.i18n import _
from electrum.plugins import hook, DeviceMgr
from electrum.util import PrintError
from electrum.wallet import Wallet, BIP44_Wallet
from electrum.wizard import UserCancelled
# By far the trickiest thing about this handler is the window stack;
# MacOSX is very fussy that modal dialogs are perfectly parented
class QtHandler(PrintError):
    '''An interface between the GUI (here, Qt) and the device-handling
    logic for I/O. This is a generic implementation of the Trezor
    protocol; derived classes can customize it.'''
def __init__(self, win, pin_matrix_widget_class, device):
win.connect(win, SIGNAL('clear_dialog'), self.clear_dialog)
win.connect(win, SIGNAL('error_dialog'), self.error_dialog)
win.connect(win, SIGNAL('message_dialog'), self.message_dialog)
win.connect(win, SIGNAL('pin_dialog'), self.pin_dialog)
win.connect(win, SIGNAL('passphrase_dialog'), self.passphrase_dialog)
win.connect(win, SIGNAL('word_dialog'), self.word_dialog)
self.win = win
self.pin_matrix_widget_class = pin_matrix_widget_class
self.device = device
self.dialog = None
self.done = threading.Event()
def top_level_window(self):
return self.win.top_level_window()
def watching_only_changed(self):
self.win.emit(SIGNAL('watching_only_changed'))
def show_message(self, msg, cancel_callback=None):
self.win.emit(SIGNAL('message_dialog'), msg, cancel_callback)
def show_error(self, msg):
self.win.emit(SIGNAL('error_dialog'), msg)
def finished(self):
self.win.emit(SIGNAL('clear_dialog'))
def get_pin(self, msg):
self.done.clear()
self.win.emit(SIGNAL('pin_dialog'), msg)
self.done.wait()
return self.response
def get_word(self, msg):
self.done.clear()
self.win.emit(SIGNAL('word_dialog'), msg)
self.done.wait()
return self.word
def get_passphrase(self, msg):
self.done.clear()
self.win.emit(SIGNAL('passphrase_dialog'), msg)
self.done.wait()
return self.passphrase
def pin_dialog(self, msg):
# Needed e.g. when resetting a device
self.clear_dialog()
dialog = WindowModalDialog(self.top_level_window(), _("Enter PIN"))
matrix = self.pin_matrix_widget_class()
vbox = QVBoxLayout()
vbox.addWidget(QLabel(msg))
vbox.addWidget(matrix)
vbox.addLayout(Buttons(CancelButton(dialog), OkButton(dialog)))
dialog.setLayout(vbox)
dialog.exec_()
self.response = str(matrix.get_value())
self.done.set()
def passphrase_dialog(self, msg):
d = PasswordDialog(self.top_level_window(), None, msg,
PasswordDialog.PW_PASSPHRASE)
confirmed, p, passphrase = d.run()
if confirmed:
passphrase = BIP44_Wallet.normalize_passphrase(passphrase)
self.passphrase = passphrase
self.done.set()
def word_dialog(self, msg):
dialog = WindowModalDialog(self.top_level_window(), "")
hbox = QHBoxLayout(dialog)
hbox.addWidget(QLabel(msg))
text = QLineEdit()
text.setMaximumWidth(100)
text.returnPressed.connect(dialog.accept)
hbox.addWidget(text)
hbox.addStretch(1)
if not dialog.exec_():
return None
self.word = unicode(text.text())
self.done.set()
def message_dialog(self, msg, cancel_callback):
# Called more than once during signing, to confirm output and fee
self.clear_dialog()
title = _('Please check your %s device') % self.device
self.dialog = dialog = WindowModalDialog(self.top_level_window(), title)
l = QLabel(msg)
vbox = QVBoxLayout(dialog)
if cancel_callback:
vbox.addLayout(Buttons(CancelButton(dialog)))
dialog.connect(dialog, SIGNAL('rejected()'), cancel_callback)
vbox.addWidget(l)
dialog.show()
def error_dialog(self, msg):
self.win.show_error(msg, parent=self.top_level_window())
def clear_dialog(self):
if self.dialog:
self.dialog.accept()
self.dialog = None
def query_choice(self, msg, labels):
return self.win.query_choice(msg, labels)
def request_trezor_init_settings(self, method, device):
wizard = self.win
vbox = QVBoxLayout()
main_label = QLabel(_("Initialization settings for your %s:") % device)
vbox.addWidget(main_label)
OK_button = OkButton(wizard, _('Next'))
def clean_text(widget):
text = unicode(widget.toPlainText()).strip()
return ' '.join(text.split())
if method in [TIM_NEW, TIM_RECOVER]:
gb = QGroupBox()
vbox1 = QVBoxLayout()
gb.setLayout(vbox1)
vbox.addWidget(gb)
gb.setTitle(_("Select your seed length:"))
choices = [
_("12 words"),
_("18 words"),
_("24 words"),
]
bg = QButtonGroup()
for i, choice in enumerate(choices):
rb = QRadioButton(gb)
rb.setText(choice)
bg.addButton(rb)
bg.setId(rb, i)
vbox1.addWidget(rb)
rb.setChecked(True)
cb_pin = QCheckBox(_('Enable PIN protection'))
cb_pin.setChecked(True)
else:
text = QTextEdit()
text.setMaximumHeight(60)
if method == TIM_MNEMONIC:
msg = _("Enter your BIP39 mnemonic:")
else:
msg = _("Enter the master private key beginning with xprv:")
def set_enabled():
OK_button.setEnabled(Wallet.is_xprv(clean_text(text)))
text.textChanged.connect(set_enabled)
OK_button.setEnabled(False)
vbox.addWidget(QLabel(msg))
vbox.addWidget(text)
pin = QLineEdit()
pin.setValidator(QRegExpValidator(QRegExp('[1-9]{0,10}')))
pin.setMaximumWidth(100)
hbox_pin = QHBoxLayout()
hbox_pin.addWidget(QLabel(_("Enter your PIN (digits 1-9):")))
hbox_pin.addWidget(pin)
hbox_pin.addStretch(1)
label = QLabel(_("Enter a label to name your device:"))
name = QLineEdit()
hl = QHBoxLayout()
hl.addWidget(label)
hl.addWidget(name)
hl.addStretch(1)
vbox.addLayout(hl)
if method in [TIM_NEW, TIM_RECOVER]:
vbox.addWidget(cb_pin)
else:
vbox.addLayout(hbox_pin)
cb_phrase = QCheckBox(_('Enable Passphrase protection'))
cb_phrase.setChecked(False)
vbox.addWidget(cb_phrase)
vbox.addStretch(1)
vbox.addLayout(Buttons(CancelButton(wizard), OK_button))
wizard.set_layout(vbox)
if not wizard.exec_():
raise UserCancelled
if method in [TIM_NEW, TIM_RECOVER]:
item = bg.checkedId()
pin = cb_pin.isChecked()
else:
item = ' '.join(str(clean_text(text)).split())
pin = str(pin.text())
return (item, unicode(name.text()), pin, cb_phrase.isChecked())
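# The handler above marshals device prompts onto the Qt main thread: a worker
# thread emits a signal and blocks on `self.done`, while the GUI slot collects
# the user's response and sets the event. Derived handlers should follow the
# same emit-then-wait pattern.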
def qt_plugin_class(base_plugin_class):
class QtPlugin(base_plugin_class):
# Derived classes must provide the following class-static variables:
# icon_file
# pin_matrix_widget_class
def create_handler(self, window):
return QtHandler(window, self.pin_matrix_widget_class(), self.device)
@hook
def load_wallet(self, wallet, window):
if type(wallet) != self.wallet_class:
return
window.tzb = StatusBarButton(QIcon(self.icon_file), self.device,
partial(self.settings_dialog, window))
window.statusBar().addPermanentWidget(window.tzb)
wallet.handler = self.create_handler(window)
# Trigger a pairing
self.get_client(wallet)
def on_create_wallet(self, wallet, wizard):
assert type(wallet) == self.wallet_class
wallet.handler = self.create_handler(wizard)
self.select_device(wallet)
wallet.create_hd_account(None)
@hook
def receive_menu(self, menu, addrs, wallet):
if type(wallet) == self.wallet_class and len(addrs) == 1:
menu.addAction(_("Show on %s") % self.device,
lambda: self.show_address(wallet, addrs[0]))
def settings_dialog(self, window):
hid_id = self.choose_device(window)
if hid_id:
SettingsDialog(window, self, hid_id).exec_()
def choose_device(self, window):
'''This dialog box should be usable even if the user has
forgotten their PIN or it is in bootloader mode.'''
handler = window.wallet.handler
hid_id = self.device_manager().wallet_hid_id(window.wallet)
if not hid_id:
clients, labels = self.unpaired_clients(handler)
if clients:
msg = _("Select a %s device:") % self.device
choice = self.query_choice(window, msg, labels)
if choice is not None:
hid_id = clients[choice].hid_id()
else:
handler.show_error(_("No devices found"))
return hid_id
def query_choice(self, window, msg, choices):
dialog = WindowModalDialog(window)
clayout = ChoicesLayout(msg, choices)
layout = clayout.layout()
layout.addStretch(1)
layout.addLayout(Buttons(CancelButton(dialog), OkButton(dialog)))
dialog.setLayout(layout)
if not dialog.exec_():
return None
return clayout.selected_index()
return QtPlugin
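# Rough usage sketch (names illustrative, not taken from this file): a concrete
# hardware-wallet plugin would subclass the returned class, e.g.
#   class Plugin(qt_plugin_class(TrezorCompatiblePlugin)):
#       icon_file = ':icons/trezor.png'         # hypothetical resource path
#       pin_matrix_widget_class = ...           # supplies the PIN matrix widget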
class SettingsDialog(WindowModalDialog):
'''This dialog doesn't require a device be paired with a wallet.
We want users to be able to wipe a device even if they've forgotten
their PIN.'''
def __init__(self, window, plugin, hid_id):
title = _("%s Settings") % plugin.device
super(SettingsDialog, self).__init__(window, title)
self.setMaximumWidth(540)
devmgr = plugin.device_manager()
handler = window.wallet.handler
# wallet can be None, needn't be window.wallet
wallet = devmgr.wallet_by_hid_id(hid_id)
hs_rows, hs_cols = (64, 128)
def get_client():
client = devmgr.client_by_hid_id(hid_id, handler)
if not client:
self.show_error("Device not connected!")
raise RuntimeError("Device not connected")
return client
def update():
            # keep features on self so the nested helper functions below can read it
client = get_client()
features = self.features = client.features
set_label_enabled()
bl_hash = features.bootloader_hash.encode('hex')
bl_hash = "\n".join([bl_hash[:32], bl_hash[32:]])
noyes = [_("No"), _("Yes")]
endis = [_("Enable Passphrases"), _("Disable Passphrases")]
setchange = [_("Set a PIN"), _("Change PIN")]
version = "%d.%d.%d" % (features.major_version,
features.minor_version,
features.patch_version)
coins = ", ".join(coin.coin_name for coin in features.coins)
device_label.setText(features.label)
pin_set_label.setText(noyes[features.pin_protection])
bl_hash_label.setText(bl_hash)
label_edit.setText(features.label)
device_id_label.setText(features.device_id)
serial_number_label.setText(client.hid_id())
initialized_label.setText(noyes[features.initialized])
version_label.setText(version)
coins_label.setText(coins)
clear_pin_button.setVisible(features.pin_protection)
clear_pin_warning.setVisible(features.pin_protection)
pin_button.setText(setchange[features.pin_protection])
pin_msg.setVisible(not features.pin_protection)
passphrase_button.setText(endis[features.passphrase_protection])
language_label.setText(features.language)
def set_label_enabled():
label_apply.setEnabled(label_edit.text() != self.features.label)
def rename():
get_client().change_label(unicode(label_edit.text()))
update()
def toggle_passphrase():
title = _("Confirm Toggle Passphrase Protection")
msg = _("This will cause your Electrum wallet to be unpaired "
"unless your passphrase was or will be empty.\n\n"
"This is because addresses will no "
"longer correspond to those used by your %s.\n\n"
"You will need to create a new Electrum wallet "
"with the install wizard so that they match.\n\n"
"Are you sure you want to proceed?") % plugin.device
if not self.question(msg, title=title):
return
get_client().toggle_passphrase()
devmgr.unpair(hid_id)
update()
def change_homescreen():
dialog = QFileDialog(self, _("Choose Homescreen"))
filename = dialog.getOpenFileName()
if filename:
im = Image.open(str(filename))
if im.size != (hs_cols, hs_rows):
raise Exception('Image must be 64 x 128 pixels')
im = im.convert('1')
pix = im.load()
img = ''
for j in range(hs_rows):
for i in range(hs_cols):
img += '1' if pix[i, j] else '0'
img = ''.join(chr(int(img[i:i + 8], 2))
for i in range(0, len(img), 8))
get_client().change_homescreen(img)
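        # change_homescreen() converts the chosen image to 1-bit monochrome and
        # packs it row-major, 8 pixels per byte (first pixel in the most
        # significant bit) before sending it to the device.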
def clear_homescreen():
get_client().change_homescreen('\x00')
def set_pin(remove=False):
get_client().set_pin(remove=remove)
update()
def clear_pin():
set_pin(remove=True)
def wipe_device():
if wallet and sum(wallet.get_balance()):
title = _("Confirm Device Wipe")
msg = _("Are you SURE you want to wipe the device?\n"
"Your wallet still has bitcoins in it!")
if not self.question(msg, title=title,
icon=QMessageBox.Critical):
return
get_client().wipe_device()
devmgr.unpair(hid_id)
update()
def slider_moved():
mins = timeout_slider.sliderPosition()
timeout_minutes.setText(_("%2d minutes") % mins)
def slider_released():
seconds = timeout_slider.sliderPosition() * 60
wallet.set_session_timeout(seconds)
dialog_vbox = QVBoxLayout(self)
# Information tab
info_tab = QWidget()
info_layout = QVBoxLayout(info_tab)
info_glayout = QGridLayout()
info_glayout.setColumnStretch(2, 1)
device_label = QLabel()
pin_set_label = QLabel()
version_label = QLabel()
device_id_label = QLabel()
serial_number_label = QLabel()
bl_hash_label = QLabel()
bl_hash_label.setWordWrap(True)
coins_label = QLabel()
coins_label.setWordWrap(True)
language_label = QLabel()
initialized_label = QLabel()
rows = [
(_("Device Label"), device_label),
(_("PIN set"), pin_set_label),
(_("Firmware Version"), version_label),
(_("Device ID"), device_id_label),
(_("Serial Number"), serial_number_label),
(_("Bootloader Hash"), bl_hash_label),
(_("Supported Coins"), coins_label),
(_("Language"), language_label),
(_("Initialized"), initialized_label),
]
for row_num, (label, widget) in enumerate(rows):
info_glayout.addWidget(QLabel(label), row_num, 0)
info_glayout.addWidget(widget, row_num, 1)
info_layout.addLayout(info_glayout)
# Settings tab
settings_tab = QWidget()
settings_layout = QVBoxLayout(settings_tab)
settings_glayout = QGridLayout()
# Settings tab - Label
        label_msg = QLabel(_("Name this %s. If you have multiple devices "
"their labels help distinguish them.")
% plugin.device)
label_msg.setWordWrap(True)
label_label = QLabel(_("Device Label"))
label_edit = QLineEdit()
label_edit.setMinimumWidth(150)
label_edit.setMaxLength(plugin.MAX_LABEL_LEN)
label_apply = QPushButton(_("Apply"))
label_apply.clicked.connect(rename)
label_edit.textChanged.connect(set_label_enabled)
settings_glayout.addWidget(label_label, 0, 0)
settings_glayout.addWidget(label_edit, 0, 1, 1, 2)
settings_glayout.addWidget(label_apply, 0, 3)
settings_glayout.addWidget(label_msg, 1, 1, 1, -1)
# Settings tab - PIN
pin_label = QLabel(_("PIN Protection"))
pin_button = QPushButton()
pin_button.clicked.connect(set_pin)
settings_glayout.addWidget(pin_label, 2, 0)
settings_glayout.addWidget(pin_button, 2, 1)
pin_msg = QLabel(_("PIN protection is strongly recommended. "
"A PIN is your only protection against someone "
"stealing your bitcoins if they obtain physical "
"access to your %s.") % plugin.device)
pin_msg.setWordWrap(True)
pin_msg.setStyleSheet("color: red")
settings_glayout.addWidget(pin_msg, 3, 1, 1, -1)
# Settings tab - Homescreen
homescreen_layout = QHBoxLayout()
homescreen_label = QLabel(_("Homescreen"))
homescreen_change_button = QPushButton(_("Change..."))
homescreen_clear_button = QPushButton(_("Reset"))
homescreen_change_button.clicked.connect(change_homescreen)
homescreen_clear_button.clicked.connect(clear_homescreen)
homescreen_msg = QLabel(_("You can set the homescreen on your device "
"to personalize it. You must choose a "
"%d x %d monochrome black and white image.")
% (hs_rows, hs_cols))
homescreen_msg.setWordWrap(True)
settings_glayout.addWidget(homescreen_label, 4, 0)
settings_glayout.addWidget(homescreen_change_button, 4, 1)
settings_glayout.addWidget(homescreen_clear_button, 4, 2)
settings_glayout.addWidget(homescreen_msg, 5, 1, 1, -1)
# Settings tab - Session Timeout
if wallet:
timeout_label = QLabel(_("Session Timeout"))
timeout_minutes = QLabel()
timeout_slider = QSlider(Qt.Horizontal)
timeout_slider.setRange(1, 60)
timeout_slider.setSingleStep(1)
timeout_slider.setTickInterval(5)
timeout_slider.setTickPosition(QSlider.TicksBelow)
timeout_slider.setTracking(True)
timeout_msg = QLabel(
_("Clear the session after the specified period "
"of inactivity. Once a session has timed out, "
"your PIN and passphrase (if enabled) must be "
"re-entered to use the device."))
timeout_msg.setWordWrap(True)
timeout_slider.setSliderPosition(wallet.session_timeout // 60)
slider_moved()
timeout_slider.valueChanged.connect(slider_moved)
timeout_slider.sliderReleased.connect(slider_released)
settings_glayout.addWidget(timeout_label, 6, 0)
settings_glayout.addWidget(timeout_slider, 6, 1, 1, 3)
settings_glayout.addWidget(timeout_minutes, 6, 4)
settings_glayout.addWidget(timeout_msg, 7, 1, 1, -1)
settings_layout.addLayout(settings_glayout)
settings_layout.addStretch(1)
# Advanced tab
advanced_tab = QWidget()
advanced_layout = QVBoxLayout(advanced_tab)
advanced_glayout = QGridLayout()
# Advanced tab - clear PIN
clear_pin_button = QPushButton(_("Disable PIN"))
clear_pin_button.clicked.connect(clear_pin)
clear_pin_warning = QLabel(
_("If you disable your PIN, anyone with physical access to your "
"%s device can spend your bitcoins.") % plugin.device)
clear_pin_warning.setWordWrap(True)
clear_pin_warning.setStyleSheet("color: red")
advanced_glayout.addWidget(clear_pin_button, 0, 2)
advanced_glayout.addWidget(clear_pin_warning, 1, 0, 1, 5)
# Advanced tab - toggle passphrase protection
passphrase_button = QPushButton()
passphrase_button.clicked.connect(toggle_passphrase)
passphrase_msg = QLabel(
_("Passphrases allow you to access new wallets, each "
"hidden behind a particular case-sensitive passphrase. You "
"need to create a separate Electrum wallet for each passphrase "
"you use as they each generate different addresses. Changing "
"your passphrase does not lose other wallets, each is still "
"accessible behind its own passphrase."))
passphrase_msg.setWordWrap(True)
passphrase_warning = QLabel(
_("If you forget a passphrase you will be unable to access any "
"bitcoins in the wallet behind it. A passphrase is not a PIN. "
"Only change this if you are sure you understand it."))
passphrase_warning.setWordWrap(True)
passphrase_warning.setStyleSheet("color: red")
advanced_glayout.addWidget(passphrase_button, 3, 2)
advanced_glayout.addWidget(passphrase_msg, 4, 0, 1, 5)
advanced_glayout.addWidget(passphrase_warning, 5, 0, 1, 5)
# Advanced tab - wipe device
wipe_device_button = QPushButton(_("Wipe Device"))
wipe_device_button.clicked.connect(wipe_device)
wipe_device_msg = QLabel(
_("Wipe the device, removing all data from it. The firmware "
"is left unchanged."))
wipe_device_msg.setWordWrap(True)
wipe_device_warning = QLabel(
_("Only wipe a device if you have the recovery seed written down "
"and the device wallet(s) are empty, otherwise the bitcoins "
"will be lost forever."))
wipe_device_warning.setWordWrap(True)
wipe_device_warning.setStyleSheet("color: red")
advanced_glayout.addWidget(wipe_device_button, 6, 2)
advanced_glayout.addWidget(wipe_device_msg, 7, 0, 1, 5)
advanced_glayout.addWidget(wipe_device_warning, 8, 0, 1, 5)
advanced_layout.addLayout(advanced_glayout)
advanced_layout.addStretch(1)
tabs = QTabWidget(self)
tabs.addTab(info_tab, _("Information"))
tabs.addTab(settings_tab, _("Settings"))
tabs.addTab(advanced_tab, _("Advanced"))
# Update information
update()
dialog_vbox.addWidget(tabs)
dialog_vbox.addLayout(Buttons(CloseButton(self)))
| joelstanner/electrum | plugins/trezor/qt_generic.py | Python | gpl-3.0 | 24,003 | 0.00025 |
"""
SALTS XBMC Addon
Copyright (C) 2014 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import urlparse
import urllib
import kodi
import log_utils
import dom_parser
from salts_lib import scraper_utils
from salts_lib.constants import FORCE_NO_MATCH
from salts_lib.constants import VIDEO_TYPES
from salts_lib.constants import QUALITIES
import scraper
BASE_URL = 'http://onlinedizi.co'
class Scraper(scraper.Scraper):
base_url = BASE_URL
def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):
self.timeout = timeout
self.base_url = kodi.get_setting('%s-base_url' % (self.get_name()))
@classmethod
def provides(cls):
return frozenset([VIDEO_TYPES.TVSHOW, VIDEO_TYPES.EPISODE])
@classmethod
def get_name(cls):
return 'OnlineDizi'
def get_sources(self, video):
source_url = self.get_url(video)
hosters = []
sources = []
if source_url and source_url != FORCE_NO_MATCH:
page_url = urlparse.urljoin(self.base_url, source_url)
html = self._http_get(page_url, cache_limit=.25)
fragment = dom_parser.parse_dom(html, 'ul', {'class': 'dropdown-menu'})
if fragment:
match = re.search('''href=['"]([^'"]+)[^>]*>(?:Altyaz.{1,3}s.{1,3}z)<''', fragment[0])
if match:
option_url = urlparse.urljoin(self.base_url, match.group(1))
html = self._http_get(option_url, cache_limit=2)
fragment = dom_parser.parse_dom(html, 'div', {'class': 'video-player'})
if fragment:
iframe_url = dom_parser.parse_dom(fragment[0], 'iframe', ret='src')
if iframe_url:
html = self._http_get(iframe_url[0], cache_limit=.25)
iframe_url = dom_parser.parse_dom(html, 'iframe', {'id': 'ifr'}, ret='src')
if iframe_url:
html = self._http_get(iframe_url[0], allow_redirect=False, method='HEAD', cache_limit=.25)
if html.startswith('http'):
sources.append(html)
for match in re.finditer('"((?:\\\\x[A-Fa-f0-9]+)+)"', html):
s = match.group(1).replace('\\x', '').decode('hex')
if s.startswith('http'):
s = urllib.unquote(s)
match = re.search('videoPlayerMetadata&mid=(\d+)', s)
if match:
s = 'http://ok.ru/video/%s' % (match.group(1))
sources.append(s)
for stream_url in sources:
host = urlparse.urlparse(stream_url).hostname
quality = QUALITIES.HIGH
hoster = {'multi-part': False, 'host': host, 'class': self, 'quality': quality, 'views': None, 'rating': None, 'url': stream_url, 'direct': False}
hosters.append(hoster)
return hosters
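    # Note: the loop above decodes '\xNN'-escaped URLs embedded in the embed
    # page's JavaScript and rewrites 'videoPlayerMetadata&mid=<id>' links to
    # plain http://ok.ru/video/<id> pages before each stream is wrapped in a
    # hoster entry.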
def _get_episode_url(self, show_url, video):
episode_pattern = '''href=['"]([^'"]+-%s-sezon-%s-bolum[^'"]*)''' % (video.season, video.episode)
return self._default_get_episode_url(show_url, video, episode_pattern)
def search(self, video_type, title, year, season=''):
html = self._http_get(self.base_url, cache_limit=48)
results = []
seen_urls = {}
norm_title = scraper_utils.normalize_title(title)
for fragment in dom_parser.parse_dom(html, 'ul', {'class': '[^"]*all-series-list[^"]*'}):
for match in re.finditer('''href=["']([^'"]+)[^>]+>([^<]+)''', fragment):
url, match_title = match.groups()
if url not in seen_urls:
seen_urls[url] = True
if norm_title in scraper_utils.normalize_title(match_title):
result = {'url': scraper_utils.pathify_url(url), 'title': scraper_utils.cleanse_title(match_title), 'year': ''}
results.append(result)
return results
| felipenaselva/repo.felipe | plugin.video.salts/scrapers/onlinedizi_scraper.py | Python | gpl-2.0 | 4,998 | 0.005002 |
from __future__ import division, print_function, absolute_import
import itertools
from numpy.testing import (assert_, assert_equal, assert_almost_equal,
assert_array_almost_equal, assert_array_equal,
assert_allclose)
from pytest import raises as assert_raises
from numpy import mgrid, pi, sin, ogrid, poly1d, linspace
import numpy as np
from scipy._lib.six import xrange
from scipy._lib._numpy_compat import _assert_warns, suppress_warnings
from scipy.interpolate import (interp1d, interp2d, lagrange, PPoly, BPoly,
splrep, splev, splantider, splint, sproot, Akima1DInterpolator,
RegularGridInterpolator, LinearNDInterpolator, NearestNDInterpolator,
RectBivariateSpline, interpn, NdPPoly, BSpline)
from scipy.special import poch, gamma
from scipy.interpolate import _ppoly
from scipy._lib._gcutils import assert_deallocated
from scipy.integrate import nquad
from scipy.special import binom
class TestInterp2D(object):
def test_interp2d(self):
y, x = mgrid[0:2:20j, 0:pi:21j]
z = sin(x+0.5*y)
I = interp2d(x, y, z)
assert_almost_equal(I(1.0, 2.0), sin(2.0), decimal=2)
v,u = ogrid[0:2:24j, 0:pi:25j]
assert_almost_equal(I(u.ravel(), v.ravel()), sin(u+0.5*v), decimal=2)
def test_interp2d_meshgrid_input(self):
# Ticket #703
x = linspace(0, 2, 16)
y = linspace(0, pi, 21)
z = sin(x[None,:] + y[:,None]/2.)
I = interp2d(x, y, z)
assert_almost_equal(I(1.0, 2.0), sin(2.0), decimal=2)
def test_interp2d_meshgrid_input_unsorted(self):
np.random.seed(1234)
x = linspace(0, 2, 16)
y = linspace(0, pi, 21)
z = sin(x[None,:] + y[:,None]/2.)
ip1 = interp2d(x.copy(), y.copy(), z, kind='cubic')
np.random.shuffle(x)
z = sin(x[None,:] + y[:,None]/2.)
ip2 = interp2d(x.copy(), y.copy(), z, kind='cubic')
np.random.shuffle(x)
np.random.shuffle(y)
z = sin(x[None,:] + y[:,None]/2.)
ip3 = interp2d(x, y, z, kind='cubic')
x = linspace(0, 2, 31)
y = linspace(0, pi, 30)
assert_equal(ip1(x, y), ip2(x, y))
assert_equal(ip1(x, y), ip3(x, y))
def test_interp2d_eval_unsorted(self):
y, x = mgrid[0:2:20j, 0:pi:21j]
z = sin(x + 0.5*y)
func = interp2d(x, y, z)
xe = np.array([3, 4, 5])
ye = np.array([5.3, 7.1])
assert_allclose(func(xe, ye), func(xe, ye[::-1]))
assert_raises(ValueError, func, xe, ye[::-1], 0, 0, True)
def test_interp2d_linear(self):
# Ticket #898
a = np.zeros([5, 5])
a[2, 2] = 1.0
x = y = np.arange(5)
b = interp2d(x, y, a, 'linear')
assert_almost_equal(b(2.0, 1.5), np.array([0.5]), decimal=2)
assert_almost_equal(b(2.0, 2.5), np.array([0.5]), decimal=2)
def test_interp2d_bounds(self):
x = np.linspace(0, 1, 5)
y = np.linspace(0, 2, 7)
z = x[None, :]**2 + y[:, None]
ix = np.linspace(-1, 3, 31)
iy = np.linspace(-1, 3, 33)
b = interp2d(x, y, z, bounds_error=True)
assert_raises(ValueError, b, ix, iy)
b = interp2d(x, y, z, fill_value=np.nan)
iz = b(ix, iy)
mx = (ix < 0) | (ix > 1)
my = (iy < 0) | (iy > 2)
assert_(np.isnan(iz[my,:]).all())
assert_(np.isnan(iz[:,mx]).all())
assert_(np.isfinite(iz[~my,:][:,~mx]).all())
class TestInterp1D(object):
def setup_method(self):
self.x5 = np.arange(5.)
self.x10 = np.arange(10.)
self.y10 = np.arange(10.)
self.x25 = self.x10.reshape((2,5))
self.x2 = np.arange(2.)
self.y2 = np.arange(2.)
self.x1 = np.array([0.])
self.y1 = np.array([0.])
self.y210 = np.arange(20.).reshape((2, 10))
self.y102 = np.arange(20.).reshape((10, 2))
self.y225 = np.arange(20.).reshape((2, 2, 5))
self.y25 = np.arange(10.).reshape((2, 5))
self.y235 = np.arange(30.).reshape((2, 3, 5))
self.y325 = np.arange(30.).reshape((3, 2, 5))
self.fill_value = -100.0
def test_validation(self):
# Make sure that appropriate exceptions are raised when invalid values
# are given to the constructor.
# These should all work.
for kind in ('nearest', 'zero', 'linear', 'slinear', 'quadratic', 'cubic'):
interp1d(self.x10, self.y10, kind=kind)
interp1d(self.x10, self.y10, kind=kind, fill_value="extrapolate")
interp1d(self.x10, self.y10, kind='linear', fill_value=(-1, 1))
interp1d(self.x10, self.y10, kind='linear',
fill_value=np.array([-1]))
interp1d(self.x10, self.y10, kind='linear',
fill_value=(-1,))
interp1d(self.x10, self.y10, kind='linear',
fill_value=-1)
interp1d(self.x10, self.y10, kind='linear',
fill_value=(-1, -1))
interp1d(self.x10, self.y10, kind=0)
interp1d(self.x10, self.y10, kind=1)
interp1d(self.x10, self.y10, kind=2)
interp1d(self.x10, self.y10, kind=3)
interp1d(self.x10, self.y210, kind='linear', axis=-1,
fill_value=(-1, -1))
interp1d(self.x2, self.y210, kind='linear', axis=0,
fill_value=np.ones(10))
interp1d(self.x2, self.y210, kind='linear', axis=0,
fill_value=(np.ones(10), np.ones(10)))
interp1d(self.x2, self.y210, kind='linear', axis=0,
fill_value=(np.ones(10), -1))
# x array must be 1D.
assert_raises(ValueError, interp1d, self.x25, self.y10)
# y array cannot be a scalar.
assert_raises(ValueError, interp1d, self.x10, np.array(0))
# Check for x and y arrays having the same length.
assert_raises(ValueError, interp1d, self.x10, self.y2)
assert_raises(ValueError, interp1d, self.x2, self.y10)
assert_raises(ValueError, interp1d, self.x10, self.y102)
interp1d(self.x10, self.y210)
interp1d(self.x10, self.y102, axis=0)
# Check for x and y having at least 1 element.
assert_raises(ValueError, interp1d, self.x1, self.y10)
assert_raises(ValueError, interp1d, self.x10, self.y1)
assert_raises(ValueError, interp1d, self.x1, self.y1)
# Bad fill values
assert_raises(ValueError, interp1d, self.x10, self.y10, kind='linear',
fill_value=(-1, -1, -1)) # doesn't broadcast
assert_raises(ValueError, interp1d, self.x10, self.y10, kind='linear',
fill_value=[-1, -1, -1]) # doesn't broadcast
assert_raises(ValueError, interp1d, self.x10, self.y10, kind='linear',
fill_value=np.array((-1, -1, -1))) # doesn't broadcast
assert_raises(ValueError, interp1d, self.x10, self.y10, kind='linear',
fill_value=[[-1]]) # doesn't broadcast
assert_raises(ValueError, interp1d, self.x10, self.y10, kind='linear',
fill_value=[-1, -1]) # doesn't broadcast
assert_raises(ValueError, interp1d, self.x10, self.y10, kind='linear',
fill_value=np.array([])) # doesn't broadcast
assert_raises(ValueError, interp1d, self.x10, self.y10, kind='linear',
fill_value=()) # doesn't broadcast
assert_raises(ValueError, interp1d, self.x2, self.y210, kind='linear',
axis=0, fill_value=[-1, -1]) # doesn't broadcast
assert_raises(ValueError, interp1d, self.x2, self.y210, kind='linear',
axis=0, fill_value=(0., [-1, -1])) # above doesn't bc
def test_init(self):
# Check that the attributes are initialized appropriately by the
# constructor.
assert_(interp1d(self.x10, self.y10).copy)
assert_(not interp1d(self.x10, self.y10, copy=False).copy)
assert_(interp1d(self.x10, self.y10).bounds_error)
assert_(not interp1d(self.x10, self.y10, bounds_error=False).bounds_error)
assert_(np.isnan(interp1d(self.x10, self.y10).fill_value))
assert_equal(interp1d(self.x10, self.y10, fill_value=3.0).fill_value,
3.0)
assert_equal(interp1d(self.x10, self.y10, fill_value=(1.0, 2.0)).fill_value,
(1.0, 2.0))
assert_equal(interp1d(self.x10, self.y10).axis, 0)
assert_equal(interp1d(self.x10, self.y210).axis, 1)
assert_equal(interp1d(self.x10, self.y102, axis=0).axis, 0)
assert_array_equal(interp1d(self.x10, self.y10).x, self.x10)
assert_array_equal(interp1d(self.x10, self.y10).y, self.y10)
assert_array_equal(interp1d(self.x10, self.y210).y, self.y210)
def test_assume_sorted(self):
# Check for unsorted arrays
interp10 = interp1d(self.x10, self.y10)
interp10_unsorted = interp1d(self.x10[::-1], self.y10[::-1])
assert_array_almost_equal(interp10_unsorted(self.x10), self.y10)
assert_array_almost_equal(interp10_unsorted(1.2), np.array([1.2]))
assert_array_almost_equal(interp10_unsorted([2.4, 5.6, 6.0]),
interp10([2.4, 5.6, 6.0]))
# Check assume_sorted keyword (defaults to False)
interp10_assume_kw = interp1d(self.x10[::-1], self.y10[::-1],
assume_sorted=False)
assert_array_almost_equal(interp10_assume_kw(self.x10), self.y10)
interp10_assume_kw2 = interp1d(self.x10[::-1], self.y10[::-1],
assume_sorted=True)
# Should raise an error for unsorted input if assume_sorted=True
assert_raises(ValueError, interp10_assume_kw2, self.x10)
# Check that if y is a 2-D array, things are still consistent
interp10_y_2d = interp1d(self.x10, self.y210)
interp10_y_2d_unsorted = interp1d(self.x10[::-1], self.y210[:, ::-1])
assert_array_almost_equal(interp10_y_2d(self.x10),
interp10_y_2d_unsorted(self.x10))
def test_linear(self):
for kind in ['linear', 'slinear']:
self._check_linear(kind)
def _check_linear(self, kind):
# Check the actual implementation of linear interpolation.
interp10 = interp1d(self.x10, self.y10, kind=kind)
assert_array_almost_equal(interp10(self.x10), self.y10)
assert_array_almost_equal(interp10(1.2), np.array([1.2]))
assert_array_almost_equal(interp10([2.4, 5.6, 6.0]),
np.array([2.4, 5.6, 6.0]))
# test fill_value="extrapolate"
extrapolator = interp1d(self.x10, self.y10, kind=kind,
fill_value='extrapolate')
assert_allclose(extrapolator([-1., 0, 9, 11]),
[-1, 0, 9, 11], rtol=1e-14)
opts = dict(kind=kind,
fill_value='extrapolate',
bounds_error=True)
assert_raises(ValueError, interp1d, self.x10, self.y10, **opts)
def test_linear_dtypes(self):
# regression test for gh-5898, where 1D linear interpolation has been
# delegated to numpy.interp for all float dtypes, and the latter was
# not handling e.g. np.float128.
for dtyp in np.sctypes["float"]:
x = np.arange(8, dtype=dtyp)
y = x
yp = interp1d(x, y, kind='linear')(x)
assert_equal(yp.dtype, dtyp)
assert_allclose(yp, y, atol=1e-15)
def test_slinear_dtypes(self):
# regression test for gh-7273: 1D slinear interpolation fails with
# float32 inputs
dt_r = [np.float16, np.float32, np.float64]
dt_rc = dt_r + [np.complex64, np.complex128]
spline_kinds = ['slinear', 'zero', 'quadratic', 'cubic']
for dtx in dt_r:
x = np.arange(0, 10, dtype=dtx)
for dty in dt_rc:
y = np.exp(-x/3.0).astype(dty)
for dtn in dt_r:
xnew = x.astype(dtn)
for kind in spline_kinds:
f = interp1d(x, y, kind=kind, bounds_error=False)
assert_allclose(f(xnew), y, atol=1e-7,
err_msg="%s, %s %s" % (dtx, dty, dtn))
def test_cubic(self):
# Check the actual implementation of spline interpolation.
interp10 = interp1d(self.x10, self.y10, kind='cubic')
assert_array_almost_equal(interp10(self.x10), self.y10)
assert_array_almost_equal(interp10(1.2), np.array([1.2]))
assert_array_almost_equal(interp10([2.4, 5.6, 6.0]),
np.array([2.4, 5.6, 6.0]),)
def test_nearest(self):
# Check the actual implementation of nearest-neighbour interpolation.
interp10 = interp1d(self.x10, self.y10, kind='nearest')
assert_array_almost_equal(interp10(self.x10), self.y10)
assert_array_almost_equal(interp10(1.2), np.array(1.))
assert_array_almost_equal(interp10([2.4, 5.6, 6.0]),
np.array([2., 6., 6.]),)
# test fill_value="extrapolate"
extrapolator = interp1d(self.x10, self.y10, kind='nearest',
fill_value='extrapolate')
assert_allclose(extrapolator([-1., 0, 9, 11]),
[0, 0, 9, 9], rtol=1e-14)
opts = dict(kind='nearest',
fill_value='extrapolate',
bounds_error=True)
assert_raises(ValueError, interp1d, self.x10, self.y10, **opts)
def test_zero(self):
# Check the actual implementation of zero-order spline interpolation.
interp10 = interp1d(self.x10, self.y10, kind='zero')
assert_array_almost_equal(interp10(self.x10), self.y10)
assert_array_almost_equal(interp10(1.2), np.array(1.))
assert_array_almost_equal(interp10([2.4, 5.6, 6.0]),
np.array([2., 5., 6.]))
def _bounds_check(self, kind='linear'):
# Test that our handling of out-of-bounds input is correct.
extrap10 = interp1d(self.x10, self.y10, fill_value=self.fill_value,
bounds_error=False, kind=kind)
assert_array_equal(extrap10(11.2), np.array(self.fill_value))
assert_array_equal(extrap10(-3.4), np.array(self.fill_value))
assert_array_equal(extrap10([[[11.2], [-3.4], [12.6], [19.3]]]),
np.array(self.fill_value),)
assert_array_equal(extrap10._check_bounds(
np.array([-1.0, 0.0, 5.0, 9.0, 11.0])),
np.array([[True, False, False, False, False],
[False, False, False, False, True]]))
raises_bounds_error = interp1d(self.x10, self.y10, bounds_error=True,
kind=kind)
assert_raises(ValueError, raises_bounds_error, -1.0)
assert_raises(ValueError, raises_bounds_error, 11.0)
raises_bounds_error([0.0, 5.0, 9.0])
def _bounds_check_int_nan_fill(self, kind='linear'):
x = np.arange(10).astype(np.int_)
y = np.arange(10).astype(np.int_)
c = interp1d(x, y, kind=kind, fill_value=np.nan, bounds_error=False)
yi = c(x - 1)
assert_(np.isnan(yi[0]))
assert_array_almost_equal(yi, np.r_[np.nan, y[:-1]])
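        # Note: requesting a NaN fill value forces float output even though x
        # and y are integer arrays; the shifted query x - 1 puts the first
        # point out of bounds, so it comes back as NaN.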
def test_bounds(self):
for kind in ('linear', 'cubic', 'nearest',
'slinear', 'zero', 'quadratic'):
self._bounds_check(kind)
self._bounds_check_int_nan_fill(kind)
def _check_fill_value(self, kind):
interp = interp1d(self.x10, self.y10, kind=kind,
fill_value=(-100, 100), bounds_error=False)
assert_array_almost_equal(interp(10), 100)
assert_array_almost_equal(interp(-10), -100)
assert_array_almost_equal(interp([-10, 10]), [-100, 100])
# Proper broadcasting:
# interp along axis of length 5
# other dim=(2, 3), (3, 2), (2, 2), or (2,)
# one singleton fill_value (works for all)
for y in (self.y235, self.y325, self.y225, self.y25):
interp = interp1d(self.x5, y, kind=kind, axis=-1,
fill_value=100, bounds_error=False)
assert_array_almost_equal(interp(10), 100)
assert_array_almost_equal(interp(-10), 100)
assert_array_almost_equal(interp([-10, 10]), 100)
# singleton lower, singleton upper
interp = interp1d(self.x5, y, kind=kind, axis=-1,
fill_value=(-100, 100), bounds_error=False)
assert_array_almost_equal(interp(10), 100)
assert_array_almost_equal(interp(-10), -100)
if y.ndim == 3:
result = [[[-100, 100]] * y.shape[1]] * y.shape[0]
else:
result = [[-100, 100]] * y.shape[0]
assert_array_almost_equal(interp([-10, 10]), result)
# one broadcastable (3,) fill_value
fill_value = [100, 200, 300]
for y in (self.y325, self.y225):
assert_raises(ValueError, interp1d, self.x5, y, kind=kind,
axis=-1, fill_value=fill_value, bounds_error=False)
interp = interp1d(self.x5, self.y235, kind=kind, axis=-1,
fill_value=fill_value, bounds_error=False)
assert_array_almost_equal(interp(10), [[100, 200, 300]] * 2)
assert_array_almost_equal(interp(-10), [[100, 200, 300]] * 2)
assert_array_almost_equal(interp([-10, 10]), [[[100, 100],
[200, 200],
[300, 300]]] * 2)
# one broadcastable (2,) fill_value
fill_value = [100, 200]
assert_raises(ValueError, interp1d, self.x5, self.y235, kind=kind,
axis=-1, fill_value=fill_value, bounds_error=False)
for y in (self.y225, self.y325, self.y25):
interp = interp1d(self.x5, y, kind=kind, axis=-1,
fill_value=fill_value, bounds_error=False)
result = [100, 200]
if y.ndim == 3:
result = [result] * y.shape[0]
assert_array_almost_equal(interp(10), result)
assert_array_almost_equal(interp(-10), result)
result = [[100, 100], [200, 200]]
if y.ndim == 3:
result = [result] * y.shape[0]
assert_array_almost_equal(interp([-10, 10]), result)
# broadcastable (3,) lower, singleton upper
fill_value = (np.array([-100, -200, -300]), 100)
for y in (self.y325, self.y225):
assert_raises(ValueError, interp1d, self.x5, y, kind=kind,
axis=-1, fill_value=fill_value, bounds_error=False)
interp = interp1d(self.x5, self.y235, kind=kind, axis=-1,
fill_value=fill_value, bounds_error=False)
assert_array_almost_equal(interp(10), 100)
assert_array_almost_equal(interp(-10), [[-100, -200, -300]] * 2)
assert_array_almost_equal(interp([-10, 10]), [[[-100, 100],
[-200, 100],
[-300, 100]]] * 2)
# broadcastable (2,) lower, singleton upper
fill_value = (np.array([-100, -200]), 100)
assert_raises(ValueError, interp1d, self.x5, self.y235, kind=kind,
axis=-1, fill_value=fill_value, bounds_error=False)
for y in (self.y225, self.y325, self.y25):
interp = interp1d(self.x5, y, kind=kind, axis=-1,
fill_value=fill_value, bounds_error=False)
assert_array_almost_equal(interp(10), 100)
result = [-100, -200]
if y.ndim == 3:
result = [result] * y.shape[0]
assert_array_almost_equal(interp(-10), result)
result = [[-100, 100], [-200, 100]]
if y.ndim == 3:
result = [result] * y.shape[0]
assert_array_almost_equal(interp([-10, 10]), result)
# broadcastable (3,) lower, broadcastable (3,) upper
fill_value = ([-100, -200, -300], [100, 200, 300])
for y in (self.y325, self.y225):
assert_raises(ValueError, interp1d, self.x5, y, kind=kind,
axis=-1, fill_value=fill_value, bounds_error=False)
for ii in range(2): # check ndarray as well as list here
if ii == 1:
fill_value = tuple(np.array(f) for f in fill_value)
interp = interp1d(self.x5, self.y235, kind=kind, axis=-1,
fill_value=fill_value, bounds_error=False)
assert_array_almost_equal(interp(10), [[100, 200, 300]] * 2)
assert_array_almost_equal(interp(-10), [[-100, -200, -300]] * 2)
assert_array_almost_equal(interp([-10, 10]), [[[-100, 100],
[-200, 200],
[-300, 300]]] * 2)
# broadcastable (2,) lower, broadcastable (2,) upper
fill_value = ([-100, -200], [100, 200])
assert_raises(ValueError, interp1d, self.x5, self.y235, kind=kind,
axis=-1, fill_value=fill_value, bounds_error=False)
for y in (self.y325, self.y225, self.y25):
interp = interp1d(self.x5, y, kind=kind, axis=-1,
fill_value=fill_value, bounds_error=False)
result = [100, 200]
if y.ndim == 3:
result = [result] * y.shape[0]
assert_array_almost_equal(interp(10), result)
result = [-100, -200]
if y.ndim == 3:
result = [result] * y.shape[0]
assert_array_almost_equal(interp(-10), result)
result = [[-100, 100], [-200, 200]]
if y.ndim == 3:
result = [result] * y.shape[0]
assert_array_almost_equal(interp([-10, 10]), result)
# one broadcastable (2, 2) array-like
fill_value = [[100, 200], [1000, 2000]]
for y in (self.y235, self.y325, self.y25):
assert_raises(ValueError, interp1d, self.x5, y, kind=kind,
axis=-1, fill_value=fill_value, bounds_error=False)
for ii in range(2):
if ii == 1:
fill_value = np.array(fill_value)
interp = interp1d(self.x5, self.y225, kind=kind, axis=-1,
fill_value=fill_value, bounds_error=False)
assert_array_almost_equal(interp(10), [[100, 200], [1000, 2000]])
assert_array_almost_equal(interp(-10), [[100, 200], [1000, 2000]])
assert_array_almost_equal(interp([-10, 10]), [[[100, 100],
[200, 200]],
[[1000, 1000],
[2000, 2000]]])
# broadcastable (2, 2) lower, broadcastable (2, 2) upper
fill_value = ([[-100, -200], [-1000, -2000]],
[[100, 200], [1000, 2000]])
for y in (self.y235, self.y325, self.y25):
assert_raises(ValueError, interp1d, self.x5, y, kind=kind,
axis=-1, fill_value=fill_value, bounds_error=False)
for ii in range(2):
if ii == 1:
fill_value = (np.array(fill_value[0]), np.array(fill_value[1]))
interp = interp1d(self.x5, self.y225, kind=kind, axis=-1,
fill_value=fill_value, bounds_error=False)
assert_array_almost_equal(interp(10), [[100, 200], [1000, 2000]])
assert_array_almost_equal(interp(-10), [[-100, -200],
[-1000, -2000]])
assert_array_almost_equal(interp([-10, 10]), [[[-100, 100],
[-200, 200]],
[[-1000, 1000],
[-2000, 2000]]])
def test_fill_value(self):
# test that two-element fill value works
for kind in ('linear', 'nearest', 'cubic', 'slinear', 'quadratic',
'zero'):
self._check_fill_value(kind)
def test_fill_value_writeable(self):
# backwards compat: fill_value is a public writeable attribute
interp = interp1d(self.x10, self.y10, fill_value=123.0)
assert_equal(interp.fill_value, 123.0)
interp.fill_value = 321.0
assert_equal(interp.fill_value, 321.0)
def _nd_check_interp(self, kind='linear'):
# Check the behavior when the inputs and outputs are multidimensional.
# Multidimensional input.
interp10 = interp1d(self.x10, self.y10, kind=kind)
assert_array_almost_equal(interp10(np.array([[3., 5.], [2., 7.]])),
np.array([[3., 5.], [2., 7.]]))
# Scalar input -> 0-dim scalar array output
assert_(isinstance(interp10(1.2), np.ndarray))
assert_equal(interp10(1.2).shape, ())
# Multidimensional outputs.
interp210 = interp1d(self.x10, self.y210, kind=kind)
assert_array_almost_equal(interp210(1.), np.array([1., 11.]))
assert_array_almost_equal(interp210(np.array([1., 2.])),
np.array([[1., 2.], [11., 12.]]))
interp102 = interp1d(self.x10, self.y102, axis=0, kind=kind)
assert_array_almost_equal(interp102(1.), np.array([2.0, 3.0]))
assert_array_almost_equal(interp102(np.array([1., 3.])),
np.array([[2., 3.], [6., 7.]]))
# Both at the same time!
x_new = np.array([[3., 5.], [2., 7.]])
assert_array_almost_equal(interp210(x_new),
np.array([[[3., 5.], [2., 7.]],
[[13., 15.], [12., 17.]]]))
assert_array_almost_equal(interp102(x_new),
np.array([[[6., 7.], [10., 11.]],
[[4., 5.], [14., 15.]]]))
def _nd_check_shape(self, kind='linear'):
# Check large ndim output shape
a = [4, 5, 6, 7]
y = np.arange(np.prod(a)).reshape(*a)
for n, s in enumerate(a):
x = np.arange(s)
z = interp1d(x, y, axis=n, kind=kind)
assert_array_almost_equal(z(x), y, err_msg=kind)
x2 = np.arange(2*3*1).reshape((2,3,1)) / 12.
b = list(a)
b[n:n+1] = [2,3,1]
assert_array_almost_equal(z(x2).shape, b, err_msg=kind)
def test_nd(self):
for kind in ('linear', 'cubic', 'slinear', 'quadratic', 'nearest',
'zero'):
self._nd_check_interp(kind)
self._nd_check_shape(kind)
def _check_complex(self, dtype=np.complex_, kind='linear'):
x = np.array([1, 2.5, 3, 3.1, 4, 6.4, 7.9, 8.0, 9.5, 10])
y = x * x ** (1 + 2j)
y = y.astype(dtype)
# simple test
c = interp1d(x, y, kind=kind)
assert_array_almost_equal(y[:-1], c(x)[:-1])
# check against interpolating real+imag separately
xi = np.linspace(1, 10, 31)
cr = interp1d(x, y.real, kind=kind)
ci = interp1d(x, y.imag, kind=kind)
assert_array_almost_equal(c(xi).real, cr(xi))
assert_array_almost_equal(c(xi).imag, ci(xi))
def test_complex(self):
for kind in ('linear', 'nearest', 'cubic', 'slinear', 'quadratic',
'zero'):
self._check_complex(np.complex64, kind)
self._check_complex(np.complex128, kind)
def test_circular_refs(self):
# Test interp1d can be automatically garbage collected
x = np.linspace(0, 1)
y = np.linspace(0, 1)
# Confirm interp can be released from memory after use
with assert_deallocated(interp1d, x, y) as interp:
new_y = interp([0.1, 0.2])
del interp
def test_overflow_nearest(self):
# Test that the x range doesn't overflow when given integers as input
x = np.array([0, 50, 127], dtype=np.int8)
ii = interp1d(x, x, kind='nearest')
assert_array_almost_equal(ii(x), x)
def test_local_nans(self):
# check that for local interpolation kinds (slinear, zero) a single nan
# only affects its local neighborhood
x = np.arange(10).astype(float)
y = x.copy()
y[6] = np.nan
for kind in ('zero', 'slinear'):
ir = interp1d(x, y, kind=kind)
vals = ir([4.9, 7.0])
assert_(np.isfinite(vals).all())
def test_spline_nans(self):
# Backwards compat: a single nan makes the whole spline interpolation
# return nans in an array of the correct shape. And it doesn't raise,
# just quiet nans because of backcompat.
x = np.arange(8).astype(float)
y = x.copy()
yn = y.copy()
yn[3] = np.nan
for kind in ['quadratic', 'cubic']:
ir = interp1d(x, y, kind=kind)
irn = interp1d(x, yn, kind=kind)
for xnew in (6, [1, 6], [[1, 6], [3, 5]]):
xnew = np.asarray(xnew)
out, outn = ir(x), irn(x)
assert_(np.isnan(outn).all())
assert_equal(out.shape, outn.shape)
class TestLagrange(object):
def test_lagrange(self):
p = poly1d([5,2,1,4,3])
xs = np.arange(len(p.coeffs))
ys = p(xs)
pl = lagrange(xs,ys)
assert_array_almost_equal(p.coeffs,pl.coeffs)
class TestAkima1DInterpolator(object):
def test_eval(self):
x = np.arange(0., 11.)
y = np.array([0., 2., 1., 3., 2., 6., 5.5, 5.5, 2.7, 5.1, 3.])
ak = Akima1DInterpolator(x, y)
xi = np.array([0., 0.5, 1., 1.5, 2.5, 3.5, 4.5, 5.1, 6.5, 7.2,
8.6, 9.9, 10.])
yi = np.array([0., 1.375, 2., 1.5, 1.953125, 2.484375,
4.1363636363636366866103344, 5.9803623910336236590978842,
5.5067291516462386624652936, 5.2031367459745245795943447,
4.1796554159017080820603951, 3.4110386597938129327189927,
3.])
assert_allclose(ak(xi), yi)
def test_eval_2d(self):
x = np.arange(0., 11.)
y = np.array([0., 2., 1., 3., 2., 6., 5.5, 5.5, 2.7, 5.1, 3.])
y = np.column_stack((y, 2. * y))
ak = Akima1DInterpolator(x, y)
xi = np.array([0., 0.5, 1., 1.5, 2.5, 3.5, 4.5, 5.1, 6.5, 7.2,
8.6, 9.9, 10.])
yi = np.array([0., 1.375, 2., 1.5, 1.953125, 2.484375,
4.1363636363636366866103344,
5.9803623910336236590978842,
5.5067291516462386624652936,
5.2031367459745245795943447,
4.1796554159017080820603951,
3.4110386597938129327189927, 3.])
yi = np.column_stack((yi, 2. * yi))
assert_allclose(ak(xi), yi)
def test_eval_3d(self):
x = np.arange(0., 11.)
y_ = np.array([0., 2., 1., 3., 2., 6., 5.5, 5.5, 2.7, 5.1, 3.])
y = np.empty((11, 2, 2))
y[:, 0, 0] = y_
y[:, 1, 0] = 2. * y_
y[:, 0, 1] = 3. * y_
y[:, 1, 1] = 4. * y_
ak = Akima1DInterpolator(x, y)
xi = np.array([0., 0.5, 1., 1.5, 2.5, 3.5, 4.5, 5.1, 6.5, 7.2,
8.6, 9.9, 10.])
yi = np.empty((13, 2, 2))
yi_ = np.array([0., 1.375, 2., 1.5, 1.953125, 2.484375,
4.1363636363636366866103344,
5.9803623910336236590978842,
5.5067291516462386624652936,
5.2031367459745245795943447,
4.1796554159017080820603951,
3.4110386597938129327189927, 3.])
yi[:, 0, 0] = yi_
yi[:, 1, 0] = 2. * yi_
yi[:, 0, 1] = 3. * yi_
yi[:, 1, 1] = 4. * yi_
assert_allclose(ak(xi), yi)
def test_degenerate_case_multidimensional(self):
# This test is for issue #5683.
x = np.array([0, 1, 2])
y = np.vstack((x, x**2)).T
ak = Akima1DInterpolator(x, y)
x_eval = np.array([0.5, 1.5])
y_eval = ak(x_eval)
assert_allclose(y_eval, np.vstack((x_eval, x_eval**2)).T)
def test_extend(self):
x = np.arange(0., 11.)
y = np.array([0., 2., 1., 3., 2., 6., 5.5, 5.5, 2.7, 5.1, 3.])
ak = Akima1DInterpolator(x, y)
try:
ak.extend(None, None)
except NotImplementedError as e:
if str(e) != ("Extending a 1D Akima interpolator is not "
"yet implemented"):
raise
except:
raise
class TestPPolyCommon(object):
# test basic functionality for PPoly and BPoly
def test_sort_check(self):
c = np.array([[1, 4], [2, 5], [3, 6]])
x = np.array([0, 1, 0.5])
assert_raises(ValueError, PPoly, c, x)
assert_raises(ValueError, BPoly, c, x)
def test_ctor_c(self):
# wrong shape: `c` must be at least 2-dimensional
with assert_raises(ValueError):
PPoly([1, 2], [0, 1])
def test_extend(self):
# Test adding new points to the piecewise polynomial
np.random.seed(1234)
order = 3
x = np.unique(np.r_[0, 10 * np.random.rand(30), 10])
c = 2*np.random.rand(order+1, len(x)-1, 2, 3) - 1
for cls in (PPoly, BPoly):
pp = cls(c[:,:9], x[:10])
pp.extend(c[:,9:], x[10:])
pp2 = cls(c[:, 10:], x[10:])
pp2.extend(c[:, :10], x[:10])
pp3 = cls(c, x)
assert_array_equal(pp.c, pp3.c)
assert_array_equal(pp.x, pp3.x)
assert_array_equal(pp2.c, pp3.c)
assert_array_equal(pp2.x, pp3.x)
def test_extend_diff_orders(self):
        # Test extending a polynomial with one of a different order
np.random.seed(1234)
x = np.linspace(0, 1, 6)
c = np.random.rand(2, 5)
x2 = np.linspace(1, 2, 6)
c2 = np.random.rand(4, 5)
for cls in (PPoly, BPoly):
pp1 = cls(c, x)
pp2 = cls(c2, x2)
pp_comb = cls(c, x)
pp_comb.extend(c2, x2[1:])
            # NB. doesn't match pp1 at the endpoint, because pp1 is not
            # continuous with pp2 since we took random coefficients.
xi1 = np.linspace(0, 1, 300, endpoint=False)
xi2 = np.linspace(1, 2, 300)
assert_allclose(pp1(xi1), pp_comb(xi1))
assert_allclose(pp2(xi2), pp_comb(xi2))
def test_extend_descending(self):
np.random.seed(0)
order = 3
x = np.sort(np.random.uniform(0, 10, 20))
c = np.random.rand(order + 1, x.shape[0] - 1, 2, 3)
for cls in (PPoly, BPoly):
p = cls(c, x)
p1 = cls(c[:, :9], x[:10])
p1.extend(c[:, 9:], x[10:])
p2 = cls(c[:, 10:], x[10:])
p2.extend(c[:, :10], x[:10])
assert_array_equal(p1.c, p.c)
assert_array_equal(p1.x, p.x)
assert_array_equal(p2.c, p.c)
assert_array_equal(p2.x, p.x)
def test_shape(self):
np.random.seed(1234)
c = np.random.rand(8, 12, 5, 6, 7)
x = np.sort(np.random.rand(13))
xp = np.random.rand(3, 4)
for cls in (PPoly, BPoly):
p = cls(c, x)
assert_equal(p(xp).shape, (3, 4, 5, 6, 7))
# 'scalars'
for cls in (PPoly, BPoly):
p = cls(c[..., 0, 0, 0], x)
assert_equal(np.shape(p(0.5)), ())
assert_equal(np.shape(p(np.array(0.5))), ())
# can't use dtype=object (with any numpy; what fails is
# constructing the object array here for old numpy)
assert_raises(ValueError, p, np.array([[0.1, 0.2], [0.4]]))
def test_complex_coef(self):
np.random.seed(12345)
x = np.sort(np.random.random(13))
c = np.random.random((8, 12)) * (1. + 0.3j)
c_re, c_im = c.real, c.imag
xp = np.random.random(5)
for cls in (PPoly, BPoly):
p, p_re, p_im = cls(c, x), cls(c_re, x), cls(c_im, x)
for nu in [0, 1, 2]:
assert_allclose(p(xp, nu).real, p_re(xp, nu))
assert_allclose(p(xp, nu).imag, p_im(xp, nu))
def test_axis(self):
np.random.seed(12345)
c = np.random.rand(3, 4, 5, 6, 7, 8)
c_s = c.shape
xp = np.random.random((1, 2))
for axis in (0, 1, 2, 3):
k, m = c.shape[axis], c.shape[axis+1]
x = np.sort(np.random.rand(m+1))
for cls in (PPoly, BPoly):
p = cls(c, x, axis=axis)
assert_equal(p.c.shape,
c_s[axis:axis+2] + c_s[:axis] + c_s[axis+2:])
res = p(xp)
targ_shape = c_s[:axis] + xp.shape + c_s[2+axis:]
assert_equal(res.shape, targ_shape)
# deriv/antideriv does not drop the axis
for p1 in [cls(c, x, axis=axis).derivative(),
cls(c, x, axis=axis).derivative(2),
cls(c, x, axis=axis).antiderivative(),
cls(c, x, axis=axis).antiderivative(2)]:
assert_equal(p1.axis, p.axis)
# c array needs two axes for the coefficients and intervals, so
# 0 <= axis < c.ndim-1; raise otherwise
for axis in (-1, 4, 5, 6):
for cls in (BPoly, PPoly):
assert_raises(ValueError, cls, **dict(c=c, x=x, axis=axis))
class TestPolySubclassing(object):
class P(PPoly):
pass
class B(BPoly):
pass
def _make_polynomials(self):
np.random.seed(1234)
x = np.sort(np.random.random(3))
c = np.random.random((4, 2))
return self.P(c, x), self.B(c, x)
def test_derivative(self):
pp, bp = self._make_polynomials()
for p in (pp, bp):
pd = p.derivative()
assert_equal(p.__class__, pd.__class__)
ppa = pp.antiderivative()
assert_equal(pp.__class__, ppa.__class__)
def test_from_spline(self):
np.random.seed(1234)
x = np.sort(np.r_[0, np.random.rand(11), 1])
y = np.random.rand(len(x))
spl = splrep(x, y, s=0)
pp = self.P.from_spline(spl)
assert_equal(pp.__class__, self.P)
def test_conversions(self):
pp, bp = self._make_polynomials()
pp1 = self.P.from_bernstein_basis(bp)
assert_equal(pp1.__class__, self.P)
bp1 = self.B.from_power_basis(pp)
assert_equal(bp1.__class__, self.B)
def test_from_derivatives(self):
x = [0, 1, 2]
y = [[1], [2], [3]]
bp = self.B.from_derivatives(x, y)
assert_equal(bp.__class__, self.B)
class TestPPoly(object):
def test_simple(self):
c = np.array([[1, 4], [2, 5], [3, 6]])
x = np.array([0, 0.5, 1])
p = PPoly(c, x)
assert_allclose(p(0.3), 1*0.3**2 + 2*0.3 + 3)
assert_allclose(p(0.7), 4*(0.7-0.5)**2 + 5*(0.7-0.5) + 6)
def test_periodic(self):
c = np.array([[1, 4], [2, 5], [3, 6]])
x = np.array([0, 0.5, 1])
p = PPoly(c, x, extrapolate='periodic')
assert_allclose(p(1.3), 1 * 0.3 ** 2 + 2 * 0.3 + 3)
assert_allclose(p(-0.3), 4 * (0.7 - 0.5) ** 2 + 5 * (0.7 - 0.5) + 6)
assert_allclose(p(1.3, 1), 2 * 0.3 + 2)
assert_allclose(p(-0.3, 1), 8 * (0.7 - 0.5) + 5)
def test_descending(self):
def binom_matrix(power):
n = np.arange(power + 1).reshape(-1, 1)
k = np.arange(power + 1)
B = binom(n, k)
return B[::-1, ::-1]
np.random.seed(0)
power = 3
for m in [10, 20, 30]:
x = np.sort(np.random.uniform(0, 10, m + 1))
ca = np.random.uniform(-2, 2, size=(power + 1, m))
h = np.diff(x)
h_powers = h[None, :] ** np.arange(power + 1)[::-1, None]
B = binom_matrix(power)
cap = ca * h_powers
cdp = np.dot(B.T, cap)
cd = cdp / h_powers
pa = PPoly(ca, x, extrapolate=True)
pd = PPoly(cd[:, ::-1], x[::-1], extrapolate=True)
x_test = np.random.uniform(-10, 20, 100)
assert_allclose(pa(x_test), pd(x_test), rtol=1e-13)
assert_allclose(pa(x_test, 1), pd(x_test, 1), rtol=1e-13)
pa_d = pa.derivative()
pd_d = pd.derivative()
assert_allclose(pa_d(x_test), pd_d(x_test), rtol=1e-13)
            # Antiderivatives won't be equal because fixing continuity is
            # done in the reverse order, but their differences over any
            # interval should still agree.
pa_i = pa.antiderivative()
pd_i = pd.antiderivative()
for a, b in np.random.uniform(-10, 20, (5, 2)):
int_a = pa.integrate(a, b)
int_d = pd.integrate(a, b)
assert_allclose(int_a, int_d, rtol=1e-13)
assert_allclose(pa_i(b) - pa_i(a), pd_i(b) - pd_i(a),
rtol=1e-13)
roots_d = pd.roots()
roots_a = pa.roots()
assert_allclose(roots_a, np.sort(roots_d), rtol=1e-12)
def test_multi_shape(self):
c = np.random.rand(6, 2, 1, 2, 3)
x = np.array([0, 0.5, 1])
p = PPoly(c, x)
assert_equal(p.x.shape, x.shape)
assert_equal(p.c.shape, c.shape)
assert_equal(p(0.3).shape, c.shape[2:])
assert_equal(p(np.random.rand(5, 6)).shape, (5, 6) + c.shape[2:])
dp = p.derivative()
assert_equal(dp.c.shape, (5, 2, 1, 2, 3))
ip = p.antiderivative()
assert_equal(ip.c.shape, (7, 2, 1, 2, 3))
def test_construct_fast(self):
np.random.seed(1234)
c = np.array([[1, 4], [2, 5], [3, 6]], dtype=float)
x = np.array([0, 0.5, 1])
p = PPoly.construct_fast(c, x)
assert_allclose(p(0.3), 1*0.3**2 + 2*0.3 + 3)
assert_allclose(p(0.7), 4*(0.7-0.5)**2 + 5*(0.7-0.5) + 6)
def test_vs_alternative_implementations(self):
np.random.seed(1234)
c = np.random.rand(3, 12, 22)
x = np.sort(np.r_[0, np.random.rand(11), 1])
p = PPoly(c, x)
xp = np.r_[0.3, 0.5, 0.33, 0.6]
expected = _ppoly_eval_1(c, x, xp)
assert_allclose(p(xp), expected)
expected = _ppoly_eval_2(c[:,:,0], x, xp)
assert_allclose(p(xp)[:,0], expected)
def test_from_spline(self):
np.random.seed(1234)
x = np.sort(np.r_[0, np.random.rand(11), 1])
y = np.random.rand(len(x))
spl = splrep(x, y, s=0)
pp = PPoly.from_spline(spl)
xi = np.linspace(0, 1, 200)
assert_allclose(pp(xi), splev(xi, spl))
# make sure .from_spline accepts BSpline objects
b = BSpline(*spl)
ppp = PPoly.from_spline(b)
assert_allclose(ppp(xi), b(xi))
# BSpline's extrapolate attribute propagates unless overridden
t, c, k = spl
for extrap in (None, True, False):
b = BSpline(t, c, k, extrapolate=extrap)
p = PPoly.from_spline(b)
assert_equal(p.extrapolate, b.extrapolate)
def test_derivative_simple(self):
np.random.seed(1234)
c = np.array([[4, 3, 2, 1]]).T
dc = np.array([[3*4, 2*3, 2]]).T
ddc = np.array([[2*3*4, 1*2*3]]).T
x = np.array([0, 1])
pp = PPoly(c, x)
dpp = PPoly(dc, x)
ddpp = PPoly(ddc, x)
assert_allclose(pp.derivative().c, dpp.c)
assert_allclose(pp.derivative(2).c, ddpp.c)
def test_derivative_eval(self):
np.random.seed(1234)
x = np.sort(np.r_[0, np.random.rand(11), 1])
y = np.random.rand(len(x))
spl = splrep(x, y, s=0)
pp = PPoly.from_spline(spl)
xi = np.linspace(0, 1, 200)
for dx in range(0, 3):
assert_allclose(pp(xi, dx), splev(xi, spl, dx))
def test_derivative(self):
np.random.seed(1234)
x = np.sort(np.r_[0, np.random.rand(11), 1])
y = np.random.rand(len(x))
spl = splrep(x, y, s=0, k=5)
pp = PPoly.from_spline(spl)
xi = np.linspace(0, 1, 200)
for dx in range(0, 10):
assert_allclose(pp(xi, dx), pp.derivative(dx)(xi),
err_msg="dx=%d" % (dx,))
def test_antiderivative_of_constant(self):
# https://github.com/scipy/scipy/issues/4216
p = PPoly([[1.]], [0, 1])
assert_equal(p.antiderivative().c, PPoly([[1], [0]], [0, 1]).c)
assert_equal(p.antiderivative().x, PPoly([[1], [0]], [0, 1]).x)
def test_antiderivative_regression_4355(self):
# https://github.com/scipy/scipy/issues/4355
p = PPoly([[1., 0.5]], [0, 1, 2])
q = p.antiderivative()
assert_equal(q.c, [[1, 0.5], [0, 1]])
assert_equal(q.x, [0, 1, 2])
assert_allclose(p.integrate(0, 2), 1.5)
assert_allclose(q(2) - q(0), 1.5)
def test_antiderivative_simple(self):
np.random.seed(1234)
# [ p1(x) = 3*x**2 + 2*x + 1,
# p2(x) = 1.6875]
c = np.array([[3, 2, 1], [0, 0, 1.6875]]).T
# [ pp1(x) = x**3 + x**2 + x,
# pp2(x) = 1.6875*(x - 0.25) + pp1(0.25)]
ic = np.array([[1, 1, 1, 0], [0, 0, 1.6875, 0.328125]]).T
# [ ppp1(x) = (1/4)*x**4 + (1/3)*x**3 + (1/2)*x**2,
# ppp2(x) = (1.6875/2)*(x - 0.25)**2 + pp1(0.25)*x + ppp1(0.25)]
iic = np.array([[1/4, 1/3, 1/2, 0, 0],
[0, 0, 1.6875/2, 0.328125, 0.037434895833333336]]).T
x = np.array([0, 0.25, 1])
pp = PPoly(c, x)
ipp = pp.antiderivative()
iipp = pp.antiderivative(2)
iipp2 = ipp.antiderivative()
assert_allclose(ipp.x, x)
assert_allclose(ipp.c.T, ic.T)
assert_allclose(iipp.c.T, iic.T)
assert_allclose(iipp2.c.T, iic.T)
def test_antiderivative_vs_derivative(self):
np.random.seed(1234)
x = np.linspace(0, 1, 30)**2
y = np.random.rand(len(x))
spl = splrep(x, y, s=0, k=5)
pp = PPoly.from_spline(spl)
for dx in range(0, 10):
ipp = pp.antiderivative(dx)
# check that derivative is inverse op
pp2 = ipp.derivative(dx)
assert_allclose(pp.c, pp2.c)
# check continuity
for k in range(dx):
pp2 = ipp.derivative(k)
r = 1e-13
endpoint = r*pp2.x[:-1] + (1 - r)*pp2.x[1:]
assert_allclose(pp2(pp2.x[1:]), pp2(endpoint),
rtol=1e-7, err_msg="dx=%d k=%d" % (dx, k))
def test_antiderivative_vs_spline(self):
np.random.seed(1234)
x = np.sort(np.r_[0, np.random.rand(11), 1])
y = np.random.rand(len(x))
spl = splrep(x, y, s=0, k=5)
pp = PPoly.from_spline(spl)
for dx in range(0, 10):
pp2 = pp.antiderivative(dx)
spl2 = splantider(spl, dx)
xi = np.linspace(0, 1, 200)
assert_allclose(pp2(xi), splev(xi, spl2),
rtol=1e-7)
def test_antiderivative_continuity(self):
c = np.array([[2, 1, 2, 2], [2, 1, 3, 3]]).T
x = np.array([0, 0.5, 1])
p = PPoly(c, x)
ip = p.antiderivative()
# check continuity
assert_allclose(ip(0.5 - 1e-9), ip(0.5 + 1e-9), rtol=1e-8)
# check that only lowest order coefficients were changed
p2 = ip.derivative()
assert_allclose(p2.c, p.c)
def test_integrate(self):
np.random.seed(1234)
x = np.sort(np.r_[0, np.random.rand(11), 1])
y = np.random.rand(len(x))
spl = splrep(x, y, s=0, k=5)
pp = PPoly.from_spline(spl)
a, b = 0.3, 0.9
ig = pp.integrate(a, b)
ipp = pp.antiderivative()
assert_allclose(ig, ipp(b) - ipp(a))
assert_allclose(ig, splint(a, b, spl))
a, b = -0.3, 0.9
ig = pp.integrate(a, b, extrapolate=True)
assert_allclose(ig, ipp(b) - ipp(a))
assert_(np.isnan(pp.integrate(a, b, extrapolate=False)).all())
def test_integrate_periodic(self):
x = np.array([1, 2, 4])
c = np.array([[0., 0.], [-1., -1.], [2., -0.], [1., 2.]])
P = PPoly(c, x, extrapolate='periodic')
I = P.antiderivative()
period_int = I(4) - I(1)
assert_allclose(P.integrate(1, 4), period_int)
assert_allclose(P.integrate(-10, -7), period_int)
assert_allclose(P.integrate(-10, -4), 2 * period_int)
assert_allclose(P.integrate(1.5, 2.5), I(2.5) - I(1.5))
assert_allclose(P.integrate(3.5, 5), I(2) - I(1) + I(4) - I(3.5))
assert_allclose(P.integrate(3.5 + 12, 5 + 12),
I(2) - I(1) + I(4) - I(3.5))
assert_allclose(P.integrate(3.5, 5 + 12),
I(2) - I(1) + I(4) - I(3.5) + 4 * period_int)
assert_allclose(P.integrate(0, -1), I(2) - I(3))
assert_allclose(P.integrate(-9, -10), I(2) - I(3))
assert_allclose(P.integrate(0, -10), I(2) - I(3) - 3 * period_int)
def test_roots(self):
x = np.linspace(0, 1, 31)**2
y = np.sin(30*x)
spl = splrep(x, y, s=0, k=3)
pp = PPoly.from_spline(spl)
r = pp.roots()
r = r[(r >= 0 - 1e-15) & (r <= 1 + 1e-15)]
assert_allclose(r, sproot(spl), atol=1e-15)
def test_roots_idzero(self):
# Roots for piecewise polynomials with identically zero
# sections.
c = np.array([[-1, 0.25], [0, 0], [-1, 0.25]]).T
x = np.array([0, 0.4, 0.6, 1.0])
pp = PPoly(c, x)
assert_array_equal(pp.roots(),
[0.25, 0.4, np.nan, 0.6 + 0.25])
# ditto for p.solve(const) with sections identically equal const
const = 2.
c1 = c.copy()
c1[1, :] += const
pp1 = PPoly(c1, x)
assert_array_equal(pp1.solve(const),
[0.25, 0.4, np.nan, 0.6 + 0.25])
def test_roots_all_zero(self):
# test the code path for the polynomial being identically zero everywhere
c = [[0], [0]]
x = [0, 1]
p = PPoly(c, x)
assert_array_equal(p.roots(), [0, np.nan])
assert_array_equal(p.solve(0), [0, np.nan])
assert_array_equal(p.solve(1), [])
c = [[0, 0], [0, 0]]
x = [0, 1, 2]
p = PPoly(c, x)
assert_array_equal(p.roots(), [0, np.nan, 1, np.nan])
assert_array_equal(p.solve(0), [0, np.nan, 1, np.nan])
assert_array_equal(p.solve(1), [])
def test_roots_repeated(self):
# Check roots repeated in multiple sections are reported only
# once.
# [(x + 1)**2 - 1, -x**2] ; x == 0 is a repeated root
c = np.array([[1, 0, -1], [-1, 0, 0]]).T
x = np.array([-1, 0, 1])
pp = PPoly(c, x)
assert_array_equal(pp.roots(), [-2, 0])
assert_array_equal(pp.roots(extrapolate=False), [0])
def test_roots_discont(self):
# Check that a discontinuity across zero is reported as root
c = np.array([[1], [-1]]).T
x = np.array([0, 0.5, 1])
pp = PPoly(c, x)
assert_array_equal(pp.roots(), [0.5])
assert_array_equal(pp.roots(discontinuity=False), [])
# ditto for a discontinuity across y:
assert_array_equal(pp.solve(0.5), [0.5])
assert_array_equal(pp.solve(0.5, discontinuity=False), [])
assert_array_equal(pp.solve(1.5), [])
assert_array_equal(pp.solve(1.5, discontinuity=False), [])
def test_roots_random(self):
# Check high-order polynomials with random coefficients
np.random.seed(1234)
num = 0
for extrapolate in (True, False):
for order in range(0, 20):
x = np.unique(np.r_[0, 10 * np.random.rand(30), 10])
c = 2*np.random.rand(order+1, len(x)-1, 2, 3) - 1
pp = PPoly(c, x)
for y in [0, np.random.random()]:
r = pp.solve(y, discontinuity=False, extrapolate=extrapolate)
for i in range(2):
for j in range(3):
rr = r[i,j]
if rr.size > 0:
# Check that the reported roots indeed are roots
num += rr.size
val = pp(rr, extrapolate=extrapolate)[:,i,j]
cmpval = pp(rr, nu=1,
extrapolate=extrapolate)[:,i,j]
msg = "(%r) r = %s" % (extrapolate, repr(rr),)
assert_allclose((val-y) / cmpval, 0, atol=1e-7,
err_msg=msg)
# Check that we checked a number of roots
assert_(num > 100, repr(num))
def test_roots_croots(self):
# Test the complex root finding algorithm
np.random.seed(1234)
for k in range(1, 15):
c = np.random.rand(k, 1, 130)
if k == 3:
# add a case with zero discriminant
c[:,0,0] = 1, 2, 1
for y in [0, np.random.random()]:
w = np.empty(c.shape, dtype=complex)
_ppoly._croots_poly1(c, w)
if k == 1:
assert_(np.isnan(w).all())
continue
res = 0
cres = 0
for i in range(k):
res += c[i,None] * w**(k-1-i)
cres += abs(c[i,None] * w**(k-1-i))
with np.errstate(invalid='ignore'):
res /= cres
res = res.ravel()
res = res[~np.isnan(res)]
assert_allclose(res, 0, atol=1e-10)
def test_extrapolate_attr(self):
# [ 1 - x**2 ]
c = np.array([[-1, 0, 1]]).T
x = np.array([0, 1])
for extrapolate in [True, False, None]:
pp = PPoly(c, x, extrapolate=extrapolate)
pp_d = pp.derivative()
pp_i = pp.antiderivative()
if extrapolate is False:
assert_(np.isnan(pp([-0.1, 1.1])).all())
assert_(np.isnan(pp_i([-0.1, 1.1])).all())
assert_(np.isnan(pp_d([-0.1, 1.1])).all())
assert_equal(pp.roots(), [1])
else:
assert_allclose(pp([-0.1, 1.1]), [1-0.1**2, 1-1.1**2])
assert_(not np.isnan(pp_i([-0.1, 1.1])).any())
assert_(not np.isnan(pp_d([-0.1, 1.1])).any())
assert_allclose(pp.roots(), [1, -1])
class TestBPoly(object):
def test_simple(self):
x = [0, 1]
c = [[3]]
bp = BPoly(c, x)
assert_allclose(bp(0.1), 3.)
def test_simple2(self):
x = [0, 1]
c = [[3], [1]]
bp = BPoly(c, x) # 3*(1-x) + 1*x
assert_allclose(bp(0.1), 3*0.9 + 1.*0.1)
def test_simple3(self):
x = [0, 1]
c = [[3], [1], [4]]
bp = BPoly(c, x) # 3 * (1-x)**2 + 2 * x (1-x) + 4 * x**2
assert_allclose(bp(0.2),
3 * 0.8*0.8 + 1 * 2*0.2*0.8 + 4 * 0.2*0.2)
def test_simple4(self):
x = [0, 1]
c = [[1], [1], [1], [2]]
bp = BPoly(c, x)
assert_allclose(bp(0.3), 0.7**3 +
3 * 0.7**2 * 0.3 +
3 * 0.7 * 0.3**2 +
2 * 0.3**3)
def test_simple5(self):
x = [0, 1]
c = [[1], [1], [8], [2], [1]]
bp = BPoly(c, x)
assert_allclose(bp(0.3), 0.7**4 +
4 * 0.7**3 * 0.3 +
8 * 6 * 0.7**2 * 0.3**2 +
2 * 4 * 0.7 * 0.3**3 +
0.3**4)
def test_periodic(self):
x = [0, 1, 3]
c = [[3, 0], [0, 0], [0, 2]]
# [3*(1-x)**2, 2*((x-1)/2)**2]
bp = BPoly(c, x, extrapolate='periodic')
assert_allclose(bp(3.4), 3 * 0.6**2)
assert_allclose(bp(-1.3), 2 * (0.7/2)**2)
assert_allclose(bp(3.4, 1), -6 * 0.6)
assert_allclose(bp(-1.3, 1), 2 * (0.7/2))
def test_descending(self):
np.random.seed(0)
power = 3
for m in [10, 20, 30]:
x = np.sort(np.random.uniform(0, 10, m + 1))
ca = np.random.uniform(-0.1, 0.1, size=(power + 1, m))
# We need only to flip coefficients to get it right!
cd = ca[::-1].copy()
pa = BPoly(ca, x, extrapolate=True)
pd = BPoly(cd[:, ::-1], x[::-1], extrapolate=True)
x_test = np.random.uniform(-10, 20, 100)
assert_allclose(pa(x_test), pd(x_test), rtol=1e-13)
assert_allclose(pa(x_test, 1), pd(x_test, 1), rtol=1e-13)
pa_d = pa.derivative()
pd_d = pd.derivative()
assert_allclose(pa_d(x_test), pd_d(x_test), rtol=1e-13)
            # Antiderivatives won't be equal because fixing continuity is
            # done in the reverse order, but their differences over any
            # interval should still agree.
pa_i = pa.antiderivative()
pd_i = pd.antiderivative()
for a, b in np.random.uniform(-10, 20, (5, 2)):
int_a = pa.integrate(a, b)
int_d = pd.integrate(a, b)
assert_allclose(int_a, int_d, rtol=1e-12)
assert_allclose(pa_i(b) - pa_i(a), pd_i(b) - pd_i(a),
rtol=1e-12)
def test_multi_shape(self):
c = np.random.rand(6, 2, 1, 2, 3)
x = np.array([0, 0.5, 1])
p = BPoly(c, x)
assert_equal(p.x.shape, x.shape)
assert_equal(p.c.shape, c.shape)
assert_equal(p(0.3).shape, c.shape[2:])
assert_equal(p(np.random.rand(5,6)).shape,
(5,6)+c.shape[2:])
dp = p.derivative()
assert_equal(dp.c.shape, (5, 2, 1, 2, 3))
def test_interval_length(self):
x = [0, 2]
c = [[3], [1], [4]]
bp = BPoly(c, x)
xval = 0.1
s = xval / 2 # s = (x - xa) / (xb - xa)
assert_allclose(bp(xval), 3 * (1-s)*(1-s) + 1 * 2*s*(1-s) + 4 * s*s)
def test_two_intervals(self):
x = [0, 1, 3]
c = [[3, 0], [0, 0], [0, 2]]
bp = BPoly(c, x) # [3*(1-x)**2, 2*((x-1)/2)**2]
assert_allclose(bp(0.4), 3 * 0.6*0.6)
assert_allclose(bp(1.7), 2 * (0.7/2)**2)
def test_extrapolate_attr(self):
x = [0, 2]
c = [[3], [1], [4]]
bp = BPoly(c, x)
for extrapolate in (True, False, None):
bp = BPoly(c, x, extrapolate=extrapolate)
bp_d = bp.derivative()
if extrapolate is False:
assert_(np.isnan(bp([-0.1, 2.1])).all())
assert_(np.isnan(bp_d([-0.1, 2.1])).all())
else:
assert_(not np.isnan(bp([-0.1, 2.1])).any())
assert_(not np.isnan(bp_d([-0.1, 2.1])).any())
class TestBPolyCalculus(object):
def test_derivative(self):
x = [0, 1, 3]
c = [[3, 0], [0, 0], [0, 2]]
bp = BPoly(c, x) # [3*(1-x)**2, 2*((x-1)/2)**2]
bp_der = bp.derivative()
assert_allclose(bp_der(0.4), -6*(0.6))
assert_allclose(bp_der(1.7), 0.7)
        # derivatives evaluated directly via the nu argument
assert_allclose([bp(0.4, nu=1), bp(0.4, nu=2), bp(0.4, nu=3)],
[-6*(1-0.4), 6., 0.])
assert_allclose([bp(1.7, nu=1), bp(1.7, nu=2), bp(1.7, nu=3)],
[0.7, 1., 0])
def test_derivative_ppoly(self):
# make sure it's consistent w/ power basis
np.random.seed(1234)
m, k = 5, 8 # number of intervals, order
x = np.sort(np.random.random(m))
c = np.random.random((k, m-1))
bp = BPoly(c, x)
pp = PPoly.from_bernstein_basis(bp)
for d in range(k):
bp = bp.derivative()
pp = pp.derivative()
xp = np.linspace(x[0], x[-1], 21)
assert_allclose(bp(xp), pp(xp))
def test_deriv_inplace(self):
np.random.seed(1234)
m, k = 5, 8 # number of intervals, order
x = np.sort(np.random.random(m))
c = np.random.random((k, m-1))
# test both real and complex coefficients
for cc in [c.copy(), c*(1. + 2.j)]:
bp = BPoly(cc, x)
xp = np.linspace(x[0], x[-1], 21)
for i in range(k):
assert_allclose(bp(xp, i), bp.derivative(i)(xp))
def test_antiderivative_simple(self):
# f(x) = x for x \in [0, 1),
# (x-1)/2 for x \in [1, 3]
#
# antiderivative is then
# F(x) = x**2 / 2 for x \in [0, 1),
# 0.5*x*(x/2 - 1) + A for x \in [1, 3]
# where A = 3/4 for continuity at x = 1.
x = [0, 1, 3]
c = [[0, 0], [1, 1]]
bp = BPoly(c, x)
bi = bp.antiderivative()
xx = np.linspace(0, 3, 11)
assert_allclose(bi(xx),
np.where(xx < 1, xx**2 / 2.,
0.5 * xx * (xx/2. - 1) + 3./4),
atol=1e-12, rtol=1e-12)
def test_der_antider(self):
np.random.seed(1234)
x = np.sort(np.random.random(11))
c = np.random.random((4, 10, 2, 3))
bp = BPoly(c, x)
xx = np.linspace(x[0], x[-1], 100)
assert_allclose(bp.antiderivative().derivative()(xx),
bp(xx), atol=1e-12, rtol=1e-12)
def test_antider_ppoly(self):
np.random.seed(1234)
x = np.sort(np.random.random(11))
c = np.random.random((4, 10, 2, 3))
bp = BPoly(c, x)
pp = PPoly.from_bernstein_basis(bp)
xx = np.linspace(x[0], x[-1], 10)
assert_allclose(bp.antiderivative(2)(xx),
pp.antiderivative(2)(xx), atol=1e-12, rtol=1e-12)
def test_antider_continuous(self):
np.random.seed(1234)
x = np.sort(np.random.random(11))
c = np.random.random((4, 10))
bp = BPoly(c, x).antiderivative()
xx = bp.x[1:-1]
assert_allclose(bp(xx - 1e-14),
bp(xx + 1e-14), atol=1e-12, rtol=1e-12)
def test_integrate(self):
np.random.seed(1234)
x = np.sort(np.random.random(11))
c = np.random.random((4, 10))
bp = BPoly(c, x)
pp = PPoly.from_bernstein_basis(bp)
assert_allclose(bp.integrate(0, 1),
pp.integrate(0, 1), atol=1e-12, rtol=1e-12)
def test_integrate_extrap(self):
c = [[1]]
x = [0, 1]
b = BPoly(c, x)
# default is extrapolate=True
assert_allclose(b.integrate(0, 2), 2., atol=1e-14)
# .integrate argument overrides self.extrapolate
b1 = BPoly(c, x, extrapolate=False)
assert_(np.isnan(b1.integrate(0, 2)))
assert_allclose(b1.integrate(0, 2, extrapolate=True), 2., atol=1e-14)
def test_integrate_periodic(self):
x = np.array([1, 2, 4])
c = np.array([[0., 0.], [-1., -1.], [2., -0.], [1., 2.]])
P = BPoly.from_power_basis(PPoly(c, x), extrapolate='periodic')
I = P.antiderivative()
period_int = I(4) - I(1)
assert_allclose(P.integrate(1, 4), period_int)
assert_allclose(P.integrate(-10, -7), period_int)
assert_allclose(P.integrate(-10, -4), 2 * period_int)
assert_allclose(P.integrate(1.5, 2.5), I(2.5) - I(1.5))
assert_allclose(P.integrate(3.5, 5), I(2) - I(1) + I(4) - I(3.5))
assert_allclose(P.integrate(3.5 + 12, 5 + 12),
I(2) - I(1) + I(4) - I(3.5))
assert_allclose(P.integrate(3.5, 5 + 12),
I(2) - I(1) + I(4) - I(3.5) + 4 * period_int)
assert_allclose(P.integrate(0, -1), I(2) - I(3))
assert_allclose(P.integrate(-9, -10), I(2) - I(3))
assert_allclose(P.integrate(0, -10), I(2) - I(3) - 3 * period_int)
def test_antider_neg(self):
        # .derivative(-nu) ==> .antiderivative(nu) and vice versa
c = [[1]]
x = [0, 1]
b = BPoly(c, x)
xx = np.linspace(0, 1, 21)
assert_allclose(b.derivative(-1)(xx), b.antiderivative()(xx),
atol=1e-12, rtol=1e-12)
assert_allclose(b.derivative(1)(xx), b.antiderivative(-1)(xx),
atol=1e-12, rtol=1e-12)
class TestPolyConversions(object):
def test_bp_from_pp(self):
x = [0, 1, 3]
c = [[3, 2], [1, 8], [4, 3]]
pp = PPoly(c, x)
bp = BPoly.from_power_basis(pp)
pp1 = PPoly.from_bernstein_basis(bp)
xp = [0.1, 1.4]
assert_allclose(pp(xp), bp(xp))
assert_allclose(pp(xp), pp1(xp))
def test_bp_from_pp_random(self):
np.random.seed(1234)
m, k = 5, 8 # number of intervals, order
x = np.sort(np.random.random(m))
c = np.random.random((k, m-1))
pp = PPoly(c, x)
bp = BPoly.from_power_basis(pp)
pp1 = PPoly.from_bernstein_basis(bp)
xp = np.linspace(x[0], x[-1], 21)
assert_allclose(pp(xp), bp(xp))
assert_allclose(pp(xp), pp1(xp))
def test_pp_from_bp(self):
x = [0, 1, 3]
c = [[3, 3], [1, 1], [4, 2]]
bp = BPoly(c, x)
pp = PPoly.from_bernstein_basis(bp)
bp1 = BPoly.from_power_basis(pp)
xp = [0.1, 1.4]
assert_allclose(bp(xp), pp(xp))
assert_allclose(bp(xp), bp1(xp))
class TestBPolyFromDerivatives(object):
def test_make_poly_1(self):
c1 = BPoly._construct_from_derivatives(0, 1, [2], [3])
assert_allclose(c1, [2., 3.])
def test_make_poly_2(self):
c1 = BPoly._construct_from_derivatives(0, 1, [1, 0], [1])
assert_allclose(c1, [1., 1., 1.])
# f'(0) = 3
c2 = BPoly._construct_from_derivatives(0, 1, [2, 3], [1])
assert_allclose(c2, [2., 7./2, 1.])
# f'(1) = 3
c3 = BPoly._construct_from_derivatives(0, 1, [2], [1, 3])
assert_allclose(c3, [2., -0.5, 1.])
def test_make_poly_3(self):
# f'(0)=2, f''(0)=3
c1 = BPoly._construct_from_derivatives(0, 1, [1, 2, 3], [4])
assert_allclose(c1, [1., 5./3, 17./6, 4.])
# f'(1)=2, f''(1)=3
c2 = BPoly._construct_from_derivatives(0, 1, [1], [4, 2, 3])
assert_allclose(c2, [1., 19./6, 10./3, 4.])
# f'(0)=2, f'(1)=3
c3 = BPoly._construct_from_derivatives(0, 1, [1, 2], [4, 3])
assert_allclose(c3, [1., 5./3, 3., 4.])
def test_make_poly_12(self):
np.random.seed(12345)
ya = np.r_[0, np.random.random(5)]
yb = np.r_[0, np.random.random(5)]
c = BPoly._construct_from_derivatives(0, 1, ya, yb)
pp = BPoly(c[:, None], [0, 1])
for j in range(6):
assert_allclose([pp(0.), pp(1.)], [ya[j], yb[j]])
pp = pp.derivative()
def test_raise_degree(self):
np.random.seed(12345)
x = [0, 1]
k, d = 8, 5
c = np.random.random((k, 1, 2, 3, 4))
bp = BPoly(c, x)
c1 = BPoly._raise_degree(c, d)
bp1 = BPoly(c1, x)
xp = np.linspace(0, 1, 11)
assert_allclose(bp(xp), bp1(xp))
def test_xi_yi(self):
assert_raises(ValueError, BPoly.from_derivatives, [0, 1], [0])
def test_coords_order(self):
xi = [0, 0, 1]
yi = [[0], [0], [0]]
assert_raises(ValueError, BPoly.from_derivatives, xi, yi)
def test_zeros(self):
xi = [0, 1, 2, 3]
yi = [[0, 0], [0], [0, 0], [0, 0]] # NB: will have to raise the degree
pp = BPoly.from_derivatives(xi, yi)
assert_(pp.c.shape == (4, 3))
ppd = pp.derivative()
for xp in [0., 0.1, 1., 1.1, 1.9, 2., 2.5]:
assert_allclose([pp(xp), ppd(xp)], [0., 0.])
def _make_random_mk(self, m, k):
# k derivatives at each breakpoint
np.random.seed(1234)
xi = np.asarray([1. * j**2 for j in range(m+1)])
yi = [np.random.random(k) for j in range(m+1)]
return xi, yi
def test_random_12(self):
m, k = 5, 12
xi, yi = self._make_random_mk(m, k)
pp = BPoly.from_derivatives(xi, yi)
for order in range(k//2):
assert_allclose(pp(xi), [yy[order] for yy in yi])
pp = pp.derivative()
def test_order_zero(self):
m, k = 5, 12
xi, yi = self._make_random_mk(m, k)
assert_raises(ValueError, BPoly.from_derivatives,
**dict(xi=xi, yi=yi, orders=0))
def test_orders_too_high(self):
m, k = 5, 12
xi, yi = self._make_random_mk(m, k)
pp = BPoly.from_derivatives(xi, yi, orders=2*k-1) # this is still ok
assert_raises(ValueError, BPoly.from_derivatives, # but this is not
**dict(xi=xi, yi=yi, orders=2*k))
def test_orders_global(self):
m, k = 5, 12
xi, yi = self._make_random_mk(m, k)
        # NB: local polynomials will be of order 5, which means that
        # derivatives up to the 2nd are used at each breakpoint
order = 5
pp = BPoly.from_derivatives(xi, yi, orders=order)
for j in range(order//2+1):
assert_allclose(pp(xi[1:-1] - 1e-12), pp(xi[1:-1] + 1e-12))
pp = pp.derivative()
assert_(not np.allclose(pp(xi[1:-1] - 1e-12), pp(xi[1:-1] + 1e-12)))
        # now repeat with `order` being even: on each interval, it uses
        # order//2 derivatives at the right-hand endpoint and
        # order//2+1 derivatives at the left-hand endpoint
order = 6
pp = BPoly.from_derivatives(xi, yi, orders=order)
for j in range(order//2):
assert_allclose(pp(xi[1:-1] - 1e-12), pp(xi[1:-1] + 1e-12))
pp = pp.derivative()
assert_(not np.allclose(pp(xi[1:-1] - 1e-12), pp(xi[1:-1] + 1e-12)))
def test_orders_local(self):
m, k = 7, 12
xi, yi = self._make_random_mk(m, k)
orders = [o + 1 for o in range(m)]
for i, x in enumerate(xi[1:-1]):
pp = BPoly.from_derivatives(xi, yi, orders=orders)
for j in range(orders[i] // 2 + 1):
assert_allclose(pp(x - 1e-12), pp(x + 1e-12))
pp = pp.derivative()
assert_(not np.allclose(pp(x - 1e-12), pp(x + 1e-12)))
def test_yi_trailing_dims(self):
m, k = 7, 5
xi = np.sort(np.random.random(m+1))
yi = np.random.random((m+1, k, 6, 7, 8))
pp = BPoly.from_derivatives(xi, yi)
assert_equal(pp.c.shape, (2*k, m, 6, 7, 8))
def test_gh_5430(self):
# At least one of these raises an error unless gh-5430 is
# fixed. In py2k an int is implemented using a C long, so
# which one fails depends on your system. In py3k there is only
# one arbitrary precision integer type, so both should fail.
orders = np.int32(1)
p = BPoly.from_derivatives([0, 1], [[0], [0]], orders=orders)
assert_almost_equal(p(0), 0)
orders = np.int64(1)
p = BPoly.from_derivatives([0, 1], [[0], [0]], orders=orders)
assert_almost_equal(p(0), 0)
orders = 1
# This worked before; make sure it still works
p = BPoly.from_derivatives([0, 1], [[0], [0]], orders=orders)
assert_almost_equal(p(0), 0)
orders = 1
class TestNdPPoly(object):
def test_simple_1d(self):
np.random.seed(1234)
c = np.random.rand(4, 5)
x = np.linspace(0, 1, 5+1)
xi = np.random.rand(200)
p = NdPPoly(c, (x,))
v1 = p((xi,))
v2 = _ppoly_eval_1(c[:,:,None], x, xi).ravel()
assert_allclose(v1, v2)
def test_simple_2d(self):
np.random.seed(1234)
c = np.random.rand(4, 5, 6, 7)
x = np.linspace(0, 1, 6+1)
y = np.linspace(0, 1, 7+1)**2
xi = np.random.rand(200)
yi = np.random.rand(200)
v1 = np.empty([len(xi), 1], dtype=c.dtype)
v1.fill(np.nan)
_ppoly.evaluate_nd(c.reshape(4*5, 6*7, 1),
(x, y),
np.array([4, 5], dtype=np.intc),
np.c_[xi, yi],
np.array([0, 0], dtype=np.intc),
1,
v1)
v1 = v1.ravel()
v2 = _ppoly2d_eval(c, (x, y), xi, yi)
assert_allclose(v1, v2)
p = NdPPoly(c, (x, y))
for nu in (None, (0, 0), (0, 1), (1, 0), (2, 3), (9, 2)):
v1 = p(np.c_[xi, yi], nu=nu)
v2 = _ppoly2d_eval(c, (x, y), xi, yi, nu=nu)
assert_allclose(v1, v2, err_msg=repr(nu))
def test_simple_3d(self):
np.random.seed(1234)
c = np.random.rand(4, 5, 6, 7, 8, 9)
x = np.linspace(0, 1, 7+1)
y = np.linspace(0, 1, 8+1)**2
z = np.linspace(0, 1, 9+1)**3
xi = np.random.rand(40)
yi = np.random.rand(40)
zi = np.random.rand(40)
p = NdPPoly(c, (x, y, z))
for nu in (None, (0, 0, 0), (0, 1, 0), (1, 0, 0), (2, 3, 0),
(6, 0, 2)):
v1 = p((xi, yi, zi), nu=nu)
v2 = _ppoly3d_eval(c, (x, y, z), xi, yi, zi, nu=nu)
assert_allclose(v1, v2, err_msg=repr(nu))
def test_simple_4d(self):
np.random.seed(1234)
c = np.random.rand(4, 5, 6, 7, 8, 9, 10, 11)
x = np.linspace(0, 1, 8+1)
y = np.linspace(0, 1, 9+1)**2
z = np.linspace(0, 1, 10+1)**3
u = np.linspace(0, 1, 11+1)**4
xi = np.random.rand(20)
yi = np.random.rand(20)
zi = np.random.rand(20)
ui = np.random.rand(20)
p = NdPPoly(c, (x, y, z, u))
v1 = p((xi, yi, zi, ui))
v2 = _ppoly4d_eval(c, (x, y, z, u), xi, yi, zi, ui)
assert_allclose(v1, v2)
def test_deriv_1d(self):
np.random.seed(1234)
c = np.random.rand(4, 5)
x = np.linspace(0, 1, 5+1)
p = NdPPoly(c, (x,))
# derivative
dp = p.derivative(nu=[1])
p1 = PPoly(c, x)
dp1 = p1.derivative()
assert_allclose(dp.c, dp1.c)
# antiderivative
dp = p.antiderivative(nu=[2])
p1 = PPoly(c, x)
dp1 = p1.antiderivative(2)
assert_allclose(dp.c, dp1.c)
def test_deriv_3d(self):
np.random.seed(1234)
c = np.random.rand(4, 5, 6, 7, 8, 9)
x = np.linspace(0, 1, 7+1)
y = np.linspace(0, 1, 8+1)**2
z = np.linspace(0, 1, 9+1)**3
p = NdPPoly(c, (x, y, z))
# differentiate vs x
p1 = PPoly(c.transpose(0, 3, 1, 2, 4, 5), x)
dp = p.derivative(nu=[2])
dp1 = p1.derivative(2)
assert_allclose(dp.c,
dp1.c.transpose(0, 2, 3, 1, 4, 5))
# antidifferentiate vs y
p1 = PPoly(c.transpose(1, 4, 0, 2, 3, 5), y)
dp = p.antiderivative(nu=[0, 1, 0])
dp1 = p1.antiderivative(1)
assert_allclose(dp.c,
dp1.c.transpose(2, 0, 3, 4, 1, 5))
# differentiate vs z
p1 = PPoly(c.transpose(2, 5, 0, 1, 3, 4), z)
dp = p.derivative(nu=[0, 0, 3])
dp1 = p1.derivative(3)
assert_allclose(dp.c,
dp1.c.transpose(2, 3, 0, 4, 5, 1))
def test_deriv_3d_simple(self):
# Integrate to obtain function x y**2 z**4 / (2! 4!)
c = np.ones((1, 1, 1, 3, 4, 5))
x = np.linspace(0, 1, 3+1)**1
y = np.linspace(0, 1, 4+1)**2
z = np.linspace(0, 1, 5+1)**3
p = NdPPoly(c, (x, y, z))
ip = p.antiderivative((1, 0, 4))
ip = ip.antiderivative((0, 2, 0))
xi = np.random.rand(20)
yi = np.random.rand(20)
zi = np.random.rand(20)
assert_allclose(ip((xi, yi, zi)),
xi * yi**2 * zi**4 / (gamma(3)*gamma(5)))
def test_integrate_2d(self):
np.random.seed(1234)
c = np.random.rand(4, 5, 16, 17)
x = np.linspace(0, 1, 16+1)**1
y = np.linspace(0, 1, 17+1)**2
# make continuously differentiable so that nquad() has an
# easier time
c = c.transpose(0, 2, 1, 3)
cx = c.reshape(c.shape[0], c.shape[1], -1).copy()
_ppoly.fix_continuity(cx, x, 2)
c = cx.reshape(c.shape)
c = c.transpose(0, 2, 1, 3)
c = c.transpose(1, 3, 0, 2)
cx = c.reshape(c.shape[0], c.shape[1], -1).copy()
_ppoly.fix_continuity(cx, y, 2)
c = cx.reshape(c.shape)
c = c.transpose(2, 0, 3, 1).copy()
# Check integration
p = NdPPoly(c, (x, y))
for ranges in [[(0, 1), (0, 1)],
[(0, 0.5), (0, 1)],
[(0, 1), (0, 0.5)],
[(0.3, 0.7), (0.6, 0.2)]]:
ig = p.integrate(ranges)
ig2, err2 = nquad(lambda x, y: p((x, y)), ranges,
opts=[dict(epsrel=1e-5, epsabs=1e-5)]*2)
assert_allclose(ig, ig2, rtol=1e-5, atol=1e-5,
err_msg=repr(ranges))
def test_integrate_1d(self):
np.random.seed(1234)
c = np.random.rand(4, 5, 6, 16, 17, 18)
x = np.linspace(0, 1, 16+1)**1
y = np.linspace(0, 1, 17+1)**2
z = np.linspace(0, 1, 18+1)**3
# Check 1D integration
p = NdPPoly(c, (x, y, z))
u = np.random.rand(200)
v = np.random.rand(200)
a, b = 0.2, 0.7
px = p.integrate_1d(a, b, axis=0)
pax = p.antiderivative((1, 0, 0))
assert_allclose(px((u, v)), pax((b, u, v)) - pax((a, u, v)))
py = p.integrate_1d(a, b, axis=1)
pay = p.antiderivative((0, 1, 0))
assert_allclose(py((u, v)), pay((u, b, v)) - pay((u, a, v)))
pz = p.integrate_1d(a, b, axis=2)
paz = p.antiderivative((0, 0, 1))
assert_allclose(pz((u, v)), paz((u, v, b)) - paz((u, v, a)))
def _ppoly_eval_1(c, x, xps):
"""Evaluate piecewise polynomial manually"""
out = np.zeros((len(xps), c.shape[2]))
for i, xp in enumerate(xps):
if xp < 0 or xp > 1:
out[i,:] = np.nan
continue
j = np.searchsorted(x, xp) - 1
d = xp - x[j]
assert_(x[j] <= xp < x[j+1])
r = sum(c[k,j] * d**(c.shape[0]-k-1)
for k in range(c.shape[0]))
out[i,:] = r
return out
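# Minimal sanity sketch for the reference evaluator above: a piecewise-linear
# p(x) = x on [0, 1], split at 0.5, should agree with PPoly itself.
def _ppoly_eval_1_sanity():
    c = np.array([[1.0, 1.0], [0.0, 0.5]])   # local power-basis coefficients
    x = np.array([0.0, 0.5, 1.0])             # breakpoints
    xp = np.array([0.25, 0.75])
    assert_allclose(_ppoly_eval_1(c[:, :, None], x, xp)[:, 0], PPoly(c, x)(xp))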
def _ppoly_eval_2(coeffs, breaks, xnew, fill=np.nan):
"""Evaluate piecewise polynomial manually (another way)"""
a = breaks[0]
b = breaks[-1]
K = coeffs.shape[0]
saveshape = np.shape(xnew)
xnew = np.ravel(xnew)
res = np.empty_like(xnew)
mask = (xnew >= a) & (xnew <= b)
res[~mask] = fill
xx = xnew.compress(mask)
indxs = np.searchsorted(breaks, xx)-1
indxs = indxs.clip(0, len(breaks))
pp = coeffs
diff = xx - breaks.take(indxs)
V = np.vander(diff, N=K)
    values = np.array([np.dot(V[k, :], pp[:, indxs[k]]) for k in range(len(xx))])
res[mask] = values
res.shape = saveshape
return res
def _dpow(x, y, n):
"""
d^n (x**y) / dx^n
"""
if n < 0:
raise ValueError("invalid derivative order")
elif n > y:
return 0
else:
return poch(y - n + 1, n) * x**(y - n)
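# Quick illustration of _dpow: the first derivative of x**3 at x = 2 is
# 3 * 2**2 == 12, and differentiating more times than the power gives 0.
def _dpow_example():
    assert_allclose(_dpow(2.0, 3, 1), 12.0)
    assert_equal(_dpow(2.0, 3, 4), 0)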
def _ppoly2d_eval(c, xs, xnew, ynew, nu=None):
"""
Straightforward evaluation of 2D piecewise polynomial
"""
if nu is None:
nu = (0, 0)
out = np.empty((len(xnew),), dtype=c.dtype)
nx, ny = c.shape[:2]
for jout, (x, y) in enumerate(zip(xnew, ynew)):
if not ((xs[0][0] <= x <= xs[0][-1]) and
(xs[1][0] <= y <= xs[1][-1])):
out[jout] = np.nan
continue
j1 = np.searchsorted(xs[0], x) - 1
j2 = np.searchsorted(xs[1], y) - 1
s1 = x - xs[0][j1]
s2 = y - xs[1][j2]
val = 0
for k1 in range(c.shape[0]):
for k2 in range(c.shape[1]):
val += (c[nx-k1-1,ny-k2-1,j1,j2]
* _dpow(s1, k1, nu[0])
* _dpow(s2, k2, nu[1]))
out[jout] = val
return out
def _ppoly3d_eval(c, xs, xnew, ynew, znew, nu=None):
"""
Straightforward evaluation of 3D piecewise polynomial
"""
if nu is None:
nu = (0, 0, 0)
out = np.empty((len(xnew),), dtype=c.dtype)
nx, ny, nz = c.shape[:3]
for jout, (x, y, z) in enumerate(zip(xnew, ynew, znew)):
if not ((xs[0][0] <= x <= xs[0][-1]) and
(xs[1][0] <= y <= xs[1][-1]) and
(xs[2][0] <= z <= xs[2][-1])):
out[jout] = np.nan
continue
j1 = np.searchsorted(xs[0], x) - 1
j2 = np.searchsorted(xs[1], y) - 1
j3 = np.searchsorted(xs[2], z) - 1
s1 = x - xs[0][j1]
s2 = y - xs[1][j2]
s3 = z - xs[2][j3]
val = 0
for k1 in range(c.shape[0]):
for k2 in range(c.shape[1]):
for k3 in range(c.shape[2]):
val += (c[nx-k1-1,ny-k2-1,nz-k3-1,j1,j2,j3]
* _dpow(s1, k1, nu[0])
* _dpow(s2, k2, nu[1])
* _dpow(s3, k3, nu[2]))
out[jout] = val
return out
def _ppoly4d_eval(c, xs, xnew, ynew, znew, unew, nu=None):
"""
Straightforward evaluation of 4D piecewise polynomial
"""
if nu is None:
nu = (0, 0, 0, 0)
out = np.empty((len(xnew),), dtype=c.dtype)
mx, my, mz, mu = c.shape[:4]
for jout, (x, y, z, u) in enumerate(zip(xnew, ynew, znew, unew)):
if not ((xs[0][0] <= x <= xs[0][-1]) and
(xs[1][0] <= y <= xs[1][-1]) and
(xs[2][0] <= z <= xs[2][-1]) and
(xs[3][0] <= u <= xs[3][-1])):
out[jout] = np.nan
continue
j1 = np.searchsorted(xs[0], x) - 1
j2 = np.searchsorted(xs[1], y) - 1
j3 = np.searchsorted(xs[2], z) - 1
j4 = np.searchsorted(xs[3], u) - 1
s1 = x - xs[0][j1]
s2 = y - xs[1][j2]
s3 = z - xs[2][j3]
s4 = u - xs[3][j4]
val = 0
for k1 in range(c.shape[0]):
for k2 in range(c.shape[1]):
for k3 in range(c.shape[2]):
for k4 in range(c.shape[3]):
val += (c[mx-k1-1,my-k2-1,mz-k3-1,mu-k4-1,j1,j2,j3,j4]
* _dpow(s1, k1, nu[0])
* _dpow(s2, k2, nu[1])
* _dpow(s3, k3, nu[2])
* _dpow(s4, k4, nu[3]))
out[jout] = val
return out
class TestRegularGridInterpolator(object):
def _get_sample_4d(self):
# create a 4d grid of 3 points in each dimension
points = [(0., .5, 1.)] * 4
values = np.asarray([0., .5, 1.])
values0 = values[:, np.newaxis, np.newaxis, np.newaxis]
values1 = values[np.newaxis, :, np.newaxis, np.newaxis]
values2 = values[np.newaxis, np.newaxis, :, np.newaxis]
values3 = values[np.newaxis, np.newaxis, np.newaxis, :]
values = (values0 + values1 * 10 + values2 * 100 + values3 * 1000)
return points, values
def _get_sample_4d_2(self):
# create another 4d grid of 3 points in each dimension
points = [(0., .5, 1.)] * 2 + [(0., 5., 10.)] * 2
values = np.asarray([0., .5, 1.])
values0 = values[:, np.newaxis, np.newaxis, np.newaxis]
values1 = values[np.newaxis, :, np.newaxis, np.newaxis]
values2 = values[np.newaxis, np.newaxis, :, np.newaxis]
values3 = values[np.newaxis, np.newaxis, np.newaxis, :]
values = (values0 + values1 * 10 + values2 * 100 + values3 * 1000)
return points, values
def test_list_input(self):
points, values = self._get_sample_4d()
sample = np.asarray([[0.1, 0.1, 1., .9], [0.2, 0.1, .45, .8],
[0.5, 0.5, .5, .5]])
for method in ['linear', 'nearest']:
interp = RegularGridInterpolator(points,
values.tolist(),
method=method)
v1 = interp(sample.tolist())
interp = RegularGridInterpolator(points,
values,
method=method)
v2 = interp(sample)
assert_allclose(v1, v2)
def test_complex(self):
points, values = self._get_sample_4d()
values = values - 2j*values
sample = np.asarray([[0.1, 0.1, 1., .9], [0.2, 0.1, .45, .8],
[0.5, 0.5, .5, .5]])
for method in ['linear', 'nearest']:
interp = RegularGridInterpolator(points, values,
method=method)
rinterp = RegularGridInterpolator(points, values.real,
method=method)
iinterp = RegularGridInterpolator(points, values.imag,
method=method)
v1 = interp(sample)
v2 = rinterp(sample) + 1j*iinterp(sample)
assert_allclose(v1, v2)
def test_linear_xi1d(self):
points, values = self._get_sample_4d_2()
interp = RegularGridInterpolator(points, values)
sample = np.asarray([0.1, 0.1, 10., 9.])
wanted = 1001.1
assert_array_almost_equal(interp(sample), wanted)
def test_linear_xi3d(self):
points, values = self._get_sample_4d()
interp = RegularGridInterpolator(points, values)
sample = np.asarray([[0.1, 0.1, 1., .9], [0.2, 0.1, .45, .8],
[0.5, 0.5, .5, .5]])
wanted = np.asarray([1001.1, 846.2, 555.5])
assert_array_almost_equal(interp(sample), wanted)
def test_nearest(self):
points, values = self._get_sample_4d()
interp = RegularGridInterpolator(points, values, method="nearest")
sample = np.asarray([0.1, 0.1, .9, .9])
wanted = 1100.
assert_array_almost_equal(interp(sample), wanted)
sample = np.asarray([0.1, 0.1, 0.1, 0.1])
wanted = 0.
assert_array_almost_equal(interp(sample), wanted)
sample = np.asarray([0., 0., 0., 0.])
wanted = 0.
assert_array_almost_equal(interp(sample), wanted)
sample = np.asarray([1., 1., 1., 1.])
wanted = 1111.
assert_array_almost_equal(interp(sample), wanted)
sample = np.asarray([0.1, 0.4, 0.6, 0.9])
wanted = 1055.
assert_array_almost_equal(interp(sample), wanted)
def test_linear_edges(self):
points, values = self._get_sample_4d()
interp = RegularGridInterpolator(points, values)
sample = np.asarray([[0., 0., 0., 0.], [1., 1., 1., 1.]])
wanted = np.asarray([0., 1111.])
assert_array_almost_equal(interp(sample), wanted)
def test_valid_create(self):
# create a 2d grid of 3 points in each dimension
points = [(0., .5, 1.), (0., 1., .5)]
values = np.asarray([0., .5, 1.])
values0 = values[:, np.newaxis]
values1 = values[np.newaxis, :]
values = (values0 + values1 * 10)
assert_raises(ValueError, RegularGridInterpolator, points, values)
points = [((0., .5, 1.), ), (0., .5, 1.)]
assert_raises(ValueError, RegularGridInterpolator, points, values)
points = [(0., .5, .75, 1.), (0., .5, 1.)]
assert_raises(ValueError, RegularGridInterpolator, points, values)
points = [(0., .5, 1.), (0., .5, 1.), (0., .5, 1.)]
assert_raises(ValueError, RegularGridInterpolator, points, values)
points = [(0., .5, 1.), (0., .5, 1.)]
assert_raises(ValueError, RegularGridInterpolator, points, values,
method="undefmethod")
def test_valid_call(self):
points, values = self._get_sample_4d()
interp = RegularGridInterpolator(points, values)
sample = np.asarray([[0., 0., 0., 0.], [1., 1., 1., 1.]])
assert_raises(ValueError, interp, sample, "undefmethod")
sample = np.asarray([[0., 0., 0.], [1., 1., 1.]])
assert_raises(ValueError, interp, sample)
sample = np.asarray([[0., 0., 0., 0.], [1., 1., 1., 1.1]])
assert_raises(ValueError, interp, sample)
def test_out_of_bounds_extrap(self):
points, values = self._get_sample_4d()
interp = RegularGridInterpolator(points, values, bounds_error=False,
fill_value=None)
sample = np.asarray([[-.1, -.1, -.1, -.1], [1.1, 1.1, 1.1, 1.1],
[21, 2.1, -1.1, -11], [2.1, 2.1, -1.1, -1.1]])
wanted = np.asarray([0., 1111., 11., 11.])
assert_array_almost_equal(interp(sample, method="nearest"), wanted)
wanted = np.asarray([-111.1, 1222.1, -11068., -1186.9])
assert_array_almost_equal(interp(sample, method="linear"), wanted)
def test_out_of_bounds_extrap2(self):
points, values = self._get_sample_4d_2()
interp = RegularGridInterpolator(points, values, bounds_error=False,
fill_value=None)
sample = np.asarray([[-.1, -.1, -.1, -.1], [1.1, 1.1, 1.1, 1.1],
[21, 2.1, -1.1, -11], [2.1, 2.1, -1.1, -1.1]])
wanted = np.asarray([0., 11., 11., 11.])
assert_array_almost_equal(interp(sample, method="nearest"), wanted)
wanted = np.asarray([-12.1, 133.1, -1069., -97.9])
assert_array_almost_equal(interp(sample, method="linear"), wanted)
def test_out_of_bounds_fill(self):
points, values = self._get_sample_4d()
interp = RegularGridInterpolator(points, values, bounds_error=False,
fill_value=np.nan)
sample = np.asarray([[-.1, -.1, -.1, -.1], [1.1, 1.1, 1.1, 1.1],
[2.1, 2.1, -1.1, -1.1]])
wanted = np.asarray([np.nan, np.nan, np.nan])
assert_array_almost_equal(interp(sample, method="nearest"), wanted)
assert_array_almost_equal(interp(sample, method="linear"), wanted)
sample = np.asarray([[0.1, 0.1, 1., .9], [0.2, 0.1, .45, .8],
[0.5, 0.5, .5, .5]])
wanted = np.asarray([1001.1, 846.2, 555.5])
assert_array_almost_equal(interp(sample), wanted)
def test_nearest_compare_qhull(self):
points, values = self._get_sample_4d()
interp = RegularGridInterpolator(points, values, method="nearest")
points_qhull = itertools.product(*points)
points_qhull = [p for p in points_qhull]
points_qhull = np.asarray(points_qhull)
values_qhull = values.reshape(-1)
interp_qhull = NearestNDInterpolator(points_qhull, values_qhull)
sample = np.asarray([[0.1, 0.1, 1., .9], [0.2, 0.1, .45, .8],
[0.5, 0.5, .5, .5]])
assert_array_almost_equal(interp(sample), interp_qhull(sample))
def test_linear_compare_qhull(self):
points, values = self._get_sample_4d()
interp = RegularGridInterpolator(points, values)
points_qhull = itertools.product(*points)
points_qhull = [p for p in points_qhull]
points_qhull = np.asarray(points_qhull)
values_qhull = values.reshape(-1)
interp_qhull = LinearNDInterpolator(points_qhull, values_qhull)
sample = np.asarray([[0.1, 0.1, 1., .9], [0.2, 0.1, .45, .8],
[0.5, 0.5, .5, .5]])
assert_array_almost_equal(interp(sample), interp_qhull(sample))
def test_duck_typed_values(self):
x = np.linspace(0, 2, 5)
y = np.linspace(0, 1, 7)
values = MyValue((5, 7))
for method in ('nearest', 'linear'):
interp = RegularGridInterpolator((x, y), values,
method=method)
v1 = interp([0.4, 0.7])
interp = RegularGridInterpolator((x, y), values._v,
method=method)
v2 = interp([0.4, 0.7])
assert_allclose(v1, v2)
def test_invalid_fill_value(self):
np.random.seed(1234)
x = np.linspace(0, 2, 5)
y = np.linspace(0, 1, 7)
values = np.random.rand(5, 7)
# integers can be cast to floats
RegularGridInterpolator((x, y), values, fill_value=1)
# complex values cannot
assert_raises(ValueError, RegularGridInterpolator,
(x, y), values, fill_value=1+2j)
def test_fillvalue_type(self):
# from #3703; test that interpolator object construction succeeds
values = np.ones((10, 20, 30), dtype='>f4')
points = [np.arange(n) for n in values.shape]
xi = [(1, 1, 1)]
interpolator = RegularGridInterpolator(points, values)
interpolator = RegularGridInterpolator(points, values, fill_value=0.)
class MyValue(object):
"""
Minimal indexable object
"""
def __init__(self, shape):
self.ndim = 2
self.shape = shape
self._v = np.arange(np.prod(shape)).reshape(shape)
def __getitem__(self, idx):
return self._v[idx]
def __array_interface__(self):
return None
def __array__(self):
raise RuntimeError("No array representation")
class TestInterpN(object):
def _sample_2d_data(self):
x = np.arange(1, 6)
x = np.array([.5, 2., 3., 4., 5.5])
y = np.arange(1, 6)
y = np.array([.5, 2., 3., 4., 5.5])
z = np.array([[1, 2, 1, 2, 1], [1, 2, 1, 2, 1], [1, 2, 3, 2, 1],
[1, 2, 2, 2, 1], [1, 2, 1, 2, 1]])
return x, y, z
def test_spline_2d(self):
x, y, z = self._sample_2d_data()
lut = RectBivariateSpline(x, y, z)
xi = np.array([[1, 2.3, 5.3, 0.5, 3.3, 1.2, 3],
[1, 3.3, 1.2, 4.0, 5.0, 1.0, 3]]).T
assert_array_almost_equal(interpn((x, y), z, xi, method="splinef2d"),
lut.ev(xi[:, 0], xi[:, 1]))
def test_list_input(self):
x, y, z = self._sample_2d_data()
xi = np.array([[1, 2.3, 5.3, 0.5, 3.3, 1.2, 3],
[1, 3.3, 1.2, 4.0, 5.0, 1.0, 3]]).T
for method in ['nearest', 'linear', 'splinef2d']:
v1 = interpn((x, y), z, xi, method=method)
v2 = interpn((x.tolist(), y.tolist()), z.tolist(),
xi.tolist(), method=method)
assert_allclose(v1, v2, err_msg=method)
def test_spline_2d_outofbounds(self):
x = np.array([.5, 2., 3., 4., 5.5])
y = np.array([.5, 2., 3., 4., 5.5])
z = np.array([[1, 2, 1, 2, 1], [1, 2, 1, 2, 1], [1, 2, 3, 2, 1],
[1, 2, 2, 2, 1], [1, 2, 1, 2, 1]])
lut = RectBivariateSpline(x, y, z)
xi = np.array([[1, 2.3, 6.3, 0.5, 3.3, 1.2, 3],
[1, 3.3, 1.2, -4.0, 5.0, 1.0, 3]]).T
actual = interpn((x, y), z, xi, method="splinef2d",
bounds_error=False, fill_value=999.99)
expected = lut.ev(xi[:, 0], xi[:, 1])
expected[2:4] = 999.99
assert_array_almost_equal(actual, expected)
# no extrapolation for splinef2d
assert_raises(ValueError, interpn, (x, y), z, xi, method="splinef2d",
bounds_error=False, fill_value=None)
def _sample_4d_data(self):
points = [(0., .5, 1.)] * 2 + [(0., 5., 10.)] * 2
values = np.asarray([0., .5, 1.])
values0 = values[:, np.newaxis, np.newaxis, np.newaxis]
values1 = values[np.newaxis, :, np.newaxis, np.newaxis]
values2 = values[np.newaxis, np.newaxis, :, np.newaxis]
values3 = values[np.newaxis, np.newaxis, np.newaxis, :]
values = (values0 + values1 * 10 + values2 * 100 + values3 * 1000)
return points, values
def test_linear_4d(self):
# create a 4d grid of 3 points in each dimension
points, values = self._sample_4d_data()
interp_rg = RegularGridInterpolator(points, values)
sample = np.asarray([[0.1, 0.1, 10., 9.]])
wanted = interpn(points, values, sample, method="linear")
assert_array_almost_equal(interp_rg(sample), wanted)
def test_4d_linear_outofbounds(self):
# create a 4d grid of 3 points in each dimension
points, values = self._sample_4d_data()
sample = np.asarray([[0.1, -0.1, 10.1, 9.]])
wanted = 999.99
actual = interpn(points, values, sample, method="linear",
bounds_error=False, fill_value=999.99)
assert_array_almost_equal(actual, wanted)
def test_nearest_4d(self):
# create a 4d grid of 3 points in each dimension
points, values = self._sample_4d_data()
interp_rg = RegularGridInterpolator(points, values, method="nearest")
sample = np.asarray([[0.1, 0.1, 10., 9.]])
wanted = interpn(points, values, sample, method="nearest")
assert_array_almost_equal(interp_rg(sample), wanted)
def test_4d_nearest_outofbounds(self):
# create a 4d grid of 3 points in each dimension
points, values = self._sample_4d_data()
sample = np.asarray([[0.1, -0.1, 10.1, 9.]])
wanted = 999.99
actual = interpn(points, values, sample, method="nearest",
bounds_error=False, fill_value=999.99)
assert_array_almost_equal(actual, wanted)
def test_xi_1d(self):
# verify that 1D xi works as expected
points, values = self._sample_4d_data()
sample = np.asarray([0.1, 0.1, 10., 9.])
v1 = interpn(points, values, sample, bounds_error=False)
v2 = interpn(points, values, sample[None,:], bounds_error=False)
assert_allclose(v1, v2)
def test_xi_nd(self):
# verify that higher-d xi works as expected
points, values = self._sample_4d_data()
np.random.seed(1234)
sample = np.random.rand(2, 3, 4)
v1 = interpn(points, values, sample, method='nearest',
bounds_error=False)
assert_equal(v1.shape, (2, 3))
v2 = interpn(points, values, sample.reshape(-1, 4),
method='nearest', bounds_error=False)
assert_allclose(v1, v2.reshape(v1.shape))
def test_xi_broadcast(self):
# verify that the interpolators broadcast xi
x, y, values = self._sample_2d_data()
points = (x, y)
xi = np.linspace(0, 1, 2)
yi = np.linspace(0, 3, 3)
for method in ['nearest', 'linear', 'splinef2d']:
sample = (xi[:,None], yi[None,:])
v1 = interpn(points, values, sample, method=method,
bounds_error=False)
assert_equal(v1.shape, (2, 3))
xx, yy = np.meshgrid(xi, yi)
sample = np.c_[xx.T.ravel(), yy.T.ravel()]
v2 = interpn(points, values, sample,
method=method, bounds_error=False)
assert_allclose(v1, v2.reshape(v1.shape))
def test_nonscalar_values(self):
# Verify that non-scalar valued values also works
points, values = self._sample_4d_data()
np.random.seed(1234)
values = np.random.rand(3, 3, 3, 3, 6)
sample = np.random.rand(7, 11, 4)
for method in ['nearest', 'linear']:
v = interpn(points, values, sample, method=method,
bounds_error=False)
assert_equal(v.shape, (7, 11, 6), err_msg=method)
vs = [interpn(points, values[...,j], sample, method=method,
bounds_error=False)
for j in range(6)]
v2 = np.array(vs).transpose(1, 2, 0)
assert_allclose(v, v2, err_msg=method)
        # Vector-valued splines are not supported with fitpack
assert_raises(ValueError, interpn, points, values, sample,
method='splinef2d')
def test_complex(self):
x, y, values = self._sample_2d_data()
points = (x, y)
values = values - 2j*values
sample = np.array([[1, 2.3, 5.3, 0.5, 3.3, 1.2, 3],
[1, 3.3, 1.2, 4.0, 5.0, 1.0, 3]]).T
for method in ['linear', 'nearest']:
v1 = interpn(points, values, sample, method=method)
v2r = interpn(points, values.real, sample, method=method)
v2i = interpn(points, values.imag, sample, method=method)
v2 = v2r + 1j*v2i
assert_allclose(v1, v2)
        # Complex-valued data not supported by splinef2d
_assert_warns(np.ComplexWarning, interpn, points, values,
sample, method='splinef2d')
def test_duck_typed_values(self):
x = np.linspace(0, 2, 5)
y = np.linspace(0, 1, 7)
values = MyValue((5, 7))
for method in ('nearest', 'linear'):
v1 = interpn((x, y), values, [0.4, 0.7], method=method)
v2 = interpn((x, y), values._v, [0.4, 0.7], method=method)
assert_allclose(v1, v2)
def test_matrix_input(self):
x = np.linspace(0, 2, 5)
y = np.linspace(0, 1, 7)
values = np.matrix(np.random.rand(5, 7))
sample = np.random.rand(3, 7, 2)
for method in ('nearest', 'linear', 'splinef2d'):
v1 = interpn((x, y), values, sample, method=method)
v2 = interpn((x, y), np.asarray(values), sample, method=method)
assert_allclose(v1, np.asmatrix(v2))
| mbayon/TFG-MachineLearning | venv/lib/python3.6/site-packages/scipy/interpolate/tests/test_interpolate.py | Python | mit | 102,313 | 0.000958 |
#!/usr/bin/env python
#coding: utf-8
class Solution:
# @param prices, a list of integer
# @return an integer
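    # Single pass: keep the minimum price seen so far; the answer is the
    # largest value of (current price - minimum so far) over the scan.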
    def maxProfit(self, prices):
if not prices: return 0
n = len(prices)
min_p = prices[0]
        max_profit = 0
for i in range(1, n):
if prices[i] < min_p:
min_p = prices[i]
cur_profit = prices[i] - min_p
            if cur_profit > max_profit:
                max_profit = cur_profit
        return max_profit
if __name__ == '__main__':
s = Solution()
assert 0 == s.maxProfit([1])
assert 1 == s.maxProfit([1, 2])
assert 0 == s.maxProfit([2, 1])
assert 8 == s.maxProfit([1,3,9])
| wh-acmer/minixalpha-acm | LeetCode/Python/best_time_to_buy_and_sell_stock.py | Python | mit | 708 | 0.008475 |
from django.utils.encoding import force_text
from .models import Tree
def get_survey(trigger, connection):
"""Returns a survey only if it matches the connection's tenant."""
from decisiontree.multitenancy.utils import multitenancy_enabled
queryset = Tree.objects.filter(trigger__iexact=trigger)
if multitenancy_enabled():
tenant = connection.backend.tenantlink.tenant
queryset = queryset.filter(tenantlink__tenant=tenant)
return queryset.first()
def parse_tags(tagstring):
"""
Parses tag input, with multiple word input being activated and
delineated by commas and double quotes. Quotes take precedence, so
they may contain commas.
Returns a sorted list of unique tag names.
Ported from Jonathan Buchanan's `django-tagging
<http://django-tagging.googlecode.com/>`_
"""
if not tagstring:
return []
tagstring = force_text(tagstring)
# Special case - if there are no commas or double quotes in the
# input, we don't *do* a recall... I mean, we know we only need to
# split on spaces.
if u',' not in tagstring and u'"' not in tagstring:
words = list(set(split_strip(tagstring, u' ')))
words.sort()
return words
words = []
buffer = []
# Defer splitting of non-quoted sections until we know if there are
# any unquoted commas.
to_be_split = []
saw_loose_comma = False
open_quote = False
i = iter(tagstring)
try:
while True:
            c = next(i)
if c == u'"':
if buffer:
to_be_split.append(u''.join(buffer))
buffer = []
# Find the matching quote
open_quote = True
                c = next(i)
while c != u'"':
buffer.append(c)
                    c = next(i)
if buffer:
word = u''.join(buffer).strip()
if word:
words.append(word)
buffer = []
open_quote = False
else:
if not saw_loose_comma and c == u',':
saw_loose_comma = True
buffer.append(c)
except StopIteration:
# If we were parsing an open quote which was never closed treat
# the buffer as unquoted.
if buffer:
if open_quote and u',' in buffer:
saw_loose_comma = True
to_be_split.append(u''.join(buffer))
if to_be_split:
if saw_loose_comma:
delimiter = u','
else:
delimiter = u' '
for chunk in to_be_split:
words.extend(split_strip(chunk, delimiter))
words = list(set(words))
words.sort()
return words
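# --- Hedged usage sketch (not part of the original module) -----------------
# The calls below illustrate how parse_tags() treats whitespace, loose commas
# and double quotes; the expected results are inferred from the rules above
# and from the django-tagging behaviour this was ported from.
def _parse_tags_examples():
    # No commas or quotes: split on whitespace, deduplicate, sort.
    assert parse_tags(u'apple banana apple') == [u'apple', u'banana']
    # A loose comma switches the delimiter for unquoted parts to commas.
    assert parse_tags(u'apple, banana fruit') == [u'apple', u'banana fruit']
    # Double quotes protect embedded commas.
    assert parse_tags(u'"red, sweet" apple') == [u'apple', u'red, sweet']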
def split_strip(string, delimiter=u','):
"""
Splits ``string`` on ``delimiter``, stripping each resulting string
and returning a list of non-empty strings.
Ported from Jonathan Buchanan's `django-tagging
<http://django-tagging.googlecode.com/>`_
"""
if not string:
return []
words = [w.strip() for w in string.split(delimiter)]
return [w for w in words if w]
def edit_string_for_tags(tags):
"""
Given list of ``Tag`` instances, creates a string representation of
the list suitable for editing by the user, such that submitting the
given string representation back without changing it will give the
same list of tags.
Tag names which contain commas will be double quoted.
If any tag name which isn't being quoted contains whitespace, the
resulting string of tag names will be comma-delimited, otherwise
it will be space-delimited.
Ported from Jonathan Buchanan's `django-tagging
<http://django-tagging.googlecode.com/>`_
"""
names = []
for tag in tags:
name = tag.name
if u',' in name or u' ' in name:
names.append('"%s"' % name)
else:
names.append(name)
return u', '.join(sorted(names))
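# --- Hedged round-trip sketch (not part of the original module) ------------
# edit_string_for_tags() and parse_tags() are intended to be inverses for
# editing purposes.  The Tag stand-in below is hypothetical: any object with
# a `name` attribute would do.
def _edit_string_round_trip_example():
    from collections import namedtuple
    Tag = namedtuple('Tag', 'name')
    edited = edit_string_for_tags([Tag(u'red, sweet'), Tag(u'apple')])
    # The quoted tag sorts first because '"' precedes letters: u'"red, sweet", apple'
    assert parse_tags(edited) == [u'apple', u'red, sweet']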
| caktus/rapidsms-decisiontree-app | decisiontree/utils.py | Python | bsd-3-clause | 4,043 | 0 |
import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from java.util import Vector
def addTemplate(core):
mobileTemplate = MobileTemplate()
mobileTemplate.setCreatureName('greck_mugger')
mobileTemplate.setLevel(31)
mobileTemplate.setDifficulty(Difficulty.NORMAL)
mobileTemplate.setMinSpawnDistance(4)
mobileTemplate.setMaxSpawnDistance(8)
mobileTemplate.setDeathblow(False)
mobileTemplate.setScale(1)
mobileTemplate.setSocialGroup("olag greck")
mobileTemplate.setAssistRange(6)
mobileTemplate.setStalker(True)
mobileTemplate.setOptionsBitmask(128)
templates = Vector()
templates.add('object/mobile/shared_greck_thug_f_01.iff')
templates.add('object/mobile/shared_greck_thug_f_02.iff')
templates.add('object/mobile/shared_greck_thug_f_03.iff')
templates.add('object/mobile/shared_greck_thug_m_01.iff')
templates.add('object/mobile/shared_greck_thug_m_02.iff')
templates.add('object/mobile/shared_greck_thug_m_03.iff')
templates.add('object/mobile/shared_greck_thug_m_04.iff')
templates.add('object/mobile/shared_greck_thug_m_05.iff')
mobileTemplate.setTemplates(templates)
weaponTemplates = Vector()
weapontemplate = WeaponTemplate('object/weapon/melee/unarmed/shared_unarmed_default.iff', WeaponType.UNARMED, 1.0, 6, 'kinetic')
weaponTemplates.add(weapontemplate)
mobileTemplate.setWeaponTemplateVector(weaponTemplates)
attacks = Vector()
mobileTemplate.setDefaultAttack('meleeHit')
mobileTemplate.setAttacks(attacks)
lootPoolNames_1 = ['Junk']
lootPoolChances_1 = [100]
lootGroupChance_1 = 100
mobileTemplate.addToLootGroups(lootPoolNames_1,lootPoolChances_1,lootGroupChance_1)
core.spawnService.addMobileTemplate('greck_mugger', mobileTemplate)
	return
| agry/NGECore2 | scripts/mobiles/corellia/greck_mugger.py | Python | lgpl-3.0 | 1,920 | 0.022917 |
class BinaryOperator:
def __init__(self, left, right):
self.left = left
self.right = right
def __str__(self):
return '%s(%s, %s)' % (self.__class__.__name__, self.left, self.right)
def toCalchas(self, op):
return '(%s%s%s)' % (self.left.toCalchas(), op, self.right.toCalchas())
class Plus(BinaryOperator):
def toCalchas(self):
return super().toCalchas('+')
class Divide(BinaryOperator):
def toCalchas(self):
return super().toCalchas('/')
class Times(BinaryOperator):
def toCalchas(self):
return super().toCalchas('*')
class Minus(BinaryOperator):
def toCalchas(self):
return super().toCalchas('-')
class Pow(BinaryOperator):
def toCalchas(self):
return super().toCalchas('**')
class Arrow(BinaryOperator):
def toCalchas(self):
return '%s,%s' % (self.left.toCalchas(), self.right.toCalchas())
class UnaryOperator:
def __init__(self, val):
self.val = val
def __str__(self):
return '%s(%s)' % (self.__class__.__name__, self.val)
class Opp(UnaryOperator):
def toCalchas(self):
return '('+ '-' + self.val.toCalchas() +')'
class Fact(UnaryOperator):
def toCalchas(self):
return '(' + self.val.toCalchas() +'!)'
class Diff:
def __init__(self, val, nb):
self.val = val
self.nb=nb
def __str__(self):
return 'Diff('+str(self.val)+','+str(self.nb)+')'
def toCalchas(self):
return 'diff('+self.val.toCalchas()+','+self.val.args[0].toCalchas()+','+str(self.nb)+')'
class List:
def __init__(self, l):
self.list = l
def __str__(self):
if len(self.list)==0:
return 'List([])'
s = 'List(['+str(self.list[0])
for e in self.list[1:]:
s = s + ', ' + str(e)
return s+'])'
def __getitem__(self,index):
return self.list[index]
def __add__(self, other):
return List(self.list+other.list)
def __len__(self):
return len(self.list)
def getList(self):
return self.list
def toCalchas(self):
if len(self.list)==0:
return ''
s = self.list[0].toCalchas()
for e in self.list[1:]:
s = s + ', ' + e.toCalchas()
return s
class FunctionCall:
def __init__(self, function, args):
self.function = function
self.args = args
def __str__(self):
return 'FunctionCall('+str(self.function)+','+str(self.args)+')'
def toCalchas(self):
if type(self.function)==Id:
return self.translate(self.function.toCalchas(), self.args)
def translate(self, function, args):
def bigoppTranslation(functionName, args):
if len(args)==0:
return ''
if len(args)==1:
return args[0].toCalchas()
if isinstance(args[-1], List):
return '%s(%s, %s, %s, %s)'%(functionName, bigoppTranslation(functionName, args[0:-1]),args[-1][0].toCalchas(),args[-1][1].toCalchas(),args[-1][2].toCalchas())
return '%s(%s, %s)'%(functionName, bigoppTranslation(functionName, args[0:-1]),args[-1].toCalchas())
mathematicatoCalchas={'Sqrt' : (lambda a: 'sqrt('+a[0].toCalchas()+')'),
'Sin' : (lambda a: 'sin('+a[0].toCalchas()+')'),
'Cos' : (lambda a: 'cos('+a[0].toCalchas()+')'),
'Tan' : (lambda a: 'tan('+a[0].toCalchas()+')'),
'Arccos' : (lambda a: 'acos('+a[0].toCalchas()+')'),
'Arcsin' : (lambda a: 'asin('+a[0].toCalchas()+')'),
'Arctan' : (lambda a: 'atan('+a[0].toCalchas()+')'),
'Sum' : (lambda a: bigoppTranslation("sum", a)),
'Integrate' : (lambda a: bigoppTranslation("int", [a[0]]+list(reversed(a[1:])))),
'N' : (lambda a: 'N('+a.toCalchas()+')'),
'D' : (lambda a: 'diff('+a[0].toCalchas()+', '+', '.join([l.toCalchas() for l in a[1:]])+')'),
'Exp' : (lambda a: 'exp('+a.toCalchas()+')'),
'Simplify' : (lambda a: 'simplify('+a.toCalchas()+')'),
'Power' : (lambda a: 'Pow('+a.toCalchas()+')'),
'Log' : (lambda a: 'log('+List(list(reversed(a.getList()))).toCalchas()+')'),
'Log10' : (lambda a: 'lg('+a[0].toCalchas()+')'),
'Log2' : (lambda a: 'lb('+a[0].toCalchas()+')'),
'Factorial' : (lambda a: '('+a[0].toCalchas()+'!)'),
'Abs' : (lambda a: 'Abs('+a[0].toCalchas()+')'),
'Ceiling' : (lambda a: 'ceiling('+a[0].toCalchas()+')'),
'Floor' : (lambda a: 'floor('+a[0].toCalchas()+')'),
'Limit' : (lambda a: 'limit('+a[0].toCalchas() +','+ a[1].toCalchas()+')'),
'Solve' : (lambda a: 'solve(['+a[0].toCalchas() +'],['+ a[1].toCalchas()+'])'),
'Expand' : (lambda a: 'expand('+a.toCalchas()+')'),
'Factor' : (lambda a: 'factor('+a.toCalchas()+')'),
'Prime' : (lambda a: 'prime('+a.toCalchas()+')'),
'PrimeQ' : (lambda a: 'isprime('+a.toCalchas()+')'),
}
for name in mathematicatoCalchas.keys():
if name == function:
return '('+mathematicatoCalchas[name](args)+')'
return '('+function+'('+ self.args.toCalchas() +')'+')'
class Id:
def __init__(self, id):
self.id=id
def __str__(self):
return 'Id(\''+str(self.id)+'\')'
def toCalchas(self):
return self.translateId(self.id)
def translateId(self, id):
mathematicatoCalchas={'Infinity' : 'oo',
'I' : 'I',
'Pi' : 'pi',
'GoldenRatio' : 'GoldenRatio',
'EulerGamma' : 'EulerGamma',
}
if id in mathematicatoCalchas.keys():
return mathematicatoCalchas[id]
return str(id)
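# --- Hedged usage sketch (not part of the original module) -----------------
# A small AST built from the classes above, and the Calchas strings that
# toCalchas() is expected to produce for it.
def _mathematica_tree_example():
    expr = Plus(Id('x'), Pow(Id('y'), Id('2')))
    assert expr.toCalchas() == '(x+(y**2))'
    call = FunctionCall(Id('Sin'), List([Id('x')]))
    assert call.toCalchas() == '(sin(x))'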
| iScienceLuvr/PPP-CAS | ppp_cas/mathematicaTree.py | Python | mit | 6,347 | 0.019064 |
class __WorkerPool__:
def create_webworker(self, cpuid):
## this is lazy because if the blob is created when the js is first executed,
## then it will pick all functions of `window` but they will be `undefined`
## if their definition comes after the construction of this singleton.
print 'creating blob'
## having the worker report back the current time to the main thread allows
## some gauge of its CPU load, this can be average over time, and the user
## could call something like `worker.how_busy()` which is some relative value.
header = [
'setInterval(',
' function(){',
' self.postMessage({time_update:(new Date()).getTime()});',
' }, 100',
');',
## TODO other builtins prototype hacks. see above.
'Array.prototype.append = function(a) {this.push(a);};',
]
## this is something extra stuff injected from NW.js
## that should not be inserted into the webworker.
nwjs_skip = ('Buffer', 'AppView', 'WebView')
for name in dir(window):
if name in nwjs_skip:
continue
ob = window[name]
if ob is undefined:
print 'WARNING: object in toplevel namespace window is undefined ->' + name
elif typeof(ob) == 'function':
## should actually check function code for `[ native code ]` and skip those.
header.append( 'var ' + name + '=' + ob.toString() + ';\n' )
for subname in dir(ob.prototype):
sob = ob.prototype[subname]
header.append(name + '.prototype.' +subname + '=' + sob.toString() + ';\n' )
#elif typeof(ob) == 'object':
# header.append( 'var ' + name + '=' + ob.toString() + ';\n' )
xlibs = []
for name in self.extras:
if '.' in name:
print 'import webworker submodule: ' + name
mod = name.split('.')[0]
xname = name.split('.')[1]
ob = eval(name)
if typeof(ob) == 'object': ## copy objects with static methods
print 'import object: ' + xname
header.append( name + '= {' )
for sname in Object.keys(ob):
subob = ob[sname]
ok = True
try:
tmp = eval("("+subob+")")
except:
ok = False
if ok:
print 'import->: ' + sname
header.append( '"'+sname + '":(' + ob[sname] +')' )
header.append(',\n')
header.pop()
header.append('};\n')
#if mod not in xlibs:
# print 'new module: '+mod
# header.append('var ' + mod + '= {};' )
# xlibs.append(mod)
else:
print 'import webworker module: ' + name
header.append( 'var ' + name + '= {};\n' )
modulemain = window[name]
for xname in dir(modulemain):
ob = modulemain[xname]
if typeof(ob) == 'function':
print 'import class: ' + xname
header.append( name + '.' + xname + '=' + ob.toString() + ';\n' )
if ob.prototype: ## copy methods
#for method_name in dir(ob.prototype):
for method_name in Object.keys(ob.prototype):
if method_name == 'constructor': continue
ok = True
try:
## getting some properties can throw deprecation errors
sub = ob.prototype[method_name]
except:
ok = False
if ok and typeof(sub) == 'function':
print 'import method: ' + method_name
header.append(name + '.' + xname + '.prototype.' + method_name + '=' + sub.toString() + ';' )
#header.append(name + '.' + xname + '.' + method_name + '=' + ob.toString() + ';' )
## Web Worker ##
header.extend( self.source )
blob = new(Blob(header, type='application/javascript'))
url = URL.createObjectURL(blob)
ww = new(Worker(url))
#self.thread = ww ## temp, TODO multiple threads
#self.thread.onmessage = self.update.bind(this)
ww._cpuid = cpuid
ww._last_time_update = 0
ww._stream_callbacks = {}
ww._stream_triggers = {}
ww._get_callback = None ## this should actually be a stack of callbacks, right now it assumes its synced
ww._call_callback = None ## this should actually be a stack of callbacks.
ww._callmeth_callback = None ## TODO also should be a stack
## if worker has not sent a time update in awhile ##
ww.busy = lambda : ww._last_time_update - (new(Date())).getTime() < 200
ww.how_busy = lambda : 100.0 / (ww._last_time_update - (new(Date())).getTime())
@bind(ww.spawn_class)
def _spawn_class(cfg):
sid = cfg['spawn']
print '_spawn_class:' + ww._cpuid + '|' + sid
ww._stream_callbacks[sid] = []
ww._stream_triggers[sid] = []
ww.postMessage(cfg)
def onmessage_update(evt):
if self._binrecv:
#print 'got binary....'
id = self._binrecv['id']
btype = self._binrecv['type']
self._binrecv = None
msg = None
switch btype:
case "Float32Array":
msg = new Float32Array(evt.data)
case "Float64Array":
msg = new Float64Array(evt.data)
case "Int32Array":
msg = new Int32Array(evt.data)
if id in ww._stream_callbacks: ## channels
callbacks = ww._stream_callbacks[id]
if len(callbacks):
cb = callbacks.pop()
cb( msg )
else:
ww._stream_triggers[id].push( msg )
else:
raise WebWorkerError('invalid id:' + id)
elif evt.data.time_update: ## the worker uses setInterval to report the time, see `worker.busy()`
ww._last_time_update = evt.data.time_update
elif evt.data.debug:
console.warn( ww._cpuid + '|' + evt.data.debug)
else:
ww._last_time_update = (new(Date())).getTime()
msg = evt.data.message
## restore object class if `proto` was given (user static return type)
if evt.data.proto: msg.__proto__ = eval(evt.data.proto + '.prototype')
if evt.data.GET:
ww._get_callback( msg )
elif evt.data.CALL:
ww._call_callback( msg )
elif evt.data.CALLMETH:
ww._callmeth_callback( msg )
else:
id = evt.data.id
if evt.data.bin:
self._binrecv = {'id':id, 'type':evt.data.bin}
elif id in ww._stream_callbacks: ## channels
callbacks = ww._stream_callbacks[id]
if len(callbacks):
cb = callbacks.pop()
cb( msg )
else:
ww._stream_triggers[id].push( msg )
else:
raise WebWorkerError('invalid id:' + id)
ww.onmessage = onmessage_update
return ww
def __init__(self, src, extras):
## note: src is an array
		## note: thread-ids = `cpu-id|spawned-id` (cpu id and spawn id joined with "|")
self.source = src
self.extras = extras
## each worker in this pool runs on its own CPU core
## how to get number of CPU cores in JS?
self.pool = {}
self.num_spawned = 1 ## must be 1, not zero
def spawn(self, cfg, options):
cpu = 0
autoscale = True
if options is not undefined:
print 'using CPU:'+options.cpu
cpu = options.cpu
autoscale = False
id = str(cpu) + '|' + str(self.num_spawned)
cfg['spawn'] = self.num_spawned
self.num_spawned += 1
if cpu in self.pool:
## this thread could be busy, spawn into it anyways.
print 'reusing cpu already in pool'
self.pool[cpu].spawn_class(cfg)
elif autoscale:
print 'spawn auto scale up'
## first check if any of the other threads are not busy
readythread = None
cpu = len(self.pool.keys())
for cid in self.pool.keys():
thread = self.pool[ cid ]
if not thread.busy():
print 'reusing thread is not busy:' + cid
readythread = thread
cpu = cid
break
if not readythread:
assert cpu not in self.pool.keys()
readythread = self.create_webworker(cpu)
self.pool[cpu] = readythread
readythread.spawn_class(cfg)
else:
## user defined CPU ##
print 'spawn user defined cpu:' + cpu
assert cpu not in self.pool.keys()
readythread = self.create_webworker(cpu)
self.pool[cpu] = readythread
self.pool[cpu].spawn_class(cfg)
return id
def send(self, id=None, message=None):
tid, sid = id.split('|')
if tid not in self.pool:
raise RuntimeError('send: invalid cpu id')
if __is_typed_array(message): ## transferable buffers (no copy, moves data into worker)
bspec = {'send_binary':sid}
if instanceof(message, Float32Array):
bspec['type'] = 'Float32Array'
elif instanceof(message, Float64Array):
bspec['type'] = 'Float64Array'
			elif instanceof(message, Int32Array):
				bspec['type'] = 'Int32Array'
			elif instanceof(message, Int16Array):
				bspec['type'] = 'Int16Array'
			elif instanceof(message, Uint16Array):
				bspec['type'] = 'Uint16Array'
			elif instanceof(message, Uint32Array):
				bspec['type'] = 'Uint32Array'
self.pool[tid].postMessage(bspec) ## object header
self.pool[tid].postMessage(message.buffer, [message.buffer]) ## binary body
else:
try:
self.pool[tid].postMessage({'send':sid, 'message':message})
except:
print 'DataCloneError: can not send data to webworker'
print message
raise RuntimeError('DataCloneError: can not send data to webworker')
def recv(self, id, callback):
if id is undefined:
raise WebWorkerError("undefined id")
tid, sid = id.split('|')
if tid not in self.pool:
raise RuntimeError('send: invalid cpu id')
ww = self.pool[ tid ]
if sid in ww._stream_triggers and ww._stream_triggers[sid].length:
callback( ww._stream_triggers[sid].pop() )
elif sid in ww._stream_callbacks:
ww._stream_callbacks[sid].insert(0, callback)
else:
raise WebWorkerError('webworker.recv - invaid id: '+id)
def get(self, id, attr, callback):
tid, sid = id.split('|')
if tid not in self.pool:
raise RuntimeError('get: invalid cpu id')
ww = self.pool[ tid ]
ww._get_callback = callback
ww.postMessage(
id = sid,
get = attr
)
def call(self, func, args, callback):
#self._call = callback
#self.thread.postMessage({'call':func, 'args':args})
## which CPU do we select? default to `0`, have extra option for CPU?
raise RuntimeError('TODO call plain function in webworker')
def callmeth(self, id, func, args, callback):
tid, sid = id.split('|')
if tid not in self.pool:
raise RuntimeError('callmeth: invalid cpu id')
ww = self.pool[ tid ]
ww._callmeth_callback = callback
ww.postMessage(
id = sid,
callmeth = func,
args = args
)
def select(self, id):
tid, sid = id.split('|')
if tid not in self.pool:
raise RuntimeError('select: invalid cpu id')
if sid not in self.pool[ tid ]._stream_triggers:
raise RuntimeError('select: invalid worker id')
return self.pool[ tid ]._stream_triggers[ sid ]
| pombredanne/Rusthon | src/runtime/builtins_webworker.py | Python | bsd-3-clause | 10,276 | 0.041845 |
"""Helper methods to handle the time in Home Assistant."""
from __future__ import annotations
from contextlib import suppress
import datetime as dt
import re
from typing import Any, cast
import ciso8601
import pytz
import pytz.exceptions as pytzexceptions
import pytz.tzinfo as pytzinfo
from homeassistant.const import MATCH_ALL
DATE_STR_FORMAT = "%Y-%m-%d"
NATIVE_UTC = dt.timezone.utc
UTC = pytz.utc
DEFAULT_TIME_ZONE: dt.tzinfo = pytz.utc
# Copyright (c) Django Software Foundation and individual contributors.
# All rights reserved.
# https://github.com/django/django/blob/master/LICENSE
DATETIME_RE = re.compile(
r"(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})"
r"[T ](?P<hour>\d{1,2}):(?P<minute>\d{1,2})"
r"(?::(?P<second>\d{1,2})(?:\.(?P<microsecond>\d{1,6})\d{0,6})?)?"
r"(?P<tzinfo>Z|[+-]\d{2}(?::?\d{2})?)?$"
)
def set_default_time_zone(time_zone: dt.tzinfo) -> None:
"""Set a default time zone to be used when none is specified.
Async friendly.
"""
global DEFAULT_TIME_ZONE # pylint: disable=global-statement
# NOTE: Remove in the future in favour of typing
assert isinstance(time_zone, dt.tzinfo)
DEFAULT_TIME_ZONE = time_zone
def get_time_zone(time_zone_str: str) -> dt.tzinfo | None:
"""Get time zone from string. Return None if unable to determine.
Async friendly.
"""
try:
return pytz.timezone(time_zone_str)
except pytzexceptions.UnknownTimeZoneError:
return None
def utcnow() -> dt.datetime:
"""Get now in UTC time."""
return dt.datetime.now(NATIVE_UTC)
def now(time_zone: dt.tzinfo | None = None) -> dt.datetime:
"""Get now in specified time zone."""
return dt.datetime.now(time_zone or DEFAULT_TIME_ZONE)
def as_utc(dattim: dt.datetime) -> dt.datetime:
"""Return a datetime as UTC time.
Assumes datetime without tzinfo to be in the DEFAULT_TIME_ZONE.
"""
if dattim.tzinfo == UTC:
return dattim
if dattim.tzinfo is None:
dattim = DEFAULT_TIME_ZONE.localize(dattim) # type: ignore
return dattim.astimezone(UTC)
def as_timestamp(dt_value: dt.datetime) -> float:
"""Convert a date/time into a unix time (seconds since 1970)."""
if hasattr(dt_value, "timestamp"):
parsed_dt: dt.datetime | None = dt_value
else:
parsed_dt = parse_datetime(str(dt_value))
if parsed_dt is None:
raise ValueError("not a valid date/time.")
return parsed_dt.timestamp()
def as_local(dattim: dt.datetime) -> dt.datetime:
"""Convert a UTC datetime object to local time zone."""
if dattim.tzinfo == DEFAULT_TIME_ZONE:
return dattim
if dattim.tzinfo is None:
dattim = UTC.localize(dattim)
return dattim.astimezone(DEFAULT_TIME_ZONE)
def utc_from_timestamp(timestamp: float) -> dt.datetime:
"""Return a UTC time from a timestamp."""
return UTC.localize(dt.datetime.utcfromtimestamp(timestamp))
def start_of_local_day(dt_or_d: dt.date | dt.datetime | None = None) -> dt.datetime:
"""Return local datetime object of start of day from date or datetime."""
if dt_or_d is None:
date: dt.date = now().date()
elif isinstance(dt_or_d, dt.datetime):
date = dt_or_d.date()
else:
date = dt_or_d
return DEFAULT_TIME_ZONE.localize( # type: ignore
dt.datetime.combine(date, dt.time())
)
# Copyright (c) Django Software Foundation and individual contributors.
# All rights reserved.
# https://github.com/django/django/blob/master/LICENSE
def parse_datetime(dt_str: str) -> dt.datetime | None:
"""Parse a string and return a datetime.datetime.
This function supports time zone offsets. When the input contains one,
the output uses a timezone with a fixed offset from UTC.
Raises ValueError if the input is well formatted but not a valid datetime.
Returns None if the input isn't well formatted.
"""
with suppress(ValueError, IndexError):
return ciso8601.parse_datetime(dt_str)
match = DATETIME_RE.match(dt_str)
if not match:
return None
kws: dict[str, Any] = match.groupdict()
if kws["microsecond"]:
kws["microsecond"] = kws["microsecond"].ljust(6, "0")
tzinfo_str = kws.pop("tzinfo")
tzinfo: dt.tzinfo | None = None
if tzinfo_str == "Z":
tzinfo = UTC
elif tzinfo_str is not None:
offset_mins = int(tzinfo_str[-2:]) if len(tzinfo_str) > 3 else 0
offset_hours = int(tzinfo_str[1:3])
offset = dt.timedelta(hours=offset_hours, minutes=offset_mins)
if tzinfo_str[0] == "-":
offset = -offset
tzinfo = dt.timezone(offset)
kws = {k: int(v) for k, v in kws.items() if v is not None}
kws["tzinfo"] = tzinfo
return dt.datetime(**kws)
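# --- Hedged usage sketch (not part of Home Assistant) ----------------------
# parse_datetime() accepts ISO 8601 style strings with an optional UTC offset;
# the checks below only assert properties that follow from the rules above.
def _parse_datetime_example() -> None:
    parsed = parse_datetime("2021-03-01T12:30:00+01:00")
    assert parsed is not None
    assert parsed.hour == 12 and parsed.minute == 30
    assert parsed.utcoffset() == dt.timedelta(hours=1)
    assert parse_datetime("not a timestamp") is None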
def parse_date(dt_str: str) -> dt.date | None:
"""Convert a date string to a date object."""
try:
return dt.datetime.strptime(dt_str, DATE_STR_FORMAT).date()
except ValueError: # If dt_str did not match our format
return None
def parse_time(time_str: str) -> dt.time | None:
"""Parse a time string (00:20:00) into Time object.
Return None if invalid.
"""
parts = str(time_str).split(":")
if len(parts) < 2:
return None
try:
hour = int(parts[0])
minute = int(parts[1])
second = int(parts[2]) if len(parts) > 2 else 0
return dt.time(hour, minute, second)
except ValueError:
# ValueError if value cannot be converted to an int or not in range
return None
def get_age(date: dt.datetime) -> str:
"""
Take a datetime and return its "age" as a string.
The age can be in second, minute, hour, day, month or year. Only the
biggest unit is considered, e.g. if it's 2 days and 3 hours, "2 days" will
be returned.
Make sure date is not in the future, or else it won't work.
"""
def formatn(number: int, unit: str) -> str:
"""Add "unit" if it's plural."""
if number == 1:
return f"1 {unit}"
return f"{number:d} {unit}s"
delta = (now() - date).total_seconds()
rounded_delta = round(delta)
units = ["second", "minute", "hour", "day", "month"]
factors = [60, 60, 24, 30, 12]
selected_unit = "year"
for i, next_factor in enumerate(factors):
if rounded_delta < next_factor:
selected_unit = units[i]
break
delta /= next_factor
rounded_delta = round(delta)
return formatn(rounded_delta, selected_unit)
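# --- Hedged usage sketch (not part of Home Assistant) ----------------------
# Only the largest unit is reported: roughly 40 days ago reads as "1 month",
# since 40 / 30 rounds to 1 under the factors used above.
def _get_age_example() -> None:
    assert get_age(now() - dt.timedelta(days=40)) == "1 month"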
def parse_time_expression(parameter: Any, min_value: int, max_value: int) -> list[int]:
"""Parse the time expression part and return a list of times to match."""
if parameter is None or parameter == MATCH_ALL:
res = list(range(min_value, max_value + 1))
elif isinstance(parameter, str):
if parameter.startswith("/"):
parameter = int(parameter[1:])
res = [x for x in range(min_value, max_value + 1) if x % parameter == 0]
else:
res = [int(parameter)]
elif not hasattr(parameter, "__iter__"):
res = [int(parameter)]
else:
res = sorted(int(x) for x in parameter)
for val in res:
if val < min_value or val > max_value:
raise ValueError(
f"Time expression '{parameter}': parameter {val} out of range "
f"({min_value} to {max_value})"
)
return res
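# --- Hedged usage sketch (not part of Home Assistant) ----------------------
# parse_time_expression() expands a cron-like field specification into the
# explicit list of matching values; the expected lists follow from the rules
# above.
def _parse_time_expression_example() -> None:
    assert parse_time_expression("/15", 0, 59) == [0, 15, 30, 45]
    assert parse_time_expression(None, 0, 2) == [0, 1, 2]
    assert parse_time_expression([30, 5], 0, 59) == [5, 30]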
def find_next_time_expression_time(
now: dt.datetime, # pylint: disable=redefined-outer-name
seconds: list[int],
minutes: list[int],
hours: list[int],
) -> dt.datetime:
"""Find the next datetime from now for which the time expression matches.
The algorithm looks at each time unit separately and tries to find the
next one that matches for each. If any of them would roll over, all
time units below that are reset to the first matching value.
Timezones are also handled (the tzinfo of the now object is used),
including daylight saving time.
"""
if not seconds or not minutes or not hours:
raise ValueError("Cannot find a next time: Time expression never matches!")
def _lower_bound(arr: list[int], cmp: int) -> int | None:
"""Return the first value in arr greater or equal to cmp.
Return None if no such value exists.
"""
left = 0
right = len(arr)
while left < right:
mid = (left + right) // 2
if arr[mid] < cmp:
left = mid + 1
else:
right = mid
if left == len(arr):
return None
return arr[left]
result = now.replace(microsecond=0)
# Match next second
next_second = _lower_bound(seconds, result.second)
if next_second is None:
# No second to match in this minute. Roll-over to next minute.
next_second = seconds[0]
result += dt.timedelta(minutes=1)
result = result.replace(second=next_second)
# Match next minute
next_minute = _lower_bound(minutes, result.minute)
if next_minute != result.minute:
# We're in the next minute. Seconds needs to be reset.
result = result.replace(second=seconds[0])
if next_minute is None:
# No minute to match in this hour. Roll-over to next hour.
next_minute = minutes[0]
result += dt.timedelta(hours=1)
result = result.replace(minute=next_minute)
# Match next hour
next_hour = _lower_bound(hours, result.hour)
if next_hour != result.hour:
# We're in the next hour. Seconds+minutes needs to be reset.
result = result.replace(second=seconds[0], minute=minutes[0])
if next_hour is None:
# No minute to match in this day. Roll-over to next day.
next_hour = hours[0]
result += dt.timedelta(days=1)
result = result.replace(hour=next_hour)
if result.tzinfo is None:
return result
# Now we need to handle timezones. We will make this datetime object
# "naive" first and then re-convert it to the target timezone.
# This is so that we can call pytz's localize and handle DST changes.
tzinfo: pytzinfo.DstTzInfo = UTC if result.tzinfo == NATIVE_UTC else result.tzinfo
result = result.replace(tzinfo=None)
try:
result = tzinfo.localize(result, is_dst=None)
except pytzexceptions.AmbiguousTimeError:
# This happens when we're leaving daylight saving time and local
# clocks are rolled back. In this case, we want to trigger
# on both the DST and non-DST time. So when "now" is in the DST
# use the DST-on time, and if not, use the DST-off time.
use_dst = bool(now.dst())
result = tzinfo.localize(result, is_dst=use_dst)
except pytzexceptions.NonExistentTimeError:
# This happens when we're entering daylight saving time and local
# clocks are rolled forward, thus there are local times that do
# not exist. In this case, we want to trigger on the next time
# that *does* exist.
# In the worst case, this will run through all the seconds in the
# time shift, but that's max 3600 operations for once per year
result = result.replace(tzinfo=tzinfo) + dt.timedelta(seconds=1)
return find_next_time_expression_time(result, seconds, minutes, hours)
result_dst = cast(dt.timedelta, result.dst())
now_dst = cast(dt.timedelta, now.dst()) or dt.timedelta(0)
if result_dst >= now_dst:
return result
# Another edge-case when leaving DST:
# When now is in DST and ambiguous *and* the next trigger time we *should*
# trigger is ambiguous and outside DST, the excepts above won't catch it.
# For example: if triggering on 2:30 and now is 28.10.2018 2:30 (in DST)
# we should trigger next on 28.10.2018 2:30 (out of DST), but our
# algorithm above would produce 29.10.2018 2:30 (out of DST)
# Step 1: Check if now is ambiguous
try:
tzinfo.localize(now.replace(tzinfo=None), is_dst=None)
return result
except pytzexceptions.AmbiguousTimeError:
pass
# Step 2: Check if result of (now - DST) is ambiguous.
check = now - now_dst
check_result = find_next_time_expression_time(check, seconds, minutes, hours)
try:
tzinfo.localize(check_result.replace(tzinfo=None), is_dst=None)
return result
except pytzexceptions.AmbiguousTimeError:
pass
# OK, edge case does apply. We must override the DST to DST-off
check_result = tzinfo.localize(check_result.replace(tzinfo=None), is_dst=False)
return check_result
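# --- Hedged usage sketch (not part of Home Assistant) ----------------------
# For a naive datetime the function returns before any timezone handling, so
# the result below follows directly from the roll-over logic above.
def _find_next_time_expression_example() -> None:
    start = dt.datetime(2021, 3, 1, 12, 0, 30)
    nxt = find_next_time_expression_time(
        start, seconds=[0], minutes=[0, 30], hours=list(range(24))
    )
    assert nxt == dt.datetime(2021, 3, 1, 12, 30, 0)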
| w1ll1am23/home-assistant | homeassistant/util/dt.py | Python | apache-2.0 | 12,636 | 0.000554 |
"""
Test lldb data formatter subsystem.
"""
from __future__ import print_function
import os
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class SkipSummaryDataFormatterTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
@expectedFailureAll(
oslist=['freebsd'],
bugnumber="llvm.org/pr20548 fails to build on lab.llvm.org buildbot")
@expectedFailureAll(
oslist=["windows"],
bugnumber="llvm.org/pr24462, Data formatters have problems on Windows")
def test_with_run_command(self):
"""Test data formatter commands."""
self.build()
self.data_formatter_commands()
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line number to break at.
self.line = line_number('main.cpp', '// Set break point at this line.')
    def data_formatter_commands(self):
        """Test that the --no-summary-depth option skips the expected levels of summaries."""
self.runCmd("file " + self.getBuildArtifact("a.out"), CURRENT_EXECUTABLE_SET)
#import lldbsuite.test.lldbutil as lldbutil
lldbutil.run_break_set_by_file_and_line(
self, "main.cpp", self.line, num_expected_locations=1, loc_exact=True)
self.runCmd("run", RUN_SUCCEEDED)
# The stop reason of the thread should be breakpoint.
self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
substrs=['stopped',
'stop reason = breakpoint'])
# This is the function to remove the custom formats in order to have a
# clean slate for the next test case.
def cleanup():
self.runCmd('type format clear', check=False)
self.runCmd('type summary clear', check=False)
# Execute the cleanup function during test case tear down.
self.addTearDownHook(cleanup)
# Setup the summaries for this scenario
#self.runCmd("type summary add --summary-string \"${var._M_dataplus._M_p}\" std::string")
self.runCmd(
"type summary add --summary-string \"Level 1\" \"DeepData_1\"")
self.runCmd(
"type summary add --summary-string \"Level 2\" \"DeepData_2\" -e")
self.runCmd(
"type summary add --summary-string \"Level 3\" \"DeepData_3\"")
self.runCmd(
"type summary add --summary-string \"Level 4\" \"DeepData_4\"")
self.runCmd(
"type summary add --summary-string \"Level 5\" \"DeepData_5\"")
# Default case, just print out summaries
self.expect('frame variable',
substrs=['(DeepData_1) data1 = Level 1',
'(DeepData_2) data2 = Level 2 {',
'm_child1 = Level 3',
'm_child2 = Level 3',
'm_child3 = Level 3',
'm_child4 = Level 3',
'}'])
# Skip the default (should be 1) levels of summaries
self.expect('frame variable --no-summary-depth',
substrs=['(DeepData_1) data1 = {',
'm_child1 = 0x',
'}',
'(DeepData_2) data2 = {',
'm_child1 = Level 3',
'm_child2 = Level 3',
'm_child3 = Level 3',
'm_child4 = Level 3',
'}'])
# Now skip 2 levels of summaries
self.expect('frame variable --no-summary-depth=2',
substrs=['(DeepData_1) data1 = {',
'm_child1 = 0x',
'}',
'(DeepData_2) data2 = {',
'm_child1 = {',
'm_child1 = 0x',
'Level 4',
'm_child2 = {',
'm_child3 = {',
'}'])
# Check that no "Level 3" comes out
self.expect(
'frame variable data1.m_child1 --no-summary-depth=2',
matching=False,
substrs=['Level 3'])
# Now expand a pointer with 2 level of skipped summaries
self.expect('frame variable data1.m_child1 --no-summary-depth=2',
substrs=['(DeepData_2 *) data1.m_child1 = 0x'])
# Deref and expand said pointer
self.expect('frame variable *data1.m_child1 --no-summary-depth=2',
substrs=['(DeepData_2) *data1.m_child1 = {',
'm_child2 = {',
'm_child1 = 0x',
'Level 4',
'}'])
# Expand an expression, skipping 2 layers of summaries
self.expect(
'frame variable data1.m_child1->m_child2 --no-summary-depth=2',
substrs=[
'(DeepData_3) data1.m_child1->m_child2 = {',
'm_child2 = {',
'm_child1 = Level 5',
'm_child2 = Level 5',
'm_child3 = Level 5',
'}'])
# Expand same expression, skipping only 1 layer of summaries
self.expect(
'frame variable data1.m_child1->m_child2 --no-summary-depth=1',
substrs=[
'(DeepData_3) data1.m_child1->m_child2 = {',
'm_child1 = 0x',
'Level 4',
'm_child2 = Level 4',
'}'])
# Bad debugging info on SnowLeopard gcc (Apple Inc. build 5666).
# Skip the following tests if the condition is met.
if self.getCompiler().endswith('gcc') and not self.getCompiler().endswith('llvm-gcc'):
import re
gcc_version_output = system(
[[lldbutil.which(self.getCompiler()), "-v"]])[1]
#print("my output:", gcc_version_output)
for line in gcc_version_output.split(os.linesep):
m = re.search('\(Apple Inc\. build ([0-9]+)\)', line)
#print("line:", line)
if m:
gcc_build = int(m.group(1))
#print("gcc build:", gcc_build)
if gcc_build >= 5666:
# rdar://problem/9804600"
self.skipTest(
"rdar://problem/9804600 wrong namespace for std::string in debug info")
# Expand same expression, skipping 3 layers of summaries
self.expect(
'frame variable data1.m_child1->m_child2 --show-types --no-summary-depth=3',
substrs=[
'(DeepData_3) data1.m_child1->m_child2 = {',
'm_some_text = "Just a test"',
'm_child2 = {',
'm_some_text = "Just a test"'])
# Change summary and expand, first without --no-summary-depth then with
# --no-summary-depth
self.runCmd(
"type summary add --summary-string \"${var.m_some_text}\" DeepData_5")
self.expect('fr var data2.m_child4.m_child2.m_child2', substrs=[
'(DeepData_5) data2.m_child4.m_child2.m_child2 = "Just a test"'])
self.expect(
'fr var data2.m_child4.m_child2.m_child2 --no-summary-depth',
substrs=[
'(DeepData_5) data2.m_child4.m_child2.m_child2 = {',
'm_some_text = "Just a test"',
'}'])
| apple/swift-lldb | packages/Python/lldbsuite/test/functionalities/data-formatter/data-formatter-skip-summary/TestDataFormatterSkipSummary.py | Python | apache-2.0 | 7,621 | 0.002099 |
from tf_idf import *
class EventAnalysis:
"""
    Class that contains all the operations needed to perform a text analysis.
"""
@staticmethod
def get_id_website(id_doc, is_website):
"""
Apply the processing to have a website id
"""
return id_doc if not is_website else id_doc + '_'
def __init__(self):
self.corpus = Corpus()
self.is_corpus_complete = False
self.tf_idf = None
def add_document_in_corpus(self, text, id_doc):
"""
        The id is as follows:
- A description : Event's id
- A website : Event's id + "_"
"""
self.corpus.add_document(Document(text, id_doc))
def set_corpus_complete(self):
"""
Define the corpus as complete to proceed to the next step with tf-idf
"""
self.is_corpus_complete = True
self.tf_idf = TfIdf(self.corpus)
def compute_tf_idf(self, term, id_doc):
"""
        The id is as follows:
- A description : Event's id
- A website : Event's id + "_"
"""
return self.tf_idf.get_tf_idf(term, id_doc)
def get_tf_idf_the_k_most_important(self, k, id_doc):
"""
        Return an OrderedDict that contains the k most important terms (sorted by tf-idf weight). If there
        are fewer than k terms, all available terms are returned.
"""
if not self.is_corpus_complete:
raise Exception("The corpus is not complete ! Please call set_corpus_complete when you've filled it.")
if k <= 0:
raise Exception("The k is <= 0 !")
from itertools import islice
from collections import OrderedDict
        # Keep only the first k (term, value-tuple) pairs from the sorted tf-idf results.
return OrderedDict((x[0], (x[1][0], x[1][1], x[1][2])) for x in
islice(self.tf_idf.get_all_tf_idf_sorted(id_doc).items(), 0, k))
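# --- Hedged usage sketch (not part of the original module) -----------------
# Assumes the local tf_idf module provides Corpus, Document and TfIdf as used
# above; document ids follow the description / website ("42" / "42_") scheme.
def _event_analysis_example():
    analysis = EventAnalysis()
    analysis.add_document_in_corpus("Concert in the park", "42")
    analysis.add_document_in_corpus(
        "Official website of the park concert",
        EventAnalysis.get_id_website("42", True))
    analysis.set_corpus_complete()
    # Three most important terms of the description document.
    return analysis.get_tf_idf_the_k_most_important(3, "42")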
| Diego999/Social-Recommendation-System | event_analyse/event_analysis.py | Python | mit | 2,010 | 0.002985 |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test pruning code
# ********
# WARNING:
# This test uses 4GB of disk space.
# This test takes 30 mins or more (up to 2 hours)
# ********
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import time
import os
MIN_BLOCKS_TO_KEEP = 288
# Rescans start at the earliest block up to 2 hours before a key timestamp, so
# the manual prune RPC avoids pruning blocks in the same window to be
# compatible with pruning based on key creation time.
RESCAN_WINDOW = 2 * 60 * 60
def calc_usage(blockdir):
return sum(os.path.getsize(blockdir+f) for f in os.listdir(blockdir) if os.path.isfile(blockdir+f)) / (1024. * 1024.)
class PruneTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 6
# Cache for utxos, as the listunspent may take a long time later in the test
self.utxo_cache_0 = []
self.utxo_cache_1 = []
def setup_network(self):
self.nodes = []
self.is_network_split = False
# Create nodes 0 and 1 to mine
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5"], timewait=900))
self.nodes.append(start_node(1, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5"], timewait=900))
# Create node 2 to test pruning
self.nodes.append(start_node(2, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-prune=550"], timewait=900))
self.prunedir = self.options.tmpdir+"/node2/regtest/blocks/"
# Create nodes 3 and 4 to test manual pruning (they will be re-started with manual pruning later)
self.nodes.append(start_node(3, self.options.tmpdir, ["-debug=0","-maxreceivebuffer=20000","-blockmaxsize=999000"], timewait=900))
self.nodes.append(start_node(4, self.options.tmpdir, ["-debug=0","-maxreceivebuffer=20000","-blockmaxsize=999000"], timewait=900))
# Create nodes 5 to test wallet in prune mode, but do not connect
self.nodes.append(start_node(5, self.options.tmpdir, ["-debug=0", "-prune=550"]))
# Determine default relay fee
self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"]
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[1], 2)
connect_nodes(self.nodes[2], 0)
connect_nodes(self.nodes[0], 3)
connect_nodes(self.nodes[0], 4)
sync_blocks(self.nodes[0:5])
def create_big_chain(self):
# Start by creating some coinbases we can spend later
self.nodes[1].generate(200)
sync_blocks(self.nodes[0:2])
self.nodes[0].generate(150)
# Then mine enough full blocks to create more than 550MiB of data
for i in range(645):
mine_large_block(self.nodes[0], self.utxo_cache_0)
sync_blocks(self.nodes[0:5])
def test_height_min(self):
if not os.path.isfile(self.prunedir+"blk00000.dat"):
raise AssertionError("blk00000.dat is missing, pruning too early")
print("Success")
print("Though we're already using more than 550MiB, current usage:", calc_usage(self.prunedir))
print("Mining 25 more blocks should cause the first block file to be pruned")
# Pruning doesn't run until we're allocating another chunk, 20 full blocks past the height cutoff will ensure this
for i in range(25):
mine_large_block(self.nodes[0], self.utxo_cache_0)
waitstart = time.time()
while os.path.isfile(self.prunedir+"blk00000.dat"):
time.sleep(0.1)
if time.time() - waitstart > 30:
raise AssertionError("blk00000.dat not pruned when it should be")
print("Success")
usage = calc_usage(self.prunedir)
print("Usage should be below target:", usage)
if (usage > 550):
raise AssertionError("Pruning target not being met")
def create_chain_with_staleblocks(self):
# Create stale blocks in manageable sized chunks
print("Mine 24 (stale) blocks on Node 1, followed by 25 (main chain) block reorg from Node 0, for 12 rounds")
for j in range(12):
# Disconnect node 0 so it can mine a longer reorg chain without knowing about node 1's soon-to-be-stale chain
# Node 2 stays connected, so it hears about the stale blocks and then reorg's when node0 reconnects
# Stopping node 0 also clears its mempool, so it doesn't have node1's transactions to accidentally mine
self.stop_node(0)
self.nodes[0]=start_node(0, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=999000", "-checkblocks=5"], timewait=900)
# Mine 24 blocks in node 1
for i in range(24):
if j == 0:
mine_large_block(self.nodes[1], self.utxo_cache_1)
else:
self.nodes[1].generate(1) #tx's already in mempool from previous disconnects
# Reorg back with 25 block chain from node 0
for i in range(25):
mine_large_block(self.nodes[0], self.utxo_cache_0)
# Create connections in the order so both nodes can see the reorg at the same time
connect_nodes(self.nodes[1], 0)
connect_nodes(self.nodes[2], 0)
sync_blocks(self.nodes[0:3])
print("Usage can be over target because of high stale rate:", calc_usage(self.prunedir))
def reorg_test(self):
# Node 1 will mine a 300 block chain starting 287 blocks back from Node 0 and Node 2's tip
# This will cause Node 2 to do a reorg requiring 288 blocks of undo data to the reorg_test chain
# Reboot node 1 to clear its mempool (hopefully make the invalidate faster)
# Lower the block max size so we don't keep mining all our big mempool transactions (from disconnected blocks)
self.stop_node(1)
self.nodes[1]=start_node(1, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode"], timewait=900)
height = self.nodes[1].getblockcount()
print("Current block height:", height)
invalidheight = height-287
badhash = self.nodes[1].getblockhash(invalidheight)
print("Invalidating block at height:",invalidheight,badhash)
self.nodes[1].invalidateblock(badhash)
        # We've now switched to our previously mined 24-block fork on node 1, but that's not what we want
# So invalidate that fork as well, until we're on the same chain as node 0/2 (but at an ancestor 288 blocks ago)
mainchainhash = self.nodes[0].getblockhash(invalidheight - 1)
curhash = self.nodes[1].getblockhash(invalidheight - 1)
while curhash != mainchainhash:
self.nodes[1].invalidateblock(curhash)
curhash = self.nodes[1].getblockhash(invalidheight - 1)
assert(self.nodes[1].getblockcount() == invalidheight - 1)
print("New best height", self.nodes[1].getblockcount())
# Reboot node1 to clear those giant tx's from mempool
self.stop_node(1)
self.nodes[1]=start_node(1, self.options.tmpdir, ["-debug","-maxreceivebuffer=20000","-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode"], timewait=900)
print("Generating new longer chain of 300 more blocks")
self.nodes[1].generate(300)
print("Reconnect nodes")
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[2], 1)
sync_blocks(self.nodes[0:3], timeout=120)
print("Verify height on node 2:",self.nodes[2].getblockcount())
        print("Usage possibly still high because of stale blocks in block files:", calc_usage(self.prunedir))
print("Mine 220 more blocks so we have requisite history (some blocks will be big and cause pruning of previous chain)")
for i in range(22):
# This can be slow, so do this in multiple RPC calls to avoid
# RPC timeouts.
self.nodes[0].generate(10) #node 0 has many large tx's in its mempool from the disconnects
sync_blocks(self.nodes[0:3], timeout=300)
usage = calc_usage(self.prunedir)
print("Usage should be below target:", usage)
if (usage > 550):
raise AssertionError("Pruning target not being met")
return invalidheight,badhash
def reorg_back(self):
# Verify that a block on the old main chain fork has been pruned away
assert_raises_jsonrpc(-1, "Block not available (pruned data)", self.nodes[2].getblock, self.forkhash)
print("Will need to redownload block",self.forkheight)
# Verify that we have enough history to reorg back to the fork point
# Although this is more than 288 blocks, because this chain was written more recently
# and only its other 299 small and 220 large block are in the block files after it,
# its expected to still be retained
self.nodes[2].getblock(self.nodes[2].getblockhash(self.forkheight))
first_reorg_height = self.nodes[2].getblockcount()
curchainhash = self.nodes[2].getblockhash(self.mainchainheight)
self.nodes[2].invalidateblock(curchainhash)
goalbestheight = self.mainchainheight
goalbesthash = self.mainchainhash2
# As of 0.10 the current block download logic is not able to reorg to the original chain created in
        # create_chain_with_staleblocks because it doesn't know of any peer that's on that chain from which to
# redownload its missing blocks.
# Invalidate the reorg_test chain in node 0 as well, it can successfully switch to the original chain
# because it has all the block data.
# However it must mine enough blocks to have a more work chain than the reorg_test chain in order
# to trigger node 2's block download logic.
# At this point node 2 is within 288 blocks of the fork point so it will preserve its ability to reorg
if self.nodes[2].getblockcount() < self.mainchainheight:
blocks_to_mine = first_reorg_height + 1 - self.mainchainheight
print("Rewind node 0 to prev main chain to mine longer chain to trigger redownload. Blocks needed:", blocks_to_mine)
self.nodes[0].invalidateblock(curchainhash)
assert(self.nodes[0].getblockcount() == self.mainchainheight)
assert(self.nodes[0].getbestblockhash() == self.mainchainhash2)
goalbesthash = self.nodes[0].generate(blocks_to_mine)[-1]
goalbestheight = first_reorg_height + 1
print("Verify node 2 reorged back to the main chain, some blocks of which it had to redownload")
waitstart = time.time()
while self.nodes[2].getblockcount() < goalbestheight:
time.sleep(0.1)
if time.time() - waitstart > 900:
raise AssertionError("Node 2 didn't reorg to proper height")
assert(self.nodes[2].getbestblockhash() == goalbesthash)
# Verify we can now have the data for a block previously pruned
assert(self.nodes[2].getblock(self.forkhash)["height"] == self.forkheight)
def manual_test(self, node_number, use_timestamp):
# at this point, node has 995 blocks and has not yet run in prune mode
node = self.nodes[node_number] = start_node(node_number, self.options.tmpdir, ["-debug=0"], timewait=900)
assert_equal(node.getblockcount(), 995)
assert_raises_jsonrpc(-1, "not in prune mode", node.pruneblockchain, 500)
self.stop_node(node_number)
# now re-start in manual pruning mode
node = self.nodes[node_number] = start_node(node_number, self.options.tmpdir, ["-debug=0","-prune=1"], timewait=900)
assert_equal(node.getblockcount(), 995)
def height(index):
if use_timestamp:
return node.getblockheader(node.getblockhash(index))["time"] + RESCAN_WINDOW
else:
return index
def prune(index, expected_ret=None):
ret = node.pruneblockchain(height(index))
# Check the return value. When use_timestamp is True, just check
# that the return value is less than or equal to the expected
# value, because when more than one block is generated per second,
# a timestamp will not be granular enough to uniquely identify an
# individual block.
if expected_ret is None:
expected_ret = index
if use_timestamp:
assert_greater_than(ret, 0)
assert_greater_than(expected_ret + 1, ret)
else:
assert_equal(ret, expected_ret)
def has_block(index):
return os.path.isfile(self.options.tmpdir + "/node{}/regtest/blocks/blk{:05}.dat".format(node_number, index))
# should not prune because chain tip of node 3 (995) < PruneAfterHeight (1000)
assert_raises_jsonrpc(-1, "Blockchain is too short for pruning", node.pruneblockchain, height(500))
# mine 6 blocks so we are at height 1001 (i.e., above PruneAfterHeight)
node.generate(6)
assert_equal(node.getblockchaininfo()["blocks"], 1001)
# negative heights should raise an exception
assert_raises_jsonrpc(-8, "Negative", node.pruneblockchain, -10)
# height=100 too low to prune first block file so this is a no-op
prune(100)
if not has_block(0):
raise AssertionError("blk00000.dat is missing when should still be there")
# Does nothing
node.pruneblockchain(height(0))
if not has_block(0):
raise AssertionError("blk00000.dat is missing when should still be there")
# height=500 should prune first file
prune(500)
if has_block(0):
raise AssertionError("blk00000.dat is still there, should be pruned by now")
if not has_block(1):
raise AssertionError("blk00001.dat is missing when should still be there")
# height=650 should prune second file
prune(650)
if has_block(1):
raise AssertionError("blk00001.dat is still there, should be pruned by now")
# height=1000 should not prune anything more, because tip-288 is in blk00002.dat.
prune(1000, 1001 - MIN_BLOCKS_TO_KEEP)
        if not has_block(2):
            raise AssertionError("blk00002.dat is missing when should still be there")
# advance the tip so blk00002.dat and blk00003.dat can be pruned (the last 288 blocks should now be in blk00004.dat)
node.generate(288)
prune(1000)
if has_block(2):
raise AssertionError("blk00002.dat is still there, should be pruned by now")
if has_block(3):
raise AssertionError("blk00003.dat is still there, should be pruned by now")
# stop node, start back up with auto-prune at 550MB, make sure still runs
self.stop_node(node_number)
self.nodes[node_number] = start_node(node_number, self.options.tmpdir, ["-debug=0","-prune=550"], timewait=900)
print("Success")
def wallet_test(self):
# check that the pruning node's wallet is still in good shape
print("Stop and start pruning node to trigger wallet rescan")
self.stop_node(2)
start_node(2, self.options.tmpdir, ["-debug=1","-prune=550"])
print("Success")
# check that wallet loads loads successfully when restarting a pruned node after IBD.
# this was reported to fail in #7494.
print ("Syncing node 5 to test wallet")
connect_nodes(self.nodes[0], 5)
nds = [self.nodes[0], self.nodes[5]]
sync_blocks(nds, wait=5, timeout=300)
self.stop_node(5) #stop and start to trigger rescan
start_node(5, self.options.tmpdir, ["-debug=1","-prune=550"])
print ("Success")
def run_test(self):
print("Warning! This test requires 4GB of disk space and takes over 30 mins (up to 2 hours)")
print("Mining a big blockchain of 995 blocks")
self.create_big_chain()
# Chain diagram key:
# * blocks on main chain
# +,&,$,@ blocks on other forks
# X invalidated block
# N1 Node 1
#
# Start by mining a simple chain that all nodes have
# N0=N1=N2 **...*(995)
# stop manual-pruning node with 995 blocks
self.stop_node(3)
self.stop_node(4)
print("Check that we haven't started pruning yet because we're below PruneAfterHeight")
self.test_height_min()
# Extend this chain past the PruneAfterHeight
# N0=N1=N2 **...*(1020)
print("Check that we'll exceed disk space target if we have a very high stale block rate")
self.create_chain_with_staleblocks()
# Disconnect N0
# And mine a 24 block chain on N1 and a separate 25 block chain on N0
# N1=N2 **...*+...+(1044)
# N0 **...**...**(1045)
#
# reconnect nodes causing reorg on N1 and N2
# N1=N2 **...*(1020) *...**(1045)
# \
# +...+(1044)
#
# repeat this process until you have 12 stale forks hanging off the
# main chain on N1 and N2
# N0 *************************...***************************(1320)
#
# N1=N2 **...*(1020) *...**(1045) *.. ..**(1295) *...**(1320)
# \ \ \
# +...+(1044) &.. $...$(1319)
# Save some current chain state for later use
self.mainchainheight = self.nodes[2].getblockcount() #1320
self.mainchainhash2 = self.nodes[2].getblockhash(self.mainchainheight)
print("Check that we can survive a 288 block reorg still")
(self.forkheight,self.forkhash) = self.reorg_test() #(1033, )
# Now create a 288 block reorg by mining a longer chain on N1
# First disconnect N1
# Then invalidate 1033 on main chain and 1032 on fork so height is 1032 on main chain
# N1 **...*(1020) **...**(1032)X..
# \
# ++...+(1031)X..
#
# Now mine 300 more blocks on N1
# N1 **...*(1020) **...**(1032) @@...@(1332)
# \ \
# \ X...
# \ \
# ++...+(1031)X.. ..
#
# Reconnect nodes and mine 220 more blocks on N1
# N1 **...*(1020) **...**(1032) @@...@@@(1552)
# \ \
# \ X...
# \ \
# ++...+(1031)X.. ..
#
# N2 **...*(1020) **...**(1032) @@...@@@(1552)
# \ \
# \ *...**(1320)
# \ \
# ++...++(1044) ..
#
# N0 ********************(1032) @@...@@@(1552)
# \
# *...**(1320)
print("Test that we can rerequest a block we previously pruned if needed for a reorg")
self.reorg_back()
# Verify that N2 still has block 1033 on current chain (@), but not on main chain (*)
# Invalidate 1033 on current chain (@) on N2 and we should be able to reorg to
# original main chain (*), but will require redownload of some blocks
# In order to have a peer we think we can download from, must also perform this invalidation
# on N0 and mine a new longest chain to trigger.
# Final result:
# N0 ********************(1032) **...****(1553)
# \
# X@...@@@(1552)
#
# N2 **...*(1020) **...**(1032) **...****(1553)
# \ \
# \ X@...@@@(1552)
# \
# +..
#
# N1 doesn't change because 1033 on main chain (*) is invalid
print("Test manual pruning with block indices")
self.manual_test(3, use_timestamp=False)
print("Test manual pruning with timestamps")
self.manual_test(4, use_timestamp=True)
print("Test wallet re-scan")
self.wallet_test()
print("Done")
if __name__ == '__main__':
PruneTest().main()
| psionin/smartcoin | qa/rpc-tests/pruning.py | Python | mit | 21,037 | 0.005371 |
from django.db import models
# Create your models here.
class Autor(models.Model):
nombre = models.TextField(max_length=100)
apellido = models.TextField(max_length=100)
class Libro(models.Model):
nombre = models.TextField(max_length=100)
editorial = models.TextField(max_length=100)
genero = models.TextField(max_length=100)
descripcion = models.TextField()
autor = models.ForeignKey(
Autor,
null=True
)
def __unicode__(self):
return self.editorial
| pollitosabroson/pycarribean | src/books/models.py | Python | apache-2.0 | 515 | 0 |
from django.views.generic import TemplateView
from django.conf import settings
from volunteer.apps.events.utils import get_active_event
class SiteIndexView(TemplateView):
template_name = 'home.html'
def get_context_data(self, **kwargs):
context = super(SiteIndexView, self).get_context_data(**kwargs)
context['current_event'] = get_active_event(self.request.session)
context['support_email'] = settings.DEFAULT_FROM_EMAIL
return context
| Apogaea/voldb | volunteer/core/views.py | Python | gpl-3.0 | 481 | 0 |
# PyDia SQL.py : SQL dump.
# Copy it to /usr/share/dia/python
import dia
# import sys
# import os
import string
import re
import datetime
class SQLRenderer:
def __init__(self):
self.f = None
def begin_render(self, data, filename):
self.f = open(filename, "w")
# name = os.path.split(filename)[1]
self.f.write('''BEGIN TRANSACTION;\n''')
for layer in data.layers:
self.WriteTables(layer)
def WriteTables(self, layer):
tables = {}
appdata = 'appdata'
priority = {'fields': 0, 'foreign_keys': 100}
# value for id
z = ["INSERT INTO zf VALUES ('id', 'No', 'INTEGER', '1');"]
z.append("INSERT INTO z VALUES('diadate', '%s');" % datetime.date.today().isoformat())
zsql = "INSERT INTO z VALUES('%s', '%s');"
zfsql = "INSERT INTO zf VALUES ('%s', '%s', '%s', '%s');"
ztsql = "INSERT INTO zt VALUES ('%s', '%s', '%s', '%s');"
for o in layer.objects:
if o.type.name == 'Database - Table':
if "name" in o.properties.keys():
table = o.properties["name"].value
elif "text" in o.properties.keys():
table = o.properties["text"].value.text
else:
continue
if len(table) == 0 or string.find(table, " ") >= 0:
continue
if table not in tables.keys():
tables[table] = ''
if table == appdata:
attrs = o.properties['attributes'].value
for attr in attrs:
z.append(zsql % (attr[0], attr[1]))
continue
# zt.append(comment)
# first line is label
# second line is label plural
# third line is rpr
clst = o.properties['comment'].value.split('\n')
if len(clst) >= 3:
z.append(ztsql % (table, clst[0], clst[1], clst[2]))
atributes = o.properties['attributes'].value
for i in range(0, len(atributes)):
a = atributes[i]
if a[0] == 'id':
tables[table] = '%0.3d\tid INTEGER PRIMARY KEY\n' %\
(priority['fields'] + i)
continue
if len(a[0]) > 4:
if a[0][-3:] == '_id':
nnul = ''
if a[4] == 0:
nnul = ' NOT NULL'
tables[table] += '%0.3d\t%s INTEGER%s REFERENCES %s(id)\n' % (priority['fields'] + i, a[0], nnul, a[0][:-3])
continue
tipo = ''
if re.match('.*enum\(.*', a[1], re.I):
tipo = a[1]
else:
tipo = a[1].upper()
if tipo == '':
tipo = 'TEXT'
tables[table] += '%0.3d\t%s %s' % (priority['fields'] + i, a[0], tipo)
if a[3] == 1:
tables[table] += ' PRIMARY KEY'
if a[4] == 0:
if a[3] != 1:
tables[table] += ' NOT NULL'
notnull = 1
else:
tables[table] += ''
notnull = 0
if a[5] == 1:
if a[3] != 1:
tables[table] += ' UNIQUE'
# Create insert for table zflbl
if (len(a[2]) > 0):
z.append(zfsql % (a[0], a[2], tipo, notnull))
tables[table] += '\n'
elif o.type.name == 'Database - Reference':
continue
for k in sorted(tables.keys()):
# self.f.write('\n-- %s --\nDROP TABLE IF EXISTS `%s`;\n' % (k,k) )
if k != appdata:
self.f.write('CREATE TABLE IF NOT EXISTS %s (\n' % k)
sentences = sorted(tables[k].split('\n'))
sentences = [str(s[3:]) for s in sentences if len(s) > 4]
sentences = ",\n".join(sentences)
self.f.write('%s\n' % sentences)
self.f.write(');\n')
self.f.write('CREATE TABLE IF NOT EXISTS z (key TEXT PRIMARY KEY, val TEXT NOT NULL);\n')
self.f.write('CREATE TABLE IF NOT EXISTS zt (tbl TEXT PRIMARY KEY, tlbl TEXT NOT NULL UNIQUE, tlblp TEXT NOT NULL UNIQUE, rpr TEXT NOT NULL);\n')
self.f.write('CREATE TABLE IF NOT EXISTS zf (fld TEXT PRIMARY KEY, flbl TEXT NOT NULL UNIQUE, typos TEXT NOT NULL, nonull INTEGER NOT NULL DEFAULT 1);\n')
self.f.write('\n'.join(sorted(z)))
self.f.write('\n')
def end_render(self):
self.f.write('COMMIT;\n')
self.f.close()
# reference
dia.register_export("PyDia SQL generator", "sql", SQLRenderer())
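# Editorial sketch of the kind of DDL WriteTables() emits for a diagram table
# "book" with attributes id, author_id and name (example only; real output
# depends on the diagram, and the z/zt/zf bookkeeping tables are always
# appended as well):
#   CREATE TABLE IF NOT EXISTS book (
#       id INTEGER PRIMARY KEY,
#       author_id INTEGER NOT NULL REFERENCES author(id),
#       name TEXT NOT NULL
#   );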
| tedlaz/pyted | pymiles/pyMiles2.old/pymiles/sqlite/diasql.py | Python | gpl-3.0 | 5,065 | 0.001579 |
from __future__ import division
import json
import os
import copy
import collections
import argparse
import csv
import neuroglancer
import neuroglancer.cli
import numpy as np
class State(object):
def __init__(self, path):
self.path = path
self.body_labels = collections.OrderedDict()
def load(self):
if os.path.exists(self.path):
with open(self.path, 'r') as f:
self.body_labels = collections.OrderedDict(json.load(f))
def save(self):
tmp_path = self.path + '.tmp'
with open(tmp_path, 'w') as f:
            f.write(json.dumps(list(self.body_labels.items())))
os.rename(tmp_path, self.path)
Body = collections.namedtuple('Body', ['segment_id', 'num_voxels', 'bbox_start', 'bbox_size'])
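# Editorial note: the --bodies CSV parsed in __main__ below must provide at
# least the columns read by the DictReader there:
#   id, num_voxels, bbox.start.x, bbox.start.y, bbox.start.z,
#   bbox.size.x, bbox.size.y, bbox.size.z
# For example (values are hypothetical):
#   id,num_voxels,bbox.start.x,bbox.start.y,bbox.start.z,bbox.size.x,bbox.size.y,bbox.size.z
#   12345,98765,100,200,300,64,64,32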
class Tool(object):
def __init__(self, state_path, bodies, labels, segmentation_url, image_url, num_to_prefetch):
self.state = State(state_path)
self.num_to_prefetch = num_to_prefetch
self.viewer = neuroglancer.Viewer()
self.bodies = bodies
self.state.load()
self.total_voxels = sum(x.num_voxels for x in bodies)
self.cumulative_voxels = np.cumsum([x.num_voxels for x in bodies])
with self.viewer.txn() as s:
s.layers['image'] = neuroglancer.ImageLayer(source=image_url)
s.layers['segmentation'] = neuroglancer.SegmentationLayer(source=segmentation_url)
s.show_slices = False
s.concurrent_downloads = 256
s.gpu_memory_limit = 2 * 1024 * 1024 * 1024
s.layout = '3d'
key_bindings = [
['bracketleft', 'prev-index'],
['bracketright', 'next-index'],
['home', 'first-index'],
['end', 'last-index'],
['control+keys', 'save'],
]
label_keys = ['keyd', 'keyf', 'keyg', 'keyh']
for label, label_key in zip(labels, label_keys):
key_bindings.append([label_key, 'label-%s' % label])
def label_func(s, label=label):
self.set_label(s, label)
self.viewer.actions.add('label-%s' % label, label_func)
self.viewer.actions.add('prev-index', self._prev_index)
self.viewer.actions.add('next-index', self._next_index)
self.viewer.actions.add('first-index', self._first_index)
self.viewer.actions.add('last-index', self._last_index)
self.viewer.actions.add('save', self.save)
with self.viewer.config_state.txn() as s:
for key, command in key_bindings:
s.input_event_bindings.viewer[key] = command
s.status_messages['help'] = ('KEYS: ' + ' | '.join('%s=%s' % (key, command)
for key, command in key_bindings))
self.index = -1
self.set_index(self._find_one_after_last_labeled_index())
def _find_one_after_last_labeled_index(self):
body_index = 0
while self.bodies[body_index].segment_id in self.state.body_labels:
body_index += 1
return body_index
def set_index(self, index):
if index == self.index:
return
body = self.bodies[index]
self.index = index
def modify_state_for_body(s, body):
s.layers['segmentation'].segments = frozenset([body.segment_id])
s.voxel_coordinates = body.bbox_start + body.bbox_size // 2
with self.viewer.txn() as s:
modify_state_for_body(s, body)
prefetch_states = []
for i in range(self.num_to_prefetch):
prefetch_index = self.index + i + 1
if prefetch_index >= len(self.bodies):
break
prefetch_state = copy.deepcopy(self.viewer.state)
prefetch_state.layout = '3d'
modify_state_for_body(prefetch_state, self.bodies[prefetch_index])
prefetch_states.append(prefetch_state)
with self.viewer.config_state.txn() as s:
s.prefetch = [
neuroglancer.PrefetchState(state=prefetch_state, priority=-i)
for i, prefetch_state in enumerate(prefetch_states)
]
label = self.state.body_labels.get(body.segment_id, '')
with self.viewer.config_state.txn() as s:
s.status_messages['status'] = (
'[Segment %d/%d : %d/%d voxels labeled = %.3f fraction] label=%s' %
(index, len(self.bodies), self.cumulative_voxels[index], self.total_voxels,
self.cumulative_voxels[index] / self.total_voxels, label))
def save(self, s):
self.state.save()
def set_label(self, s, label):
self.state.body_labels[self.bodies[self.index].segment_id] = label
self.set_index(self.index + 1)
def _first_index(self, s):
self.set_index(0)
def _last_index(self, s):
self.set_index(max(0, self._find_one_after_last_labeled_index() - 1))
def _next_index(self, s):
self.set_index(self.index + 1)
def _prev_index(self, s):
self.set_index(max(0, self.index - 1))
if __name__ == '__main__':
ap = argparse.ArgumentParser()
neuroglancer.cli.add_server_arguments(ap)
ap.add_argument('--image-url', required=True, help='Neuroglancer data source URL for image')
ap.add_argument('--segmentation-url',
required=True,
help='Neuroglancer data source URL for segmentation')
ap.add_argument('--state', required=True, help='Path to proofreading state file')
ap.add_argument('--bodies', required=True, help='Path to list of bodies to proofread')
ap.add_argument('--labels', nargs='+', help='Labels to use')
ap.add_argument('--prefetch', type=int, default=10, help='Number of bodies to prefetch')
args = ap.parse_args()
neuroglancer.cli.handle_server_arguments(args)
bodies = []
with open(args.bodies, 'r') as f:
csv_reader = csv.DictReader(f)
for row in csv_reader:
bodies.append(
Body(
segment_id=int(row['id']),
num_voxels=int(row['num_voxels']),
bbox_start=np.array([
int(row['bbox.start.x']),
int(row['bbox.start.y']),
int(row['bbox.start.z'])
],
dtype=np.int64),
bbox_size=np.array(
[int(row['bbox.size.x']),
int(row['bbox.size.y']),
int(row['bbox.size.z'])],
dtype=np.int64),
))
tool = Tool(
state_path=args.state,
image_url=args.image_url,
segmentation_url=args.segmentation_url,
labels=args.labels,
bodies=bodies,
num_to_prefetch=args.prefetch,
)
print(tool.viewer)
| janelia-flyem/neuroglancer | python/neuroglancer/tool/filter_bodies.py | Python | apache-2.0 | 6,932 | 0.001587 |
# Author: Trevor Perrin
# See the LICENSE file for legal information regarding use of this file.
"""TLS Lite is a free python library that implements SSL and TLS. TLS Lite
supports RSA and SRP ciphersuites. TLS Lite is pure python, however it can use
other libraries for faster crypto operations. TLS Lite integrates with several
stdlib networking libraries.
API documentation is available in the 'docs' directory.
If you have questions or feedback, feel free to contact me.
To use, do::
from tlslite import TLSConnection, ...
If you want to import the most useful objects, the cleanest way is:
from tlslite.api import *
Then use the L{tlslite.TLSConnection.TLSConnection} class with a socket.
(Or, use one of the integration classes in L{tlslite.integration}).
@version: 0.4.8
"""
from tlslite.api import *
from tlslite.api import __version__ # Unsure why this is needed, but it is
| Teamxrtc/webrtc-streaming-node | third_party/webrtc/src/chromium/src/third_party/tlslite/tlslite/__init__.py | Python | mit | 904 | 0.002212 |
class ParamsException(Exception):
"""Exception raised when tp, fmt and size values are wrongs"""
pass
class LineSizeException(Exception):
"""Exception raised when line size is bigger then specified"""
pass
class LineIdentifierException(Exception):
"""Exception raised when line indentifier rased from the
file is different to the line identifier used in the specification
obs: line identifier is defined using .eq() function
"""
pass
| anderson89marques/PyFixedFlatFile | pyFixedFlatFile/exceptions.py | Python | mit | 476 | 0.002101 |
"""Extended file operations available in POSIX.
f = posixfile.open(filename, [mode, [bufsize]])
will create a new posixfile object
f = posixfile.fileopen(fileobject)
will create a posixfile object from a builtin file object
f.file()
will return the original builtin file object
f.dup()
will return a new file object based on a new filedescriptor
f.dup2(fd)
will return a new file object based on the given filedescriptor
f.flags(mode)
will turn on the associated flag (merge)
mode can contain the following characters:
(character representing a flag)
a append only flag
c close on exec flag
n no delay flag
s synchronization flag
(modifiers)
! turn flags 'off' instead of default 'on'
= copy flags 'as is' instead of default 'merge'
? return a string in which the characters represent the flags
that are set
note: - the '!' and '=' modifiers are mutually exclusive.
- the '?' modifier will return the status of the flags after they
have been changed by other characters in the mode string
f.lock(mode [, len [, start [, whence]]])
will (un)lock a region
mode can contain the following characters:
(character representing type of lock)
u unlock
r read lock
w write lock
(modifiers)
| wait until the lock can be granted
? return the first lock conflicting with the requested lock
or 'None' if there is no conflict. The lock returned is in the
format (mode, len, start, whence, pid) where mode is a
character representing the type of lock ('r' or 'w')
note: - the '?' modifier prevents a region from being locked; it is
query only
"""
import warnings
warnings.warn("The posixfile module is deprecated; "
"fcntl.lockf() provides better locking", DeprecationWarning, 2)
class _posixfile_:
"""File wrapper class that provides extra POSIX file routines."""
states = ['open', 'closed']
#
# Internal routines
#
def __repr__(self):
file = self._file_
return "<%s posixfile '%s', mode '%s' at %s>" % \
(self.states[file.closed], file.name, file.mode, \
hex(id(self))[2:])
#
# Initialization routines
#
def open(self, name, mode='r', bufsize=-1):
import __builtin__
return self.fileopen(__builtin__.open(name, mode, bufsize))
def fileopen(self, file):
import types
if repr(type(file)) != "<type 'file'>":
raise TypeError, 'posixfile.fileopen() arg must be file object'
self._file_ = file
# Copy basic file methods
for maybemethod in dir(file):
if not maybemethod.startswith('_'):
attr = getattr(file, maybemethod)
if isinstance(attr, types.BuiltinMethodType):
setattr(self, maybemethod, attr)
return self
#
# New methods
#
def file(self):
return self._file_
def dup(self):
import posix
if not hasattr(posix, 'fdopen'):
raise AttributeError, 'dup() method unavailable'
return posix.fdopen(posix.dup(self._file_.fileno()), self._file_.mode)
def dup2(self, fd):
import posix
if not hasattr(posix, 'fdopen'):
raise AttributeError, 'dup() method unavailable'
posix.dup2(self._file_.fileno(), fd)
return posix.fdopen(fd, self._file_.mode)
def flags(self, *which):
import fcntl, os
if which:
if len(which) > 1:
raise TypeError, 'Too many arguments'
which = which[0]
else: which = '?'
l_flags = 0
if 'n' in which: l_flags = l_flags | os.O_NDELAY
if 'a' in which: l_flags = l_flags | os.O_APPEND
if 's' in which: l_flags = l_flags | os.O_SYNC
file = self._file_
if '=' not in which:
cur_fl = fcntl.fcntl(file.fileno(), fcntl.F_GETFL, 0)
if '!' in which: l_flags = cur_fl & ~ l_flags
else: l_flags = cur_fl | l_flags
l_flags = fcntl.fcntl(file.fileno(), fcntl.F_SETFL, l_flags)
if 'c' in which:
arg = ('!' not in which) # 0 is don't, 1 is do close on exec
l_flags = fcntl.fcntl(file.fileno(), fcntl.F_SETFD, arg)
if '?' in which:
which = '' # Return current flags
l_flags = fcntl.fcntl(file.fileno(), fcntl.F_GETFL, 0)
if os.O_APPEND & l_flags: which = which + 'a'
if fcntl.fcntl(file.fileno(), fcntl.F_GETFD, 0) & 1:
which = which + 'c'
if os.O_NDELAY & l_flags: which = which + 'n'
if os.O_SYNC & l_flags: which = which + 's'
return which
def lock(self, how, *args):
import struct, fcntl
if 'w' in how: l_type = fcntl.F_WRLCK
elif 'r' in how: l_type = fcntl.F_RDLCK
elif 'u' in how: l_type = fcntl.F_UNLCK
else: raise TypeError, 'no type of lock specified'
if '|' in how: cmd = fcntl.F_SETLKW
elif '?' in how: cmd = fcntl.F_GETLK
else: cmd = fcntl.F_SETLK
l_whence = 0
l_start = 0
l_len = 0
if len(args) == 1:
l_len = args[0]
elif len(args) == 2:
l_len, l_start = args
elif len(args) == 3:
l_len, l_start, l_whence = args
elif len(args) > 3:
raise TypeError, 'too many arguments'
# Hack by [email protected] to get locking to go on freebsd;
# additions for AIX by [email protected]
import sys, os
if sys.platform in ('netbsd1',
'openbsd2',
'freebsd2', 'freebsd3', 'freebsd4', 'freebsd5',
'freebsd6', 'freebsd7', 'freebsd8',
'bsdos2', 'bsdos3', 'bsdos4'):
flock = struct.pack('lxxxxlxxxxlhh', \
l_start, l_len, os.getpid(), l_type, l_whence)
elif sys.platform in ('aix3', 'aix4'):
flock = struct.pack('hhlllii', \
l_type, l_whence, l_start, l_len, 0, 0, 0)
else:
flock = struct.pack('hhllhh', \
l_type, l_whence, l_start, l_len, 0, 0)
flock = fcntl.fcntl(self._file_.fileno(), cmd, flock)
if '?' in how:
if sys.platform in ('netbsd1',
'openbsd2',
'freebsd2', 'freebsd3', 'freebsd4', 'freebsd5',
'bsdos2', 'bsdos3', 'bsdos4'):
l_start, l_len, l_pid, l_type, l_whence = \
struct.unpack('lxxxxlxxxxlhh', flock)
elif sys.platform in ('aix3', 'aix4'):
l_type, l_whence, l_start, l_len, l_sysid, l_pid, l_vfs = \
struct.unpack('hhlllii', flock)
elif sys.platform == "linux2":
l_type, l_whence, l_start, l_len, l_pid, l_sysid = \
struct.unpack('hhllhh', flock)
else:
l_type, l_whence, l_start, l_len, l_sysid, l_pid = \
struct.unpack('hhllhh', flock)
if l_type != fcntl.F_UNLCK:
if l_type == fcntl.F_RDLCK:
return 'r', l_len, l_start, l_whence, l_pid
else:
return 'w', l_len, l_start, l_whence, l_pid
def open(name, mode='r', bufsize=-1):
"""Public routine to open a file as a posixfile object."""
return _posixfile_().open(name, mode, bufsize)
def fileopen(file):
"""Public routine to get a posixfile object from a Python file object."""
return _posixfile_().fileopen(file)
#
# Constants
#
SEEK_SET = 0
SEEK_CUR = 1
SEEK_END = 2
#
# End of posixfile.py
#
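# --- Editorial usage sketch, appended; not part of the original module. ---
# Shows how the flags()/lock() API described in the module docstring might be
# used.  The file name is arbitrary and nothing below runs on import.
if __name__ == '__main__':
    f = open('/tmp/posixfile_demo', 'w+')   # module-level open(), not the builtin
    f.lock('w|')                            # blocking exclusive lock on the whole file
    f.write('locked while writing\n')
    f.lock('u')                             # release the lock
    current_flags = f.flags('?')            # query current flags as a string
    f.close()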
| huran2014/huran.github.io | wot_gateway/usr/lib/python2.7/posixfile.py | Python | gpl-2.0 | 8,003 | 0.004748 |
# -*- coding: utf-8 -*-
# Copyright (c) 2014 Tampere University of Technology,
# Intel Corporation,
# OptoFidelity,
# and authors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# pylint: disable = C0103, C0111, C0302, C0326
# pylint: disable = R0902, R0903, R0904, R0911, R0912, R0913, R0914, R0915
# pylint: disable = W0212
import unittest
_testloader = unittest.TestLoader()
_testsuite = _testloader.discover(".")
_testresult = unittest.TextTestRunner(verbosity = 2).run(_testsuite)
| teroc/Otero | tests/run_tests.py | Python | mit | 1,499 | 0.002668 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "CMS.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| IEEEDTU/CMS | manage.py | Python | mit | 246 | 0 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-03-01 02:16
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('file_tags', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='filetags',
name='parent_folder_uuid',
),
]
| miurahr/seahub | seahub/file_tags/migrations/0002_remove_filetags_parent_folder_uuid.py | Python | apache-2.0 | 360 | 0 |
"""Calculate exact solutions for the zero dimensional LLG as given by
[Mallinson2000]
"""
from __future__ import division
from __future__ import absolute_import
from math import sin, cos, tan, log, atan2, acos, pi, sqrt
import scipy as sp
import matplotlib.pyplot as plt
import functools as ft
import simpleode.core.utils as utils
def calculate_switching_time(magnetic_parameters, p_start, p_now):
"""Calculate the time taken to switch from polar angle p_start to p_now
with the magnetic parameters given.
"""
# Should never quite get to pi/2
# if p_now >= pi/2:
# return sp.inf
# Cache some things to simplify the expressions later
H = magnetic_parameters.H(None)
Hk = magnetic_parameters.Hk()
alpha = magnetic_parameters.alpha
gamma = magnetic_parameters.gamma
# Calculate the various parts of the expression
prefactor = ((alpha**2 + 1)/(gamma * alpha)) \
* (1.0 / (H**2 - Hk**2))
a = H * log(tan(p_now/2) / tan(p_start/2))
b = Hk * log((H - Hk*cos(p_start)) /
(H - Hk*cos(p_now)))
c = Hk * log(sin(p_now) / sin(p_start))
# Put everything together
return prefactor * (a + b + c)
def calculate_azimuthal(magnetic_parameters, p_start, p_now):
"""Calculate the azimuthal angle corresponding to switching from
p_start to p_now with the magnetic parameters given.
"""
def azi_into_range(azi):
a = azi % (2*pi)
if a < 0:
a += 2*pi
return a
alpha = magnetic_parameters.alpha
no_range_azi = (-1/alpha) * log(tan(p_now/2) / tan(p_start/2))
return azi_into_range(no_range_azi)
def generate_dynamics(magnetic_parameters,
start_angle=pi/18,
end_angle=17*pi/18,
steps=1000):
"""Generate a list of polar angles then return a list of corresponding
m directions (in spherical polar coordinates) and switching times.
"""
mag_params = magnetic_parameters
# Construct a set of solution positions
pols = sp.linspace(start_angle, end_angle, steps)
azis = [calculate_azimuthal(mag_params, start_angle, p) for p in pols]
sphs = [utils.SphPoint(1.0, azi, pol) for azi, pol in zip(azis, pols)]
# Calculate switching times for these positions
times = [calculate_switching_time(mag_params, start_angle, p)
for p in pols]
return (sphs, times)
def plot_dynamics(magnetic_parameters,
start_angle=pi/18,
end_angle=17*pi/18,
steps=1000):
"""Plot exact positions given start/finish angles and magnetic
parameters.
"""
sphs, times = generate_dynamics(magnetic_parameters, start_angle,
end_angle, steps)
sphstitle = "Path of m for " + str(magnetic_parameters) \
+ "\n (starting point is marked)."
utils.plot_sph_points(sphs, title=sphstitle)
timestitle = "Polar angle vs time for " + str(magnetic_parameters)
utils.plot_polar_vs_time(sphs, times, title=timestitle)
plt.show()
def calculate_equivalent_dynamics(magnetic_parameters, polars):
"""Given a list of polar angles (and some magnetic parameters)
calculate what the corresponding azimuthal angles and switching times
(from the first angle) should be.
"""
start_angle = polars[0]
f_times = ft.partial(calculate_switching_time, magnetic_parameters,
start_angle)
exact_times = [f_times(p) for p in polars]
f_azi = ft.partial(calculate_azimuthal, magnetic_parameters, start_angle)
exact_azis = [f_azi(p) for p in polars]
return exact_times, exact_azis
def plot_vs_exact(magnetic_parameters, ts, ms):
# Extract lists of the polar coordinates
    m_as_sph_points = [utils.array2sph(m) for m in ms]
pols = [m.pol for m in m_as_sph_points]
azis = [m.azi for m in m_as_sph_points]
# Calculate the corresponding exact dynamics
exact_times, exact_azis = \
calculate_equivalent_dynamics(magnetic_parameters, pols)
# Plot
plt.figure()
plt.plot(ts, pols, '--',
exact_times, pols)
plt.figure()
plt.plot(pols, azis, '--',
pols, exact_azis)
plt.show()
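# --------------------------------------------------------------------------
# Editorial usage sketch (not part of the original module).  The
# magnetic_parameters argument only needs H(t), Hk(), alpha and gamma, so a
# tiny hypothetical stand-in is defined here purely to show how the functions
# above are meant to be called; the numbers are arbitrary.
class _ExampleMagParameters(object):
    alpha = 0.5
    gamma = 1.0

    def H(self, t):
        return 2.0

    def Hk(self):
        return 0.0

    def __str__(self):
        return "example parameters"


if __name__ == '__main__':
    _example = _ExampleMagParameters()
    _t_switch = calculate_switching_time(_example, pi/18, 17*pi/18)
    _azi_final = calculate_azimuthal(_example, pi/18, 17*pi/18)
    print("switching time %.3f, final azimuthal angle %.3f"
          % (_t_switch, _azi_final))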
| davidshepherd7/Landau-Lifshitz-Gilbert-ODE-model | llg/mallinson.py | Python | gpl-3.0 | 4,251 | 0.000235 |
'''
Created on Feb 4, 2016
Decoding tables taken from https://github.com/typiconman/Perl-Lingua-CU
@author: mike kroutikov
'''
from __future__ import print_function, unicode_literals
import codecs
def ucs_decode(input_, errors='strict'):
return ''.join(decoding_table[x] for x in input_), len(input_)
def ucs_encode(input_, errors):
raise NotImplementedError('encoding to UCS is not implemented')
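# Editorial sketch (not from the original module): one possible way to expose
# the decoder through Python's codec machinery so byte strings can be decoded
# with .decode('ucs').  The codec name 'ucs' and this wiring are assumptions.
def _ucs_codec_search(name):
    if name != 'ucs':
        return None
    return codecs.CodecInfo(name='ucs', encode=ucs_encode, decode=ucs_decode)

# codecs.register(_ucs_codec_search)  # uncomment to enable b'...'.decode('ucs')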
### Decoding Table
decoding_table = (
'\x00',
'\x01',
'\x02',
'\x03',
'\x04',
'\x05',
'\x06',
'\x07',
'\x08',
'\t',
'\n',
'\x0b',
'\x0c',
'\r',
'\x0e',
'\x0f',
'\x10',
'\x11',
'\x12',
'\x13',
'\x14',
'\x15',
'\x16',
'\x17',
'\x18',
'\x19',
'\x1a',
'\x1b',
'\x1c',
'\x1d',
'\x1e',
'\x1f',
' ',
'!',
'"',
'\u0486',
'\u0486\u0301',
'\u0486\u0300',
'\u0483',
"'",
'(',
')',
'\ua673',
'\u2de1\u0487', # combining VE
',',
'-',
'.',
'/',
'\u043e\u0301',
'\u0301',
'\u0300',
'\u0486',
'\u0486\u0301',
'\u0486\u0300',
'\u0311', # combining inverted breve
'\u0483', # titlo
'\u033e', # combining vertical tilde
'\u0436\u0483', # zhe with titlo above
':',
';',
'\u2def', # combining HA
'\u2de9\u0487', # combining EN
'\u2dec\u0487', # combining ER
'\u2df1\u0487', # combining CHE
'\u0300',
'\u0430\u0300', # latin A maps to AZ with grave accent
'\u0463\u0311', # latin B maps to Yat' with inverted breve
'\u2ded\u0487', # combining ES
'\u0434\u2ded\u0487',
'\u0435\u0300', # latin E maps to e with grave accent
'\u0472', # F maps to THETA
'\u0433\u0483', # G maps to ge with TITLO
'\u0461\u0301', # latin H maps to omega with acute accent
'\u0406',
'\u0456\u0300',
'\ua656\u0486', # YA with psili
'\u043b\u2de3', # el with cobining de
'\u0476', # capital IZHITSA with kendema
'\u047a\u0486', # capital WIDE ON with psili
'\u047a', # just capital WIDE ON
'\u0470', # capital PSI
'\u047c', # capital omega with great apostrophe
'\u0440\u0483', # lowercase re with titlo
'\u0467\u0300', # lowercase small yus with grave
'\u047e', # capital OT
'\u041e\u0443', # diagraph capital UK
'\u0474', # capital IZHITSA
'\u0460', # capital OMEGA
'\u046e', # capital XI
'\ua64b\u0300', # monograph uk with grave
'\u0466', # capital SMALL YUS
'[',
'\u0483', # yet another titlo
']',
'\u0311', # combining inverted breve
'\u033e', # yet another yerik
'`',
'\u0430\u0301', # latin A maps to AZ with acute accent
'\u2dea\u0487', # combining ON
'\u2ded\u0487', # combining ES
'\u2de3', # combining DE
'\u0435\u0301', # latin E maps to e with acute accent
'\u0473', # lowercase theta
'\u2de2\u0487', # combining ge
'\u044b\u0301', # ery with acute accent
'\u0456',
'\u0456\u0301', # i with acute accent
'\ua657\u0486', # iotaed a with psili
'\u043b\u0483', # el with titlo
'\u0477', # izhitsa with izhe titlo
'\u047b\u0486', # wide on with psili
'\u047b', # wide on
'\u0471', # lowercase psi
'\u047d', # lowercase omega with great apostrophe
'\u0440\u2ded\u0487', # lowercase er with combining es
'\u0467\u0301', # lowercase small yus with acute accent
'\u047f', # lowercase ot
'\u1c82\u0443', # diagraph uk
'\u0475', # lowercase izhitsa
'\u0461', # lowercase omega
'\u046f', # lowercase xi
'\ua64b\u0301', # monograph uk with acute accent
'\u0467', # lowercase small yus
'\ua64b\u0311', # monograph uk with inverted breve
'\u0467\u0486\u0300', # lowercase small yus with apostroph
'\u0438\u0483', # the numeral eight
'\u0301', # yet another acute accent
'\x7f',
'\u0475\u0301', # lowercase izhitsa with acute
'\u0410\u0486\u0301', # uppercase A with psili and acute
'\u201a',
'\u0430\u0486\u0301', # lowercase A with psili and acute
'\u201e',
'\u046f\u0483', # the numberal sixty
'\u0430\u0311', # lowercase a with inverted breve
'\u0456\u0311', # lowercase i with inverted breve
'\u2de5', # combining ze
'\u0467\u0311', # lowercase small yus with inverted breve
'\u0466\u0486', # upercase small yus with psili
'\u0456\u0483', # the numeral ten
'\u0460\u0486', # capital OMEGA with psili
'\u041e\u0443\u0486\u0301', # diagraph uk with apostroph
'\ua656\u0486\u0301', # uppercase Iotated A with apostroph
'\u047a\u0486\u0301', # uppercase Round O with apostroph
'\u0475\u2de2\u0487', # lowercase izhitsa with combining ge
'\u2018',
'\u2019',
'\u201c',
'\u201d',
'\u2de4', # combining zhe
'\u2013',
'\u2014',
'\ufffe',
'\u0442\u0483',
'\u0467\u0486', # lowercase small yus with psili
'\u0475\u0311', # izhitsa with inverted breve
'\u0461\u0486', # lowercase omega with psili
'\u1c82\u0443\u0486\u0301', # diagraph uk with apostroph
'\ua657\u0486\u0301', # lowercase iotaed a with apostroph
'\u047b\u0486\u0301', # lowercase Round O with apostroph
'\xa0',
'\u041e\u0443\u0486', # Capital Diagraph Uk with psili
'\u1c82\u0443\u0486', # lowercase of the above
'\u0406\u0486\u0301', # Uppercase I with apostroph
'\u0482', # cyrillic thousands sign
'\u0410\u0486', # capital A with psili
'\u0445\u0483', # lowercase kha with titlo
'\u0447\u0483', # the numeral ninety
'\u0463\u0300', # lowecase yat with grave accent
'\u0441\u0483', # the numeral two hundred
'\u0404',
'\xab',
'\xac',
'\xad',
'\u0440\u2de3', # lowercase er with dobro titlo
'\u0406\u0486',
'\ua67e', # kavyka
'\ua657\u0486\u0300',
'\u0406',
'\u0456\u0308',
'\u0430\u0486',
'\u0443', # small letter u (why encoded at the micro sign?!)
'\xb6',
'\xb7',
'\u0463\u0301', # lowercase yat with acute accent
'\u0430\u0483', # the numeral one
'\u0454', # wide E
'\xbb',
'\u0456\u0486\u0301', # lowercase i with apostroph
'\u0405',
'\u0455',
'\u0456\u0486', # lowercase i with psili
'\u0410',
'\u0411',
'\u0412',
'\u0413',
'\u0414',
'\u0415',
'\u0416',
'\u0417',
'\u0418',
'\u0419',
'\u041a',
'\u041b',
'\u041c',
'\u041d',
'\u041e',
'\u041f',
'\u0420',
'\u0421',
'\u0422',
'\ua64a',
'\u0424',
'\u0425',
'\u0426',
'\u0427',
'\u0428',
'\u0429',
'\u042a',
'\u042b',
'\u042c',
'\u0462', # capital yat
'\u042e',
'\ua656', # capital Iotified A
'\u0430',
'\u0431',
'\u0432',
'\u0433',
'\u0434',
'\u0435',
'\u0436',
'\u0437',
'\u0438',
'\u0439',
'\u043a',
'\u043b',
'\u043c',
'\u043d',
'\u043e',
'\u043f',
'\u0440',
'\u0441',
'\u0442',
'\ua64b', # monograph Uk (why?!)
'\u0444',
'\u0445',
'\u0446',
'\u0447',
'\u0448',
'\u0449',
'\u044a',
'\u044b',
'\u044c',
'\u0463', # lowercase yat
'\u044e',
'\ua657', # iotaed a
)
def _build_decoding_table(fname):
    '''Utility to build decoding_table from Perl's ucsequivs file. We start from cp1251 and overlay data from ucsequivs.'''
from encodings import cp1251
decode_table = list(cp1251.decoding_table)
comments = [None] * 256
with codecs.open(fname, 'r', 'utf-8') as f:
for line in f:
line = line.strip()
if not line or line == 'use utf8;' or line.startswith('#'):
continue
key, chars, comment = parse_perl_dictionary_entry(line)
decode_table[key] = chars
comments[key] = comment
return decode_table, comments
def parse_perl_dictionary_entry(line):
key, value = line.split('=>')
key = key.strip().strip("'")
if key == '\\\\':
key = '\\'
key = key.encode('cp1251')
assert len(key) == 1, key
key = int(key[0])
value = value.strip()
values = value.split('#', 1)
value = values[0].strip() # removes trailing comment
if len(values) == 2:
comment = values[1].strip()
else:
comment = None
value = value.rstrip(',')
chars = [x.strip() for x in value.split('.')]
    assert all(x.startswith('chr(') and x.endswith(')') for x in chars)
chars = [int(x[4:-1], 0) for x in chars]
chars = ''.join(chr(x) for x in chars)
return key, chars, comment
if __name__ == '__main__':
'''Code that generates "decoding_table" from Perl ucs encoding table.
1. Download Perl UCS encoding table from:
https://raw.githubusercontent.com/typiconman/Perl-Lingua-CU/master/lib/Lingua/CU/Scripts/ucsequivs
2. Put it into current directory.
3. Run this code to generate Python array "decoding_table"
'''
dt, cm = _build_decoding_table('ucsequivs')
print('decoding_table = (')
for x,c in zip(dt, cm):
if c is not None:
c = ' # ' + c
else:
c = ''
if x == "'": # treat single quote separately to avoid syntax error (is there a better way? - MK)
print('\t"%s",%s' % (x.encode('unicode-escape').decode(), c))
else:
print("\t'%s',%s" % (x.encode('unicode-escape').decode(), c))
print(')') | pgmmpk/cslavonic | cslavonic/ucs_decode.py | Python | mit | 9,501 | 0.001053 |
## BTisWatchingU ##
#
# This program scans for bluetooth devices and add their address and name to a
# centralized database. This database have some simple facilities to determine
# where and when the device have been spotted.
# Copyright (C) 2008,2009 Philippe Chretien
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License Version 2
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# You will find the latest version of this code at the following address:
# http://github.com/pchretien
#
# You can contact me at the following email address:
# [email protected]
class device:
__name = ""
__address = ""
def __init__(self, deviceName, deviceAddress):
self.__name = deviceName
self.__address = deviceAddress
def getName(self):
return self.__name
def getAddress(self):
return self.__address | pchretien/btiswatchingu | python/bt_device.py | Python | gpl-2.0 | 1,349 | 0.004448 |
"""Support for RESTful API sensors."""
import json
import logging
from xml.parsers.expat import ExpatError
import httpx
from jsonpath import jsonpath
import voluptuous as vol
import xmltodict
from homeassistant.components.sensor import DEVICE_CLASSES_SCHEMA, PLATFORM_SCHEMA
from homeassistant.const import (
CONF_AUTHENTICATION,
CONF_DEVICE_CLASS,
CONF_FORCE_UPDATE,
CONF_HEADERS,
CONF_METHOD,
CONF_NAME,
CONF_PASSWORD,
CONF_PAYLOAD,
CONF_RESOURCE,
CONF_RESOURCE_TEMPLATE,
CONF_TIMEOUT,
CONF_UNIT_OF_MEASUREMENT,
CONF_USERNAME,
CONF_VALUE_TEMPLATE,
CONF_VERIFY_SSL,
HTTP_BASIC_AUTHENTICATION,
HTTP_DIGEST_AUTHENTICATION,
)
from homeassistant.exceptions import PlatformNotReady
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.reload import async_setup_reload_service
from . import DOMAIN, PLATFORMS
from .data import DEFAULT_TIMEOUT, RestData
_LOGGER = logging.getLogger(__name__)
DEFAULT_METHOD = "GET"
DEFAULT_NAME = "REST Sensor"
DEFAULT_VERIFY_SSL = True
DEFAULT_FORCE_UPDATE = False
CONF_JSON_ATTRS = "json_attributes"
CONF_JSON_ATTRS_PATH = "json_attributes_path"
METHODS = ["POST", "GET"]
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Exclusive(CONF_RESOURCE, CONF_RESOURCE): cv.url,
vol.Exclusive(CONF_RESOURCE_TEMPLATE, CONF_RESOURCE): cv.template,
vol.Optional(CONF_AUTHENTICATION): vol.In(
[HTTP_BASIC_AUTHENTICATION, HTTP_DIGEST_AUTHENTICATION]
),
vol.Optional(CONF_HEADERS): vol.Schema({cv.string: cv.string}),
vol.Optional(CONF_JSON_ATTRS, default=[]): cv.ensure_list_csv,
vol.Optional(CONF_METHOD, default=DEFAULT_METHOD): vol.In(METHODS),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PASSWORD): cv.string,
vol.Optional(CONF_PAYLOAD): cv.string,
vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string,
vol.Optional(CONF_DEVICE_CLASS): DEVICE_CLASSES_SCHEMA,
vol.Optional(CONF_USERNAME): cv.string,
vol.Optional(CONF_JSON_ATTRS_PATH): cv.string,
vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL): cv.boolean,
vol.Optional(CONF_FORCE_UPDATE, default=DEFAULT_FORCE_UPDATE): cv.boolean,
vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
}
)
PLATFORM_SCHEMA = vol.All(
cv.has_at_least_one_key(CONF_RESOURCE, CONF_RESOURCE_TEMPLATE), PLATFORM_SCHEMA
)
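# Editorial note: a minimal configuration.yaml sketch for this platform, with
# keys taken from PLATFORM_SCHEMA above (URL and template values are made up):
#
#   sensor:
#     - platform: rest
#       resource: http://192.0.2.10/status.json
#       name: Example REST Sensor
#       value_template: "{{ value_json.temperature }}"
#       unit_of_measurement: "°C"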
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the RESTful sensor."""
await async_setup_reload_service(hass, DOMAIN, PLATFORMS)
name = config.get(CONF_NAME)
resource = config.get(CONF_RESOURCE)
resource_template = config.get(CONF_RESOURCE_TEMPLATE)
method = config.get(CONF_METHOD)
payload = config.get(CONF_PAYLOAD)
verify_ssl = config.get(CONF_VERIFY_SSL)
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
headers = config.get(CONF_HEADERS)
unit = config.get(CONF_UNIT_OF_MEASUREMENT)
device_class = config.get(CONF_DEVICE_CLASS)
value_template = config.get(CONF_VALUE_TEMPLATE)
json_attrs = config.get(CONF_JSON_ATTRS)
json_attrs_path = config.get(CONF_JSON_ATTRS_PATH)
force_update = config.get(CONF_FORCE_UPDATE)
timeout = config.get(CONF_TIMEOUT)
if value_template is not None:
value_template.hass = hass
if resource_template is not None:
resource_template.hass = hass
resource = resource_template.render(parse_result=False)
if username and password:
if config.get(CONF_AUTHENTICATION) == HTTP_DIGEST_AUTHENTICATION:
auth = httpx.DigestAuth(username, password)
else:
auth = (username, password)
else:
auth = None
rest = RestData(method, resource, auth, headers, payload, verify_ssl, timeout)
await rest.async_update()
if rest.data is None:
raise PlatformNotReady
# Must update the sensor now (including fetching the rest resource) to
# ensure it's updating its state.
async_add_entities(
[
RestSensor(
hass,
rest,
name,
unit,
device_class,
value_template,
json_attrs,
force_update,
resource_template,
json_attrs_path,
)
],
True,
)
class RestSensor(Entity):
"""Implementation of a REST sensor."""
def __init__(
self,
hass,
rest,
name,
unit_of_measurement,
device_class,
value_template,
json_attrs,
force_update,
resource_template,
json_attrs_path,
):
"""Initialize the REST sensor."""
self._hass = hass
self.rest = rest
self._name = name
self._state = None
self._unit_of_measurement = unit_of_measurement
self._device_class = device_class
self._value_template = value_template
self._json_attrs = json_attrs
self._attributes = None
self._force_update = force_update
self._resource_template = resource_template
self._json_attrs_path = json_attrs_path
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return self._unit_of_measurement
@property
def device_class(self):
"""Return the class of this sensor."""
return self._device_class
@property
def available(self):
"""Return if the sensor data are available."""
return self.rest.data is not None
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def force_update(self):
"""Force update."""
return self._force_update
async def async_update(self):
"""Get the latest data from REST API and update the state."""
if self._resource_template is not None:
self.rest.set_url(self._resource_template.render(parse_result=False))
await self.rest.async_update()
value = self.rest.data
_LOGGER.debug("Data fetched from resource: %s", value)
if self.rest.headers is not None:
# If the http request failed, headers will be None
content_type = self.rest.headers.get("content-type")
if content_type and (
content_type.startswith("text/xml")
or content_type.startswith("application/xml")
):
try:
value = json.dumps(xmltodict.parse(value))
_LOGGER.debug("JSON converted from XML: %s", value)
except ExpatError:
_LOGGER.warning(
"REST xml result could not be parsed and converted to JSON"
)
_LOGGER.debug("Erroneous XML: %s", value)
if self._json_attrs:
self._attributes = {}
if value:
try:
json_dict = json.loads(value)
if self._json_attrs_path is not None:
json_dict = jsonpath(json_dict, self._json_attrs_path)
# jsonpath will always store the result in json_dict[0]
# so the next line happens to work exactly as needed to
# find the result
if isinstance(json_dict, list):
json_dict = json_dict[0]
if isinstance(json_dict, dict):
attrs = {
k: json_dict[k] for k in self._json_attrs if k in json_dict
}
self._attributes = attrs
else:
_LOGGER.warning(
"JSON result was not a dictionary"
" or list with 0th element a dictionary"
)
except ValueError:
_LOGGER.warning("REST result could not be parsed as JSON")
_LOGGER.debug("Erroneous JSON: %s", value)
else:
_LOGGER.warning("Empty reply found when expecting JSON data")
if value is not None and self._value_template is not None:
value = self._value_template.async_render_with_possible_json_value(
value, None
)
self._state = value
async def async_will_remove_from_hass(self):
"""Shutdown the session."""
await self.rest.async_remove()
@property
def device_state_attributes(self):
"""Return the state attributes."""
return self._attributes
| balloob/home-assistant | homeassistant/components/rest/sensor.py | Python | apache-2.0 | 9,072 | 0.000882 |
from SPARQLWrapper import SPARQLWrapper, JSON
sparql = SPARQLWrapper("http://dbpedia.org/sparql")
sparql.setQuery("""
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
SELECT ?label
WHERE { <http://dbpedia.org/resource/Asturias> rdfs:label ?label }
""")
sparql.setReturnFormat(JSON)
results = sparql.query().convert()
for result in results["results"]["bindings"]:
print(result["label"]["value"]) | davidam/python-examples | sparql/dbpedia-asturias.py | Python | gpl-3.0 | 416 | 0.002404 |
#!/usr/bin/env python3
import apts
from distutils.core import setup
setup(
name = apts.__name__,
packages = [apts.__name__],
scripts = ['bin/apts'],
version = apts.__version__,
description = apts.__description__,
author = apts.__author__,
author_email = apts.__author_email__,
license = apts.__license__,
platforms = apts.__platforms__,
url = apts.__url__,
download_url = apts.__download_url__,
keywords = ['tftp', 'server', 'file transfer'],
classifiers = [
'Programming Language :: Python',
'Programming Language :: Python :: 3 :: Only',
'License :: OSI Approved :: GNU General Public License (GPL)',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Operating System :: POSIX :: Linux',
'Development Status :: 3 - Alpha',
'Environment :: No Input/Output (Daemon)',
'Natural Language :: English',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: System Administrators',
'Topic :: Communications :: File Sharing',
],
)
| Ilias95/apts | setup.py | Python | gpl-3.0 | 1,102 | 0.024501 |
# Copyright (c) 2018, Xilinx Inc.
#
# Siva Durga Prasad Paladugu
#
# SPDX-License-Identifier: GPL-2.0
import pytest
import re
import random
import u_boot_utils
"""
Note: This test relies on boardenv_* containing configuration values to define
the network available and files to be used for testing. Without this, this test
will be automatically skipped.
For example:
# True if a DHCP server is attached to the network, and should be tested.
env__net_dhcp_server = True
# A list of environment variables that should be set in order to configure a
# static IP. In this test case we atleast need serverip for performing tftpb
# to get required files.
env__net_static_env_vars = [
("ipaddr", "10.0.0.100"),
("netmask", "255.255.255.0"),
("serverip", "10.0.0.1"),
]
# Details regarding the files that may be read from a TFTP server.
env__zynqmp_secure_readable_file = {
"fn": "auth_bhdr_ppk1.bin",
"enckupfn": "auth_bhdr_enc_kup_load.bin",
"addr": 0x1000000,
"keyaddr": 0x100000,
"keyfn": "aes.txt",
}
"""
import test_net
@pytest.mark.buildconfigspec('cmd_zynqmp')
@pytest.mark.buildconfigspec('cmd_net')
@pytest.mark.buildconfigspec('cmd_dhcp')
@pytest.mark.buildconfigspec('net')
def test_zynqmp_secure_boot_image(u_boot_console):
test_net.test_net_dhcp(u_boot_console)
test_net.test_net_setup_static(u_boot_console)
f = u_boot_console.config.env.get('env__zynqmp_secure_readable_file', None)
if not f:
pytest.skip('No TFTP readable file to read')
addr = f.get('addr', None)
if not addr:
addr = u_boot_utils.find_ram_base(u_boot_console)
expected_tftp = 'Bytes transferred = '
fn = f['fn']
output = u_boot_console.run_command('tftpboot %x %s' % (addr, fn))
assert expected_tftp in output
expected_zynqmpsecure = 'Verified image at'
output = u_boot_console.run_command('zynqmp secure %x $filesize' % (addr))
assert expected_zynqmpsecure in output
output = u_boot_console.run_command('pri zynqmp_verified_img_addr')
assert "Error" not in output
@pytest.mark.buildconfigspec('cmd_zynqmp')
@pytest.mark.buildconfigspec('cmd_net')
@pytest.mark.buildconfigspec('cmd_dhcp')
@pytest.mark.buildconfigspec('net')
def test_zynqmp_secure_boot_img_kup(u_boot_console):
test_net.test_net_dhcp(u_boot_console)
test_net.test_net_setup_static(u_boot_console)
f = u_boot_console.config.env.get('env__zynqmp_secure_readable_file', None)
if not f:
pytest.skip('No TFTP readable file to read')
keyaddr = f.get('keyaddr', None)
if not keyaddr:
        keyaddr = u_boot_utils.find_ram_base(u_boot_console)
expected_tftp = 'Bytes transferred = '
keyfn = f['keyfn']
output = u_boot_console.run_command('tftpboot %x %s' % (keyaddr, keyfn))
assert expected_tftp in output
addr = f.get('addr', None)
if not addr:
addr = u_boot_utils.find_ram_base(u_boot_console)
expected_tftp = 'Bytes transferred = '
fn = f['enckupfn']
output = u_boot_console.run_command('tftpboot %x %s' % (addr, fn))
assert expected_tftp in output
expected_zynqmpsecure = 'Verified image at'
output = u_boot_console.run_command('zynqmp secure %x $filesize %x' % (addr, keyaddr))
assert expected_zynqmpsecure in output
output = u_boot_console.run_command('pri zynqmp_verified_img_addr')
assert "Error" not in output
| Digilent/u-boot-digilent | test/py/tests/test_zynqmp_secure.py | Python | gpl-2.0 | 3,375 | 0.001778 |
########################################################################
# Rancho - Open Source Group/Project Management Tool
# Copyright (C) 2008 The Rancho Team
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
########################################################################
from django.db.models import signals
from django.utils.translation import ugettext_noop as _
from rancho.notification import models as notification
from rancho.message import models as message_app
def create_notice_types(app, created_models, verbosity, **kwargs):
notification.create_notice_type("message_new", _("New message"), _("A new message has been created"))
notification.create_notice_type("message_replied", _("Message replyed"), _("A message has been replyed"))
signals.post_syncdb.connect(create_notice_types, message_app)
| joaquimrocha/Rancho | rancho/message/management.py | Python | agpl-3.0 | 1,442 | 0.002774 |
"""
Apple Push Notification Service
Documentation is available on the iOS Developer Library:
https://developer.apple.com/library/content/documentation/NetworkingInternet/Conceptual/RemoteNotificationsPG/APNSOverview.html
"""
import time
from apns2 import client as apns2_client
from apns2 import credentials as apns2_credentials
from apns2 import errors as apns2_errors
from apns2 import payload as apns2_payload
from . import models
from . import NotificationError
from .apns_errors import reason_for_exception_class
from .conf import get_manager
class APNSError(NotificationError):
pass
class APNSUnsupportedPriority(APNSError):
pass
class APNSServerError(APNSError):
def __init__(self, status):
super(APNSServerError, self).__init__(status)
self.status = status
def _apns_create_socket(creds=None, application_id=None):
if creds is None:
if not get_manager().has_auth_token_creds(application_id):
cert = get_manager().get_apns_certificate(application_id)
creds = apns2_credentials.CertificateCredentials(cert)
else:
keyPath, keyId, teamId = get_manager().get_apns_auth_creds(application_id)
# No use getting a lifetime because this credential is
# ephemeral, but if you're looking at this to see how to
# create a credential, you could also pass the lifetime and
# algorithm. Neither of those settings are exposed in the
# settings API at the moment.
creds = creds or apns2_credentials.TokenCredentials(keyPath, keyId, teamId)
client = apns2_client.APNsClient(
creds,
use_sandbox=get_manager().get_apns_use_sandbox(application_id),
use_alternative_port=get_manager().get_apns_use_alternative_port(application_id)
)
client.connect()
return client
def _apns_prepare(
token, alert, application_id=None, badge=None, sound=None, category=None,
content_available=False, action_loc_key=None, loc_key=None, loc_args=[],
extra={}, mutable_content=False, thread_id=None, url_args=None):
if action_loc_key or loc_key or loc_args:
apns2_alert = apns2_payload.PayloadAlert(
body=alert if alert else {}, body_localized_key=loc_key,
body_localized_args=loc_args, action_localized_key=action_loc_key)
else:
apns2_alert = alert
if callable(badge):
badge = badge(token)
return apns2_payload.Payload(
apns2_alert, badge, sound, content_available, mutable_content, category,
url_args, custom=extra, thread_id=thread_id)
def _apns_send(
registration_id, alert, batch=False, application_id=None, creds=None, **kwargs
):
client = _apns_create_socket(creds=creds, application_id=application_id)
notification_kwargs = {}
# if expiration isn"t specified use 1 month from now
notification_kwargs["expiration"] = kwargs.pop("expiration", None)
if not notification_kwargs["expiration"]:
notification_kwargs["expiration"] = int(time.time()) + 2592000
priority = kwargs.pop("priority", None)
if priority:
try:
notification_kwargs["priority"] = apns2_client.NotificationPriority(str(priority))
except ValueError:
raise APNSUnsupportedPriority("Unsupported priority %d" % (priority))
if batch:
data = [apns2_client.Notification(
token=rid, payload=_apns_prepare(rid, alert, **kwargs)) for rid in registration_id]
# returns a dictionary mapping each token to its result. That
# result is either "Success" or the reason for the failure.
return client.send_notification_batch(
data, get_manager().get_apns_topic(application_id=application_id),
**notification_kwargs
)
data = _apns_prepare(registration_id, alert, **kwargs)
client.send_notification(
registration_id, data,
get_manager().get_apns_topic(application_id=application_id),
**notification_kwargs
)
def apns_send_message(registration_id, alert, application_id=None, creds=None, **kwargs):
"""
Sends an APNS notification to a single registration_id.
This will send the notification as form data.
If sending multiple notifications, it is more efficient to use
apns_send_bulk_message()
    Note that, if set, alert should always be a string. If it is not set,
    it won't be included in the notification. You will need to pass None
to this for silent notifications.
"""
try:
_apns_send(
registration_id, alert, application_id=application_id,
creds=creds, **kwargs
)
except apns2_errors.APNsException as apns2_exception:
if isinstance(apns2_exception, apns2_errors.Unregistered):
device = models.APNSDevice.objects.get(registration_id=registration_id)
device.active = False
device.save()
raise APNSServerError(status=reason_for_exception_class(apns2_exception.__class__))
def apns_send_bulk_message(
registration_ids, alert, application_id=None, creds=None, **kwargs
):
"""
Sends an APNS notification to one or more registration_ids.
The registration_ids argument needs to be a list.
    Note that, if set, alert should always be a string. If it is not set,
    it won't be included in the notification. You will need to pass None
to this for silent notifications.
"""
results = _apns_send(
registration_ids, alert, batch=True, application_id=application_id,
creds=creds, **kwargs
)
inactive_tokens = [token for token, result in results.items() if result == "Unregistered"]
models.APNSDevice.objects.filter(registration_id__in=inactive_tokens).update(active=False)
return results
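# Editorial usage sketch (not part of the module): how the two helpers above
# are typically called; the token value is a placeholder supplied by the caller.
def _example_usage(token):
    # Send to a single device; raises APNSServerError on an APNs failure.
    apns_send_message(registration_id=token, alert="Hello from Django")
    # Send to many devices at once; returns a dict mapping token -> result.
    return apns_send_bulk_message(registration_ids=[token], alert="Hello, everyone")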
| shigmas/django-push-notifications | push_notifications/apns.py | Python | mit | 5,297 | 0.020578 |
# coding: utf-8
# # Clean the raw data
#
# ## Data from U.S. Energy Information Administration
#
# Data URL: [eia.gov](http://www.eia.gov/coal/data.cfm)
#
# Combining and cleaning the raw csv files into a cleaned data set and coherent database.
#
# Generally a good idea to have a separate data folder with the raw data.
#
# When you clean the raw data, leave the raw in place, and create cleaned version with the steps included (ideal situation for Notebook).
# In[1]:
# %install_ext http://raw.github.com/jrjohansson/version_information/master/version_information.py
get_ipython().magic(u'load_ext version_information')
get_ipython().magic(u'reload_ext version_information')
get_ipython().magic(u'version_information numpy, scipy, matplotlib, pandas')
# In[2]:
import numpy as np
import pandas as pd
# In[3]:
get_ipython().system(u'pwd')
# In[4]:
# The cleaned data file is saved here:
output_file = "../data/coal_prod_cleaned.csv"
# In[5]:
df1 = pd.read_csv("../data/coal_prod_2002.csv", index_col="MSHA_ID")
df2 = pd.read_csv("../data/coal_prod_2003.csv", index_col="MSHA_ID")
df3 = pd.read_csv("../data/coal_prod_2004.csv", index_col="MSHA_ID")
df4 = pd.read_csv("../data/coal_prod_2005.csv", index_col="MSHA_ID")
df5 = pd.read_csv("../data/coal_prod_2006.csv", index_col="MSHA_ID")
df6 = pd.read_csv("../data/coal_prod_2007.csv", index_col="MSHA_ID")
df7 = pd.read_csv("../data/coal_prod_2008.csv", index_col="MSHA_ID")
df8 = pd.read_csv("../data/coal_prod_2009.csv", index_col="MSHA_ID")
df9 = pd.read_csv("../data/coal_prod_2010.csv", index_col="MSHA_ID")
df10 = pd.read_csv("../data/coal_prod_2011.csv", index_col="MSHA_ID")
df11 = pd.read_csv("../data/coal_prod_2012.csv", index_col="MSHA_ID")
# In[6]:
dframe = pd.concat((df1, df2, df3, df4, df5, df6, df7, df8, df9, df10, df11))
# In[7]:
# Noticed a probable typo in the data set:
dframe['Company_Type'].unique()
# In[8]:
# Correcting the Company_Type
dframe.loc[dframe['Company_Type'] == 'Indepedent Producer Operator', 'Company_Type'] = 'Independent Producer Operator'
dframe.head()
# In[9]:
dframe[dframe.Year == 2003].head()
# # Final Cleaned Data Product
# In[10]:
dframe.to_csv(output_file)
# In[ ]:
| jbwhit/OSCON-2015 | deliver/coal_data_cleanup.py | Python | mit | 2,214 | 0.006323 |
from property import *
# Neuron common parameters
iaf_neuronparams = {'E_L': -70.,
'V_th': -50.,
'V_reset': -67.,
'C_m': 2.,
't_ref': 2.,
'V_m': -60.,
'tau_syn_ex': 1.,
'tau_syn_in': 1.33}
# Synapse common parameters
STDP_synapseparams = {
'model': 'stdp_synapse',
'tau_m': {'distribution': 'uniform', 'low': 15., 'high': 25.},
'alpha': {'distribution': 'normal_clipped', 'low': 0.5, 'mu': 5.0, 'sigma': 1.0},
'delay': {'distribution': 'uniform', 'low': 0.8, 'high': 2.5},
'lambda': 0.5
}
# Glutamate synapse
STDP_synparams_Glu = dict({'delay': {'distribution': 'uniform', 'low': 1, 'high': 1.3},
'weight': w_Glu,
'Wmax': 70.}, **STDP_synapseparams)
# GABA synapse
STDP_synparams_GABA = dict({'delay': {'distribution': 'uniform', 'low': 1., 'high': 1.3},
'weight': w_GABA,
'Wmax': -60.}, **STDP_synapseparams)
# Acetylcholine synapse
STDP_synparams_ACh = dict({'delay': {'distribution': 'uniform', 'low': 1, 'high': 1.3},
'weight': w_ACh,
'Wmax': 70.}, **STDP_synapseparams)
# Dopamine synapse common parameter
DOPA_synparams = {'delay': 1.}
# Dopamine excitatory synapse
DOPA_synparams_ex = dict({'weight': w_DA_ex,
'Wmax': 100.,
'Wmin': 85.}, **DOPA_synparams)
# Dopamine inhibitory synapse
DOPA_synparams_in = dict({'weight': w_DA_in,
'Wmax': -100.,
'Wmin': -85.}, **DOPA_synparams)
# Noradrenaline synapse common parameter
NORA_synparams = {'delay': 1.}
# Noradrenaline excitatory synapse
NORA_synparams_ex = dict({'weight': w_NA_ex,
'Wmax': 100.,
'Wmin': 85.}, **NORA_synparams)
# Serotonin synapse common parameter
SERO_synparams = {'delay': 1.}
# Serotonin inhibitory synapse
SERO_synparams_in = dict({'weight': w_SE_in,
'Wmax': -100.,
'Wmin': -85.}, **SERO_synparams)
# Create volume transmitters
# Dictionary of synapses with keys and their parameters
types = {GABA: (STDP_synparams_GABA, w_GABA, 'GABA'),
ACh: (STDP_synparams_ACh, w_ACh, 'Ach'),
Glu: (STDP_synparams_Glu, w_Glu, 'Glu'),
DA_ex: (DOPA_synparams_ex, w_DA_ex, 'DA_ex', dopa_model_ex),
DA_in: (DOPA_synparams_in, w_DA_in, 'DA_in', dopa_model_in),
NA_ex: (NORA_synparams_ex, w_NA_ex, 'NA_ex', nora_model_ex),
SE_in: (SERO_synparams_in, w_SE_in, 'SE_in', sero_model_in) }
# Parameters for generator links
static_syn = {
'model': 'static_synapse',
'weight': w_Glu * 5,
'delay': pg_delay
}
# Connection parameters
conn_dict = {'rule': 'all_to_all',
'multapses': True}
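# Editorial sketch (assumes the standard NEST Connect API and two already
# created populations `pre` and `post`): the dictionaries above are meant to
# be passed as conn_spec/syn_spec arguments, e.g.
#   nest.Connect(pre, post, conn_spec=conn_dict, syn_spec=STDP_synparams_Glu)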
# Device parameters
multimeter_param = {'to_memory': True,
'to_file': False,
'withtime': True,
'interval': 0.1,
'record_from': ['V_m'],
'withgid': True}
detector_param = {'label': 'spikes',
'withtime': True,
'withgid': True,
'to_file': False,
'to_memory': True,
'scientific': True} | magnastrazh/NEUCOGAR | nest/GDP/scripts/parameters.py | Python | gpl-2.0 | 3,431 | 0.001749 |
#!/opt/python3.3/bin/python3.3
from threading import Thread
import RPi.GPIO as GPIO
import time
import datetime
import os
import sys
import dht11_sensor
import psycopg2
import copy
###-----------------Hardware Settings-----------------------
PIN_LC=25 #Light Sensor (GPIO.IN, pull_up_down=GPIO.PUD_UP)
PIN_MC=17 #Motion Sensor (GPIO.IN, pull_up_down=GPIO.PUD_UP)
PIN_TC=4 #Temp Sensor (GPIO.IN, pull_up_down=GPIO.PUD_UP)
PIN_TC_WP=7    #Temp Sensor  #WiringPi pin 7
PIN_LED1=23 #LED Blue 1
PIN_LED2=24 #LED Blue 2
###------------------SQL Settings-----------------------------
SQL_SRV='127.0.0.1'
SQL_USER='pistats'
SQL_PASSWD='pistats'
SQL_DB='pistats'
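###------------------Assumed database schema------------------
# Editorial sketch: save_data() below reads and writes a `sensordata` table.
# The exact DDL is not part of this script, but it is assumed to look roughly
# like this:
#   CREATE TABLE sensordata (
#       id          serial PRIMARY KEY,
#       sensor_type text,
#       data_read   text,
#       date_added  timestamp,
#       time_added  timestamp
#   );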
#setup pins. Some are setup by functions below.
GPIO.setmode(GPIO.BCM)
GPIO.setup(PIN_LED1,GPIO.OUT)
GPIO.setup(PIN_LED2,GPIO.OUT)
GPIO.setup(PIN_MC, GPIO.IN, pull_up_down=GPIO.PUD_UP)
#dim leds
GPIO.output(PIN_LED1,GPIO.LOW)
GPIO.output(PIN_LED2,GPIO.LOW)
def UnixLocalEpoch():
dt = datetime.datetime.now()
return int((dt - datetime.datetime(1970,1,1)).total_seconds())
def PhotoSensor(RCpin):
reading = 0
GPIO.setup(RCpin, GPIO.OUT)
GPIO.output(RCpin, GPIO.LOW)
time.sleep(0.1)
GPIO.setup(RCpin, GPIO.IN)
# This takes about 1 millisecond per loop cycle
while (GPIO.input(RCpin) == GPIO.LOW):
reading += 1
return reading
def TempsensorRead():
### Loop through the temp sensor library until we get a valid reading ###
for i in range(1,100):
data = dht11_sensor.read(PIN_TC_WP)
#print('Temp={0}*C Humidity={1}% Status={2} Error={3}'.format(data['temperature'], data['humidity'], data['valid'], data['err']))
if data['valid'] == 1:
return data
return None
def save_data(p_SensorValues):
try:
sql_con = psycopg2.connect(host=SQL_SRV, user=SQL_USER,password=SQL_PASSWD,database=SQL_DB)
sql_cur = sql_con.cursor()
print('2temp->' + str(p_SensorValues['temperature']['save']))
print('2light->' + str(p_SensorValues['light']['save']))
print('2motion->' + str(p_SensorValues['motion']['save']))
if p_SensorValues.get('motion', None):
sql_cur.execute("""select id, data_read from sensordata
where sensor_type = 'motion' order by id desc limit 1""")
data = sql_cur.fetchone()
if not data or p_SensorValues['motion']['save']: #(data and str(data[1]) != str(p_SensorValues['motion']['data'])):
sql_cur.execute("""INSERT INTO sensordata (sensor_type, data_read, date_added,time_added)
VALUES (%s, %s, TIMESTAMP 'epoch' + %s * INTERVAL '1 second',TIMESTAMP 'epoch' + %s * INTERVAL '1 second')""",
('motion', p_SensorValues['motion']['data'], p_SensorValues['motion']['read'],p_SensorValues['motion']['read'] ))
if p_SensorValues.get('light', None):
sql_cur.execute("select id, data_read from sensordata where sensor_type = 'light' order by id desc limit 1")
data = sql_cur.fetchone()
#we have a +- 10 variance on light.
if not data or p_SensorValues['light']['save']: #(data and (int(p_SensorValues['light']['data']) > int(data[1])+10 or int(p_SensorValues['light']['data']) < int(data[1]) - 10) ):
sql_cur.execute("""INSERT INTO sensordata (sensor_type, data_read, date_added,time_added)
VALUES(%s, %s, TIMESTAMP 'epoch' + %s * INTERVAL '1 second',TIMESTAMP 'epoch' + %s * INTERVAL '1 second')""",
('light', p_SensorValues['light']['data'], p_SensorValues['light']['read'],p_SensorValues['light']['read'] ))
if p_SensorValues.get('temperature', None):
sql_cur.execute("select id, data_read from sensordata where sensor_type = 'temperature' order by id desc limit 1")
data = sql_cur.fetchone()
if not data or p_SensorValues['temperature']['save']: #(data and str(data[1]) != str(p_SensorValues['temperature']['temperature'])):
sql_cur.execute("""INSERT INTO sensordata (sensor_type, data_read, date_added,time_added)
VALUES(%s, %s, TIMESTAMP 'epoch' + %s * INTERVAL '1 second',TIMESTAMP 'epoch' + %s * INTERVAL '1 second')""",
('temperature', p_SensorValues['temperature']['temperature'], p_SensorValues['temperature']['read'], p_SensorValues['temperature']['read'] ))
if p_SensorValues.get('temperature', None):
sql_cur.execute("select id, data_read from sensordata where sensor_type = 'humidity' order by id desc limit 1")
data = sql_cur.fetchone()
if not data or p_SensorValues['temperature']['save']:#(data and str(data[1]) != str(p_SensorValues['temperature']['humidity'])):
sql_cur.execute("""INSERT INTO sensordata (sensor_type, data_read, date_added,time_added)
VALUES(%s, %s, TIMESTAMP 'epoch' + %s * INTERVAL '1 second',TIMESTAMP 'epoch' + %s * INTERVAL '1 second')""",
('humidity', p_SensorValues['temperature']['humidity'], p_SensorValues['temperature']['read'], p_SensorValues['temperature']['read'] ))
sql_con.commit()
sql_cur.close()
sql_con.close()
except psycopg2.Error as e:
print("SQL error in save_data: " + str(e))
except Exception as e:
print("Unknown error in save_data: " + str(e))
def main():
SensorValue = {}
TICK_LT = 0 #light detect ticker
TICK_LTI = 0 #light insert ticker
TICK_TMP = 0 #temp ticker
BlueLed = False
while True:
changed = False
motionData = GPIO.input(PIN_MC)
if not SensorValue.get('motion', None):
SensorValue['motion'] = {'data': motionData , 'read': UnixLocalEpoch(), 'changed': UnixLocalEpoch(), 'save': False}
SensorValue['motion']['save'] = False
if int(SensorValue['motion'].get('data', 0)) != int(motionData) :
changed = True
SensorValue['motion']['changed'] = UnixLocalEpoch()
SensorValue['motion']['save'] = True
SensorValue['motion']['data'] = int(motionData)
SensorValue['motion']['read'] = UnixLocalEpoch()
if (SensorValue['motion']['data'] > 0):
#GPIO.output(PIN_LED1,GPIO.HIGH) #flash led
SensorValue['motion']['lastmotion'] = UnixLocalEpoch()
BlueLed = True
else:
#GPIO.output(PIN_LED1,GPIO.LOW) #flash led stop
BlueLed = False
#Measure Light
if not SensorValue.get('light', None):
SensorValue['light'] = {'data': PhotoSensor(PIN_LC) , 'read': UnixLocalEpoch(), 'save': False }
SensorValue['light']['save'] = False
lightChanges = 0
if (TICK_LT < time.perf_counter()):
TICK_LT = time.perf_counter()+1
lightData = PhotoSensor(PIN_LC)
lightChanges = abs(SensorValue['light'].get('data', 0) - lightData)
#print("LC->" + str(lightData ) + "DF:" + str(lightChanges))
if (TICK_LTI < time.perf_counter() or (lightData > 600 and lightChanges > 200) or (lightData < 600 and lightChanges > 30)):
TICK_LTI = time.perf_counter()+30
if SensorValue['light'].get('data', 0) != lightData :
changed = True
SensorValue['light']['changed'] = UnixLocalEpoch()
SensorValue['light']['save'] = True
SensorValue['light']['data'] = lightData
SensorValue['light']['read'] = UnixLocalEpoch()
        #Measure temperature; this might hold the thread for a few seconds at most.
if not SensorValue.get('temperature', None):
SensorValue['temperature'] = {'temperature': 0, 'humidity': 0, 'changed': 0, 'save': False}
SensorValue['temperature']['save'] = False
if (TICK_TMP < time.perf_counter()):
TICK_TMP = time.perf_counter()+10
tempData = TempsensorRead()
if tempData:
print('temperature reading...')
if (SensorValue['temperature'].get('temperature', 0) != tempData['temperature']
or SensorValue['temperature'].get('humidity', 0) != tempData['humidity']):
SensorValue['temperature']['changed'] = UnixLocalEpoch()
SensorValue['temperature']['temperature'] = tempData['temperature']
SensorValue['temperature']['humidity'] = tempData['humidity']
SensorValue['temperature']['save'] = True
changed = True
SensorValue['temperature']['read'] = UnixLocalEpoch()
if changed:
print('---------------change-------------')
print('temp->' + str(SensorValue['temperature']['save']))
print('light->' + str(SensorValue['light']['save']))
print('motion->' + str(SensorValue['motion']['save']))
            # Deep-copy the readings because the SQL insert runs on a separate thread and can be too slow to share the live dict safely.
ThreadsData = copy.deepcopy(SensorValue)
t = Thread(target=save_data, args=(ThreadsData,))
t.start()
time.sleep(0.01)
if __name__ == '__main__':
sys.exit(main())
| warkanum/warkanums-pi-device-snippets | build1/boot_special.py | Python | mit | 8,959 | 0.018306 |
"""Test that sys.modules is used properly by import."""
from .. import util
from . import util as import_util
import sys
from types import MethodType
import unittest
class UseCache:
"""When it comes to sys.modules, import prefers it over anything else.
Once a name has been resolved, sys.modules is checked to see if it contains
the module desired. If so, then it is returned [use cache]. If it is not
found, then the proper steps are taken to perform the import, but
sys.modules is still used to return the imported module (e.g., not what a
loader returns) [from cache on return]. This also applies to imports of
things contained within a package and thus get assigned as an attribute
[from cache to attribute] or pulled in thanks to a fromlist import
[from cache for fromlist]. But if sys.modules contains None then
ImportError is raised [None in cache].
"""
def test_using_cache(self):
# [use cache]
module_to_use = "some module found!"
with util.uncache('some_module'):
sys.modules['some_module'] = module_to_use
module = self.__import__('some_module')
self.assertEqual(id(module_to_use), id(module))
def test_None_in_cache(self):
#[None in cache]
name = 'using_None'
with util.uncache(name):
sys.modules[name] = None
with self.assertRaises(ImportError) as cm:
self.__import__(name)
self.assertEqual(cm.exception.name, name)
Frozen_UseCache, Source_UseCache = util.test_both(
UseCache, __import__=import_util.__import__)
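# util.test_both (from the test suite's helper module) turns the mixin above
# into two concrete TestCase classes, one bound to the frozen importlib
# implementation and one to the pure-Python source implementation.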
class ImportlibUseCache(UseCache, unittest.TestCase):
# Pertinent only to PEP 302; exec_module() doesn't return a module.
__import__ = import_util.__import__[1]
def create_mock(self, *names, return_=None):
mock = util.mock_modules(*names)
original_load = mock.load_module
def load_module(self, fullname):
original_load(fullname)
return return_
mock.load_module = MethodType(load_module, mock)
return mock
# __import__ inconsistent between loaders and built-in import when it comes
# to when to use the module in sys.modules and when not to.
def test_using_cache_after_loader(self):
# [from cache on return]
with self.create_mock('module') as mock:
with util.import_state(meta_path=[mock]):
module = self.__import__('module')
self.assertEqual(id(module), id(sys.modules['module']))
# See test_using_cache_after_loader() for reasoning.
def test_using_cache_for_assigning_to_attribute(self):
# [from cache to attribute]
with self.create_mock('pkg.__init__', 'pkg.module') as importer:
with util.import_state(meta_path=[importer]):
module = self.__import__('pkg.module')
self.assertTrue(hasattr(module, 'module'))
self.assertEqual(id(module.module),
id(sys.modules['pkg.module']))
# See test_using_cache_after_loader() for reasoning.
def test_using_cache_for_fromlist(self):
# [from cache for fromlist]
with self.create_mock('pkg.__init__', 'pkg.module') as importer:
with util.import_state(meta_path=[importer]):
module = self.__import__('pkg', fromlist=['module'])
self.assertTrue(hasattr(module, 'module'))
self.assertEqual(id(module.module),
id(sys.modules['pkg.module']))
if __name__ == '__main__':
unittest.main()
| michalliu/OpenWrt-Firefly-Libraries | staging_dir/target-mipsel_1004kc+dsp_uClibc-0.9.33.2/usr/lib/python3.4/test/test_importlib/import_/test_caching.py | Python | gpl-2.0 | 3,643 | 0.000823 |
u"""Entry points for job execution
:copyright: Copyright (c) 2019 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
from pykern import pkinspect, pkjson
from pykern.pkcollections import PKDict
from pykern.pkdebug import pkdc, pkdexc, pkdlog, pkdp, pkdpretty
from sirepo import api_perm
from sirepo import simulation_db
from sirepo.template import template_common
import inspect
import pykern.pkconfig
import pykern.pkio
import re
import requests
import sirepo.auth
import sirepo.http_reply
import sirepo.http_request
import sirepo.job
import sirepo.mpi
import sirepo.sim_data
import sirepo.util
cfg = None
#: how many call frames to search backwards to find the api_.* caller
_MAX_FRAME_SEARCH_DEPTH = 6
@api_perm.require_user
def api_downloadDataFile(simulation_type, simulation_id, model, frame, suffix=None):
#TODO(robnagler) validate suffix and frame
req = sirepo.http_request.parse_params(
id=simulation_id,
model=model,
type=simulation_type,
check_sim_exists=True,
)
s = suffix and sirepo.srschema.parse_name(suffix)
t = None
with simulation_db.tmp_dir() as d:
# TODO(e-carlin): computeJobHash
t = sirepo.job.DATA_FILE_ROOT.join(sirepo.job.unique_key())
t.mksymlinkto(d, absolute=True)
try:
_request(
computeJobHash='unused',
dataFileKey=t.basename,
frame=int(frame),
isParallel=False,
req_data=req.req_data,
suffix=s,
)
f = d.listdir()
if len(f) > 0:
assert len(f) == 1, \
'too many files={}'.format(f)
return sirepo.http_reply.gen_file_as_attachment(f[0])
except requests.exceptions.HTTPError:
#TODO(robnagler) HTTPError is too coarse a check
pass
finally:
if t:
pykern.pkio.unchecked_remove(t)
raise sirepo.util.raise_not_found(
'frame={} not found {id} {type}'.format(frame, **req)
)
@api_perm.allow_visitor
def api_jobSupervisorPing():
import requests.exceptions
e = None
try:
k = sirepo.job.unique_key()
r = _request(
_request_content=PKDict(ping=k),
_request_uri=cfg.supervisor_uri + sirepo.job.SERVER_PING_URI,
)
if r.get('state') != 'ok':
return r
try:
x = r.pknested_get('ping')
if x == k:
return r
e = 'expected={} but got ping={}'.format(k, x)
except KeyError:
e = 'incorrectly formatted reply'
pkdlog(r)
except requests.exceptions.ConnectionError:
e = 'unable to connect to supervisor'
    except Exception as exc:
        # don't rebind the name from `except ... as`; Python 3 deletes it when
        # the handler exits, which would break the final `return` below
        pkdlog(exc)
        e = 'unexpected exception'
return PKDict(state='error', error=e)
@api_perm.require_user
def api_runCancel():
try:
return _request()
except Exception as e:
pkdlog('ignoring exception={} stack={}', e, pkdexc())
# Always true from the client's perspective
return sirepo.http_reply.gen_json({'state': 'canceled'})
@api_perm.require_user
def api_runSimulation():
r = _request_content(PKDict(fixup_old_data=True))
# TODO(e-carlin): This should really be done in job_supervisor._lib_dir_symlink()
# but that is outside of the Flask context so it won't work
r.simulation_lib_dir = sirepo.simulation_db.simulation_lib_dir(r.simulationType)
return _request(_request_content=r)
@api_perm.require_user
def api_runStatus():
return _request()
@api_perm.require_user
def api_simulationFrame(frame_id):
return template_common.sim_frame(
frame_id,
lambda a: _request(
analysisModel=a.frameReport,
# simulation frames are always sequential requests even though
# the report name has 'animation' in it.
isParallel=False,
req_data=PKDict(**a),
)
)
@api_perm.require_user
def api_sbatchLogin():
r = _request_content(
PKDict(computeJobHash='unused', jobRunMode=sirepo.job.SBATCH),
)
r.sbatchCredentials = r.pkdel('data')
return _request(_request_content=r)
def init_apis(*args, **kwargs):
global cfg
#TODO(robnagler) if we recover connections with agents and running jobs remove this
pykern.pkio.unchecked_remove(sirepo.job.LIB_FILE_ROOT, sirepo.job.DATA_FILE_ROOT)
pykern.pkio.mkdir_parent(sirepo.job.LIB_FILE_ROOT)
pykern.pkio.mkdir_parent(sirepo.job.DATA_FILE_ROOT)
cfg = pykern.pkconfig.init(
supervisor_uri=sirepo.job.DEFAULT_SUPERVISOR_URI_DECL,
)
def _request(**kwargs):
def get_api_name():
f = inspect.currentframe()
for _ in range(_MAX_FRAME_SEARCH_DEPTH):
m = re.search(r'^api_.*$', f.f_code.co_name)
if m:
return m.group()
f = f.f_back
else:
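            # for-else: only reached if the loop exhausts without returning an api_* name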
raise AssertionError(
'{}: max frame search depth reached'.format(f.f_code)
)
k = PKDict(kwargs)
u = k.pkdel('_request_uri') or cfg.supervisor_uri + sirepo.job.SERVER_URI
c = k.pkdel('_request_content') or _request_content(k)
c.pkupdate(
api=get_api_name(),
serverSecret=sirepo.job.cfg.server_secret,
)
pkdlog(
'api={} runDir={}',
c.api,
c.get('runDir')
)
r = requests.post(
u,
data=pkjson.dump_bytes(c),
headers=PKDict({'Content-type': 'application/json'}),
verify=sirepo.job.cfg.verify_tls,
)
r.raise_for_status()
return pkjson.load_any(r.content)
def _request_content(kwargs):
d = kwargs.pkdel('req_data')
if not d:
        #TODO(robnagler) need to use parsed values; ok for now, because none of
        # the used values are modified by parse_post. If we have files (e.g. file_type, filename),
        # we need to use those values from parse_post
d = sirepo.http_request.parse_post(
fixup_old_data=kwargs.pkdel('fixup_old_data', False),
id=True,
model=True,
check_sim_exists=True,
).req_data
s = sirepo.sim_data.get_class(d)
##TODO(robnagler) this should be req_data
b = PKDict(data=d, **kwargs)
# TODO(e-carlin): some of these fields are only used for some type of reqs
b.pksetdefault(
analysisModel=lambda: s.parse_model(d),
computeJobHash=lambda: d.get('computeJobHash') or s.compute_job_hash(d),
computeJobSerial=lambda: d.get('computeJobSerial', 0),
computeModel=lambda: s.compute_model(d),
isParallel=lambda: s.is_parallel(d),
#TODO(robnagler) relative to srdb root
simulationId=lambda: s.parse_sid(d),
simulationType=lambda: d.simulationType,
).pkupdate(
reqId=sirepo.job.unique_key(),
runDir=str(simulation_db.simulation_run_dir(d)),
uid=sirepo.auth.logged_in_user(),
).pkupdate(
computeJid=s.parse_jid(d, uid=b.uid),
userDir=str(sirepo.simulation_db.user_dir_name(b.uid)),
)
return _run_mode(b)
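# _run_mode picks how the job is dispatched: an explicit jobRunMode on the
# compute model wins (validated against simulation_db.JOB_RUN_MODE_MAP);
# otherwise the mode falls back to parallel or sequential based on isParallel.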
def _run_mode(request_content):
if 'models' not in request_content.data:
return request_content
#TODO(robnagler) make sure this is set for animation sim frames
m = request_content.data.models.get(request_content.computeModel)
j = m and m.get('jobRunMode')
if not j:
request_content.jobRunMode = sirepo.job.PARALLEL if request_content.isParallel \
else sirepo.job.SEQUENTIAL
return request_content
if j not in simulation_db.JOB_RUN_MODE_MAP:
raise sirepo.util.Error(
'invalid jobRunMode={} computeModel={} computeJid={}'.format(
j,
request_content.computeModel,
request_content.computeJid,
)
)
return request_content.pkupdate(
jobRunMode=j,
sbatchCores=m.sbatchCores,
sbatchHours=m.sbatchHours,
)
| mrakitin/sirepo | sirepo/job_api.py | Python | apache-2.0 | 8,078 | 0.002723 |
#
# Copyright 2010-2014 Fabric Technologies Inc. All rights reserved.
#
import os
import json
import collections
from kraken.ui.Qt import QtGui, QtWidgets, QtCore
class PreferenceEditor(QtWidgets.QDialog):
"""A widget providing the ability to nest """
def __init__(self, parent=None):
# constructors of base classes
super(PreferenceEditor, self).__init__(parent)
self.setObjectName('PreferenceEditor')
self.setWindowTitle('Preference Editor')
self.setWindowFlags(QtCore.Qt.Dialog)
self.resize(600, 300)
self.prefValueWidgets = []
self.createLayout()
self.createConnections()
def createLayout(self):
# Parent Layout
self._topLayout = QtWidgets.QVBoxLayout()
self._topLayout.setContentsMargins(0, 0, 0, 0)
self._topLayout.setSpacing(0)
self._mainWidget = QtWidgets.QWidget()
self._mainWidget.setObjectName('mainPrefWidget')
# Main Layout
self._mainLayout = QtWidgets.QVBoxLayout(self._mainWidget)
self._mainLayout.setContentsMargins(0, 0, 0, 0)
self._mainLayout.setSpacing(0)
self._preferenceLayout = QtWidgets.QGridLayout()
self._preferenceLayout.setContentsMargins(10, 10, 10, 10)
self._preferenceLayout.setSpacing(3)
self._preferenceLayout.setColumnMinimumWidth(0, 200)
self._preferenceLayout.setColumnStretch(0, 1)
self._preferenceLayout.setColumnStretch(1, 2)
# Add widgets based on type here
preferences = self.parentWidget().window().preferences.getPreferences()
i = 0
sortedPrefs = collections.OrderedDict(sorted(preferences.items(), key=lambda p: p[0]))
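        # Preferences are listed alphabetically; every preference gets a label,
        # but only entries of type 'bool' currently get an editable value widget.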
for k, v in sortedPrefs.iteritems():
labelFrameWidget = QtWidgets.QFrame()
labelFrameWidget.setObjectName('prefLabelWidgetFrame')
labelFrameWidget.setFrameStyle(QtWidgets.QFrame.NoFrame | QtWidgets.QFrame.Plain)
labelFrameWidget.setToolTip(v['description'])
labelFrameLayout = QtWidgets.QHBoxLayout()
prefLabel = QtWidgets.QLabel(v['nice_name'], self)
prefLabel.setProperty('labelClass', 'preferenceLabel')
prefLabel.setObjectName(k + "_label")
prefLabel.setSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Fixed)
prefLabel.setMinimumWidth(300)
labelFrameLayout.addWidget(prefLabel)
labelFrameWidget.setLayout(labelFrameLayout)
self._preferenceLayout.addWidget(labelFrameWidget, i, 0)
if v['type'] == 'bool':
valueFrameWidget = QtWidgets.QFrame()
valueFrameWidget.setObjectName('prefValueWidgetFrame')
valueFrameWidget.setFrameStyle(QtWidgets.QFrame.NoFrame | QtWidgets.QFrame.Plain)
valueFrameLayout = QtWidgets.QHBoxLayout()
valueWidget = QtWidgets.QCheckBox(self)
valueWidget.setObjectName(k + "_valueWidget")
valueWidget.setChecked(v['value'])
valueFrameLayout.addWidget(valueWidget)
valueFrameWidget.setLayout(valueFrameLayout)
self._preferenceLayout.addWidget(valueFrameWidget, i, 1, 1, 1)
self.prefValueWidgets.append(valueWidget)
i += 1
# OK and Cancel buttons
buttonLayout = QtWidgets.QHBoxLayout()
buttonLayout.setContentsMargins(10, 10, 10, 10)
buttons = QtWidgets.QDialogButtonBox(
QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel,
QtCore.Qt.Horizontal, self)
buttons.accepted.connect(self.accept)
buttons.rejected.connect(self.reject)
buttonLayout.addWidget(buttons)
# Menu Bar
self.menu_bar = QtWidgets.QMenuBar()
self.file_menu = self.menu_bar.addMenu('&File')
self.importPrefAction = self.file_menu.addAction('&Import...')
self.exportPrefAction = self.file_menu.addAction('&Export...')
self._mainLayout.addWidget(self.menu_bar)
self._mainLayout.addLayout(self._preferenceLayout)
self._mainLayout.addStretch(1)
self._mainLayout.addLayout(buttonLayout)
self._topLayout.addWidget(self._mainWidget)
self.setLayout(self._topLayout)
def createConnections(self):
self.importPrefAction.triggered.connect(self.importPrefs)
self.exportPrefAction.triggered.connect(self.exportPrefs)
def importPrefs(self):
fileDialog = QtWidgets.QFileDialog(self)
fileDialog.setOption(QtWidgets.QFileDialog.DontUseNativeDialog, on=True)
fileDialog.setWindowTitle('Import Preferences')
fileDialog.setDirectory(os.path.expanduser('~'))
fileDialog.setAcceptMode(QtWidgets.QFileDialog.AcceptOpen)
fileDialog.setNameFilter('JSON files (*.json)')
if fileDialog.exec_() == QtWidgets.QFileDialog.Accepted:
filePath = fileDialog.selectedFiles()[0]
with open(filePath, "r") as openPrefFile:
loadedPrefs = json.load(openPrefFile)
self.parentWidget().window().preferences.loadPreferences(loadedPrefs)
self.updatePrefValues()
def exportPrefs(self):
fileDialog = QtWidgets.QFileDialog(self)
fileDialog.setOption(QtWidgets.QFileDialog.DontUseNativeDialog, on=True)
fileDialog.setWindowTitle('Export Preferences')
fileDialog.setDirectory(os.path.expanduser('~'))
fileDialog.setAcceptMode(QtWidgets.QFileDialog.AcceptSave)
fileDialog.setNameFilter('JSON files (*.json)')
fileDialog.setDefaultSuffix('json')
if fileDialog.exec_() == QtWidgets.QFileDialog.Accepted:
filePath = fileDialog.selectedFiles()[0]
preferences = self.parentWidget().window().preferences.getPreferences()
with open(filePath, "w+") as savePrefFile:
json.dump(preferences, savePrefFile)
def updatePrefValues(self):
"""Updates the preference widgets with the values from the preferences.
This is used when loading preferences from a file so that the widgets in
the UI match what was loaded.
"""
preferences = self.parentWidget().window().preferences
for widget in self.prefValueWidgets:
prefName = widget.objectName().rsplit('_', 1)[0]
pref = preferences.getPreference(prefName)
if pref['type'] == 'bool':
widget.setChecked(pref['value'])
# =======
# Events
# =======
def accept(self):
preferences = self.parentWidget().window().preferences
for widget in self.prefValueWidgets:
if type(widget) == QtWidgets.QCheckBox:
prefName = widget.objectName().rsplit('_', 1)[0]
preferences.setPreference(prefName, widget.isChecked())
super(PreferenceEditor, self).accept()
def closeEvent(self, event):
pass
| oculusstorystudio/kraken | Python/kraken/ui/preference_editor.py | Python | bsd-3-clause | 7,028 | 0.001565 |
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../'))
# -- Project information -----------------------------------------------------
project = u'DeepOBS'
copyright = u'2019, Frank Schneider'
author = u'Frank Schneider, Lukas Balles & Philipp Hennig'
# The short X.Y version
version = u'1.1'
# The full version, including alpha/beta/rc tags
release = u'1.1.0'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.napoleon', 'sphinx.ext.viewcode', 'sphinxarg.ext'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = [u'_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# Theme options
html_theme_options = {
'collapse_navigation': False, # Collapse navigation
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_context = {
'css_files': [
'_static/theme_overrides.css', # override wide tables in RTD theme
],
}
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'DeepOBSdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'DeepOBS.tex', u'DeepOBS Documentation',
u'Frank Schneider', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'deepobs', u'DeepOBS Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'DeepOBS', u'DeepOBS Documentation',
author, 'DeepOBS', 'Documentation for the DeepOBS package.',
'Frank Schneider'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
smartquotes = False
| fsschneider/DeepOBS | docs/conf.py | Python | mit | 5,559 | 0 |
def primesSum(a, b):
    # Sum every prime n in the inclusive range [a, b]; n is prime when n >= 2
    # and has no divisor between 2 and sqrt(n).
    return sum(n for n in range(a, b + 1)
               if not (n < 2 or any(n % x == 0 for x in range(2, int(n ** 0.5) + 1))))
| emirot/codefights | python/primesSum.py | Python | apache-2.0 | 136 | 0.014706 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._test_base import TestBase
__all__ = ['TestBase']
| Azure/azure-sdk-for-python | sdk/testbase/azure-mgmt-testbase/azure/mgmt/testbase/aio/__init__.py | Python | mit | 524 | 0.003817 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python utilities required by Keras."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import binascii
import codecs
import marshal
import os
import re
import sys
import time
import types as python_types
import numpy as np
import six
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import keras_export
_GLOBAL_CUSTOM_OBJECTS = {}
@keras_export('keras.utils.CustomObjectScope')
class CustomObjectScope(object):
"""Provides a scope that changes to `_GLOBAL_CUSTOM_OBJECTS` cannot escape.
Code within a `with` statement will be able to access custom objects
by name. Changes to global custom objects persist
within the enclosing `with` statement. At end of the `with` statement,
global custom objects are reverted to state
at beginning of the `with` statement.
Example:
Consider a custom object `MyObject` (e.g. a class):
```python
with CustomObjectScope({'MyObject':MyObject}):
layer = Dense(..., kernel_regularizer='MyObject')
# save, load, etc. will recognize custom object by name
```
"""
def __init__(self, *args):
self.custom_objects = args
self.backup = None
def __enter__(self):
self.backup = _GLOBAL_CUSTOM_OBJECTS.copy()
for objects in self.custom_objects:
_GLOBAL_CUSTOM_OBJECTS.update(objects)
return self
def __exit__(self, *args, **kwargs):
_GLOBAL_CUSTOM_OBJECTS.clear()
_GLOBAL_CUSTOM_OBJECTS.update(self.backup)
@keras_export('keras.utils.custom_object_scope')
def custom_object_scope(*args):
"""Provides a scope that changes to `_GLOBAL_CUSTOM_OBJECTS` cannot escape.
Convenience wrapper for `CustomObjectScope`.
Code within a `with` statement will be able to access custom objects
by name. Changes to global custom objects persist
within the enclosing `with` statement. At end of the `with` statement,
global custom objects are reverted to state
at beginning of the `with` statement.
Example:
Consider a custom object `MyObject`
```python
with custom_object_scope({'MyObject':MyObject}):
layer = Dense(..., kernel_regularizer='MyObject')
# save, load, etc. will recognize custom object by name
```
Arguments:
*args: Variable length list of dictionaries of name,
class pairs to add to custom objects.
Returns:
Object of type `CustomObjectScope`.
"""
return CustomObjectScope(*args)
@keras_export('keras.utils.get_custom_objects')
def get_custom_objects():
"""Retrieves a live reference to the global dictionary of custom objects.
Updating and clearing custom objects using `custom_object_scope`
is preferred, but `get_custom_objects` can
be used to directly access `_GLOBAL_CUSTOM_OBJECTS`.
Example:
```python
get_custom_objects().clear()
get_custom_objects()['MyObject'] = MyObject
```
Returns:
Global dictionary of names to classes (`_GLOBAL_CUSTOM_OBJECTS`).
"""
return _GLOBAL_CUSTOM_OBJECTS
def serialize_keras_class_and_config(cls_name, cls_config):
"""Returns the serialization of the class with the given config."""
return {'class_name': cls_name, 'config': cls_config}
@keras_export('keras.utils.serialize_keras_object')
def serialize_keras_object(instance):
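  """Serializes a Keras object to a `{'class_name', 'config'}` dict, or to its `__name__` if it defines no `get_config`."""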
_, instance = tf_decorator.unwrap(instance)
if instance is None:
return None
if hasattr(instance, 'get_config'):
return serialize_keras_class_and_config(instance.__class__.__name__,
instance.get_config())
if hasattr(instance, '__name__'):
return instance.__name__
raise ValueError('Cannot serialize', instance)
def class_and_config_for_serialized_keras_object(
config,
module_objects=None,
custom_objects=None,
printable_module_name='object'):
"""Returns the class name and config for a serialized keras object."""
if (not isinstance(config, dict) or 'class_name' not in config or
'config' not in config):
raise ValueError('Improper config format: ' + str(config))
class_name = config['class_name']
if custom_objects and class_name in custom_objects:
cls = custom_objects[class_name]
elif class_name in _GLOBAL_CUSTOM_OBJECTS:
cls = _GLOBAL_CUSTOM_OBJECTS[class_name]
else:
module_objects = module_objects or {}
cls = module_objects.get(class_name)
if cls is None:
raise ValueError('Unknown ' + printable_module_name + ': ' + class_name)
return (cls, config['config'])
@keras_export('keras.utils.deserialize_keras_object')
def deserialize_keras_object(identifier,
module_objects=None,
custom_objects=None,
printable_module_name='object'):
if identifier is None:
return None
if isinstance(identifier, dict):
# In this case we are dealing with a Keras config dictionary.
config = identifier
(cls, cls_config) = class_and_config_for_serialized_keras_object(
config, module_objects, custom_objects, printable_module_name)
if hasattr(cls, 'from_config'):
arg_spec = tf_inspect.getfullargspec(cls.from_config)
custom_objects = custom_objects or {}
if 'custom_objects' in arg_spec.args:
return cls.from_config(
cls_config,
custom_objects=dict(
list(_GLOBAL_CUSTOM_OBJECTS.items()) +
list(custom_objects.items())))
with CustomObjectScope(custom_objects):
return cls.from_config(cls_config)
else:
# Then `cls` may be a function returning a class.
# in this case by convention `config` holds
# the kwargs of the function.
custom_objects = custom_objects or {}
with CustomObjectScope(custom_objects):
return cls(**cls_config)
elif isinstance(identifier, six.string_types):
object_name = identifier
if custom_objects and object_name in custom_objects:
obj = custom_objects.get(object_name)
elif object_name in _GLOBAL_CUSTOM_OBJECTS:
obj = _GLOBAL_CUSTOM_OBJECTS[object_name]
else:
obj = module_objects.get(object_name)
if obj is None:
raise ValueError('Unknown ' + printable_module_name + ':' + object_name)
# Classes passed by name are instantiated with no args, functions are
# returned as-is.
if tf_inspect.isclass(obj):
return obj()
return obj
else:
raise ValueError('Could not interpret serialized ' + printable_module_name +
': ' + identifier)
def func_dump(func):
"""Serializes a user defined function.
Arguments:
func: the function to serialize.
Returns:
A tuple `(code, defaults, closure)`.
"""
if os.name == 'nt':
raw_code = marshal.dumps(func.__code__).replace(b'\\', b'/')
code = codecs.encode(raw_code, 'base64').decode('ascii')
else:
raw_code = marshal.dumps(func.__code__)
code = codecs.encode(raw_code, 'base64').decode('ascii')
defaults = func.__defaults__
if func.__closure__:
closure = tuple(c.cell_contents for c in func.__closure__)
else:
closure = None
return code, defaults, closure
def func_load(code, defaults=None, closure=None, globs=None):
"""Deserializes a user defined function.
Arguments:
code: bytecode of the function.
defaults: defaults of the function.
closure: closure of the function.
globs: dictionary of global objects.
Returns:
A function object.
"""
if isinstance(code, (tuple, list)): # unpack previous dump
code, defaults, closure = code
if isinstance(defaults, list):
defaults = tuple(defaults)
def ensure_value_to_cell(value):
"""Ensures that a value is converted to a python cell object.
Arguments:
value: Any value that needs to be casted to the cell type
Returns:
A value wrapped as a cell object (see function "func_load")
"""
def dummy_fn():
# pylint: disable=pointless-statement
value # just access it so it gets captured in .__closure__
cell_value = dummy_fn.__closure__[0]
if not isinstance(value, type(cell_value)):
return cell_value
return value
if closure is not None:
closure = tuple(ensure_value_to_cell(_) for _ in closure)
try:
raw_code = codecs.decode(code.encode('ascii'), 'base64')
except (UnicodeEncodeError, binascii.Error):
raw_code = code.encode('raw_unicode_escape')
code = marshal.loads(raw_code)
if globs is None:
globs = globals()
return python_types.FunctionType(
code, globs, name=code.co_name, argdefs=defaults, closure=closure)
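# Illustrative round trip for the two helpers above (assumes the function only
# closes over marshallable values):
#   payload = func_dump(lambda x: x + 1)
#   restored = func_load(*payload)   # func_load(payload) works too
#   assert restored(1) == 2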
def has_arg(fn, name, accept_all=False):
"""Checks if a callable accepts a given keyword argument.
Arguments:
fn: Callable to inspect.
name: Check if `fn` can be called with `name` as a keyword argument.
accept_all: What to return if there is no parameter called `name`
but the function accepts a `**kwargs` argument.
Returns:
bool, whether `fn` accepts a `name` keyword argument.
"""
arg_spec = tf_inspect.getfullargspec(fn)
if accept_all and arg_spec.varkw is not None:
return True
return name in arg_spec.args
@keras_export('keras.utils.Progbar')
class Progbar(object):
"""Displays a progress bar.
Arguments:
target: Total number of steps expected, None if unknown.
width: Progress bar width on screen.
verbose: Verbosity mode, 0 (silent), 1 (verbose), 2 (semi-verbose)
stateful_metrics: Iterable of string names of metrics that
should *not* be averaged over time. Metrics in this list
will be displayed as-is. All others will be averaged
by the progbar before display.
interval: Minimum visual progress update interval (in seconds).
unit_name: Display name for step counts (usually "step" or "sample").
"""
def __init__(self, target, width=30, verbose=1, interval=0.05,
stateful_metrics=None, unit_name='step'):
self.target = target
self.width = width
self.verbose = verbose
self.interval = interval
self.unit_name = unit_name
if stateful_metrics:
self.stateful_metrics = set(stateful_metrics)
else:
self.stateful_metrics = set()
self._dynamic_display = ((hasattr(sys.stdout, 'isatty') and
sys.stdout.isatty()) or
'ipykernel' in sys.modules or
'posix' in sys.modules)
self._total_width = 0
self._seen_so_far = 0
# We use a dict + list to avoid garbage collection
# issues found in OrderedDict
self._values = {}
self._values_order = []
self._start = time.time()
self._last_update = 0
def update(self, current, values=None):
"""Updates the progress bar.
Arguments:
current: Index of current step.
values: List of tuples:
`(name, value_for_last_step)`.
If `name` is in `stateful_metrics`,
`value_for_last_step` will be displayed as-is.
Else, an average of the metric over time will be displayed.
"""
values = values or []
for k, v in values:
if k not in self._values_order:
self._values_order.append(k)
if k not in self.stateful_metrics:
if k not in self._values:
self._values[k] = [v * (current - self._seen_so_far),
current - self._seen_so_far]
else:
self._values[k][0] += v * (current - self._seen_so_far)
self._values[k][1] += (current - self._seen_so_far)
else:
# Stateful metrics output a numeric value. This representation
# means "take an average from a single value" but keeps the
# numeric formatting.
self._values[k] = [v, 1]
self._seen_so_far = current
now = time.time()
info = ' - %.0fs' % (now - self._start)
if self.verbose == 1:
if (now - self._last_update < self.interval and
self.target is not None and current < self.target):
return
prev_total_width = self._total_width
if self._dynamic_display:
sys.stdout.write('\b' * prev_total_width)
sys.stdout.write('\r')
else:
sys.stdout.write('\n')
if self.target is not None:
numdigits = int(np.log10(self.target)) + 1
bar = ('%' + str(numdigits) + 'd/%d [') % (current, self.target)
prog = float(current) / self.target
prog_width = int(self.width * prog)
if prog_width > 0:
bar += ('=' * (prog_width - 1))
if current < self.target:
bar += '>'
else:
bar += '='
bar += ('.' * (self.width - prog_width))
bar += ']'
else:
bar = '%7d/Unknown' % current
self._total_width = len(bar)
sys.stdout.write(bar)
if current:
time_per_unit = (now - self._start) / current
else:
time_per_unit = 0
if self.target is not None and current < self.target:
eta = time_per_unit * (self.target - current)
if eta > 3600:
eta_format = '%d:%02d:%02d' % (eta // 3600,
(eta % 3600) // 60,
eta % 60)
elif eta > 60:
eta_format = '%d:%02d' % (eta // 60, eta % 60)
else:
eta_format = '%ds' % eta
info = ' - ETA: %s' % eta_format
else:
if time_per_unit >= 1 or time_per_unit == 0:
info += ' %.0fs/%s' % (time_per_unit, self.unit_name)
elif time_per_unit >= 1e-3:
info += ' %.0fms/%s' % (time_per_unit * 1e3, self.unit_name)
else:
info += ' %.0fus/%s' % (time_per_unit * 1e6, self.unit_name)
for k in self._values_order:
info += ' - %s:' % k
if isinstance(self._values[k], list):
avg = np.mean(self._values[k][0] / max(1, self._values[k][1]))
if abs(avg) > 1e-3:
info += ' %.4f' % avg
else:
info += ' %.4e' % avg
else:
info += ' %s' % self._values[k]
self._total_width += len(info)
if prev_total_width > self._total_width:
info += (' ' * (prev_total_width - self._total_width))
if self.target is not None and current >= self.target:
info += '\n'
sys.stdout.write(info)
sys.stdout.flush()
elif self.verbose == 2:
if self.target is not None and current >= self.target:
numdigits = int(np.log10(self.target)) + 1
count = ('%' + str(numdigits) + 'd/%d') % (current, self.target)
info = count + info
for k in self._values_order:
info += ' - %s:' % k
avg = np.mean(self._values[k][0] / max(1, self._values[k][1]))
          if abs(avg) > 1e-3:
info += ' %.4f' % avg
else:
info += ' %.4e' % avg
info += '\n'
sys.stdout.write(info)
sys.stdout.flush()
self._last_update = now
def add(self, n, values=None):
self.update(self._seen_so_far + n, values)
def make_batches(size, batch_size):
"""Returns a list of batch indices (tuples of indices).
Arguments:
size: Integer, total size of the data to slice into batches.
batch_size: Integer, batch size.
Returns:
A list of tuples of array indices.
"""
num_batches = int(np.ceil(size / float(batch_size)))
return [(i * batch_size, min(size, (i + 1) * batch_size))
for i in range(0, num_batches)]
def slice_arrays(arrays, start=None, stop=None):
"""Slice an array or list of arrays.
This takes an array-like, or a list of
array-likes, and outputs:
- arrays[start:stop] if `arrays` is an array-like
- [x[start:stop] for x in arrays] if `arrays` is a list
Can also work on list/array of indices: `slice_arrays(x, indices)`
Arguments:
arrays: Single array or list of arrays.
start: can be an integer index (start index)
or a list/array of indices
stop: integer (stop index); should be None if
`start` was a list.
Returns:
A slice of the array(s).
Raises:
ValueError: If the value of start is a list and stop is not None.
"""
if arrays is None:
return [None]
if isinstance(start, list) and stop is not None:
raise ValueError('The stop argument has to be None if the value of start '
'is a list.')
elif isinstance(arrays, list):
if hasattr(start, '__len__'):
# hdf5 datasets only support list objects as indices
if hasattr(start, 'shape'):
start = start.tolist()
return [None if x is None else x[start] for x in arrays]
return [
None if x is None else
None if not hasattr(x, '__getitem__') else x[start:stop] for x in arrays
]
else:
if hasattr(start, '__len__'):
if hasattr(start, 'shape'):
start = start.tolist()
return arrays[start]
if hasattr(start, '__getitem__'):
return arrays[start:stop]
return [None]
def to_list(x):
"""Normalizes a list/tensor into a list.
If a tensor is passed, we return
a list of size 1 containing the tensor.
Arguments:
x: target object to be normalized.
Returns:
A list.
"""
if isinstance(x, list):
return x
return [x]
def object_list_uid(object_list):
"""Creates a single string from object ids."""
object_list = nest.flatten(object_list)
return ', '.join([str(abs(id(x))) for x in object_list])
def to_snake_case(name):
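  """Converts a CamelCase name to snake_case, e.g. 'MyDenseLayer' -> 'my_dense_layer'."""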
intermediate = re.sub('(.)([A-Z][a-z0-9]+)', r'\1_\2', name)
insecure = re.sub('([a-z])([A-Z])', r'\1_\2', intermediate).lower()
# If the class is private the name starts with "_" which is not secure
# for creating scopes. We prefix the name with "private" in this case.
if insecure[0] != '_':
return insecure
return 'private' + insecure
def is_all_none(structure):
iterable = nest.flatten(structure)
# We cannot use Python's `any` because the iterable may return Tensors.
for element in iterable:
if element is not None:
return False
return True
def check_for_unexpected_keys(name, input_dict, expected_values):
unknown = set(input_dict.keys()).difference(expected_values)
if unknown:
raise ValueError('Unknown entries in {} dictionary: {}. Only expected '
'following keys: {}'.format(name, list(unknown),
expected_values))
def validate_kwargs(kwargs, allowed_kwargs,
error_message='Keyword argument not understood:'):
"""Checks that all keyword arguments are in the set of allowed keys."""
for kwarg in kwargs:
if kwarg not in allowed_kwargs:
raise TypeError(error_message, kwarg)
| alsrgv/tensorflow | tensorflow/python/keras/utils/generic_utils.py | Python | apache-2.0 | 19,553 | 0.007978 |
#!/usr/bin/env python
"""
Command-line utility for administrative tasks.
"""
import os
import sys
if __name__ == "__main__":
os.environ.setdefault(
"DJANGO_SETTINGS_MODULE",
"AzureAuthSample.settings"
)
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| ElliottMiller/python-azuread-sample | manage.py | Python | mit | 336 | 0 |
from copy import deepcopy
import h5py
import numpy as np
import pytest
import six
from ...util.functions import virtual_file
from .. import (CartesianGrid,
CylindricalPolarGrid,
SphericalPolarGrid,
AMRGrid,
OctreeGrid)
ALL_GRID_TYPES = ['car', 'sph', 'cyl', 'amr', 'oct']
def exc_msg(exc):
if isinstance(exc.value, six.string_types):
return exc.value
elif type(exc.value) is tuple:
return exc.value[0]
else:
return exc.value.args[0]
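# exc_msg normalises pytest's ExceptionInfo payload -- which may be a plain
# string, a tuple or an exception with args -- into a single message string.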
class TestView(object):
def setup_method(self, method):
# Set up grids
self.grid = {}
self.grid['car'] = CartesianGrid([-1., 1.],
[-2., 2.],
[-3., 3.])
self.grid['cyl'] = CylindricalPolarGrid([0., 1.],
[-1., 1.],
[0., 2. * np.pi])
self.grid['sph'] = SphericalPolarGrid([0., 1.],
[0., np.pi],
[0., 2. * np.pi])
self.grid['amr'] = AMRGrid()
level = self.grid['amr'].add_level()
grid = level.add_grid()
grid.xmin, grid.xmax = -1., 1.
grid.ymin, grid.ymax = -1., 1.
grid.zmin, grid.zmax = -1., 1.
grid.nx, grid.ny, grid.nz = 8, 8, 8
refined = [1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0]
self.grid['oct'] = OctreeGrid(0., 0., 0., 10., 10., 10.,
np.array(refined).astype(bool))
# Set up empty grid class
self.grid_empty = {}
self.grid_empty['car'] = CartesianGrid
self.grid_empty['cyl'] = CylindricalPolarGrid
self.grid_empty['sph'] = SphericalPolarGrid
self.grid_empty['amr'] = AMRGrid
self.grid_empty['oct'] = OctreeGrid
# Set up initial densities
self.density = {}
self.density['car'] = np.array([[[1.]]])
self.density['cyl'] = np.array([[[1.]]])
self.density['sph'] = np.array([[[1.]]])
amr_q = deepcopy(self.grid['amr'])
amr_q.levels[0].grids[0].quantities['density'] = np.ones((8, 8, 8))
self.density['amr'] = amr_q['density']
self.density['oct'] = np.ones(len(refined))
@pytest.mark.parametrize(('grid_type'), ALL_GRID_TYPES)
def test_write_read_empty(self, grid_type):
g = self.grid[grid_type]
f = virtual_file()
g.write(f)
h = self.grid_empty[grid_type]()
h.read(f)
f.close()
assert h.n_dust is None
@pytest.mark.parametrize(('grid_type'), ALL_GRID_TYPES)
def test_write_read_single(self, grid_type):
g = self.grid[grid_type]
f = virtual_file()
g['density'] = []
g['density'].append(self.density[grid_type])
g.write(f)
h = self.grid_empty[grid_type]()
h.read(f)
f.close()
assert h.n_dust == 1
if grid_type == 'amr':
assert type(h.levels[0].grids[0].quantities['density']) is list
else:
assert type(h.quantities['density']) is list
@pytest.mark.parametrize(('grid_type'), ALL_GRID_TYPES)
def test_write_read_double(self, grid_type):
g = self.grid[grid_type]
f = virtual_file()
g['density'] = []
g['density'].append(self.density[grid_type])
g['density'].append(self.density[grid_type])
g.write(f)
h = self.grid_empty[grid_type]()
h.read(f)
f.close()
assert h.n_dust == 2
if grid_type == 'amr':
assert type(h.levels[0].grids[0].quantities['density']) is list
else:
assert type(h.quantities['density']) is list
@pytest.mark.parametrize(('grid_type'), ALL_GRID_TYPES)
def test_write_read_double_multiple(self, grid_type):
g = self.grid[grid_type]
f = virtual_file()
g['density'] = []
g['density'].append(self.density[grid_type])
g['density'].append(self.density[grid_type])
g['energy'] = []
g['energy'].append(self.density[grid_type])
g['energy'].append(self.density[grid_type])
g.write(f)
h = self.grid_empty[grid_type]()
h.read(f)
f.close()
assert h.n_dust == 2
if grid_type == 'amr':
assert type(h.levels[0].grids[0].quantities['density']) is list
assert type(h.levels[0].grids[0].quantities['energy']) is list
else:
assert type(h.quantities['density']) is list
assert type(h.quantities['energy']) is list
@pytest.mark.parametrize(('grid_type'), ALL_GRID_TYPES)
def test_write_read_type_mismatch(self, grid_type):
g = self.grid[grid_type]
f = virtual_file()
g['density'] = []
g['density'].append(self.density[grid_type])
g.write(f)
f['Geometry'].attrs['grid_type'] = 'invalid'.encode('utf-8')
h = self.grid_empty[grid_type]()
with pytest.raises(Exception) as exc:
h.read(f)
if grid_type == 'car':
assert exc.value.args[0] == "Grid is not cartesian"
elif grid_type == 'cyl':
assert exc.value.args[0] == "Grid is not cylindrical polar"
elif grid_type == 'sph':
assert exc.value.args[0] == "Grid is not spherical polar"
elif grid_type == 'amr':
assert exc.value.args[0] == "Grid is not an AMR grid"
elif grid_type == 'oct':
assert exc.value.args[0] == "Grid is not an octree"
@pytest.mark.parametrize(('grid_type'), ALL_GRID_TYPES)
def test_write_read_hash_mismatch(self, grid_type):
g = self.grid[grid_type]
f = virtual_file()
g['density'] = []
g['density'].append(self.density[grid_type])
g.write(f)
f['Geometry'].attrs['geometry'] = 'a4e2805a72dfcf01b2fd94da0be32511'.encode('utf-8')
h = self.grid_empty[grid_type]()
with pytest.raises(Exception) as exc:
h.read(f)
assert exc.value.args[0] == "Calculated geometry hash does not " \
"match hash in file"
@pytest.mark.parametrize(('grid_type'), ALL_GRID_TYPES)
def test_write_read_groups_exist(self, grid_type):
g = self.grid[grid_type]
f = virtual_file()
f.create_group('Geometry')
f.create_group('Quantities')
g['density'] = []
g['density'].append(self.density[grid_type])
g.write(f)
h = self.grid_empty[grid_type]()
h.read(f)
assert h.n_dust == 1
| hyperion-rt/hyperion | hyperion/grid/tests/test_io.py | Python | bsd-2-clause | 6,744 | 0.000148 |
def is_perfect_number(n):
    # A perfect number equals the sum of its proper divisors, e.g. 6 = 1 + 2 + 3.
    total = 0  # use a local name rather than shadowing the built-in sum()
    for x in range(1, n):
        if n % x == 0:
            total += x
    return total == n
num = int(input("Please enter a number to check if it is perfect or not: "))
print(is_perfect_number(num))
| OpenGenus/cosmos | code/mathematical_algorithms/src/perfect_number/perfect_number.py | Python | gpl-3.0 | 235 | 0 |
import os
from setuptools import setup
ROOT = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(ROOT, 'README.rst'), encoding='utf-8') as inp:
LONG_DESCRIPTION = inp.read()
setup(
# Meta data
name='user_agent',
version='0.1.10',
author="Gregory Petukhov",
author_email='[email protected]',
maintainer="Gregory Petukhov",
maintainer_email='[email protected]',
url='https://github.com/lorien/user_agent',
description='User-Agent generator',
long_description=LONG_DESCRIPTION,
download_url='http://pypi.python.org/pypi/user_agent',
keywords="user agent browser navigator",
license="MIT License",
# Package files
packages=['user_agent'],
include_package_data=True,
install_requires=['six'],
entry_points={
'console_scripts': [
'ua = user_agent.cli:script_ua',
],
},
# Topics
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython',
'License :: OSI Approved :: MIT License',
#'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Internet :: WWW/HTTP',
],
)
| lorien/user_agent | setup.py | Python | mit | 1,599 | 0.000625 |
#!/usr/bin/env python
import nltk
import os
import sys
import include
title = sys.argv[1].lower()
html = sys.argv[2].lower()
cate_id = sys.argv[3]
def do_read_train(uni_dict, bi_dict, file):
lines = file.readlines()
for line in lines:
words = line.split()
bi_dict[words[0]] = int(words[2])
uni_dict[words[0].split("|")[1]] = int(words[4])
return int(lines[0].split()[-1])
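# frequency_update subtracts the deleted post's unigram/bigram counts from the
# stored training counts, dropping any entry whose count reaches zero.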
def frequency_update(uni_dict, bi_dict, new_uni_dict, new_bi_dict):
# update uni dict
for token in new_uni_dict.keys():
if uni_dict.has_key(token):
uni_dict[token] -= new_uni_dict[token]
if uni_dict[token] == 0:
del uni_dict[token]
# update bi dict
for key in new_bi_dict:
if bi_dict.has_key(key):
bi_dict[key] -= new_bi_dict[key]
if bi_dict[key] == 0:
del bi_dict[key]
def sort_dict_to(uni_dict, bi_dict, n, sorted_list):
for key in bi_dict:
first = key.split("|")[0]
second = key.split("|")[1]
sorted_list.append([key, float(bi_dict[key]) / uni_dict[second], bi_dict[key], float(uni_dict[second]) / n, uni_dict[second], n])
	# sort in place so the caller's list is actually reordered (rebinding the
	# local name would be invisible outside this function)
	sorted_list.sort(key=lambda x: x[4], reverse=True)
text = nltk.clean_html(html)
cate_dir = os.path.join(include.dataset_dir, cate_id)
if not os.access(cate_dir, os.F_OK):
os.makedirs(cate_dir)
file = open(os.path.join(cate_dir, title + ".txt"), "w")
file.write(text)
file.close()
train_file = os.path.join(cate_dir, cate_id + include.bi_train_suffix)
uni_dict = {}
bi_dict = {}
n = 0
try:
with open(train_file, "r") as file:
n = do_read_train(uni_dict, bi_dict, file)
file.close()
except IOError:
pass
tokens = include.my_tokenizer(text)
if "" in tokens:
tokens.remove("")
# read unigram frequency from new post
num_tokens = len(tokens)
new_uni_dict = {}
for token in tokens:
if new_uni_dict.has_key(token):
new_uni_dict[token] += 1
else:
new_uni_dict[token] = 1
# read bigram frequency from new post
new_bi_dict = {}
for i in range(1, len(tokens)):
key = tokens[i] + "|" + tokens[i - 1]
if new_bi_dict.has_key(key):
new_bi_dict[key] += 1
else:
new_bi_dict[key] = 1
frequency_update(uni_dict, bi_dict, new_uni_dict, new_bi_dict)
sorted_list = []
sort_dict_to(uni_dict, bi_dict, n - num_tokens, sorted_list)
file = open(train_file, "w")
file.truncate()
for item in sorted_list:
token = item[0]
bi_p = item[1]
bi_freq = item[2]
uni_p = item[3]
uni_freq = item[4]
nn = item[5]
file.write("%-30s %.8f %6d %16.8f %6s %9d\n" %(token, bi_p, bi_freq, uni_p, uni_freq, nn))
file.close()
| jiayisuse/cs73 | wp-admin/data_delete.py | Python | gpl-2.0 | 2,483 | 0.024567 |
from django.conf.urls import patterns, url
from . import views
urlpatterns = patterns('',
url(r'^$', views.IndexView.as_view(), name='index'),
url(r'^(?P<pk>\d+)/$', views.DetailView.as_view(), name='detail'),
url(r'^(?P<pk>\d+)/results/$', views.ResultsView.as_view(), name='results'),
url(r'^(?P<poll_id>\d+)/vote/$', views.vote, name='vote'),
url(r'^test/$' , views.test_view, name='test_view'),
)
| alirazabhayani/django_workshop_poll_app | polls/urls.py | Python | bsd-3-clause | 424 | 0.007075 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
.. module:: measurematrix.py
.. moduleauthor:: Jozsef Attila Janko, Bence Takacs, Zoltan Siki (code optimization)
Sample application of Ulyxes PyAPI to measure within a rectangular area
:param argv[1] (int): number of horizontal intervals (between measurements), default 1 (perimeter only)
:param argv[2] (int): number of vertical intervals(between measurements), default 1 (perimeter only)
:param argv[3] (sensor): 1100/1800/1200/5500, default 1100
:param argv[4] (port): serial port, default COM5
:param argv[5]: output file, default stdout
usage: python measurematrix.py 9 3 1100 COM5
"""
import re
import sys
sys.path.append('../pyapi/')
from angle import Angle
from serialiface import SerialIface
from totalstation import TotalStation
from echowriter import EchoWriter
from filewriter import FileWriter
from leicatps1200 import LeicaTPS1200
from leicatcra1100 import LeicaTCRA1100
from trimble5500 import Trimble5500
if __name__ == "__main__":
if sys.version_info[0] > 2: # Python 3 compatibility
raw_input = input
if len(sys.argv) == 1:
print("Usage: {0:s} horizontal_step vertical_step instrument port output_file".format(sys.argv[0]))
exit(1)
# set horizontal stepping interval dh_nr
dh_nr = 1
if len(sys.argv) > 1:
try:
dh_nr = int(sys.argv[1])
except ValueError:
print("invalid numeric value " + sys.argv[1])
sys.exit(1)
# set vertical stepping interval dv_nr
dv_nr = 1
if len(sys.argv) > 2:
try:
dv_nr = int(sys.argv[2])
except ValueError:
print("invalid numeric value " + sys.argv[2])
#sys.exit(1)
# set instrument
stationtype = '1100'
if len(sys.argv) > 3:
stationtype = sys.argv[3]
if re.search('120[0-9]$', stationtype):
mu = LeicaTPS1200()
elif re.search('110[0-9]$', stationtype):
mu = LeicaTCRA1100()
elif re.search('550[0-9]$', stationtype):
mu = Trimble5500()
else:
print("unsupported instrument type")
sys.exit(1)
# set port
port = '/dev/ttyUSB0'
if len(sys.argv) > 4:
port = sys.argv[4]
iface = SerialIface("test", port)
# set output file name
fn = None
if len(sys.argv) > 5:
fn = sys.argv[5]
# write out measurements
if fn:
wrt = FileWriter(angle='DEG', dist='.3f', fname=fn)
else:
wrt = EchoWriter(angle='DEG', dist='.3f')
if wrt.GetState() != wrt.WR_OK:
sys.exit(-1) # open error
ts = TotalStation(stationtype, mu, iface, wrt)
if isinstance(mu, Trimble5500):
print("Please change to reflectorless EDM mode (MNU 722 from keyboard)")
print("and turn on red laser (MNU 741 from keyboard) and press enter!")
raw_input()
else:
ts.SetATR(0) # turn ATR off
ts.SetEDMMode('RLSTANDARD') # reflectorless distance measurement
ts.SetRedLaser(1) # turn red laser on
w = raw_input("Target on lower left corner and press Enter")
w1 = ts.GetAngles()
w = raw_input("Target on upper right corner and press Enter")
w2 = ts.GetAngles()
dh = (w2['hz'].GetAngle() - w1['hz'].GetAngle()) / dh_nr
dv = (w2['v'].GetAngle() - w1['v'].GetAngle()) / dv_nr
# measurement loops
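    # Sweep the grid column by column, alternating the vertical measuring
    # direction on every other column (zigzag pattern).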
for i in range(dh_nr+1): # horizontal loop
        measdir = i % 2 # 0/1 flag that alternates the vertical sweep direction per column
hz = Angle(w1['hz'].GetAngle() + i * dh, 'RAD')
for j in range(dv_nr+1): # vertical loop
if measdir == 0:
# move downward at odd steps to right
ts.Move(hz, Angle(w1['v'].GetAngle() + j * dv, 'RAD'))
else:
                # move upward at even steps to right
ts.Move(hz, Angle(w2['v'].GetAngle() - j * dv, 'RAD'))
ts.Measure()
meas = ts.GetMeasure()
if ts.measureIface.state != ts.measureIface.IF_OK or 'errorCode' in meas:
print('FATAL Cannot measure point')
| zsiki/ulyxes | pyapps/measurematrix.py | Python | gpl-2.0 | 4,051 | 0.005431 |
# -*- coding: utf-8 -*-
from module.common.json_layer import json_loads
from module.plugins.Account import Account
class SimplyPremiumCom(Account):
__name__ = "SimplyPremiumCom"
__type__ = "account"
__version__ = "0.05"
__description__ = """Simply-Premium.com account plugin"""
__license__ = "GPLv3"
__authors__ = [("EvolutionClip", "[email protected]")]
def loadAccountInfo(self, user, req):
premium = False
validuntil = -1
trafficleft = None
json_data = req.load('http://www.simply-premium.com/api/user.php?format=json')
self.logDebug("JSON data: %s" % json_data)
json_data = json_loads(json_data)
if 'vip' in json_data['result'] and json_data['result']['vip']:
premium = True
if 'timeend' in json_data['result'] and json_data['result']['timeend']:
validuntil = float(json_data['result']['timeend'])
if 'remain_traffic' in json_data['result'] and json_data['result']['remain_traffic']:
trafficleft = float(json_data['result']['remain_traffic']) / 1024 #@TODO: Remove `/ 1024` in 0.4.10
return {"premium": premium, "validuntil": validuntil, "trafficleft": trafficleft}
def login(self, user, data, req):
req.cj.setCookie("simply-premium.com", "lang", "EN")
html = req.load("http://www.simply-premium.com/login.php",
post={'key': user} if not data['password'] else {'login_name': user, 'login_pass': data['password']},
decode=True)
if 'logout' not in html:
self.wrongPassword()
| immenz/pyload | module/plugins/accounts/SimplyPremiumCom.py | Python | gpl-3.0 | 1,650 | 0.008485 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-29 16:14
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.fields
import wagtail.wagtailcore.blocks
import wagtail.wagtailcore.fields
import wagtail.wagtailimages.blocks
class Migration(migrations.Migration):
dependencies = [
('wagtailimages', '0016_deprecate_rendition_filter_relation'),
('wagtailcore', '0032_add_bulk_delete_page_permission'),
('willys_website', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='LandingPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('body', wagtail.wagtailcore.fields.StreamField((('h2', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), ('h3', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), ('h4', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), ('intro', wagtail.wagtailcore.blocks.RichTextBlock(icon='pilcrow')), ('pullquote', wagtail.wagtailcore.blocks.StructBlock((('quote', wagtail.wagtailcore.blocks.TextBlock('quote title')), ('attribution', wagtail.wagtailcore.blocks.CharBlock())))), ('paragraph', wagtail.wagtailcore.blocks.RichTextBlock(icon='pilcrow')), ('image', wagtail.wagtailcore.blocks.StructBlock((('image', wagtail.wagtailimages.blocks.ImageChooserBlock()),), icon='image', label='image')), ('html', wagtail.wagtailcore.blocks.StructBlock((('html', wagtail.wagtailcore.blocks.RawHTMLBlock()),), icon='code', label='html'))))),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='LandingPageHero',
fields=[
('heroitem_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='willys_website.HeroItem')),
('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
('page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='hero', to='willys_website.LandingPage')),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
bases=('willys_website.heroitem', models.Model),
),
migrations.RenameField(
model_name='productpage',
old_name='cost',
new_name='price',
),
migrations.RemoveField(
model_name='heroitem',
name='position',
),
migrations.AddField(
model_name='productpage',
name='bg_image',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image'),
),
migrations.AddField(
model_name='productpage',
name='ibu',
field=models.CharField(default='notset', max_length=255),
),
migrations.AddField(
model_name='productpage',
name='proof',
field=models.CharField(default='notset', max_length=255),
),
migrations.AddField(
model_name='productpage',
name='style',
field=models.CharField(default='notset', max_length=255),
),
migrations.AddField(
model_name='productpage',
name='subtitle',
field=models.CharField(default='notset', max_length=255),
),
migrations.AlterField(
model_name='blogpage',
name='body',
field=wagtail.wagtailcore.fields.StreamField((('h2', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), ('h3', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), ('h4', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), ('intro', wagtail.wagtailcore.blocks.RichTextBlock(icon='pilcrow')), ('pullquote', wagtail.wagtailcore.blocks.StructBlock((('quote', wagtail.wagtailcore.blocks.TextBlock('quote title')), ('attribution', wagtail.wagtailcore.blocks.CharBlock())))), ('paragraph', wagtail.wagtailcore.blocks.RichTextBlock(icon='pilcrow')), ('image', wagtail.wagtailcore.blocks.StructBlock((('image', wagtail.wagtailimages.blocks.ImageChooserBlock()),), icon='image', label='image')), ('html', wagtail.wagtailcore.blocks.StructBlock((('html', wagtail.wagtailcore.blocks.RawHTMLBlock()),), icon='code', label='html')))),
),
migrations.AlterField(
model_name='eventpage',
name='body',
field=wagtail.wagtailcore.fields.StreamField((('h2', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), ('h3', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), ('h4', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), ('intro', wagtail.wagtailcore.blocks.RichTextBlock(icon='pilcrow')), ('pullquote', wagtail.wagtailcore.blocks.StructBlock((('quote', wagtail.wagtailcore.blocks.TextBlock('quote title')), ('attribution', wagtail.wagtailcore.blocks.CharBlock())))), ('paragraph', wagtail.wagtailcore.blocks.RichTextBlock(icon='pilcrow')), ('image', wagtail.wagtailcore.blocks.StructBlock((('image', wagtail.wagtailimages.blocks.ImageChooserBlock()),), icon='image', label='image')), ('html', wagtail.wagtailcore.blocks.StructBlock((('html', wagtail.wagtailcore.blocks.RawHTMLBlock()),), icon='code', label='html')))),
),
migrations.AlterField(
model_name='productpage',
name='body',
field=wagtail.wagtailcore.fields.StreamField((('h2', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), ('h3', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), ('h4', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), ('intro', wagtail.wagtailcore.blocks.RichTextBlock(icon='pilcrow')), ('pullquote', wagtail.wagtailcore.blocks.StructBlock((('quote', wagtail.wagtailcore.blocks.TextBlock('quote title')), ('attribution', wagtail.wagtailcore.blocks.CharBlock())))), ('paragraph', wagtail.wagtailcore.blocks.RichTextBlock(icon='pilcrow')), ('image', wagtail.wagtailcore.blocks.StructBlock((('image', wagtail.wagtailimages.blocks.ImageChooserBlock()),), icon='image', label='image')), ('html', wagtail.wagtailcore.blocks.StructBlock((('html', wagtail.wagtailcore.blocks.RawHTMLBlock()),), icon='code', label='html')))),
),
migrations.AlterField(
model_name='productpage',
name='name',
field=models.CharField(max_length=255),
),
]
| willysbrewing/willys_website | willys_website/core/migrations/0002_auto_20170129_1714.py | Python | apache-2.0 | 7,007 | 0.001284 |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Trainer for classification models for Graph Agreement Models without a graph.
This class contains functionality that allows for training a classification
model to be used as part of Graph Agreement Models.
This implementation does not use a provided graph, but samples random pairs
of samples.
"""
import logging
import os
from .adversarial_sparse import entropy_y_x
from .adversarial_sparse import get_loss_vat
import numpy as np
import tensorflow as tf
from .trainer_base import batch_iterator
from .trainer_base import Trainer
class TrainerClassificationGCN(Trainer):
"""Trainer for the classifier component of a Graph Agreement Model.
Attributes:
model: A Model object that is used to provide the architecture of the
classification model.
    is_train: A placeholder for a boolean value specifying if the model is used
      for training or evaluation.
data: A CotrainDataset object.
trainer_agr: A TrainerAgreement or TrainerPerfectAgreement object.
optimizer: Optimizer used for training the classification model.
batch_size: Batch size for used when training and evaluating the
classification model.
gradient_clip: A float number representing the maximum gradient norm allowed
if we do gradient clipping. If None, no gradient clipping is performed.
min_num_iter: An integer representing the minimum number of iterations to
train the classification model.
max_num_iter: An integer representing the maximum number of iterations to
train the classification model.
num_iter_after_best_val: An integer representing the number of extra
iterations to perform after improving the validation set accuracy.
max_num_iter_cotrain: An integer representing the maximum number of cotrain
iterations to train for.
reg_weight_ll: A float representing the weight of the agreement loss term
component of the classification model loss function, between
labeled-labeled pairs of samples.
reg_weight_lu: A float representing the weight of the agreement loss term
component of the classification model loss function, between
labeled-unlabeled pairs of samples.
reg_weight_uu: A float representing the weight of the agreement loss term
component of the classification model loss function, between
unlabeled-unlabeled pairs of samples.
num_pairs_reg: An integer representing the number of sample pairs of each
type (LL, LU, UU) to include in each computation of the classification
model loss.
iter_cotrain: A Tensorflow variable containing the current cotrain
iteration.
reg_weight_vat: A float representing the weight of the virtual adversarial
training (VAT) regularization loss in the classification model loss
function.
use_ent_min: A boolean specifying whether to use entropy regularization with
VAT.
enable_summaries: Boolean specifying whether to enable variable summaries.
summary_step: Integer representing the summary step size.
summary_dir: String representing the path to a directory where to save the
variable summaries.
logging_step: Integer representing the number of iterations after which we
log the loss of the model.
eval_step: Integer representing the number of iterations after which we
evaluate the model.
warm_start: Whether the model parameters are initialized at their best value
in the previous cotrain iteration. If False, they are reinitialized.
abs_loss_chg_tol: A float representing the absolute tolerance for checking
if the training loss has converged. If the difference between the current
loss and previous loss is less than `abs_loss_chg_tol`, we count this
iteration towards convergence (see `loss_chg_iter_below_tol`).
rel_loss_chg_tol: A float representing the relative tolerance for checking
if the training loss has converged. If the ratio between the current loss
and previous loss is less than `rel_loss_chg_tol`, we count this iteration
towards convergence (see `loss_chg_iter_below_tol`).
loss_chg_iter_below_tol: An integer representing the number of consecutive
iterations that pass the convergence criteria before stopping training.
checkpoints_dir: Path to the folder where to store TensorFlow model
checkpoints.
weight_decay: Weight for the weight decay term in the classification model
loss.
weight_decay_schedule: Schedule how to adjust the classification weight
decay weight after every cotrain iteration.
penalize_neg_agr: Whether to not only encourage agreement between samples
that the agreement model believes should have the same label, but also
penalize agreement when two samples agree when the agreement model
predicts they should disagree.
first_iter_original: A boolean specifying whether the first cotrain
iteration trains the original classification model (with no agreement
term).
    use_l2_classif: Whether to use L2 loss for classification, as opposed to
      whichever loss is specified in the provided model_cls.
seed: Seed used by all the random number generators in this class.
use_graph: Boolean specifying whether the agreement loss is applied to graph
edges, as opposed to random pairs of samples.
"""
def __init__(self,
model,
data,
trainer_agr,
optimizer,
lr_initial,
batch_size,
min_num_iter,
max_num_iter,
num_iter_after_best_val,
max_num_iter_cotrain,
reg_weight_ll,
reg_weight_lu,
reg_weight_uu,
num_pairs_reg,
iter_cotrain,
reg_weight_vat=0.0,
use_ent_min=False,
enable_summaries=False,
summary_step=1,
summary_dir=None,
warm_start=False,
gradient_clip=None,
logging_step=1,
eval_step=1,
abs_loss_chg_tol=1e-10,
rel_loss_chg_tol=1e-7,
loss_chg_iter_below_tol=30,
checkpoints_dir=None,
weight_decay=None,
weight_decay_schedule=None,
penalize_neg_agr=False,
first_iter_original=True,
use_l2_classif=True,
seed=None,
lr_decay_steps=None,
lr_decay_rate=None,
use_graph=False):
super(TrainerClassificationGCN, self).__init__(
model=model,
abs_loss_chg_tol=abs_loss_chg_tol,
rel_loss_chg_tol=rel_loss_chg_tol,
loss_chg_iter_below_tol=loss_chg_iter_below_tol)
self.data = data
self.trainer_agr = trainer_agr
self.batch_size = batch_size
self.min_num_iter = min_num_iter
self.max_num_iter = max_num_iter
self.num_iter_after_best_val = num_iter_after_best_val
self.max_num_iter_cotrain = max_num_iter_cotrain
self.enable_summaries = enable_summaries
self.summary_step = summary_step
self.summary_dir = summary_dir
self.warm_start = warm_start
self.gradient_clip = gradient_clip
self.logging_step = logging_step
self.eval_step = eval_step
self.checkpoint_path = (
os.path.join(checkpoints_dir, 'classif_best.ckpt')
if checkpoints_dir is not None else None)
self.weight_decay_initial = weight_decay
self.weight_decay_schedule = weight_decay_schedule
self.num_pairs_reg = num_pairs_reg
self.reg_weight_ll = reg_weight_ll
self.reg_weight_lu = reg_weight_lu
self.reg_weight_uu = reg_weight_uu
self.reg_weight_vat = reg_weight_vat
self.use_ent_min = use_ent_min
self.penalize_neg_agr = penalize_neg_agr
self.use_l2_classif = use_l2_classif
self.first_iter_original = first_iter_original
self.iter_cotrain = iter_cotrain
self.lr_initial = lr_initial
self.lr_decay_steps = lr_decay_steps
self.lr_decay_rate = lr_decay_rate
self.use_graph = use_graph
# Build TensorFlow graph.
logging.info('Building classification TensorFlow graph...')
# Create placeholders.
input_indices = tf.placeholder(
tf.int64, shape=(None,), name='input_indices')
input_indices_unlabeled = tf.placeholder(
tf.int32, shape=(None,), name='input_indices_unlabeled')
input_labels = tf.placeholder(tf.int64, shape=(None,), name='input_labels')
# Create a placeholder specifying if this is train time.
is_train = tf.placeholder_with_default(False, shape=[], name='is_train')
# Create some placeholders specific to GCN.
self.support_op = tf.sparse_placeholder(tf.float32, name='support')
self.features_op = tf.sparse_placeholder(tf.float32, name='features')
self.num_features_nonzero_op = tf.placeholder(
tf.int32, name='num_features_nonzero')
# Save the data required to fill in these placeholders. We don't add them
# directly in the graph as constants in order to avoid saving large
# checkpoints.
self.support = data.support
self.features = data.dataset.features_sparse
self.num_features_nonzero = data.num_features_nonzero
# Create variables and predictions.
with tf.variable_scope('predictions'):
encoding, variables_enc, reg_params_enc = (
self.model.get_encoding_and_params(
inputs=self.features_op,
is_train=is_train,
support=self.support_op,
num_features_nonzero=self.num_features_nonzero_op))
self.variables = variables_enc
self.reg_params = reg_params_enc
predictions, variables_pred, reg_params_pred = (
self.model.get_predictions_and_params(
encoding=encoding,
is_train=is_train,
support=self.support_op,
num_features_nonzero=self.num_features_nonzero_op))
self.variables.update(variables_pred)
self.reg_params.update(reg_params_pred)
normalized_predictions = self.model.normalize_predictions(predictions)
predictions_var_scope = tf.get_variable_scope()
predictions_batch = tf.gather(predictions, input_indices, axis=0)
normalized_predictions_batch = tf.gather(
normalized_predictions, input_indices, axis=0)
one_hot_labels = tf.one_hot(
input_labels, data.num_classes, name='targets_one_hot')
# Create a variable for weight decay that may be updated.
weight_decay_var, weight_decay_update = self._create_weight_decay_var(
weight_decay, weight_decay_schedule)
# Create counter for classification iterations.
iter_cls_total, iter_cls_total_update = self._create_counter()
# Create loss.
with tf.name_scope('loss'):
if self.use_l2_classif:
loss_supervised = tf.square(one_hot_labels -
normalized_predictions_batch)
loss_supervised = tf.reduce_sum(loss_supervised, axis=-1)
loss_supervised = tf.reduce_mean(loss_supervised)
else:
loss_supervised = self.model.get_loss(
predictions=predictions_batch,
targets=one_hot_labels,
weight_decay=None)
# Agreement regularization loss.
loss_agr = self._get_agreement_reg_loss(data, is_train)
# If the first co-train iteration trains the original model (for
# comparison purposes), then we do not add an agreement loss.
if self.first_iter_original:
loss_agr_weight = tf.cast(tf.greater(iter_cotrain, 0), tf.float32)
loss_agr = loss_agr * loss_agr_weight
# Weight decay loss.
loss_reg = 0.0
if weight_decay_var is not None:
for var in self.reg_params.values():
loss_reg += weight_decay_var * tf.nn.l2_loss(var)
# Adversarial loss, in case we want to add VAT on top of GAM.
ones = tf.fill(tf.shape(input_indices_unlabeled), 1.0)
unlabeled_mask = tf.scatter_nd(
input_indices_unlabeled[:, None],
updates=ones,
shape=[
data.num_samples,
],
name='unlabeled_mask')
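      # unlabeled_mask has shape [num_samples] with 1.0 at the unlabeled sample
      # indices and 0.0 elsewhere; the VAT loss below is restricted to these.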
placeholders = {
'support': self.support_op,
'num_features_nonzero': self.num_features_nonzero_op
}
loss_vat = get_loss_vat(
inputs=self.features_op,
predictions=predictions,
mask=unlabeled_mask,
is_train=is_train,
model=model,
placeholders=placeholders,
predictions_var_scope=predictions_var_scope)
num_unlabeled = tf.shape(input_indices_unlabeled)[0]
loss_vat = tf.cond(
tf.greater(num_unlabeled, 0), lambda: loss_vat, lambda: 0.0)
if self.use_ent_min:
# Use entropy minimization with VAT (i.e. VATENT).
loss_ent = entropy_y_x(predictions, unlabeled_mask)
loss_vat = loss_vat + tf.cond(
tf.greater(num_unlabeled, 0), lambda: loss_ent, lambda: 0.0)
loss_vat = loss_vat * self.reg_weight_vat
if self.first_iter_original:
# Do not add the adversarial loss in the first iteration if
# the first iteration trains the plain baseline model.
weight_loss_vat = tf.cond(
tf.greater(iter_cotrain, 0), lambda: 1.0, lambda: 0.0)
loss_vat = loss_vat * weight_loss_vat
# Total loss.
loss_op = loss_supervised + loss_agr + loss_reg + loss_vat
# Create accuracy.
accuracy = tf.equal(
tf.argmax(normalized_predictions_batch, 1), input_labels)
accuracy = tf.reduce_mean(tf.cast(accuracy, tf.float32))
# Create Tensorboard summaries.
if self.enable_summaries:
summaries = [
tf.summary.scalar('loss_supervised', loss_supervised),
tf.summary.scalar('loss_agr', loss_agr),
tf.summary.scalar('loss_reg', loss_reg),
tf.summary.scalar('loss_total', loss_op)
]
self.summary_op = tf.summary.merge(summaries)
# Create learning rate schedule and optimizer.
self.global_step = tf.train.get_or_create_global_step()
if self.lr_decay_steps is not None and self.lr_decay_rate is not None:
self.lr = tf.train.exponential_decay(
self.lr_initial,
self.global_step,
self.lr_decay_steps,
self.lr_decay_rate,
staircase=True)
self.optimizer = optimizer(self.lr)
else:
self.optimizer = optimizer(lr_initial)
# Get trainable variables and compute gradients.
grads_and_vars = self.optimizer.compute_gradients(
loss_op,
tf.trainable_variables(scope=tf.get_default_graph().get_name_scope()))
# Clip gradients.
if self.gradient_clip:
variab = [elem[1] for elem in grads_and_vars]
gradients = [elem[0] for elem in grads_and_vars]
gradients, _ = tf.clip_by_global_norm(gradients, self.gradient_clip)
grads_and_vars = tuple(zip(gradients, variab))
with tf.control_dependencies(
tf.get_collection(
tf.GraphKeys.UPDATE_OPS,
scope=tf.get_default_graph().get_name_scope())):
train_op = self.optimizer.apply_gradients(
grads_and_vars, global_step=self.global_step)
# Create a saver for model variables.
trainable_vars = [v for _, v in grads_and_vars]
# Put together the subset of variables to save and restore from the best
# validation accuracy as we train the agreement model in one cotrain round.
vars_to_save = trainable_vars + []
if isinstance(weight_decay_var, tf.Variable):
vars_to_save.append(weight_decay_var)
saver = tf.train.Saver(vars_to_save)
# Put together all variables that need to be saved in case the process is
# interrupted and needs to be restarted.
self.vars_to_save = [iter_cls_total, self.global_step]
if isinstance(weight_decay_var, tf.Variable):
self.vars_to_save.append(weight_decay_var)
if self.warm_start:
self.vars_to_save.extend([v for v in self.variables])
# More variables to be initialized after the session is created.
self.is_initialized = False
self.rng = np.random.RandomState(seed)
self.input_indices = input_indices
self.input_indices_unlabeled = input_indices_unlabeled
self.input_labels = input_labels
self.predictions = predictions
self.normalized_predictions = normalized_predictions
self.normalized_predictions_batch = normalized_predictions_batch
self.weight_decay_var = weight_decay_var
self.weight_decay_update = weight_decay_update
self.iter_cls_total = iter_cls_total
self.iter_cls_total_update = iter_cls_total_update
self.accuracy = accuracy
self.train_op = train_op
self.loss_op = loss_op
self.saver = saver
self.batch_size_actual = tf.shape(self.predictions)[0]
self.reset_optimizer = tf.variables_initializer(self.optimizer.variables())
self.is_train = is_train
def _create_weight_decay_var(self, weight_decay_initial,
weight_decay_schedule):
"""Creates a weight decay variable that can be updated using a schedule."""
weight_decay_var = None
weight_decay_update = None
if weight_decay_schedule is None:
if weight_decay_initial is not None:
weight_decay_var = tf.constant(
weight_decay_initial, dtype=tf.float32, name='weight_decay')
else:
weight_decay_var = None
elif weight_decay_schedule == 'linear':
weight_decay_var = tf.get_variable(
name='weight_decay',
initializer=tf.constant(
weight_decay_initial, name='weight_decay_initial'),
use_resource=True,
trainable=False)
update_rate = weight_decay_initial / float(self.max_num_iter_cotrain)
weight_decay_update = weight_decay_var.assign_sub(update_rate)
return weight_decay_var, weight_decay_update
def _create_counter(self):
"""Creates a cummulative iteration counter for all classification steps."""
iter_cls_total = tf.get_variable(
name='iter_cls_total',
initializer=tf.constant(0, name='iter_cls_total'),
use_resource=True,
trainable=False)
iter_cls_total_update = iter_cls_total.assign_add(1)
return iter_cls_total, iter_cls_total_update
def _get_agreement_reg_loss(self, data, is_train):
"""Computes the regularization loss coming from the agreement term.
This is calculated using the following idea: we incur a loss for pairs of
samples that should have the same label, but for which the predictions of
    the classification model are not equal. The loss incurred by each pair is
    proportional to the distance between the two predictions, as well as the
confidence we have that they should agree.
In the case of pairs where both samples are labeled (LL), the agreement
confidence is 1.0. When at least one sample is unlabeled (LU, UU), then we
use the agreement model to estimate this confidence.
Note that for the pairs where a label is available, we can compute this loss
wrt. the actual label, instead of the classifier predictions. However, when
both samples are labeled (LL), for one of them we use the prediction and for
    the other the true label -- otherwise there are no gradients to propagate.
Args:
data: A CotrainDataset object.
      is_train: A placeholder for a boolean that specifies whether this
        function is called as part of model training or inference.
Returns:
The computed agreement loss op.
"""
# Select num_pairs_reg pairs of samples from each category LL, LU, UU.
# for which to do the regularization.
indices_ll_right = tf.placeholder(dtype=tf.int64, shape=(None,))
indices_lu_left = tf.placeholder(dtype=tf.int64, shape=(None,))
indices_lu_right = tf.placeholder(dtype=tf.int64, shape=(None,))
indices_uu_left = tf.placeholder(dtype=tf.int64, shape=(None,))
indices_uu_right = tf.placeholder(dtype=tf.int64, shape=(None,))
# First obtain the features shape from the dataset, and append a batch_size
# dimension to it (i.e., `None` to allow for variable batch size).
features_shape = [None] + list(data.features_shape)
features_ll_right = tf.placeholder(dtype=tf.float32, shape=features_shape)
features_lu_left = tf.placeholder(dtype=tf.float32, shape=features_shape)
features_lu_right = tf.placeholder(dtype=tf.float32, shape=features_shape)
features_uu_left = tf.placeholder(dtype=tf.float32, shape=features_shape)
features_uu_right = tf.placeholder(dtype=tf.float32, shape=features_shape)
labels_ll_left_idx = tf.placeholder(dtype=tf.int64, shape=(None,))
labels_ll_right_idx = tf.placeholder(dtype=tf.int64, shape=(None,))
labels_lu_left_idx = tf.placeholder(dtype=tf.int64, shape=(None,))
labels_ll_left = tf.one_hot(labels_ll_left_idx, data.num_classes)
labels_lu_left = tf.one_hot(labels_lu_left_idx, data.num_classes)
with tf.variable_scope('predictions', reuse=True):
# Obtain predictions for all nodes in the graph.
encoding_all, _, _ = self.model.get_encoding_and_params(
inputs=self.features_op,
is_train=is_train,
support=self.support_op,
num_features_nonzero=self.num_features_nonzero_op,
update_batch_stats=False)
predictions_all, _, _ = self.model.get_predictions_and_params(
encoding=encoding_all,
is_train=is_train,
support=self.support_op,
num_features_nonzero=self.num_features_nonzero_op)
predictions_all = self.model.normalize_predictions(predictions_all)
# Select the nodes of interest.
predictions_ll_right = tf.gather(predictions_all, indices_ll_right)
predictions_lu_right = tf.gather(predictions_all, indices_lu_right)
predictions_uu_left = tf.gather(predictions_all, indices_uu_left)
predictions_uu_right = tf.gather(predictions_all, indices_uu_right)
    # Compute the squared Euclidean distance between the label distributions
    # that the classification model predicts for the src and tgt of each pair.
    # TODO: stop gradients may need to be added here.
    # TODO: handle the case where there are no more uu or lu edges at the end
    # of training, so that the concatenated shapes still match.
left = tf.concat((labels_ll_left, labels_lu_left, predictions_uu_left),
axis=0)
right = tf.concat(
(predictions_ll_right, predictions_lu_right, predictions_uu_right),
axis=0)
dists = tf.reduce_sum(tf.square(left - right), axis=-1)
# Estimate a weight for each distance, depending on the predictions
# of the agreement model. For the labeled samples, we can use the actual
# agreement between the labels, no need to estimate.
agreement_ll = tf.cast(
tf.equal(labels_ll_left_idx, labels_ll_right_idx), dtype=tf.float32)
_, agreement_lu, _, _ = self.trainer_agr.create_agreement_prediction(
src_features=features_lu_left,
tgt_features=features_lu_right,
is_train=is_train,
src_indices=indices_lu_left,
tgt_indices=indices_lu_right)
_, agreement_uu, _, _ = self.trainer_agr.create_agreement_prediction(
src_features=features_uu_left,
tgt_features=features_uu_right,
is_train=is_train,
src_indices=indices_uu_left,
tgt_indices=indices_uu_right)
agreement = tf.concat((agreement_ll, agreement_lu, agreement_uu), axis=0)
if self.penalize_neg_agr:
# Since the agreement is predicting scores between [0, 1], anything
# under 0.5 should represent disagreement. Therefore, we want to encourage
# agreement whenever the score is > 0.5, otherwise don't incur any loss.
agreement = tf.nn.relu(agreement - 0.5)
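      # e.g. an agreement score of 0.8 becomes a weight of 0.3, while any score
      # at or below 0.5 contributes no agreement loss for that pair.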
# Create a Tensor containing the weights assigned to each pair in the
# agreement regularization loss, depending on how many samples in the pair
# were labeled. This weight can be either reg_weight_ll, reg_weight_lu,
# or reg_weight_uu.
num_ll = tf.shape(predictions_ll_right)[0]
num_lu = tf.shape(predictions_lu_right)[0]
num_uu = tf.shape(predictions_uu_left)[0]
weights = tf.concat(
(self.reg_weight_ll * tf.ones(num_ll,), self.reg_weight_lu *
tf.ones(num_lu,), self.reg_weight_uu * tf.ones(num_uu,)),
axis=0)
# Scale each distance by its agreement weight and regularzation weight.
loss = tf.reduce_mean(dists * weights * agreement)
self.indices_ll_right = indices_ll_right
self.indices_lu_left = indices_lu_left
self.indices_lu_right = indices_lu_right
self.indices_uu_left = indices_uu_left
self.indices_uu_right = indices_uu_right
self.features_ll_right = features_ll_right
self.features_lu_left = features_lu_left
self.features_lu_right = features_lu_right
self.features_uu_left = features_uu_left
self.features_uu_right = features_uu_right
self.labels_ll_left = labels_ll_left_idx
self.labels_ll_right = labels_ll_right_idx
self.labels_lu_left = labels_lu_left_idx
self.agreement_lu = agreement_lu
return loss
def _construct_feed_dict(self,
input_indices,
split,
pair_ll_iterator=None,
pair_lu_iterator=None,
pair_uu_iterator=None,
unlabeled_indices=None):
"""Construct feed dictionary."""
try:
# Select the labels. Use the true, correct labels, at test time, and the
# self-labeled ones at train time.
labels = (
self.data.get_original_labels(input_indices)
if split == 'test' else self.data.get_labels(input_indices))
feed_dict = {
self.input_indices: input_indices,
self.input_labels: labels,
self.is_train: split == 'train',
self.features_op: self.features,
self.support_op: self.support,
self.num_features_nonzero_op: self.num_features_nonzero,
}
if unlabeled_indices is not None:
# This is not None only when using VAT regularization.
feed_dict.update({self.input_indices_unlabeled: unlabeled_indices})
if pair_ll_iterator is not None:
_, indices_tgt, _, features_tgt, labels_src, labels_tgt = next(
pair_ll_iterator)
feed_dict.update({
self.features_ll_right: features_tgt,
self.indices_ll_right: indices_tgt,
self.labels_ll_left: labels_src,
self.labels_ll_right: labels_tgt
})
if pair_lu_iterator is not None:
indices_src, indices_tgt, features_src, features_tgt, labels_src, _ = (
next(pair_lu_iterator))
feed_dict.update({
self.indices_lu_left: indices_src,
self.indices_lu_right: indices_tgt,
self.features_lu_left: features_src,
self.features_lu_right: features_tgt,
self.labels_lu_left: labels_src
})
if pair_uu_iterator is not None:
indices_src, indices_tgt, features_src, features_tgt, _, _ = next(
pair_uu_iterator)
feed_dict.update({
self.indices_uu_left: indices_src,
self.indices_uu_right: indices_tgt,
self.features_uu_left: features_src,
self.features_uu_right: features_tgt
})
return feed_dict
except StopIteration:
# If the iterator has finished, return None.
return None
def pair_iterator(self, src_indices, tgt_indices, batch_size, data):
"""Iterator over pairs of samples.
The first element of the pair is selected from the src_indices, and the
second element is selected from tgt_indices.
Args:
src_indices: Numpy array containing the indices available for the source
node.
tgt_indices: Numpy array containing the indices available for the tgt
node.
batch_size: An integer representing the desired batch size.
data: A CotrainDataset object used to extract the features and labels.
Yields:
indices_src, indices_tgt, features_src, features_tgt, labels_src,
labels_tgt
"""
def _select_from_pool(indices):
"""Selects batch_size indices from the provided list."""
num_indices = len(indices)
if num_indices > 0:
idxs = self.rng.randint(0, high=num_indices, size=(batch_size,))
indices_batch = indices[idxs]
features_batch = data.get_features(indices_batch)
labels_batch = data.get_labels(indices_batch)
else:
features_shape = [0] + list(data.features_shape)
indices_batch = np.zeros(shape=(0,), dtype=np.int64)
features_batch = np.zeros(shape=features_shape, dtype=np.float32)
labels_batch = np.zeros(shape=(0,), dtype=np.int64)
return indices_batch, features_batch, labels_batch
while True:
indices_src, features_src, labels_src = _select_from_pool(src_indices)
indices_tgt, features_tgt, labels_tgt = _select_from_pool(tgt_indices)
yield (indices_src, indices_tgt, features_src, features_tgt, labels_src,
labels_tgt)
def edge_iterator(self, data, batch_size, labeling):
"""An iterator over graph edges.
Args:
data: A CotrainDataset object used to extract the features and labels.
batch_size: An integer representing the desired batch size.
labeling: A string which can be `ll`, `lu` or `uu`, that is used to
represent the type of edges to return, where `ll` refers to
labeled-labeled, `lu` refers to labeled-unlabeled, and `uu` refers to
unlabeled-unlabeled.
Yields:
indices_src, indices_tgt, features_src, features_tgt, labels_src,
labels_tgt
"""
if labeling == 'll':
edges = data.get_edges(
src_labeled=True, tgt_labeled=True, label_must_match=True)
elif labeling == 'lu':
edges_lu = data.get_edges(src_labeled=True, tgt_labeled=False)
edges_ul = data.get_edges(src_labeled=False, tgt_labeled=True)
# Reverse the edges of UL to be LU.
edges_ul = [e.copy(src=e.tgt, tgt=e.src) for e in edges_ul]
edges = edges_lu + edges_ul
elif labeling == 'uu':
edges = data.get_edges(src_labeled=False, tgt_labeled=False)
else:
raise ValueError('Unsupported value for parameter `labeling`.')
if not edges:
indices = np.zeros(shape=(0,), dtype=np.int32)
features = np.zeros(
shape=[
0,
] + list(data.features_shape), dtype=np.float32)
labels = np.zeros(shape=(0,), dtype=np.int64)
while True:
yield (indices, indices, features, features, labels, labels)
edges = np.stack([(e.src, e.tgt) for e in edges])
iterator = batch_iterator(
inputs=edges,
batch_size=batch_size,
shuffle=True,
allow_smaller_batch=False,
repeat=True)
for edge in iterator:
indices_src = edge[:, 0]
indices_tgt = edge[:, 1]
features_src = data.get_features(indices_src)
features_tgt = data.get_features(indices_tgt)
labels_src = data.get_labels(indices_src)
labels_tgt = data.get_labels(indices_tgt)
yield (indices_src, indices_tgt, features_src, features_tgt, labels_src,
labels_tgt)
def _evaluate(self, indices, split, session, summary_writer):
"""Evaluates the samples with the provided indices."""
feed_dict_val = self._construct_feed_dict(indices, split)
val_acc = session.run(self.accuracy, feed_dict=feed_dict_val)
if self.enable_summaries:
summary = tf.Summary()
summary.value.add(
tag='ClassificationModel/' + split + '_acc', simple_value=val_acc)
iter_cls_total = session.run(self.iter_cls_total)
summary_writer.add_summary(summary, iter_cls_total)
summary_writer.flush()
return val_acc
def train(self, data, session=None, **kwargs):
"""Train the classification model on the provided dataset.
Args:
data: A CotrainDataset object.
session: A TensorFlow session or None.
**kwargs: Other keyword arguments.
Returns:
best_test_acc: A float representing the test accuracy at the iteration
where the validation accuracy is maximum.
best_val_acc: A float representing the best validation accuracy.
"""
summary_writer = kwargs['summary_writer']
logging.info('Training classifier...')
if not self.is_initialized:
self.is_initialized = True
else:
if self.weight_decay_update is not None:
session.run(self.weight_decay_update)
logging.info('New weight decay value: %f',
session.run(self.weight_decay_var))
# Reset the optimizer state (e.g., momentum).
session.run(self.reset_optimizer)
if not self.warm_start:
# Re-initialize variables.
initializers = [v.initializer for v in self.variables.values()]
initializers.append(self.global_step.initializer)
session.run(initializers)
# Construct data iterator.
logging.info('Training classifier with %d samples...', data.num_train())
train_indices = data.get_indices_train()
unlabeled_indices = data.get_indices_unlabeled()
val_indices = data.get_indices_val()
test_indices = data.get_indices_test()
# Create iterators for ll, lu, uu pairs of samples for the agreement term.
if self.use_graph:
pair_ll_iterator = self.edge_iterator(
data, batch_size=self.num_pairs_reg, labeling='ll')
pair_lu_iterator = self.edge_iterator(
data, batch_size=self.num_pairs_reg, labeling='lu')
pair_uu_iterator = self.edge_iterator(
data, batch_size=self.num_pairs_reg, labeling='uu')
else:
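      # No graph provided: sample random pairs from the labeled and unlabeled
      # pools instead of using graph edges.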
pair_ll_iterator = self.pair_iterator(train_indices, train_indices,
self.num_pairs_reg, data)
pair_lu_iterator = self.pair_iterator(train_indices, unlabeled_indices,
self.num_pairs_reg, data)
pair_uu_iterator = self.pair_iterator(unlabeled_indices,
unlabeled_indices,
self.num_pairs_reg, data)
step = 0
iter_below_tol = 0
min_num_iter = self.min_num_iter
has_converged = step >= self.max_num_iter
prev_loss_val = np.inf
best_test_acc = -1
best_val_acc = -1
checkpoint_saved = False
while not has_converged:
feed_dict = self._construct_feed_dict(
input_indices=train_indices,
unlabeled_indices=unlabeled_indices,
split='train',
pair_ll_iterator=pair_ll_iterator,
pair_lu_iterator=pair_lu_iterator,
pair_uu_iterator=pair_uu_iterator)
if self.enable_summaries and step % self.summary_step == 0:
loss_val, summary, iter_cls_total, _ = session.run(
[self.loss_op, self.summary_op, self.iter_cls_total, self.train_op],
feed_dict=feed_dict)
summary_writer.add_summary(summary, iter_cls_total)
summary_writer.flush()
else:
loss_val, _ = session.run((self.loss_op, self.train_op),
feed_dict=feed_dict)
# Log the loss, if necessary.
if step % self.logging_step == 0:
logging.info('Classification step %6d | Loss: %10.4f', step, loss_val)
# Evaluate, if necessary.
if step % self.eval_step == 0:
val_acc = self._evaluate(val_indices, 'val', session, summary_writer)
test_acc = self._evaluate(test_indices, 'test', session, summary_writer)
if step % self.logging_step == 0 or val_acc > best_val_acc:
logging.info(
'Classification step %6d | Loss: %10.4f | val_acc: %10.4f | '
'test_acc: %10.4f', step, loss_val, val_acc, test_acc)
if val_acc > best_val_acc:
best_val_acc = val_acc
best_test_acc = test_acc
if self.checkpoint_path:
self.saver.save(
session, self.checkpoint_path, write_meta_graph=False)
checkpoint_saved = True
# Go for at least num_iter_after_best_val more iterations.
min_num_iter = max(self.min_num_iter,
step + self.num_iter_after_best_val)
logging.info(
'Achieved best validation. '
'Extending to at least %d iterations...', min_num_iter)
step += 1
has_converged, iter_below_tol = self.check_convergence(
prev_loss_val,
loss_val,
step,
self.max_num_iter,
iter_below_tol,
min_num_iter=min_num_iter)
session.run(self.iter_cls_total_update)
prev_loss_val = loss_val
# Return to the best model.
if checkpoint_saved:
logging.info('Restoring best model...')
self.saver.restore(session, self.checkpoint_path)
return best_test_acc, best_val_acc
def predict(self, session, indices, is_train):
"""Make predictions for the provided sample indices."""
if not indices.shape[0]:
return np.zeros((0, self.data.num_classes), dtype=np.float32)
feed_dict = {
self.input_indices: indices,
self.is_train: is_train,
self.num_features_nonzero_op: self.num_features_nonzero,
self.features_op: self.features,
self.support_op: self.support
}
predictions = session.run(
self.normalized_predictions_batch, feed_dict=feed_dict)
return predictions
| tensorflow/neural-structured-learning | research/gam/gam/trainer/trainer_classification_gcn.py | Python | apache-2.0 | 38,149 | 0.004351 |
import unittest
from payment_terminal.tests import test_loader
import payment_terminal.drivers.bbs.tests as test_bbs
def suite():
loader = unittest.TestLoader()
suite = unittest.TestSuite((
loader.loadTestsFromModule(test_bbs),
loader.loadTestsFromModule(test_loader),
))
return suite
| bwhmather/python-payment-terminal | payment_terminal/tests/__init__.py | Python | bsd-3-clause | 320 | 0 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import numpy as np
import tensorflow as tf
from tensorboard.plugins.beholder import im_util
from tensorboard.plugins.beholder.file_system_tools import read_pickle,\
write_pickle, write_file
from tensorboard.plugins.beholder.shared_config import PLUGIN_NAME, TAG_NAME,\
SUMMARY_FILENAME, DEFAULT_CONFIG, CONFIG_FILENAME
from tensorboard.plugins.beholder import video_writing
from tensorboard.plugins.beholder.visualizer import Visualizer
class Beholder(object):
def __init__(self, logdir):
self.PLUGIN_LOGDIR = logdir + '/plugins/' + PLUGIN_NAME
self.is_recording = False
self.video_writer = video_writing.VideoWriter(
self.PLUGIN_LOGDIR,
outputs=[
video_writing.FFmpegVideoOutput,
video_writing.PNGVideoOutput])
self.frame_placeholder = tf.placeholder(tf.uint8, [None, None, None])
self.summary_op = tf.summary.tensor_summary(TAG_NAME,
self.frame_placeholder)
self.last_image_shape = []
self.last_update_time = time.time()
self.config_last_modified_time = -1
self.previous_config = dict(DEFAULT_CONFIG)
if not tf.gfile.Exists(self.PLUGIN_LOGDIR + '/config.pkl'):
tf.gfile.MakeDirs(self.PLUGIN_LOGDIR)
write_pickle(DEFAULT_CONFIG, '{}/{}'.format(self.PLUGIN_LOGDIR,
CONFIG_FILENAME))
self.visualizer = Visualizer(self.PLUGIN_LOGDIR)
def _get_config(self):
'''Reads the config file from disk or creates a new one.'''
filename = '{}/{}'.format(self.PLUGIN_LOGDIR, CONFIG_FILENAME)
modified_time = os.path.getmtime(filename)
if modified_time != self.config_last_modified_time:
config = read_pickle(filename, default=self.previous_config)
self.previous_config = config
else:
config = self.previous_config
self.config_last_modified_time = modified_time
return config
def _write_summary(self, session, frame):
'''Writes the frame to disk as a tensor summary.'''
summary = session.run(self.summary_op, feed_dict={
self.frame_placeholder: frame
})
path = '{}/{}'.format(self.PLUGIN_LOGDIR, SUMMARY_FILENAME)
write_file(summary, path)
def _get_final_image(self, session, config, arrays=None, frame=None):
if config['values'] == 'frames':
if frame is None:
final_image = im_util.get_image_relative_to_script('frame-missing.png')
else:
frame = frame() if callable(frame) else frame
final_image = im_util.scale_image_for_display(frame)
elif config['values'] == 'arrays':
if arrays is None:
final_image = im_util.get_image_relative_to_script('arrays-missing.png')
# TODO: hack to clear the info. Should be cleaner.
self.visualizer._save_section_info([], [])
else:
final_image = self.visualizer.build_frame(arrays)
elif config['values'] == 'trainable_variables':
arrays = [session.run(x) for x in tf.trainable_variables()]
final_image = self.visualizer.build_frame(arrays)
if len(final_image.shape) == 2:
# Map grayscale images to 3D tensors.
final_image = np.expand_dims(final_image, -1)
return final_image
def _enough_time_has_passed(self, FPS):
'''For limiting how often frames are computed.'''
if FPS == 0:
return False
else:
earliest_time = self.last_update_time + (1.0 / FPS)
return time.time() >= earliest_time
def _update_frame(self, session, arrays, frame, config):
final_image = self._get_final_image(session, config, arrays, frame)
self._write_summary(session, final_image)
self.last_image_shape = final_image.shape
return final_image
def _update_recording(self, frame, config):
'''Adds a frame to the current video output.'''
# pylint: disable=redefined-variable-type
should_record = config['is_recording']
if should_record:
if not self.is_recording:
self.is_recording = True
tf.logging.info(
'Starting recording using %s',
self.video_writer.current_output().name())
self.video_writer.write_frame(frame)
elif self.is_recording:
self.is_recording = False
self.video_writer.finish()
tf.logging.info('Finished recording')
  # TODO: blanket try and except for production? I don't want someone's script
  # to die after weeks of running because of a visualization.
def update(self, session, arrays=None, frame=None):
'''Creates a frame and writes it to disk.
Args:
arrays: a list of np arrays. Use the "custom" option in the client.
frame: a 2D np array. This way the plugin can be used for video of any
kind, not just the visualization that comes with the plugin.
frame can also be a function, which only is evaluated when the
"frame" option is selected by the client.
'''
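    # Illustrative call (array contents are hypothetical):
    #   beholder.update(session, arrays=[np.random.rand(32, 32)])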
new_config = self._get_config()
if self._enough_time_has_passed(self.previous_config['FPS']):
self.visualizer.update(new_config)
self.last_update_time = time.time()
final_image = self._update_frame(session, arrays, frame, new_config)
self._update_recording(final_image, new_config)
##############################################################################
@staticmethod
def gradient_helper(optimizer, loss, var_list=None):
'''A helper to get the gradients out at each step.
Args:
optimizer: the optimizer op.
      loss: the op that computes your loss value.
      var_list: optional list of variables to differentiate with respect to;
        defaults to tf.trainable_variables().
Returns: the gradient tensors and the train_step op.
'''
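    # Illustrative usage (names are hypothetical):
    #   grads, train_step = Beholder.gradient_helper(
    #       tf.train.AdamOptimizer(1e-3), loss)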
if var_list is None:
var_list = tf.trainable_variables()
grads_and_vars = optimizer.compute_gradients(loss, var_list=var_list)
grads = [pair[0] for pair in grads_and_vars]
return grads, optimizer.apply_gradients(grads_and_vars)
class BeholderHook(tf.train.SessionRunHook):
"""SessionRunHook implementation that runs Beholder every step.
Convenient when using tf.train.MonitoredSession:
```python
beholder_hook = BeholderHook(LOG_DIRECTORY)
with MonitoredSession(..., hooks=[beholder_hook]) as sess:
sess.run(train_op)
```
"""
def __init__(self, logdir):
"""Creates new Hook instance
Args:
logdir: Directory where Beholder should write data.
"""
self._logdir = logdir
self.beholder = None
def begin(self):
self.beholder = Beholder(self._logdir)
def after_run(self, run_context, unused_run_values):
self.beholder.update(run_context.session)
| ryfeus/lambda-packs | Tensorflow_Pandas_Numpy/source3.6/tensorboard/plugins/beholder/beholder.py | Python | mit | 7,267 | 0.007706 |
"""
Copyright (c) 2017-2022, Vanessa Sochat
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from expfactory.logger import bot
from expfactory.utils import write_json
from expfactory.defaults import EXPFACTORY_SUBID, EXPFACTORY_DATA
from glob import glob
import os
import uuid
import pickle
import json
import sys
# RELATIONAL ###################################################################
#
# This is an Expfactory Flask Server database plugin. It implements common
# functions (generate_subid, save_data, init_db) that should prepare a
# database and perform actions to save data to it. The functions are added
# to the main application upon initialization of the server. This relational
# module has support for sqlite3, mysql, and postgres
#
################################################################################
def generate_subid(self, token=None, return_user=False):
"""generate a new user in the database, still session based so we
create a new identifier.
"""
from expfactory.database.models import Participant
if not token:
p = Participant()
else:
p = Participant(token=token)
self.session.add(p)
self.session.commit()
if return_user is True:
return p
return p.id
def print_user(self, user):
"""print a relational database user"""
status = "active"
token = user.token
if token in ["finished", "revoked"]:
status = token
if token is None:
token = ""
subid = "%s\t%s[%s]" % (user.id, token, status)
print(subid)
return subid
def list_users(self, user=None):
"""list users, each having a model in the database. A headless experiment
will use protected tokens, and interactive will be based on auto-
incremented ids.
"""
from expfactory.database.models import Participant
participants = Participant.query.all()
users = []
for user in participants:
users.append(self.print_user(user))
return users
# Actions ######################################################################
def generate_user(self):
"""generate a new user in the database, still session based so we
create a new identifier. This function is called from the users new
entrypoint, and it assumes we want a user generated with a token.
"""
token = str(uuid.uuid4())
return self.generate_subid(token=token, return_user=True)
def finish_user(self, subid):
"""finish user will remove a user's token, making the user entry not
    accessible if running in headless mode"""
p = self.revoke_token(subid)
p.token = "finished"
self.session.commit()
return p
def restart_user(self, subid):
"""restart a user, which means revoking and issuing a new token."""
p = self.revoke_token(subid)
p = self.refresh_token(subid)
return p
# Tokens #######################################################################
def validate_token(self, token):
"""retrieve a subject based on a token. Valid means we return a participant
invalid means we return None
"""
from expfactory.database.models import Participant
p = Participant.query.filter(Participant.token == token).first()
if p is not None:
if p.token.endswith(("finished", "revoked")):
p = None
else:
p = p.id
return p
def revoke_token(self, subid):
"""revoke a token by removing it. Is done at finish, and also available
as a command line option"""
from expfactory.database.models import Participant
p = Participant.query.filter(Participant.id == subid).first()
if p is not None:
p.token = "revoked"
self.session.commit()
return p
def refresh_token(self, subid):
"""refresh or generate a new token for a user"""
from expfactory.database.models import Participant
p = Participant.query.filter(Participant.id == subid).first()
if p is not None:
p.token = str(uuid.uuid4())
self.session.commit()
return p
def save_data(self, session, exp_id, content):
"""save data will obtain the current subid from the session, and save it
    depending on the database type; here it is stored in the relational database."""
from expfactory.database.models import Participant, Result
subid = session.get("subid")
token = session.get("token")
self.logger.info("Saving data for subid %s" % subid)
# We only attempt save if there is a subject id, set at start
if subid is not None:
p = Participant.query.filter(
Participant.id == subid
).first() # better query here
        # In headless mode the provided token must match the participant's
        # token and must not be expired.
if self.headless and p.token != token:
self.logger.warning(
"%s attempting to use mismatched token [%s] skipping save"
% (p.id, token)
)
elif self.headless and p.token.endswith(("finished", "revoked")):
self.logger.warning(
"%s attempting to use expired token [%s] skipping save" % (p.id, token)
)
else:
# Preference is to save data under 'data', otherwise do all of it
if "data" in content:
content = content["data"]
result = Result(
data=content, exp_id=exp_id, participant_id=p.id
) # check if changes from str/int
# Create and save the result
self.session.add(result)
p.results.append(result)
self.session.commit()
self.logger.info("Save [participant] %s [result] %s" % (p, result))
Base = declarative_base()
def init_db(self):
"""initialize the database, with the default database path or custom with
a format corresponding to the database type:
Examples:
sqlite:////scif/data/expfactory.db
"""
# The user can provide a custom string
if self.database is None:
self.logger.error("You must provide a database url, exiting.")
sys.exit(1)
self.engine = create_engine(self.database, convert_unicode=True)
self.session = scoped_session(
sessionmaker(autocommit=False, autoflush=False, bind=self.engine)
)
# Database Setup
Base.query = self.session.query_property()
# import all modules here that might define models so that
# they will be registered properly on the metadata. Otherwise
# you will have to import them first before calling init_db()
import expfactory.database.models
self.Base = Base
self.Base.metadata.create_all(bind=self.engine)
| expfactory/expfactory | expfactory/database/relational.py | Python | bsd-3-clause | 8,107 | 0.000617 |
# drivers.py - high-level functions for filtering SQLAlchemy queries
#
# Copyright 2011 Lincoln de Sousa <[email protected]>.
# Copyright 2012, 2013, 2014, 2015, 2016 Jeffrey Finkelstein
# <[email protected]> and contributors.
#
# This file is part of Flask-Restless.
#
# Flask-Restless is distributed under both the GNU Affero General Public
# License version 3 and under the 3-clause BSD license. For more
# information, see LICENSE.AGPL and LICENSE.BSD.
"""High-level functions for creating filtered SQLAlchemy queries.
The :func:`search` and :func:`search_relationship` functions return
filtered queries on a SQLAlchemy model. The latter specifically
restricts the query to only those instances of a model that are related
to a particular object via a given to-many relationship.
"""
from sqlalchemy.orm import aliased
from sqlalchemy.sql import false as FALSE
from ..helpers import get_model
from ..helpers import get_related_model
from ..helpers import primary_key_names
from ..helpers import primary_key_value
from ..helpers import session_query
from .filters import create_filters
def search_relationship(session, instance, relation, filters=None, sort=None,
group_by=None, ignorecase=False):
"""Returns a filtered, sorted, and grouped SQLAlchemy query
restricted to those objects related to a given instance.
`session` is the SQLAlchemy session in which to create the query.
`instance` is an instance of a SQLAlchemy model whose relationship
will be queried.
    `relation` is a string naming a to-many relationship of `instance`.
`filters`, `sort`, `group_by`, and `ignorecase` are identical to the
corresponding arguments of :func:`.search`.
"""
model = get_model(instance)
related_model = get_related_model(model, relation)
query = session_query(session, related_model)
# Filter by only those related values that are related to `instance`.
relationship = getattr(instance, relation)
# TODO In Python 2.7+, this should be a set comprehension.
primary_keys = set(primary_key_value(inst) for inst in relationship)
# If the relationship is empty, we can avoid a potentially expensive
# filtering operation by simply returning an intentionally empty
# query.
if not primary_keys:
return query.filter(FALSE())
query = query.filter(primary_key_value(related_model).in_(primary_keys))
return search(session, related_model, filters=filters, sort=sort,
group_by=group_by, ignorecase=ignorecase,
_initial_query=query)
def search(session, model, filters=None, sort=None, group_by=None,
ignorecase=False, _initial_query=None):
"""Returns a filtered, sorted, and grouped SQLAlchemy query.
`session` is the SQLAlchemy session in which to create the query.
`model` is the SQLAlchemy model on which to create a query.
`filters` is a list of filter objects. Each filter object is a
dictionary representation of the filters to apply to the
query. (This dictionary is provided directly to the
:func:`.filters.create_filters` function.) For more information on
the format of this dictionary, see :doc:`filtering`.
`sort` is a list of pairs of the form ``(direction, fieldname)``,
where ``direction`` is either '+' or '-' and ``fieldname`` is a
string representing an attribute of the model or a dot-separated
relationship path (for example, 'owner.name'). If `ignorecase` is
True, the sorting will be case-insensitive (so 'a' will precede 'B'
instead of the default behavior in which 'B' precedes 'a').
`group_by` is a list of dot-separated relationship paths on which to
group the query results.
If `_initial_query` is provided, the filters, sorting, and grouping
will be appended to this query. Otherwise, an empty query will be
created for the specified model.
When building the query, filters are applied first, then sorting,
then grouping.
"""
query = _initial_query
if query is None:
query = session_query(session, model)
# Filter the query.
#
# This function call may raise an exception.
filters = create_filters(model, filters)
query = query.filter(*filters)
# Order the query. If no order field is specified, order by primary
# key.
# if not _ignore_sort:
if sort:
for (symbol, field_name) in sort:
direction_name = 'asc' if symbol == '+' else 'desc'
if '.' in field_name:
field_name, field_name_in_relation = field_name.split('.')
relation_model = aliased(get_related_model(model, field_name))
field = getattr(relation_model, field_name_in_relation)
if ignorecase:
field = field.collate('NOCASE')
direction = getattr(field, direction_name)
query = query.join(relation_model)
query = query.order_by(direction())
else:
field = getattr(model, field_name)
if ignorecase:
field = field.collate('NOCASE')
direction = getattr(field, direction_name)
query = query.order_by(direction())
else:
pks = primary_key_names(model)
pk_order = (getattr(model, field).asc() for field in pks)
query = query.order_by(*pk_order)
# Group the query.
if group_by:
for field_name in group_by:
if '.' in field_name:
field_name, field_name_in_relation = field_name.split('.')
relation_model = aliased(get_related_model(model, field_name))
field = getattr(relation_model, field_name_in_relation)
query = query.join(relation_model)
query = query.group_by(field)
else:
field = getattr(model, field_name)
query = query.group_by(field)
return query
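# Illustrative sketch (not part of the original module): a minimal call to
# :func:`search` using the filter and sort formats documented above. The
# ``Person`` model and ``session`` are stand-ins assumed to be supplied by the
# caller; this is an example, not library API.
def _example_search(session, Person):
    """Hypothetical example: adults ordered by age (descending), then name."""
    filters = [{'name': 'age', 'op': 'ge', 'val': 18}]
    sort = [('-', 'age'), ('+', 'name')]
    return search(session, Person, filters=filters, sort=sort)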
| jfinkels/flask-restless | flask_restless/search/drivers.py | Python | agpl-3.0 | 6,012 | 0 |
# -*- encoding: utf-8 -*-
from cliff.interactive import InteractiveApp
class FakeApp(object):
NAME = 'Fake'
def make_interactive_app(*command_names):
fake_command_manager = [(x, None) for x in command_names]
return InteractiveApp(FakeApp, fake_command_manager,
stdin=None, stdout=None)
def _test_completenames(expecteds, prefix):
app = make_interactive_app('hips', 'hippo', 'nonmatching')
assert set(app.completenames(prefix)) == set(expecteds)
def test_cmd2_completenames():
# cmd2.Cmd define do_help method
_test_completenames(['help'], 'he')
def test_cliff_completenames():
_test_completenames(['hips', 'hippo'], 'hip')
def test_no_completenames():
_test_completenames([], 'taz')
def test_both_completenames():
# cmd2.Cmd define do_hi and do_history methods
_test_completenames(['hi', 'history', 'hips', 'hippo'], 'hi')
def _test_completedefault(expecteds, line, begidx):
    command_names = set(['show file', 'show folder', 'show  long', 'list all'])
app = make_interactive_app(*command_names)
observeds = app.completedefault(None, line, begidx, None)
assert set(observeds) == set(expecteds)
assert set([line[:begidx] + x for x in observeds]) <= command_names
def test_empty_text_completedefault():
# line = 'show ' + begidx = 5 implies text = ''
_test_completedefault(['file', 'folder', ' long'], 'show ', 5)
def test_nonempty_text_completedefault2():
    # line = 'show f' + begidx = 5 implies text = 'f'
_test_completedefault(['file', 'folder'], 'show f', 5)
def test_long_completedefault():
    _test_completedefault(['long'], 'show  ', 6)
def test_no_completedefault():
_test_completedefault([], 'taz ', 4)
| sjsucohort6/openstack | python/venv/lib/python2.7/site-packages/cliff/tests/test_interactive.py | Python | mit | 1,743 | 0 |
import rules
from rules.predicates import is_staff, is_superuser
from .models import Policy
@rules.predicate
def is_policy_manager(user, policy=None):
if policy is None:
return bool(Policy.objects.filter(managers=user).count())
else:
return bool(policy.managers.filter(id=user.id).count())
@rules.predicate
def is_account_manager(user, account=None):
if account is None:
return bool(Policy.objects.filter(managers=user).count())
else:
return is_policy_manager(user, account.policy)
rules.add_perm('subscription.manage_policy',
is_superuser | is_staff | is_policy_manager)
rules.add_perm('subscription.manage_account',
is_superuser | is_staff | is_account_manager)
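# Illustrative sketch (not part of the original module): with django-rules'
# rules.permissions.ObjectPermissionBackend enabled in AUTHENTICATION_BACKENDS,
# the permissions registered above are checked through the usual Django API.
# ``user``, ``policy`` and ``account`` are assumed to exist in the caller's code:
#
#     user.has_perm('subscription.manage_policy', policy)
#     user.has_perm('subscription.manage_account', account)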
| thetoine/eruditorg | erudit/core/subscription/rules.py | Python | gpl-3.0 | 748 | 0.001337 |
# -*- coding: utf-8 -*-
__version__ = '$Id: 11c92177ab93084552b8d68021da6545c4b7674f $'
from pywikibot import family
# The Wikimedia Incubator family
class Family(family.WikimediaFamily):
def __init__(self):
super(Family, self).__init__()
self.name = 'incubator'
self.langs = {
'incubator': 'incubator.wikimedia.org',
}
| wpoa/wiki-imports | lib/python2.7/site-packages/pywikibot-2.0b1-py2.7.egg/pywikibot/families/incubator_family.py | Python | gpl-3.0 | 373 | 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2013-2014, Martín Gaitán
# Copyright (c) 2012-2013, Alexander Jung-Loddenkemper
# This file is part of Waliki (http://waliki.nqnwebs.com/)
# License: BSD (https://github.com/mgaitan/waliki/blob/master/LICENSE)
#===============================================================================
# DOCS
#===============================================================================
"""All supported markups
"""
#===============================================================================
# IMPORTS
#===============================================================================
import re
import docutils.core
import docutils.io
import markdown
import textwrap
from rst2html5 import HTML5Writer
import wiki
#===============================================================================
# MARKUP BASE
#===============================================================================
class Markup(object):
""" Base markup class."""
NAME = 'Text'
META_LINE = '%s: %s\n'
EXTENSION = '.txt'
HOWTO = """ """
def __init__(self, raw_content):
self.raw_content = raw_content
@classmethod
def render_meta(cls, key, value):
return cls.META_LINE % (key, value)
def process(self):
"""
        return (html, body, meta) where html is the rendered output,
        body is the editable content (text), and meta is
a dictionary with at least ['title', 'tags'] keys
"""
raise NotImplementedError("override in a subclass")
@classmethod
def howto(cls):
return cls(textwrap.dedent(cls.HOWTO)).process()[0]
#===============================================================================
# MARKDOWN
#===============================================================================
class Markdown(Markup):
NAME = 'markdown'
META_LINE = '%s: %s\n'
EXTENSION = '.md'
HOWTO = """
    This editor supports [markdown][] syntax.
* I am
* a
* list
Turns into:
* I am
* a
* list
`**bold** and *italics*` turn into **bold** and *italics*. Very easy!
Create links with `[Wiki](http://github.com/alexex/wiki)`.
    They turn into [Wiki](http://github.com/alexex/wiki).
Headers are as follows:
# Level 1
## Level 2
### Level 3
[markdown]: http://daringfireball.net/projects/markdown/
"""
def process(self):
# Processes Markdown text to HTML, returns original markdown text,
# and adds meta
        md = markdown.Markdown(extensions=['codehilite', 'fenced_code', 'meta'])
html = md.convert(self.raw_content)
meta_lines, body = self.raw_content.split('\n\n', 1)
meta = md.Meta
return html, body, meta
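# Illustrative sketch (not part of the original module): rendering a markdown
# page with a metadata header through the class above. process() splits the raw
# text on the first blank line, so the meta block must come first. Assumes the
# extensions used above (e.g. Pygments for codehilite) are installed.
def _markdown_example():
    """Hypothetical example only: returns (html, body, meta) for a small page."""
    page = Markdown("title: Home\ntags: demo\n\n# Hello\n\nSome **bold** text.")
    html, body, meta = page.process()
    # meta == {'title': ['Home'], 'tags': ['demo']}; body starts at '# Hello'
    return html, body, meta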
#===============================================================================
# RESTRUCTURED TEXT
#===============================================================================
class RestructuredText(Markup):
NAME = 'restructuredtext'
META_LINE = '.. %s: %s\n'
IMAGE_LINE = '.. image:: %(url)s'
LINK_LINE = '`%(filename)s <%(url)s>`_'
EXTENSION = '.rst'
HOWTO = """
    This editor supports `reStructuredText`_ syntax::
* I am
* a
* list
Turns into:
* I am
* a
* list
``**bold** and *italics*`` turn into **bold** and *italics*. Very easy!
Create links with ```Wiki <http://github.com/alexex/wiki>`_``.
They turn into `Wiki <https://github.com/alexex/wiki>`_.
Headers are just any underline (and, optionally, overline).
For example::
Level 1
*******
Level 2
-------
Level 3
+++++++
.. _reStructuredText: http://docutils.sourceforge.net/rst.html
"""
def process(self):
settings = {'initial_header_level': 2,
'record_dependencies': True,
'stylesheet_path': None,
'link_stylesheet': True,
'syntax_highlight': 'short',
}
html = self._rst2html(self.raw_content,
settings_overrides=settings)
        # Convert unknown links to internal wiki links.
# Examples:
# Something_ will link to '/something'
# `something great`_ to '/something_great'
# `another thing <thing>`_ '/thing'
refs = re.findall(r'Unknown target name: "(.*)"', html)
if refs:
content = self.raw_content + self.get_autolinks(refs)
html = self._rst2html(content, settings_overrides=settings)
meta_lines, body = self.raw_content.split('\n\n', 1)
meta = self._parse_meta(meta_lines.split('\n'))
return html, body, meta
def get_autolinks(self, refs):
autolinks = '\n'.join(['.. _%s: /%s' % (ref, wiki.urlify(ref, False))
for ref in refs])
return '\n\n' + autolinks
def _rst2html(self, source, source_path=None,
source_class=docutils.io.StringInput,
destination_path=None, reader=None, reader_name='standalone',
parser=None, parser_name='restructuredtext', writer=None,
writer_name=None, settings=None, settings_spec=None,
settings_overrides=None, config_section=None,
enable_exit_status=None):
if not writer:
writer = HTML5Writer()
# Taken from Nikola
# http://bit.ly/14CmQyh
output, pub = docutils.core.publish_programmatically(
source=source, source_path=source_path, source_class=source_class,
destination_class=docutils.io.StringOutput,
destination=None, destination_path=destination_path,
reader=reader, reader_name=reader_name,
parser=parser, parser_name=parser_name,
writer=writer, writer_name=writer_name,
settings=settings, settings_spec=settings_spec,
settings_overrides=settings_overrides,
config_section=config_section,
enable_exit_status=enable_exit_status)
return pub.writer.parts['body']
def _parse_meta(self, lines):
""" Parse Meta-Data. Taken from Python-Markdown"""
META_RE = re.compile(r'^\.\.\s(?P<key>.*?): (?P<value>.*)')
meta = {}
key = None
for line in lines:
if line.strip() == '':
continue
m1 = META_RE.match(line)
if m1:
key = m1.group('key').lower().strip()
value = m1.group('value').strip()
try:
meta[key].append(value)
except KeyError:
meta[key] = [value]
return meta
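# Illustrative sketch (not part of the original module): round-tripping metadata
# with the helpers above. render_meta() emits ".. key: value" comment lines and
# _parse_meta() reads them back into a dict of lists:
#
#     line = RestructuredText.render_meta('title', 'Home')  # '.. title: Home\n'
#     RestructuredText('')._parse_meta([line])               # {'title': ['Home']}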
#===============================================================================
# MAIN
#===============================================================================
if __name__ == "__main__":
print(__doc__)
| mgaitan/waliki_flask | waliki/markup.py | Python | bsd-3-clause | 7,181 | 0.003343 |
import sys
import errno
import json
import os
from argparse import ArgumentParser
sys.path.insert(1, 'py-bindings')
from squad import SQUADConverter
def get_samples(test_file, vocab_file, output_dir):
print("Test file:", test_file)
print("Vocab file:", vocab_file)
print("Output dir:", output_dir)
max_seq_length = 384
max_query_length = 64
doc_stride = 128
lower_case = False
sqd = SQUADConverter(test_file, vocab_file, max_seq_length, max_query_length, doc_stride, lower_case)
samples = sqd.convert()
# Dump samples to json
print("--Dumping examples to json--")
os.makedirs(output_dir, exist_ok=True)
output_file = output_dir + "/squad_examples.json"
    with open(output_file, 'w', encoding='utf-8') as fid:
        json.dump({'samples': samples}, fid, ensure_ascii=False, indent=4)
    # Return the number of samples that were converted and dumped
    return len(samples)
def get_arguments():
parser = ArgumentParser()
parser.add_argument("--test_file", type=str, help="Path to squad test json file", required=True)
parser.add_argument("--vocab_file", type=str, help="Path to vocab.txt file", required=True)
parser.add_argument("--max_seq_length", type=int, help="Max sequence length", default=384)
parser.add_argument("--max_query_length", type=int, help="Max query length", default=64)
parser.add_argument("--doc_stride", type=int, help="Document stride", default=128)
parser.add_argument("--lower_case", type=bool, help="Lower case", default=1)
parser.add_argument("--output_dir", type=str, help="Output directory for saved json", default="samples_cache")
return parser.parse_args()
def main():
args = get_arguments()
if not os.path.isfile(args.test_file):
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), args.test_file)
if not os.path.isfile(args.vocab_file):
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), args.vocab_file)
sqd = SQUADConverter(args.test_file, args.vocab_file, args.max_seq_length, args.max_query_length, args.doc_stride, args.lower_case)
# Convert examples
print("--Reading samples--")
samples = sqd.convert()
    # Dump samples to json
print("--Dumping examples to json--")
os.makedirs(args.output_dir, exist_ok=True)
output_file = args.output_dir + "/squad_examples.json"
with open(output_file, 'w', encoding='utf-8') as fid:
json.dump({'samples':samples}, fid, ensure_ascii=False, indent=4)
if __name__=="__main__":
main()
| mlperf/inference_results_v0.7 | closed/Intel/code/resnet/resnet-ov/py-bindings/convert.py | Python | apache-2.0 | 2,497 | 0.007609 |
"""
Support for APT (Advanced Packaging Tool)
.. important::
If you feel that Salt should be using this module to manage packages on a
minion, and it is using a different module (or gives an error similar to
*'pkg.install' is not available*), see :ref:`here
<module-provider-override>`.
For repository management, the ``python-apt`` package must be installed.
"""
import copy
import datetime
import fnmatch
import logging
import os
import pathlib
import re
import shutil
import tempfile
import time
from urllib.error import HTTPError
from urllib.request import Request as _Request
from urllib.request import urlopen as _urlopen
import salt.config
import salt.syspaths
import salt.utils.args
import salt.utils.data
import salt.utils.environment
import salt.utils.files
import salt.utils.functools
import salt.utils.itertools
import salt.utils.json
import salt.utils.path
import salt.utils.pkg
import salt.utils.pkg.deb
import salt.utils.stringutils
import salt.utils.systemd
import salt.utils.versions
import salt.utils.yaml
from salt.exceptions import (
CommandExecutionError,
CommandNotFoundError,
MinionError,
SaltInvocationError,
)
from salt.modules.cmdmod import _parse_env
log = logging.getLogger(__name__)
# pylint: disable=import-error
try:
import apt.cache
import apt.debfile
from aptsources.sourceslist import (
SourceEntry,
SourcesList,
)
HAS_APT = True
except ImportError:
HAS_APT = False
try:
import apt_pkg
HAS_APTPKG = True
except ImportError:
HAS_APTPKG = False
try:
import softwareproperties.ppa
HAS_SOFTWAREPROPERTIES = True
except ImportError:
HAS_SOFTWAREPROPERTIES = False
# pylint: enable=import-error
APT_LISTS_PATH = "/var/lib/apt/lists"
PKG_ARCH_SEPARATOR = ":"
# Source format for urllib fallback on PPA handling
LP_SRC_FORMAT = "deb http://ppa.launchpad.net/{0}/{1}/ubuntu {2} main"
LP_PVT_SRC_FORMAT = "deb https://{0}private-ppa.launchpad.net/{1}/{2}/ubuntu {3} main"
_MODIFY_OK = frozenset(["uri", "comps", "architectures", "disabled", "file", "dist"])
DPKG_ENV_VARS = {
"APT_LISTBUGS_FRONTEND": "none",
"APT_LISTCHANGES_FRONTEND": "none",
"DEBIAN_FRONTEND": "noninteractive",
"UCF_FORCE_CONFFOLD": "1",
}
# Define the module's virtual name
__virtualname__ = "pkg"
def __virtual__():
"""
Confirm this module is on a Debian-based system
"""
# If your minion is running an OS which is Debian-based but does not have
# an "os_family" grain of Debian, then the proper fix is NOT to check for
# the minion's "os_family" grain here in the __virtual__. The correct fix
# is to add the value from the minion's "os" grain to the _OS_FAMILY_MAP
# dict in salt/grains/core.py, so that we assign the correct "os_family"
# grain to the minion.
if __grains__.get("os_family") == "Debian":
return __virtualname__
return False, "The pkg module could not be loaded: unsupported OS family"
def __init__(opts):
"""
For Debian and derivative systems, set up
a few env variables to keep apt happy and
non-interactive.
"""
if __virtual__() == __virtualname__:
# Export these puppies so they persist
os.environ.update(DPKG_ENV_VARS)
if not HAS_APT:
class SourceEntry: # pylint: disable=function-redefined
def __init__(self, line, file=None):
self.invalid = False
self.comps = []
self.disabled = False
self.comment = ""
self.dist = ""
self.type = ""
self.uri = ""
self.line = line
self.architectures = []
self.file = file
if not self.file:
self.file = str(pathlib.Path(os.sep, "etc", "apt", "sources.list"))
self._parse_sources(line)
def repo_line(self):
"""
Return the repo line for the sources file
"""
repo_line = []
if self.invalid:
return self.line
if self.disabled:
repo_line.append("#")
repo_line.append(self.type)
if self.architectures:
repo_line.append("[arch={}]".format(" ".join(self.architectures)))
repo_line = repo_line + [self.uri, self.dist, " ".join(self.comps)]
if self.comment:
repo_line.append("#{}".format(self.comment))
return " ".join(repo_line) + "\n"
def _parse_sources(self, line):
"""
Parse lines from sources files
"""
self.disabled = False
repo_line = self.line.strip().split()
if not repo_line:
self.invalid = True
return False
if repo_line[0].startswith("#"):
repo_line.pop(0)
self.disabled = True
if repo_line[0] not in ["deb", "deb-src", "rpm", "rpm-src"]:
self.invalid = True
return False
if repo_line[1].startswith("["):
opts = re.search(r"\[.*\]", self.line).group(0).strip("[]")
repo_line = [x for x in (line.strip("[]") for line in repo_line) if x]
for opt in opts.split():
if opt.startswith("arch"):
self.architectures.extend(opt.split("=", 1)[1].split(","))
try:
repo_line.pop(repo_line.index(opt))
except ValueError:
repo_line.pop(repo_line.index("[" + opt + "]"))
self.type = repo_line[0]
self.uri = repo_line[1]
self.dist = repo_line[2]
self.comps = repo_line[3:]
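    # Illustrative sketch (not part of the original module): how the fallback
    # parser above splits a typical sources.list line (repo and components are
    # example values only).
    #
    #     entry = SourceEntry("deb [arch=amd64] http://archive.ubuntu.com/ubuntu focal main universe")
    #     # entry.type == "deb"; entry.architectures == ["amd64"]
    #     # entry.uri == "http://archive.ubuntu.com/ubuntu"; entry.dist == "focal"
    #     # entry.comps == ["main", "universe"]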
class SourcesList: # pylint: disable=function-redefined
def __init__(self):
self.list = []
self.files = [
pathlib.Path(os.sep, "etc", "apt", "sources.list"),
pathlib.Path(os.sep, "etc", "apt", "sources.list.d"),
]
for file in self.files:
if file.is_dir():
for fp in file.glob("**/*.list"):
self.add_file(file=fp)
else:
self.add_file(file)
def __iter__(self):
yield from self.list
def add_file(self, file):
"""
Add the lines of a file to self.list
"""
if file.is_file():
with salt.utils.files.fopen(file) as source:
for line in source:
self.list.append(SourceEntry(line, file=str(file)))
else:
log.debug("The apt sources file %s does not exist", file)
def add(self, type, uri, dist, orig_comps, architectures):
repo_line = [
type,
" [arch={}] ".format(" ".join(architectures)) if architectures else "",
uri,
dist,
" ".join(orig_comps),
]
return SourceEntry(" ".join(repo_line))
def remove(self, source):
"""
remove a source from the list of sources
"""
self.list.remove(source)
def save(self):
"""
write all of the sources from the list of sources
to the file.
"""
filemap = {}
with tempfile.TemporaryDirectory() as tmpdir:
for source in self.list:
fname = pathlib.Path(tmpdir, pathlib.Path(source.file).name)
with salt.utils.files.fopen(fname, "a") as fp:
fp.write(source.repo_line())
if source.file not in filemap:
filemap[source.file] = {"tmp": fname}
for fp in filemap:
shutil.move(filemap[fp]["tmp"], fp)
def _get_ppa_info_from_launchpad(owner_name, ppa_name):
"""
Idea from softwareproperties.ppa.
Uses urllib2 which sacrifices server cert verification.
This is used as fall-back code or for secure PPAs
:param owner_name:
:param ppa_name:
:return:
"""
lp_url = "https://launchpad.net/api/1.0/~{}/+archive/{}".format(
owner_name, ppa_name
)
request = _Request(lp_url, headers={"Accept": "application/json"})
lp_page = _urlopen(request)
return salt.utils.json.load(lp_page)
def _reconstruct_ppa_name(owner_name, ppa_name):
"""
Stringify PPA name from args.
"""
return "ppa:{}/{}".format(owner_name, ppa_name)
def _call_apt(args, scope=True, **kwargs):
"""
Call apt* utilities.
"""
cmd = []
if (
scope
and salt.utils.systemd.has_scope(__context__)
and __salt__["config.get"]("systemd.scope", True)
):
cmd.extend(["systemd-run", "--scope", "--description", '"{}"'.format(__name__)])
cmd.extend(args)
params = {
"output_loglevel": "trace",
"python_shell": False,
"env": salt.utils.environment.get_module_environment(globals()),
}
params.update(kwargs)
cmd_ret = __salt__["cmd.run_all"](cmd, **params)
count = 0
while "Could not get lock" in cmd_ret.get("stderr", "") and count < 10:
count += 1
log.warning("Waiting for dpkg lock release: retrying... %s/100", count)
time.sleep(2 ** count)
cmd_ret = __salt__["cmd.run_all"](cmd, **params)
return cmd_ret
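# Illustrative note (not part of the original module): with systemd>=205 and the
# ``systemd.scope`` config option enabled, the helper above effectively runs
#
#     systemd-run --scope --description "<this module's __name__>" apt-get -q update
#
# so apt/dpkg children are detached from the salt-minion control group and
# survive a restart of the service; on dpkg lock contention it retries with an
# exponential backoff (time.sleep(2 ** count)).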
def _warn_software_properties(repo):
"""
Warn of missing python-software-properties package.
"""
log.warning(
"The 'python-software-properties' package is not installed. "
"For more accurate support of PPA repositories, you should "
"install this package."
)
log.warning("Best guess at ppa format: %s", repo)
def normalize_name(name):
"""
Strips the architecture from the specified package name, if necessary.
CLI Example:
.. code-block:: bash
salt '*' pkg.normalize_name zsh:amd64
"""
try:
pkgname, pkgarch = name.rsplit(PKG_ARCH_SEPARATOR, 1)
except ValueError:
pkgname = name
pkgarch = __grains__["osarch"]
return pkgname if pkgarch in (__grains__["osarch"], "all", "any") else name
def parse_arch(name):
"""
Parse name and architecture from the specified package name.
CLI Example:
.. code-block:: bash
salt '*' pkg.parse_arch zsh:amd64
"""
try:
_name, _arch = name.rsplit(PKG_ARCH_SEPARATOR, 1)
except ValueError:
_name, _arch = name, None
return {"name": _name, "arch": _arch}
def latest_version(*names, **kwargs):
"""
Return the latest version of the named package available for upgrade or
installation. If more than one package name is specified, a dict of
name/version pairs is returned.
If the latest version of a given package is already installed, an empty
string will be returned for that package.
A specific repo can be requested using the ``fromrepo`` keyword argument.
cache_valid_time
.. versionadded:: 2016.11.0
Skip refreshing the package database if refresh has already occurred within
<value> seconds
CLI Example:
.. code-block:: bash
salt '*' pkg.latest_version <package name>
salt '*' pkg.latest_version <package name> fromrepo=unstable
salt '*' pkg.latest_version <package1> <package2> <package3> ...
"""
refresh = salt.utils.data.is_true(kwargs.pop("refresh", True))
show_installed = salt.utils.data.is_true(kwargs.pop("show_installed", False))
if "repo" in kwargs:
raise SaltInvocationError(
"The 'repo' argument is invalid, use 'fromrepo' instead"
)
fromrepo = kwargs.pop("fromrepo", None)
cache_valid_time = kwargs.pop("cache_valid_time", 0)
if not names:
return ""
ret = {}
# Initialize the dict with empty strings
for name in names:
ret[name] = ""
pkgs = list_pkgs(versions_as_list=True)
repo = ["-o", "APT::Default-Release={}".format(fromrepo)] if fromrepo else None
# Refresh before looking for the latest version available
if refresh:
refresh_db(cache_valid_time)
for name in names:
cmd = ["apt-cache", "-q", "policy", name]
if repo is not None:
cmd.extend(repo)
out = _call_apt(cmd, scope=False)
candidate = ""
for line in salt.utils.itertools.split(out["stdout"], "\n"):
if "Candidate" in line:
comps = line.split()
if len(comps) >= 2:
candidate = comps[-1]
if candidate.lower() == "(none)":
candidate = ""
break
installed = pkgs.get(name, [])
if not installed:
ret[name] = candidate
elif installed and show_installed:
ret[name] = candidate
elif candidate:
# If there are no installed versions that are greater than or equal
# to the install candidate, then the candidate is an upgrade, so
# add it to the return dict
if not any(
salt.utils.versions.compare(
ver1=x, oper=">=", ver2=candidate, cmp_func=version_cmp
)
for x in installed
):
ret[name] = candidate
# Return a string if only one package name passed
if len(names) == 1:
return ret[names[0]]
return ret
# available_version is being deprecated
available_version = salt.utils.functools.alias_function(
latest_version, "available_version"
)
def version(*names, **kwargs):
"""
Returns a string representing the package version or an empty string if not
installed. If more than one package name is specified, a dict of
name/version pairs is returned.
CLI Example:
.. code-block:: bash
salt '*' pkg.version <package name>
salt '*' pkg.version <package1> <package2> <package3> ...
"""
return __salt__["pkg_resource.version"](*names, **kwargs)
def refresh_db(cache_valid_time=0, failhard=False, **kwargs):
"""
Updates the APT database to latest packages based upon repositories
Returns a dict, with the keys being package databases and the values being
the result of the update attempt. Values can be one of the following:
- ``True``: Database updated successfully
- ``False``: Problem updating database
- ``None``: Database already up-to-date
cache_valid_time
.. versionadded:: 2016.11.0
Skip refreshing the package database if refresh has already occurred within
<value> seconds
failhard
If False, return results of Err lines as ``False`` for the package database that
encountered the error.
If True, raise an error with a list of the package databases that encountered
errors.
CLI Example:
.. code-block:: bash
salt '*' pkg.refresh_db
"""
# Remove rtag file to keep multiple refreshes from happening in pkg states
salt.utils.pkg.clear_rtag(__opts__)
failhard = salt.utils.data.is_true(failhard)
ret = {}
error_repos = list()
if cache_valid_time:
try:
latest_update = os.stat(APT_LISTS_PATH).st_mtime
now = time.time()
log.debug(
"now: %s, last update time: %s, expire after: %s seconds",
now,
latest_update,
cache_valid_time,
)
if latest_update + cache_valid_time > now:
return ret
except TypeError as exp:
log.warning(
"expected integer for cache_valid_time parameter, failed with: %s", exp
)
except OSError as exp:
log.warning("could not stat cache directory due to: %s", exp)
call = _call_apt(["apt-get", "-q", "update"], scope=False)
if call["retcode"] != 0:
comment = ""
if "stderr" in call:
comment += call["stderr"]
raise CommandExecutionError(comment)
else:
out = call["stdout"]
for line in out.splitlines():
cols = line.split()
if not cols:
continue
ident = " ".join(cols[1:])
if "Get" in cols[0]:
# Strip filesize from end of line
ident = re.sub(r" \[.+B\]$", "", ident)
ret[ident] = True
elif "Ign" in cols[0]:
ret[ident] = False
elif "Hit" in cols[0]:
ret[ident] = None
elif "Err" in cols[0]:
ret[ident] = False
error_repos.append(ident)
if failhard and error_repos:
raise CommandExecutionError(
"Error getting repos: {}".format(", ".join(error_repos))
)
return ret
def install(
name=None,
refresh=False,
fromrepo=None,
skip_verify=False,
debconf=None,
pkgs=None,
sources=None,
reinstall=False,
downloadonly=False,
ignore_epoch=False,
**kwargs
):
"""
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands which modify installed packages from the
``salt-minion`` daemon's control group. This is done to keep systemd
from killing any apt-get/dpkg commands spawned by Salt when the
``salt-minion`` service is restarted. (see ``KillMode`` in the
`systemd.kill(5)`_ manpage for more information). If desired, usage of
`systemd-run(1)`_ can be suppressed by setting a :mod:`config option
<salt.modules.config.get>` called ``systemd.scope``, with a value of
``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
.. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html
Install the passed package, add refresh=True to update the dpkg database.
name
The name of the package to be installed. Note that this parameter is
ignored if either "pkgs" or "sources" is passed. Additionally, please
note that this option can only be used to install packages from a
software repository. To install a package file manually, use the
"sources" option.
32-bit packages can be installed on 64-bit systems by appending the
architecture designation (``:i386``, etc.) to the end of the package
name.
CLI Example:
.. code-block:: bash
salt '*' pkg.install <package name>
refresh
Whether or not to refresh the package database before installing.
cache_valid_time
.. versionadded:: 2016.11.0
Skip refreshing the package database if refresh has already occurred within
<value> seconds
fromrepo
Specify a package repository to install from
(e.g., ``apt-get -t unstable install somepackage``)
skip_verify
Skip the GPG verification check (e.g., ``--allow-unauthenticated``, or
``--force-bad-verify`` for install from package file).
debconf
Provide the path to a debconf answers file, processed before
installation.
version
Install a specific version of the package, e.g. 1.2.3~0ubuntu0. Ignored
if "pkgs" or "sources" is passed.
.. versionchanged:: 2018.3.0
version can now contain comparison operators (e.g. ``>1.2.3``,
``<=2.0``, etc.)
reinstall : False
Specifying reinstall=True will use ``apt-get install --reinstall``
rather than simply ``apt-get install`` for requested packages that are
already installed.
If a version is specified with the requested package, then ``apt-get
install --reinstall`` will only be used if the installed version
matches the requested version.
.. versionadded:: 2015.8.0
ignore_epoch : False
Only used when the version of a package is specified using a comparison
operator (e.g. ``>4.1``). If set to ``True``, then the epoch will be
ignored when comparing the currently-installed version to the desired
version.
.. versionadded:: 2018.3.0
Multiple Package Installation Options:
pkgs
A list of packages to install from a software repository. Must be
passed as a python list.
CLI Example:
.. code-block:: bash
salt '*' pkg.install pkgs='["foo", "bar"]'
salt '*' pkg.install pkgs='["foo", {"bar": "1.2.3-0ubuntu0"}]'
sources
A list of DEB packages to install. Must be passed as a list of dicts,
with the keys being package names, and the values being the source URI
or local path to the package. Dependencies are automatically resolved
and marked as auto-installed.
32-bit packages can be installed on 64-bit systems by appending the
architecture designation (``:i386``, etc.) to the end of the package
name.
.. versionchanged:: 2014.7.0
CLI Example:
.. code-block:: bash
salt '*' pkg.install sources='[{"foo": "salt://foo.deb"},{"bar": "salt://bar.deb"}]'
force_yes
Passes ``--force-yes`` to the apt-get command. Don't use this unless
you know what you're doing.
.. versionadded:: 0.17.4
install_recommends
Whether to install the packages marked as recommended. Default is True.
.. versionadded:: 2015.5.0
only_upgrade
Only upgrade the packages, if they are already installed. Default is False.
.. versionadded:: 2015.5.0
force_conf_new
Always install the new version of any configuration files.
.. versionadded:: 2015.8.0
Returns a dict containing the new package names and versions::
{'<package>': {'old': '<old-version>',
'new': '<new-version>'}}
"""
_refresh_db = False
if salt.utils.data.is_true(refresh):
_refresh_db = True
if "version" in kwargs and kwargs["version"]:
_refresh_db = False
_latest_version = latest_version(name, refresh=False, show_installed=True)
_version = kwargs.get("version")
# If the versions don't match, refresh is True, otherwise no need
# to refresh
if not _latest_version == _version:
_refresh_db = True
if pkgs:
_refresh_db = False
for pkg in pkgs:
if isinstance(pkg, dict):
_name = next(iter(pkg.keys()))
_latest_version = latest_version(
_name, refresh=False, show_installed=True
)
_version = pkg[_name]
# If the versions don't match, refresh is True, otherwise
# no need to refresh
if not _latest_version == _version:
_refresh_db = True
else:
# No version specified, so refresh should be True
_refresh_db = True
if debconf:
__salt__["debconf.set_file"](debconf)
try:
pkg_params, pkg_type = __salt__["pkg_resource.parse_targets"](
name, pkgs, sources, **kwargs
)
except MinionError as exc:
raise CommandExecutionError(exc)
# Support old "repo" argument
repo = kwargs.get("repo", "")
if not fromrepo and repo:
fromrepo = repo
if not pkg_params:
return {}
cmd_prefix = []
old = list_pkgs()
targets = []
downgrade = []
to_reinstall = {}
errors = []
if pkg_type == "repository":
pkg_params_items = list(pkg_params.items())
has_comparison = [
x
for x, y in pkg_params_items
if y is not None and (y.startswith("<") or y.startswith(">"))
]
_available = (
list_repo_pkgs(*has_comparison, byrepo=False, **kwargs)
if has_comparison
else {}
)
# Build command prefix
cmd_prefix.extend(["apt-get", "-q", "-y"])
if kwargs.get("force_yes", False):
cmd_prefix.append("--force-yes")
if "force_conf_new" in kwargs and kwargs["force_conf_new"]:
cmd_prefix.extend(["-o", "DPkg::Options::=--force-confnew"])
else:
cmd_prefix.extend(["-o", "DPkg::Options::=--force-confold"])
cmd_prefix += ["-o", "DPkg::Options::=--force-confdef"]
if "install_recommends" in kwargs:
if not kwargs["install_recommends"]:
cmd_prefix.append("--no-install-recommends")
else:
cmd_prefix.append("--install-recommends")
if "only_upgrade" in kwargs and kwargs["only_upgrade"]:
cmd_prefix.append("--only-upgrade")
if skip_verify:
cmd_prefix.append("--allow-unauthenticated")
if fromrepo:
cmd_prefix.extend(["-t", fromrepo])
cmd_prefix.append("install")
else:
pkg_params_items = []
for pkg_source in pkg_params:
if "lowpkg.bin_pkg_info" in __salt__:
deb_info = __salt__["lowpkg.bin_pkg_info"](pkg_source)
else:
deb_info = None
if deb_info is None:
log.error(
"pkg.install: Unable to get deb information for %s. "
"Version comparisons will be unavailable.",
pkg_source,
)
pkg_params_items.append([pkg_source])
else:
pkg_params_items.append(
[deb_info["name"], pkg_source, deb_info["version"]]
)
# Build command prefix
if "force_conf_new" in kwargs and kwargs["force_conf_new"]:
cmd_prefix.extend(["dpkg", "-i", "--force-confnew"])
else:
cmd_prefix.extend(["dpkg", "-i", "--force-confold"])
if skip_verify:
cmd_prefix.append("--force-bad-verify")
if HAS_APT:
_resolve_deps(name, pkg_params, **kwargs)
for pkg_item_list in pkg_params_items:
if pkg_type == "repository":
pkgname, version_num = pkg_item_list
if name and pkgs is None and kwargs.get("version") and len(pkg_params) == 1:
# Only use the 'version' param if 'name' was not specified as a
# comma-separated list
version_num = kwargs["version"]
else:
try:
pkgname, pkgpath, version_num = pkg_item_list
except ValueError:
pkgname = None
pkgpath = pkg_item_list[0]
version_num = None
if version_num is None:
if pkg_type == "repository":
if reinstall and pkgname in old:
to_reinstall[pkgname] = pkgname
else:
targets.append(pkgname)
else:
targets.append(pkgpath)
else:
# If we are installing a package file and not one from the repo,
# and version_num is not None, then we can assume that pkgname is
# not None, since the only way version_num is not None is if DEB
# metadata parsing was successful.
if pkg_type == "repository":
# Remove leading equals sign(s) to keep from building a pkgstr
# with multiple equals (which would be invalid)
version_num = version_num.lstrip("=")
if pkgname in has_comparison:
candidates = _available.get(pkgname, [])
target = salt.utils.pkg.match_version(
version_num,
candidates,
cmp_func=version_cmp,
ignore_epoch=ignore_epoch,
)
if target is None:
errors.append(
"No version matching '{}{}' could be found "
"(available: {})".format(
pkgname,
version_num,
", ".join(candidates) if candidates else None,
)
)
continue
else:
version_num = target
pkgstr = "{}={}".format(pkgname, version_num)
else:
pkgstr = pkgpath
cver = old.get(pkgname, "")
if (
reinstall
and cver
and salt.utils.versions.compare(
ver1=version_num, oper="==", ver2=cver, cmp_func=version_cmp
)
):
to_reinstall[pkgname] = pkgstr
elif not cver or salt.utils.versions.compare(
ver1=version_num, oper=">=", ver2=cver, cmp_func=version_cmp
):
targets.append(pkgstr)
else:
downgrade.append(pkgstr)
if fromrepo and not sources:
log.info("Targeting repo '%s'", fromrepo)
cmds = []
all_pkgs = []
if targets:
all_pkgs.extend(targets)
cmd = copy.deepcopy(cmd_prefix)
cmd.extend(targets)
cmds.append(cmd)
if downgrade:
cmd = copy.deepcopy(cmd_prefix)
if pkg_type == "repository" and "--force-yes" not in cmd:
# Downgrading requires --force-yes. Insert this before 'install'
cmd.insert(-1, "--force-yes")
cmd.extend(downgrade)
cmds.append(cmd)
if downloadonly:
cmd.append("--download-only")
if to_reinstall:
all_pkgs.extend(to_reinstall)
cmd = copy.deepcopy(cmd_prefix)
if not sources:
cmd.append("--reinstall")
cmd.extend([x for x in to_reinstall.values()])
cmds.append(cmd)
if not cmds:
ret = {}
else:
cache_valid_time = kwargs.pop("cache_valid_time", 0)
if _refresh_db:
refresh_db(cache_valid_time)
env = _parse_env(kwargs.get("env"))
env.update(DPKG_ENV_VARS.copy())
hold_pkgs = get_selections(state="hold").get("hold", [])
# all_pkgs contains the argument to be passed to apt-get install, which
# when a specific version is requested will be in the format
# name=version. Strip off the '=' if present so we can compare the
# held package names against the packages we are trying to install.
targeted_names = [x.split("=")[0] for x in all_pkgs]
to_unhold = [x for x in hold_pkgs if x in targeted_names]
if to_unhold:
unhold(pkgs=to_unhold)
for cmd in cmds:
out = _call_apt(cmd)
if out["retcode"] != 0 and out["stderr"]:
errors.append(out["stderr"])
__context__.pop("pkg.list_pkgs", None)
new = list_pkgs()
ret = salt.utils.data.compare_dicts(old, new)
for pkgname in to_reinstall:
if pkgname not in ret or pkgname in old:
ret.update(
{
pkgname: {
"old": old.get(pkgname, ""),
"new": new.get(pkgname, ""),
}
}
)
if to_unhold:
hold(pkgs=to_unhold)
if errors:
raise CommandExecutionError(
"Problem encountered installing package(s)",
info={"errors": errors, "changes": ret},
)
return ret
def _uninstall(action="remove", name=None, pkgs=None, **kwargs):
"""
remove and purge do identical things but with different apt-get commands,
this function performs the common logic.
"""
try:
pkg_params = __salt__["pkg_resource.parse_targets"](name, pkgs)[0]
except MinionError as exc:
raise CommandExecutionError(exc)
old = list_pkgs()
old_removed = list_pkgs(removed=True)
targets = [x for x in pkg_params if x in old]
if action == "purge":
targets.extend([x for x in pkg_params if x in old_removed])
if not targets:
return {}
cmd = ["apt-get", "-q", "-y", action]
cmd.extend(targets)
env = _parse_env(kwargs.get("env"))
env.update(DPKG_ENV_VARS.copy())
out = _call_apt(cmd, env=env)
if out["retcode"] != 0 and out["stderr"]:
errors = [out["stderr"]]
else:
errors = []
__context__.pop("pkg.list_pkgs", None)
new = list_pkgs()
new_removed = list_pkgs(removed=True)
changes = salt.utils.data.compare_dicts(old, new)
if action == "purge":
ret = {
"removed": salt.utils.data.compare_dicts(old_removed, new_removed),
"installed": changes,
}
else:
ret = changes
if errors:
raise CommandExecutionError(
"Problem encountered removing package(s)",
info={"errors": errors, "changes": ret},
)
return ret
def autoremove(list_only=False, purge=False):
"""
.. versionadded:: 2015.5.0
Remove packages not required by another package using ``apt-get
autoremove``.
list_only : False
Only retrieve the list of packages to be auto-removed, do not actually
perform the auto-removal.
purge : False
Also remove package config data when autoremoving packages.
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt '*' pkg.autoremove
salt '*' pkg.autoremove list_only=True
salt '*' pkg.autoremove purge=True
"""
cmd = []
if list_only:
ret = []
cmd.extend(["apt-get", "--assume-no"])
if purge:
cmd.append("--purge")
cmd.append("autoremove")
out = _call_apt(cmd, ignore_retcode=True)["stdout"]
found = False
for line in out.splitlines():
if found is True:
if line.startswith(" "):
ret.extend(line.split())
else:
found = False
elif "The following packages will be REMOVED:" in line:
found = True
ret.sort()
return ret
else:
old = list_pkgs()
cmd.extend(["apt-get", "--assume-yes"])
if purge:
cmd.append("--purge")
cmd.append("autoremove")
_call_apt(cmd, ignore_retcode=True)
__context__.pop("pkg.list_pkgs", None)
new = list_pkgs()
return salt.utils.data.compare_dicts(old, new)
def remove(name=None, pkgs=None, **kwargs):
"""
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands which modify installed packages from the
``salt-minion`` daemon's control group. This is done to keep systemd
from killing any apt-get/dpkg commands spawned by Salt when the
``salt-minion`` service is restarted. (see ``KillMode`` in the
`systemd.kill(5)`_ manpage for more information). If desired, usage of
`systemd-run(1)`_ can be suppressed by setting a :mod:`config option
<salt.modules.config.get>` called ``systemd.scope``, with a value of
``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
.. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html
Remove packages using ``apt-get remove``.
name
The name of the package to be deleted.
Multiple Package Options:
pkgs
A list of packages to delete. Must be passed as a python list. The
``name`` parameter will be ignored if this option is passed.
.. versionadded:: 0.16.0
Returns a dict containing the changes.
CLI Example:
.. code-block:: bash
salt '*' pkg.remove <package name>
salt '*' pkg.remove <package1>,<package2>,<package3>
salt '*' pkg.remove pkgs='["foo", "bar"]'
"""
return _uninstall(action="remove", name=name, pkgs=pkgs, **kwargs)
def purge(name=None, pkgs=None, **kwargs):
"""
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands which modify installed packages from the
``salt-minion`` daemon's control group. This is done to keep systemd
from killing any apt-get/dpkg commands spawned by Salt when the
``salt-minion`` service is restarted. (see ``KillMode`` in the
`systemd.kill(5)`_ manpage for more information). If desired, usage of
`systemd-run(1)`_ can be suppressed by setting a :mod:`config option
<salt.modules.config.get>` called ``systemd.scope``, with a value of
``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
.. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html
Remove packages via ``apt-get purge`` along with all configuration files.
name
The name of the package to be deleted.
Multiple Package Options:
pkgs
A list of packages to delete. Must be passed as a python list. The
``name`` parameter will be ignored if this option is passed.
.. versionadded:: 0.16.0
Returns a dict containing the changes.
CLI Example:
.. code-block:: bash
salt '*' pkg.purge <package name>
salt '*' pkg.purge <package1>,<package2>,<package3>
salt '*' pkg.purge pkgs='["foo", "bar"]'
"""
return _uninstall(action="purge", name=name, pkgs=pkgs, **kwargs)
def upgrade(refresh=True, dist_upgrade=False, **kwargs):
"""
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands which modify installed packages from the
``salt-minion`` daemon's control group. This is done to keep systemd
from killing any apt-get/dpkg commands spawned by Salt when the
``salt-minion`` service is restarted. (see ``KillMode`` in the
`systemd.kill(5)`_ manpage for more information). If desired, usage of
`systemd-run(1)`_ can be suppressed by setting a :mod:`config option
<salt.modules.config.get>` called ``systemd.scope``, with a value of
``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
.. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html
Upgrades all packages via ``apt-get upgrade`` or ``apt-get dist-upgrade``
if ``dist_upgrade`` is ``True``.
Returns a dictionary containing the changes:
.. code-block:: python
{'<package>': {'old': '<old-version>',
'new': '<new-version>'}}
dist_upgrade
Whether to perform the upgrade using dist-upgrade vs upgrade. Default
is to use upgrade.
.. versionadded:: 2014.7.0
refresh : True
If ``True``, the apt cache will be refreshed first. By default,
this is ``True`` and a refresh is performed.
cache_valid_time
.. versionadded:: 2016.11.0
Skip refreshing the package database if refresh has already occurred within
<value> seconds
download_only (or downloadonly)
Only download the packages, don't unpack or install them. Use
downloadonly to be in line with yum and zypper module.
.. versionadded:: 2018.3.0
force_conf_new
Always install the new version of any configuration files.
.. versionadded:: 2015.8.0
allow_downgrades
Allow apt to downgrade packages without a prompt.
.. versionadded:: 3005
CLI Example:
.. code-block:: bash
salt '*' pkg.upgrade
"""
cache_valid_time = kwargs.pop("cache_valid_time", 0)
if salt.utils.data.is_true(refresh):
refresh_db(cache_valid_time)
old = list_pkgs()
if "force_conf_new" in kwargs and kwargs["force_conf_new"]:
dpkg_options = ["--force-confnew"]
else:
dpkg_options = ["--force-confold", "--force-confdef"]
cmd = [
"apt-get",
"-q",
"-y",
]
for option in dpkg_options:
cmd.append("-o")
cmd.append("DPkg::Options::={}".format(option))
if kwargs.get("force_yes", False):
cmd.append("--force-yes")
if kwargs.get("skip_verify", False):
cmd.append("--allow-unauthenticated")
if kwargs.get("download_only", False) or kwargs.get("downloadonly", False):
cmd.append("--download-only")
if kwargs.get("allow_downgrades", False):
cmd.append("--allow-downgrades")
cmd.append("dist-upgrade" if dist_upgrade else "upgrade")
result = _call_apt(cmd, env=DPKG_ENV_VARS.copy())
__context__.pop("pkg.list_pkgs", None)
new = list_pkgs()
ret = salt.utils.data.compare_dicts(old, new)
if result["retcode"] != 0:
raise CommandExecutionError(
"Problem encountered upgrading packages",
info={"changes": ret, "result": result},
)
return ret
def hold(name=None, pkgs=None, sources=None, **kwargs): # pylint: disable=W0613
"""
.. versionadded:: 2014.7.0
Set package in 'hold' state, meaning it will not be upgraded.
name
The name of the package, e.g., 'tmux'
CLI Example:
.. code-block:: bash
salt '*' pkg.hold <package name>
pkgs
A list of packages to hold. Must be passed as a python list.
CLI Example:
.. code-block:: bash
salt '*' pkg.hold pkgs='["foo", "bar"]'
"""
if not name and not pkgs and not sources:
raise SaltInvocationError("One of name, pkgs, or sources must be specified.")
if pkgs and sources:
raise SaltInvocationError("Only one of pkgs or sources can be specified.")
targets = []
if pkgs:
targets.extend(pkgs)
elif sources:
for source in sources:
targets.append(next(iter(source)))
else:
targets.append(name)
ret = {}
for target in targets:
if isinstance(target, dict):
target = next(iter(target))
ret[target] = {"name": target, "changes": {}, "result": False, "comment": ""}
state = get_selections(pattern=target, state="hold")
if not state:
ret[target]["comment"] = "Package {} not currently held.".format(target)
elif not salt.utils.data.is_true(state.get("hold", False)):
if "test" in __opts__ and __opts__["test"]:
ret[target].update(result=None)
ret[target]["comment"] = "Package {} is set to be held.".format(target)
else:
result = set_selections(selection={"hold": [target]})
ret[target].update(changes=result[target], result=True)
ret[target]["comment"] = "Package {} is now being held.".format(target)
else:
ret[target].update(result=True)
ret[target]["comment"] = "Package {} is already set to be held.".format(
target
)
return ret
def unhold(name=None, pkgs=None, sources=None, **kwargs): # pylint: disable=W0613
"""
.. versionadded:: 2014.7.0
Set package current in 'hold' state to install state,
meaning it will be upgraded.
name
The name of the package, e.g., 'tmux'
CLI Example:
.. code-block:: bash
salt '*' pkg.unhold <package name>
pkgs
A list of packages to unhold. Must be passed as a python list.
CLI Example:
.. code-block:: bash
salt '*' pkg.unhold pkgs='["foo", "bar"]'
"""
if not name and not pkgs and not sources:
raise SaltInvocationError("One of name, pkgs, or sources must be specified.")
if pkgs and sources:
raise SaltInvocationError("Only one of pkgs or sources can be specified.")
targets = []
if pkgs:
targets.extend(pkgs)
elif sources:
for source in sources:
targets.append(next(iter(source)))
else:
targets.append(name)
ret = {}
for target in targets:
if isinstance(target, dict):
target = next(iter(target))
ret[target] = {"name": target, "changes": {}, "result": False, "comment": ""}
state = get_selections(pattern=target)
if not state:
ret[target]["comment"] = "Package {} does not have a state.".format(target)
elif salt.utils.data.is_true(state.get("hold", False)):
if "test" in __opts__ and __opts__["test"]:
ret[target].update(result=None)
ret[target]["comment"] = "Package {} is set not to be held.".format(
target
)
else:
result = set_selections(selection={"install": [target]})
ret[target].update(changes=result[target], result=True)
ret[target]["comment"] = "Package {} is no longer being held.".format(
target
)
else:
ret[target].update(result=True)
ret[target]["comment"] = "Package {} is already set not to be held.".format(
target
)
return ret
def _list_pkgs_from_context(versions_as_list, removed, purge_desired):
"""
Use pkg list from __context__
"""
if removed:
ret = copy.deepcopy(__context__["pkg.list_pkgs"]["removed"])
else:
ret = copy.deepcopy(__context__["pkg.list_pkgs"]["purge_desired"])
if not purge_desired:
ret.update(__context__["pkg.list_pkgs"]["installed"])
if not versions_as_list:
__salt__["pkg_resource.stringify"](ret)
return ret
def list_pkgs(
versions_as_list=False, removed=False, purge_desired=False, **kwargs
): # pylint: disable=W0613
"""
List the packages currently installed in a dict::
{'<package_name>': '<version>'}
removed
If ``True``, then only packages which have been removed (but not
purged) will be returned.
purge_desired
If ``True``, then only packages which have been marked to be purged,
but can't be purged due to their status as dependencies for other
        installed packages, will be returned. Note that these packages will
        also appear in the ``installed`` output.
.. versionchanged:: 2014.1.1
Packages in this state now correctly show up in the output of this
function.
CLI Example:
.. code-block:: bash
salt '*' pkg.list_pkgs
salt '*' pkg.list_pkgs versions_as_list=True
"""
versions_as_list = salt.utils.data.is_true(versions_as_list)
removed = salt.utils.data.is_true(removed)
purge_desired = salt.utils.data.is_true(purge_desired)
if "pkg.list_pkgs" in __context__ and kwargs.get("use_context", True):
return _list_pkgs_from_context(versions_as_list, removed, purge_desired)
ret = {"installed": {}, "removed": {}, "purge_desired": {}}
cmd = [
"dpkg-query",
"--showformat",
"${Status} ${Package} ${Version} ${Architecture}\n",
"-W",
]
out = __salt__["cmd.run_stdout"](cmd, output_loglevel="trace", python_shell=False)
# Typical lines of output:
# install ok installed zsh 4.3.17-1ubuntu1 amd64
# deinstall ok config-files mc 3:4.8.1-2ubuntu1 amd64
for line in out.splitlines():
cols = line.split()
try:
linetype, status, name, version_num, arch = (
cols[x] for x in (0, 2, 3, 4, 5)
)
except (ValueError, IndexError):
continue
if __grains__.get("cpuarch", "") == "x86_64":
osarch = __grains__.get("osarch", "")
if arch != "all" and osarch == "amd64" and osarch != arch:
name += ":{}".format(arch)
if cols:
if ("install" in linetype or "hold" in linetype) and "installed" in status:
__salt__["pkg_resource.add_pkg"](ret["installed"], name, version_num)
elif "deinstall" in linetype:
__salt__["pkg_resource.add_pkg"](ret["removed"], name, version_num)
elif "purge" in linetype and status == "installed":
__salt__["pkg_resource.add_pkg"](
ret["purge_desired"], name, version_num
)
for pkglist_type in ("installed", "removed", "purge_desired"):
__salt__["pkg_resource.sort_pkglist"](ret[pkglist_type])
__context__["pkg.list_pkgs"] = copy.deepcopy(ret)
if removed:
ret = ret["removed"]
else:
ret = copy.deepcopy(__context__["pkg.list_pkgs"]["purge_desired"])
if not purge_desired:
ret.update(__context__["pkg.list_pkgs"]["installed"])
if not versions_as_list:
__salt__["pkg_resource.stringify"](ret)
return ret
def _get_upgradable(dist_upgrade=True, **kwargs):
"""
Utility function to get upgradable packages
Sample return data:
{ 'pkgname': '1.2.3-45', ... }
"""
cmd = ["apt-get", "--just-print"]
if dist_upgrade:
cmd.append("dist-upgrade")
else:
cmd.append("upgrade")
try:
cmd.extend(["-o", "APT::Default-Release={}".format(kwargs["fromrepo"])])
except KeyError:
pass
call = _call_apt(cmd)
if call["retcode"] != 0:
msg = "Failed to get upgrades"
for key in ("stderr", "stdout"):
if call[key]:
msg += ": " + call[key]
break
raise CommandExecutionError(msg)
else:
out = call["stdout"]
# rexp parses lines that look like the following:
# Conf libxfont1 (1:1.4.5-1 Debian:testing [i386])
rexp = re.compile("(?m)^Conf " "([^ ]+) " r"\(([^ ]+)") # Package name # Version
keys = ["name", "version"]
_get = lambda l, k: l[keys.index(k)]
upgrades = rexp.findall(out)
ret = {}
for line in upgrades:
name = _get(line, "name")
version_num = _get(line, "version")
ret[name] = version_num
return ret
def list_upgrades(refresh=True, dist_upgrade=True, **kwargs):
"""
List all available package upgrades.
refresh
Whether to refresh the package database before listing upgrades.
Default: True.
cache_valid_time
.. versionadded:: 2016.11.0
Skip refreshing the package database if refresh has already occurred within
<value> seconds
dist_upgrade
Whether to list the upgrades using dist-upgrade vs upgrade. Default is
to use dist-upgrade.
CLI Example:
.. code-block:: bash
salt '*' pkg.list_upgrades
"""
cache_valid_time = kwargs.pop("cache_valid_time", 0)
if salt.utils.data.is_true(refresh):
refresh_db(cache_valid_time)
return _get_upgradable(dist_upgrade, **kwargs)
def upgrade_available(name, **kwargs):
"""
Check whether or not an upgrade is available for a given package
CLI Example:
.. code-block:: bash
salt '*' pkg.upgrade_available <package name>
"""
return latest_version(name) != ""
def version_cmp(pkg1, pkg2, ignore_epoch=False, **kwargs):
"""
Do a cmp-style comparison on two packages. Return -1 if pkg1 < pkg2, 0 if
pkg1 == pkg2, and 1 if pkg1 > pkg2. Return None if there was a problem
making the comparison.
ignore_epoch : False
Set to ``True`` to ignore the epoch when comparing versions
.. versionadded:: 2015.8.10,2016.3.2
CLI Example:
.. code-block:: bash
salt '*' pkg.version_cmp '0.2.4-0ubuntu1' '0.2.4.1-0ubuntu1'
"""
normalize = lambda x: str(x).split(":", 1)[-1] if ignore_epoch else str(x)
# both apt_pkg.version_compare and _cmd_quote need string arguments.
pkg1 = normalize(pkg1)
pkg2 = normalize(pkg2)
    # If we have apt_pkg, this is quicker and does not rely on the shell.
if HAS_APTPKG:
try:
# the apt_pkg module needs to be manually initialized
apt_pkg.init_system()
# if there is a difference in versions, apt_pkg.version_compare will
# return an int representing the difference in minor versions, or
# 1/-1 if the difference is smaller than minor versions. normalize
# to -1, 0 or 1.
try:
ret = apt_pkg.version_compare(pkg1, pkg2)
except TypeError:
ret = apt_pkg.version_compare(str(pkg1), str(pkg2))
return 1 if ret > 0 else -1 if ret < 0 else 0
except Exception: # pylint: disable=broad-except
# Try to use shell version in case of errors w/python bindings
pass
try:
for oper, ret in (("lt", -1), ("eq", 0), ("gt", 1)):
cmd = ["dpkg", "--compare-versions", pkg1, oper, pkg2]
retcode = __salt__["cmd.retcode"](
cmd, output_loglevel="trace", python_shell=False, ignore_retcode=True
)
if retcode == 0:
return ret
except Exception as exc: # pylint: disable=broad-except
log.error(exc)
return None
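# Illustrative sketch -- not part of the upstream module. Expected comparison
# results, assuming python-apt or dpkg is available on the minion; the
# version strings are examples, not captured output.
#
#     >>> version_cmp('0.2.4-0ubuntu1', '0.2.4.1-0ubuntu1')   # doctest: +SKIP
#     -1
#     >>> version_cmp('1:1.0-1', '2.0-1', ignore_epoch=True)  # doctest: +SKIP
#     -1
#     >>> version_cmp('2.0-1', '2.0-1')                       # doctest: +SKIP
#     0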
def _split_repo_str(repo):
"""
Return APT source entry as a tuple.
"""
split = SourceEntry(repo)
return split.type, split.architectures, split.uri, split.dist, split.comps
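# Illustrative sketch -- not part of the upstream module. The tuple produced
# for a typical one-line APT source entry (requires python-apt's SourceEntry;
# the repository below is hypothetical):
#
#     >>> _split_repo_str(
#     ...     'deb [arch=amd64] http://archive.ubuntu.com/ubuntu focal main universe'
#     ... )  # doctest: +SKIP
#     ('deb', ['amd64'], 'http://archive.ubuntu.com/ubuntu', 'focal', ['main', 'universe'])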
def _consolidate_repo_sources(sources):
"""
Consolidate APT sources.
"""
if not isinstance(sources, SourcesList):
raise TypeError("'{}' not a '{}'".format(type(sources), SourcesList))
consolidated = {}
delete_files = set()
base_file = SourceEntry("").file
repos = [s for s in sources.list if not s.invalid]
for repo in repos:
key = str(
(
getattr(repo, "architectures", []),
repo.disabled,
repo.type,
repo.uri,
repo.dist,
)
)
if key in consolidated:
combined = consolidated[key]
combined_comps = set(repo.comps).union(set(combined.comps))
consolidated[key].comps = list(combined_comps)
else:
consolidated[key] = SourceEntry(repo.line)
if repo.file != base_file:
delete_files.add(repo.file)
sources.list = list(consolidated.values())
sources.save()
for file_ in delete_files:
try:
os.remove(file_)
except OSError:
pass
return sources
def list_repo_pkgs(*args, **kwargs): # pylint: disable=unused-import
"""
.. versionadded:: 2017.7.0
Returns all available packages. Optionally, package names (and name globs)
can be passed and the results will be filtered to packages matching those
names.
This function can be helpful in discovering the version or repo to specify
in a :mod:`pkg.installed <salt.states.pkg.installed>` state.
The return data will be a dictionary mapping package names to a list of
version numbers, ordered from newest to oldest. For example:
.. code-block:: python
{
'bash': ['4.3-14ubuntu1.1',
'4.3-14ubuntu1'],
'nginx': ['1.10.0-0ubuntu0.16.04.4',
'1.9.15-0ubuntu1']
}
CLI Examples:
.. code-block:: bash
salt '*' pkg.list_repo_pkgs
salt '*' pkg.list_repo_pkgs foo bar baz
"""
if args:
# Get only information about packages in args
cmd = ["apt-cache", "show"] + [arg for arg in args]
else:
# Get information about all available packages
cmd = ["apt-cache", "dump"]
out = _call_apt(cmd, scope=False, ignore_retcode=True)
ret = {}
pkg_name = None
skip_pkg = False
new_pkg = re.compile("^Package: (.+)")
for line in salt.utils.itertools.split(out["stdout"], "\n"):
if not line.strip():
continue
try:
cur_pkg = new_pkg.match(line).group(1)
except AttributeError:
pass
else:
if cur_pkg != pkg_name:
pkg_name = cur_pkg
continue
comps = line.strip().split(None, 1)
if comps[0] == "Version:":
ret.setdefault(pkg_name, []).append(comps[1])
return ret
def _skip_source(source):
"""
Decide to skip source or not.
:param source:
:return:
"""
if source.invalid:
if (
source.uri
and source.type
and source.type in ("deb", "deb-src", "rpm", "rpm-src")
):
pieces = source.mysplit(source.line)
if pieces[1].strip()[0] == "[":
options = pieces.pop(1).strip("[]").split()
if len(options) > 0:
log.debug(
"Source %s will be included although is marked invalid",
source.uri,
)
return False
return True
else:
return True
return False
def list_repos(**kwargs):
"""
Lists all repos in the sources.list (and sources.lists.d) files
CLI Example:
.. code-block:: bash
salt '*' pkg.list_repos
salt '*' pkg.list_repos disabled=True
"""
repos = {}
sources = SourcesList()
for source in sources.list:
if _skip_source(source):
continue
repo = {}
repo["file"] = source.file
repo["comps"] = getattr(source, "comps", [])
repo["disabled"] = source.disabled
repo["dist"] = source.dist
repo["type"] = source.type
repo["uri"] = source.uri
repo["line"] = source.line.strip()
repo["architectures"] = getattr(source, "architectures", [])
repos.setdefault(source.uri, []).append(repo)
return repos
def get_repo(repo, **kwargs):
"""
Display a repo from the sources.list / sources.list.d
The repo passed in needs to be a complete repo entry.
CLI Examples:
.. code-block:: bash
salt '*' pkg.get_repo "myrepo definition"
"""
ppa_auth = kwargs.get("ppa_auth", None)
# we have to be clever about this since the repo definition formats
# are a bit more "loose" than in some other distributions
if repo.startswith("ppa:") and __grains__["os"] in ("Ubuntu", "Mint", "neon"):
# This is a PPA definition meaning special handling is needed
# to derive the name.
dist = __grains__["lsb_distrib_codename"]
owner_name, ppa_name = repo[4:].split("/")
if ppa_auth:
auth_info = "{}@".format(ppa_auth)
repo = LP_PVT_SRC_FORMAT.format(auth_info, owner_name, ppa_name, dist)
else:
if HAS_SOFTWAREPROPERTIES:
try:
if hasattr(softwareproperties.ppa, "PPAShortcutHandler"):
repo = softwareproperties.ppa.PPAShortcutHandler(repo).expand(
dist
)[0]
else:
repo = softwareproperties.ppa.expand_ppa_line(repo, dist)[0]
except NameError as name_error:
raise CommandExecutionError(
"Could not find ppa {}: {}".format(repo, name_error)
)
else:
repo = LP_SRC_FORMAT.format(owner_name, ppa_name, dist)
repos = list_repos()
if repos:
try:
(
repo_type,
repo_architectures,
repo_uri,
repo_dist,
repo_comps,
) = _split_repo_str(repo)
if ppa_auth:
uri_match = re.search("(http[s]?://)(.+)", repo_uri)
if uri_match:
if not uri_match.group(2).startswith(ppa_auth):
repo_uri = "{}{}@{}".format(
uri_match.group(1), ppa_auth, uri_match.group(2)
)
except SyntaxError:
raise CommandExecutionError(
"Error: repo '{}' is not a well formatted definition".format(repo)
)
for source in repos.values():
for sub in source:
if (
sub["type"] == repo_type
and sub["uri"] == repo_uri
and sub["dist"] == repo_dist
):
if not repo_comps:
return sub
for comp in repo_comps:
if comp in sub.get("comps", []):
return sub
return {}
def del_repo(repo, **kwargs):
"""
Delete a repo from the sources.list / sources.list.d
If the .list file is in the sources.list.d directory
and the file that the repo exists in does not contain any other
repo configuration, the file itself will be deleted.
The repo passed in must be a fully formed repository definition
string.
CLI Examples:
.. code-block:: bash
salt '*' pkg.del_repo "myrepo definition"
"""
is_ppa = False
if repo.startswith("ppa:") and __grains__["os"] in ("Ubuntu", "Mint", "neon"):
# This is a PPA definition meaning special handling is needed
# to derive the name.
is_ppa = True
dist = __grains__["lsb_distrib_codename"]
if not HAS_SOFTWAREPROPERTIES:
_warn_software_properties(repo)
owner_name, ppa_name = repo[4:].split("/")
if "ppa_auth" in kwargs:
auth_info = "{}@".format(kwargs["ppa_auth"])
                repo = LP_PVT_SRC_FORMAT.format(auth_info, owner_name, ppa_name, dist)
else:
repo = LP_SRC_FORMAT.format(owner_name, ppa_name, dist)
else:
if hasattr(softwareproperties.ppa, "PPAShortcutHandler"):
repo = softwareproperties.ppa.PPAShortcutHandler(repo).expand(dist)[0]
else:
repo = softwareproperties.ppa.expand_ppa_line(repo, dist)[0]
sources = SourcesList()
repos = [s for s in sources.list if not s.invalid]
if repos:
deleted_from = dict()
try:
(
repo_type,
repo_architectures,
repo_uri,
repo_dist,
repo_comps,
) = _split_repo_str(repo)
except SyntaxError:
raise SaltInvocationError(
"Error: repo '{}' not a well formatted definition".format(repo)
)
for source in repos:
if (
source.type == repo_type
and source.architectures == repo_architectures
and source.uri == repo_uri
and source.dist == repo_dist
):
s_comps = set(source.comps)
r_comps = set(repo_comps)
if s_comps.intersection(r_comps):
deleted_from[source.file] = 0
source.comps = list(s_comps.difference(r_comps))
if not source.comps:
try:
sources.remove(source)
except ValueError:
pass
            # PPAs are special and can add deb-src entries that
            # expand_ppa_line doesn't always reflect. Let's just clean up
            # here for good measure.
if (
is_ppa
and repo_type == "deb"
and source.type == "deb-src"
and source.uri == repo_uri
and source.dist == repo_dist
):
s_comps = set(source.comps)
r_comps = set(repo_comps)
if s_comps.intersection(r_comps):
deleted_from[source.file] = 0
source.comps = list(s_comps.difference(r_comps))
if not source.comps:
try:
sources.remove(source)
except ValueError:
pass
sources.save()
if deleted_from:
ret = ""
for source in sources:
if source.file in deleted_from:
deleted_from[source.file] += 1
for repo_file, count in deleted_from.items():
msg = "Repo '{0}' has been removed from {1}.\n"
if count == 0 and "sources.list.d/" in repo_file:
if os.path.isfile(repo_file):
msg = "File {1} containing repo '{0}' has been removed."
try:
os.remove(repo_file)
except OSError:
pass
ret += msg.format(repo, repo_file)
# explicit refresh after a repo is deleted
refresh_db()
return ret
raise CommandExecutionError(
"Repo {} doesn't exist in the sources.list(s)".format(repo)
)
def _convert_if_int(value):
"""
.. versionadded:: 2017.7.0
Convert to an int if necessary.
:param str value: The value to check/convert.
:return: The converted or passed value.
:rtype: bool|int|str
"""
try:
value = int(str(value))
except ValueError:
pass
return value
def get_repo_keys():
"""
.. versionadded:: 2017.7.0
List known repo key details.
:return: A dictionary containing the repo keys.
:rtype: dict
CLI Examples:
.. code-block:: bash
salt '*' pkg.get_repo_keys
"""
ret = dict()
repo_keys = list()
# The double usage of '--with-fingerprint' is necessary in order to
# retrieve the fingerprint of the subkey.
cmd = [
"apt-key",
"adv",
"--batch",
"--list-public-keys",
"--with-fingerprint",
"--with-fingerprint",
"--with-colons",
"--fixed-list-mode",
]
cmd_ret = _call_apt(cmd, scope=False)
if cmd_ret["retcode"] != 0:
log.error(cmd_ret["stderr"])
return ret
lines = [line for line in cmd_ret["stdout"].splitlines() if line.strip()]
# Reference for the meaning of each item in the colon-separated
# record can be found here: https://goo.gl/KIZbvp
for line in lines:
items = [
_convert_if_int(item.strip()) if item.strip() else None
for item in line.split(":")
]
key_props = dict()
if len(items) < 2:
log.debug("Skipping line: %s", line)
continue
if items[0] in ("pub", "sub"):
key_props.update(
{
"algorithm": items[3],
"bits": items[2],
"capability": items[11],
"date_creation": items[5],
"date_expiration": items[6],
"keyid": items[4],
"validity": items[1],
}
)
if items[0] == "pub":
repo_keys.append(key_props)
else:
repo_keys[-1]["subkey"] = key_props
elif items[0] == "fpr":
if repo_keys[-1].get("subkey", False):
repo_keys[-1]["subkey"].update({"fingerprint": items[9]})
else:
repo_keys[-1].update({"fingerprint": items[9]})
elif items[0] == "uid":
repo_keys[-1].update({"uid": items[9], "uid_hash": items[7]})
for repo_key in repo_keys:
ret[repo_key["keyid"]] = repo_key
return ret
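# Illustrative sketch -- not part of the upstream module. A trimmed example
# of the colon-separated records parsed above and the resulting entry; the
# key id, fingerprint and dates are made up.
#
#     pub:-:4096:1:754A1A7AE731F165:1315867845:::-:::scSC:
#     fpr:::::::::F1D53D8C4B782589AA6A755A754A1A7AE731F165:
#
# roughly becomes:
#
#     {'754A1A7AE731F165': {'algorithm': 1, 'bits': 4096, 'capability': 'scSC',
#                           'date_creation': 1315867845, 'date_expiration': None,
#                           'keyid': '754A1A7AE731F165', 'validity': '-',
#                           'fingerprint': 'F1D53D8C4B782589AA6A755A754A1A7AE731F165'}}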
def add_repo_key(path=None, text=None, keyserver=None, keyid=None, saltenv="base"):
"""
.. versionadded:: 2017.7.0
Add a repo key using ``apt-key add``.
:param str path: The path of the key file to import.
:param str text: The key data to import, in string form.
:param str keyserver: The server to download the repo key specified by the keyid.
:param str keyid: The key id of the repo key to add.
:param str saltenv: The environment the key file resides in.
:return: A boolean representing whether the repo key was added.
:rtype: bool
CLI Examples:
.. code-block:: bash
salt '*' pkg.add_repo_key 'salt://apt/sources/test.key'
salt '*' pkg.add_repo_key text="'$KEY1'"
salt '*' pkg.add_repo_key keyserver='keyserver.example' keyid='0000AAAA'
"""
cmd = ["apt-key"]
kwargs = {}
current_repo_keys = get_repo_keys()
if path:
cached_source_path = __salt__["cp.cache_file"](path, saltenv)
if not cached_source_path:
log.error("Unable to get cached copy of file: %s", path)
return False
cmd.extend(["add", cached_source_path])
elif text:
log.debug("Received value: %s", text)
cmd.extend(["add", "-"])
kwargs.update({"stdin": text})
elif keyserver:
if not keyid:
error_msg = "No keyid or keyid too short for keyserver: {}".format(
keyserver
)
raise SaltInvocationError(error_msg)
cmd.extend(["adv", "--batch", "--keyserver", keyserver, "--recv", keyid])
elif keyid:
error_msg = "No keyserver specified for keyid: {}".format(keyid)
raise SaltInvocationError(error_msg)
else:
raise TypeError(
"{}() takes at least 1 argument (0 given)".format(add_repo_key.__name__)
)
# If the keyid is provided or determined, check it against the existing
# repo key ids to determine whether it needs to be imported.
if keyid:
for current_keyid in current_repo_keys:
if current_keyid[-(len(keyid)) :] == keyid:
log.debug("The keyid '%s' already present: %s", keyid, current_keyid)
return True
cmd_ret = _call_apt(cmd, **kwargs)
if cmd_ret["retcode"] == 0:
return True
log.error("Unable to add repo key: %s", cmd_ret["stderr"])
return False
def del_repo_key(name=None, **kwargs):
"""
.. versionadded:: 2015.8.0
Remove a repo key using ``apt-key del``
name
Repo from which to remove the key. Unnecessary if ``keyid`` is passed.
keyid
The KeyID of the GPG key to remove
keyid_ppa : False
If set to ``True``, the repo's GPG key ID will be looked up from
ppa.launchpad.net and removed.
.. note::
Setting this option to ``True`` requires that the ``name`` param
also be passed.
CLI Examples:
.. code-block:: bash
salt '*' pkg.del_repo_key keyid=0123ABCD
salt '*' pkg.del_repo_key name='ppa:foo/bar' keyid_ppa=True
"""
if kwargs.get("keyid_ppa", False):
if isinstance(name, str) and name.startswith("ppa:"):
owner_name, ppa_name = name[4:].split("/")
ppa_info = _get_ppa_info_from_launchpad(owner_name, ppa_name)
keyid = ppa_info["signing_key_fingerprint"][-8:]
else:
raise SaltInvocationError("keyid_ppa requires that a PPA be passed")
else:
if "keyid" in kwargs:
keyid = kwargs.get("keyid")
else:
raise SaltInvocationError("keyid or keyid_ppa and PPA name must be passed")
result = _call_apt(["apt-key", "del", keyid], scope=False)
if result["retcode"] != 0:
msg = "Failed to remove keyid {0}"
if result["stderr"]:
msg += ": {}".format(result["stderr"])
raise CommandExecutionError(msg)
return keyid
def mod_repo(repo, saltenv="base", **kwargs):
"""
Modify one or more values for a repo. If the repo does not exist, it will
be created, so long as the definition is well formed. For Ubuntu the
``ppa:<project>/repo`` format is acceptable. ``ppa:`` format can only be
used to create a new repository.
The following options are available to modify a repo definition:
architectures
        A comma-separated list of supported architectures, e.g. ``amd64``. If
this option is not set, all architectures (configured in the system)
will be used.
comps
A comma separated list of components for the repo, e.g. ``main``
file
A file name to be used
keyserver
Keyserver to get gpg key from
keyid
Key ID or a list of key IDs to load with the ``keyserver`` argument
key_url
URL to a GPG key to add to the APT GPG keyring
key_text
GPG key in string form to add to the APT GPG keyring
.. versionadded:: 2018.3.0
consolidate : False
If ``True``, will attempt to de-duplicate and consolidate sources
comments
Sometimes you want to supply additional information, but not as
enabled configuration. All comments provided here will be joined
into a single string and appended to the repo configuration with a
comment marker (#) before it.
.. versionadded:: 2015.8.9
refresh : True
Enable or disable (True or False) refreshing of the apt package
database. The previous ``refresh_db`` argument was deprecated in
        favor of ``refresh``. The ``refresh_db`` argument will still
continue to work to ensure backwards compatibility, but please
change to using the preferred ``refresh``.
.. note::
Due to the way keys are stored for APT, there is a known issue where
the key won't be updated unless another change is made at the same
time. Keys should be properly added on initial configuration.
CLI Examples:
.. code-block:: bash
salt '*' pkg.mod_repo 'myrepo definition' uri=http://new/uri
salt '*' pkg.mod_repo 'myrepo definition' comps=main,universe
"""
if "refresh_db" in kwargs:
refresh = kwargs["refresh_db"]
else:
refresh = kwargs.get("refresh", True)
    # To ensure no one sets key values that _shouldn't_ be changed on the
    # object itself, this is just a whitelist of properties that are OK to set.
if repo.startswith("ppa:"):
if __grains__["os"] in ("Ubuntu", "Mint", "neon"):
            # Secure PPAs cannot be added via apt-add-repository as of the
            # time of this implementation. The code path for secure PPAs
            # should be the same as the urllib method.
if salt.utils.path.which("apt-add-repository") and "ppa_auth" not in kwargs:
repo_info = get_repo(repo)
if repo_info:
return {repo: repo_info}
else:
env = None
http_proxy_url = _get_http_proxy_url()
if http_proxy_url:
env = {
"http_proxy": http_proxy_url,
"https_proxy": http_proxy_url,
}
if float(__grains__["osrelease"]) < 12.04:
cmd = ["apt-add-repository", repo]
else:
cmd = ["apt-add-repository", "-y", repo]
out = _call_apt(cmd, env=env, scope=False, **kwargs)
if out["retcode"]:
raise CommandExecutionError(
"Unable to add PPA '{}'. '{}' exited with "
"status {!s}: '{}' ".format(
repo[4:], cmd, out["retcode"], out["stderr"]
)
)
# explicit refresh when a repo is modified.
if refresh:
refresh_db()
return {repo: out}
else:
if not HAS_SOFTWAREPROPERTIES:
_warn_software_properties(repo)
else:
log.info("Falling back to urllib method for private PPA")
# fall back to urllib style
try:
owner_name, ppa_name = repo[4:].split("/", 1)
except ValueError:
raise CommandExecutionError(
"Unable to get PPA info from argument. "
'Expected format "<PPA_OWNER>/<PPA_NAME>" '
"(e.g. saltstack/salt) not found. Received "
"'{}' instead.".format(repo[4:])
)
dist = __grains__["lsb_distrib_codename"]
# ppa has a lot of implicit arguments. Make them explicit.
# These will defer to any user-defined variants
kwargs["dist"] = dist
ppa_auth = ""
if "file" not in kwargs:
filename = "/etc/apt/sources.list.d/{0}-{1}-{2}.list"
kwargs["file"] = filename.format(owner_name, ppa_name, dist)
try:
launchpad_ppa_info = _get_ppa_info_from_launchpad(
owner_name, ppa_name
)
if "ppa_auth" not in kwargs:
kwargs["keyid"] = launchpad_ppa_info["signing_key_fingerprint"]
else:
if "keyid" not in kwargs:
error_str = (
"Private PPAs require a keyid to be specified: {0}/{1}"
)
raise CommandExecutionError(
error_str.format(owner_name, ppa_name)
)
except HTTPError as exc:
raise CommandExecutionError(
"Launchpad does not know about {}/{}: {}".format(
owner_name, ppa_name, exc
)
)
except IndexError as exc:
raise CommandExecutionError(
"Launchpad knows about {}/{} but did not "
"return a fingerprint. Please set keyid "
"manually: {}".format(owner_name, ppa_name, exc)
)
if "keyserver" not in kwargs:
kwargs["keyserver"] = "keyserver.ubuntu.com"
if "ppa_auth" in kwargs:
if not launchpad_ppa_info["private"]:
raise CommandExecutionError(
"PPA is not private but auth credentials passed: {}".format(
repo
)
)
# assign the new repo format to the "repo" variable
# so we can fall through to the "normal" mechanism
# here.
if "ppa_auth" in kwargs:
ppa_auth = "{}@".format(kwargs["ppa_auth"])
repo = LP_PVT_SRC_FORMAT.format(
ppa_auth, owner_name, ppa_name, dist
)
else:
repo = LP_SRC_FORMAT.format(owner_name, ppa_name, dist)
else:
raise CommandExecutionError(
'cannot parse "ppa:" style repo definitions: {}'.format(repo)
)
sources = SourcesList()
if kwargs.get("consolidate", False):
# attempt to de-dup and consolidate all sources
# down to entries in sources.list
# this option makes it easier to keep the sources
# list in a "sane" state.
#
# this should remove duplicates, consolidate comps
# for a given source down to one line
# and eliminate "invalid" and comment lines
#
# the second side effect is removal of files
# that are not the main sources.list file
sources = _consolidate_repo_sources(sources)
repos = [s for s in sources if not s.invalid]
mod_source = None
try:
(
repo_type,
repo_architectures,
repo_uri,
repo_dist,
repo_comps,
) = _split_repo_str(repo)
except SyntaxError:
raise SyntaxError(
"Error: repo '{}' not a well formatted definition".format(repo)
)
full_comp_list = {comp.strip() for comp in repo_comps}
no_proxy = __salt__["config.option"]("no_proxy")
if "keyid" in kwargs:
keyid = kwargs.pop("keyid", None)
keyserver = kwargs.pop("keyserver", None)
if not keyid or not keyserver:
error_str = "both keyserver and keyid options required."
raise NameError(error_str)
if not isinstance(keyid, list):
keyid = [keyid]
for key in keyid:
if isinstance(
key, int
): # yaml can make this an int, we need the hex version
key = hex(key)
cmd = ["apt-key", "export", key]
output = __salt__["cmd.run_stdout"](cmd, python_shell=False, **kwargs)
imported = output.startswith("-----BEGIN PGP")
if keyserver:
if not imported:
http_proxy_url = _get_http_proxy_url()
if http_proxy_url and keyserver not in no_proxy:
cmd = [
"apt-key",
"adv",
"--batch",
"--keyserver-options",
"http-proxy={}".format(http_proxy_url),
"--keyserver",
keyserver,
"--logger-fd",
"1",
"--recv-keys",
key,
]
else:
cmd = [
"apt-key",
"adv",
"--batch",
"--keyserver",
keyserver,
"--logger-fd",
"1",
"--recv-keys",
key,
]
ret = _call_apt(cmd, scope=False, **kwargs)
if ret["retcode"] != 0:
raise CommandExecutionError(
"Error: key retrieval failed: {}".format(ret["stdout"])
)
elif "key_url" in kwargs:
key_url = kwargs["key_url"]
fn_ = __salt__["cp.cache_file"](key_url, saltenv)
if not fn_:
raise CommandExecutionError("Error: file not found: {}".format(key_url))
cmd = ["apt-key", "add", fn_]
out = __salt__["cmd.run_stdout"](cmd, python_shell=False, **kwargs)
if not out.upper().startswith("OK"):
raise CommandExecutionError(
"Error: failed to add key from {}".format(key_url)
)
elif "key_text" in kwargs:
key_text = kwargs["key_text"]
cmd = ["apt-key", "add", "-"]
out = __salt__["cmd.run_stdout"](
cmd, stdin=key_text, python_shell=False, **kwargs
)
if not out.upper().startswith("OK"):
raise CommandExecutionError(
"Error: failed to add key:\n{}".format(key_text)
)
if "comps" in kwargs:
kwargs["comps"] = [comp.strip() for comp in kwargs["comps"].split(",")]
full_comp_list |= set(kwargs["comps"])
else:
kwargs["comps"] = list(full_comp_list)
if "architectures" in kwargs:
kwargs["architectures"] = kwargs["architectures"].split(",")
else:
kwargs["architectures"] = repo_architectures
if "disabled" in kwargs:
kwargs["disabled"] = salt.utils.data.is_true(kwargs["disabled"])
elif "enabled" in kwargs:
kwargs["disabled"] = not salt.utils.data.is_true(kwargs["enabled"])
kw_type = kwargs.get("type")
kw_dist = kwargs.get("dist")
for source in repos:
# This series of checks will identify the starting source line
# and the resulting source line. The idea here is to ensure
# we are not returning bogus data because the source line
# has already been modified on a previous run.
repo_matches = (
source.type == repo_type
and source.uri.rstrip("/") == repo_uri.rstrip("/")
and source.dist == repo_dist
)
kw_matches = source.dist == kw_dist and source.type == kw_type
if repo_matches or kw_matches:
for comp in full_comp_list:
if comp in getattr(source, "comps", []):
mod_source = source
if not source.comps:
mod_source = source
if kwargs["architectures"] != source.architectures:
mod_source = source
if mod_source:
break
if "comments" in kwargs:
kwargs["comments"] = salt.utils.pkg.deb.combine_comments(kwargs["comments"])
if not mod_source:
mod_source = SourceEntry(repo)
if "comments" in kwargs:
mod_source.comment = kwargs["comments"]
sources.list.append(mod_source)
elif "comments" in kwargs:
mod_source.comment = kwargs["comments"]
for key in kwargs:
if key in _MODIFY_OK and hasattr(mod_source, key):
setattr(mod_source, key, kwargs[key])
sources.save()
# on changes, explicitly refresh
if refresh:
refresh_db()
return {
repo: {
"architectures": getattr(mod_source, "architectures", []),
"comps": mod_source.comps,
"disabled": mod_source.disabled,
"file": mod_source.file,
"type": mod_source.type,
"uri": mod_source.uri,
"line": mod_source.line,
}
}
def file_list(*packages, **kwargs):
"""
List the files that belong to a package. Not specifying any packages will
return a list of _every_ file on the system's package database (not
generally recommended).
CLI Examples:
.. code-block:: bash
salt '*' pkg.file_list httpd
salt '*' pkg.file_list httpd postfix
salt '*' pkg.file_list
"""
return __salt__["lowpkg.file_list"](*packages)
def file_dict(*packages, **kwargs):
"""
List the files that belong to a package, grouped by package. Not
specifying any packages will return a list of _every_ file on the system's
package database (not generally recommended).
CLI Examples:
.. code-block:: bash
salt '*' pkg.file_dict httpd
salt '*' pkg.file_dict httpd postfix
salt '*' pkg.file_dict
"""
return __salt__["lowpkg.file_dict"](*packages)
def expand_repo_def(**kwargs):
"""
Take a repository definition and expand it to the full pkg repository dict
that can be used for comparison. This is a helper function to make
the Debian/Ubuntu apt sources sane for comparison in the pkgrepo states.
This is designed to be called from pkgrepo states and will have little use
being called on the CLI.
"""
if "repo" not in kwargs:
raise SaltInvocationError("missing 'repo' argument")
sanitized = {}
repo = kwargs["repo"]
if repo.startswith("ppa:") and __grains__["os"] in ("Ubuntu", "Mint", "neon"):
dist = __grains__["lsb_distrib_codename"]
owner_name, ppa_name = repo[4:].split("/", 1)
if "ppa_auth" in kwargs:
auth_info = "{}@".format(kwargs["ppa_auth"])
repo = LP_PVT_SRC_FORMAT.format(auth_info, owner_name, ppa_name, dist)
else:
if HAS_SOFTWAREPROPERTIES:
if hasattr(softwareproperties.ppa, "PPAShortcutHandler"):
repo = softwareproperties.ppa.PPAShortcutHandler(repo).expand(dist)[
0
]
else:
repo = softwareproperties.ppa.expand_ppa_line(repo, dist)[0]
else:
repo = LP_SRC_FORMAT.format(owner_name, ppa_name, dist)
if "file" not in kwargs:
filename = "/etc/apt/sources.list.d/{0}-{1}-{2}.list"
kwargs["file"] = filename.format(owner_name, ppa_name, dist)
source_entry = SourceEntry(repo)
for list_args in ("architectures", "comps"):
if list_args in kwargs:
kwargs[list_args] = [
kwarg.strip() for kwarg in kwargs[list_args].split(",")
]
for kwarg in _MODIFY_OK:
if kwarg in kwargs:
setattr(source_entry, kwarg, kwargs[kwarg])
source_list = SourcesList()
source_entry = source_list.add(
type=source_entry.type,
uri=source_entry.uri,
dist=source_entry.dist,
orig_comps=getattr(source_entry, "comps", []),
architectures=getattr(source_entry, "architectures", []),
)
sanitized["file"] = source_entry.file
sanitized["comps"] = getattr(source_entry, "comps", [])
sanitized["disabled"] = source_entry.disabled
sanitized["dist"] = source_entry.dist
sanitized["type"] = source_entry.type
sanitized["uri"] = source_entry.uri
sanitized["line"] = source_entry.line.strip()
sanitized["architectures"] = getattr(source_entry, "architectures", [])
return sanitized
def _parse_selections(dpkgselection):
"""
    Parses the output of ``dpkg --get-selections`` and returns a format that
pkg.get_selections and pkg.set_selections work with.
"""
ret = {}
if isinstance(dpkgselection, str):
dpkgselection = dpkgselection.split("\n")
for line in dpkgselection:
if line:
_pkg, _state = line.split()
if _state in ret:
ret[_state].append(_pkg)
else:
ret[_state] = [_pkg]
return ret
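# Illustrative sketch -- not part of the upstream module. The mapping
# produced for typical ``dpkg --get-selections`` output (tab-separated
# "package<TAB>state" lines):
#
#     >>> _parse_selections("zsh\tinstall\nmc\tdeinstall\nzsh-doc\tinstall\n")
#     {'install': ['zsh', 'zsh-doc'], 'deinstall': ['mc']}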
def get_selections(pattern=None, state=None):
"""
View package state from the dpkg database.
Returns a dict of dicts containing the state, and package names:
.. code-block:: python
{'<host>':
{'<state>': ['pkg1',
...
]
},
...
}
CLI Example:
.. code-block:: bash
salt '*' pkg.get_selections
salt '*' pkg.get_selections 'python-*'
salt '*' pkg.get_selections state=hold
salt '*' pkg.get_selections 'openssh*' state=hold
"""
ret = {}
cmd = ["dpkg", "--get-selections"]
cmd.append(pattern if pattern else "*")
stdout = __salt__["cmd.run_stdout"](
cmd, output_loglevel="trace", python_shell=False
)
ret = _parse_selections(stdout)
if state:
return {state: ret.get(state, [])}
return ret
# TODO: allow state=None to be set, and that *args will be set to that state
# TODO: maybe use something similar to pkg_resources.pack_pkgs to allow a list
# passed to selection, with the default state set to whatever is passed by the
# above, but override that if explicitly specified
# TODO: handle path to selection file from local fs as well as from salt file
# server
def set_selections(path=None, selection=None, clear=False, saltenv="base"):
"""
Change package state in the dpkg database.
    The state can be any one of the following, as documented in ``dpkg(1)``:
- install
- hold
- deinstall
- purge
This command is commonly used to mark specific packages to be held from
being upgraded, that is, to be kept at a certain version. When a state is
    changed to anything other than ``hold``, the change is typically followed
    by ``apt-get -u dselect-upgrade``.
Note: Be careful with the ``clear`` argument, since it will start
with setting all packages to deinstall state.
Returns a dict of dicts containing the package names, and the new and old
versions:
.. code-block:: python
{'<host>':
{'<package>': {'new': '<new-state>',
'old': '<old-state>'}
},
...
}
CLI Example:
.. code-block:: bash
salt '*' pkg.set_selections selection='{"install": ["netcat"]}'
salt '*' pkg.set_selections selection='{"hold": ["openssh-server", "openssh-client"]}'
salt '*' pkg.set_selections salt://path/to/file
salt '*' pkg.set_selections salt://path/to/file clear=True
"""
ret = {}
if not path and not selection:
return ret
if path and selection:
err = (
"The 'selection' and 'path' arguments to "
"pkg.set_selections are mutually exclusive, and cannot be "
"specified together"
)
raise SaltInvocationError(err)
if isinstance(selection, str):
try:
selection = salt.utils.yaml.safe_load(selection)
except (
salt.utils.yaml.parser.ParserError,
salt.utils.yaml.scanner.ScannerError,
) as exc:
raise SaltInvocationError("Improperly-formatted selection: {}".format(exc))
if path:
path = __salt__["cp.cache_file"](path, saltenv)
with salt.utils.files.fopen(path, "r") as ifile:
content = [salt.utils.stringutils.to_unicode(x) for x in ifile.readlines()]
selection = _parse_selections(content)
if selection:
valid_states = ("install", "hold", "deinstall", "purge")
bad_states = [x for x in selection if x not in valid_states]
if bad_states:
raise SaltInvocationError(
"Invalid state(s): {}".format(", ".join(bad_states))
)
if clear:
cmd = ["dpkg", "--clear-selections"]
if not __opts__["test"]:
result = _call_apt(cmd, scope=False)
if result["retcode"] != 0:
err = "Running dpkg --clear-selections failed: {}".format(
result["stderr"]
)
log.error(err)
raise CommandExecutionError(err)
sel_revmap = {}
for _state, _pkgs in get_selections().items():
sel_revmap.update({_pkg: _state for _pkg in _pkgs})
for _state, _pkgs in selection.items():
for _pkg in _pkgs:
if _state == sel_revmap.get(_pkg):
continue
cmd = ["dpkg", "--set-selections"]
cmd_in = "{} {}".format(_pkg, _state)
if not __opts__["test"]:
result = _call_apt(cmd, scope=False, stdin=cmd_in)
if result["retcode"] != 0:
log.error("failed to set state %s for package %s", _state, _pkg)
else:
ret[_pkg] = {"old": sel_revmap.get(_pkg), "new": _state}
return ret
def _resolve_deps(name, pkgs, **kwargs):
"""
Installs missing dependencies and marks them as auto installed so they
are removed when no more manually installed packages depend on them.
.. versionadded:: 2014.7.0
:depends: - python-apt module
"""
missing_deps = []
for pkg_file in pkgs:
deb = apt.debfile.DebPackage(filename=pkg_file, cache=apt.Cache())
if deb.check():
missing_deps.extend(deb.missing_deps)
if missing_deps:
cmd = ["apt-get", "-q", "-y"]
cmd = cmd + ["-o", "DPkg::Options::=--force-confold"]
cmd = cmd + ["-o", "DPkg::Options::=--force-confdef"]
cmd.append("install")
cmd.extend(missing_deps)
ret = __salt__["cmd.retcode"](cmd, env=kwargs.get("env"), python_shell=False)
if ret != 0:
raise CommandExecutionError(
"Error: unable to resolve dependencies for: {}".format(name)
)
else:
try:
cmd = ["apt-mark", "auto"] + missing_deps
__salt__["cmd.run"](cmd, env=kwargs.get("env"), python_shell=False)
except MinionError as exc:
raise CommandExecutionError(exc)
return
def owner(*paths, **kwargs):
"""
.. versionadded:: 2014.7.0
Return the name of the package that owns the file. Multiple file paths can
be passed. Like :mod:`pkg.version <salt.modules.aptpkg.version>`, if a
single path is passed, a string will be returned, and if multiple paths are
passed, a dictionary of file/package name pairs will be returned.
If the file is not owned by a package, or is not present on the minion,
then an empty string will be returned for that path.
CLI Example:
.. code-block:: bash
salt '*' pkg.owner /usr/bin/apachectl
salt '*' pkg.owner /usr/bin/apachectl /usr/bin/basename
"""
if not paths:
return ""
ret = {}
for path in paths:
cmd = ["dpkg", "-S", path]
output = __salt__["cmd.run_stdout"](
cmd, output_loglevel="trace", python_shell=False
)
ret[path] = output.split(":")[0]
if "no path found" in ret[path].lower():
ret[path] = ""
if len(ret) == 1:
return next(iter(ret.values()))
return ret
def show(*names, **kwargs):
"""
.. versionadded:: 2019.2.0
Runs an ``apt-cache show`` on the passed package names, and returns the
results in a nested dictionary. The top level of the return data will be
the package name, with each package name mapping to a dictionary of version
numbers to any additional information returned by ``apt-cache show``.
filter
An optional comma-separated list (or quoted Python list) of
case-insensitive keys on which to filter. This allows one to restrict
the information returned for each package to a smaller selection of
pertinent items.
refresh : False
If ``True``, the apt cache will be refreshed first. By default, no
refresh is performed.
CLI Examples:
.. code-block:: bash
salt myminion pkg.show gawk
salt myminion pkg.show 'nginx-*'
salt myminion pkg.show 'nginx-*' filter=description,provides
"""
kwargs = salt.utils.args.clean_kwargs(**kwargs)
refresh = kwargs.pop("refresh", False)
filter_ = salt.utils.args.split_input(
kwargs.pop("filter", []),
lambda x: str(x) if not isinstance(x, str) else x.lower(),
)
if kwargs:
salt.utils.args.invalid_kwargs(kwargs)
if refresh:
refresh_db()
if not names:
return {}
result = _call_apt(["apt-cache", "show"] + list(names), scope=False)
def _add(ret, pkginfo):
name = pkginfo.pop("Package", None)
version = pkginfo.pop("Version", None)
if name is not None and version is not None:
ret.setdefault(name, {}).setdefault(version, {}).update(pkginfo)
def _check_filter(key):
key = key.lower()
return True if key in ("package", "version") or not filter_ else key in filter_
ret = {}
pkginfo = {}
for line in salt.utils.itertools.split(result["stdout"], "\n"):
line = line.strip()
if line:
try:
key, val = (x.strip() for x in line.split(":", 1))
except ValueError:
pass
else:
if _check_filter(key):
pkginfo[key] = val
else:
# We've reached a blank line, which separates packages
_add(ret, pkginfo)
# Clear out the pkginfo dict for the next package
pkginfo = {}
continue
# Make sure to add whatever was in the pkginfo dict when we reached the end
# of the output.
_add(ret, pkginfo)
return ret
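# Illustrative sketch -- not part of the upstream module. Shape of the nested
# dictionary returned by show(); the package, version and field values are
# hypothetical.
#
#     >>> show('gawk', filter='description,provides')  # doctest: +SKIP
#     {'gawk': {'1:5.0.1+dfsg-1': {'Description': 'GNU awk ...', 'Provides': 'awk'}}}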
def info_installed(*names, **kwargs):
"""
Return the information of the named package(s) installed on the system.
.. versionadded:: 2015.8.1
names
The names of the packages for which to return information.
failhard
Whether to throw an exception if none of the packages are installed.
Defaults to True.
.. versionadded:: 2016.11.3
CLI Example:
.. code-block:: bash
salt '*' pkg.info_installed <package1>
salt '*' pkg.info_installed <package1> <package2> <package3> ...
salt '*' pkg.info_installed <package1> failhard=false
"""
kwargs = salt.utils.args.clean_kwargs(**kwargs)
failhard = kwargs.pop("failhard", True)
if kwargs:
salt.utils.args.invalid_kwargs(kwargs)
ret = dict()
for pkg_name, pkg_nfo in __salt__["lowpkg.info"](*names, failhard=failhard).items():
t_nfo = dict()
if pkg_nfo.get("status", "ii")[1] != "i":
continue # return only packages that are really installed
# Translate dpkg-specific keys to a common structure
for key, value in pkg_nfo.items():
if key == "package":
t_nfo["name"] = value
elif key == "origin":
t_nfo["vendor"] = value
elif key == "section":
t_nfo["group"] = value
elif key == "maintainer":
t_nfo["packager"] = value
elif key == "homepage":
t_nfo["url"] = value
elif key == "status":
continue # only installed pkgs are returned, no need for status
else:
t_nfo[key] = value
ret[pkg_name] = t_nfo
return ret
def _get_http_proxy_url():
"""
Returns the http_proxy_url if proxy_username, proxy_password, proxy_host, and proxy_port
config values are set.
Returns a string.
"""
http_proxy_url = ""
host = __salt__["config.option"]("proxy_host")
port = __salt__["config.option"]("proxy_port")
username = __salt__["config.option"]("proxy_username")
password = __salt__["config.option"]("proxy_password")
# Set http_proxy_url for use in various internet facing actions...eg apt-key adv
if host and port:
if username and password:
http_proxy_url = "http://{}:{}@{}:{}".format(username, password, host, port)
else:
http_proxy_url = "http://{}:{}".format(host, port)
return http_proxy_url
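# Illustrative sketch -- not part of the upstream module. URL shapes produced
# for the two configuration cases (hypothetical config values):
#
#     proxy_host=proxy.example.com, proxy_port=3128
#         -> 'http://proxy.example.com:3128'
#     ...plus proxy_username=user, proxy_password=secret
#         -> 'http://user:[email protected]:3128'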
def list_downloaded(root=None, **kwargs):
"""
.. versionadded:: 3000?
List prefetched packages downloaded by apt in the local disk.
root
operate on a different root directory.
CLI Example:
.. code-block:: bash
salt '*' pkg.list_downloaded
"""
CACHE_DIR = "/var/cache/apt"
if root:
CACHE_DIR = os.path.join(root, os.path.relpath(CACHE_DIR, os.path.sep))
ret = {}
for root, dirnames, filenames in salt.utils.path.os_walk(CACHE_DIR):
for filename in fnmatch.filter(filenames, "*.deb"):
package_path = os.path.join(root, filename)
pkg_info = __salt__["lowpkg.bin_pkg_info"](package_path)
pkg_timestamp = int(os.path.getctime(package_path))
ret.setdefault(pkg_info["name"], {})[pkg_info["version"]] = {
"path": package_path,
"size": os.path.getsize(package_path),
"creation_date_time_t": pkg_timestamp,
"creation_date_time": datetime.datetime.utcfromtimestamp(
pkg_timestamp
).isoformat(),
}
return ret
def services_need_restart(**kwargs):
"""
.. versionadded:: 3003
    List services that use files which have been changed by the
    package manager. These services may need to be restarted.
Requires checkrestart from the debian-goodies package.
CLI Examples:
.. code-block:: bash
salt '*' pkg.services_need_restart
"""
if not salt.utils.path.which_bin(["checkrestart"]):
raise CommandNotFoundError(
"'checkrestart' is needed. It is part of the 'debian-goodies' "
"package which can be installed from official repositories."
)
cmd = ["checkrestart", "--machine", "--package"]
services = set()
cr_output = __salt__["cmd.run_stdout"](cmd, python_shell=False)
for line in cr_output.split("\n"):
if not line.startswith("SERVICE:"):
continue
end_of_name = line.find(",")
service = line[8:end_of_name] # skip "SERVICE:"
services.add(service)
return list(services)
| saltstack/salt | salt/modules/aptpkg.py | Python | apache-2.0 | 105,007 | 0.001 |
import inspect
import sys
import types
from copy import deepcopy
import pickle as pypickle
try:
import cPickle as cpickle
except ImportError:
cpickle = None
if sys.version_info < (2, 6): # pragma: no cover
# cPickle is broken in Python <= 2.5.
# It unsafely and incorrectly uses relative instead of absolute imports,
# so e.g.:
# exceptions.KeyError
# becomes:
# celery.exceptions.KeyError
#
# Your best choice is to upgrade to Python 2.6,
# as while the pure pickle version has worse performance,
# it is the only safe option for older Python versions.
pickle = pypickle
else:
pickle = cpickle or pypickle
# BaseException was introduced in Python 2.5.
try:
_error_bases = (BaseException, )
except NameError: # pragma: no cover
_error_bases = (SystemExit, KeyboardInterrupt)
#: List of base classes we probably don't want to reduce to.
unwanted_base_classes = (StandardError, Exception) + _error_bases + (object, )
if sys.version_info < (2, 5): # pragma: no cover
# Prior to Python 2.5, Exception was an old-style class
def subclass_exception(name, parent, unused):
return types.ClassType(name, (parent,), {})
else:
def subclass_exception(name, parent, module):
return type(name, (parent,), {'__module__': module})
def find_nearest_pickleable_exception(exc):
"""With an exception instance, iterate over its super classes (by mro)
and find the first super exception that is pickleable. It does
not go below :exc:`Exception` (i.e. it skips :exc:`Exception`,
:class:`BaseException` and :class:`object`). If that happens
    you should use :exc:`UnpickleableExceptionWrapper` instead.
:param exc: An exception instance.
    :returns: the nearest exception if it's not :exc:`Exception` or below;
        otherwise returns :const:`None`.
    :rtype: :exc:`Exception`
"""
cls = exc.__class__
getmro_ = getattr(cls, "mro", None)
    # old-style classes don't have mro()
if not getmro_:
        # all Py2.4 exceptions have a baseclass.
if not getattr(cls, "__bases__", ()):
return
# Use inspect.getmro() to traverse bases instead.
getmro_ = lambda: inspect.getmro(cls)
for supercls in getmro_():
if supercls in unwanted_base_classes:
# only BaseException and object, from here on down,
# we don't care about these.
return
try:
exc_args = getattr(exc, "args", [])
superexc = supercls(*exc_args)
pickle.dumps(superexc)
except:
pass
else:
return superexc
def create_exception_cls(name, module, parent=None):
"""Dynamically create an exception class."""
if not parent:
parent = Exception
return subclass_exception(name, parent, module)
class UnpickleableExceptionWrapper(Exception):
"""Wraps unpickleable exceptions.
:param exc_module: see :attr:`exc_module`.
:param exc_cls_name: see :attr:`exc_cls_name`.
:param exc_args: see :attr:`exc_args`
**Example**
.. code-block:: python
>>> try:
... something_raising_unpickleable_exc()
        ... except Exception, e:
        ...     exc = UnpickleableExceptionWrapper(e.__class__.__module__,
        ...                                        e.__class__.__name__,
        ...                                        e.args)
... pickle.dumps(exc) # Works fine.
"""
#: The module of the original exception.
exc_module = None
#: The name of the original exception class.
exc_cls_name = None
#: The arguments for the original exception.
exc_args = None
def __init__(self, exc_module, exc_cls_name, exc_args):
self.exc_module = exc_module
self.exc_cls_name = exc_cls_name
self.exc_args = exc_args
Exception.__init__(self, exc_module, exc_cls_name, exc_args)
@classmethod
def from_exception(cls, exc):
return cls(exc.__class__.__module__,
exc.__class__.__name__,
getattr(exc, "args", []))
def restore(self):
return create_exception_cls(self.exc_cls_name,
self.exc_module)(*self.exc_args)
def get_pickleable_exception(exc):
"""Make sure exception is pickleable."""
nearest = find_nearest_pickleable_exception(exc)
if nearest:
return nearest
try:
pickle.dumps(deepcopy(exc))
except Exception:
return UnpickleableExceptionWrapper.from_exception(exc)
return exc
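# Illustrative sketch -- not part of the original module. Wrapping an
# exception whose class cannot be pickled (for example, a class defined
# inside a function); the names below are hypothetical.
#
#     >>> def make_exc():
#     ...     class LocalError(Exception):
#     ...         pass
#     ...     return LocalError("db down")
#     >>> exc = get_pickleable_exception(make_exc())
#     >>> isinstance(exc, UnpickleableExceptionWrapper)
#     True
#     >>> exc.restore().args
#     ('db down',)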
def get_pickled_exception(exc):
"""Get original exception from exception pickled using
:meth:`get_pickleable_exception`."""
if isinstance(exc, UnpickleableExceptionWrapper):
return exc.restore()
return exc
| frac/celery | celery/utils/serialization.py | Python | bsd-3-clause | 4,836 | 0.000414 |
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2021, Shuup Commerce Inc. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
"""
Utilities for versioned caching and automatic timeout determination.
Versioning works by way of namespaces. Namespaces are the first
colon-separated part of cache keys.
For instance, the cache keys ``price:10``, ``price:20``, and ``price``
all belong to the ``price`` namespace and can be invalidated with
one ``bump_version("price")`` call.
The versions themselves are stored within the cache, within the
``_version`` namespace. (As an implementation detail, this allows one
to invalidate _all_ versioned keys by bumping the version of
``_version``. Very meta!)
"""
from .impl import VersionedCache
__all__ = [
"bump_version",
"clear",
"get",
"set",
"VersionedCache",
]
_default_cache = None
get = None
set = None
bump_version = None
clear = None
def init_cache():
global _default_cache, get, set, bump_version, clear
_default_cache = VersionedCache(using="default")
get = _default_cache.get
set = _default_cache.set
bump_version = _default_cache.bump_version
clear = _default_cache.clear
init_cache()
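# Illustrative usage sketch -- not part of the original module. How the
# namespaced, versioned keys described above are typically used; the key
# names are hypothetical and ``get`` is assumed to default to ``None``.
#
#     from shuup.core import cache
#
#     cache.set("price:10", 42)      # belongs to the "price" namespace
#     cache.get("price:10")          # -> 42
#     cache.bump_version("price")    # invalidates every "price:*" entry
#     cache.get("price:10")          # -> None (version no longer matches)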
| shoopio/shoop | shuup/core/cache/__init__.py | Python | agpl-3.0 | 1,316 | 0 |
from unittest import TestCase
from instructions.pop import Pop
from context import Context
from registers import AX, SP
__author__ = "Sébastien Guimmara"
class TestPop(TestCase):
def test_execute(self):
p = Pop.parse(['ax'])
ctx = Context(None)
ctx.registers.set(SP, 0xFFFE)
self.assertEqual(ctx.registers.get(SP).value, 0xFFFE)
ctx.stack.set(0xFFFE, 0x0022)
p.execute(ctx)
self.assertEqual(ctx.registers.get(AX).value, 0x0022)
self.assertEqual(ctx.registers.get(SP).value, 0xFFFF)
def test_parse_bad_number_of_arguments(self):
self.assertRaises(SyntaxError, lambda: Pop.parse(['ax,', '2']))
def test_parse_ok(self):
self.assertIsInstance(Pop.parse(['ax']), Pop)
| Groutcho/exii | tests/instructions/test_pop.py | Python | gpl-2.0 | 762 | 0 |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.6.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1beta1_network_policy_ingress_rule import V1beta1NetworkPolicyIngressRule
class TestV1beta1NetworkPolicyIngressRule(unittest.TestCase):
""" V1beta1NetworkPolicyIngressRule unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1beta1NetworkPolicyIngressRule(self):
"""
Test V1beta1NetworkPolicyIngressRule
"""
model = kubernetes.client.models.v1beta1_network_policy_ingress_rule.V1beta1NetworkPolicyIngressRule()
if __name__ == '__main__':
unittest.main()
| skuda/client-python | kubernetes/test/test_v1beta1_network_policy_ingress_rule.py | Python | apache-2.0 | 991 | 0.004036 |
"""
A response received to a Swagger API operation.
"""
import logging
__all__ = ["Response"]
log = logging.getLogger(__name__)
class CaseInsensitiveDict(dict):
"""Dictionary with case insensitive lookup of string keys."""
def __getitem__(self, key):
return {k.lower(): v for k, v in self.items()}[key.lower()]
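# Illustrative sketch -- not part of the original module. Lookups are
# case-insensitive, while stored keys keep their original case:
#
#     >>> d = CaseInsensitiveDict({"Content-Type": ["application/json"]})
#     >>> d["content-type"]
#     ['application/json']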
class Response:
"""A response received to a Swagger API operation.
:param raw_response: The raw response.
:type raw_response: pyswagger.io.Response
"""
def __init__(self, raw_response):
self._raw_response = raw_response
@property
def status(self):
"""HTTP status code of the response.
:rtype: int
"""
return self._raw_response.status
@property
def body(self):
"""Parsed response body converted to objects via the codec in use."""
return self._raw_response.data
@property
def raw(self):
"""Raw response body.
:rtype: bytes
"""
return self._raw_response.raw
@property
def headers(self):
"""HTTP headers received on the response.
Example format is ``{'Content-Type': [xxx, xxx]}``
Header field names are case insensitive (See
http://www.ietf.org/rfc/rfc2616.txt)
:rtype: dict(str, list(str))
"""
return CaseInsensitiveDict(self._raw_response.header)
| olipratt/swagger-conformance | swaggerconformance/response.py | Python | mit | 1,388 | 0 |
# misc.py
# Copyright (C) 2012-2016 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
"""
Assorted utility functions for yum.
"""
from __future__ import print_function, absolute_import
from __future__ import unicode_literals
from dnf.pycomp import base64_decodebytes, basestring, unicode
from stat import *
import libdnf.utils
import dnf.const
import dnf.crypto
import dnf.exceptions
import dnf.i18n
import errno
import glob
import io
import os
import os.path
import pwd
import re
import shutil
import tempfile
_default_checksums = ['sha256']
_re_compiled_glob_match = None
def re_glob(s):
""" Tests if a string is a shell wildcard. """
global _re_compiled_glob_match
if _re_compiled_glob_match is None:
_re_compiled_glob_match = re.compile(r'[*?]|\[.+\]').search
return _re_compiled_glob_match(s)
_re_compiled_full_match = None
def re_full_search_needed(s):
""" Tests if a string needs a full nevra match, instead of just name. """
global _re_compiled_full_match
if _re_compiled_full_match is None:
# A glob, or a "." or "-" separator, followed by something (the ".")
one = re.compile(r'.*([-.*?]|\[.+\]).').match
# Any epoch, for envra
two = re.compile('[0-9]+:').match
_re_compiled_full_match = (one, two)
for rec in _re_compiled_full_match:
if rec(s):
return True
return False
def get_default_chksum_type():
return _default_checksums[0]
class GenericHolder(object):
"""Generic Holder class used to hold other objects of known types
It exists purely to be able to do object.somestuff, object.someotherstuff
or object[key] and pass object to another function that will
understand it"""
def __init__(self, iter=None):
self.__iter = iter
def __iter__(self):
if self.__iter is not None:
return iter(self[self.__iter])
def __getitem__(self, item):
if hasattr(self, item):
return getattr(self, item)
else:
raise KeyError(item)
def all_lists(self):
"""Return a dictionary of all lists."""
return {key: list_ for key, list_ in vars(self).items()
if type(list_) is list}
def merge_lists(self, other):
""" Concatenate the list attributes from 'other' to ours. """
for (key, val) in other.all_lists().items():
vars(self).setdefault(key, []).extend(val)
return self
def procgpgkey(rawkey):
'''Convert ASCII-armored GPG key to binary
'''
# Normalise newlines
rawkey = re.sub(b'\r\n?', b'\n', rawkey)
# Extract block
block = io.BytesIO()
inblock = 0
pastheaders = 0
for line in rawkey.split(b'\n'):
if line.startswith(b'-----BEGIN PGP PUBLIC KEY BLOCK-----'):
inblock = 1
elif inblock and line.strip() == b'':
pastheaders = 1
elif inblock and line.startswith(b'-----END PGP PUBLIC KEY BLOCK-----'):
# Hit the end of the block, get out
break
elif pastheaders and line.startswith(b'='):
# Hit the CRC line, don't include this and stop
break
elif pastheaders:
block.write(line + b'\n')
# Decode and return
return base64_decodebytes(block.getvalue())
def keyInstalled(ts, keyid, timestamp):
'''
    Return whether the GPG key described by the given keyid and timestamp is
    installed in the rpmdb.
The keyid and timestamp should both be passed as integers.
The ts is an rpm transaction set object
Return values:
- -1 key is not installed
- 0 key with matching ID and timestamp is installed
- 1 key with matching ID is installed but has an older timestamp
- 2 key with matching ID is installed but has a newer timestamp
No effort is made to handle duplicates. The first matching keyid is used to
calculate the return result.
'''
# Search
for hdr in ts.dbMatch('name', 'gpg-pubkey'):
if hdr['version'] == keyid:
installedts = int(hdr['release'], 16)
if installedts == timestamp:
return 0
elif installedts < timestamp:
return 1
else:
return 2
return -1
def import_key_to_pubring(rawkey, keyid, gpgdir=None, make_ro_copy=True):
if not os.path.exists(gpgdir):
os.makedirs(gpgdir)
with dnf.crypto.pubring_dir(gpgdir), dnf.crypto.Context() as ctx:
# import the key
with open(os.path.join(gpgdir, 'gpg.conf'), 'wb') as fp:
fp.write(b'')
ctx.op_import(rawkey)
if make_ro_copy:
rodir = gpgdir + '-ro'
if not os.path.exists(rodir):
os.makedirs(rodir, mode=0o755)
for f in glob.glob(gpgdir + '/*'):
basename = os.path.basename(f)
ro_f = rodir + '/' + basename
shutil.copy(f, ro_f)
os.chmod(ro_f, 0o755)
# yes it is this stupid, why do you ask?
opts = """lock-never
no-auto-check-trustdb
trust-model direct
no-expensive-trust-checks
no-permission-warning
preserve-permissions
"""
with open(os.path.join(rodir, 'gpg.conf'), 'w', 0o755) as fp:
fp.write(opts)
return True
def getCacheDir():
"""return a path to a valid and safe cachedir - only used when not running
as root or when --tempcache is set"""
uid = os.geteuid()
try:
usertup = pwd.getpwuid(uid)
username = dnf.i18n.ucd(usertup[0])
prefix = '%s-%s-' % (dnf.const.PREFIX, username)
except KeyError:
prefix = '%s-%s-' % (dnf.const.PREFIX, uid)
# check for /var/tmp/prefix-* -
dirpath = '%s/%s*' % (dnf.const.TMPDIR, prefix)
cachedirs = sorted(glob.glob(dirpath))
for thisdir in cachedirs:
stats = os.lstat(thisdir)
if S_ISDIR(stats[0]) and S_IMODE(stats[0]) == 448 and stats[4] == uid:
return thisdir
# make the dir (tempfile.mkdtemp())
cachedir = tempfile.mkdtemp(prefix=prefix, dir=dnf.const.TMPDIR)
return cachedir
def seq_max_split(seq, max_entries):
""" Given a seq, split into a list of lists of length max_entries each. """
ret = []
num = len(seq)
seq = list(seq) # Trying to use a set/etc. here is bad
beg = 0
while num > max_entries:
end = beg + max_entries
ret.append(seq[beg:end])
beg += max_entries
num -= max_entries
ret.append(seq[beg:])
return ret
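# Illustrative example -- not part of the original module:
#
#     >>> seq_max_split([1, 2, 3, 4, 5], 2)
#     [[1, 2], [3, 4], [5]]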
def unlink_f(filename):
""" Call os.unlink, but don't die if the file isn't there. This is the main
difference between "rm -f" and plain "rm". """
try:
os.unlink(filename)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def stat_f(filename, ignore_EACCES=False):
""" Call os.stat(), but don't die if the file isn't there. Returns None. """
try:
return os.stat(filename)
except OSError as e:
if e.errno in (errno.ENOENT, errno.ENOTDIR):
return None
if ignore_EACCES and e.errno == errno.EACCES:
return None
raise
def _getloginuid():
""" Get the audit-uid/login-uid, if available. os.getuid() is returned
instead if there was a problem. Note that no caching is done here. """
# We might normally call audit.audit_getloginuid(), except that requires
# importing all of the audit module. And it doesn't work anyway: BZ 518721
try:
with open("/proc/self/loginuid") as fo:
data = fo.read()
return int(data)
except (IOError, ValueError):
return os.getuid()
_cached_getloginuid = None
def getloginuid():
""" Get the audit-uid/login-uid, if available. os.getuid() is returned
instead if there was a problem. The value is cached, so you don't
have to save it. """
global _cached_getloginuid
if _cached_getloginuid is None:
_cached_getloginuid = _getloginuid()
return _cached_getloginuid
def decompress(filename, dest=None, check_timestamps=False):
"""take a filename and decompress it into the same relative location.
When the compression type is not recognized (or file is not compressed),
the content of the file is copied to the destination"""
if dest:
out = dest
else:
out = None
dot_pos = filename.rfind('.')
if dot_pos > 0:
ext = filename[dot_pos:]
if ext in ('.zck', '.xz', '.bz2', '.gz', '.lzma', '.zst'):
out = filename[:dot_pos]
if out is None:
raise dnf.exceptions.MiscError("Could not determine destination filename")
if check_timestamps:
fi = stat_f(filename)
fo = stat_f(out)
if fi and fo and fo.st_mtime == fi.st_mtime:
return out
try:
# libdnf.utils.decompress either decompress file to the destination or
# copy the content if the compression type is not recognized
libdnf.utils.decompress(filename, out, 0o644)
except RuntimeError as e:
raise dnf.exceptions.MiscError(str(e))
if check_timestamps and fi:
os.utime(out, (fi.st_mtime, fi.st_mtime))
return out
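# Usage sketch (illustrative, not in the original source; the paths below are
# hypothetical):
#   >>> decompress('/tmp/repodata/primary.xml.gz')
#   '/tmp/repodata/primary.xml'    # recognized extension is stripped
#   >>> decompress('/tmp/notes.txt', dest='/tmp/copy.txt')
#   '/tmp/copy.txt'                # unrecognized type: content is copied
# With check_timestamps=True the call returns early when the destination
# already carries the same mtime as the source file.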
def read_in_items_from_dot_dir(thisglob, line_as_list=True):
""" Takes a glob of a dir (like /etc/foo.d/\\*.foo) returns a list of all
the lines in all the files matching that glob, ignores comments and blank
lines, optional paramater 'line_as_list tells whether to treat each line
as a space or comma-separated list, defaults to True.
"""
results = []
for fname in glob.glob(thisglob):
with open(fname) as f:
for line in f:
if re.match(r'\s*(#|$)', line):
continue
line = line.rstrip() # no more trailing \n's
line = line.lstrip() # be nice
if not line:
continue
if line_as_list:
line = line.replace('\n', ' ')
line = line.replace(',', ' ')
results.extend(line.split())
continue
results.append(line)
return results
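# Illustrative example (not in the original source), assuming a hypothetical
# file /etc/foo.d/bar.foo containing:
#   # a comment
#   alpha beta
#   gamma,delta
# then:
#   >>> read_in_items_from_dot_dir('/etc/foo.d/*.foo')
#   ['alpha', 'beta', 'gamma', 'delta']
#   >>> read_in_items_from_dot_dir('/etc/foo.d/*.foo', line_as_list=False)
#   ['alpha beta', 'gamma,delta']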
| rpm-software-management/dnf | dnf/yum/misc.py | Python | gpl-2.0 | 11,251 | 0.001867 |
# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2015 Stephen Warren
# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
import pytest
import u_boot_utils
@pytest.mark.buildconfigspec('cmd_memory')
def test_md(u_boot_console):
"""Test that md reads memory as expected, and that memory can be modified
using the mw command."""
ram_base = u_boot_utils.find_ram_base(u_boot_console)
addr = '%08x' % ram_base
val = 'a5f09876'
expected_response = addr + ': ' + val
u_boot_console.run_command('mw ' + addr + ' 0 10')
response = u_boot_console.run_command('md ' + addr + ' 10')
assert(not (expected_response in response))
u_boot_console.run_command('mw ' + addr + ' ' + val)
response = u_boot_console.run_command('md ' + addr + ' 10')
assert(expected_response in response)
@pytest.mark.buildconfigspec('cmd_memory')
def test_md_repeat(u_boot_console):
"""Test command repeat (via executing an empty command) operates correctly
for "md"; the command must repeat and dump an incrementing address."""
ram_base = u_boot_utils.find_ram_base(u_boot_console)
addr_base = '%08x' % ram_base
words = 0x10
addr_repeat = '%08x' % (ram_base + (words * 4))
u_boot_console.run_command('md %s %x' % (addr_base, words))
response = u_boot_console.run_command('')
expected_response = addr_repeat + ': '
assert(expected_response in response)
| Digilent/u-boot-digilent | test/py/tests/test_md.py | Python | gpl-2.0 | 1,426 | 0.001403 |
# -*- coding: utf-8 -*-
# This file is part of Gtfslib-python.
#
# Gtfslib-python is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Gtfslib-python is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with gtfslib-python. If not, see <http://www.gnu.org/licenses/>.
"""
@author: Laurent GRÉGOIRE <[email protected]>
"""
from sqlalchemy.orm import mapper, relationship, backref, clear_mappers
from sqlalchemy.orm.relationships import foreign
from sqlalchemy.sql.schema import Column, MetaData, Table, ForeignKey, \
ForeignKeyConstraint, Index
from sqlalchemy.sql.sqltypes import String, Integer, Float, Date, Boolean
from gtfslib.model import FeedInfo, Agency, Stop, Route, Calendar, CalendarDate, \
Trip, StopTime, Transfer, Shape, ShapePoint, Zone, FareAttribute, FareRule
# ORM Mappings
class _Orm(object):
def __init__(self, engine, schema=None):
self._metadata = MetaData(schema=schema)
self.mappers = []
_feedinfo_id_column = Column('feed_id', String, primary_key=True)
_agency_feed_id_column = Column('feed_id', String, ForeignKey('feed_info.feed_id'), primary_key=True)
_route_feed_id_column = Column('feed_id', String, ForeignKey('feed_info.feed_id'), primary_key=True)
_feedinfo_mapper = Table('feed_info', self._metadata,
_feedinfo_id_column,
Column('feed_publisher_name', String),
Column('feed_publisher_url', String),
Column('feed_contact_email', String), # Non-standard (yet) field
Column('feed_contact_url', String), # Non-standard (yet) field
Column('feed_lang', String),
Column('feed_start_date', Date),
Column('feed_end_date', Date),
Column('feed_version', String))
self.mappers.append(mapper(FeedInfo, _feedinfo_mapper, properties={
}))
_agency_id_column = Column('agency_id', String, primary_key=True)
_route_agency_id_column = Column('agency_id', String, nullable=False)
_agency_mapper = Table('agency', self._metadata,
_agency_feed_id_column,
_agency_id_column,
Column('agency_name', String, nullable=False),
Column('agency_url', String, nullable=False),
Column('agency_timezone', String, nullable=False),
Column('agency_lang', String),
Column('agency_phone', String),
Column('agency_fare_url', String),
Column('agency_email', String))
self.mappers.append(mapper(Agency, _agency_mapper, properties={
'feed' : relationship(FeedInfo, backref=backref('agencies', cascade="all,delete-orphan"),
primaryjoin=_feedinfo_id_column == foreign(_agency_feed_id_column))
}))
_zone_feed_id_column = Column('feed_id', String, ForeignKey('feed_info.feed_id'), primary_key=True)
_zone_id_column = Column('zone_id', String, primary_key=True)
_zone_mapper = Table('zones', self._metadata,
_zone_feed_id_column,
_zone_id_column)
self.mappers.append(mapper(Zone, _zone_mapper, properties={
'feed' : relationship(FeedInfo, backref=backref('zones', cascade="all,delete-orphan"),
primaryjoin=_feedinfo_id_column == foreign(_zone_feed_id_column))
}))
_stop_feed_id_column = Column('feed_id', String, ForeignKey('feed_info.feed_id'), primary_key=True)
_stop_id_column = Column('stop_id', String, primary_key=True)
_stop_parent_id_column = Column('parent_station_id', String, nullable=True)
_stop_zone_id_column = Column('zone_id', String, nullable=True)
_stop_mapper = Table('stops', self._metadata,
_stop_feed_id_column,
_stop_id_column,
_stop_parent_id_column,
Column('location_type', Integer, nullable=False),
Column('stop_name', String, nullable=False),
Column('stop_lat', Float, nullable=False),
Column('stop_lon', Float, nullable=False),
Column('wheelchair_boarding', Integer, nullable=False),
Column('stop_code', String),
Column('stop_desc', String),
_stop_zone_id_column,
Column('stop_url', String),
Column('stop_timezone', String),
ForeignKeyConstraint(['feed_id', 'parent_station_id'], ['stops.feed_id', 'stops.stop_id']),
ForeignKeyConstraint(['feed_id', 'zone_id'], ['zones.feed_id', 'zones.zone_id']),
                             # TODO Make these indexes configurable
Index('idx_stops_lat', 'stop_lat'),
Index('idx_stops_lon', 'stop_lon'),
Index('idx_stops_code', 'feed_id', 'stop_code'),
Index('idx_stops_zone', 'feed_id', 'zone_id'),
Index('idx_stops_parent', 'feed_id', 'parent_station_id'))
self.mappers.append(mapper(Stop, _stop_mapper, properties={
'feed' : relationship(FeedInfo, backref=backref('stops', cascade="all,delete-orphan"),
primaryjoin=_feedinfo_id_column == foreign(_stop_feed_id_column)),
'sub_stops' : relationship(Stop, remote_side=[_stop_feed_id_column, _stop_parent_id_column], uselist=True,
primaryjoin=(_stop_parent_id_column == foreign(_stop_id_column)) & (_stop_feed_id_column == _stop_feed_id_column)),
'parent_station' : relationship(Stop, remote_side=[_stop_feed_id_column, _stop_id_column],
primaryjoin=(_stop_id_column == foreign(_stop_parent_id_column)) & (_stop_feed_id_column == _stop_feed_id_column)),
'zone' : relationship(Zone, backref=backref('stops', cascade="all,delete-orphan"),
primaryjoin=(_zone_id_column == foreign(_stop_zone_id_column)) & (_zone_feed_id_column == _stop_feed_id_column))
}))
_transfer_feed_id_column = Column('feed_id', String, ForeignKey('feed_info.feed_id'), primary_key=True)
_transfer_from_stop_id_column = Column('from_stop_id', String, primary_key=True)
_transfer_to_stop_id_column = Column('to_stop_id', String, primary_key=True)
_transfer_mapper = Table('transfers', self._metadata,
_transfer_feed_id_column,
_transfer_from_stop_id_column,
_transfer_to_stop_id_column,
Column('transfer_type', Integer, nullable=False),
Column('min_transfer_time', Integer),
ForeignKeyConstraint(['feed_id', 'from_stop_id'], ['stops.feed_id', 'stops.stop_id']),
ForeignKeyConstraint(['feed_id', 'to_stop_id'], ['stops.feed_id', 'stops.stop_id']),
Index('idx_transfer_from', 'feed_id', 'from_stop_id'),
Index('idx_transfer_to', 'feed_id', 'to_stop_id'))
self.mappers.append(mapper(Transfer, _transfer_mapper, properties={
'feed' : relationship(FeedInfo, backref=backref('transfers', cascade="all,delete-orphan"),
primaryjoin=_feedinfo_id_column == foreign(_transfer_feed_id_column)),
'from_stop' : relationship(Stop, backref=backref('from_transfers', cascade='all', uselist=True), uselist=False,
primaryjoin=(_transfer_from_stop_id_column == foreign(_stop_id_column)) & (_transfer_feed_id_column == _stop_feed_id_column)),
'to_stop' : relationship(Stop, backref=backref('to_transfers', cascade='all', uselist=True), uselist=False,
primaryjoin=(_transfer_to_stop_id_column == foreign(_stop_id_column)) & (_transfer_feed_id_column == _stop_feed_id_column))
}))
_route_id_column = Column('route_id', String, primary_key=True)
_route_mapper = Table('routes', self._metadata,
_route_feed_id_column,
_route_id_column,
_route_agency_id_column,
Column('route_short_name', String),
Column('route_long_name', String),
Column('route_desc', String),
Column('route_type', Integer, nullable=False),
Column('route_url', String),
Column('route_color', String),
Column('route_text_color', String),
ForeignKeyConstraint(['feed_id', 'agency_id'], ['agency.feed_id', 'agency.agency_id']),
Index('idx_routes_agency', 'feed_id', 'agency_id'),
Index('idx_routes_short_name', 'feed_id', 'route_short_name'),
Index('idx_routes_type', 'feed_id', 'route_type'))
self.mappers.append(mapper(Route, _route_mapper, properties={
'feed' : relationship(FeedInfo, backref=backref('routes', cascade="all,delete-orphan"),
primaryjoin=_feedinfo_id_column == foreign(_route_feed_id_column)),
'agency' : relationship(Agency, backref=backref('routes', cascade="all,delete-orphan"),
primaryjoin=(_agency_id_column == foreign(_route_agency_id_column)) & (_agency_feed_id_column == _route_feed_id_column))
}))
_calendar_feed_id_column = Column('feed_id', String, ForeignKey('feed_info.feed_id'), primary_key=True)
_calendar_id_column = Column('service_id', String, primary_key=True)
_calendar_mapper = Table('calendar', self._metadata,
_calendar_feed_id_column,
_calendar_id_column
)
self.mappers.append(mapper(Calendar, _calendar_mapper, properties={
'feed' : relationship(FeedInfo, backref=backref('calendars', cascade="all,delete-orphan"),
primaryjoin=_feedinfo_id_column == foreign(_calendar_feed_id_column))
}))
_calendar_date_mapper = Table('calendar_dates', self._metadata,
Column('feed_id', String, ForeignKey('feed_info.feed_id'), primary_key=True),
Column('service_id', String, primary_key=True),
Column('date', Date, primary_key=True),
ForeignKeyConstraint(['feed_id', 'service_id'], ['calendar.feed_id', 'calendar.service_id']),
# TOCHECK It seems a composite primary key on (a,b,c) does not need indexing on left elements,
                                      # such as (a) and (a,b); but it does need one on (a,c), for example.
Index('idx_calendar_dates_date', 'feed_id', 'date'))
self.mappers.append(mapper(CalendarDate, _calendar_date_mapper, properties={
'calendar' : relationship(Calendar, backref=backref('dates', cascade="all,delete-orphan"))
}))
_shape_feed_id_column = Column('feed_id', String, ForeignKey('feed_info.feed_id'), primary_key=True)
_shape_id_column = Column('shape_id', String, primary_key=True)
_shape_mapper = Table('shapes', self._metadata,
_shape_feed_id_column,
_shape_id_column
)
self.mappers.append(mapper(Shape, _shape_mapper, properties={
'feed' : relationship(FeedInfo, backref=backref('shapes', cascade="all,delete-orphan"),
primaryjoin=_feedinfo_id_column == foreign(_shape_feed_id_column))
}))
_shape_pt_feed_id_column = Column('feed_id', String, ForeignKey('feed_info.feed_id'), primary_key=True)
_shape_pt_shape_id_column = Column('shape_id', String, primary_key=True)
_shape_pt_seq_column = Column('shape_pt_sequence', Integer, primary_key=True)
_shape_pt_mapper = Table('shape_pts', self._metadata,
_shape_pt_feed_id_column,
_shape_pt_shape_id_column,
_shape_pt_seq_column,
Column('shape_dist_traveled', Float, nullable=False),
Column('shape_pt_lat', Float, nullable=False),
Column('shape_pt_lon', Float, nullable=False),
ForeignKeyConstraint(['feed_id', 'shape_id'], ['shapes.feed_id', 'shapes.shape_id']),
Index('idx_shape_pt_shape', 'feed_id', 'shape_id'))
self.mappers.append(mapper(ShapePoint, _shape_pt_mapper, properties={
# Note: here we specify foreign() on shape_pt feed_id column as there is no ownership relation of feed to shape_pts
'shape' : relationship(Shape, backref=backref('points', order_by=_shape_pt_seq_column, cascade="all,delete-orphan"),
primaryjoin=(_shape_id_column == foreign(_shape_pt_shape_id_column)) & (_shape_feed_id_column == foreign(_shape_pt_feed_id_column)))
}))
_trip_feed_id_column = Column('feed_id', String, ForeignKey('feed_info.feed_id'), primary_key=True)
_trip_id_column = Column('trip_id', String, primary_key=True)
_trip_route_id_column = Column('route_id', String, nullable=False)
_trip_calendar_id_column = Column('service_id', String, nullable=False)
_trip_shape_id_column = Column('shape_id', String, nullable=True)
_trip_mapper = Table('trips', self._metadata,
_trip_feed_id_column,
_trip_id_column,
_trip_route_id_column,
_trip_calendar_id_column,
_trip_shape_id_column,
Column('wheelchair_accessible', Integer, nullable=False),
Column('bikes_allowed', Integer, nullable=False),
Column('exact_times', Integer, nullable=False),
Column('frequency_generated', Boolean, nullable=False),
Column('trip_headsign', String),
Column('trip_short_name', String),
Column('direction_id', Integer),
Column('block_id', String),
ForeignKeyConstraint(['feed_id', 'route_id'], ['routes.feed_id', 'routes.route_id']),
ForeignKeyConstraint(['feed_id', 'service_id'], ['calendar.feed_id', 'calendar.service_id']),
ForeignKeyConstraint(['feed_id', 'shape_id'], ['shapes.feed_id', 'shapes.shape_id']),
Index('idx_trips_route', 'feed_id', 'route_id'),
Index('idx_trips_service', 'feed_id', 'service_id'))
self.mappers.append(mapper(Trip, _trip_mapper, properties={
'feed' : relationship(FeedInfo, backref=backref('trips', cascade="all,delete-orphan"),
primaryjoin=_feedinfo_id_column == foreign(_trip_feed_id_column)),
'route' : relationship(Route, backref=backref('trips', cascade="all,delete-orphan"),
primaryjoin=(_route_id_column == foreign(_trip_route_id_column)) & (_route_feed_id_column == _trip_feed_id_column)),
'calendar' : relationship(Calendar, backref=backref('trips', cascade="all,delete-orphan"),
primaryjoin=(_calendar_id_column == foreign(_trip_calendar_id_column)) & (_calendar_feed_id_column == _trip_feed_id_column)),
'shape' : relationship(Shape, backref=backref('trips', cascade="all,delete-orphan"),
primaryjoin=(_shape_id_column == foreign(_trip_shape_id_column)) & (_shape_feed_id_column == _trip_feed_id_column))
}))
_stop_times_feed_id_column = Column('feed_id', String, ForeignKey('feed_info.feed_id'), primary_key=True)
_stop_times_trip_id_column = Column('trip_id', String, primary_key=True)
_stop_seq_column = Column('stop_sequence', Integer, primary_key=True)
_stop_times_stop_id_column = Column('stop_id', String, nullable=False)
_stop_times_mapper = Table('stop_times', self._metadata,
_stop_times_feed_id_column,
_stop_times_trip_id_column,
_stop_seq_column,
_stop_times_stop_id_column,
Column('arrival_time', Integer, nullable=True),
Column('departure_time', Integer, nullable=True),
Column('interpolated', Boolean, nullable=False),
Column('shape_dist_traveled', Float, nullable=False),
Column('timepoint', Integer, nullable=False),
Column('pickup_type', Integer, nullable=False),
Column('drop_off_type', Integer, nullable=False),
Column('stop_headsign', String),
ForeignKeyConstraint(['feed_id', 'trip_id'], ['trips.feed_id', 'trips.trip_id']),
ForeignKeyConstraint(['feed_id', 'stop_id'], ['stops.feed_id', 'stops.stop_id']),
Index('idx_stop_times_stop', 'feed_id', 'stop_id'),
Index('idx_stop_times_sequence', 'feed_id', 'stop_sequence'))
self.mappers.append(mapper(StopTime, _stop_times_mapper, properties={
# Note: here we specify foreign() on stop_times feed_id column as there is no ownership relation of feed to stop_times
'trip' : relationship(Trip, backref=backref('stop_times', order_by=_stop_seq_column, cascade="all,delete-orphan"),
primaryjoin=(_trip_id_column == foreign(_stop_times_trip_id_column)) & (_trip_feed_id_column == foreign(_stop_times_feed_id_column))),
'stop' : relationship(Stop, backref=backref('stop_times', cascade="all,delete-orphan"),
primaryjoin=(_stop_id_column == foreign(_stop_times_stop_id_column)) & (_stop_feed_id_column == _stop_times_feed_id_column)),
}))
_fareattr_feed_id_column = Column('feed_id', String, ForeignKey('feed_info.feed_id'), primary_key=True)
_fareattr_id_column = Column('fare_id', String, primary_key=True)
_fareattr_mapper = Table('fare_attributes', self._metadata,
_fareattr_feed_id_column,
_fareattr_id_column,
Column('price', Float, nullable=False),
Column('currency_type', String, nullable=False),
Column('payment_method', Integer, nullable=False),
Column('transfers', Integer),
Column('transfer_duration', Integer))
self.mappers.append(mapper(FareAttribute, _fareattr_mapper, properties={
'feed' : relationship(FeedInfo, backref=backref('fare_attributes', cascade="all,delete-orphan"),
primaryjoin=_feedinfo_id_column == foreign(_fareattr_feed_id_column))
}))
_farerule_feed_id_column = Column('feed_id', String, ForeignKey('feed_info.feed_id'))
_farerule_id_column = Column('fare_id', String)
# Use a dummy autoincrement numerical field for primary key,
# as a primary key is mandatory, and the natural primary key
# for the model (feed_id, fare_id, route+zones ids) do have
# some fields that can be null.
_farerule_rule_id_column = Column('fare_rule_id', Integer, primary_key=True, autoincrement=True)
_farerule_route_id_column = Column('route_id', String, nullable=True)
_farerule_origin_id_column = Column('origin_id', String, nullable=True)
_farerule_destination_id_column = Column('destination_id', String, nullable=True)
_farerule_contains_id_column = Column('contains_id', String, nullable=True)
_farerule_mapper = Table('fare_rules', self._metadata,
_farerule_feed_id_column,
_farerule_id_column,
_farerule_rule_id_column,
_farerule_route_id_column,
_farerule_origin_id_column,
_farerule_destination_id_column,
_farerule_contains_id_column,
ForeignKeyConstraint(['feed_id', 'fare_id'], ['fare_attributes.feed_id', 'fare_attributes.fare_id']))
self.mappers.append(mapper(FareRule, _farerule_mapper, properties={
'fare_attribute' : relationship(FareAttribute, backref=backref('fare_rules', cascade="all,delete-orphan")),
'route' : relationship(Route, backref=backref('fare_rules', cascade="all,delete-orphan"),
primaryjoin=(_route_id_column == foreign(_farerule_route_id_column)) & (_route_feed_id_column == _farerule_feed_id_column)),
'origin' : relationship(Zone, backref=backref('origin_fare_rules', cascade="all,delete-orphan"),
primaryjoin=(_zone_id_column == foreign(_farerule_origin_id_column)) & (_zone_feed_id_column == _farerule_feed_id_column)),
'destination' : relationship(Zone, backref=backref('destination_fare_rules', cascade="all,delete-orphan"),
primaryjoin=(_zone_id_column == foreign(_farerule_destination_id_column)) & (_zone_feed_id_column == _farerule_feed_id_column)),
'contains' : relationship(Zone, backref=backref('contains_fare_rules', cascade="all,delete-orphan"),
primaryjoin=(_zone_id_column == foreign(_farerule_contains_id_column)) & (_zone_feed_id_column == _farerule_feed_id_column))
}))
self._metadata.create_all(engine)
self._class_for_table = {}
self._table_for_class = {}
for _mapper in self.mappers:
self._class_for_table[_mapper.mapped_table.name] = _mapper.class_
self._table_for_class[_mapper.class_] = _mapper.mapped_table.name
def class_for_table(self, tablename):
"""Return the class associated to a given table name.
We implement ourselves this method as there does not seem a reliable way
of getting this information from SqlAlchemy itself w/o some brittle hacking."""
return self._class_for_table.get(tablename)
def table_for_class(self, clazz):
"""Return the table name associated to a give entity class."""
return self._table_for_class.get(clazz)
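# Minimal usage sketch (illustrative, not part of the original module); the
# engine below is an assumption, any SQLAlchemy engine would do:
#   engine = create_engine('sqlite:///:memory:')
#   orm = _Orm(engine)
#   orm.class_for_table('stops')   # -> Stop
#   orm.table_for_class(Trip)      # -> 'trips'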
| afimb/gtfslib-python | gtfslib/orm.py | Python | gpl-3.0 | 22,956 | 0.008059 |
#!/usr/bin/python3
#-*- coding:utf-8 -*-
from functools import wraps
def xxx(func):
@wraps(func)
def my(n):
func(n*100)
return
return my
@xxx
def abc(n):
print(n)
if __name__ == '__main__':
abc(10)
abc.__wrapped__(10)
xx = abc.__wrapped__
xx(1234)
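# Note (added for reference, not in the original file): abc(10) goes through
# the decorator, so it calls the wrapped function with 10*100 and prints 1000;
# abc.__wrapped__(10) and xx(1234) call the undecorated function directly and
# print 10 and 1234. functools.wraps is what exposes __wrapped__.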
| yuncliu/Learn | python/decorator.py | Python | bsd-3-clause | 299 | 0.013378 |
#!/bin/env python
# vim: expandtab:tabstop=4:shiftwidth=4
"""
This script is used to check the psad logs for positive port scanning traffic
and report its findings to Zabbix.
"""
from __future__ import print_function
from datetime import datetime
import os
import re
import boto3
import botocore
import yaml
# Reason: disable pylint import-error because our modules aren't loaded on jenkins.
# pylint: disable=import-error
from openshift_tools.monitoring.zagg_sender import ZaggSender
class CheckStatus(object):
""" Class to check for issues found in psad logs. """
@staticmethod
def check_psad(log_message, logfile):
""" Check number of occurrences of issues in the specified logfile.
Returns:
An int representing the number of issues found.
"""
total_issues = 0
if os.path.isfile(logfile):
with open(logfile) as open_file:
                stripped_line = [line.rstrip() for line in open_file]
for line in stripped_line:
line_found = re.search(log_message, line, re.IGNORECASE)
if line_found:
total_issues += 1
return total_issues
else:
raise ValueError(logfile + ' does not exist.')
@staticmethod
def search_logfile(logfile):
""" Look for positive scan results. """
results = []
with open(logfile) as open_file:
between = False
for line in open_file:
tline = line.strip()
if tline == 'iptables auto-blocked IPs:':
between = True
elif tline == 'Total protocol packet counters:':
between = False
elif between and tline != '':
results.append(tline)
issues = len(results)
return issues
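    # Illustrative sketch (not part of the original script): search_logfile()
    # counts the non-empty lines between the two psad section markers, so a
    # status file containing the (hypothetical) excerpt
    #
    #   iptables auto-blocked IPs:
    #       198.51.100.7
    #       203.0.113.42
    #   Total protocol packet counters:
    #
    # would make it return 2.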
@staticmethod
def get_config(config_path):
""" Open and read config data from the variables file. """
config_settings = {}
if os.path.isfile(config_path):
with open(config_path, 'r') as scan_config:
                yaml_config = yaml.safe_load(scan_config)
if yaml_config['opsad_creds_file']:
config_settings['opsad_creds_file'] = yaml_config['opsad_creds_file']
if yaml_config['opsad_s3_bucket']:
config_settings['opsad_s3_bucket'] = yaml_config['opsad_s3_bucket']
if yaml_config['opsad_log_file']:
config_settings['opsad_log_file'] = yaml_config['opsad_log_file']
if yaml_config['opsad_host_name']:
config_settings['opsad_host_name'] = yaml_config['opsad_host_name']
if yaml_config['opsad_cluster_name']:
config_settings['opsad_cluster_name'] = yaml_config['opsad_cluster_name']
return config_settings
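    # Illustrative sketch (not part of the original script): get_config()
    # expects a YAML file shaped roughly like the following (all values here
    # are made-up examples):
    #
    #   opsad_creds_file: /root/.aws/credentials
    #   opsad_s3_bucket: example-scan-reports
    #   opsad_log_file: /var/log/psad/status.out
    #   opsad_host_name: node-1.example.com
    #   opsad_cluster_name: example-cluster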
@staticmethod
def upload_data(config_dict):
""" Use the current AWS_PROFILE to upload files to the specified bucket.
Raises:
A ValueError if the specified bucket can not be found.
"""
logfile = config_dict['opsad_log_file']
hostname = config_dict['opsad_host_name']
credsfile = config_dict['opsad_creds_file']
bucket = config_dict['opsad_s3_bucket']
cluster = config_dict['opsad_cluster_name']
os.environ["AWS_SHARED_CREDENTIALS_FILE"] = credsfile
s3_session = boto3.resource('s3')
exists = True
try:
s3_session.meta.client.head_bucket(Bucket=bucket)
except botocore.exceptions.ClientError as client_exception:
error_code = int(client_exception.response['Error']['Code'])
if error_code == 404:
exists = False
if exists:
s3_client = boto3.resource('s3')
s3_bucket = s3_client.Bucket(bucket)
if os.path.isfile(logfile):
print('\nUploading logfile to %s bucket.' % bucket)
with open(logfile) as open_file:
log_data = open_file.read()
bucket_path = cluster + '/' + \
hostname + '/' + \
datetime.utcnow().strftime('%Y') + '/' + \
datetime.utcnow().strftime('%m') + '/' + \
datetime.utcnow().strftime('%d') + '_status.txt'
s3_bucket.put_object(Key=bucket_path, Body=log_data)
else:
raise ValueError(logfile + ' does not exist.')
else:
raise ValueError(bucket + ' does not exist.')
#pylint: disable=no-member
def main(self):
""" Main function. """
zag = ZaggSender()
config_dict = self.get_config('/etc/openshift_tools/scanreport_config.yml')
logfile = config_dict['opsad_log_file']
result_status = self.search_logfile(logfile)
check = 'psad.found.scanner'
zag.add_zabbix_keys({check: result_status})
zag.send_metrics()
if result_status > 0:
self.upload_data(config_dict)
if __name__ == '__main__':
PSAD_STATUS = CheckStatus()
PSAD_STATUS.main()
| blrm/openshift-tools | docker/oso-psad/src/scripts/check_psad.py | Python | apache-2.0 | 5,322 | 0.004134 |
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
"""
Bitbake "Fetch" implementation for osc (Opensuse build service client).
Based on the svn "Fetch" implementation.
"""
import os
import sys
import logging
import bb
from bb import data
from bb.fetch2 import FetchMethod
from bb.fetch2 import FetchError
from bb.fetch2 import MissingParameterError
from bb.fetch2 import runfetchcmd
from bb.fetch2 import logger
class Osc(FetchMethod):
"""Class to fetch a module or modules from Opensuse build server
repositories."""
def supports(self, url, ud, d):
"""
Check to see if a given url can be fetched with osc.
"""
return ud.type in ['osc']
def urldata_init(self, ud, d):
if not "module" in ud.parm:
raise MissingParameterError('module', ud.url)
ud.module = ud.parm["module"]
# Create paths to osc checkouts
relpath = self._strip_leading_slashes(ud.path)
ud.pkgdir = os.path.join(data.expand('${OSCDIR}', d), ud.host)
ud.moddir = os.path.join(ud.pkgdir, relpath, ud.module)
if 'rev' in ud.parm:
ud.revision = ud.parm['rev']
else:
pv = data.getVar("PV", d, 0)
rev = bb.fetch2.srcrev_internal_helper(ud, d)
if rev and rev != True:
ud.revision = rev
else:
ud.revision = ""
ud.localfile = data.expand('%s_%s_%s.tar.gz' % (ud.module.replace('/', '.'), ud.path.replace('/', '.'), ud.revision), d)
def _buildosccommand(self, ud, d, command):
"""
        Build up an osc commandline based on ud
command is "fetch", "update", "info"
"""
basecmd = data.expand('${FETCHCMD_osc}', d)
proto = ud.parm.get('protocol', 'ocs')
options = []
config = "-c %s" % self.generate_config(ud, d)
if ud.revision:
options.append("-r %s" % ud.revision)
coroot = self._strip_leading_slashes(ud.path)
if command == "fetch":
osccmd = "%s %s co %s/%s %s" % (basecmd, config, coroot, ud.module, " ".join(options))
elif command == "update":
osccmd = "%s %s up %s" % (basecmd, config, " ".join(options))
else:
raise FetchError("Invalid osc command %s" % command, ud.url)
return osccmd
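    # Illustrative sketch (not part of the original fetcher): with FETCHCMD_osc
    # expanded to plain "osc", a URL path of "/repo/path", module "mymod" and
    # rev "42" (all hypothetical values), the "fetch" branch above produces
    # roughly:
    #   osc -c <OSCDIR>/oscrc co repo/path/mymod -r 42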
def download(self, loc, ud, d):
"""
Fetch url
"""
logger.debug(2, "Fetch: checking for module directory '" + ud.moddir + "'")
if os.access(os.path.join(data.expand('${OSCDIR}', d), ud.path, ud.module), os.R_OK):
oscupdatecmd = self._buildosccommand(ud, d, "update")
logger.info("Update "+ loc)
# update sources there
os.chdir(ud.moddir)
logger.debug(1, "Running %s", oscupdatecmd)
bb.fetch2.check_network_access(d, oscupdatecmd, ud.url)
runfetchcmd(oscupdatecmd, d)
else:
oscfetchcmd = self._buildosccommand(ud, d, "fetch")
logger.info("Fetch " + loc)
# check out sources there
bb.utils.mkdirhier(ud.pkgdir)
os.chdir(ud.pkgdir)
logger.debug(1, "Running %s", oscfetchcmd)
bb.fetch2.check_network_access(d, oscfetchcmd, ud.url)
runfetchcmd(oscfetchcmd, d)
os.chdir(os.path.join(ud.pkgdir + ud.path))
# tar them up to a defined filename
runfetchcmd("tar -czf %s %s" % (ud.localpath, ud.module), d, cleanup = [ud.localpath])
def supports_srcrev(self):
return False
def generate_config(self, ud, d):
"""
Generate a .oscrc to be used for this run.
"""
config_path = os.path.join(data.expand('${OSCDIR}', d), "oscrc")
if (os.path.exists(config_path)):
os.remove(config_path)
f = open(config_path, 'w')
f.write("[general]\n")
f.write("apisrv = %s\n" % ud.host)
f.write("scheme = http\n")
f.write("su-wrapper = su -c\n")
f.write("build-root = %s\n" % data.expand('${WORKDIR}', d))
f.write("urllist = http://moblin-obs.jf.intel.com:8888/build/%(project)s/%(repository)s/%(buildarch)s/:full/%(name)s.rpm\n")
f.write("extra-pkgs = gzip\n")
f.write("\n")
f.write("[%s]\n" % ud.host)
f.write("user = %s\n" % ud.parm["user"])
f.write("pass = %s\n" % ud.parm["pswd"])
f.close()
return config_path
| martiert/bitbake | lib/bb/fetch2/osc.py | Python | gpl-2.0 | 4,506 | 0.00466 |
from data.conditional_import import (
dump,
# dumps,
# load,
# loads,
) | PyCQA/astroid | tests/testdata/python3/data/conditional.py | Python | lgpl-2.1 | 87 | 0.011494 |
# coding: utf-8
# Module: actions
# Created on: 27.07.2015
# Author: Roman Miroshnychenko aka Roman V.M. ([email protected])
# Licence: GPL v.3: http://www.gnu.org/copyleft/gpl.html
import os
import xbmcgui
import xbmcplugin
from simpleplugin import Plugin
import json_requests as jsonrq
from buffering import buffer_torrent, stream_torrent, add_torrent, get_videofiles
plugin = Plugin()
_ = plugin.initialize_gettext()
icons = os.path.join(plugin.path, 'resources', 'icons')
commands = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'commands.py')
def _play(path):
"""
Play a videofile
:param path:
:return:
"""
plugin.log_notice('Path to play: {0}'.format(path))
return plugin.resolve_url(path, succeeded=bool(path))
@plugin.action()
def root():
"""
Plugin root
"""
return [{'label': _('Play .torrent file...'),
'thumb': os.path.join(icons, 'play.png'),
'url': plugin.get_url(action='select_torrent', target='play')},
{'label': _('Download torrent from .torrent file...'),
'thumb': os.path.join(icons, 'down.png'),
'url': plugin.get_url(action='select_torrent', target='download'),
'is_folder': False},
{'label': _('Torrents'),
'thumb': plugin.icon,
'url': plugin.get_url(action='torrents')}]
@plugin.action()
def select_torrent(params):
"""
Select .torrent file to play
:param params:
:return:
"""
torrent = xbmcgui.Dialog().browse(1, _('Select .torrent file'), 'video', mask='.torrent')
if torrent:
plugin.log_notice('Torrent selected: {0}'.format(torrent))
if params['target'] == 'play':
return list_files({'torrent': torrent})
else:
download_torrent({'torrent': torrent})
@plugin.action('play')
def play_torrent(params):
"""
Play torrent
:param params:
:return:
"""
file_index = params.get('file_index')
if file_index is not None and file_index != 'dialog':
file_index = int(file_index)
return _play(buffer_torrent(params['torrent'], file_index))
@plugin.action()
def play_file(params):
"""
Stream a file from torrent by its index
The torrent must be already added to the session!
:param params:
:return:
"""
return _play(stream_torrent(int(params['file_index']), params['info_hash']))
@plugin.action('download')
def download_torrent(params):
"""
Add torrent for downloading
:param params:
:return:
"""
jsonrq.add_torrent(params['torrent'], False)
xbmcgui.Dialog().notification('YATP', _('Torrent added for downloading'), plugin.icon, 3000)
@plugin.action()
def torrents():
"""
Display the list of torrents in the session
"""
torrent_list = sorted(jsonrq.get_all_torrent_info(), key=lambda i: i['added_time'], reverse=True)
for torrent in torrent_list:
if torrent['state'] == 'downloading':
label = '[COLOR=red]{0}[/COLOR]'.format(torrent['name'].encode('utf-8'))
elif torrent['state'] == 'seeding':
label = '[COLOR=green]{0}[/COLOR]'.format(torrent['name'].encode('utf-8'))
elif torrent['state'] == 'paused':
label = '[COLOR=gray]{0}[/COLOR]'.format(torrent['name'].encode('utf-8'))
else:
label = '[COLOR=blue]{0}[/COLOR]'.format(torrent['name'].encode('utf-8'))
item = {'label': label,
'url': plugin.get_url(action='show_files', info_hash=torrent['info_hash']),
'is_folder': True}
if torrent['state'] == 'downloading':
item['thumb'] = os.path.join(icons, 'down.png')
elif torrent['state'] == 'seeding':
item['thumb'] = os.path.join(icons, 'up.png')
elif torrent['state'] == 'paused':
item['thumb'] = os.path.join(icons, 'pause.png')
else:
item['thumb'] = os.path.join(icons, 'question.png')
context_menu = [(_('Pause all torrents'),
'RunScript({commands},pause_all)'.format(commands=commands)),
(_('Resume all torrents'),
'RunScript({commands},resume_all)'.format(commands=commands)),
(_('Delete torrent'),
'RunScript({commands},delete,{info_hash})'.format(commands=commands,
info_hash=torrent['info_hash'])),
(_('Delete torrent and files'),
'RunScript({commands},delete_with_files,{info_hash})'.format(commands=commands,
info_hash=torrent['info_hash'])),
(_('Torrent info'),
'RunScript({commands},show_info,{info_hash})'.format(commands=commands,
info_hash=torrent['info_hash'])),
]
if torrent['state'] == 'paused':
context_menu.insert(0, (_('Resume torrent'),
'RunScript({commands},resume,{info_hash})'.format(commands=commands,
info_hash=torrent['info_hash'])))
else:
context_menu.insert(0, (_('Pause torrent'),
'RunScript({commands},pause,{info_hash})'.format(commands=commands,
info_hash=torrent['info_hash'])))
if torrent['state'] == 'incomplete':
context_menu.append((_('Complete download'),
'RunScript({commands},restore_finished,{info_hash})'.format(
commands=commands,
info_hash=torrent['info_hash'])))
item['context_menu'] = context_menu
yield item
def _build_file_list(files, info_hash):
"""
Create the list of videofiles in a torrent
:param files:
:param info_hash:
:return:
"""
videofiles = get_videofiles(files)
for file_ in videofiles:
ext = os.path.splitext(file_[1].lower())[1]
if ext == '.avi':
thumb = os.path.join(icons, 'avi.png')
elif ext == '.mp4':
thumb = os.path.join(icons, 'mp4.png')
elif ext == '.mkv':
thumb = os.path.join(icons, 'mkv.png')
elif ext == '.mov':
thumb = os.path.join(icons, 'mov.png')
else:
thumb = os.path.join(icons, 'play.png')
yield {'label': '{name} [{size}{unit}]'.format(name=file_[1].encode('utf-8'),
size=file_[2] / 1048576,
unit=_('MB')),
'thumb': thumb,
'url': plugin.get_url(action='play_file',
info_hash=info_hash,
file_index=file_[0]),
'is_playable': True,
'info': {'video': {'size': file_[2]}},
}
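# Illustrative sketch (not part of the original plugin): for a (hypothetical)
# videofile tuple (0, u'movie.mkv', 1572864000) the generator above yields a
# listing item roughly like
#   {'label': 'movie.mkv [1500MB]', 'thumb': '<icons>/mkv.png',
#    'url': plugin.get_url(action='play_file', info_hash=..., file_index=0),
#    'is_playable': True, 'info': {'video': {'size': 1572864000}}}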
@plugin.action()
def list_files(params):
"""
Add a torrent to the session and display the list of files in a torrent
:param params:
:return:
"""
torrent_data = add_torrent(params['torrent'])
if torrent_data is not None:
return plugin.create_listing(_build_file_list(torrent_data['files'], torrent_data['info_hash']),
cache_to_disk=True,
sort_methods=(xbmcplugin.SORT_METHOD_LABEL, xbmcplugin.SORT_METHOD_SIZE))
xbmcgui.Dialog().notification(plugin.id, _('Playback cancelled.'), plugin.icon, 3000)
return []
@plugin.action()
def show_files(params):
"""
Display the list of videofiles
:param params:
:return:
"""
return plugin.create_listing(_build_file_list(jsonrq.get_files(params['info_hash']), params['info_hash']),
cache_to_disk=True,
sort_methods=(xbmcplugin.SORT_METHOD_LABEL, xbmcplugin.SORT_METHOD_SIZE))
| kreatorkodi/repository.torrentbr | plugin.video.yatp/libs/client/actions.py | Python | gpl-2.0 | 8,310 | 0.003851 |
# -*- coding: utf-8 -*-
# Copyright 2007-2016 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import warnings
from hyperspy.misc.test_utils import ignore_warning, assert_warns, all_warnings
def warnsA():
warnings.warn("Warning A!", UserWarning)
def warnsB():
warnings.warn("Warning B!", DeprecationWarning)
def warnsC():
warnings.warn("Warning C!")
def test_ignore_full_message():
with all_warnings():
warnings.simplefilter("error")
with ignore_warning(message="Warning A!"):
warnsA()
with ignore_warning(message="Warning B!"):
warnsB()
with ignore_warning(message="Warning C!"):
warnsC()
def test_ignore_partial_message():
with all_warnings():
warnings.simplefilter("error")
with ignore_warning(message="Warning"):
warnsA()
warnsB()
warnsC()
def test_ignore_regex_message():
with all_warnings():
warnings.simplefilter("error")
with ignore_warning(message="Warning .?!"):
warnsA()
warnsB()
warnsC()
def test_ignore_message_fails():
with all_warnings():
warnings.simplefilter("error")
with ignore_warning(message="Warning [AB]!"):
warnsA()
warnsB()
try:
warnsC()
except UserWarning as e:
assert str(e) == "Warning C!"
else:
raise ValueError("Expected warning to give error!")
with all_warnings():
warnings.simplefilter("error")
with ignore_warning(message="Warning A! Too much"):
try:
warnsA()
except UserWarning as e:
assert str(e) == "Warning A!"
else:
raise ValueError("Expected warning to give error!")
def test_ignore_type():
with all_warnings():
warnings.simplefilter("error")
with ignore_warning(category=UserWarning):
warnsA()
warnsC()
with ignore_warning(category=DeprecationWarning):
warnsB()
def test_ignore_type_fails():
with all_warnings():
warnings.simplefilter("error")
with ignore_warning(category=UserWarning):
try:
warnsB()
except DeprecationWarning as e:
assert str(e) == "Warning B!"
else:
raise ValueError("Expected warning to give error!")
def test_assert_warns_full_message():
with all_warnings():
warnings.simplefilter("error")
with assert_warns(message="Warning A!"):
warnsA()
with assert_warns(message="Warning B!"):
warnsB()
with assert_warns(message="Warning C!"):
warnsC()
with assert_warns(message=["Warning A!", "Warning B!", "Warning C!"]):
warnsA()
warnsB()
warnsC()
def test_assert_warns_partial_message():
with all_warnings():
warnings.simplefilter("error")
with assert_warns(message="Warning"):
warnsA()
warnsB()
warnsC()
def test_assert_warns_regex_message():
with all_warnings():
warnings.simplefilter("error")
with assert_warns(message="Warning .?!"):
warnsA()
warnsB()
warnsC()
def test_assert_warns_message_fails():
with all_warnings():
warnings.simplefilter("error")
try:
with assert_warns(message="Warning [AB]!"):
warnsC()
except ValueError:
pass
else:
raise AssertionError("ValueError expected!")
with all_warnings():
warnings.simplefilter("error")
try:
with assert_warns(message="Warning A! Too much"):
warnsA()
except ValueError:
pass
else:
raise ValueError("ValueError expected!")
def test_assert_warns_type():
with all_warnings():
warnings.simplefilter("error")
with assert_warns(category=UserWarning):
warnsA()
warnsC()
with assert_warns(category=DeprecationWarning):
warnsB()
def test_assert_warns_type_fails():
with all_warnings():
warnings.simplefilter("error")
try:
with assert_warns(category=UserWarning):
warnsB()
except ValueError:
pass
else:
raise ValueError("Expected warning to give error!")
| sem-geologist/hyperspy | hyperspy/tests/misc/test_test_utils.py | Python | gpl-3.0 | 5,176 | 0 |
import os
import random
import time
import hashlib
import warnings
from tempfile import mkdtemp
from shutil import rmtree
from six.moves.urllib.parse import urlparse
from six import BytesIO
from twisted.trial import unittest
from twisted.internet import defer
from scrapy.pipelines.files import FilesPipeline, FSFilesStore, S3FilesStore, GCSFilesStore
from scrapy.item import Item, Field
from scrapy.http import Request, Response
from scrapy.settings import Settings
from scrapy.utils.python import to_bytes
from scrapy.utils.test import assert_aws_environ, get_s3_content_and_delete
from scrapy.utils.test import assert_gcs_environ, get_gcs_content_and_delete
from scrapy.utils.boto import is_botocore
from tests import mock
def _mocked_download_func(request, info):
response = request.meta.get('response')
return response() if callable(response) else response
class FilesPipelineTestCase(unittest.TestCase):
def setUp(self):
self.tempdir = mkdtemp()
self.pipeline = FilesPipeline.from_settings(Settings({'FILES_STORE': self.tempdir}))
self.pipeline.download_func = _mocked_download_func
self.pipeline.open_spider(None)
def tearDown(self):
rmtree(self.tempdir)
def test_file_path(self):
file_path = self.pipeline.file_path
self.assertEqual(file_path(Request("https://dev.mydeco.com/mydeco.pdf")),
'full/c9b564df929f4bc635bdd19fde4f3d4847c757c5.pdf')
self.assertEqual(file_path(Request("http://www.maddiebrown.co.uk///catalogue-items//image_54642_12175_95307.txt")),
'full/4ce274dd83db0368bafd7e406f382ae088e39219.txt')
self.assertEqual(file_path(Request("https://dev.mydeco.com/two/dirs/with%20spaces%2Bsigns.doc")),
'full/94ccc495a17b9ac5d40e3eabf3afcb8c2c9b9e1a.doc')
self.assertEqual(file_path(Request("http://www.dfsonline.co.uk/get_prod_image.php?img=status_0907_mdm.jpg")),
'full/4507be485f38b0da8a0be9eb2e1dfab8a19223f2.jpg')
self.assertEqual(file_path(Request("http://www.dorma.co.uk/images/product_details/2532/")),
'full/97ee6f8a46cbbb418ea91502fd24176865cf39b2')
self.assertEqual(file_path(Request("http://www.dorma.co.uk/images/product_details/2532")),
'full/244e0dd7d96a3b7b01f54eded250c9e272577aa1')
self.assertEqual(file_path(Request("http://www.dorma.co.uk/images/product_details/2532"),
response=Response("http://www.dorma.co.uk/images/product_details/2532"),
info=object()),
'full/244e0dd7d96a3b7b01f54eded250c9e272577aa1')
def test_fs_store(self):
assert isinstance(self.pipeline.store, FSFilesStore)
self.assertEqual(self.pipeline.store.basedir, self.tempdir)
path = 'some/image/key.jpg'
fullpath = os.path.join(self.tempdir, 'some', 'image', 'key.jpg')
self.assertEqual(self.pipeline.store._get_filesystem_path(path), fullpath)
@defer.inlineCallbacks
def test_file_not_expired(self):
item_url = "http://example.com/file.pdf"
item = _create_item_with_files(item_url)
patchers = [
mock.patch.object(FilesPipeline, 'inc_stats', return_value=True),
mock.patch.object(FSFilesStore, 'stat_file', return_value={
'checksum': 'abc', 'last_modified': time.time()}),
mock.patch.object(FilesPipeline, 'get_media_requests',
return_value=[_prepare_request_object(item_url)])
]
for p in patchers:
p.start()
result = yield self.pipeline.process_item(item, None)
self.assertEqual(result['files'][0]['checksum'], 'abc')
for p in patchers:
p.stop()
@defer.inlineCallbacks
def test_file_expired(self):
item_url = "http://example.com/file2.pdf"
item = _create_item_with_files(item_url)
patchers = [
mock.patch.object(FSFilesStore, 'stat_file', return_value={
'checksum': 'abc',
'last_modified': time.time() - (self.pipeline.expires * 60 * 60 * 24 * 2)}),
mock.patch.object(FilesPipeline, 'get_media_requests',
return_value=[_prepare_request_object(item_url)]),
mock.patch.object(FilesPipeline, 'inc_stats', return_value=True)
]
for p in patchers:
p.start()
result = yield self.pipeline.process_item(item, None)
self.assertNotEqual(result['files'][0]['checksum'], 'abc')
for p in patchers:
p.stop()
class DeprecatedFilesPipeline(FilesPipeline):
def file_key(self, url):
media_guid = hashlib.sha1(to_bytes(url)).hexdigest()
media_ext = os.path.splitext(url)[1]
return 'empty/%s%s' % (media_guid, media_ext)
class DeprecatedFilesPipelineTestCase(unittest.TestCase):
def setUp(self):
self.tempdir = mkdtemp()
def init_pipeline(self, pipeline_class):
self.pipeline = pipeline_class.from_settings(Settings({'FILES_STORE': self.tempdir}))
self.pipeline.download_func = _mocked_download_func
self.pipeline.open_spider(None)
def test_default_file_key_method(self):
self.init_pipeline(FilesPipeline)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
self.assertEqual(self.pipeline.file_key("https://dev.mydeco.com/mydeco.pdf"),
'full/c9b564df929f4bc635bdd19fde4f3d4847c757c5.pdf')
self.assertEqual(len(w), 1)
self.assertTrue('file_key(url) method is deprecated' in str(w[-1].message))
def test_overridden_file_key_method(self):
self.init_pipeline(DeprecatedFilesPipeline)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
self.assertEqual(self.pipeline.file_path(Request("https://dev.mydeco.com/mydeco.pdf")),
'empty/c9b564df929f4bc635bdd19fde4f3d4847c757c5.pdf')
self.assertEqual(len(w), 1)
self.assertTrue('file_key(url) method is deprecated' in str(w[-1].message))
def tearDown(self):
rmtree(self.tempdir)
class FilesPipelineTestCaseFields(unittest.TestCase):
def test_item_fields_default(self):
class TestItem(Item):
name = Field()
file_urls = Field()
files = Field()
for cls in TestItem, dict:
url = 'http://www.example.com/files/1.txt'
item = cls({'name': 'item1', 'file_urls': [url]})
pipeline = FilesPipeline.from_settings(Settings({'FILES_STORE': 's3://example/files/'}))
requests = list(pipeline.get_media_requests(item, None))
self.assertEqual(requests[0].url, url)
results = [(True, {'url': url})]
pipeline.item_completed(results, item, None)
self.assertEqual(item['files'], [results[0][1]])
def test_item_fields_override_settings(self):
class TestItem(Item):
name = Field()
files = Field()
stored_file = Field()
for cls in TestItem, dict:
url = 'http://www.example.com/files/1.txt'
item = cls({'name': 'item1', 'files': [url]})
pipeline = FilesPipeline.from_settings(Settings({
'FILES_STORE': 's3://example/files/',
'FILES_URLS_FIELD': 'files',
'FILES_RESULT_FIELD': 'stored_file'
}))
requests = list(pipeline.get_media_requests(item, None))
self.assertEqual(requests[0].url, url)
results = [(True, {'url': url})]
pipeline.item_completed(results, item, None)
self.assertEqual(item['stored_file'], [results[0][1]])
class FilesPipelineTestCaseCustomSettings(unittest.TestCase):
default_cls_settings = {
"EXPIRES": 90,
"FILES_URLS_FIELD": "file_urls",
"FILES_RESULT_FIELD": "files"
}
file_cls_attr_settings_map = {
("EXPIRES", "FILES_EXPIRES", "expires"),
("FILES_URLS_FIELD", "FILES_URLS_FIELD", "files_urls_field"),
("FILES_RESULT_FIELD", "FILES_RESULT_FIELD", "files_result_field")
}
def setUp(self):
self.tempdir = mkdtemp()
def tearDown(self):
rmtree(self.tempdir)
def _generate_fake_settings(self, prefix=None):
def random_string():
return "".join([chr(random.randint(97, 123)) for _ in range(10)])
settings = {
"FILES_EXPIRES": random.randint(100, 1000),
"FILES_URLS_FIELD": random_string(),
"FILES_RESULT_FIELD": random_string(),
"FILES_STORE": self.tempdir
}
if not prefix:
return settings
return {prefix.upper() + "_" + k if k != "FILES_STORE" else k: v for k, v in settings.items()}
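    # Illustrative note (not part of the original tests): with a prefix such as
    # 'mypipe', the dict comprehension above renames keys to e.g.
    # 'MYPIPE_FILES_EXPIRES', while 'FILES_STORE' is deliberately left
    # unprefixed so every pipeline subclass still finds its store path.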
def _generate_fake_pipeline(self):
class UserDefinedFilePipeline(FilesPipeline):
EXPIRES = 1001
FILES_URLS_FIELD = "alfa"
FILES_RESULT_FIELD = "beta"
return UserDefinedFilePipeline
def test_different_settings_for_different_instances(self):
"""
If there are different instances with different settings they should keep
different settings.
"""
custom_settings = self._generate_fake_settings()
another_pipeline = FilesPipeline.from_settings(Settings(custom_settings))
one_pipeline = FilesPipeline(self.tempdir)
for pipe_attr, settings_attr, pipe_ins_attr in self.file_cls_attr_settings_map:
default_value = self.default_cls_settings[pipe_attr]
self.assertEqual(getattr(one_pipeline, pipe_attr), default_value)
custom_value = custom_settings[settings_attr]
self.assertNotEqual(default_value, custom_value)
self.assertEqual(getattr(another_pipeline, pipe_ins_attr), custom_value)
def test_subclass_attributes_preserved_if_no_settings(self):
"""
If subclasses override class attributes and there are no special settings those values should be kept.
"""
pipe_cls = self._generate_fake_pipeline()
pipe = pipe_cls.from_settings(Settings({"FILES_STORE": self.tempdir}))
for pipe_attr, settings_attr, pipe_ins_attr in self.file_cls_attr_settings_map:
custom_value = getattr(pipe, pipe_ins_attr)
self.assertNotEqual(custom_value, self.default_cls_settings[pipe_attr])
self.assertEqual(getattr(pipe, pipe_ins_attr), getattr(pipe, pipe_attr))
def test_subclass_attrs_preserved_custom_settings(self):
"""
        If file settings are defined but no subclass-prefixed settings are,
        the plain file settings should still be applied.
"""
pipeline_cls = self._generate_fake_pipeline()
settings = self._generate_fake_settings()
pipeline = pipeline_cls.from_settings(Settings(settings))
for pipe_attr, settings_attr, pipe_ins_attr in self.file_cls_attr_settings_map:
value = getattr(pipeline, pipe_ins_attr)
setting_value = settings.get(settings_attr)
self.assertNotEqual(value, self.default_cls_settings[pipe_attr])
self.assertEqual(value, setting_value)
def test_no_custom_settings_for_subclasses(self):
"""
If there are no settings for subclass and no subclass attributes, pipeline should use
attributes of base class.
"""
class UserDefinedFilesPipeline(FilesPipeline):
pass
user_pipeline = UserDefinedFilesPipeline.from_settings(Settings({"FILES_STORE": self.tempdir}))
for pipe_attr, settings_attr, pipe_ins_attr in self.file_cls_attr_settings_map:
# Values from settings for custom pipeline should be set on pipeline instance.
custom_value = self.default_cls_settings.get(pipe_attr.upper())
self.assertEqual(getattr(user_pipeline, pipe_ins_attr), custom_value)
def test_custom_settings_for_subclasses(self):
"""
If there are custom settings for subclass and NO class attributes, pipeline should use custom
settings.
"""
class UserDefinedFilesPipeline(FilesPipeline):
pass
prefix = UserDefinedFilesPipeline.__name__.upper()
settings = self._generate_fake_settings(prefix=prefix)
user_pipeline = UserDefinedFilesPipeline.from_settings(Settings(settings))
for pipe_attr, settings_attr, pipe_inst_attr in self.file_cls_attr_settings_map:
# Values from settings for custom pipeline should be set on pipeline instance.
custom_value = settings.get(prefix + "_" + settings_attr)
self.assertNotEqual(custom_value, self.default_cls_settings[pipe_attr])
self.assertEqual(getattr(user_pipeline, pipe_inst_attr), custom_value)
def test_custom_settings_and_class_attrs_for_subclasses(self):
"""
If there are custom settings for subclass AND class attributes
setting keys are preferred and override attributes.
"""
pipeline_cls = self._generate_fake_pipeline()
prefix = pipeline_cls.__name__.upper()
settings = self._generate_fake_settings(prefix=prefix)
user_pipeline = pipeline_cls.from_settings(Settings(settings))
for pipe_cls_attr, settings_attr, pipe_inst_attr in self.file_cls_attr_settings_map:
custom_value = settings.get(prefix + "_" + settings_attr)
self.assertNotEqual(custom_value, self.default_cls_settings[pipe_cls_attr])
self.assertEqual(getattr(user_pipeline, pipe_inst_attr), custom_value)
def test_cls_attrs_with_DEFAULT_prefix(self):
class UserDefinedFilesPipeline(FilesPipeline):
DEFAULT_FILES_RESULT_FIELD = "this"
DEFAULT_FILES_URLS_FIELD = "that"
pipeline = UserDefinedFilesPipeline.from_settings(Settings({"FILES_STORE": self.tempdir}))
self.assertEqual(pipeline.files_result_field, "this")
self.assertEqual(pipeline.files_urls_field, "that")
def test_user_defined_subclass_default_key_names(self):
"""Test situation when user defines subclass of FilesPipeline,
but uses attribute names for default pipeline (without prefixing
them with pipeline class name).
"""
settings = self._generate_fake_settings()
class UserPipe(FilesPipeline):
pass
pipeline_cls = UserPipe.from_settings(Settings(settings))
for pipe_attr, settings_attr, pipe_inst_attr in self.file_cls_attr_settings_map:
expected_value = settings.get(settings_attr)
self.assertEqual(getattr(pipeline_cls, pipe_inst_attr),
expected_value)
class TestS3FilesStore(unittest.TestCase):
@defer.inlineCallbacks
def test_persist(self):
assert_aws_environ()
uri = os.environ.get('S3_TEST_FILE_URI')
if not uri:
raise unittest.SkipTest("No S3 URI available for testing")
data = b"TestS3FilesStore: \xe2\x98\x83"
buf = BytesIO(data)
meta = {'foo': 'bar'}
path = ''
store = S3FilesStore(uri)
yield store.persist_file(
path, buf, info=None, meta=meta,
headers={'Content-Type': 'image/png'})
s = yield store.stat_file(path, info=None)
self.assertIn('last_modified', s)
self.assertIn('checksum', s)
self.assertEqual(s['checksum'], '3187896a9657a28163abb31667df64c8')
u = urlparse(uri)
content, key = get_s3_content_and_delete(
u.hostname, u.path[1:], with_key=True)
self.assertEqual(content, data)
if is_botocore():
self.assertEqual(key['Metadata'], {'foo': 'bar'})
self.assertEqual(
key['CacheControl'], S3FilesStore.HEADERS['Cache-Control'])
self.assertEqual(key['ContentType'], 'image/png')
else:
self.assertEqual(key.metadata, {'foo': 'bar'})
self.assertEqual(
key.cache_control, S3FilesStore.HEADERS['Cache-Control'])
self.assertEqual(key.content_type, 'image/png')
class TestGCSFilesStore(unittest.TestCase):
@defer.inlineCallbacks
def test_persist(self):
assert_gcs_environ()
uri = os.environ.get('GCS_TEST_FILE_URI')
if not uri:
raise unittest.SkipTest("No GCS URI available for testing")
data = b"TestGCSFilesStore: \xe2\x98\x83"
buf = BytesIO(data)
meta = {'foo': 'bar'}
path = 'full/filename'
store = GCSFilesStore(uri)
yield store.persist_file(path, buf, info=None, meta=meta, headers=None)
s = yield store.stat_file(path, info=None)
self.assertIn('last_modified', s)
self.assertIn('checksum', s)
self.assertEqual(s['checksum'], 'zc2oVgXkbQr2EQdSdw3OPA==')
u = urlparse(uri)
content, blob = get_gcs_content_and_delete(u.hostname, u.path[1:]+path)
self.assertEqual(content, data)
self.assertEqual(blob.metadata, {'foo': 'bar'})
self.assertEqual(blob.cache_control, GCSFilesStore.CACHE_CONTROL)
self.assertEqual(blob.content_type, 'application/octet-stream')
class ItemWithFiles(Item):
file_urls = Field()
files = Field()
def _create_item_with_files(*files):
item = ItemWithFiles()
item['file_urls'] = files
return item
def _prepare_request_object(item_url):
return Request(
item_url,
meta={'response': Response(item_url, status=200, body=b'data')})
if __name__ == "__main__":
unittest.main()
| Parlin-Galanodel/scrapy | tests/test_pipeline_files.py | Python | bsd-3-clause | 17,823 | 0.002749 |
from tests.test_pip import (reset_env, run_pip,
_create_test_package, _change_test_package_version)
from tests.local_repos import local_checkout
def test_install_editable_from_git_with_https():
"""
Test cloning from Git with https.
"""
reset_env()
result = run_pip('install', '-e',
'%s#egg=pip-test-package' %
local_checkout('git+https://github.com/pypa/pip-test-package.git'),
expect_error=True)
result.assert_installed('pip-test-package', with_files=['.git'])
def test_git_with_sha1_revisions():
"""
Git backend should be able to install from SHA1 revisions
"""
env = reset_env()
version_pkg_path = _create_test_package(env)
_change_test_package_version(env, version_pkg_path)
sha1 = env.run('git', 'rev-parse', 'HEAD~1', cwd=version_pkg_path).stdout.strip()
run_pip('install', '-e', '%s@%s#egg=version_pkg' % ('git+file://' + version_pkg_path.abspath.replace('\\', '/'), sha1))
version = env.run('version_pkg')
assert '0.1' in version.stdout, version.stdout
def test_git_with_branch_name_as_revision():
"""
Git backend should be able to install from branch names
"""
env = reset_env()
version_pkg_path = _create_test_package(env)
env.run('git', 'checkout', '-b', 'test_branch', expect_stderr=True, cwd=version_pkg_path)
_change_test_package_version(env, version_pkg_path)
run_pip('install', '-e', '%s@test_branch#egg=version_pkg' % ('git+file://' + version_pkg_path.abspath.replace('\\', '/')))
version = env.run('version_pkg')
assert 'some different version' in version.stdout
def test_git_with_tag_name_as_revision():
"""
Git backend should be able to install from tag names
"""
env = reset_env()
version_pkg_path = _create_test_package(env)
env.run('git', 'tag', 'test_tag', expect_stderr=True, cwd=version_pkg_path)
_change_test_package_version(env, version_pkg_path)
run_pip('install', '-e', '%s@test_tag#egg=version_pkg' % ('git+file://' + version_pkg_path.abspath.replace('\\', '/')))
version = env.run('version_pkg')
assert '0.1' in version.stdout
def test_git_with_tag_name_and_update():
"""
Test cloning a git repository and updating to a different version.
"""
reset_env()
result = run_pip('install', '-e', '%s#egg=pip-test-package' %
local_checkout('git+http://github.com/pypa/pip-test-package.git'),
expect_error=True)
result.assert_installed('pip-test-package', with_files=['.git'])
result = run_pip('install', '--global-option=--version', '-e',
'%[email protected]#egg=pip-test-package' %
local_checkout('git+http://github.com/pypa/pip-test-package.git'),
expect_error=True)
assert '0.1.2' in result.stdout
def test_git_branch_should_not_be_changed():
"""
Editable installations should not change branch
related to issue #32 and #161
"""
env = reset_env()
run_pip('install', '-e', '%s#egg=pip-test-package' %
local_checkout('git+http://github.com/pypa/pip-test-package.git'),
expect_error=True)
source_dir = env.venv_path/'src'/'pip-test-package'
result = env.run('git', 'branch', cwd=source_dir)
assert '* master' in result.stdout, result.stdout
def test_git_with_non_editable_unpacking():
"""
Test cloning a git repository from a non-editable URL with a given tag.
"""
reset_env()
result = run_pip('install', '--global-option=--version', local_checkout(
'git+http://github.com/pypa/[email protected]#egg=pip-test-package'
), expect_error=True)
assert '0.1.2' in result.stdout
def test_git_with_editable_where_egg_contains_dev_string():
"""
    Test cloning a git repository from an editable URL that contains the string "dev".
"""
reset_env()
result = run_pip('install', '-e', '%s#egg=django-devserver' %
local_checkout('git+git://github.com/dcramer/django-devserver.git'))
result.assert_installed('django-devserver', with_files=['.git'])
def test_git_with_non_editable_where_egg_contains_dev_string():
"""
    Test cloning a git repository from a non-editable URL that contains the string "dev".
"""
env = reset_env()
result = run_pip('install', '%s#egg=django-devserver' %
local_checkout('git+git://github.com/dcramer/django-devserver.git'))
devserver_folder = env.site_packages/'devserver'
assert devserver_folder in result.files_created, str(result)
def test_git_with_ambiguous_revs():
"""
Test git with two "names" (tag/branch) pointing to the same commit
"""
env = reset_env()
version_pkg_path = _create_test_package(env)
package_url = 'git+file://%[email protected]#egg=version_pkg' % (version_pkg_path.abspath.replace('\\', '/'))
env.run('git', 'tag', '0.1', cwd=version_pkg_path)
result = run_pip('install', '-e', package_url)
assert 'Could not find a tag or branch' not in result.stdout
    # it is 'version-pkg' instead of 'version_pkg' because the egg-link file is
    # named version-pkg.egg-link (the package is a single .py module)
result.assert_installed('version-pkg', with_files=['.git'])
def test_git_works_with_editable_non_origin_repo():
    # Set up: create a git repo and install it as editable from a local directory path
env = reset_env()
version_pkg_path = _create_test_package(env)
run_pip('install', '-e', version_pkg_path.abspath)
    # running 'freeze' should not fall over, but should emit a warning on stderr
result = run_pip('freeze', expect_stderr=True)
assert "Error when trying to get requirement" in result.stderr
assert "Could not determine repository location" in result.stdout
assert "version-pkg==0.1" in result.stdout
| radiosilence/pip | tests/test_vcs_backends.py | Python | mit | 5,930 | 0.003373 |
import logging
from collections import defaultdict
class EventInsightsHelper(object):
@classmethod
def calculate_event_insights(cls, matches, year):
INSIGHTS_MAP = {
2016: cls.calculate_event_insights_2016
}
if year in INSIGHTS_MAP:
return INSIGHTS_MAP[year](matches)
else:
return None
@classmethod
def calculate_event_insights_2016(cls, matches):
qual_matches = []
playoff_matches = []
for match in matches:
if match.comp_level == 'qm':
qual_matches.append(match)
else:
playoff_matches.append(match)
qual_insights = cls._calculate_event_insights_2016_helper(qual_matches)
playoff_insights = cls._calculate_event_insights_2016_helper(playoff_matches)
return {
'qual': qual_insights,
'playoff': playoff_insights,
}
@classmethod
def _calculate_event_insights_2016_helper(cls, matches):
# defenses
defense_opportunities = defaultdict(int)
defense_damaged = defaultdict(int)
breaches = 0
# towers
high_goals = 0
low_goals = 0
challenges = 0
scales = 0
captures = 0
# scores
winning_scores = 0
win_margins = 0
total_scores = 0
auto_scores = 0
crossing_scores = 0
boulder_scores = 0
tower_scores = 0
foul_scores = 0
high_score = [0, "", ""] # score, match key, match name
finished_matches = 0
for match in matches:
if not match.has_been_played:
continue
red_score = match.alliances['red']['score']
blue_score = match.alliances['blue']['score']
win_score = max(red_score, blue_score)
winning_scores += win_score
win_margins += (win_score - min(red_score, blue_score))
total_scores += red_score + blue_score
if win_score > high_score[0]:
high_score = [win_score, match.key_name, match.short_name]
for alliance_color in ['red', 'blue']:
try:
alliance_breakdown = match.score_breakdown[alliance_color]
auto_scores += alliance_breakdown['autoPoints']
crossing_scores += alliance_breakdown['teleopCrossingPoints']
boulder_scores += alliance_breakdown['teleopBoulderPoints']
tower_scores += alliance_breakdown['teleopChallengePoints'] + alliance_breakdown['teleopScalePoints']
foul_scores += alliance_breakdown['foulPoints']
pos1 = 'LowBar'
pos2 = alliance_breakdown['position2']
pos3 = alliance_breakdown['position3']
pos4 = alliance_breakdown['position4']
pos5 = alliance_breakdown['position5']
positions = [pos1, pos2, pos3, pos4, pos5]
for pos_idx, pos in enumerate(positions):
defense_opportunities[pos] += 1
if alliance_breakdown['position{}crossings'.format(pos_idx + 1)] == 2:
defense_damaged[pos] += 1
breaches += 1 if alliance_breakdown['teleopDefensesBreached'] else 0
high_goals += alliance_breakdown['autoBouldersHigh'] + alliance_breakdown['teleopBouldersHigh']
low_goals += alliance_breakdown['autoBouldersLow'] + alliance_breakdown['teleopBouldersLow']
captures += 1 if alliance_breakdown['teleopTowerCaptured'] else 0
for tower_face in ['towerFaceA', 'towerFaceB', 'towerFaceC']:
if alliance_breakdown[tower_face] == 'Challenged':
challenges += 1
elif alliance_breakdown[tower_face] == 'Scaled':
scales += 1
                except Exception, e:
                    logging.error("Event insights failed for {}: {}".format(match.key.id(), e))
finished_matches += 1
if finished_matches == 0:
return {}
opportunities_1x = 2 * finished_matches # once per alliance
opportunities_3x = 6 * finished_matches # 3x per alliance
event_insights = {
'LowBar': [0, 0, 0],
'A_ChevalDeFrise': [0, 0, 0],
'A_Portcullis': [0, 0, 0],
'B_Ramparts': [0, 0, 0],
'B_Moat': [0, 0, 0],
'C_SallyPort': [0, 0, 0],
'C_Drawbridge': [0, 0, 0],
'D_RoughTerrain': [0, 0, 0],
'D_RockWall': [0, 0, 0],
'average_high_goals': float(high_goals) / (2 * finished_matches),
'average_low_goals': float(low_goals) / (2 * finished_matches),
'breaches': [breaches, opportunities_1x, 100.0 * float(breaches) / opportunities_1x], # [# success, # opportunities, %]
'scales': [scales, opportunities_3x, 100.0 * float(scales) / opportunities_3x],
'challenges': [challenges, opportunities_3x, 100.0 * float(challenges) / opportunities_3x],
'captures': [captures, opportunities_1x, 100.0 * float(captures) / opportunities_1x],
'average_win_score': float(winning_scores) / finished_matches,
'average_win_margin': float(win_margins) / finished_matches,
'average_score': float(total_scores) / (2 * finished_matches),
'average_auto_score': float(auto_scores) / (2 * finished_matches),
'average_crossing_score': float(crossing_scores) / (2 * finished_matches),
'average_boulder_score': float(boulder_scores) / (2 * finished_matches),
'average_tower_score': float(tower_scores) / (2 * finished_matches),
'average_foul_score': float(foul_scores) / (2 * finished_matches),
'high_score': high_score, # [score, match key, match name]
}
for defense, opportunities in defense_opportunities.items():
event_insights[defense] = [defense_damaged[defense], opportunities, 100.0 * float(defense_damaged[defense]) / opportunities] # [# damaged, # opportunities, %]
return event_insights
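# Usage sketch (illustrative; assumes `matches` is a list of played Match
# entities carrying comp_level, alliances and 2016 score_breakdown data, which
# is what the helper above expects):
#     insights = EventInsightsHelper.calculate_event_insights(matches, 2016)
#     if insights is not None and insights['qual']:
#         breaches, opportunities, pct = insights['qual']['breaches']
#         avg_score = insights['qual']['average_score']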
| synth3tk/the-blue-alliance | helpers/event_insights_helper.py | Python | mit | 6,320 | 0.002848 |
import copy
pattern_raw_digits = [
[' _ ',
'| |',
'|_|'],
[' ',
' |',
' |'],
[' _ ',
' _|',
'|_ '],
[' _ ',
' _|',
' _|'],
[' ',
'|_|',
' |'],
[' _ ',
'|_ ',
' _|'],
[' _ ',
'|_ ',
'|_|'],
[' _ ',
' |',
' |'],
[' _ ',
'|_|',
'|_|'],
[' _ ',
'|_|',
' _|']]
def read_raw_number(file):
raw_number = []
for row in range(3):
line = file.readline()
if line == "":
return None
line = line.rstrip('\n')
raw_number.append(line)
file.readline()
return raw_number
def print_raw_number(raw_number):
for i in range(3):
print(raw_number[i])
def read_expected_result(file):
return file.readline().rstrip('\n')
def parse_raw_digit(raw_digit):
for digit in range(10):
if pattern_raw_digits[digit] == raw_digit:
return str(digit)
return '?'
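# Layout note: a scanned entry is 3 rows of 27 characters, i.e. nine digit
# cells of 3x3 characters.  parse_raw_number below takes columns 3*i .. 3*i+2
# of every row for digit i and looks the cell up in pattern_raw_digits via
# parse_raw_digit, yielding '?' for unrecognised cells.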
def parse_raw_number(raw_number):
number = ''
for digit_index in range(9):
raw_digit = []
for row in range(3):
start = digit_index * 3
end = start + 3
raw_digit_line = raw_number[row][start:end]
raw_digit.append(raw_digit_line)
digit = parse_raw_digit(raw_digit)
number += digit
return number
def is_valid(number):
if len(number) != 9:
return False
for i in range(9):
digit = number[i]
if not digit in "0123456789":
return False
return True
# assumes number is valid
def is_checksum_ok(number):
total = 0
for i in range(9):
digit = number[i]
total += int(digit) * (9 - i)
return (total % 11) == 0
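# Worked example (illustrative): for the account number "345882865" the
# weighted sum is 3*9 + 4*8 + 5*7 + 8*6 + 8*5 + 2*4 + 8*3 + 6*2 + 5*1 = 231,
# and 231 % 11 == 0, so the checksum passes.  A single-digit change alters the
# sum by a non-multiple of 11, so it is always caught.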
def classify_number(number):
if is_valid(number):
if is_checksum_ok(number):
return ""
else:
return " ERR"
else:
return " ILL"
def change_one_char(raw_number, row, col, new_char):
new_raw_number = copy.copy(raw_number)
new_raw_number[row] = raw_number[row][:col] + new_char + raw_number[row][col+1:]
return new_raw_number
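# Repair strategy: find_all_guesses below tries every single-character edit of
# the 3x27 grid -- turning a '|' or '_' into a space, or a space into '|' or
# '_' -- and collects each edit that parses to a valid account number with a
# passing checksum.  Exactly one candidate means the scan had a one-character
# defect; several candidates leave the result ambiguous.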
def find_all_guesses(raw_number):
guesses = []
for row in range(3):
for col in range(27):
char = raw_number[row][col]
if (char == '_') or (char == '|'):
guess_raw_number = change_one_char(raw_number, row, col, ' ')
guess_number = parse_raw_number(guess_raw_number)
if classify_number(guess_number) == "":
guesses.append(guess_number)
elif (char == ' '):
guess_raw_number = change_one_char(raw_number, row, col, '|')
guess_number = parse_raw_number(guess_raw_number)
if classify_number(guess_number) == "":
guesses.append(guess_number)
guess_raw_number = change_one_char(raw_number, row, col, '_')
guess_number = parse_raw_number(guess_raw_number)
if classify_number(guess_number) == "":
guesses.append(guess_number)
    print(guesses)  # diagnostic output: list every single-character repair that yields a valid number
return guesses
def parse_and_classify_raw_number(raw_number):
number = parse_raw_number(raw_number)
classify = classify_number(number)
if classify != "":
guesses = find_all_guesses(raw_number)
if len(guesses) == 1:
number = guesses[0]
classify = classify_number(number)
elif len(guesses) > 1:
classify = " AMB " + str(sorted(guesses))
return number + classify
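# Output format (mirrors classify_number and the guessing above): a clean or
# uniquely-repairable scan yields just the nine digits; otherwise the digits
# are suffixed with " ERR" (bad checksum, no single-character repair found),
# " ILL" (unreadable cells, no repair found), or " AMB [...]" listing the
# sorted candidates when several single-character repairs are valid.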
def run_all_test_cases():
file = open('test-data.txt')
fail_count = 0
while True:
raw_number = read_raw_number(file)
        if raw_number is None:
break
result = parse_and_classify_raw_number(raw_number)
expected_result = read_expected_result(file)
print_raw_number(raw_number)
print('expected result:', expected_result)
print('result :', result)
if result == expected_result:
print('pass')
else:
print('fail')
fail_count += 1
print()
if fail_count == 0:
print("ALL PASS")
else:
print(fail_count, "FAILURE(S)")
file.close()
run_all_test_cases()
| brunorijsman/coding-katas | bank-ocr-python/bank-ocr.py | Python | mit | 3,837 | 0.019547 |
import sys
sys.path.insert(0, ".")
import unittest
from coalib.settings.Section import Section
from coalib.bears.LocalBear import LocalBear, BEAR_KIND
class LocalBearTest(unittest.TestCase):
def test_api(self):
test_object = LocalBear(Section("name"), None)
self.assertRaises(NotImplementedError,
test_object.run,
"filename",
["file\n"])
def test_kind(self):
self.assertEqual(LocalBear.kind(), BEAR_KIND.LOCAL)
if __name__ == '__main__':
unittest.main(verbosity=2)
| Tanmay28/coala | coalib/tests/bears/LocalBearTest.py | Python | agpl-3.0 | 585 | 0.005128 |