code (stringlengths, 3 to 1.05M) | repo_name (stringlengths, 5 to 104) | path (stringlengths, 4 to 251) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 3 to 1.05M)
---|---|---|---|---|---
# -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from openerp import fields, models
class Person(models.Model):
_inherit = 'myo.person'
code = fields.Char(string='Person/Patient Code', required=False)
is_patient = fields.Boolean('Is Patient',
help="If checked, the Person is a Patient.",
default=0)
| MostlyOpen/odoo_addons | myo_patient/models/person.py | Python | agpl-3.0 | 1,237 |
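A minimal sketch (hedged, not part of the dataset) of how the 'myo.person' model extended above could be extended further or queried from another Odoo module; the extra field, class name and search domain are assumptions made for illustration.

from openerp import fields, models

class AnotherPersonExtension(models.Model):
    _inherit = 'myo.person'

    # hypothetical extra flag, added with the same _inherit pattern as above
    is_caregiver = fields.Boolean('Is Caregiver', default=False)

    def patients_with_code(self):
        # find all persons flagged as patients that also carry a code
        return self.search([('is_patient', '=', True), ('code', '!=', False)])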
from __future__ import unicode_literals
import djblets.extensions.views as djblets_ext_views
from django.views.decorators.csrf import csrf_protect
from reviewboard.extensions.base import get_extension_manager
@csrf_protect
def configure_extension(request, ext_class, form_class,
template_name='extensions/configure_extension.html'):
return djblets_ext_views.configure_extension(request, ext_class,
form_class,
get_extension_manager(),
template_name)
| chipx86/reviewboard | reviewboard/extensions/views.py | Python | mit | 629 |
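A hedged sketch of how the thin wrapper above might be referenced from an extension's URL configuration; the my_extension module and its two classes are hypothetical names used only for illustration.

from django.conf.urls import url

from my_extension.extension import SampleExtension            # hypothetical module
from my_extension.forms import SampleExtensionSettingsForm    # hypothetical module
from reviewboard.extensions.views import configure_extension

urlpatterns = [
    # pass the extension and form classes through the extra-kwargs dict
    url(r'^configure/$', configure_extension,
        {'ext_class': SampleExtension,
         'form_class': SampleExtensionSettingsForm}),
]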
import os
def test_generous_parse_uri():
from dtoolcore.utils import generous_parse_uri
s3_uri = "s3://my-bucket/path/to/files"
parse_result = generous_parse_uri(s3_uri)
assert parse_result.scheme == 's3'
assert parse_result.netloc == 'my-bucket'
assert parse_result.path == '/path/to/files'
lazy_file_uri = ".my_dataset"
parse_result = generous_parse_uri(lazy_file_uri)
assert parse_result.scheme == 'file'
full_file_uri = "file://localhost/path/to/files"
parse_result = generous_parse_uri(full_file_uri)
assert parse_result.scheme == 'file'
assert parse_result.netloc == 'localhost'
assert parse_result.path == '/path/to/files'
irods_uri = "irods:///jic_raw_data/rg-someone/my_dataset"
parse_result = generous_parse_uri(irods_uri)
assert parse_result.scheme == 'irods'
assert parse_result.netloc == ''
assert parse_result.path == '/jic_raw_data/rg-someone/my_dataset'
irods_uri = "irods:/jic_raw_data/rg-someone/my_dataset"
parse_result = generous_parse_uri(irods_uri)
assert parse_result.scheme == 'irods'
assert parse_result.netloc == ''
assert parse_result.path == '/jic_raw_data/rg-someone/my_dataset'
def test_sanitise_uri():
from dtoolcore.utils import sanitise_uri, IS_WINDOWS, windows_to_unix_path
relpath = "./my_data"
abspath = os.path.abspath(relpath)
if IS_WINDOWS:
abspath = windows_to_unix_path(abspath)
sanitised_uri = sanitise_uri(relpath)
assert sanitised_uri.startswith("file://")
assert sanitised_uri.endswith(abspath)
s3_uri = "s3://my-bucket/path/to/files"
sanitised_uri = sanitise_uri(s3_uri)
assert sanitised_uri == s3_uri
irods_uri = "irods:///jic_raw_data/rg-someone/my_dataset"
sanitised_uri = sanitise_uri(irods_uri)
expected_uri = "irods:/jic_raw_data/rg-someone/my_dataset"
assert sanitised_uri == expected_uri
| JIC-CSB/dtoolcore | tests/test_uri_sanitisation.py | Python | mit | 1,925 |
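For orientation, a hedged sketch of calling the two helpers exercised by the tests above; the URIs are arbitrary examples.

from dtoolcore.utils import generous_parse_uri, sanitise_uri

result = generous_parse_uri("s3://my-bucket/path/to/files")
print(result.scheme, result.netloc, result.path)  # s3 my-bucket /path/to/files

# A bare relative path is promoted to a file:// URI by sanitise_uri.
print(sanitise_uri("./my_data"))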
from framework.dependency_management.dependency_resolver import ServiceLocator
DESCRIPTION = "Cookie Attributes Plugin to assist manual testing"
def run(PluginInfo):
resource = ServiceLocator.get_component("resource").GetResources('ExternalCookiesAttributes')
    Content = ServiceLocator.get_component("plugin_helper").ResourceLinkList('Cookie Attributes Resources', resource)
return Content
| DarKnight24/owtf | plugins/web/external/[email protected] | Python | bsd-3-clause | 408 |
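A hedged smoke-test sketch of invoking the plugin callable above directly; run() only forwards PluginInfo, so a placeholder argument suffices, and the framework's ServiceLocator is assumed to have been initialised elsewhere.

# assumes the plugin module has been loaded by the framework and its
# 'resource' and 'plugin_helper' components are registered
html_content = run(PluginInfo={})
print(len(html_content))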
from django import forms
from datetimewidget.widgets import DateWidget
class DateFilterForm(forms.Form):
"""
Very simple form responsible for specifying what dates to filter the system stats by.
"""
start_date = forms.DateField(widget=DateWidget(usel10n=True, bootstrap_version=3))
end_date = forms.DateField(widget=DateWidget(usel10n=True, bootstrap_version=3))
class Meta:
fields = ('start_date', 'end_date')
| jimga150/HealthNet | HealthNet/sysstats/forms.py | Python | mit | 446 |
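A hedged sketch of consuming DateFilterForm in a view; the import path follows the file's location, while the view name, template and the omitted statistics query are assumptions.

from django.shortcuts import render

from sysstats.forms import DateFilterForm  # path assumed from the file above

def stats_view(request):
    form = DateFilterForm(request.GET or None)
    start_date = end_date = None
    if form.is_valid():
        start_date = form.cleaned_data['start_date']
        end_date = form.cleaned_data['end_date']
    # filter the system statistics between start_date and end_date here (omitted)
    return render(request, 'sysstats/stats.html', {'form': form})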
from utils.opengl import TextureManager
from utils import Vec2d
from OpenGL.GL import *
import random
from math import sin
class CloudManager(object):
def __init__(self, screen, maxClouds=5):
self.screen = Vec2d(screen)
self.offset = Vec2d(0, 0)
self.clouds = []
self.maxClouds = maxClouds
for i in range(self.maxClouds):
self.createCloud(initial=True)
def createCloud(self, pos=None, left=False, top=False, texture=None, w=None, h=None, vel=None, tide=None, age=None, initial=False):
if pos is None:
pos = Vec2d(0, 0)
if initial:
pos.x = random.randint(int(self.offset[0]) - self.screen[0]/2 - 260,
int(self.offset[0]) + self.screen[0]/2 + 260)
pos.y = random.randint(int(self.offset[1]) - self.screen[1]/2 - 260,
int(self.offset[1]) + self.screen[1]/2 + 260)
elif left is not None or top is not None:
if left is not None and left is True:
pos.x = -self.offset[0] - self.screen[0]/2 - 260
pos.y = random.randint(int(self.offset[1]) - self.screen[1]/2 - 260,
int(self.offset[1]) + self.screen[1]/2 + 260)
elif left is not None and left is False:
pos.x = -self.offset[0] + self.screen[0]/2 + 260
pos.y = random.randint(int(self.offset[1]) - self.screen[1]/2 - 260,
int(self.offset[1]) + self.screen[1]/2 + 260)
elif top is not None and top is True:
pos.x = random.randint(int(self.offset[0]) - self.screen[0]/2 - 260,
int(self.offset[0]) + self.screen[0]/2 + 260)
pos.y = -self.offset[1] + self.screen[1]/2 + 260
elif top is not None and top is False:
pos.x = random.randint(int(self.offset[0]) - self.screen[0]/2 - 260,
int(self.offset[0]) + self.screen[0]/2 + 260)
pos.y = -self.offset[1] - self.screen[1]/2 - 260
else:
if bool(random.getrandbits(1)):
pos.x = -self.offset[0] - self.screen[0]/2 - 260
else:
pos.x = -self.offset[0] + self.screen[0]/2 + 260
if bool(random.getrandbits(1)):
pos.y = -self.offset[1] - self.screen[1]/2 - 260
else:
pos.y = -self.offset[1] + self.screen[1]/2 + 260
if texture is None:
options = [n for n in TextureManager.instance().keys() if n.startswith("textures/background/cloud")]
texture = options[random.randint(0, len(options)-1)]
if w is None and h is None:
w = random.randint(150, 250)
h = w
elif w is None:
w = h
else:
h = w
if vel is None:
vel = random.randint(5, 25)
if tide is None:
tide = random.randint(0, 10)
if age is None:
age = random.randint(0, 315) / 100.0
self.clouds.append({
"pos": pos,
"w": w,
"h": h,
"vel": vel,
"age": age,
"tide": tide,
"texture": texture,
"size": [([0.0, 1.0], [-w*0.5, h*0.5]),
([0.0, 0.0], [-w*0.5, -h*0.5]),
([1.0, 0.0], [ w*0.5, -h*0.5]),
([1.0, 1.0], [ w*0.5, h*0.5])]
})
def removeDistantClouds(self):
def in_range(cloud):
distance = cloud["pos"] + self.offset
#print (self.offset, cloud["pos"])
return abs(distance.x) < self.screen.x and abs(distance.y) < self.screen.y
self.clouds = list(filter(in_range, self.clouds))
def spawnCloud(self, vel):
left, right, top, bottom = 0, 0, 0, 0
for c in self.clouds:
distance = c["pos"] - self.offset
if distance.x > 0:
left += 1
else:
right += 1
left = vel[0] < 0
top = vel[1] > 0
if abs(vel[0]) > abs(vel[1]):
top = None
else:
left = None
self.createCloud(left=left, top=top)
def render(self, dt, offset=(0, 0), vel=(0, 0)):
self.offset = Vec2d(offset)
self.removeDistantClouds()
if len(self.clouds) < self.maxClouds:
self.spawnCloud(vel)
glColor3f(1.0, 1.0, 1.0)
self.clouds.sort(key=lambda c: c["texture"])
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
glEnable(GL_TEXTURE_2D)
if len(self.clouds) > 0:
old = self.clouds[0]["texture"]
TextureManager.instance()[old].bind()
glBegin(GL_QUADS)
for c in self.clouds:
pos = c["pos"]
age = c["age"]
tide = c["tide"]
c["pos"] = pos + Vec2d(c["vel"] * dt, 0.0)
c["age"] = age + dt
pos += offset
if old != c["texture"]:
glEnd()
old = c["texture"]
TextureManager.instance()[old].bind()
glBegin(GL_QUADS)
for v in c["size"]:
glTexCoord2fv(v[0])
glVertex2f(pos.x + v[1][0], pos.y + v[1][1] + sin(age) * tide)
if len(self.clouds) > 0:
glEnd()
glDisable(GL_BLEND)
glDisable(GL_TEXTURE_2D) | markusd/rocket | rocket/clouds.py | Python | bsd-2-clause | 5,964 |
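A hedged sketch of driving CloudManager from a game loop; window size, velocity and timestep are illustrative, and the GL context plus the cloud textures are assumed to be set up already.

from utils import Vec2d

manager = CloudManager(screen=(1280, 720), maxClouds=8)

camera_offset = Vec2d(0, 0)
camera_velocity = (12.0, 0.0)   # clouds are spawned on the side the camera moves towards
dt = 1.0 / 60.0                 # fixed timestep for the sketch

# inside the render loop, after projection and textures are ready:
manager.render(dt, offset=camera_offset, vel=camera_velocity)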
# -*- coding: utf-8 -*-
import threading
import sys
import logging
logger = logging.getLogger(__name__)
try:
import Queue as queue
except ImportError:
import queue
import traceback
class TaskQueue(object):
def __init__(self, producer, consumers):
self.__producer = producer
self.__consumers = consumers
self.__threads = []
# must be an infinite queue, otherwise producer may be blocked after all consumers being dead.
self.__queue = queue.Queue()
self.__lock = threading.Lock()
self.__exc_info = None
self.__exc_stack = ''
def run(self):
self.__add_and_run(threading.Thread(target=self.__producer_func))
for c in self.__consumers:
self.__add_and_run(threading.Thread(target=self.__consumer_func, args=(c,)))
# give KeyboardInterrupt chances to happen by joining with timeouts.
while self.__any_active():
for t in self.__threads:
t.join(1)
if self.__exc_info:
logger.error('An exception was thrown by producer or consumer, backtrace: {0}'.format(self.__exc_stack))
raise self.__exc_info[1]
def put(self, data):
assert data is not None
self.__queue.put(data)
def get(self):
return self.__queue.get()
def ok(self):
with self.__lock:
return self.__exc_info is None
def __add_and_run(self, thread):
thread.daemon = True
thread.start()
self.__threads.append(thread)
def __any_active(self):
return any(t.is_alive() for t in self.__threads)
def __producer_func(self):
try:
self.__producer(self)
except:
self.__on_exception(sys.exc_info())
self.__put_end()
else:
self.__put_end()
def __consumer_func(self, consumer):
try:
consumer(self)
except:
self.__on_exception(sys.exc_info())
def __put_end(self):
for i in range(len(self.__consumers)):
self.__queue.put(None)
def __on_exception(self, exc_info):
with self.__lock:
if self.__exc_info is None:
self.__exc_info = exc_info
self.__exc_stack = traceback.format_exc()
| aliyun/aliyun-oss-python-sdk | oss2/task_queue.py | Python | mit | 2,311 |
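A hedged usage sketch of the TaskQueue above: one producer enqueues work items and two consumers drain them until the internal None sentinel arrives.

def producer(q):
    for i in range(10):
        q.put(i)

def consumer(q):
    while q.ok():
        item = q.get()
        if item is None:   # sentinel added by TaskQueue after the producer finishes
            break
        print('processed', item)

TaskQueue(producer, [consumer, consumer]).run()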
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""AppInfo tools.
Library for working with AppInfo records in memory, store and load from
configuration files.
"""
import logging
import re
import string
import wsgiref.util
from google.appengine.api import appinfo_errors
from google.appengine.api import backendinfo
from google.appengine.api import pagespeedinfo
from google.appengine.api import validation
from google.appengine.api import yaml_builder
from google.appengine.api import yaml_listener
from google.appengine.api import yaml_object
_URL_REGEX = r'(?!\^)/.*|\..*|(\(.).*(?!\$).'
_FILES_REGEX = r'(?!\^).*(?!\$).'
_URL_ROOT_REGEX = r'/.*'
_DELTA_REGEX = r'([0-9]+)([DdHhMm]|[sS]?)'
_EXPIRATION_REGEX = r'\s*(%s)(\s+%s)*\s*' % (_DELTA_REGEX, _DELTA_REGEX)
_START_PATH = '/_ah/start'
_ALLOWED_SERVICES = ['mail', 'mail_bounce', 'xmpp_message', 'xmpp_subscribe',
'xmpp_presence', 'xmpp_error', 'channel_presence', 'rest',
'warmup']
_SERVICE_RE_STRING = '(' + '|'.join(_ALLOWED_SERVICES) + ')'
_PAGE_NAME_REGEX = r'^.+$'
_EXPIRATION_CONVERSIONS = {
'd': 60 * 60 * 24,
'h': 60 * 60,
'm': 60,
's': 1,
}
APP_ID_MAX_LEN = 100
SERVER_ID_MAX_LEN = 63
SERVER_VERSION_ID_MAX_LEN = 63
MAX_URL_MAPS = 100
PARTITION_SEPARATOR = '~'
DOMAIN_SEPARATOR = ':'
VERSION_SEPARATOR = '.'
SERVER_SEPARATOR = ':'
DEFAULT_SERVER = 'default'
PARTITION_RE_STRING = (r'[a-z\d\-]{1,%d}\%s' %
(APP_ID_MAX_LEN, PARTITION_SEPARATOR))
DOMAIN_RE_STRING = (r'(?!\-)[a-z\d\-\.]{1,%d}%s' %
(APP_ID_MAX_LEN, DOMAIN_SEPARATOR))
DISPLAY_APP_ID_RE_STRING = r'(?!-)[a-z\d\-]{0,%d}[a-z\d]' % (APP_ID_MAX_LEN - 1)
APPLICATION_RE_STRING = (r'(?:%s)?(?:%s)?%s' %
(PARTITION_RE_STRING,
DOMAIN_RE_STRING,
DISPLAY_APP_ID_RE_STRING))
SERVER_ID_RE_STRING = r'^(?!-)[a-z\d\-]{0,%d}[a-z\d]$' % (SERVER_ID_MAX_LEN - 1)
SERVER_VERSION_ID_RE_STRING = (r'^(?!-)[a-z\d\-]{0,%d}[a-z\d]$' %
(SERVER_VERSION_ID_MAX_LEN - 1))
_INSTANCES_REGEX = r'^([\d]+|automatic)$'
_INSTANCE_CLASS_REGEX = r'^([fF](1|2|4)|[bB](1|2|4|8))$'
_PENDING_LATENCY_REGEX = r'^(\d+((\.\d{1,3})?s|ms)|automatic)$'
_IDLE_TIMEOUT_REGEX = r'^[\d]+(s|m)$'
ALTERNATE_HOSTNAME_SEPARATOR = '-dot-'
BUILTIN_NAME_PREFIX = 'ah-builtin'
RUNTIME_RE_STRING = r'[a-z][a-z0-9]{0,29}'
API_VERSION_RE_STRING = r'[\w.]{1,32}'
SOURCE_LANGUAGE_RE_STRING = r'[\w.\-]{1,32}'
HANDLER_STATIC_FILES = 'static_files'
HANDLER_STATIC_DIR = 'static_dir'
HANDLER_SCRIPT = 'script'
HANDLER_API_ENDPOINT = 'api_endpoint'
LOGIN_OPTIONAL = 'optional'
LOGIN_REQUIRED = 'required'
LOGIN_ADMIN = 'admin'
AUTH_FAIL_ACTION_REDIRECT = 'redirect'
AUTH_FAIL_ACTION_UNAUTHORIZED = 'unauthorized'
SECURE_HTTP = 'never'
SECURE_HTTPS = 'always'
SECURE_HTTP_OR_HTTPS = 'optional'
SECURE_DEFAULT = 'default'
REQUIRE_MATCHING_FILE = 'require_matching_file'
DEFAULT_SKIP_FILES = (r'^(.*/)?('
r'(#.*#)|'
r'(.*~)|'
r'(.*\.py[co])|'
r'(.*/RCS/.*)|'
r'(\..*)|'
r')$')
DEFAULT_NOBUILD_FILES = (r'^$')
LOGIN = 'login'
AUTH_FAIL_ACTION = 'auth_fail_action'
SECURE = 'secure'
URL = 'url'
POSITION = 'position'
POSITION_HEAD = 'head'
POSITION_TAIL = 'tail'
STATIC_FILES = 'static_files'
UPLOAD = 'upload'
STATIC_DIR = 'static_dir'
MIME_TYPE = 'mime_type'
SCRIPT = 'script'
EXPIRATION = 'expiration'
API_ENDPOINT = 'api_endpoint'
HTTP_HEADERS = 'http_headers'
APPLICATION = 'application'
SERVER = 'server'
SERVER_SETTINGS = 'server_settings'
VM_SETTINGS = 'vm_settings'
VERSION = 'version'
MAJOR_VERSION = 'major_version'
MINOR_VERSION = 'minor_version'
RUNTIME = 'runtime'
API_VERSION = 'api_version'
SOURCE_LANGUAGE = 'source_language'
BUILTINS = 'builtins'
INCLUDES = 'includes'
HANDLERS = 'handlers'
LIBRARIES = 'libraries'
DEFAULT_EXPIRATION = 'default_expiration'
SKIP_FILES = 'skip_files'
NOBUILD_FILES = 'nobuild_files'
SERVICES = 'inbound_services'
DERIVED_FILE_TYPE = 'derived_file_type'
JAVA_PRECOMPILED = 'java_precompiled'
PYTHON_PRECOMPILED = 'python_precompiled'
ADMIN_CONSOLE = 'admin_console'
ERROR_HANDLERS = 'error_handlers'
BACKENDS = 'backends'
THREADSAFE = 'threadsafe'
API_CONFIG = 'api_config'
CODE_LOCK = 'code_lock'
ENV_VARIABLES = 'env_variables'
PAGESPEED = 'pagespeed'
INSTANCES = 'instances'
CLASS = 'class'
MINIMUM_PENDING_LATENCY = 'min_pending_latency'
MAXIMUM_PENDING_LATENCY = 'max_pending_latency'
MINIMUM_IDLE_INSTANCES = 'min_idle_instances'
MAXIMUM_IDLE_INSTANCES = 'max_idle_instances'
IDLE_TIMEOUT = 'idle_timeout'
FAILFAST = 'failfast'
PAGES = 'pages'
NAME = 'name'
ERROR_CODE = 'error_code'
FILE = 'file'
_ERROR_CODE_REGEX = r'(default|over_quota|dos_api_denial|timeout)'
ON = 'on'
ON_ALIASES = ['yes', 'y', 'True', 't', '1', 'true']
OFF = 'off'
OFF_ALIASES = ['no', 'n', 'False', 'f', '0', 'false']
class _VersionedLibrary(object):
"""A versioned library supported by App Engine."""
def __init__(self,
name,
url,
description,
supported_versions,
default_version=None,
deprecated_versions=None,
experimental_versions=None):
"""Initializer for _VersionedLibrary.
Args:
name: The name of the library e.g. "django".
url: The URL for the library's project page e.g.
"http://www.djangoproject.com/".
description: A short description of the library e.g. "A framework...".
supported_versions: A list of supported version names ordered by release
date e.g. ["v1", "v2", "v3"].
default_version: The version of the library that is enabled by default
in the Python 2.7 runtime or None if the library is not available by
default e.g. "v1".
deprecated_versions: A list of the versions of the library that have been
deprecated e.g. ["v1", "v2"].
experimental_versions: A list of the versions of the library that are
current experimental e.g. ["v1"].
"""
self.name = name
self.url = url
self.description = description
self.supported_versions = supported_versions
self.default_version = default_version
self.deprecated_versions = deprecated_versions or []
self.experimental_versions = experimental_versions or []
@property
def non_deprecated_versions(self):
return [version for version in self.supported_versions
if version not in self.deprecated_versions]
_SUPPORTED_LIBRARIES = [
_VersionedLibrary(
'django',
'http://www.djangoproject.com/',
'A full-featured web application framework for Python.',
['1.2', '1.3', '1.4'],
experimental_versions=['1.4']
),
_VersionedLibrary(
'jinja2',
'http://jinja.pocoo.org/docs/',
'A modern and designer friendly templating language for Python.',
['2.6']),
_VersionedLibrary(
'lxml',
'http://lxml.de/',
'A Pythonic binding for the C libraries libxml2 and libxslt.',
['2.3']),
_VersionedLibrary(
'markupsafe',
'http://pypi.python.org/pypi/MarkupSafe',
        'An XML/HTML/XHTML markup-safe string for Python.',
['0.15']),
_VersionedLibrary(
'numpy',
'http://numpy.scipy.org/',
'A general-purpose library for array-processing.',
['1.6.1']),
_VersionedLibrary(
'PIL',
'http://www.pythonware.com/library/pil/handbook/',
'A library for creating and transforming images.',
['1.1.7']),
_VersionedLibrary(
'PyAMF',
'http://www.pyamf.org/',
'A library that provides (AMF) Action Message Format functionality.',
['0.6.1']),
_VersionedLibrary(
'pycrypto',
'https://www.dlitz.net/software/pycrypto/',
        'A library of cryptography functions such as random number generation.',
['2.3', '2.6'],
experimental_versions=['2.6']
),
_VersionedLibrary(
'setuptools',
'http://pypi.python.org/pypi/setuptools',
'A library that provides package and module discovery capabilities.',
['0.6c11']),
_VersionedLibrary(
'webapp2',
'http://webapp-improved.appspot.com/',
'A lightweight Python web framework.',
['2.3', '2.5.1'],
default_version='2.3',
deprecated_versions=['2.3']
),
_VersionedLibrary(
'webob',
'http://www.webob.org/',
'A library that provides wrappers around the WSGI request environment.',
['1.1.1'],
default_version='1.1.1',
),
_VersionedLibrary(
'yaml',
'http://www.yaml.org/',
'A library for YAML serialization and deserialization.',
['3.10'],
default_version='3.10'
),
]
_NAME_TO_SUPPORTED_LIBRARY = dict((library.name, library)
for library in _SUPPORTED_LIBRARIES)
REQUIRED_LIBRARIES = {
('jinja2', '2.6'): [('markupsafe', '0.15'), ('setuptools', '0.6c11')],
('jinja2', 'latest'): [('markupsafe', 'latest'), ('setuptools', 'latest')],
}
_USE_VERSION_FORMAT = ('use one of: "%s" or "latest" '
'("latest" recommended for development only)')
_HTTP_SEPARATOR_CHARS = frozenset('()<>@,;:\\"/[]?={} \t')
_HTTP_TOKEN_CHARS = frozenset(string.printable[:-5]) - _HTTP_SEPARATOR_CHARS
_HTTP_TOKEN_RE = re.compile('[%s]+$' % re.escape(''.join(_HTTP_TOKEN_CHARS)))
_HTTP_REQUEST_HEADERS = frozenset([
'accept',
'accept-charset',
'accept-encoding',
'accept-language',
'authorization',
'expect',
'from',
'host',
'if-match',
'if-modified-since',
'if-none-match',
'if-range',
'if-unmodified-since',
'max-forwards',
'proxy-authorization',
'range',
'referer',
'te',
'user-agent',
])
_MAX_COOKIE_LENGTH = 4096
_MAX_URL_LENGTH = 2047
class HandlerBase(validation.Validated):
"""Base class for URLMap and ApiConfigHandler."""
ATTRIBUTES = {
URL: validation.Optional(_URL_REGEX),
LOGIN: validation.Options(LOGIN_OPTIONAL,
LOGIN_REQUIRED,
LOGIN_ADMIN,
default=LOGIN_OPTIONAL),
AUTH_FAIL_ACTION: validation.Options(AUTH_FAIL_ACTION_REDIRECT,
AUTH_FAIL_ACTION_UNAUTHORIZED,
default=AUTH_FAIL_ACTION_REDIRECT),
SECURE: validation.Options(SECURE_HTTP,
SECURE_HTTPS,
SECURE_HTTP_OR_HTTPS,
SECURE_DEFAULT,
default=SECURE_DEFAULT),
HANDLER_SCRIPT: validation.Optional(_FILES_REGEX)
}
class HttpHeadersDict(validation.ValidatedDict):
"""A dict that limits keys and values what http_headers allows.
http_headers is an static handler key i.e. it applies to handlers with
static_dir or static_files keys. An example of how http_headers is used is
handlers:
- url: /static
static_dir: static
http_headers:
X-Foo-Header: foo value
X-Bar-Header: bar value
"""
DISALLOWED_HEADERS = frozenset([
'content-encoding',
'content-length',
'date',
'server'
])
MAX_HEADER_LENGTH = 500
MAX_HEADER_VALUE_LENGTHS = {
'set-cookie': _MAX_COOKIE_LENGTH,
'set-cookie2': _MAX_COOKIE_LENGTH,
'location': _MAX_URL_LENGTH}
MAX_LEN = 500
class KeyValidator(validation.Validator):
"""Ensures that keys in HttpHeadersDict i.e. header names are valid.
An instance is used as HttpHeadersDict's KEY_VALIDATOR.
"""
def Validate(self, name, unused_key=None):
"""Returns argument, or raises an exception if it is invalid.
HTTP header names are defined by RFC 2616 section 4.2.
Args:
name: HTTP header field value.
unused_key: Unused.
Returns:
name argument, unchanged.
Raises:
appinfo_errors.InvalidHttpHeaderName: argument cannot be used as an HTTP
header name.
"""
original_name = name
if isinstance(name, unicode):
try:
name = name.encode('ascii')
except UnicodeEncodeError:
raise appinfo_errors.InvalidHttpHeaderName(
              'HTTP header names must not contain non-ASCII data')
name = name.lower()
if not _HTTP_TOKEN_RE.match(name):
raise appinfo_errors.InvalidHttpHeaderName(
'An HTTP header must be a non-empty RFC 2616 token.')
if name in _HTTP_REQUEST_HEADERS:
raise appinfo_errors.InvalidHttpHeaderName(
'%r can only be used in HTTP requests, not responses.'
% original_name)
if name.startswith('x-appengine'):
raise appinfo_errors.InvalidHttpHeaderName(
'HTTP header names that begin with X-Appengine are reserved.')
if wsgiref.util.is_hop_by_hop(name):
raise appinfo_errors.InvalidHttpHeaderName(
            'Only end-to-end headers may be used. See RFC 2616 section'
' 13.5.1.')
if name in HttpHeadersDict.DISALLOWED_HEADERS:
raise appinfo_errors.InvalidHttpHeaderName(
'%s is a disallowed header.' % name)
return original_name
class ValueValidator(validation.Validator):
"""Ensures that values in HttpHeadersDict i.e. header values are valid.
An instance is used as HttpHeadersDict's VALUE_VALIDATOR.
"""
def Validate(self, value, key=None):
"""Returns value, or raises an exception if it is invalid.
According to RFC 2616 section 4.2, header field values must consist "of
either *TEXT or combinations of token, separators, and quoted-string".
TEXT = <any OCTET except CTLs, but including LWS>
Args:
value: HTTP header field value.
key: HTTP header field name.
Returns:
value argument.
Raises:
appinfo_errors.InvalidHttpHeaderValue: argument cannot be used as an
HTTP header value.
"""
if isinstance(value, unicode):
try:
value = value.encode('ascii')
except UnicodeEncodeError:
raise appinfo_errors.InvalidHttpHeaderValue(
'HTTP header values must not contain non-ASCII data')
key = key.lower()
printable = set(string.printable[:-5])
if not all(char in printable for char in value):
raise appinfo_errors.InvalidHttpHeaderValue(
'HTTP header field values must consist of printable characters.')
HttpHeadersDict.ValueValidator.AssertHeaderNotTooLong(key, value)
return value
@staticmethod
def AssertHeaderNotTooLong(name, value):
header_length = len('%s: %s\r\n' % (name, value))
if header_length >= HttpHeadersDict.MAX_HEADER_LENGTH:
try:
max_len = HttpHeadersDict.MAX_HEADER_VALUE_LENGTHS[name]
except KeyError:
raise appinfo_errors.InvalidHttpHeaderValue(
'HTTP header (name + value) is too long.')
if len(value) > max_len:
insert = name, len(value), max_len
raise appinfo_errors.InvalidHttpHeaderValue(
              '%r header value has length %d, which exceeds the maximum allowed,'
' %d.' % insert)
KEY_VALIDATOR = KeyValidator()
VALUE_VALIDATOR = ValueValidator()
def Get(self, header_name):
"""Gets a header value.
Args:
header_name: HTTP header name to look for.
Returns:
A header value that corresponds to header_name. If more than one such
value is in self, one of the values is selected arbitrarily, and
returned. The selection is not deterministic.
"""
for name in self:
if name.lower() == header_name.lower():
return self[name]
def __setitem__(self, key, value):
is_addition = self.Get(key) is None
if is_addition and len(self) >= self.MAX_LEN:
raise appinfo_errors.TooManyHttpHeaders(
'Tried to add another header when the current set of HTTP headers'
' already has the maximum allowed number of headers, %d.'
% HttpHeadersDict.MAX_LEN)
super(HttpHeadersDict, self).__setitem__(key, value)
class URLMap(HandlerBase):
"""Mapping from URLs to handlers.
This class acts like something of a union type. Its purpose is to
describe a mapping between a set of URLs and their handlers. What
handler type a given instance has is determined by which handler-id
attribute is used.
Each mapping can have one and only one handler type. Attempting to
use more than one handler-id attribute will cause an UnknownHandlerType
to be raised during validation. Failure to provide any handler-id
attributes will cause MissingHandlerType to be raised during validation.
The regular expression used by the url field will be used to match against
the entire URL path and query string of the request. This means that
partial maps will not be matched. Specifying a url, say /admin, is the
same as matching against the regular expression '^/admin$'. Don't begin
your matching url with ^ or end them with $. These regular expressions
won't be accepted and will raise ValueError.
Attributes:
login: Whether or not login is required to access URL. Defaults to
'optional'.
secure: Restriction on the protocol which can be used to serve
this URL/handler (HTTP, HTTPS or either).
url: Regular expression used to fully match against the request URLs path.
See Special Cases for using static_dir.
static_files: Handler id attribute that maps URL to the appropriate
      file. Can use regex backreferences to the string matched by url.
upload: Regular expression used by the application configuration
program to know which files are uploaded as blobs. It's very
difficult to determine this using just the url and static_files
so this attribute must be included. Required when defining a
static_files mapping.
A matching file name must fully match against the upload regex, similar
to how url is matched against the request path. Do not begin upload
with ^ or end it with $.
static_dir: Handler id that maps the provided url to a sub-directory
within the application directory. See Special Cases.
mime_type: When used with static_files and static_dir the mime-type
of files served from those directories are overridden with this
value.
    script: Handler id that maps URLs to a script handler within the application
directory that will run using CGI.
position: Used in AppInclude objects to specify whether a handler
should be inserted at the beginning of the primary handler list or at the
end. If 'tail' is specified, the handler is inserted at the end,
otherwise, the handler is inserted at the beginning. This means that
'head' is the effective default.
expiration: When used with static files and directories, the time delta to
use for cache expiration. Has the form '4d 5h 30m 15s', where each letter
signifies days, hours, minutes, and seconds, respectively. The 's' for
      seconds may be omitted. At least one amount must be specified; combining
multiple amounts is optional. Example good values: '10', '1d 6h',
'1h 30m', '7d 7d 7d', '5m 30'.
api_endpoint: Handler id that identifies endpoint as an API endpoint,
calls that terminate here will be handled by the api serving framework.
Special cases:
When defining a static_dir handler, do not use a regular expression
in the url attribute. Both the url and static_dir attributes are
automatically mapped to these equivalents:
<url>/(.*)
<static_dir>/\1
For example:
url: /images
static_dir: images_folder
Is the same as this static_files declaration:
url: /images/(.*)
static_files: images_folder/\1
upload: images_folder/(.*)
"""
ATTRIBUTES = {
HANDLER_STATIC_FILES: validation.Optional(_FILES_REGEX),
UPLOAD: validation.Optional(_FILES_REGEX),
HANDLER_STATIC_DIR: validation.Optional(_FILES_REGEX),
MIME_TYPE: validation.Optional(str),
EXPIRATION: validation.Optional(_EXPIRATION_REGEX),
REQUIRE_MATCHING_FILE: validation.Optional(bool),
HTTP_HEADERS: validation.Optional(HttpHeadersDict),
POSITION: validation.Optional(validation.Options(POSITION_HEAD,
POSITION_TAIL)),
HANDLER_API_ENDPOINT: validation.Optional(validation.Options(
(ON, ON_ALIASES),
(OFF, OFF_ALIASES))),
}
ATTRIBUTES.update(HandlerBase.ATTRIBUTES)
COMMON_FIELDS = set([URL, LOGIN, AUTH_FAIL_ACTION, SECURE])
ALLOWED_FIELDS = {
HANDLER_STATIC_FILES: (MIME_TYPE, UPLOAD, EXPIRATION,
REQUIRE_MATCHING_FILE, HTTP_HEADERS),
HANDLER_STATIC_DIR: (MIME_TYPE, EXPIRATION, REQUIRE_MATCHING_FILE,
HTTP_HEADERS),
HANDLER_SCRIPT: (POSITION),
HANDLER_API_ENDPOINT: (POSITION, SCRIPT),
}
def GetHandler(self):
"""Get handler for mapping.
Returns:
Value of the handler (determined by handler id attribute).
"""
return getattr(self, self.GetHandlerType())
def GetHandlerType(self):
"""Get handler type of mapping.
Returns:
Handler type determined by which handler id attribute is set.
Raises:
      UnknownHandlerType: when none of the handler id attributes are set.
UnexpectedHandlerAttribute: when an unexpected attribute is set for the
discovered handler type.
HandlerTypeMissingAttribute: when the handler is missing a
required attribute for its handler type.
"""
for id_field in URLMap.ALLOWED_FIELDS.iterkeys():
if getattr(self, id_field) is not None:
mapping_type = id_field
break
else:
raise appinfo_errors.UnknownHandlerType(
'Unknown url handler type.\n%s' % str(self))
allowed_fields = URLMap.ALLOWED_FIELDS[mapping_type]
for attribute in self.ATTRIBUTES.iterkeys():
if (getattr(self, attribute) is not None and
not (attribute in allowed_fields or
attribute in URLMap.COMMON_FIELDS or
attribute == mapping_type)):
raise appinfo_errors.UnexpectedHandlerAttribute(
'Unexpected attribute "%s" for mapping type %s.' %
(attribute, mapping_type))
if mapping_type == HANDLER_STATIC_FILES and not self.upload:
raise appinfo_errors.MissingHandlerAttribute(
'Missing "%s" attribute for URL "%s".' % (UPLOAD, self.url))
return mapping_type
def CheckInitialized(self):
"""Adds additional checking to make sure handler has correct fields.
In addition to normal ValidatedCheck calls GetHandlerType
which validates all the handler fields are configured
properly.
Raises:
      UnknownHandlerType: when none of the handler id attributes are set.
UnexpectedHandlerAttribute: when an unexpected attribute is set for the
discovered handler type.
HandlerTypeMissingAttribute: when the handler is missing a required
attribute for its handler type.
ContentTypeSpecifiedMultipleTimes: when mime_type is inconsistent with
http_headers.
"""
super(URLMap, self).CheckInitialized()
if self.GetHandlerType() in (STATIC_DIR, STATIC_FILES):
self.AssertUniqueContentType()
def AssertUniqueContentType(self):
"""Makes sure that self.http_headers is consistent with self.mime_type.
Assumes self is a static handler i.e. either self.static_dir or
self.static_files is set (to not None).
Raises:
appinfo_errors.ContentTypeSpecifiedMultipleTimes: Raised when
self.http_headers contains a Content-Type header, and self.mime_type is
set. For example, the following configuration would be rejected:
handlers:
- url: /static
static_dir: static
mime_type: text/html
http_headers:
content-type: text/html
As this example shows, a configuration will be rejected when
http_headers and mime_type specify a content type, even when they
specify the same content type.
"""
used_both_fields = self.mime_type and self.http_headers
if not used_both_fields:
return
content_type = self.http_headers.Get('Content-Type')
if content_type is not None:
raise appinfo_errors.ContentTypeSpecifiedMultipleTimes(
          'http_headers specified a Content-Type header of %r in a handler that'
' also specified a mime_type of %r.' % (content_type, self.mime_type))
def FixSecureDefaults(self):
"""Force omitted 'secure: ...' handler fields to 'secure: optional'.
The effect is that handler.secure is never equal to the (nominal)
default.
See http://b/issue?id=2073962.
"""
if self.secure == SECURE_DEFAULT:
self.secure = SECURE_HTTP_OR_HTTPS
def WarnReservedURLs(self):
"""Generates a warning for reserved URLs.
See:
https://developers.google.com/appengine/docs/python/config/appconfig#Reserved_URLs
"""
if self.url == '/form':
logging.warning(
'The URL path "/form" is reserved and will not be matched.')
def ErrorOnPositionForAppInfo(self):
"""Raises an error if position is specified outside of AppInclude objects.
"""
if self.position:
raise appinfo_errors.PositionUsedInAppYamlHandler(
'The position attribute was specified for this handler, but this is '
'an app.yaml file. Position attribute is only valid for '
'include.yaml files.')
class AdminConsolePage(validation.Validated):
"""Class representing admin console page in AdminConsole object.
"""
ATTRIBUTES = {
URL: _URL_REGEX,
NAME: _PAGE_NAME_REGEX,
}
class AdminConsole(validation.Validated):
"""Class representing admin console directives in application info.
"""
ATTRIBUTES = {
PAGES: validation.Optional(validation.Repeated(AdminConsolePage)),
}
@classmethod
def Merge(cls, adminconsole_one, adminconsole_two):
"""Return the result of merging two AdminConsole objects."""
if not adminconsole_one or not adminconsole_two:
return adminconsole_one or adminconsole_two
if adminconsole_one.pages:
if adminconsole_two.pages:
adminconsole_one.pages.extend(adminconsole_two.pages)
else:
adminconsole_one.pages = adminconsole_two.pages
return adminconsole_one
class ErrorHandlers(validation.Validated):
"""Class representing error handler directives in application info.
"""
ATTRIBUTES = {
ERROR_CODE: validation.Optional(_ERROR_CODE_REGEX),
FILE: _FILES_REGEX,
MIME_TYPE: validation.Optional(str),
}
class BuiltinHandler(validation.Validated):
"""Class representing builtin handler directives in application info.
Permits arbitrary keys but their values must be described by the
validation.Options object returned by ATTRIBUTES.
"""
class DynamicAttributes(dict):
"""Provide a dictionary object that will always claim to have a key.
This dictionary returns a fixed value for any get operation. The fixed
value passed in as a constructor parameter should be a
validation.Validated object.
"""
def __init__(self, return_value, **parameters):
self.__return_value = return_value
dict.__init__(self, parameters)
def __contains__(self, _):
return True
def __getitem__(self, _):
return self.__return_value
ATTRIBUTES = DynamicAttributes(
validation.Optional(validation.Options((ON, ON_ALIASES),
(OFF, OFF_ALIASES))))
def __init__(self, **attributes):
"""Ensure that all BuiltinHandler objects at least have attribute 'default'.
"""
self.ATTRIBUTES.clear()
self.builtin_name = ''
super(BuiltinHandler, self).__init__(**attributes)
def __setattr__(self, key, value):
"""Permit ATTRIBUTES.iteritems() to return set of items that have values.
Whenever validate calls iteritems(), it is always called on ATTRIBUTES,
not on __dict__, so this override is important to ensure that functions
such as ToYAML() return the correct set of keys.
"""
if key == 'builtin_name':
object.__setattr__(self, key, value)
elif not self.builtin_name:
self.ATTRIBUTES[key] = ''
self.builtin_name = key
super(BuiltinHandler, self).__setattr__(key, value)
else:
raise appinfo_errors.MultipleBuiltinsSpecified(
'More than one builtin defined in list element. Each new builtin '
'should be prefixed by "-".')
def ToDict(self):
"""Convert BuiltinHander object to a dictionary.
Returns:
dictionary of the form: {builtin_handler_name: on/off}
"""
return {self.builtin_name: getattr(self, self.builtin_name)}
@classmethod
def IsDefined(cls, builtins_list, builtin_name):
"""Find if a builtin is defined in a given list of builtin handler objects.
Args:
builtins_list: list of BuiltinHandler objects (typically yaml.builtins)
builtin_name: name of builtin to find whether or not it is defined
Returns:
true if builtin_name is defined by a member of builtins_list,
false otherwise
"""
for b in builtins_list:
if b.builtin_name == builtin_name:
return True
return False
@classmethod
def ListToTuples(cls, builtins_list):
"""Converts a list of BuiltinHandler objects to a list of (name, status)."""
return [(b.builtin_name, getattr(b, b.builtin_name)) for b in builtins_list]
@classmethod
def Validate(cls, builtins_list, runtime=None):
"""Verify that all BuiltinHandler objects are valid and not repeated.
Args:
builtins_list: list of BuiltinHandler objects to validate.
runtime: if set then warnings are generated for builtins that have been
deprecated in the given runtime.
Raises:
InvalidBuiltinFormat if the name of a Builtinhandler object
cannot be determined.
DuplicateBuiltinSpecified if a builtin handler name is used
more than once in the list.
"""
seen = set()
for b in builtins_list:
if not b.builtin_name:
raise appinfo_errors.InvalidBuiltinFormat(
'Name of builtin for list object %s could not be determined.'
% b)
if b.builtin_name in seen:
raise appinfo_errors.DuplicateBuiltinsSpecified(
'Builtin %s was specified more than once in one yaml file.'
% b.builtin_name)
if b.builtin_name == 'datastore_admin' and runtime == 'python':
logging.warning(
'The datastore_admin builtin is deprecated. You can find '
'information on how to enable it through the Administrative '
'Console here: '
'http://developers.google.com/appengine/docs/adminconsole/'
'datastoreadmin.html')
elif b.builtin_name == 'mapreduce' and runtime == 'python':
logging.warning(
'The mapreduce builtin is deprecated. You can find more '
'information on how to configure and use it here: '
'http://developers.google.com/appengine/docs/python/dataprocessing/'
'overview.html')
seen.add(b.builtin_name)
class ApiConfigHandler(HandlerBase):
"""Class representing api_config handler directives in application info."""
ATTRIBUTES = HandlerBase.ATTRIBUTES
ATTRIBUTES.update({
URL: validation.Regex(_URL_REGEX),
HANDLER_SCRIPT: validation.Regex(_FILES_REGEX)
})
class Library(validation.Validated):
"""Class representing the configuration of a single library."""
ATTRIBUTES = {'name': validation.Type(str),
'version': validation.Type(str)}
def CheckInitialized(self):
"""Raises if the library configuration is not valid."""
super(Library, self).CheckInitialized()
if self.name not in _NAME_TO_SUPPORTED_LIBRARY:
raise appinfo_errors.InvalidLibraryName(
'the library "%s" is not supported' % self.name)
supported_library = _NAME_TO_SUPPORTED_LIBRARY[self.name]
if self.version != 'latest':
if self.version not in supported_library.supported_versions:
raise appinfo_errors.InvalidLibraryVersion(
('%s version "%s" is not supported, ' + _USE_VERSION_FORMAT) % (
self.name,
self.version,
'", "'.join(supported_library.non_deprecated_versions)))
elif self.version in supported_library.deprecated_versions:
logging.warning(
('%s version "%s" is deprecated, ' + _USE_VERSION_FORMAT) % (
self.name,
self.version,
'", "'.join(supported_library.non_deprecated_versions)))
class ServerSettings(validation.Validated):
"""Class representing server settings in the AppInfoExternal.
"""
ATTRIBUTES = {
INSTANCES: validation.Optional(_INSTANCES_REGEX),
CLASS: validation.Optional(_INSTANCE_CLASS_REGEX),
MINIMUM_IDLE_INSTANCES: validation.Optional(_INSTANCES_REGEX),
MAXIMUM_IDLE_INSTANCES: validation.Optional(_INSTANCES_REGEX),
MINIMUM_PENDING_LATENCY: validation.Optional(_PENDING_LATENCY_REGEX),
MAXIMUM_PENDING_LATENCY: validation.Optional(_PENDING_LATENCY_REGEX),
IDLE_TIMEOUT: validation.Optional(_IDLE_TIMEOUT_REGEX),
FAILFAST: validation.Optional(validation.TYPE_BOOL),
}
class VmSettings(validation.ValidatedDict):
"""Class for VM settings.
We don't validate these further because the feature is in flux.
"""
KEY_VALIDATOR = validation.Regex('[a-zA-Z_][a-zA-Z0-9_]*')
VALUE_VALIDATOR = str
class EnvironmentVariables(validation.ValidatedDict):
"""Class representing a mapping of environment variable key value pairs."""
KEY_VALIDATOR = validation.Regex('[a-zA-Z_][a-zA-Z0-9_]*')
VALUE_VALIDATOR = str
class AppInclude(validation.Validated):
"""Class representing the contents of an included app.yaml file.
Used for both builtins and includes directives.
"""
ATTRIBUTES = {
BUILTINS: validation.Optional(validation.Repeated(BuiltinHandler)),
INCLUDES: validation.Optional(validation.Type(list)),
HANDLERS: validation.Optional(validation.Repeated(URLMap)),
ADMIN_CONSOLE: validation.Optional(AdminConsole),
}
@classmethod
def MergeAppYamlAppInclude(cls, appyaml, appinclude):
"""This function merges an app.yaml file with referenced builtins/includes.
"""
if not appinclude:
return appyaml
if appinclude.handlers:
tail = appyaml.handlers or []
appyaml.handlers = []
for h in appinclude.handlers:
if not h.position or h.position == 'head':
appyaml.handlers.append(h)
else:
tail.append(h)
h.position = None
appyaml.handlers.extend(tail)
appyaml.admin_console = AdminConsole.Merge(appyaml.admin_console,
appinclude.admin_console)
return appyaml
@classmethod
def MergeAppIncludes(cls, appinclude_one, appinclude_two):
"""This function merges the non-referential state of the provided AppInclude
objects. That is, builtins and includes directives are not preserved, but
any static objects are copied into an aggregate AppInclude object that
preserves the directives of both provided AppInclude objects.
Args:
appinclude_one: object one to merge
appinclude_two: object two to merge
Returns:
AppInclude object that is the result of merging the static directives of
appinclude_one and appinclude_two.
"""
if not appinclude_one or not appinclude_two:
return appinclude_one or appinclude_two
if appinclude_one.handlers:
if appinclude_two.handlers:
appinclude_one.handlers.extend(appinclude_two.handlers)
else:
appinclude_one.handlers = appinclude_two.handlers
appinclude_one.admin_console = (
AdminConsole.Merge(appinclude_one.admin_console,
appinclude_two.admin_console))
return appinclude_one
class AppInfoExternal(validation.Validated):
"""Class representing users application info.
This class is passed to a yaml_object builder to provide the validation
for the application information file format parser.
Attributes:
application: Unique identifier for application.
version: Application's major version.
runtime: Runtime used by application.
api_version: Which version of APIs to use.
source_language: Optional specification of the source language.
      For example, we specify "php-quercus" if this is a Java app
      that was generated from PHP source using Quercus.
handlers: List of URL handlers.
default_expiration: Default time delta to use for cache expiration for
all static files, unless they have their own specific 'expiration' set.
See the URLMap.expiration field's documentation for more information.
skip_files: An re object. Files that match this regular expression will
not be uploaded by appcfg.py. For example:
skip_files: |
.svn.*|
#.*#
nobuild_files: An re object. Files that match this regular expression will
not be built into the app. Go only.
api_config: URL root and script/servlet path for enhanced api serving
"""
ATTRIBUTES = {
APPLICATION: APPLICATION_RE_STRING,
SERVER: validation.Optional(SERVER_ID_RE_STRING),
VERSION: validation.Optional(SERVER_VERSION_ID_RE_STRING),
RUNTIME: RUNTIME_RE_STRING,
API_VERSION: API_VERSION_RE_STRING,
SOURCE_LANGUAGE: validation.Optional(
validation.Regex(SOURCE_LANGUAGE_RE_STRING)),
SERVER_SETTINGS: validation.Optional(ServerSettings),
VM_SETTINGS: validation.Optional(VmSettings),
BUILTINS: validation.Optional(validation.Repeated(BuiltinHandler)),
INCLUDES: validation.Optional(validation.Type(list)),
HANDLERS: validation.Optional(validation.Repeated(URLMap)),
LIBRARIES: validation.Optional(validation.Repeated(Library)),
SERVICES: validation.Optional(validation.Repeated(
validation.Regex(_SERVICE_RE_STRING))),
DEFAULT_EXPIRATION: validation.Optional(_EXPIRATION_REGEX),
SKIP_FILES: validation.RegexStr(default=DEFAULT_SKIP_FILES),
NOBUILD_FILES: validation.RegexStr(default=DEFAULT_NOBUILD_FILES),
DERIVED_FILE_TYPE: validation.Optional(validation.Repeated(
validation.Options(JAVA_PRECOMPILED, PYTHON_PRECOMPILED))),
ADMIN_CONSOLE: validation.Optional(AdminConsole),
ERROR_HANDLERS: validation.Optional(validation.Repeated(ErrorHandlers)),
BACKENDS: validation.Optional(validation.Repeated(
backendinfo.BackendEntry)),
THREADSAFE: validation.Optional(bool),
API_CONFIG: validation.Optional(ApiConfigHandler),
CODE_LOCK: validation.Optional(bool),
ENV_VARIABLES: validation.Optional(EnvironmentVariables),
PAGESPEED: validation.Optional(pagespeedinfo.PagespeedEntry),
}
def CheckInitialized(self):
"""Performs non-regex-based validation.
The following are verified:
- At least one url mapping is provided in the URL mappers.
- Number of url mappers doesn't exceed MAX_URL_MAPS.
- Major version does not contain the string -dot-.
- If api_endpoints are defined, an api_config stanza must be defined.
- If the runtime is python27 and threadsafe is set, then no CGI handlers
can be used.
- That the version name doesn't start with BUILTIN_NAME_PREFIX
Raises:
      DuplicateLibrary: if the same library name is specified more than once.
MissingURLMapping: if no URLMap object is present in the object.
TooManyURLMappings: if there are too many URLMap entries.
MissingApiConfig: if api_endpoints exist without an api_config.
MissingThreadsafe: if threadsafe is not set but the runtime requires it.
ThreadsafeWithCgiHandler: if the runtime is python27, threadsafe is set
and CGI handlers are specified.
"""
super(AppInfoExternal, self).CheckInitialized()
if not self.handlers and not self.builtins and not self.includes:
raise appinfo_errors.MissingURLMapping(
'No URLMap entries found in application configuration')
if self.handlers and len(self.handlers) > MAX_URL_MAPS:
raise appinfo_errors.TooManyURLMappings(
'Found more than %d URLMap entries in application configuration' %
MAX_URL_MAPS)
if self.threadsafe is None and self.runtime == 'python27':
raise appinfo_errors.MissingThreadsafe(
'threadsafe must be present and set to either "yes" or "no"')
if self.libraries:
if self.runtime != 'python27':
raise appinfo_errors.RuntimeDoesNotSupportLibraries(
'libraries entries are only supported by the "python27" runtime')
library_names = [library.name for library in self.libraries]
for library_name in library_names:
if library_names.count(library_name) > 1:
raise appinfo_errors.DuplicateLibrary(
'Duplicate library entry for %s' % library_name)
if self.version and self.version.find(ALTERNATE_HOSTNAME_SEPARATOR) != -1:
raise validation.ValidationError(
'Version "%s" cannot contain the string "%s"' % (
self.version, ALTERNATE_HOSTNAME_SEPARATOR))
if self.version and self.version.startswith(BUILTIN_NAME_PREFIX):
raise validation.ValidationError(
('Version "%s" cannot start with "%s" because it is a '
'reserved version name prefix.') % (self.version,
BUILTIN_NAME_PREFIX))
if self.handlers:
api_endpoints = [handler.url for handler in self.handlers
if handler.GetHandlerType() == HANDLER_API_ENDPOINT]
if api_endpoints and not self.api_config:
raise appinfo_errors.MissingApiConfig(
'An api_endpoint handler was specified, but the required '
'api_config stanza was not configured.')
if self.threadsafe and self.runtime == 'python27':
for handler in self.handlers:
if (handler.script and (handler.script.endswith('.py') or
'/' in handler.script)):
raise appinfo_errors.ThreadsafeWithCgiHandler(
'threadsafe cannot be enabled with CGI handler: %s' %
handler.script)
def GetAllLibraries(self):
"""Returns a list of all Library instances active for this configuration.
Returns:
The list of active Library instances for this configuration. This includes
directly-specified libraries as well as any required dependencies.
"""
if not self.libraries:
return []
library_names = set(library.name for library in self.libraries)
required_libraries = []
for library in self.libraries:
for required_name, required_version in REQUIRED_LIBRARIES.get(
(library.name, library.version), []):
if required_name not in library_names:
required_libraries.append(Library(name=required_name,
version=required_version))
return self.libraries + required_libraries
def GetNormalizedLibraries(self):
"""Returns a list of normalized Library instances for this configuration.
Returns:
The list of active Library instances for this configuration. This includes
directly-specified libraries, their required dependencies as well as any
libraries enabled by default. Any libraries with "latest" as their version
will be replaced with the latest available version.
"""
libraries = self.GetAllLibraries()
enabled_libraries = set(library.name for library in libraries)
for library in _SUPPORTED_LIBRARIES:
if library.default_version and library.name not in enabled_libraries:
libraries.append(Library(name=library.name,
version=library.default_version))
for library in libraries:
if library.version == 'latest':
library.version = _NAME_TO_SUPPORTED_LIBRARY[
library.name].supported_versions[-1]
return libraries
def ApplyBackendSettings(self, backend_name):
"""Applies settings from the indicated backend to the AppInfoExternal.
Backend entries may contain directives that modify other parts of the
app.yaml, such as the 'start' directive, which adds a handler for the start
request. This method performs those modifications.
Args:
backend_name: The name of a backend defined in 'backends'.
Raises:
BackendNotFound: If the indicated backend was not listed in 'backends'.
"""
if backend_name is None:
return
if self.backends is None:
raise appinfo_errors.BackendNotFound
self.version = backend_name
match = None
for backend in self.backends:
if backend.name != backend_name:
continue
if match:
raise appinfo_errors.DuplicateBackend
else:
match = backend
if match is None:
raise appinfo_errors.BackendNotFound
if match.start is None:
return
start_handler = URLMap(url=_START_PATH, script=match.start)
self.handlers.insert(0, start_handler)
def ValidateHandlers(handlers, is_include_file=False):
"""Validates a list of handler (URLMap) objects.
Args:
handlers: A list of a handler (URLMap) objects.
    is_include_file: If true, indicates that we are performing validation
for handlers in an AppInclude file, which may contain special directives.
"""
if not handlers:
return
for handler in handlers:
handler.FixSecureDefaults()
handler.WarnReservedURLs()
if not is_include_file:
handler.ErrorOnPositionForAppInfo()
def LoadSingleAppInfo(app_info):
"""Load a single AppInfo object where one and only one is expected.
Args:
app_info: A file-like object or string. If it is a string, parse it as
a configuration file. If it is a file-like object, read in data and
parse.
Returns:
An instance of AppInfoExternal as loaded from a YAML file.
Raises:
ValueError: if a specified service is not valid.
EmptyConfigurationFile: when there are no documents in YAML file.
MultipleConfigurationFile: when there is more than one document in YAML
file.
"""
builder = yaml_object.ObjectBuilder(AppInfoExternal)
handler = yaml_builder.BuilderHandler(builder)
listener = yaml_listener.EventListener(handler)
listener.Parse(app_info)
app_infos = handler.GetResults()
if len(app_infos) < 1:
raise appinfo_errors.EmptyConfigurationFile()
if len(app_infos) > 1:
raise appinfo_errors.MultipleConfigurationFile()
appyaml = app_infos[0]
ValidateHandlers(appyaml.handlers)
if appyaml.builtins:
BuiltinHandler.Validate(appyaml.builtins, appyaml.runtime)
return appyaml
class AppInfoSummary(validation.Validated):
"""This class contains only basic summary information about an app.
It is used to pass back information about the newly created app to users
after a new version has been created.
"""
ATTRIBUTES = {
APPLICATION: APPLICATION_RE_STRING,
MAJOR_VERSION: SERVER_VERSION_ID_RE_STRING,
MINOR_VERSION: validation.TYPE_LONG
}
def LoadAppInclude(app_include):
"""Load a single AppInclude object where one and only one is expected.
Args:
app_include: A file-like object or string. If it is a string, parse it as
a configuration file. If it is a file-like object, read in data and
parse.
Returns:
An instance of AppInclude as loaded from a YAML file.
Raises:
EmptyConfigurationFile: when there are no documents in YAML file.
MultipleConfigurationFile: when there is more than one document in YAML
file.
"""
builder = yaml_object.ObjectBuilder(AppInclude)
handler = yaml_builder.BuilderHandler(builder)
listener = yaml_listener.EventListener(handler)
listener.Parse(app_include)
includes = handler.GetResults()
if len(includes) < 1:
raise appinfo_errors.EmptyConfigurationFile()
if len(includes) > 1:
raise appinfo_errors.MultipleConfigurationFile()
includeyaml = includes[0]
if includeyaml.handlers:
for handler in includeyaml.handlers:
handler.FixSecureDefaults()
handler.WarnReservedURLs()
if includeyaml.builtins:
BuiltinHandler.Validate(includeyaml.builtins)
return includeyaml
def ParseExpiration(expiration):
"""Parses an expiration delta string.
Args:
expiration: String that matches _DELTA_REGEX.
Returns:
Time delta in seconds.
"""
delta = 0
for match in re.finditer(_DELTA_REGEX, expiration):
amount = int(match.group(1))
units = _EXPIRATION_CONVERSIONS.get(match.group(2).lower(), 1)
delta += amount * units
return delta
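# Illustrative examples (not in the original source): ParseExpiration('1d 6h')
# returns 108000 seconds (86400 + 21600), and a bare amount such as '10' is
# treated as 10 seconds because the unit factor defaults to 1.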
_file_path_positive_re = re.compile(r'^[ 0-9a-zA-Z\._\+/\$-]{1,256}$')
_file_path_negative_1_re = re.compile(r'\.\.|^\./|\.$|/\./|^-|^_ah/')
_file_path_negative_2_re = re.compile(r'//|/$')
_file_path_negative_3_re = re.compile(r'^ | $|/ | /')
def ValidFilename(filename):
"""Determines if filename is valid.
filename must be a valid pathname.
- It must contain only letters, numbers, _, +, /, $, ., and -.
- It must be less than 256 chars.
- It must not contain "/./", "/../", or "//".
- It must not end in "/".
- All spaces must be in the middle of a directory or file name.
Args:
filename: The filename to validate.
Returns:
An error string if the filename is invalid. Returns '' if the filename
is valid.
"""
if _file_path_positive_re.match(filename) is None:
return 'Invalid character in filename: %s' % filename
if _file_path_negative_1_re.search(filename) is not None:
return ('Filename cannot contain "." or ".." '
'or start with "-" or "_ah/": %s' %
filename)
if _file_path_negative_2_re.search(filename) is not None:
return 'Filename cannot have trailing / or contain //: %s' % filename
if _file_path_negative_3_re.search(filename) is not None:
return 'Any spaces must be in the middle of a filename: %s' % filename
return ''
| overtherain/scriptfile | software/googleAppEngine/google/appengine/api/appinfo.py | Python | mit | 51,450 |
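A hedged sketch of feeding a minimal app.yaml string to LoadSingleAppInfo from the module above; the configuration values are illustrative only.

from google.appengine.api.appinfo import LoadSingleAppInfo

SAMPLE_APP_YAML = """\
application: demo-app
version: one
runtime: python27
api_version: 1
threadsafe: true
handlers:
- url: /.*
  script: main.app
"""

config = LoadSingleAppInfo(SAMPLE_APP_YAML)
print(config.application, config.runtime, len(config.handlers))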
""" Our tests are defined in here """
import os
import sys
from operator import eq
import just
import pytest
TEST_FNAME = "testobj"
def get_result(m, extension, inp):
fname = TEST_FNAME + "." + extension
try:
m.write(inp, fname)
read_result = m.read(fname)
finally:
just.remove(fname)
return read_result
@pytest.mark.parametrize(
"m, extension, inp, expected, compare",
[
(just, "txt", "{}", "{}", eq),
(just.txt, "txt", "{}", "{}", eq),
(just.txt, "txt", "None", "None", eq),
(just.txt, "txt", "", "", eq),
(just.newl, "newl", ["1", "2"], ["1", "2"], eq),
(just.json, "json", {}, {}, eq),
(just.json, "json", None, None, eq),
(just.json, "json", "", "", eq),
(just.yaml, "yaml", {}, {}, eq),
(just.yaml, "yaml", None, None, eq),
(just.yaml, "yaml", "", "", eq),
(just.pickle, "pkl", {}, {}, eq),
(just.pickle, "pkl", None, None, eq),
(just.pickle, "pkl", "", "", eq),
],
)
def test_compare(m, extension, inp, expected, compare):
assert compare(get_result(m, extension, inp), expected)
def test_multi_read():
obj = ["a", "b"]
fnames = ["a.txt", "b.txt"]
just.multi_write(obj, fnames)
try:
for name, data in just.multi_read("*.txt"):
assert fnames.index(name.split("/")[-1]) == obj.index(data)
finally:
for fname in fnames:
os.remove(fname)
def test_newl_iread():
fname = "testobj.newl"
obj = ["1", "2"]
just.write(obj, "testobj.newl")
try:
assert [x for x in just.iread(fname)] == [x for x in obj]
finally:
os.remove(fname)
def test_txt_iread():
fname = "testobj.txt"
obj = "1\n2\n3\n4\n5"
just.write(obj, "testobj.txt")
try:
assert [x for x in just.iread(fname)] == [x for x in obj.split("\n")]
finally:
os.remove(fname)
def test_find_just_path():
try:
base = os.path.dirname(os.path.abspath(__file__))
just_file = os.path.join(base, ".just")
with open(just_file, "w") as f:
f.write("OK")
assert isinstance(just.path_.find_just_path(), type("1"))
finally:
os.remove(just_file)
def test_txt_append():
fname = "testobj.txt"
obj = "bla"
just.append(obj, "testobj.txt")
try:
assert [x for x in just.iread(fname)] == [obj]
just.append(obj, "testobj.txt")
assert [x for x in just.iread(fname)] == [obj, obj]
finally:
os.remove(fname)
def test_unsuccessful_read():
assert just.read("A" * 100, 42) == 42
def test_unsuccessful_remove():
assert just.remove("A" * 100, 42) == 42
def test_ls():
assert just.ls(".")
| kootenpv/just | tests/test_files.py | Python | agpl-3.0 | 2,748 |
import tempfile
import os, sys
import subprocess
import time
from . import walk_projects, rest_header
class Statistics:
def __init__(self, artefact_path, output_path, index_path):
# which directory to scan for repositories
self.artefact_path = artefact_path
# base directory for output
self.output_path = output_path
# index page in reStructuredText format
self.statistics_index = os.path.join(index_path, 'statistics.rst')
# 3rd party tools
self.gitstats = os.environ.get('GITSTATS', 'gitstats')
self.statsvn = os.environ.get('STATSVN', 'statsvn.jar')
# all projects to work on
self.projects = []
        # skip the statistics tools themselves ("gitstats", "statsvn") and any "-develop" projects
self.projects_blacklist = ['gitstats', 'statsvn', '-develop']
# just regenerate every week
self.regenerate_threshold = 60 * 60 * 24 * 7
# for debugging purposes (generate statistics for listed projects only, if not empty)
self.projects_whitelist = []
#self.projects_whitelist = ['image_proxy']
def scan_projects(self):
for project in walk_projects(self.artefact_path):
# compute whether to skip this project by blacklist
skip = False
for blacklist_item in self.projects_blacklist:
if blacklist_item in project.name:
skip = True
if skip: continue
if project.vcs in ('git', 'svn'):
if not self.projects_whitelist or (self.projects_whitelist and project.name in self.projects_whitelist):
self.projects.append(project)
def write_index(self):
path = os.path.dirname(self.statistics_index)
if not os.path.isdir(path):
os.makedirs(path)
f = open(self.statistics_index, 'w')
f.write(rest_header('Repository statistics', 'zt.manticore.ext.statistics'))
f.write('Statistics of the following projects:\n\n')
f.write('- `{0} <./statistics/{0}/index.html>`_\n'.format('all-git'))
for project in self.projects:
f.write('- `{0} <./statistics/{0}/index.html>`_\n'.format(project.name))
f.write('\n\n\n')
f.close()
def generate_statistics(self):
git_projects = []
for project in self.projects:
print "Generating statistics for {0}".format(project)
if project.vcs == 'git':
self.run_gitstats(project)
git_projects.append(project)
elif project.vcs == 'svn':
cwd = os.getcwd()
try:
self.run_statsvn(project)
#pass
#except:
# pass
finally:
os.chdir(cwd)
# 2011-11-07: generate multi-repo statistics
self.run_gitstats_multi(git_projects, alias='all-git')
def is_up_to_date(self, output_path):
# check if it actually should be regenerated
try:
path_to_index = os.path.join(output_path, 'index.html')
project_mtime = os.stat(path_to_index).st_mtime
now = time.time()
if abs(now - project_mtime) < self.regenerate_threshold:
return True
except:
pass
return False
def get_output_path(self, project):
output_path = os.path.join(self.output_path, project.name)
return output_path
def run_command(self, command):
returncode = subprocess.call(command)
if returncode != 0:
print "Command '{0}' had errors, exit code was {1}".format(' '.join(command), returncode >> 8)
def run_command_system(self, command):
command = ' '.join(command)
returncode = os.system(command)
if returncode == 0:
return True
else:
print "Command '{0}' had errors, exit code was {1}".format(command, returncode >> 8)
return False
def run_gitstats(self, project):
# example: ./parts/gitstats/gitstats -c project_name=foobar -c max_authors=60 .git /tmp/test/
input_path = os.path.join(project.path, '.git')
output_path = self.get_output_path(project)
if self.is_up_to_date(output_path):
return
if not os.path.isdir(output_path):
os.makedirs(output_path)
print "Running 'gitstats' for project '{0}'".format(project.name)
command = [self.gitstats, '-c', 'project_name={0}'.format(project.name), '-c', 'max_authors=60', input_path, output_path]
self.run_command(command)
def run_gitstats_multi(self, projects, alias):
input_paths = []
project_names = []
for project in projects:
input_paths.append(os.path.abspath(os.path.join(project.path, '.git')))
project_names.append(project.name)
output_path = os.path.join(self.output_path, alias)
if self.is_up_to_date(output_path):
return
if not os.path.isdir(output_path):
os.makedirs(output_path)
#print "Running 'gitstats' for multiple projects '{0}'".format(input_paths)
command = [self.gitstats, '-c', 'project_name={0}'.format(', '.join(project_names))]
command += input_paths
command.append(output_path)
print "command:", command
self.run_command(command)
def run_statsvn(self, project):
# http://wiki.statsvn.org/
# example:
# svn log -v --xml parts/acme/ > tmp_svn.log
# java -jar parts/statsvn/statsvn.jar tmp_svn.log parts/acme/
input_path = project.path
output_path = self.get_output_path(project)
if self.is_up_to_date(output_path):
return
if not os.path.isdir(output_path):
os.makedirs(output_path)
print "Running 'svn log' and 'StatSVN' for project '{0}'".format(project.name)
# run "svn log"
tempdir = tempfile.mkdtemp()
if not os.path.exists(tempdir):
os.makedirs(tempdir)
svn_log = os.path.join(tempdir, '{0}.svnlog'.format(project.name))
command = ['svn', 'log', '-v', '--xml', input_path, '>', svn_log]
success = self.run_command_system(command)
# fix svn log
if not success:
payload = file(svn_log).read()
if '<log>' in payload and not '</log>' in payload:
payload += '\n</log>\n'
file(svn_log, 'w').write(payload)
# run "statsvn.jar"
os.chdir(output_path)
command = ['java', '-mx768m', '-jar', self.statsvn, svn_log, input_path]
self.run_command_system(command)
def build_statistics():
print "Generating repository statistics"
artefact_path = sys.argv[1]
output_path = sys.argv[2]
index_path = sys.argv[3]
stats = Statistics(artefact_path, output_path, index_path)
stats.scan_projects()
stats.write_index()
stats.generate_statistics()
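# Illustrative note: build_statistics() is intended to be called as a
# command-line entry point with three positional arguments -- the artefact
# path to scan, the output directory for the generated reports, and the
# directory in which the statistics.rst index page is written (consumed via
# sys.argv above).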
| zerotired/manticore-ext | src/zt/manticore/ext/statistics.py | Python | bsd-2-clause | 7,029 |
# coding: utf-8
"""
This module defines a holder of Monosublocus instances. It is the last step
before the definition of real loci.
"""
from sys import version_info
from ..transcripts.transcript import Transcript
from .abstractlocus import Abstractlocus
from .locus import Locus
from .monosublocus import Monosublocus
from .sublocus import Sublocus
from ..parsers.GFF import GffLine
from ..utilities import overlap
if version_info.minor < 5:
from sortedcontainers import SortedDict
else:
from collections import OrderedDict as SortedDict
# Resolution order is important here!
# pylint: disable=too-many-instance-attributes
class MonosublocusHolder(Sublocus, Abstractlocus):
"""This is a container that groups together the transcripts
surviving the selection for the Monosublocus.
The class inherits from both sublocus and Abstractlocus
(the main abstract class) in order to be able to reuse
some of the code present in the former.
Internally, the most important method is define_loci -
which will select the best transcript(s) and remove all the overlapping ones.
    The intersection function for this object is laxer than in previous stages,
    and so are the requirements for inclusion.
"""
__name__ = "monosubloci_holder"
# pylint: disable=super-init-not-called
def __init__(self, transcript_instance=None,
configuration=None, logger=None,
verified_introns=None, **kwargs):
# I know what I am doing by NOT calling the Sublocus super but rather
# Abstractlocus
Abstractlocus.__init__(self,
transcript_instance=None,
verified_introns=verified_introns,
configuration=configuration,
logger=logger,
**kwargs)
self._not_passing = set()
self.splitted = False
self.metrics_calculated = False
self.excluded = None
self.feature = "MonosublocusHolder"
# Add the transcript to the Locus
self.locus_verified_introns = set()
if transcript_instance is not None:
self.add_monosublocus(transcript_instance, check_in_locus=False)
self.loci = SortedDict()
self.attributes = dict()
# Overriding is correct here
# pylint: disable=arguments-differ
def add_transcript_to_locus(self, transcript, check_in_locus=True):
"""Override of the sublocus method, and reversal to the original
method in the Abstractlocus class.
The check_in_locus boolean flag is used to decide
whether to check if the transcript is in the Locus or not.
This should be set to False for the first transcript, and True afterwards.
:param transcript: a Transcript instance
:type transcript: Transcript
:param check_in_locus: optional flag to pass to the basic method.
If set to False it disables checks on whether the transcript
is really in the locus
:type check_in_locus: bool
"""
if check_in_locus is True and self.in_locus(
self,
transcript,
flank=self.configuration.pick.clustering.flank,
logger=self.logger,
cds_only=self.configuration.pick.clustering.cds_only,
min_cdna_overlap=self.configuration.pick.clustering.min_cdna_overlap,
min_cds_overlap=self.configuration.pick.clustering.min_cds_overlap,
simple_overlap_for_monoexonic=self.configuration.pick.clustering.simple_overlap_for_monoexonic
) is False:
self.logger.debug("%s is not a valid intersection for %s", transcript.id, self.id)
return False
Abstractlocus.add_transcript_to_locus(self, transcript, check_in_locus=False)
self.locus_verified_introns = set.union(self.locus_verified_introns,
transcript.verified_introns)
# pylint: enable=arguments-differ
@classmethod
def is_intersecting(cls,
transcript,
other,
cds_only=False,
logger=None,
min_cdna_overlap=0.2,
min_cds_overlap=0.2,
simple_overlap_for_monoexonic=True) -> bool:
"""Alias for the Sublocus.is_intersecting method"""
return Sublocus.is_intersecting(transcript, other, cds_only=cds_only,
logger=logger, min_cdna_overlap=min_cdna_overlap,
min_cds_overlap=min_cds_overlap,
simple_overlap_for_monoexonic=simple_overlap_for_monoexonic)
def add_monosublocus(self, monosublocus_instance: Monosublocus, check_in_locus=True):
"""Wrapper to extract the transcript from the monosubloci and pass it to the constructor.
:param monosublocus_instance: the instance to add
:type monosublocus_instance: Monosublocus
:param check_in_locus: boolean flag - should we perform checks to verify the monosublocus is compatible?
"""
assert len(monosublocus_instance.transcripts) == 1
for tid in monosublocus_instance.transcripts:
self.add_transcript_to_locus(monosublocus_instance.transcripts[tid],
check_in_locus=check_in_locus)
def __str__(self, print_cds=False, source_in_name=True):
"""
Method to print out the sublocus lines in GFF format.
:param print_cds: flag. Should we print the CDS?
:param source_in_name: boolean flag. Should we add the original source of the transcript to the ID?
"""
lines = []
self_line = GffLine('')
for attr in ["chrom", 'feature', 'source', 'start', 'end', 'strand']:
setattr(self_line, attr, getattr(self, attr))
self.filter_and_calculate_scores()
self_line.phase, self_line.score = None, self.score
if source_in_name is True:
self_line.id = "{0}_{1}".format(self.source, self.id)
else:
self_line.id = self.id
self_line.name = self.name
self_line.parent = self.parent
self_line.attributes.update(self.attributes)
self_line.attributes["multiexonic"] = (not self.monoexonic)
lines.append(str(self_line))
for tid in self.transcripts:
transcript_instance = self.transcripts[tid]
transcript_instance.source = self.source
transcript_instance.parent = self_line.id
self.logger.debug(self.attributes)
for attribute in self.attributes:
if attribute not in transcript_instance.attributes:
if attribute == "is_fragment" and self.attributes[attribute] is False:
continue
transcript_instance.attributes[attribute] = self.attributes[attribute]
lines.append(transcript_instance.format(
"gff",
all_orfs=self.configuration.pick.output_format.report_all_orfs,
with_cds=print_cds).rstrip())
return "\n".join(lines)
def define_monosubloci(self, **kwargs):
"""Overriden and set to NotImplemented to avoid cross-calling it when inappropriate."""
raise NotImplementedError("Monosubloci are the input of this object, not the output.")
def define_loci(self, purge=False, excluded=None, check_requirements=True):
"""This is the main function of the class. It is analogous
        to the define_subloci method defined for sublocus objects,
but it returns "Locus" objects (not "Monosublocus").
Optional parameters:
:param purge: flag. If set to True, all loci whose transcripts
have scores of 0 will be thrown out
into an Excluded holder.
:type purge: bool
        :param excluded: optional Excluded container that collects the discarded transcripts
:type excluded: Excluded
:param check_requirements: boolean flag. If set to false, transcripts will not be checked for passing requirements.
:type check_requirements: bool
"""
if self.splitted is True:
return
self.excluded = excluded
self.filter_and_calculate_scores(check_requirements=check_requirements)
graph = self.define_graph(
self.transcripts,
inters=self.is_intersecting,
logger=self.logger,
cds_only=self.configuration.pick.clustering.cds_only,
min_cdna_overlap=self.configuration.pick.clustering.min_cdna_overlap,
min_cds_overlap=self.configuration.pick.clustering.min_cds_overlap,
simple_overlap_for_monoexonic=self.configuration.pick.clustering.simple_overlap_for_monoexonic
)
loci = []
while len(graph) > 0:
communities = self.find_communities(graph)
to_remove = set()
for locus_comm in communities:
locus_comm = dict((x, self.transcripts[x]) for x in locus_comm)
selected_tid = self.choose_best(locus_comm)
selected_transcript = self.transcripts[selected_tid]
to_remove.add(selected_tid)
to_remove.update(set(graph.neighbors(selected_tid)))
if purge is False or selected_transcript.score > 0:
new_locus = Locus(selected_transcript, logger=self.logger, configuration=self.configuration,
use_transcript_scores=self._use_transcript_scores)
loci.append(new_locus)
self.logger.debug("Removing {0} transcripts from {1}".format(len(to_remove), self.id))
graph.remove_nodes_from(to_remove) # Remove nodes from graph, iterate
for locus in sorted(loci):
self.loci[locus.id] = locus
self.splitted = True
return
@classmethod
def in_locus(cls, monosublocus: Abstractlocus,
transcript: Transcript,
flank=0,
logger=None,
cds_only=False,
min_cdna_overlap=0.2,
min_cds_overlap=0.2,
simple_overlap_for_monoexonic=False) -> bool:
"""This method checks whether a transcript / monosbulocus
falls inside the Locus coordinates.
:rtype: bool
:param monosublocus: Monosublocus instance
:type monosublocus: Monosublocus
:param transcript: the transcript to be compared
:type transcript: Transcript
:param flank: optional flank argument
:type flank: int
"""
if hasattr(transcript, "transcripts"):
assert len(transcript.transcripts) == 1
transcript = transcript.transcripts[list(transcript.transcripts.keys())[0]]
assert hasattr(transcript, "finalize")
if Abstractlocus.in_locus(monosublocus, transcript, flank=flank) is True:
is_in_locus = False
for tran in monosublocus.transcripts:
tran = monosublocus.transcripts[tran]
is_in_locus = cls.is_intersecting(tran,
transcript,
logger=logger,
cds_only=cds_only,
min_cds_overlap=min_cds_overlap,
min_cdna_overlap=min_cdna_overlap,
simple_overlap_for_monoexonic=simple_overlap_for_monoexonic
)
if is_in_locus is True:
break
return is_in_locus
else:
return False
@property
def id(self):
"""
Wrapper for the id method of abstractlocus. Necessary to redefine the name.
"""
return Abstractlocus.id.fget(self) # @UndefinedVariable
def as_dict(self):
state = Abstractlocus.as_dict(self)
return state
def load_dict(self, state, load_transcripts=True, load_configuration=True):
Abstractlocus.load_dict(self, state, load_configuration=load_configuration,
load_transcripts=load_transcripts)
| lucventurini/mikado | Mikado/loci/monosublocusholder.py | Python | lgpl-3.0 | 12,512 |
#!/usr/bin/env python3
# Copyright (c) 2017-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test wallet load on startup.
Verify that a syscoind node can maintain the list of wallets to load on startup.
"""
from test_framework.test_framework import SyscoinTestFramework
from test_framework.util import (
assert_equal,
)
class WalletStartupTest(SyscoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.supports_cli = True
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_nodes(self):
self.add_nodes(self.num_nodes)
self.start_nodes()
def run_test(self):
self.log.info('Should start without any wallets')
assert_equal(self.nodes[0].listwallets(), [])
assert_equal(self.nodes[0].listwalletdir(), {'wallets': []})
self.log.info('New default wallet should load by default when there are no other wallets')
self.nodes[0].createwallet(wallet_name='', load_on_startup=False)
self.restart_node(0)
assert_equal(self.nodes[0].listwallets(), [''])
self.log.info('Test load on startup behavior')
self.nodes[0].createwallet(wallet_name='w0', load_on_startup=True)
self.nodes[0].createwallet(wallet_name='w1', load_on_startup=False)
self.nodes[0].createwallet(wallet_name='w2', load_on_startup=True)
self.nodes[0].createwallet(wallet_name='w3', load_on_startup=False)
self.nodes[0].createwallet(wallet_name='w4', load_on_startup=False)
self.nodes[0].unloadwallet(wallet_name='w0', load_on_startup=False)
self.nodes[0].unloadwallet(wallet_name='w4', load_on_startup=False)
self.nodes[0].loadwallet(filename='w4', load_on_startup=True)
assert_equal(set(self.nodes[0].listwallets()), set(('', 'w1', 'w2', 'w3', 'w4')))
self.restart_node(0)
assert_equal(set(self.nodes[0].listwallets()), set(('', 'w2', 'w4')))
self.nodes[0].unloadwallet(wallet_name='', load_on_startup=False)
self.nodes[0].unloadwallet(wallet_name='w4', load_on_startup=False)
self.nodes[0].loadwallet(filename='w3', load_on_startup=True)
self.nodes[0].loadwallet(filename='')
self.restart_node(0)
assert_equal(set(self.nodes[0].listwallets()), set(('w2', 'w3')))
if __name__ == '__main__':
WalletStartupTest().main()
| syscoin/syscoin | test/functional/wallet_startup.py | Python | mit | 2,539 |
'''
Copyright (c) 2008 Georgios Giannoudovardis, <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
class BaseAction:
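    """
    Base class for actions. Each action carries a name and a description;
    subclasses are expected to override execute(), which receives the active
    game object and any action parameters.
    """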
def __init__(self, name, description):
self.name = name
self.description = description
def getName(self):
return self.name
def getDescription(self):
return self.description
def execute(self, game, params):
pass | vardis/pano | src/pano/actions/BaseAction.py | Python | mit | 1,446 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import time
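# Note: this script appears to rely on the woo/yade runner environment, which
# pre-imports math helpers (e.g. radians) and injects the simulation globals
# (O, utils, the engine and material classes) referenced below.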
## PhysicalParameters
Density=2400
frictionAngle=radians(35)
tc = 0.001
en = 0.3
es = 0.3
## Import wall's geometry
params=utils.getViscoelasticFromSpheresInteraction(tc,en,es)
facetMat=O.materials.append(ViscElMat(frictionAngle=frictionAngle,**params)) # **params sets kn, cn, ks, cs
sphereMat=O.materials.append(ViscElMat(density=Density,frictionAngle=frictionAngle,**params))
from woo import ymport
fctIds=O.bodies.append(ymport.stl('baraban.stl',color=(1,0,0),material=facetMat))
## Spheres
sphereRadius = 0.2
nbSpheres = (10,10,10)
#nbSpheres = (50,50,50)
for i in range(nbSpheres[0]):
for j in range(nbSpheres[1]):
for k in range(nbSpheres[2]):
x = (i*2 - nbSpheres[0])*sphereRadius*1.1
y = (j*2 - nbSpheres[1])*sphereRadius*1.1
z = (k*2 - nbSpheres[2])*sphereRadius*1.1
s=utils.sphere([x,y,z],sphereRadius,material=sphereMat)
O.bodies.append(s)
## Timestep
O.dt=.2*tc
## Engines
O.engines=[
## Resets forces and momenta the act on bodies
ForceResetter(),
## Using bounding boxes find possible body collisions.
InsertionSortCollider([Bo1_Sphere_Aabb(),Bo1_Facet_Aabb()]),
## Interactions
InteractionLoop(
## Create geometry information about each potential collision.
[Ig2_Sphere_Sphere_ScGeom(), Ig2_Facet_Sphere_ScGeom()],
## Create physical information about the interaction.
[Ip2_ViscElMat_ViscElMat_ViscElPhys()],
## Constitutive law
[Law2_ScGeom_ViscElPhys_Basic()],
),
## Apply gravity
GravityEngine(gravity=[0,-9.81,0]),
	## Cundall damping must be disabled!
NewtonIntegrator(damping=0),
## Saving results
#VTKRecorder(virtPeriod=0.04,fileName='/tmp/stlimp-',recorders=['spheres','facets']),
## Apply kinematics to walls
RotationEngine(ids=fctIds,rotationAxis=[0,0,1],rotateAroundZero=True,angularVelocity=0.5)
]
from woo import qt
qt.View()
#O.saveTmp()
#O.run()
| woodem/woo | obsolete/examples/baraban/baraban.py | Python | gpl-2.0 | 2,018 |
# Opus/UrbanSim urban simulation software.
# Copyright (C) 2010-2011 University of California, Berkeley, 2005-2009 University of Washington
# See opus_core/LICENSE
from lxml.etree import Element
class AbstractManager(object):
'''
    Handler for a group of UI elements with an XmlController as the centerpiece
'''
def __init__(self, base_widget, tab_base_widget, project, manager_node_path):
'''
@param base_widget (QWidget): Widget to place XmlController in
@param tab_base_widget (QTabWidget): TabWidget to place gui elements (tabs)
@param project (OpusProject): currently opened project
@param manager_node_path (String): name of the top level node to manage
'''
# Main GUI Window
self.base_widget = base_widget
# Where to add tabs
self.tab_base_widget = tab_base_widget
# Universal project access
self.project = project
# Manager related node in project
self.xml_root = self.project.find(manager_node_path)
if self.xml_root is None:
raise LookupError('The given manager node "%s" is not in the project XML' %
manager_node_path)
# Controlled GUI elements
self.tab_widgets = []
self.xml_controller = None
def close_tab(self, tab_widget):
'''
Close the GUI element if it's managed by this manager.
Nothing is closed if the provided widget is not managed by this manager.
@param tab_widget (QWidget): The widget to close.
'''
if tab_widget in self.tab_widgets:
self.tab_base_widget.removeTab(self.tab_base_widget.indexOf(tab_widget))
self.tab_widgets.remove(tab_widget)
def _attach_tab(self, tab_widget):
'''
Couples a widget with this manager and adds it to the managers
base tab widget.
@param tab_widget (QWidget): The widget to add
'''
self.tab_widgets.append(tab_widget)
self.tab_base_widget.insertTab(0, tab_widget, tab_widget.tabIcon,
tab_widget.tabLabel)
self.tab_base_widget.setCurrentIndex(0)
tab_widget.show()
def close(self):
'''
        Close the manager, removing all its managed tabs and its XmlController.
'''
while len(self.tab_widgets) > 0:
self.close_tab(self.tab_widgets[0])
self.xml_controller.close()
| apdjustino/DRCOG_Urbansim | src/opus_gui/abstract_manager/abstract_manager.py | Python | agpl-3.0 | 2,465 |
from copy import deepcopy
from robofab.pens.pointPen import AbstractPointPen
from robofab.pens.adapterPens import PointToSegmentPen
from mathFunctions import *
from mathGuideline import *
# ------------------
# UFO 3 branch notes
# ------------------
#
# to do:
# X anchors
# - try to preserve ordering?
# X components
# X identifiers
# X contours
# X identifiers
# X points
# X identifiers
# X guidelines
# X height
# X image
#
# - is there any cruft that can be removed?
# X why is divPt here? move all of those to the math funcions
# and get rid of the robofab dependency.
# - FilterRedundantPointPen._flushContour is a mess
# X for the pt math funcons, always send (x, y) factors instead
# of coercing within the funcion. the coercion can happen at
# the beginning of the _processMathTwo method.
# - try list comprehensions in the point math for speed
#
# Questionable stuff:
# X is getRef needed?
# X nothing is ever set to _structure. should it be?
# X should the compatibilty be a function or pen?
# X the lib import is shallow and modifications to
# lower level objects (ie dict) could modify the
# original object. this probably isn't desirable.
# deepcopy won't work here since it will try to
# maintain the original class. may need to write
# a custom copier. or maybe something like this
# would be sufficient:
# self.lib = deepcopy(dict(glyph.lib))
# the class would be maintained for everything but
# the top level. that shouldn't matter for the
# purposes here.
# - __cmp__ is dubious but harmless i suppose.
# X is generationCount needed?
# X can box become bounds? have both?
class MathGlyph(object):
"""
A very shallow glyph object for rapid math operations.
Notes about glyph math:
- absolute contour compatibility is required
- absolute component, anchor, guideline and image compatibility is NOT required.
in cases of incompatibility in this data, only compatible data is processed and
    returned. because of this, anchors and components may not be returned in the
same order as the original.
"""
def __init__(self, glyph):
if glyph is None:
self.contours = []
self.components = []
self.anchors = []
self.guidelines = []
self.image = _expandImage(None)
self.lib = {}
self.name = None
self.unicodes = None
self.width = None
self.height = None
self.note = None
else:
p = MathGlyphPen()
glyph.drawPoints(p)
self.contours = p.contours
self.components = p.components
self.anchors = [dict(anchor) for anchor in glyph.anchors]
self.guidelines = [_expandGuideline(guideline) for guideline in glyph.guidelines]
self.image = _expandImage(glyph.image)
self.lib = deepcopy(dict(glyph.lib))
self.name = glyph.name
self.unicodes = list(glyph.unicodes)
self.width = glyph.width
self.height = glyph.height
self.note = glyph.note
def __cmp__(self, other):
flag = False
if self.name != other.name:
flag = True
if self.unicodes != other.unicodes:
flag = True
if self.width != other.width:
flag = True
if self.height != other.height:
flag = True
if self.note != other.note:
flag = True
if self.lib != other.lib:
flag = True
if self.contours != other.contours:
flag = True
if self.components != other.components:
flag = True
if self.anchors != other.anchors:
flag = True
if self.guidelines != other.guidelines:
flag = True
if self.image != other.image:
flag = True
return flag
# ----
# Copy
# ----
def copy(self):
"""return a new MathGlyph containing all data in self"""
return MathGlyph(self)
def copyWithoutMathSubObjects(self):
"""
return a new MathGlyph containing all data except:
contours
components
anchors
guidelines
this is used mainly for internal glyph math.
"""
n = MathGlyph(None)
n.name = self.name
if self.unicodes is not None:
n.unicodes = list(self.unicodes)
n.width = self.width
n.height = self.height
n.note = self.note
n.lib = deepcopy(dict(self.lib))
return n
# ----
# Math
# ----
# math with other glyph
def __add__(self, otherGlyph):
copiedGlyph = self.copyWithoutMathSubObjects()
self._processMathOne(copiedGlyph, otherGlyph, addPt, add)
return copiedGlyph
def __sub__(self, otherGlyph):
copiedGlyph = self.copyWithoutMathSubObjects()
self._processMathOne(copiedGlyph, otherGlyph, subPt, sub)
return copiedGlyph
def _processMathOne(self, copiedGlyph, otherGlyph, ptFunc, func):
# width
copiedGlyph.width = func(self.width, otherGlyph.width)
# height
copiedGlyph.height = func(self.height, otherGlyph.height)
# contours
copiedGlyph.contours = []
if self.contours:
copiedGlyph.contours = _processMathOneContours(self.contours, otherGlyph.contours, ptFunc)
# components
copiedGlyph.components = []
if self.components:
componentPairs = _pairComponents(self.components, otherGlyph.components)
copiedGlyph.components = _processMathOneComponents(componentPairs, ptFunc)
# anchors
copiedGlyph.anchors = []
if self.anchors:
anchorTree1 = _anchorTree(self.anchors)
anchorTree2 = _anchorTree(otherGlyph.anchors)
anchorPairs = _pairAnchors(anchorTree1, anchorTree2)
copiedGlyph.anchors = _processMathOneAnchors(anchorPairs, ptFunc)
# guidelines
copiedGlyph.guidelines = []
if self.guidelines:
guidelinePairs = _pairGuidelines(self.guidelines, otherGlyph.guidelines)
copiedGlyph.guidelines = _processMathOneGuidelines(guidelinePairs, ptFunc, func)
# image
copiedGlyph.image = _expandImage(None)
imagePair = _pairImages(self.image, otherGlyph.image)
if imagePair:
copiedGlyph.image = _processMathOneImage(imagePair, ptFunc)
# math with factor
def __mul__(self, factor):
if not isinstance(factor, tuple):
factor = (factor, factor)
copiedGlyph = self.copyWithoutMathSubObjects()
self._processMathTwo(copiedGlyph, factor, mulPt, mul)
return copiedGlyph
__rmul__ = __mul__
def __div__(self, factor):
if not isinstance(factor, tuple):
factor = (factor, factor)
copiedGlyph = self.copyWithoutMathSubObjects()
self._processMathTwo(copiedGlyph, factor, divPt, div)
return copiedGlyph
__rdiv__ = __div__
def _processMathTwo(self, copiedGlyph, factor, ptFunc, func):
# width
copiedGlyph.width = func(self.width, factor[0])
# height
copiedGlyph.height = func(self.height, factor[1])
# contours
copiedGlyph.contours = []
if self.contours:
copiedGlyph.contours = _processMathTwoContours(self.contours, factor, ptFunc)
# components
copiedGlyph.components = []
if self.components:
copiedGlyph.components = _processMathTwoComponents(self.components, factor, ptFunc)
# anchors
copiedGlyph.anchors = []
if self.anchors:
copiedGlyph.anchors = _processMathTwoAnchors(self.anchors, factor, ptFunc)
# guidelines
copiedGlyph.guidelines = []
if self.guidelines:
copiedGlyph.guidelines = _processMathTwoGuidelines(self.guidelines, factor, func)
# image
if self.image:
copiedGlyph.image = _processMathTwoImage(self.image, factor, ptFunc)
    # ---------------
    # Additional math
    # ---------------
def round(self, digits=None):
"""round the geometry."""
copiedGlyph = self.copyWithoutMathSubObjects()
# misc
copiedGlyph.width = _roundNumber(self.width, digits)
copiedGlyph.height = _roundNumber(self.height, digits)
# contours
copiedGlyph.contours = []
if self.contours:
copiedGlyph.contours = _roundContours(self.contours, digits)
# components
copiedGlyph.components = []
if self.components:
copiedGlyph.components = _roundComponents(self.components, digits)
# guidelines
copiedGlyph.guidelines = []
if self.guidelines:
copiedGlyph.guidelines = _roundGuidelines(self.guidelines, digits)
# anchors
copiedGlyph.anchors = []
if self.anchors:
copiedGlyph.anchors = _roundAnchors(self.anchors, digits)
# image
copiedGlyph.image = None
if self.image:
copiedGlyph.image = _roundImage(self.image, digits)
return copiedGlyph
# -------
# Pen API
# -------
def drawPoints(self, pointPen):
"""draw self using pointPen"""
for contour in self.contours:
pointPen.beginPath(identifier=contour["identifier"])
for segmentType, pt, smooth, name, identifier in contour["points"]:
pointPen.addPoint(pt=pt, segmentType=segmentType, smooth=smooth, name=name, identifier=identifier)
pointPen.endPath()
for component in self.components:
pointPen.addComponent(component["baseGlyph"], component["transformation"], identifier=component["identifier"])
def draw(self, pen):
"""draw self using pen"""
pointPen = PointToSegmentPen(pen)
self.drawPoints(pointPen)
# ----------
# Extraction
# ----------
def extractGlyph(self, glyph, pointPen=None, onlyGeometry=False):
"""
"rehydrate" to a glyph. this requires
a glyph as an argument. if a point pen other
than the type of pen returned by glyph.getPointPen()
is required for drawing, send this the needed point pen.
"""
if pointPen is None:
pointPen = glyph.getPointPen()
glyph.clearContours()
glyph.clearComponents()
glyph.clearAnchors()
glyph.clearGuidelines()
glyph.lib.clear()
cleanerPen = FilterRedundantPointPen(pointPen)
self.drawPoints(cleanerPen)
glyph.anchors = [dict(anchor) for anchor in self.anchors]
glyph.guidelines = [_compressGuideline(guideline) for guideline in self.guidelines]
glyph.image = _compressImage(self.image)
glyph.lib = deepcopy(dict(self.lib))
glyph.width = self.width
glyph.height = self.height
glyph.note = self.note
if not onlyGeometry:
glyph.name = self.name
glyph.unicodes = list(self.unicodes)
return glyph
# ----------
# Point Pens
# ----------
class MathGlyphPen(AbstractPointPen):
"""
Point pen for building MathGlyph data structures.
>>> pen = MathGlyphPen()
>>> pen.beginPath(identifier="contour 1")
>>> pen.addPoint(( 0, 100), "line", smooth=False, name="name 1", identifier="point 1")
>>> pen.addPoint((100, 100), "line", smooth=False, name="name 2", identifier="point 2")
>>> pen.addPoint((100, 0), "line", smooth=False, name="name 3", identifier="point 3")
>>> pen.addPoint(( 0, 0), "line", smooth=False, name="name 4", identifier="point 4")
>>> pen.endPath()
>>> expected = [
... ("curve", ( 0, 100), False, "name 1", "point 1"),
... (None, ( 0, 100), False, None, None),
... (None, (100, 100), False, None, None),
... ("curve", (100, 100), False, "name 2", "point 2"),
... (None, (100, 100), False, None, None),
... (None, (100, 0), False, None, None),
... ("curve", (100, 0), False, "name 3", "point 3"),
... (None, (100, 0), False, None, None),
... (None, ( 0, 0), False, None, None),
... ("curve", ( 0, 0), False, "name 4", "point 4"),
... (None, ( 0, 0), False, None, None),
... (None, ( 0, 100), False, None, None),
... ]
>>> pen.contours[-1]["points"] == expected
True
>>> pen.contours[-1]["identifier"]
'contour 1'
>>> pen = MathGlyphPen()
>>> pen.beginPath(identifier="contour 1")
>>> pen.addPoint(( 0, 50), "curve", smooth=False, name="name 1", identifier="point 1")
>>> pen.addPoint(( 0, 75), None)
>>> pen.addPoint(( 25, 100), None)
>>> pen.addPoint(( 50, 100), "curve", smooth=False, name="name 2", identifier="point 2")
>>> pen.addPoint(( 75, 100), None)
>>> pen.addPoint((100, 75), None)
>>> pen.addPoint((100, 50), "curve", smooth=False, name="name 3", identifier="point 3")
>>> pen.addPoint((100, 25), None)
>>> pen.addPoint(( 75, 0), None)
>>> pen.addPoint(( 50, 0), "curve", smooth=False, name="name 4", identifier="point 4")
>>> pen.addPoint(( 25, 0), None)
>>> pen.addPoint(( 0, 25), None)
>>> pen.endPath()
>>> expected = [
... ("curve", ( 0, 50), False, "name 1", "point 1"),
... (None, ( 0, 75), False, None, None),
... (None, ( 25, 100), False, None, None),
... ("curve", ( 50, 100), False, "name 2", "point 2"),
... (None, ( 75, 100), False, None, None),
... (None, (100, 75), False, None, None),
... ("curve", (100, 50), False, "name 3", "point 3"),
... (None, (100, 25), False, None, None),
... (None, ( 75, 0), False, None, None),
... ("curve", ( 50, 0), False, "name 4", "point 4"),
... (None, ( 25, 0), False, None, None),
... (None, ( 0, 25), False, None, None),
... ]
>>> pen.contours[-1]["points"] == expected
True
>>> pen.contours[-1]["identifier"]
'contour 1'
"""
def __init__(self):
self.contours = []
self.components = []
self._contourIdentifier = None
self._points = []
def _flushContour(self):
"""
This normalizes the contour so that:
- there are no line segments. in their place will be
curve segments with the off curves positioned on top
          of the previous on curve and the new on curve.
- the contour starts with an on curve
"""
self.contours.append(
dict(identifier=self._contourIdentifier, points=[])
)
contourPoints = self.contours[-1]["points"]
points = self._points
# move offcurves at the beginning of the contour to the end
haveOnCurve = False
for point in points:
if point[0] is not None:
haveOnCurve = True
break
if haveOnCurve:
while 1:
if points[0][0] is None:
point = points.pop(0)
points.append(point)
else:
break
# convert lines to curves
holdingOffCurves = []
for index, point in enumerate(points):
segmentType = point[0]
if segmentType == "line":
pt, smooth, name, identifier = point[1:]
prevPt = points[index - 1][1]
if index == 0:
holdingOffCurves.append((None, prevPt, False, None, None))
holdingOffCurves.append((None, pt, False, None, None))
else:
contourPoints.append((None, prevPt, False, None, None))
contourPoints.append((None, pt, False, None, None))
contourPoints.append(("curve", pt, False, name, identifier))
else:
contourPoints.append(point)
contourPoints.extend(holdingOffCurves)
def beginPath(self, identifier=None):
self._contourIdentifier = identifier
self._points = []
def addPoint(self, pt, segmentType=None, smooth=False, name=None, identifier=None, **kwargs):
self._points.append((segmentType, pt, smooth, name, identifier))
def endPath(self):
self._flushContour()
def addComponent(self, baseGlyph, transformation, identifier=None, **kwargs):
self.components.append(dict(baseGlyph=baseGlyph, transformation=transformation, identifier=identifier))
class FilterRedundantPointPen(AbstractPointPen):
def __init__(self, anotherPointPen):
self._pen = anotherPointPen
self._points = []
def _flushContour(self):
"""
>>> points = [
... ("curve", ( 0, 100), False, "name 1", "point 1"),
... (None, ( 0, 100), False, None, None),
... (None, (100, 100), False, None, None),
... ("curve", (100, 100), False, "name 2", "point 2"),
... (None, (100, 100), False, None, None),
... (None, (100, 0), False, None, None),
... ("curve", (100, 0), False, "name 3", "point 3"),
... (None, (100, 0), False, None, None),
... (None, ( 0, 0), False, None, None),
... ("curve", ( 0, 0), False, "name 4", "point 4"),
... (None, ( 0, 0), False, None, None),
... (None, ( 0, 100), False, None, None),
... ]
>>> testPen = _TestPointPen()
>>> filterPen = FilterRedundantPointPen(testPen)
>>> filterPen.beginPath(identifier="contour 1")
>>> for segmentType, pt, smooth, name, identifier in points:
... filterPen.addPoint(pt, segmentType=segmentType, smooth=smooth, name=name, identifier=identifier)
>>> filterPen.endPath()
>>> testPen.dump()
beginPath(identifier="contour 1")
addPoint((0, 100), segmentType="line", smooth=False, name="name 1", identifier="point 1")
addPoint((100, 100), segmentType="line", smooth=False, name="name 2", identifier="point 2")
addPoint((100, 0), segmentType="line", smooth=False, name="name 3", identifier="point 3")
addPoint((0, 0), segmentType="line", smooth=False, name="name 4", identifier="point 4")
endPath()
"""
points = self._points
prevOnCurve = None
offCurves = []
pointsToDraw = []
# deal with the first point
pt, segmentType, smooth, name, identifier = points[0]
# if it is an offcurve, add it to the offcurve list
if segmentType is None:
offCurves.append((pt, segmentType, smooth, name, identifier))
else:
# potential redundancy
if segmentType == "curve":
# gather preceding off curves
testOffCurves = []
lastPoint = None
for i in xrange(len(points)):
i = -i - 1
testPoint = points[i]
testSegmentType = testPoint[1]
if testSegmentType is not None:
lastPoint = testPoint[0]
break
testOffCurves.append(testPoint[0])
# if two offcurves exist we can test for redundancy
if len(testOffCurves) == 2:
if testOffCurves[1] == lastPoint and testOffCurves[0] == pt:
segmentType = "line"
# remove the last two points
points = points[:-2]
# add the point to the contour
pointsToDraw.append((pt, segmentType, smooth, name, identifier))
prevOnCurve = pt
for pt, segmentType, smooth, name, identifier in points[1:]:
# store offcurves
if segmentType is None:
offCurves.append((pt, segmentType, smooth, name, identifier))
continue
# curves are a potential redundancy
elif segmentType == "curve":
if len(offCurves) == 2:
# test for redundancy
if offCurves[0][0] == prevOnCurve and offCurves[1][0] == pt:
offCurves = []
segmentType = "line"
# add all offcurves
for offCurve in offCurves:
pointsToDraw.append(offCurve)
# add the on curve
pointsToDraw.append((pt, segmentType, smooth, name, identifier))
# reset the stored data
prevOnCurve = pt
offCurves = []
# catch any remaining offcurves
if len(offCurves) != 0:
for offCurve in offCurves:
pointsToDraw.append(offCurve)
# draw to the pen
for pt, segmentType, smooth, name, identifier in pointsToDraw:
self._pen.addPoint(pt, segmentType, smooth=smooth, name=name, identifier=identifier)
def beginPath(self, identifier=None, **kwargs):
self._points = []
self._pen.beginPath(identifier=identifier)
def addPoint(self, pt, segmentType=None, smooth=False, name=None, identifier=None, **kwargs):
self._points.append((pt, segmentType, smooth, name, identifier))
def endPath(self):
self._flushContour()
self._pen.endPath()
def addComponent(self, baseGlyph, transformation, identifier=None, **kwargs):
self._pen.addComponent(baseGlyph, transformation, identifier)
class _TestPointPen(AbstractPointPen):
def __init__(self):
self._text = []
def dump(self):
for line in self._text:
print line
def _prep(self, i):
if isinstance(i, basestring):
i = "\"%s\"" % i
return str(i)
def beginPath(self, identifier=None, **kwargs):
self._text.append("beginPath(identifier=%s)" % self._prep(identifier))
def addPoint(self, pt, segmentType=None, smooth=False, name=None, identifier=None, **kwargs):
self._text.append("addPoint(%s, segmentType=%s, smooth=%s, name=%s, identifier=%s)" % (
self._prep(pt),
self._prep(segmentType),
self._prep(smooth),
self._prep(name),
self._prep(identifier)
)
)
def endPath(self):
self._text.append("endPath()")
def addComponent(self, baseGlyph, transformation, identifier=None, **kwargs):
self._text.append("addComponent(baseGlyph=%s, transformation=%s, identifier=%s)" % (
self._prep(baseGlyph),
self._prep(transformation),
self._prep(identifier)
)
)
# -------
# Support
# -------
# contours
def _processMathOneContours(contours1, contours2, func):
"""
>>> contours1 = [
... dict(identifier="contour 1", points=[("line", (1, 3), False, "test", "1")])
... ]
>>> contours2 = [
... dict(identifier=None, points=[(None, (4, 6), True, None, None)])
... ]
>>> expected = [
... dict(identifier="contour 1", points=[("line", (5, 9), False, "test", "1")])
... ]
>>> _processMathOneContours(contours1, contours2, addPt) == expected
True
"""
result = []
for index, contour1 in enumerate(contours1):
contourIdentifier = contour1["identifier"]
points1 = contour1["points"]
points2 = contours2[index]["points"]
resultPoints = []
for index, point in enumerate(points1):
segmentType, pt1, smooth, name, identifier = point
pt2 = points2[index][1]
pt = func(pt1, pt2)
resultPoints.append((segmentType, pt, smooth, name, identifier))
result.append(dict(identifier=contourIdentifier, points=resultPoints))
return result
def _processMathTwoContours(contours, factor, func):
"""
>>> contours = [
... dict(identifier="contour 1", points=[("line", (1, 3), False, "test", "1")])
... ]
>>> expected = [
... dict(identifier="contour 1", points=[("line", (2, 4.5), False, "test", "1")])
... ]
>>> _processMathTwoContours(contours, (2, 1.5), mulPt) == expected
True
"""
result = []
for contour in contours:
contourIdentifier = contour["identifier"]
points = contour["points"]
resultPoints = []
for point in points:
segmentType, pt, smooth, name, identifier = point
pt = func(pt, factor)
resultPoints.append((segmentType, pt, smooth, name, identifier))
result.append(dict(identifier=contourIdentifier, points=resultPoints))
return result
# anchors
def _anchorTree(anchors):
"""
>>> anchors = [
... dict(identifier="1", name="test", x=1, y=2, color=None),
... dict(name="test", x=1, y=2, color=None),
... dict(name="test", x=3, y=4, color=None),
... dict(name="test", x=2, y=3, color=None),
... dict(name="test 2", x=1, y=2, color=None),
... ]
>>> expected = {
... "test" : [
... ("1", 1, 2, None),
... (None, 1, 2, None),
... (None, 3, 4, None),
... (None, 2, 3, None),
... ],
... "test 2" : [
... (None, 1, 2, None)
... ]
... }
>>> _anchorTree(anchors) == expected
True
"""
tree = {}
for anchor in anchors:
x = anchor["x"]
y = anchor["y"]
name = anchor.get("name")
identifier = anchor.get("identifier")
color = anchor.get("color")
if name not in tree:
tree[name] = []
tree[name].append((identifier, x, y, color))
return tree
def _pairAnchors(anchorDict1, anchorDict2):
"""
Anchors are paired using the following rules:
Matching Identifiers
--------------------
>>> anchors1 = {
... "test" : [
... (None, 1, 2, None),
... ("identifier 1", 3, 4, None)
... ]
... }
>>> anchors2 = {
... "test" : [
... ("identifier 1", 1, 2, None),
... (None, 3, 4, None)
... ]
... }
>>> expected = [
... (
... dict(name="test", identifier=None, x=1, y=2, color=None),
... dict(name="test", identifier=None, x=3, y=4, color=None)
... ),
... (
... dict(name="test", identifier="identifier 1", x=3, y=4, color=None),
... dict(name="test", identifier="identifier 1", x=1, y=2, color=None)
... )
... ]
>>> _pairAnchors(anchors1, anchors2) == expected
True
Mismatched Identifiers
----------------------
>>> anchors1 = {
... "test" : [
... ("identifier 1", 3, 4, None)
... ]
... }
>>> anchors2 = {
... "test" : [
... ("identifier 2", 1, 2, None),
... ]
... }
>>> expected = [
... (
... dict(name="test", identifier="identifier 1", x=3, y=4, color=None),
... dict(name="test", identifier="identifier 2", x=1, y=2, color=None)
... )
... ]
>>> _pairAnchors(anchors1, anchors2) == expected
True
"""
pairs = []
for name, anchors1 in anchorDict1.items():
if name not in anchorDict2:
continue
anchors2 = anchorDict2[name]
# align with matching identifiers
removeFromAnchors1 = []
for anchor1 in anchors1:
match = None
identifier = anchor1[0]
for anchor2 in anchors2:
if anchor2[0] == identifier:
match = anchor2
break
if match is not None:
anchor2 = match
anchors2.remove(anchor2)
removeFromAnchors1.append(anchor1)
a1 = dict(name=name, identifier=identifier)
a1["x"], a1["y"], a1["color"] = anchor1[1:]
a2 = dict(name=name, identifier=identifier)
a2["x"], a2["y"], a2["color"] = anchor2[1:]
pairs.append((a1, a2))
for anchor1 in removeFromAnchors1:
anchors1.remove(anchor1)
if not anchors1 or not anchors2:
continue
# align by index
while 1:
anchor1 = anchors1.pop(0)
anchor2 = anchors2.pop(0)
a1 = dict(name=name)
a1["identifier"], a1["x"], a1["y"], a1["color"] = anchor1
            a2 = dict(name=name)
a2["identifier"], a2["x"], a2["y"], a2["color"] = anchor2
pairs.append((a1, a2))
if not anchors1:
break
if not anchors2:
break
return pairs
def _processMathOneAnchors(anchorPairs, func):
"""
>>> anchorPairs = [
... (
... dict(x=100, y=-100, name="foo", identifier="1", color="0,0,0,0"),
... dict(x=200, y=-200, name="bar", identifier="2", color="1,1,1,1")
... )
... ]
>>> expected = [
... dict(x=300, y=-300, name="foo", identifier="1", color="0,0,0,0")
... ]
>>> _processMathOneAnchors(anchorPairs, addPt) == expected
True
"""
result = []
for anchor1, anchor2 in anchorPairs:
anchor = dict(anchor1)
pt1 = (anchor1["x"], anchor1["y"])
pt2 = (anchor2["x"], anchor2["y"])
anchor["x"], anchor["y"] = func(pt1, pt2)
result.append(anchor)
return result
def _processMathTwoAnchors(anchors, factor, func):
"""
>>> anchors = [
... dict(x=100, y=-100, name="foo", identifier="1", color="0,0,0,0")
... ]
>>> expected = [
... dict(x=200, y=-150, name="foo", identifier="1", color="0,0,0,0")
... ]
>>> _processMathTwoAnchors(anchors, (2, 1.5), mulPt) == expected
True
"""
result = []
for anchor in anchors:
anchor = dict(anchor)
pt = (anchor["x"], anchor["y"])
anchor["x"], anchor["y"] = func(pt, factor)
result.append(anchor)
return result
# components
def _pairComponents(components1, components2):
"""
>>> components1 = [
... dict(baseGlyph="A", transformation=(0, 0, 0, 0, 0, 0), identifier="1"),
... dict(baseGlyph="B", transformation=(0, 0, 0, 0, 0, 0), identifier="1"),
... dict(baseGlyph="A", transformation=(0, 0, 0, 0, 0, 0), identifier=None)
... ]
>>> components2 = [
... dict(baseGlyph="A", transformation=(0, 0, 0, 0, 0, 0), identifier=None),
... dict(baseGlyph="B", transformation=(0, 0, 0, 0, 0, 0), identifier="1"),
... dict(baseGlyph="A", transformation=(0, 0, 0, 0, 0, 0), identifier="1")
... ]
>>> expected = [
... (
... dict(baseGlyph="A", transformation=(0, 0, 0, 0, 0, 0), identifier="1"),
... dict(baseGlyph="A", transformation=(0, 0, 0, 0, 0, 0), identifier="1")
... ),
... (
... dict(baseGlyph="B", transformation=(0, 0, 0, 0, 0, 0), identifier="1"),
... dict(baseGlyph="B", transformation=(0, 0, 0, 0, 0, 0), identifier="1")
... ),
... (
... dict(baseGlyph="A", transformation=(0, 0, 0, 0, 0, 0), identifier=None),
... dict(baseGlyph="A", transformation=(0, 0, 0, 0, 0, 0), identifier=None)
... ),
... ]
>>> _pairComponents(components1, components2) == expected
True
>>> components1 = [
... dict(baseGlyph="A", transformation=(0, 0, 0, 0, 0, 0), identifier=None),
... dict(baseGlyph="B", transformation=(0, 0, 0, 0, 0, 0), identifier=None)
... ]
>>> components2 = [
... dict(baseGlyph="B", transformation=(0, 0, 0, 0, 0, 0), identifier=None),
... dict(baseGlyph="A", transformation=(0, 0, 0, 0, 0, 0), identifier=None)
... ]
>>> expected = [
... (
... dict(baseGlyph="A", transformation=(0, 0, 0, 0, 0, 0), identifier=None),
... dict(baseGlyph="A", transformation=(0, 0, 0, 0, 0, 0), identifier=None)
... ),
... (
... dict(baseGlyph="B", transformation=(0, 0, 0, 0, 0, 0), identifier=None),
... dict(baseGlyph="B", transformation=(0, 0, 0, 0, 0, 0), identifier=None)
... ),
... ]
>>> _pairComponents(components1, components2) == expected
True
"""
components1 = list(components1)
components2 = list(components2)
pairs = []
# align with matching identifiers
removeFromComponents1 = []
for component1 in components1:
baseGlyph = component1["baseGlyph"]
identifier = component1["identifier"]
match = None
for component2 in components2:
if component2["baseGlyph"] == baseGlyph and component2["identifier"] == identifier:
match = component2
break
if match is not None:
component2 = match
removeFromComponents1.append(component1)
components2.remove(component2)
pairs.append((component1, component2))
for component1 in removeFromComponents1:
components1.remove(component1)
# align with index
for component1 in components1:
baseGlyph = component1["baseGlyph"]
for component2 in components2:
if component2["baseGlyph"] == baseGlyph:
components2.remove(component2)
pairs.append((component1, component2))
break
return pairs
def _processMathOneComponents(componentPairs, func):
"""
>>> components = [
... (
... dict(baseGlyph="A", transformation=( 1, 3, 5, 7, 9, 11), identifier="1"),
... dict(baseGlyph="A", transformation=(12, 14, 16, 18, 20, 22), identifier=None)
... )
... ]
>>> expected = [
... dict(baseGlyph="A", transformation=(13, 17, 21, 25, 29, 33), identifier="1")
... ]
>>> _processMathOneComponents(components, addPt) == expected
True
"""
result = []
for component1, component2 in componentPairs:
component = dict(component1)
component["transformation"] = _processMathOneTransformation(component1["transformation"], component2["transformation"], func)
result.append(component)
return result
def _processMathTwoComponents(components, factor, func):
"""
>>> components = [
... dict(baseGlyph="A", transformation=(1, 2, 3, 4, 5, 6), identifier="1"),
... ]
>>> expected = [
... dict(baseGlyph="A", transformation=(2, 4, 4.5, 6, 10, 9), identifier="1")
... ]
>>> _processMathTwoComponents(components, (2, 1.5), mulPt) == expected
True
"""
result = []
for component in components:
component = dict(component)
component["transformation"] = _processMathTwoTransformation(component["transformation"], factor, func)
result.append(component)
return result
# image
_imageTransformationKeys = "xScale xyScale yxScale yScale xOffset yOffset".split(" ")
_defaultImageTransformation = (1, 0, 0, 1, 0, 0)
_defaultImageTransformationDict = {}
for key, value in zip(_imageTransformationKeys, _defaultImageTransformation):
_defaultImageTransformationDict[key] = value
def _expandImage(image):
"""
>>> _expandImage(None) == dict(fileName=None, transformation=(1, 0, 0, 1, 0, 0), color=None)
True
>>> _expandImage(dict(fileName="foo")) == dict(fileName="foo", transformation=(1, 0, 0, 1, 0, 0), color=None)
True
"""
if image is None:
fileName = None
transformation = _defaultImageTransformation
color = None
else:
fileName = image["fileName"]
color = image.get("color")
transformation = tuple([
image.get(key, _defaultImageTransformationDict[key])
for key in _imageTransformationKeys
])
return dict(fileName=fileName, transformation=transformation, color=color)
def _compressImage(image):
"""
>>> expected = dict(fileName="foo", color=None, xScale=1, xyScale=0, yxScale=0, yScale=1, xOffset=0, yOffset=0)
>>> _compressImage(dict(fileName="foo", transformation=(1, 0, 0, 1, 0, 0), color=None)) == expected
True
"""
fileName = image["fileName"]
transformation = image["transformation"]
color = image["color"]
if fileName is None:
return
image = dict(fileName=fileName, color=color)
for index, key in enumerate(_imageTransformationKeys):
image[key] = transformation[index]
return image
def _pairImages(image1, image2):
"""
>>> image1 = dict(fileName="foo", transformation=(1, 0, 0, 1, 0, 0), color=None)
>>> image2 = dict(fileName="foo", transformation=(2, 0, 0, 2, 0, 0), color="0,0,0,0")
>>> _pairImages(image1, image2) == (image1, image2)
True
>>> image1 = dict(fileName="foo", transformation=(1, 0, 0, 1, 0, 0), color=None)
>>> image2 = dict(fileName="bar", transformation=(1, 0, 0, 1, 0, 0), color=None)
>>> _pairImages(image1, image2) == ()
True
"""
if image1["fileName"] != image2["fileName"]:
return ()
return (image1, image2)
def _processMathOneImage(imagePair, func):
"""
>>> image1 = dict(fileName="foo", transformation=( 1, 3, 5, 7, 9, 11), color="0,0,0,0")
>>> image2 = dict(fileName="bar", transformation=(12, 14, 16, 18, 20, 22), color=None)
>>> expected = dict(fileName="foo", transformation=(13, 17, 21, 25, 29, 33), color="0,0,0,0")
>>> _processMathOneImage((image1, image2), addPt) == expected
True
"""
image1, image2 = imagePair
fileName = image1["fileName"]
color = image1["color"]
transformation = _processMathOneTransformation(image1["transformation"], image2["transformation"], func)
return dict(fileName=fileName, transformation=transformation, color=color)
def _processMathTwoImage(image, factor, func):
"""
>>> image = dict(fileName="foo", transformation=(1, 2, 3, 4, 5, 6), color="0,0,0,0")
>>> expected = dict(fileName="foo", transformation=(2, 4, 4.5, 6, 10, 9), color="0,0,0,0")
>>> _processMathTwoImage(image, (2, 1.5), mulPt) == expected
True
"""
fileName = image["fileName"]
color = image["color"]
transformation = _processMathTwoTransformation(image["transformation"], factor, func)
return dict(fileName=fileName, transformation=transformation, color=color)
# transformations
def _processMathOneTransformation(transformation1, transformation2, func):
"""
>>> transformation1 = ( 1, 3, 5, 7, 9, 11)
>>> transformation2 = (12, 14, 16, 18, 20, 22)
>>> expected = (13, 17, 21, 25, 29, 33)
>>> _processMathOneTransformation(transformation1, transformation2, addPt) == expected
True
"""
xScale1, xyScale1, yxScale1, yScale1, xOffset1, yOffset1 = transformation1
xScale2, xyScale2, yxScale2, yScale2, xOffset2, yOffset2 = transformation2
xScale, yScale = func((xScale1, yScale1), (xScale2, yScale2))
xyScale, yxScale = func((xyScale1, yxScale1), (xyScale2, yxScale2))
xOffset, yOffset = func((xOffset1, yOffset1), (xOffset2, yOffset2))
return (xScale, xyScale, yxScale, yScale, xOffset, yOffset)
def _processMathTwoTransformation(transformation, factor, func):
"""
>>> transformation = (1, 2, 3, 4, 5, 6)
>>> expected = (2, 4, 4.5, 6, 10, 9)
>>> _processMathTwoTransformation(transformation, (2, 1.5), mulPt) == expected
True
"""
xScale, xyScale, yxScale, yScale, xOffset, yOffset = transformation
xScale, yScale = func((xScale, yScale), factor)
xyScale, yxScale = func((xyScale, yxScale), factor)
xOffset, yOffset = func((xOffset, yOffset), factor)
return (xScale, xyScale, yxScale, yScale, xOffset, yOffset)
# rounding
def _roundContours(contours, digits=None):
"""
>>> contour = [
... dict(identifier="contour 1", points=[("line", (0.55, 3.1), False, "test", "1")])
... ]
>>> expected = [
... dict(identifier="contour 1", points=[("line", (1, 3), False, "test", "1")])
... ]
>>> _roundContours(contour) == expected
True
"""
results = []
for contour in contours:
contour = dict(contour)
roundedPoints = []
for segmentType, pt, smooth, name, identifier in contour["points"]:
roundedPt = (_roundNumber(pt[0],digits), _roundNumber(pt[1],digits))
roundedPoints.append((segmentType, roundedPt, smooth, name, identifier))
contour["points"] = roundedPoints
results.append(contour)
return results
def _roundTransformation(transformation, digits=None):
"""
>>> transformation = (1, 2, 3, 4, 4.99, 6.01)
>>> expected = (1, 2, 3, 4, 5, 6)
>>> _roundTransformation(transformation) == expected
True
"""
xScale, xyScale, yxScale, yScale, xOffset, yOffset = transformation
return (xScale, xyScale, yxScale, yScale, _roundNumber(xOffset, digits), _roundNumber(yOffset, digits))
def _roundImage(image, digits=None):
"""
>>> image = dict(fileName="foo", transformation=(1, 2, 3, 4, 4.99, 6.01), color="0,0,0,0")
>>> expected = dict(fileName="foo", transformation=(1, 2, 3, 4, 5, 6), color="0,0,0,0")
>>> _roundImage(image) == expected
True
"""
image = dict(image)
fileName = image["fileName"]
color = image["color"]
transformation = _roundTransformation(image["transformation"], digits)
return dict(fileName=fileName, transformation=transformation, color=color)
def _roundComponents(components, digits=None):
"""
>>> components = [
... dict(baseGlyph="A", transformation=(1, 2, 3, 4, 5.1, 5.99), identifier="1"),
... ]
>>> expected = [
... dict(baseGlyph="A", transformation=(1, 2, 3, 4, 5, 6), identifier="1")
... ]
>>> _roundComponents(components) == expected
True
"""
result = []
for component in components:
component = dict(component)
component["transformation"] = _roundTransformation(component["transformation"], digits)
result.append(component)
return result
def _roundAnchors(anchors, digits=None):
"""
>>> anchors = [
... dict(x=99.9, y=-100.1, name="foo", identifier="1", color="0,0,0,0")
... ]
>>> expected = [
... dict(x=100, y=-100, name="foo", identifier="1", color="0,0,0,0")
... ]
>>> _roundAnchors(anchors) == expected
True
"""
result = []
for anchor in anchors:
anchor = dict(anchor)
anchor["x"], anchor["y"] = _roundNumber(anchor["x"], digits), _roundNumber(anchor["y"], digits)
result.append(anchor)
return result
# -----
# Tests
# -----
# these are tests that don't fit elsewhere
def _setupTestGlyph():
glyph = MathGlyph(None)
glyph.width = 0
glyph.height = 0
return glyph
def _testWidth():
"""
add
---
>>> glyph1 = _setupTestGlyph()
>>> glyph1.width = 1
>>> glyph2 = _setupTestGlyph()
>>> glyph2.width = 2
>>> glyph3 = glyph1 + glyph2
>>> glyph3.width
3
sub
---
>>> glyph1 = _setupTestGlyph()
>>> glyph1.width = 3
>>> glyph2 = _setupTestGlyph()
>>> glyph2.width = 2
>>> glyph3 = glyph1 - glyph2
>>> glyph3.width
1
mul
---
>>> glyph1 = _setupTestGlyph()
>>> glyph1.width = 2
>>> glyph2 = glyph1 * 3
>>> glyph2.width
6
>>> glyph1 = _setupTestGlyph()
>>> glyph1.width = 2
>>> glyph2 = glyph1 * (3, 1)
>>> glyph2.width
6
div
---
>>> glyph1 = _setupTestGlyph()
>>> glyph1.width = 7
>>> glyph2 = glyph1 / 2
>>> glyph2.width
3.5
>>> glyph1 = _setupTestGlyph()
>>> glyph1.width = 7
>>> glyph2 = glyph1 / (2, 1)
>>> glyph2.width
3.5
round
-----
>>> glyph1 = _setupTestGlyph()
>>> glyph1.width = 6.99
>>> glyph2 = glyph1.round()
>>> glyph2.width
7
"""
def _testHeight():
"""
add
---
>>> glyph1 = _setupTestGlyph()
>>> glyph1.height = 1
>>> glyph2 = _setupTestGlyph()
>>> glyph2.height = 2
>>> glyph3 = glyph1 + glyph2
>>> glyph3.height
3
sub
---
>>> glyph1 = _setupTestGlyph()
>>> glyph1.height = 3
>>> glyph2 = _setupTestGlyph()
>>> glyph2.height = 2
>>> glyph3 = glyph1 - glyph2
>>> glyph3.height
1
mul
---
>>> glyph1 = _setupTestGlyph()
>>> glyph1.height = 2
>>> glyph2 = glyph1 * 3
>>> glyph2.height
6
>>> glyph1 = _setupTestGlyph()
>>> glyph1.height = 2
>>> glyph2 = glyph1 * (1, 3)
>>> glyph2.height
6
div
---
>>> glyph1 = _setupTestGlyph()
>>> glyph1.height = 7
>>> glyph2 = glyph1 / 2
>>> glyph2.height
3.5
>>> glyph1 = _setupTestGlyph()
>>> glyph1.height = 7
>>> glyph2 = glyph1 / (1, 2)
>>> glyph2.height
3.5
round
-----
>>> glyph1 = _setupTestGlyph()
>>> glyph1.height = 6.99
>>> glyph2 = glyph1.round()
>>> glyph2.height
7
"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| metapolator/mutatormathtools | python_modules/lib/python/fontMath/mathGlyph.py | Python | apache-2.0 | 45,362 |
# -*- coding: utf-8 -*-
__version__ = '0.15.0'
| jules185/IoT_Hackathon | .homeassistant/deps/fuzzywuzzy/__init__.py | Python | mit | 47 |
"""
Tools used by the `explore_ligpy_results.ipynb` notebook that help with
analysis and plotting.
"""
import os
import cPickle as pickle
import numpy as np
from constants import MW
def load_results(path):
"""
Load the results from the ODE solver, along with the program parameters
used to generate those results. The program parameters should be saved
in the `prog_params.pkl` file generated by `ligpy.py`. The model species
concentration results should be in the same format as those output by
DDASAC (see the ligpy/sample_files/ folder for an example).
Parameters
----------
path : str
path to the folder that contains `/results_dir/`, where the *.out
files (model results) and `prog_params.pkl` are saved.
Returns
-------
end_time : float
the pyrolysis end time in seconds (excludes cool-down
time)
output_time_step : float
the time step at which results were saved (sec)
initial_T : float
initial temperature (K)
heating_rate : float
the heating rate (K/min)
max_T : float
maximum temperature of pyrolysis (K)
atol : float
absolute tolerance used by the ODE solver
rtol : float
relative tolerance used by the ODE solver
plant : str
the name of the lignin species modeled
cool_time : int
the time (s) to cool down after an isothermal hold
y : numpy matrix
a matrix with the concentrations of each species in the
kinetic scheme for every time in `t` (mol/L)
t : numpy array
array with all the times (s) corresponding to entries in
`y` and `T`
T : numpy array
array with the temperature (K) at every time in `t`
specieslist : list
a list of all the species participating in the model
speciesindices : dict
dictionary where species names are keys and values are
the index in `y` that corresponds to that species
indices_to_species : dict
the opposite of speciesindices
"""
rpath = path + '/results_dir'
if not os.path.exists(rpath):
raise ValueError('Please specify a valid directory with a'
' results_dir folder.')
with open(rpath + '/prog_params.pkl', 'rb') as params:
prog_params = pickle.load(params)
end_time = prog_params[0]
output_time_step = prog_params[1]
initial_T = prog_params[2]
heating_rate = prog_params[3]
max_T = prog_params[4]
atol = prog_params[5]
rtol = prog_params[6]
plant = prog_params[7]
cool_time = prog_params[8]
if not os.path.isfile(rpath + '/ddasac_results_1.out'):
raise IOError('There is not a valid DDASAC .out file.')
# Determine the order that species are listed in the DDASAC model.c file
with open(path + '/model.c', 'rb') as modelc:
body = modelc.read()
spos = body.find('enum {')
modelc.seek(spos+6)
# this includes the species list that I want to find
listiwant = modelc.read(1000)
# this is the list of all the species in the DDASAC model
species_ddasac = ''
for i, char in enumerate(listiwant):
if char == '}':
species_ddasac = listiwant[:i]
break
# Build a list of species from this string of species
species_ddasac = species_ddasac.replace('\n', '').replace(' ', '')
specieslist_ddasac = []
for s in species_ddasac.split(','):
specieslist_ddasac.append(s)
# Build dictionaries of corresponding indices (these indices from DDASAC's
# output are different from those from `ligpy_utils.get_speciesindices()`)
speciesindices_ddasac = {}
for i, species in enumerate(specieslist_ddasac):
speciesindices_ddasac[species] = i
indices_to_species_ddasac = dict(zip(speciesindices_ddasac.values(),
speciesindices_ddasac.keys()))
# Sort to make sure legends will always be the same
specieslist_ddasac.sort()
# Read the first DDASAC results file
file1 = rpath + '/ddasac_results_1.out'
t, y, T = read_results_files(file1, specieslist_ddasac)
# Check to see if a temperature ramp was followed by an isothermal stage
try:
file2 = rpath + '/ddasac_results_2.out'
t2, y2, T2 = read_results_files(file2, specieslist_ddasac)
y = np.concatenate((y, y2[1:]))
t = np.concatenate((t, t[-1]+t2[1:]))
T = np.concatenate((T, T2[1:]))
except IOError:
        print 'There is no second DDASAC results file (isothermal hold)'
# Check to see if a cool down phase was included
try:
file3 = rpath + '/ddasac_results_3.out'
t3, y3, T3 = read_results_files(file3, specieslist_ddasac)
y = np.concatenate((y, y3[1:]))
t = np.concatenate((t, t[-1]+t3[1:]))
T = np.concatenate((T, T3[1:]))
except IOError:
        print 'There is no third DDASAC results file (cool down period)'
return [end_time, output_time_step, initial_T, heating_rate, max_T, atol,
rtol, plant, cool_time, y, t, T, specieslist_ddasac,
speciesindices_ddasac, indices_to_species_ddasac]
def read_results_files(filename, specieslist_ddasac):
"""
Read and process the DDASAC *.out results files so they can be
combined.
Parameters
----------
filename : str
the filename of the *.out file (including relative
or absolute path)
specieslist_ddasac : list
the specieslist_ddasac object from load_results()
Returns
-------
t : numpy array
an array with the output time (s) for each entry in the
concentration or temperature arrays
y : numpy matrix
a matrix with the concentrations of each species in the model for
every timepoint in `t` (mol/L)
T : numpy array
        an array with the temperature (K) at every timepoint in `t`
"""
with open(filename, 'r') as result:
        # Subtract the header line and the 6 lines of descriptive text at the end of the file
num_lines = sum(1 for line in result) - 7
t = np.zeros((num_lines, 1), dtype='float64')
T = np.zeros((num_lines, 1), dtype='float64')
y = np.zeros((num_lines, len(specieslist_ddasac)), dtype='float64')
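    # Second pass: parse the time, species concentrations and temperature from each data line (skipping the header)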
with open(filename, 'r') as result:
for i, line in enumerate(result.readlines()):
if 1 <= i < num_lines + 1:
t[i-1] = line.split('\t')[0].split(' ')[1]
T[i-1] = line.split('\t')[-2]
for j, concentration in enumerate(line.split('\t')[1:-2]):
y[i-1, j] = concentration
return t, y, T
def tar_elem_analysis(speciesindices, y, t, t_choice='end'):
"""
Calculate the elemental analysis of the tar fraction at a specified time
(moles of C, H, O). The species that make up the tar fraction are specified
in the MW dictionary (in `constants.py`). This function also returns the
wt% and mol% of C, H, O at that specified time.
Parameters
----------
speciesindices : dict
dictionary from `load_results()` where species names are
keys and values are the index in `y` that corresponds to
that species
y : numpy array
a matrix with the concentrations of each species in the
kinetic scheme for every time in `t` (mol/L)
t : numpy array
array with all the times (s) corresponding to entries in
`y` and `T`
t_choice : str or int, optional
if 'end' (default) then this elemental analysis will be
done at the last timepoint saved in the simulation (i.e.
after any isothermal or cool-down stage). Otherwise, an
integer specifying the index of the `t` array can be
passed to do the analysis at a specified time.
Returns
-------
ea0 : numpy array
the elemental analysis at time = 0
ea : numpy array
the elemental analysis of tars at the specified time
ea0_molpercent : numpy array
the mole% of C, H, O at time = 0
ea_molpercent : numpy array
the mole% of C, H, O at the specified time
ea0_wtpercent : numpy array
the wt% of C, H, O at time = 0
ea_wtpercent : numpy array
the wt% of C, H, O at the specified time
choice : str
a string describing the time that was chosen for analysis
t_index : int
the index of the time array at which analysis was done
"""
# Calculate the elemental analysis at time=0
ea0 = np.zeros(3)
for species in MW:
if y[0, speciesindices[species]] != 0:
# mol C/L, mol H/L, mol O/L
# NOTE: in MW dict, only PLIGH, PLIGO, PLIGC contribute to ea0
ea0[0] += y[0, speciesindices[species]] * MW[species][3][0]
ea0[1] += y[0, speciesindices[species]] * MW[species][3][1]
ea0[2] += y[0, speciesindices[species]] * MW[species][3][2]
# Calculate the elemental analysis at some later time
if t_choice == 'end':
t_index = len(t) - 1
choice = 'Analysis done at the end of the entire simulation.'
else:
t_index = t_choice
choice = 'Analysis done at time = %s sec.' % t[t_index]
ea = np.zeros(3)
for species in MW:
if MW[species][1] in set(['t', 'lt', 'H2O']):
# mol C,H,O/L
ea[0] += y[t_index, speciesindices[species]] * MW[species][3][0]
ea[1] += y[t_index, speciesindices[species]] * MW[species][3][1]
ea[2] += y[t_index, speciesindices[species]] * MW[species][3][2]
ea0_molpercent = ea0 / ea0.sum()
ea_molpercent = ea / ea.sum()
# Convert to g/L for calculating wt%
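    # (atomic masses of C, H, O: 12.011, 1.0079, 15.999 g/mol)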
ea_g = ea * [12.011, 1.0079, 15.999]
ea0_g = ea0 * [12.011, 1.0079, 15.999]
ea_wtpercent = ea_g / ea_g.sum()
ea0_wtpercent = ea0_g / ea0_g.sum()
return (ea0, ea, ea0_molpercent, ea_molpercent, ea0_wtpercent,
ea_wtpercent, choice, t_index)
def C_fun_gen(fractions, speciesindices, y, time):
"""
Calculate the distribution of carbon functional groups as a percent of
total carbon.
Parameters
----------
fractions : list
The lumped phases that you want to include (as specified
in MW['species'][1], options are any subset of
['g','s','lt','t','char','H20','CO','CO2'] or ['initial']
for the case when you want to determine the initial
distribution before pyrolysis)
speciesindices : dict
dictionary from `load_results()` where species names are
keys and values are the index in `y` that corresponds to
that species
y : numpy array
a matrix with the concentrations of each species in the
kinetic scheme for every time in `t` (mol/L)
time : int
the index of the timepoint that you want the results for
Returns
-------
C_fun : numpy array
the distribution of carbon functional groups as a percent of total
carbon. The order of the elements in the array is:
carbonyl, aromatic C-O, aromatic C-C, aromatic C-H, aliphatic C-O,
aromatic methoxyl, aliphatic C-C
"""
C_fun = np.zeros(7)
ind = speciesindices
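    # MW[species][4] lists, for each functional-group class, the number of carbons of that type per molecule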
for species in MW:
if fractions == ['initial']:
time = 0
if y[time, speciesindices[species]] != 0:
# moles of functional group/L (order from Return docstring)
C_fun[0] += y[time, ind[species]] * MW[species][4][0]
C_fun[1] += y[time, ind[species]] * MW[species][4][1]
C_fun[2] += y[time, ind[species]] * MW[species][4][2]
C_fun[3] += y[time, ind[species]] * MW[species][4][3]
C_fun[4] += y[time, ind[species]] * MW[species][4][4]
C_fun[5] += y[time, ind[species]] * MW[species][4][5]
C_fun[6] += y[time, ind[species]] * MW[species][4][6]
else:
if MW[species][1] in set(fractions):
C_fun[0] += y[time, ind[species]] * MW[species][4][0]
C_fun[1] += y[time, ind[species]] * MW[species][4][1]
C_fun[2] += y[time, ind[species]] * MW[species][4][2]
C_fun[3] += y[time, ind[species]] * MW[species][4][3]
C_fun[4] += y[time, ind[species]] * MW[species][4][4]
C_fun[5] += y[time, ind[species]] * MW[species][4][5]
C_fun[6] += y[time, ind[species]] * MW[species][4][6]
C_fun /= C_fun.sum()
return C_fun
def lump_species(speciesindices, m):
"""
Lump the molecular species in the model into subsets of
solids, tars, and gases. Also separate heavy tars into
phenolic and syringol families.
Parameters
----------
speciesindices : dict
dictionary from `load_results()` where species names are
keys and values are the index in `y` that corresponds to
that species
m : numpy array
a matrix with the mass fraction of each species in the
kinetic scheme for every time in `t`
Returns
-------
lumped : numpy array
each row in this array is the mass fraction of a
lumped phase (0 = solid, 1 = heavy tar, 2 = light tar,
3 = gas, 4 = CO, 5 = CO2, 6 = H2O, 7 = char)
phenolic_families : numpy array
Splits the heavy tar components into the phenol
family (first row) and syringol family (second row)
morelumped : numpy array
a "more lumped" version of `lumped` where
column 0 = solids, 1 = tars, 2 = gases
"""
lumped = np.zeros((m.shape[0], 8))
phenolic_families = np.zeros((m.shape[0], 2))
for species in MW:
if MW[species][1] == 's':
lumped[:, 0] += m[:, speciesindices[species]]
elif MW[species][1] == 't':
lumped[:, 1] += m[:, speciesindices[species]]
if MW[species][2] == 'p':
phenolic_families[:, 0] += m[:, speciesindices[species]]
elif MW[species][2] == 's':
phenolic_families[:, 1] += m[:, speciesindices[species]]
elif MW[species][1] == 'lt':
lumped[:, 2] += m[:, speciesindices[species]]
elif MW[species][1] == 'g':
lumped[:, 3] += m[:, speciesindices[species]]
elif MW[species][1] == 'CO':
lumped[:, 4] += m[:, speciesindices[species]]
elif MW[species][1] == 'CO2':
lumped[:, 5] += m[:, speciesindices[species]]
elif MW[species][1] == 'H2O':
lumped[:, 6] += m[:, speciesindices[species]]
elif MW[species][1] == 'char':
lumped[:, 7] += m[:, speciesindices[species]]
else:
print '%s does not have a phase defined.' % species
# Make a more lumped (3 component) model
morelumped = np.zeros((m.shape[0], 3))
morelumped[:, 0] = lumped[:, 0] + lumped[:, 7]
morelumped[:, 1] = lumped[:, 1] + lumped[:, 2] + lumped[:, 6]
morelumped[:, 2] = lumped[:, 3] + lumped[:, 4] + lumped[:, 5]
return lumped, phenolic_families, morelumped
def generate_report(speciesindices, specieslist, y, m, t, which_result):
"""
Print a descriptive summary of a specific simulation.
Parameters
----------
speciesindices : dict
dictionary from `load_results()` where species names are
keys and values are the index in `y` that corresponds to
that species
specieslist : list
the specieslist_ddasac object from load_results()
y : numpy array
a matrix with the concentrations of each species in the
kinetic scheme for every time in `t` (mol/L)
m : numpy array
a matrix with the mass fraction of each species in the
kinetic scheme for every time in `t`
t : numpy array
array with all the times (s) corresponding to entries in
`y` and `T`
which_result : str
the name of the simulation folder you are analysing
Returns
-------
t_index : int
the index of `t` where this analysis was performed
"""
(ea0, ea, ea0_molpercent, ea_molpercent, ea0_wtpercent, ea_wtpercent, choice,
t_index) = tar_elem_analysis(speciesindices, y, t)
# Header and elemental analysis results
print1 = ('\n{:-^80}\n'
'Analysis of folder: {}\n'
'{}\n'
'\n{:.^80}\n\n'
'Feedstock (wt%) : {:.1%} C {:>7.1%} H {:>7.1%} O\n'
'Bio-oil (wt%) : {:.1%} C {:>7.1%} H {:>7.1%} O\n\n'
'Feedstock (mol%) : {:.1%} C {:>7.1%} H {:>7.1%} O\n'
'Bio-oil (mol%) : {:.1%} C {:>7.1%} H {:>7.1%} O\n'
.format(' REPORT ', which_result.value, choice,
' Elemental Analysis ', ea0_wtpercent[0], ea0_wtpercent[1],
ea0_wtpercent[2], ea_wtpercent[0], ea_wtpercent[1],
ea_wtpercent[2], ea0_molpercent[0], ea0_molpercent[1],
ea0_molpercent[2], ea_molpercent[0], ea_molpercent[1],
ea_molpercent[2]))
# H:C ratio in tar
# a low H:C ratio limits hydrocarbon yield during upgrading, so upgraded
# product is primarily aromatics. Combustion energetics can be estimated from
# the bond energies for all the classifications of fossil fuels. The amount of
# energy released is dependent on the oxidation state of the carbons in the
# hydrocarbon which is related to the hydrogen/carbon ratio. The more hydrogen
# per carbon, the lower the oxidation state and the more energy that will be
# released during the oxidation reaction. Thus the greater the H/C ratio,
# the more energy release on combustion.
# Sample values: gas 4/1, petroleum 2/1, coal 1/1, ethanol 3/1
print2 = '\nH:C ratio of tar = {:.3}\n'.format(ea[1] / ea[0])
# Moisture content in tar -- typical value for wood bio-oil is 25%
mtot = [0]
for species in MW:
if MW[species][1] in set(['t', 'lt', 'H2O']):
# the total mass fraction of all tar components at the specified time
mtot += m[t_index, speciesindices[species]]
# The moisture content (wt%) in the bio-oil
mc = m[t_index, speciesindices['H2O']] / mtot
print3 = '\nMoisture content of tar (wt%) = {:.1%}\n'.format(mc[0])
# The distribution of carbon functional groups in the tar
groups = ['C=O', 'aromatic C-O', 'aromatic C-C', 'aromatic C-H',
'aliphatic C-O', 'aromatic Methoxyl', 'aliphatic C-C']
Cfun0 = C_fun_gen(['initial'], speciesindices, y, 0)
Cfun = C_fun_gen(['t','lt'], speciesindices, y, t_index)
Cfunheavy = C_fun_gen(['t'], speciesindices, y, t_index)
Cfunlight = C_fun_gen(['lt'], speciesindices, y, t_index)
print4 = ('\n{:.^80}\n\n'
'{: <19}{: <16}{: <16}{: <16}{: <16}'
.format(' Distribution of C-functional groups (shown as % of C) ',
' ','Feedstock','Bio-oil','Heavy oil','Light oil'))
print print1, print2, print3, print4
for i, group in enumerate(groups):
print5 = ('%s%s%s%s%s' % ('{: <19}'.format(group),
'{: <16.2%}'.format(Cfun0[i]),
'{: <16.2%}'.format(Cfun[i]),
'{: <16.2%}'.format(Cfunheavy[i]),
'{: <16.2%}'.format(Cfunlight[i])))
print print5
# lump the molecules in the model into groups
lumped, phenolic_families, morelumped = lump_species(speciesindices, m)
# The final mass fractions of each component (morelumped)
print6 = ('\n{:.^80}\n\n'
'Solids:\t\t {:>10.2%}\n'
'Gases:\t\t {:>10.2%}\n'
'Total Tar:\t {:>10.2%}\n'
' Heavy Tars:\t {:>10.2%}\n'
' Light Tars:\t {:>10.2%}\n'
' Water:\t {:>10.2%}'
.format(' Final lumped product yields (wt%) ', morelumped[-1, 0],
morelumped[-1, 2], morelumped[-1, 1], lumped[-1, 1],
lumped[-1, 2], lumped[-1, 6]))
print7 = ('\n\n{:.2%} of heavy tars are derived from phenol, '
'{:.2%} are derived from syringol'
.format(phenolic_families[-1, 0] / lumped[-1, 1],
phenolic_families[-1, 1] / lumped[-1, 1]))
# Look at the final distribution of gases
print8 = '\n\n{:.^80}\n'.format(' Final gas composition (wt%) ')
print print6, print7, print8
# dictionary with the ending mass fraction for each species
final_mass_fracs = {}
for species in specieslist:
final_mass_fracs[species] = m[t_index, speciesindices[species]]
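    # collect the gas-phase species (g, CO, CO2) and report the eight most abundant by mass fraction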
gas_list = {}
for species in specieslist:
if MW[species][1] in ('g', 'CO', 'CO2'):
gas_list[species] = final_mass_fracs[species]
gas_w = sorted(gas_list, key=gas_list.__getitem__, reverse=True)[:8]
for species in gas_w:
print9 = ('%s\t%s' % ('{0: <8}'.format(species),
'{0: <18}'.format(final_mass_fracs[species])))
print print9
# identify the 20 species with the highest mass fractions at the end
print10 = ('\n{:.^80}\n'
.format(' Top 20 species (by mass fraction) at end (wt%) '))
print print10
top = sorted(final_mass_fracs, key=final_mass_fracs.__getitem__,
reverse=True)[:20]
for species in top:
print11 = '%s\t%s' % ('{0: <8}'.format(species),
'{0: <18}'.format(final_mass_fracs[species]))
print print11
return t_index
| houghb/ligpy | ligpy/analysis_tools.py | Python | bsd-2-clause | 22,978 |
import errno
import os
import shutil
import socket
import subprocess
import sys
import tempfile
import time
import urllib2
import pytest
from tools.wpt import wpt
here = os.path.abspath(os.path.dirname(__file__))
def is_port_8000_in_use():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.bind(("127.0.0.1", 8000))
except socket.error as e:
if e.errno == errno.EADDRINUSE:
return True
else:
raise e
finally:
s.close()
return False
def get_persistent_manifest_path():
directory = ("~/meta" if os.environ.get('TRAVIS') == "true"
else wpt.localpaths.repo_root)
return os.path.join(directory, "MANIFEST.json")
@pytest.fixture(scope="module", autouse=True)
def init_manifest():
with pytest.raises(SystemExit) as excinfo:
wpt.main(argv=["manifest", "--no-download",
"--path", get_persistent_manifest_path()])
assert excinfo.value.code == 0
@pytest.fixture
def manifest_dir():
try:
path = tempfile.mkdtemp()
shutil.copyfile(get_persistent_manifest_path(),
os.path.join(path, "MANIFEST.json"))
yield path
finally:
shutil.rmtree(path)
@pytest.fixture
def temp_test():
os.makedirs("../../.tools-tests")
test_count = {"value": 0}
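    # factory that writes numbered testharness.js test files under ../../.tools-tests
    # and returns their paths relative to the wpt root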
def make_test(body):
test_count["value"] += 1
test_name = ".tools-tests/%s.html" % test_count["value"]
test_path = "../../%s" % test_name
with open(test_path, "w") as handle:
handle.write("""
<!DOCTYPE html>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script>%s</script>
""" % body)
return test_name
yield make_test
shutil.rmtree("../../.tools-tests")
def test_missing():
with pytest.raises(SystemExit):
wpt.main(argv=["#missing-command"])
def test_help():
# TODO: It seems like there's a bug in argparse that makes this argument order required
# should try to work around that
with pytest.raises(SystemExit) as excinfo:
wpt.main(argv=["--help"])
assert excinfo.value.code == 0
@pytest.mark.slow
@pytest.mark.xfail(sys.platform == "win32",
reason="https://github.com/web-platform-tests/wpt/issues/12935")
def test_list_tests(manifest_dir):
"""The `--list-tests` option should not produce an error under normal
conditions."""
with pytest.raises(SystemExit) as excinfo:
wpt.main(argv=["run", "--metadata", manifest_dir, "--list-tests",
"--channel", "dev", "--yes", "chrome",
"/dom/nodes/Element-tagName.html"])
assert excinfo.value.code == 0
@pytest.mark.slow
@pytest.mark.xfail(sys.platform == "win32",
reason="https://github.com/web-platform-tests/wpt/issues/12935")
def test_list_tests_missing_manifest(manifest_dir):
"""The `--list-tests` option should not produce an error in the absence of
a test manifest file."""
os.remove(os.path.join(manifest_dir, "MANIFEST.json"))
with pytest.raises(SystemExit) as excinfo:
wpt.main(argv=["run",
# This test triggers the creation of a new manifest
# file which is not necessary to ensure successful
# process completion. Specifying the current directory
# as the tests source via the --tests` option
# drastically reduces the time to execute the test.
"--tests", here,
"--metadata", manifest_dir,
"--list-tests",
"--yes",
"firefox", "/dom/nodes/Element-tagName.html"])
assert excinfo.value.code == 0
@pytest.mark.slow
@pytest.mark.xfail(sys.platform == "win32",
reason="https://github.com/web-platform-tests/wpt/issues/12935")
def test_list_tests_invalid_manifest(manifest_dir):
"""The `--list-tests` option should not produce an error in the presence of
a malformed test manifest file."""
manifest_filename = os.path.join(manifest_dir, "MANIFEST.json")
assert os.path.isfile(manifest_filename)
with open(manifest_filename, "a+") as handle:
handle.write("extra text which invalidates the file")
with pytest.raises(SystemExit) as excinfo:
wpt.main(argv=["run",
# This test triggers the creation of a new manifest
# file which is not necessary to ensure successful
# process completion. Specifying the current directory
# as the tests source via the --tests` option
# drastically reduces the time to execute the test.
"--tests", here,
"--metadata", manifest_dir,
"--list-tests",
"--yes",
"firefox", "/dom/nodes/Element-tagName.html"])
assert excinfo.value.code == 0
@pytest.mark.slow
@pytest.mark.remote_network
@pytest.mark.xfail(sys.platform == "win32",
reason="Tests currently don't work on Windows for path reasons")
def test_run_zero_tests():
"""A test execution describing zero tests should be reported as an error
even in the presence of the `--no-fail-on-unexpected` option."""
if is_port_8000_in_use():
pytest.skip("port 8000 already in use")
with pytest.raises(SystemExit) as excinfo:
wpt.main(argv=["run", "--yes", "--no-pause", "--binary-arg", "headless",
"--channel", "dev", "chrome",
"/non-existent-dir/non-existent-file.html"])
assert excinfo.value.code != 0
with pytest.raises(SystemExit) as excinfo:
wpt.main(argv=["run", "--yes", "--no-pause", "--binary-arg", "headless",
"--no-fail-on-unexpected", "--channel", "dev",
"chrome", "/non-existent-dir/non-existent-file.html"])
assert excinfo.value.code != 0
@pytest.mark.slow
@pytest.mark.remote_network
@pytest.mark.xfail(sys.platform == "win32",
reason="Tests currently don't work on Windows for path reasons")
def test_run_failing_test():
"""Failing tests should be reported with a non-zero exit status unless the
`--no-fail-on-unexpected` option has been specified."""
if is_port_8000_in_use():
pytest.skip("port 8000 already in use")
failing_test = "/infrastructure/expected-fail/failing-test.html"
assert os.path.isfile("../../%s" % failing_test)
with pytest.raises(SystemExit) as excinfo:
wpt.main(argv=["run", "--yes", "--no-pause", "--binary-arg", "headless",
"--channel", "dev", "chrome", failing_test])
assert excinfo.value.code != 0
with pytest.raises(SystemExit) as excinfo:
wpt.main(argv=["run", "--yes", "--no-pause", "--binary-arg", "headless",
"--no-fail-on-unexpected", "--channel", "dev",
"chrome", failing_test])
assert excinfo.value.code == 0
@pytest.mark.slow
@pytest.mark.remote_network
@pytest.mark.xfail(sys.platform == "win32",
reason="Tests currently don't work on Windows for path reasons")
def test_run_verify_unstable(temp_test):
"""Unstable tests should be reported with a non-zero exit status. Stable
tests should be reported with a zero exit status."""
if is_port_8000_in_use():
pytest.skip("port 8000 already in use")
unstable_test = temp_test("""
test(function() {
if (localStorage.getItem('wpt-unstable-test-flag')) {
throw new Error();
}
localStorage.setItem('wpt-unstable-test-flag', 'x');
}, 'my test');
""")
with pytest.raises(SystemExit) as excinfo:
wpt.main(argv=["run", "--yes", "--verify", "--binary-arg", "headless",
"--channel", "dev", "chrome", unstable_test])
assert excinfo.value.code != 0
stable_test = temp_test("test(function() {}, 'my test');")
with pytest.raises(SystemExit) as excinfo:
wpt.main(argv=["run", "--yes", "--verify", "--binary-arg", "headless",
"--channel", "dev", "chrome", stable_test])
assert excinfo.value.code == 0
@pytest.mark.slow
@pytest.mark.remote_network
@pytest.mark.xfail(sys.platform == "win32",
reason="Tests currently don't work on Windows for path reasons")
def test_install_chromedriver():
chromedriver_path = os.path.join(wpt.localpaths.repo_root, "_venv", "bin", "chromedriver")
if os.path.exists(chromedriver_path):
os.unlink(chromedriver_path)
with pytest.raises(SystemExit) as excinfo:
wpt.main(argv=["install", "chrome", "webdriver"])
assert excinfo.value.code == 0
assert os.path.exists(chromedriver_path)
os.unlink(chromedriver_path)
@pytest.mark.slow
@pytest.mark.remote_network
@pytest.mark.xfail(sys.platform == "win32",
reason="Tests currently don't work on Windows for path reasons")
def test_install_firefox():
if sys.platform == "darwin":
fx_path = os.path.join(wpt.localpaths.repo_root, "_venv", "browsers", "nightly", "Firefox Nightly.app")
else:
fx_path = os.path.join(wpt.localpaths.repo_root, "_venv", "browsers", "nightly", "firefox")
if os.path.exists(fx_path):
shutil.rmtree(fx_path)
with pytest.raises(SystemExit) as excinfo:
wpt.main(argv=["install", "firefox", "browser", "--channel=nightly"])
assert excinfo.value.code == 0
assert os.path.exists(fx_path)
shutil.rmtree(fx_path)
@pytest.mark.xfail(sys.platform == "win32",
reason="Tests currently don't work on Windows for path reasons")
def test_files_changed(capsys):
commit = "9047ac1d9f51b1e9faa4f9fad9c47d109609ab09"
with pytest.raises(SystemExit) as excinfo:
wpt.main(argv=["files-changed", "%s~..%s" % (commit, commit)])
assert excinfo.value.code == 0
out, err = capsys.readouterr()
assert out == """html/browsers/offline/appcache/workers/appcache-worker.html
html/browsers/offline/appcache/workers/resources/appcache-dedicated-worker-not-in-cache.js
html/browsers/offline/appcache/workers/resources/appcache-shared-worker-not-in-cache.js
html/browsers/offline/appcache/workers/resources/appcache-worker-data.py
html/browsers/offline/appcache/workers/resources/appcache-worker-import.py
html/browsers/offline/appcache/workers/resources/appcache-worker.manifest
html/browsers/offline/appcache/workers/resources/appcache-worker.py
"""
assert err == ""
@pytest.mark.xfail(sys.platform == "win32",
reason="Tests currently don't work on Windows for path reasons")
def test_files_changed_null(capsys):
commit = "9047ac1d9f51b1e9faa4f9fad9c47d109609ab09"
with pytest.raises(SystemExit) as excinfo:
wpt.main(argv=["files-changed", "--null", "%s~..%s" % (commit, commit)])
assert excinfo.value.code == 0
out, err = capsys.readouterr()
assert out == "\0".join(["html/browsers/offline/appcache/workers/appcache-worker.html",
"html/browsers/offline/appcache/workers/resources/appcache-dedicated-worker-not-in-cache.js",
"html/browsers/offline/appcache/workers/resources/appcache-shared-worker-not-in-cache.js",
"html/browsers/offline/appcache/workers/resources/appcache-worker-data.py",
"html/browsers/offline/appcache/workers/resources/appcache-worker-import.py",
"html/browsers/offline/appcache/workers/resources/appcache-worker.manifest",
"html/browsers/offline/appcache/workers/resources/appcache-worker.py",
""])
assert err == ""
def test_files_changed_ignore():
from tools.wpt.testfiles import exclude_ignored
files = ["resources/testharness.js", "resources/webidl2/index.js", "test/test.js"]
changed, ignored = exclude_ignored(files, ignore_rules=["resources/testharness*"])
assert changed == [os.path.join(wpt.wpt_root, item) for item in
["resources/webidl2/index.js", "test/test.js"]]
assert ignored == [os.path.join(wpt.wpt_root, item) for item in
["resources/testharness.js"]]
def test_files_changed_ignore_rules():
from tools.wpt.testfiles import compile_ignore_rule
assert compile_ignore_rule("foo*bar*/baz").pattern == r"^foo\*bar[^/]*/baz$"
assert compile_ignore_rule("foo**bar**/baz").pattern == r"^foo\*\*bar.*/baz$"
assert compile_ignore_rule("foobar/baz/*").pattern == "^foobar/baz/[^/]*$"
assert compile_ignore_rule("foobar/baz/**").pattern == "^foobar/baz/.*$"
@pytest.mark.slow # this updates the manifest
@pytest.mark.xfail(sys.platform == "win32",
reason="Tests currently don't work on Windows for path reasons")
@pytest.mark.skipif(sys.platform == "win32",
reason="https://github.com/web-platform-tests/wpt/issues/12934")
def test_tests_affected(capsys, manifest_dir):
# This doesn't really work properly for random commits because we test the files in
# the current working directory for references to the changed files, not the ones at
# that specific commit. But we can at least test it returns something sensible.
# The test will fail if the file we assert is renamed, so we choose a stable one.
commit = "3a055e818218f548db240c316654f3cc1aeeb733"
with pytest.raises(SystemExit) as excinfo:
wpt.main(argv=["tests-affected", "--metadata", manifest_dir, "%s~..%s" % (commit, commit)])
assert excinfo.value.code == 0
out, err = capsys.readouterr()
assert "infrastructure/reftest-wait.html" in out
@pytest.mark.slow # this updates the manifest
@pytest.mark.xfail(sys.platform == "win32",
reason="Tests currently don't work on Windows for path reasons")
@pytest.mark.skipif(sys.platform == "win32",
reason="https://github.com/web-platform-tests/wpt/issues/12934")
def test_tests_affected_idlharness(capsys, manifest_dir):
commit = "47cea8c38b88c0ddd3854e4edec0c5b6f2697e62"
with pytest.raises(SystemExit) as excinfo:
wpt.main(argv=["tests-affected", "--metadata", manifest_dir, "%s~..%s" % (commit, commit)])
assert excinfo.value.code == 0
out, err = capsys.readouterr()
assert "webrtc-stats/idlharness.window.js\nwebrtc/idlharness.https.window.js\n" == out
@pytest.mark.slow # this updates the manifest
@pytest.mark.xfail(sys.platform == "win32",
reason="Tests currently don't work on Windows for path reasons")
@pytest.mark.skipif(sys.platform == "win32",
reason="https://github.com/web-platform-tests/wpt/issues/12934")
def test_tests_affected_null(capsys, manifest_dir):
# This doesn't really work properly for random commits because we test the files in
# the current working directory for references to the changed files, not the ones at
# that specific commit. But we can at least test it returns something sensible.
# The test will fail if the file we assert is renamed, so we choose a stable one.
commit = "9bf1daa3d8b4425f2354c3ca92c4cf0398d329dd"
with pytest.raises(SystemExit) as excinfo:
wpt.main(argv=["tests-affected", "--null", "--metadata", manifest_dir, "%s~..%s" % (commit, commit)])
assert excinfo.value.code == 0
out, err = capsys.readouterr()
tests = out.split("\0")
assert "dom/interfaces.html" in tests
assert "html/dom/interfaces.https.html" in tests
@pytest.mark.slow
@pytest.mark.xfail(sys.platform == "win32",
reason="Tests currently don't work on Windows for path reasons")
def test_serve():
if is_port_8000_in_use():
pytest.skip("port 8000 already in use")
p = subprocess.Popen([os.path.join(wpt.localpaths.repo_root, "wpt"), "serve"],
preexec_fn=os.setsid)
start = time.time()
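    # poll the server for up to 60 seconds until it responds on http://web-platform.test:8000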
try:
while True:
if p.poll() is not None:
assert False, "server not running"
if time.time() - start > 60:
assert False, "server did not start responding within 60s"
try:
resp = urllib2.urlopen("http://web-platform.test:8000")
print(resp)
except urllib2.URLError:
print("URLError")
time.sleep(1)
else:
assert resp.code == 200
break
finally:
os.killpg(p.pid, 15)
# The following commands are slow running and used implicitly in other CI
# jobs, so we skip them here:
# wpt manifest
# wpt lint
| ecoal95/servo | tests/wpt/web-platform-tests/tools/wpt/tests/test_wpt.py | Python | mpl-2.0 | 16,794 |
def to_float(v, default=None):
try:
return float(v)
except ValueError:
return default
except TypeError:
return default
def to_int(v, default=None):
try:
return int(v)
except ValueError:
return default
except TypeError:
return default
def float_to_str(v, default=None):
r = repr(v)
if r[-2:] == '.0':
r = r[:-2]
return r
def debug(text):
try:
prefix = current['object']['id'] + ' | '
except:
prefix = ''
plpy.warning(prefix + str(text))
class config_base:
math_level = None
op = None
unary = False
aliases = None
mutable = 0
# 'mutable' says whether a function returns different output for the same set
# of input parameters:
# 0 .. volatile, may return different value on each call (e.g. random())
# 1 .. always returns the same value for the current object (e.g. osm_id())
# 2 .. always returns the same value for the current view (e.g. zoom())
# 3 .. static, always returns the same value (e.g. 2+3 = 5)
def __init__(self, func):
import re
if not re.match('[a-zA-Z_0-9]+$', func):
raise Exception('Illegal eval function name: ' + func)
self.func = func
def compiler(self, param, eval_param, stat):
return 'eval_' + self.func + '([' + ', '.join(param) + ']' + eval_param + ')'
def __call__(self, param, stat):
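        # dispatch through pgmapcss.eval.eval_functions when the global 'current' is
        # not defined; otherwise evaluate the compiled call directly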
try:
current
except NameError:
import pgmapcss.eval
return pgmapcss.eval.eval_functions.call(self.func, param, stat)
else:
return eval(self.compiler(param, '', {}))
    # possible_values() lists the possible values the function may return for the given
    # set of input parameters. It returns a tuple with the first element being one of:
# str .. the function always returns the given value (e.g. 2+3 = 5)
# None .. the function always returns None
# set .. the function may return the following values
# (e.g. zoom() => { 1, 2, 3, ..., 18 }
# True .. it's not possible to predict the result of this function (e.g. random())
# the second element is the mutability, see above
def possible_values(self, param_values, prop, stat):
m = self.mutable
if True in param_values:
return ( True, 0 )
if callable(m):
m = self.mutable(param_values, stat)
if m == 3:
return ( self(param_values, stat), m )
return ( True, m )
| plepe/pgmapcss | pgmapcss/eval/base.py | Python | agpl-3.0 | 2,479 |
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 15 12:14:08 2015
@author: Sam Thiele
"""
import os
import numpy as np
import scipy as sp
from pynoddy.experiment.MonteCarlo import MonteCarlo
from pynoddy.output import NoddyTopology
from pynoddy.history import NoddyHistory
class TopologyAnalysis:
'''
Performs a topological uncertainty analysis on a noddy model.
'''
class ModelRealisation:
'''
Class containing information regarding an individual model realisation.
This essentially just bundles a history class and NoddyTopology class together (plus a bit
of extra information like basename etc)
'''
def __init__(self, history_file, **kwds ):
#get keywords
vb = kwds.get("verbose",False)
self.history_path=history_file
self.basename=history_file.split('.')[0] #remove file extension
if not os.path.exists(self.history_path):
print "Error: please specify a valid noddy history file (*.his)"
return
#load history file
self.history = NoddyHistory(history_file, verbose=vb)
#load topology network
self.topology = NoddyTopology(self.basename)
@staticmethod
def loadModels( path, **kwds ):
'''
            Loads all noddy model realisations and returns them as an array of ModelRealisation objects
**Arguments**:
- *path* = The root directory that models should be loaded from. All models with the same base_name
                as this class will be loaded (including subdirectories)
**Optional Keywords**:
- *verbose* = True if this function should write debug information to the print buffer. Default is True.
**Returns**:
- a list of ModelRealisation objects
'''
vb = kwds.get('verbose',True)
if vb:
print "Loading models in %s" % path
#array of topology objects
realisations = []
for root, dirnames, filenames in os.walk(path): #walk the directory
for f in filenames:
if ('.his' in f): #find all topology files
p = os.path.join(root,f)
if vb:
print 'Loading %s' % p
#load model
realisations.append( TopologyAnalysis.ModelRealisation(p,verbose=vb) )
return realisations
def __init__(self,path, params=None,n=None, **kwds):
'''
Performs a topological uncertainty analysis. If a directory is given, all the history files within
the directory are loaded and the analyses performed on them. If a history file is given, n perturbations
are performed on it using the params file.
**Arguments**:
- *path* = The directory or history file to perform this analysis on.
**Optional Arguments**:
- *params* = The params file to use for MonteCarlo perturbation (if a history file is provided)
- *n* = The number of model perturbations to generate (if a history file is provided)
**Optional Keywords**:
- *verbose* = True if this experiment should write to the print buffer. Default is True
- *threads* = The number of threads this experiment should utilise. The default is 4.
- *force* = True if all noddy models should be recalculated. Default is False.
'''
#init variables
self.base_history_path = None
self.base_path = path #if a history file has been given, this will be changed
vb = kwds.get("verbose",True)
n_threads = kwds.get("threads",4)
force = kwds.get("force",False)
#a history file has been given, generate model stuff
if '.' in path:
            if '.his' not in path:  # only noddy history files (*.his) are supported
print "Error: please provide a valid history file (*.his)"
return
            if params is None or n is None:  # need this info
                print "Error: please provide valid arguments [params,n]"
                return
self.base_history_path = path
self.base_path=path.split('.')[0] #trim file extension
self.num_trials = n
#ensure path exists
if not os.path.exists(self.base_path):
os.makedirs(self.base_path)
#do monte carlo simulations
MC = MonteCarlo(path,params)
MC.generate_model_instances(self.base_path,n, sim_type='TOPOLOGY', verbose=vb, threads=n_threads, write_changes=None)
else:
#ensure that models have been run
MonteCarlo.generate_models_from_existing_histories(self.base_path,sim_type='TOPOLOGY',force_recalculate=force,verbose=vb,threads=n_threads)
#load models from base directory
self.models = TopologyAnalysis.ModelRealisation.loadModels(self.base_path, verbose=vb)
###########################################
#GENERATE TOPOLOGY LISTS
###########################################
#declare lists
self.all_litho_topologies=[]
self.all_struct_topologies=[]
#generate lists
for m in self.models:
self.all_litho_topologies.append(m.topology)
self.all_struct_topologies.append(m.topology.collapse_stratigraphy())
############################################
#FIND UNIQUE TOPOLOGIES
############################################
self.accumulate_litho_topologies = []
self.accumulate_struct_topologies = []
self.unique_litho_topologies=NoddyTopology.calculate_unique_topologies(self.all_litho_topologies, output=self.accumulate_litho_topologies)
self.unique_struct_topologies=NoddyTopology.calculate_unique_topologies(self.all_struct_topologies, output=self.accumulate_struct_topologies)
############################################
#GENERATE SUPER TOPOLOGY
############################################
self.super_litho_topology = NoddyTopology.combine_topologies(self.all_litho_topologies)
self.super_struct_topology = NoddyTopology.combine_topologies(self.all_struct_topologies)
def get_average_node_count(self,topology_type='litho'):
'''
Calculates the average number of nodes in all of the model realisations that are part of this
experiment.
**Arguments**
- *topology_type* = The type of topology you are interested in. This should be either 'litho'
or 'struct'
**Returns**
- The average number of nodes
'''
t_list= []
if 'litho' in topology_type:
t_list = self.all_litho_topologies
elif 'struct' in topology_type:
t_list = self.all_struct_topologies
else:
print "Error: Invalid topology_type. This should be 'litho' or 'struct'"
avg = 0.0
for t in t_list:
            avg += t.graph.number_of_nodes() / float(len(t_list))
return avg
def get_average_edge_count(self,topology_type='litho'):
'''
Calculates the average number of nodes in all of the model realisations that are part of this
experiment.
**Arguments**
- *topology_type* = The type of topology you are interested in. This should be either 'litho'
or 'struct'
**Returns**
- The average number of nodes
'''
t_list= []
if 'litho' in topology_type:
t_list = self.all_litho_topologies
elif 'struct' in topology_type:
t_list = self.all_struct_topologies
else:
print "Error: Invalid topology_type. This should be 'litho' or 'struct'"
avg = 0.0
for t in t_list:
            avg += t.graph.number_of_edges() / float(len(t_list))
return avg
def get_possibility(self,topology_type='litho'):
        print "not implemented"
def get_variability(self,topology_type='litho'):
'''
Returns the 'variability' of model topology. This is equal to the total number of observed
adjacency relationships (network edges) divided by the average number of adjacency
relationships (edges) in each model realisation minus one. This value will be equal to 0 if
all the topologies are identical, and increase as more different topological varieties come into
existance. The maximum possible 'variability', when every edge in every topology realisation is
different, is equal to the sum of the number of edges in all the networks divided by the average
number of edges.
**Arguments**
- *topology_type* = The type of topology you are interested in. This should be either 'litho'
or 'struct'
'''
if 'litho' in topology_type:
return -1 + self.super_litho_topology.number_of_edges() / self.get_average_edge_count('litho')
elif 'struct' in topology_type:
return -1 + self.super_struct_topology.number_of_edges() / self.get_average_edge_count('struct')
else:
print "Error: Invalid topology_type. This should be 'litho' or 'struct'"
def get_difference_matrix(self,topology_type='litho'):
'''
Calculates a difference matrix in which each matrix element Exy contains 1 over the jaccard
coefficient of topologies x and y.
**Arguments**
- *topology_type* = The type of topology you are interested in. This should be either 'litho'
or 'struct'
**Returns**
- A difference matrix
'''
t_list= []
if 'litho' in topology_type:
if hasattr(self,'litho_difference_matrix'): #already been calculated
return self.litho_difference_matrix
t_list = self.unique_litho_topologies
elif 'struct' in topology_type:
if hasattr(self,'struct_difference_matrix'):
return self.struct_difference_matrix
t_list = self.unique_struct_topologies
else:
print "Error: Invalid topology_type. This should be 'litho' or 'struct'"
difference_matrix=np.zeros( (len(t_list),len(t_list)))
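        # fill the symmetric matrix: entry (i, j) is 1/jaccard(i, j) - 1, so identical topologies give 0.0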
for i in range (0,len(t_list)):
for j in range (0,len(t_list)):
if i==j: #minor speed optimisation
difference_matrix[i][j] = 0.0
elif i < j:
#nb: similarity = 1 if networks are identical and approaches zero as they become different
difference_matrix[i][j] = -1 + 1.0 / t_list[i].jaccard_coefficient(t_list[j]) #calculate difference
difference_matrix[j][i] = difference_matrix[i][j] #matrix is symmetric
#store
if 'litho' in topology_type:
self.litho_difference_matrix = difference_matrix
else:
self.struct_difference_matrix = difference_matrix
        return difference_matrix  # return the difference matrix
def plot_dendrogram(self,topology_type='litho',path=None):
'''
Calculates the average number of nodes in all of the model realisations that are part of this
experiment.
**Arguments**
- *topology_type* = The type of topology you are interested in. This should be either 'litho'
or 'struct'
- *path* = A path to save the image to. If left as None the image is drawn to the screen.
'''
#get difference matrix (NB. squareform converts it to a condensed matrix for scipy)
import scipy.spatial.distance as dist
import scipy.cluster.hierarchy as clust
m_dif = dist.squareform( self.get_difference_matrix(topology_type),force='tovector' )
if len(m_dif) > 2:
#generate dendrogram using UPGMA
Z = clust.average(m_dif)
#generate plot
clust.dendrogram(Z)
        else:  # we can't build a tree with only one topology...
print "Error: only a single unique topology of this type has been found"
def is_strata_continuous(self,litho):
'''
Calculates the number of models in which all sections of a particular lithology are
directly connected.
**Arguments**:
- *litho* = the lithology id of interest
**Returns**
-The number of models in which the specified lithology is continuous.
'''
##Not implemented yet. This function should count the number of topologies in which
#all nodes of the given lithology are connected (not disjoint).
print "Not implemented yet. Sorry"
def is_strata_touching(self, litho1, litho2):
'''
Calculates the number of models in which these two strata come into contact.
**Arguments**:
- *litho1* = the lithology id of the first lithology
- *litho2* = the lithology id of the second lithology
**Returns**
- The number of models in which the two specified strata come into contact.
'''
##Not implemented yet. This function should count the number of topologies in which
#any nodes of litho1 are touching nodes of litho2
print "Not implemented yet. Sorry"
if __name__ == '__main__': #some debug stuff
import sys
sys.path.append(r"C:\Users\Sam\OneDrive\Documents\Masters\pynoddy")
os.chdir(r"C:\Users\Sam\Documents\Temporary Model Files")
a = TopologyAnalysis("unconf",params='Unconf_de.csv',n=5)
#print results
print "%d unique lithological topologies found" % len(a.unique_litho_topologies)
print "%d unique structural topologies found" % len(a.unique_struct_topologies)
print "model variability (lithological) = %f" % a.get_variability('litho')
print "model variability (structural) = %f" % a.get_variability('struct')
print "Model realisations had lithological topologies of (on average):"
print "\t%d nodes" % a.get_average_node_count()
print "\t%d edges" % a.get_average_edge_count()
print "Model realisations had structural topologies of (on average):"
print "\t%d nodes" % a.get_average_node_count('struct')
print "\t%d edges" % a.get_average_edge_count('struct')
| Leguark/pynoddy | pynoddy/experiment/TopologyAnalysis.py | Python | gpl-2.0 | 15,069 |
from django.conf.urls.defaults import *
urlpatterns = patterns('project.views',
url(r'^$', 'client_index', name='client_index'),
url(r'^(?P<client_id>\d+)/$', 'show_client', name='client_page'),
)
| bansalrajnikant/djime | project/client_urls.py | Python | bsd-3-clause | 207 |
import sys
import os
import unittest
from scrapy.item import Item, Field
from scrapy.utils.misc import arg_to_iter, create_instance, load_object, set_environ, walk_modules
from tests import mock
__doctests__ = ['scrapy.utils.misc']
class UtilsMiscTestCase(unittest.TestCase):
def test_load_object(self):
obj = load_object('scrapy.utils.misc.load_object')
assert obj is load_object
self.assertRaises(ImportError, load_object, 'nomodule999.mod.function')
self.assertRaises(NameError, load_object, 'scrapy.utils.misc.load_object999')
def test_walk_modules(self):
mods = walk_modules('tests.test_utils_misc.test_walk_modules')
expected = [
'tests.test_utils_misc.test_walk_modules',
'tests.test_utils_misc.test_walk_modules.mod',
'tests.test_utils_misc.test_walk_modules.mod.mod0',
'tests.test_utils_misc.test_walk_modules.mod1',
]
self.assertEqual(set([m.__name__ for m in mods]), set(expected))
mods = walk_modules('tests.test_utils_misc.test_walk_modules.mod')
expected = [
'tests.test_utils_misc.test_walk_modules.mod',
'tests.test_utils_misc.test_walk_modules.mod.mod0',
]
self.assertEqual(set([m.__name__ for m in mods]), set(expected))
mods = walk_modules('tests.test_utils_misc.test_walk_modules.mod1')
expected = [
'tests.test_utils_misc.test_walk_modules.mod1',
]
self.assertEqual(set([m.__name__ for m in mods]), set(expected))
self.assertRaises(ImportError, walk_modules, 'nomodule999')
def test_walk_modules_egg(self):
egg = os.path.join(os.path.dirname(__file__), 'test.egg')
sys.path.append(egg)
try:
mods = walk_modules('testegg')
expected = [
'testegg.spiders',
'testegg.spiders.a',
'testegg.spiders.b',
'testegg'
]
self.assertEqual(set([m.__name__ for m in mods]), set(expected))
finally:
sys.path.remove(egg)
def test_arg_to_iter(self):
class TestItem(Item):
name = Field()
assert hasattr(arg_to_iter(None), '__iter__')
assert hasattr(arg_to_iter(100), '__iter__')
assert hasattr(arg_to_iter('lala'), '__iter__')
assert hasattr(arg_to_iter([1, 2, 3]), '__iter__')
assert hasattr(arg_to_iter(l for l in 'abcd'), '__iter__')
self.assertEqual(list(arg_to_iter(None)), [])
self.assertEqual(list(arg_to_iter('lala')), ['lala'])
self.assertEqual(list(arg_to_iter(100)), [100])
self.assertEqual(list(arg_to_iter(l for l in 'abc')), ['a', 'b', 'c'])
self.assertEqual(list(arg_to_iter([1, 2, 3])), [1, 2, 3])
self.assertEqual(list(arg_to_iter({'a':1})), [{'a': 1}])
self.assertEqual(list(arg_to_iter(TestItem(name="john"))), [TestItem(name="john")])
def test_create_instance(self):
settings = mock.MagicMock()
crawler = mock.MagicMock(spec_set=['settings'])
args = (True, 100.)
kwargs = {'key': 'val'}
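        # helpers below assert which constructor (from_settings, from_crawler or plain __init__) gets called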
def _test_with_settings(mock, settings):
create_instance(mock, settings, None, *args, **kwargs)
if hasattr(mock, 'from_crawler'):
self.assertEqual(mock.from_crawler.call_count, 0)
if hasattr(mock, 'from_settings'):
mock.from_settings.assert_called_once_with(settings, *args,
**kwargs)
self.assertEqual(mock.call_count, 0)
else:
mock.assert_called_once_with(*args, **kwargs)
def _test_with_crawler(mock, settings, crawler):
create_instance(mock, settings, crawler, *args, **kwargs)
if hasattr(mock, 'from_crawler'):
mock.from_crawler.assert_called_once_with(crawler, *args,
**kwargs)
if hasattr(mock, 'from_settings'):
self.assertEqual(mock.from_settings.call_count, 0)
self.assertEqual(mock.call_count, 0)
elif hasattr(mock, 'from_settings'):
mock.from_settings.assert_called_once_with(settings, *args,
**kwargs)
self.assertEqual(mock.call_count, 0)
else:
mock.assert_called_once_with(*args, **kwargs)
# Check usage of correct constructor using four mocks:
# 1. with no alternative constructors
# 2. with from_settings() constructor
# 3. with from_crawler() constructor
# 4. with from_settings() and from_crawler() constructor
spec_sets = ([], ['from_settings'], ['from_crawler'],
['from_settings', 'from_crawler'])
for specs in spec_sets:
m = mock.MagicMock(spec_set=specs)
_test_with_settings(m, settings)
m.reset_mock()
_test_with_crawler(m, settings, crawler)
# Check adoption of crawler settings
m = mock.MagicMock(spec_set=['from_settings'])
create_instance(m, None, crawler, *args, **kwargs)
m.from_settings.assert_called_once_with(crawler.settings, *args,
**kwargs)
with self.assertRaises(ValueError):
create_instance(m, None, None)
def test_set_environ(self):
assert os.environ.get('some_test_environ') is None
with set_environ(some_test_environ='test_value'):
assert os.environ.get('some_test_environ') == 'test_value'
assert os.environ.get('some_test_environ') is None
os.environ['some_test_environ'] = 'test'
assert os.environ.get('some_test_environ') == 'test'
with set_environ(some_test_environ='test_value'):
assert os.environ.get('some_test_environ') == 'test_value'
assert os.environ.get('some_test_environ') == 'test'
if __name__ == "__main__":
unittest.main()
| wujuguang/scrapy | tests/test_utils_misc/__init__.py | Python | bsd-3-clause | 6,145 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Integration test for sequence feature columns with SequenceExamples."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import string
import tempfile
from google.protobuf import text_format
from tensorflow.contrib.feature_column.python.feature_column import sequence_feature_column as sfc
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.feature_column import feature_column as fc
from tensorflow.python.keras.layers import recurrent
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.util import compat
class SequenceFeatureColumnIntegrationTest(test.TestCase):
def _make_sequence_example(self):
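    # Build a SequenceExample with two context features (int_ctx, float_ctx) and
    # two variable-length sequence feature lists (int_list, str_list).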
example = example_pb2.SequenceExample()
example.context.feature['int_ctx'].int64_list.value.extend([5])
example.context.feature['float_ctx'].float_list.value.extend([123.6])
for val in range(0, 10, 2):
feat = feature_pb2.Feature()
feat.int64_list.value.extend([val] * val)
example.feature_lists.feature_list['int_list'].feature.extend([feat])
for val in range(1, 11, 2):
feat = feature_pb2.Feature()
feat.bytes_list.value.extend([compat.as_bytes(str(val))] * val)
example.feature_lists.feature_list['str_list'].feature.extend([feat])
return example
def _build_feature_columns(self):
col = fc._categorical_column_with_identity('int_ctx', num_buckets=100)
ctx_cols = [
fc._embedding_column(col, dimension=10),
fc._numeric_column('float_ctx')
]
identity_col = sfc.sequence_categorical_column_with_identity(
'int_list', num_buckets=10)
bucket_col = sfc.sequence_categorical_column_with_hash_bucket(
'bytes_list', hash_bucket_size=100)
seq_cols = [
fc._embedding_column(identity_col, dimension=10),
fc._embedding_column(bucket_col, dimension=20)
]
return ctx_cols, seq_cols
def test_sequence_example_into_input_layer(self):
examples = [_make_sequence_example().SerializeToString()] * 100
ctx_cols, seq_cols = self._build_feature_columns()
def _parse_example(example):
ctx, seq = parsing_ops.parse_single_sequence_example(
example,
context_features=fc.make_parse_example_spec(ctx_cols),
sequence_features=fc.make_parse_example_spec(seq_cols))
ctx.update(seq)
return ctx
ds = dataset_ops.Dataset.from_tensor_slices(examples)
ds = ds.map(_parse_example)
ds = ds.batch(20)
# Test on a single batch
features = dataset_ops.make_one_shot_iterator(ds).get_next()
# Tile the context features across the sequence features
seq_layer, _ = sfc.sequence_input_layer(features, seq_cols)
ctx_layer = fc.input_layer(features, ctx_cols)
input_layer = sfc.concatenate_context_input(ctx_layer, seq_layer)
rnn_layer = recurrent.RNN(recurrent.SimpleRNNCell(10))
output = rnn_layer(input_layer)
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
features_r = sess.run(features)
self.assertAllEqual(features_r['int_list'].dense_shape, [20, 3, 6])
output_r = sess.run(output)
self.assertAllEqual(output_r.shape, [20, 10])
class SequenceExampleParsingTest(test.TestCase):
def test_seq_ex_in_sequence_categorical_column_with_identity(self):
self._test_parsed_sequence_example(
'int_list', sfc.sequence_categorical_column_with_identity,
10, [3, 6], [2, 4, 6])
def test_seq_ex_in_sequence_categorical_column_with_hash_bucket(self):
self._test_parsed_sequence_example(
'bytes_list', sfc.sequence_categorical_column_with_hash_bucket,
10, [3, 4], [compat.as_bytes(x) for x in 'acg'])
def test_seq_ex_in_sequence_categorical_column_with_vocabulary_list(self):
self._test_parsed_sequence_example(
'bytes_list', sfc.sequence_categorical_column_with_vocabulary_list,
list(string.ascii_lowercase), [3, 4],
[compat.as_bytes(x) for x in 'acg'])
def test_seq_ex_in_sequence_categorical_column_with_vocabulary_file(self):
_, fname = tempfile.mkstemp()
with open(fname, 'w') as f:
f.write(string.ascii_lowercase)
self._test_parsed_sequence_example(
'bytes_list', sfc.sequence_categorical_column_with_vocabulary_file,
fname, [3, 4], [compat.as_bytes(x) for x in 'acg'])
def _test_parsed_sequence_example(
self, col_name, col_fn, col_arg, shape, values):
"""Helper function to check that each FeatureColumn parses correctly.
Args:
col_name: string, name to give to the feature column. Should match
the name that the column will parse out of the features dict.
col_fn: function used to create the feature column. For example,
sequence_numeric_column.
col_arg: second arg that the target feature column is expecting.
shape: the expected dense_shape of the feature after parsing into
a SparseTensor.
values: the expected values at index [0, 2, 6] of the feature
after parsing into a SparseTensor.
"""
example = _make_sequence_example()
columns = [
fc._categorical_column_with_identity('int_ctx', num_buckets=100),
fc._numeric_column('float_ctx'),
col_fn(col_name, col_arg)
]
context, seq_features = parsing_ops.parse_single_sequence_example(
example.SerializeToString(),
context_features=fc.make_parse_example_spec(columns[:2]),
sequence_features=fc.make_parse_example_spec(columns[2:]))
with self.cached_session() as sess:
ctx_result, seq_result = sess.run([context, seq_features])
self.assertEqual(list(seq_result[col_name].dense_shape), shape)
self.assertEqual(
list(seq_result[col_name].values[[0, 2, 6]]), values)
self.assertEqual(list(ctx_result['int_ctx'].dense_shape), [1])
self.assertEqual(ctx_result['int_ctx'].values[0], 5)
self.assertEqual(list(ctx_result['float_ctx'].shape), [1])
self.assertAlmostEqual(ctx_result['float_ctx'][0], 123.6, places=1)
_SEQ_EX_PROTO = """
context {
feature {
key: "float_ctx"
value {
float_list {
value: 123.6
}
}
}
feature {
key: "int_ctx"
value {
int64_list {
value: 5
}
}
}
}
feature_lists {
feature_list {
key: "bytes_list"
value {
feature {
bytes_list {
value: "a"
}
}
feature {
bytes_list {
value: "b"
value: "c"
}
}
feature {
bytes_list {
value: "d"
value: "e"
value: "f"
value: "g"
}
}
}
}
feature_list {
key: "float_list"
value {
feature {
float_list {
value: 1.0
}
}
feature {
float_list {
value: 3.0
value: 3.0
value: 3.0
}
}
feature {
float_list {
value: 5.0
value: 5.0
value: 5.0
value: 5.0
value: 5.0
}
}
}
}
feature_list {
key: "int_list"
value {
feature {
int64_list {
value: 2
value: 2
}
}
feature {
int64_list {
value: 4
value: 4
value: 4
value: 4
}
}
feature {
int64_list {
value: 6
value: 6
value: 6
value: 6
value: 6
value: 6
}
}
}
}
}
"""
def _make_sequence_example():
example = example_pb2.SequenceExample()
return text_format.Parse(_SEQ_EX_PROTO, example)
if __name__ == '__main__':
test.main()
| chemelnucfin/tensorflow | tensorflow/contrib/feature_column/python/feature_column/sequence_feature_column_integration_test.py | Python | apache-2.0 | 8,611 |
from crocs.regex import Pattern, ConsumeBack, X
e = ConsumeBack(ConsumeBack(Pattern('a', X(), 'b'), 'def'), 'def')
e.test()
e.hits()
| iogf/crocs | demo/nested_consume_back.py | Python | apache-2.0 | 135 |
from django.utils.http import parse_http_date_safe
from betty.cropper.utils import seconds_since_epoch
def check_not_modified(request, last_modified):
"""Handle 304/If-Modified-Since
    With Django v1.9.5+ we could just use "django.utils.cache.get_conditional_response", but
    Django v1.9 is not yet supported by the "logan" dependency.
"""
if_modified_since = parse_http_date_safe(request.META.get('HTTP_IF_MODIFIED_SINCE'))
return (last_modified and
if_modified_since and
seconds_since_epoch(last_modified) <= if_modified_since)
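# Illustrative sketch only (not used elsewhere in betty-cropper): how a caller
# could combine check_not_modified() with Django's HttpResponseNotModified to
# short-circuit a response. `last_modified` is assumed to be a datetime here,
# matching what seconds_since_epoch() expects.
def _example_not_modified_response(request, last_modified):
    from django.http import HttpResponseNotModified
    if check_not_modified(request, last_modified):
        # The client's cached copy is still fresh; no body needs to be sent.
        return HttpResponseNotModified()
    return None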
| theonion/betty-cropper | betty/cropper/utils/http.py | Python | mit | 567 |
# Generated by Django 2.0 on 2018-02-07 13:38
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='user',
name='last_name',
field=models.CharField(blank=True, max_length=150, verbose_name='last name'),
),
]
| patcurry/WebGIS | core/migrations/0002_auto_20180207_1438.py | Python | mit | 410 |
import os
import os.path
import subprocess
from collections import namedtuple
import rpm
Pkg = namedtuple("Pkg", ["portname", "name", "version", "release", "arch"])
Reader = namedtuple("Reader", ["template"])
Paths = namedtuple("Paths", ["specdir", "rpmdir"])
class Template(object):
def __init__(self, content):
self._content = content
@staticmethod
def resolve_variable(env, name):
varname, sep, atts = name.partition('.')
try:
value = env[varname.strip().lower()]
except KeyError:
errmsg = "Template uses unknown variable: {0}".format(varname)
raise RuntimeError(errmsg)
if not sep:
return value
for attribute in atts.split('.'):
try:
value = getattr(value, attribute)
except AttributeError:
errmsg = "object {0} missing attribute {1}"
raise RuntimeError(errmsg.format(name, attribute))
return value
def render(self, **variables):
sections = []
remaining = self._content
while remaining:
prefix, sep, remaining = remaining.partition("%%")
sections.append(prefix)
if not sep:
# no more variables to substitute
break
variable, sep, remaining = remaining.partition("%%")
if not sep:
raise RuntimeError("Improperly formatted template")
value = self.resolve_variable(variables, variable)
sections.append(str(value))
return "".join(sections)
def build(pkg, template, paths):
keys = dict(zip(pkg._fields, pkg))
raw = subprocess.check_output(["pkg_info", "-f", pkg.portname])
files = [ line[5:].strip() for line in raw.splitlines() if line.startswith('File:')]
files = [ os.path.join('%{_bindir}', name[4:])
for name in files if name.startswith('bin/')]
install = [
"mkdir -p $RPM_BUILD_ROOT%{_bindir}",
"mkdir -p $RPM_BUILD_ROOT%{_bindir}/db46",
"mkdir -p $RPM_BUILD_ROOT%{_bindir}/lua51",
"mkdir -p $RPM_BUILD_ROOT/usr/bin"
]
install += ["touch $RPM_BUILD_ROOT%s" % name
for name in files]
spec = template.render(
files='\n'.join(files),
install='\n'.join(install),
**keys)
specfile = os.path.join(paths.specdir, "%s.spec" % pkg.name)
with open(specfile, 'w') as fp:
fp.write(spec)
os.system("rpmbuild -bb {specfile}".format(specfile=specfile))
rpmname = "{name}-{version}-{release}.{arch}.rpm".format(**keys)
rpmfile = os.path.join(paths.rpmdir, pkg.arch, rpmname)
return rpmfile
def installPackages(pkgs, cfg):
ts = rpm.ts()
ts.setFlags(rpm.RPMTRANS_FLAG_JUSTDB)
with open(cfg.template) as fp:
spectemplate = fp.read()
template = Template(spectemplate)
paths = Paths(rpm.expandMacro('%_specdir'), rpm.expandMacro('%_rpmdir'))
for pkg in pkgs:
match = ts.dbMatch(rpm.RPMTAG_NAME, pkg.name)
if len(match) == 0:
rpmfile = build(pkg, template, paths)
ts.addInstall(rpmfile, rpmfile, 'i')
if ts.check():
raise RuntimeError("This should not happen")
ts.order()
def runCallback(reason, amount, total, rpmfile, client_data):
if reason == rpm.RPMCALLBACK_INST_OPEN_FILE:
client_data[rpmfile] = os.open(rpmfile, os.O_RDONLY)
return client_data[rpmfile]
elif reason == rpm.RPMCALLBACK_INST_CLOSE_FILE:
os.close(client_data[rpmfile])
result = ts.run(runCallback, {})
if result:
print("ERRORS")
print(result)
else:
print("Packages")
for te in ts:
info = (te.N(), te.V(), te.R())
print("name: %s version: %s release: %s" % info)
ts.check()
ts.verifyDB()
def main(fp, cfg):
content = fp.read()
fp.close()
pkgs = (line.split()[0] for line in content.splitlines()
if line.strip())
pkgs = ((rawname, rawname.rsplit('-', 1))
for rawname in pkgs)
# Need to drop the commas in freebsd package versions
pkgs = [Pkg(rawname, name, version.partition(',')[0], "1", "noarch")
for rawname, (name, version) in pkgs]
installPackages(pkgs, cfg)
if __name__ == "__main__":
import sysconfig
import os.path
import sys
data = sysconfig.get_path('data')
hcndir = os.path.join(sysconfig.get_path('data'), 'share', 'hcn')
reader = Reader(os.path.join(hcndir, "spectemplate"))
main(sys.stdin, reader)
| masom/Puck | flavour/pkg/registerports.py | Python | lgpl-3.0 | 4,699 |
#!/usr/bin/env python
# Copyright (C) 2009,2010 Junta de Andalucia
#
# Authors:
# Roberto Majadas <roberto.majadas at openshine.com>
# Cesar Garcia Tapia <cesar.garcia.tapia at openshine.com>
# Luis de Bethencourt <luibg at openshine.com>
# Pablo Vieytes <pvieytes at openshine.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
# USA
import gobject
import gio
import os
import time
import hashlib
import sys
import copy
import json
from urlparse import urlparse
import etld
import errno
import shutil
import pickle
import tempfile
import tarfile
from glob import glob
from twisted.internet import reactor, threads, defer
from twisted.enterprise import adbapi
from BlockingDeferred import BlockingDeferred
import re
def regexp(expr, item):
try:
p = re.compile(expr)
ret = bool(p.match(item))
return ret
except:
print "Regex failure"
return False
def on_db_connect(conn):
conn.create_function("gregexp", 2, regexp)
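# Illustrative sketch (not called anywhere): how the regexp() helper registered
# above as "gregexp" behaves. The custom-filter queries below wrap a stored
# expression as "(.+\.|)" || regexp || ".*", so a filter of "example.com" also
# matches subdomains and anything after the host.
def _gregexp_example():
    assert regexp(r"(.+\.|)example.com.*", "www.example.com/index.html")
    assert not regexp(r"(.+\.|)example.com.*", "another-site.org")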
if os.name == "posix" :
NANNY_DAEMON_DATA = "/var/lib/nanny/"
elif os.name == "nt" :
if not hasattr(sys, "frozen") :
file_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
for x in range(6):
file_dir = os.path.dirname(file_dir)
root_path = file_dir
NANNY_DAEMON_DATA = os.path.join(root_path, "var", "lib", "nanny")
else:
NANNY_DAEMON_DATA = os.path.join(os.environ["ALLUSERSPROFILE"], "Gnome", "nanny")
#The Nanny daemon blacklists dir stores the admin blacklists; the sys blacklists dir is for
#read-only blacklists, for example blacklists provided by packages
NANNY_DAEMON_BLACKLISTS_DIR = os.path.join(NANNY_DAEMON_DATA, "blacklists")
NANNY_DAEMON_BLACKLISTS_SYS_DIR = os.path.join(NANNY_DAEMON_DATA, "sysblacklists")
NANNY_DAEMON_BLACKLISTS_CONF_FILE = os.path.join(NANNY_DAEMON_DATA, "bl_conf.db")
PKG_STATUS_ERROR_NOT_EXISTS = -4
PKG_STATUS_ERROR_UPDATING_BL = -3
PKG_STATUS_ERROR_INSTALLING_NEW_BL = -2
PKG_STATUS_ERROR = -1
PKG_STATUS_READY = 0
PKG_STATUS_READY_UPDATE_AVAILABLE = 1
PKG_STATUS_DOWNLOADING = 2
PKG_STATUS_INSTALLING = 3
PKG_STATUS_UPDATING = 4
def mkdir_path(path):
try:
os.makedirs(path)
except os.error, e:
if e.errno != errno.EEXIST:
raise
class FilterManager (gobject.GObject) :
def __init__(self, quarterback):
gobject.GObject.__init__(self)
self.quarterback = quarterback
self.custom_filters_db = None
self.db_pools = {}
self.db_cat_cache = {}
self.pkg_filters_conf = {}
reactor.addSystemEventTrigger("before", "startup", self.start)
reactor.addSystemEventTrigger("before", "shutdown", self.stop)
def start(self):
print "Start Filter Manager"
mkdir_path(os.path.join(NANNY_DAEMON_DATA, "pkg_filters"))
mkdir_path(NANNY_DAEMON_BLACKLISTS_DIR)
self.custom_filters_db = self.__get_custom_filters_db()
self.__start_packaged_filters()
gobject.timeout_add(5000, self.__update_pkg_checker_timeout)
def stop(self):
print "Stop Filter Manager"
#Custom Filters methods
#------------------------------------
def __get_custom_filters_db(self):
path = os.path.join(NANNY_DAEMON_DATA, "customfilters.db")
if os.path.exists(path) :
return adbapi.ConnectionPool('sqlite3', path,
check_same_thread=False,
cp_openfun=on_db_connect)
else:
db = adbapi.ConnectionPool('sqlite3', path,
check_same_thread=False,
cp_openfun=on_db_connect)
db.runOperation('create table customfilters (id INTEGER PRIMARY KEY, uid text, is_black bool, name text, description text, regexp text)')
print "Created custom filters db"
return db
def add_custom_filter(self, uid, is_black, name, description, regex):
sql_query = 'insert into customfilters ("uid", "is_black", "name", "description", "regexp") values ("%s", %s, "%s", "%s", "%s")' % (str(uid),
int(is_black),
name,
description,
regex)
print sql_query
query = self.custom_filters_db.runQuery(sql_query)
block_d = BlockingDeferred(query)
try:
qr = block_d.blockOn()
return True
except:
print "Something goes wrong Adding Custom Filters"
return False
def list_custom_filters(self, uid):
query = self.custom_filters_db.runQuery("select * from customfilters where uid = '%s'" % str(uid))
block_d = BlockingDeferred(query)
ret = []
try:
qr = block_d.blockOn()
for f in qr :
ret.append([ int(f[0]), unicode(f[3]), unicode(f[4]), unicode(f[5]), bool(f[2]) ])
return ret
except:
print "Something goes wrong Listing Custom Filters"
return ret
def remove_custom_filter(self, list_id):
query = self.custom_filters_db.runQuery('delete from customfilters where id=%s' % int(list_id))
block_d = BlockingDeferred(query)
try:
qr = block_d.blockOn()
return True
except:
print "Something goes wrong Removing Custom Filters"
return False
def update_custom_filter(self, list_id, name, description, regex):
sql_query = 'update customfilters set name="%s", description="%s", regexp="%s" where id=%s' % (name,
description,
regex,
int(list_id))
print sql_query
query = self.custom_filters_db.runQuery(sql_query)
block_d = BlockingDeferred(query)
try:
qr = block_d.blockOn()
return True
except:
print "Something goes wrong Updating Custom Filter"
return False
#Packaged filters
#-----------------------------------
def __start_packaged_filters(self):
if os.path.exists(NANNY_DAEMON_BLACKLISTS_CONF_FILE):
with open(NANNY_DAEMON_BLACKLISTS_CONF_FILE, 'rb') as f:
self.pkg_filters_conf = pickle.load(f)
for pkg_id in self.pkg_filters_conf.keys():
self._save_pkg_filters_conf()
if self.pkg_filters_conf[pkg_id]["status"] == PKG_STATUS_READY \
or self.pkg_filters_conf[pkg_id]["status"] == PKG_STATUS_READY_UPDATE_AVAILABLE:
db = os.path.join(NANNY_DAEMON_BLACKLISTS_DIR,
"%s.db" % (pkg_id))
print db
self.db_pools[pkg_id] = adbapi.ConnectionPool('sqlite3', db,
check_same_thread=False,
cp_openfun=on_db_connect)
print "Added to db pool -> %s" % pkg_id
def _save_pkg_filters_conf(self):
output = open(NANNY_DAEMON_BLACKLISTS_CONF_FILE, 'wb')
pickle.dump(self.pkg_filters_conf, output)
output.close()
def __get_categories_from_db(self, pkg_id):
try:
return self.pkg_filters_conf[pkg_id]["pkg_info"]["categories"]
except:
return []
def _refresh_db_categories_cache(self, pkg_id):
try:
if self.db_pools.has_key(pkg_id) and pkg_id not in self.db_cat_cache.keys() :
print "REFRESHING CATEGORIES (%s)" % pkg_id
sql = "SELECT id,name FROM category"
query = self.db_pools[pkg_id].runQuery(sql)
block_d = BlockingDeferred(query)
qr = block_d.blockOn()
self.db_cat_cache[pkg_id] = {}
for id, name in qr:
self.db_cat_cache[pkg_id][int(id)] = name
print "REFRESHED CATEGORIES (%s)" % pkg_id
except:
print "Something goes wrong updating categories"
return False
return True
def add_pkg_filter(self, url):
pkg_id = hashlib.md5(url).hexdigest()
if pkg_id in self.pkg_filters_conf.keys() :
return False
self.pkg_filters_conf[pkg_id] = {"users_info" : {},
"pkg_info": {},
"status" : PKG_STATUS_DOWNLOADING,
"progress" : 0,
"update_url" : url
}
reactor.callInThread(self.__download_new_pkg, pkg_id, url, self)
return True
def __download_new_pkg(self, pkg_id, url, fm):
import sqlite3
import urllib2
import urlparse
import bz2
try:
if pkg_id in fm.db_pools.keys():
db = fm.db_pools.pop(pkg_id)
db.close()
try:
pkg_info = json.load(urllib2.urlopen(url))
except:
fm.pkg_filters_conf.pop(pkg_id)
threads.blockingCallFromThread(reactor,
fm._save_pkg_filters_conf)
return
fm.pkg_filters_conf[pkg_id]["pkg_info"] = pkg_info
base_filename = pkg_info["base"]
base_url = urlparse.urljoin(url, base_filename)
dest_file = os.path.join(NANNY_DAEMON_BLACKLISTS_DIR,
"%s-%s" % (pkg_id, base_filename))
dest_db = os.path.join(NANNY_DAEMON_BLACKLISTS_DIR,
"%s.db" % (pkg_id))
if os.path.exists(dest_file):
os.unlink(dest_file)
if os.path.exists(dest_db):
os.unlink(dest_db)
df = open(dest_file, "wb")
url_x = urllib2.urlopen(base_url)
fm.pkg_filters_conf[pkg_id]["progress"] = 0
total_len = int(url_x.info().getheaders("Content-Length")[0])
downl_len = 0
while True:
x = url_x.read(1024)
if x != '' :
df.write(x)
downl_len += len(x)
fm.pkg_filters_conf[pkg_id]["progress"] = (downl_len * 100) / total_len
else:
break
df.close()
df_uc_c = bz2.BZ2File(dest_file, "r")
lines_counted = 0
for line in df_uc_c.readlines():
lines_counted += 1
df_uc_c.close()
df_uc = bz2.BZ2File(dest_file, "r")
db_conn = sqlite3.connect(dest_db)
sql=''
fm.pkg_filters_conf[pkg_id]["status"]=PKG_STATUS_INSTALLING
fm.pkg_filters_conf[pkg_id]["progress"] = 0
lines_inserted = 0
for line in df_uc.readlines():
lines_inserted += 1
sql = sql + line
if sqlite3.complete_statement(sql) :
c = db_conn.cursor()
try:
c.execute(sql)
except:
pass
sql = ''
fm.pkg_filters_conf[pkg_id]["progress"] = (lines_inserted * 100) / lines_counted
db_conn.commit()
db_conn.close()
df_uc.close()
os.unlink(dest_file)
fm.pkg_filters_conf[pkg_id]["status"]=PKG_STATUS_READY
fm.pkg_filters_conf[pkg_id]["progress"] = 0
fm.db_pools[pkg_id] = adbapi.ConnectionPool('sqlite3', dest_db,
check_same_thread=False,
cp_openfun=on_db_connect)
print "Added to db pool -> %s" % pkg_id
threads.blockingCallFromThread(reactor,
fm._save_pkg_filters_conf)
except:
if os.path.exists(dest_file):
os.unlink(dest_file)
if os.path.exists(dest_db):
os.unlink(dest_db)
fm.pkg_filters_conf[pkg_id]["pkg_info"]={}
fm.pkg_filters_conf[pkg_id]["status"]=PKG_STATUS_ERROR_INSTALLING_NEW_BL
fm.pkg_filters_conf[pkg_id]["progress"] = 0
threads.blockingCallFromThread(reactor,
fm._save_pkg_filters_conf)
def remove_pkg_filter(self, pkg_id):
dest_db = os.path.join(NANNY_DAEMON_BLACKLISTS_DIR,
"%s.db" % (pkg_id))
if os.path.exists(dest_db):
if pkg_id in self.db_pools.keys():
db = self.db_pools.pop(pkg_id)
db.close()
os.unlink(dest_db)
try:
self.pkg_filters_conf.pop(pkg_id)
self._save_pkg_filters_conf()
print "Removed from db pool -> %s" % pkg_id
except:
pass
return True
def update_pkg_filter(self, pkg_id):
reactor.callInThread(self. __real_update_pkg_filter, pkg_id, self)
return True
def __real_update_pkg_filter(self, pkg_id, fm):
import sqlite3
import urllib2
import urlparse
import bz2
if pkg_id not in fm.pkg_filters_conf.keys():
return
try:
fm.pkg_filters_conf[pkg_id]["status"] = PKG_STATUS_DOWNLOADING
fm.pkg_filters_conf[pkg_id]["progress"] = 0
url = fm.pkg_filters_conf[pkg_id]["update_url"]
pkg_info = json.load(urllib2.urlopen(url))
orig_t = fm.pkg_filters_conf[pkg_id]["pkg_info"]["metadata"]["orig-timestamp"]
release_n = fm.pkg_filters_conf[pkg_id]["pkg_info"]["metadata"]["release-number"]
on_server_orig_t = pkg_info["metadata"]["orig-timestamp"]
on_server_release_n = pkg_info["metadata"]["release-number"]
if orig_t != on_server_orig_t :
reactor.callInThread(self.__download_new_pkg, pkg_id, url, self)
return
else:
force_download = False
for x in range(int(release_n) + 1, int(on_server_release_n) + 1) :
if "diff-%s-%s.bz2" % (orig_t, x) not in pkg_info["diffs"] :
force_download = True
break
if force_download == True:
reactor.callInThread(self.__download_new_pkg, pkg_id, url, self)
return
else:
patches = []
for x in range(int(release_n) + 1, int(on_server_release_n) + 1) :
patches.append(["diff-%s-%s.bz2" % (orig_t, x),
urlparse.urljoin(url, "diff-%s-%s.bz2" % (orig_t, x))])
dest_patch = os.path.join(NANNY_DAEMON_BLACKLISTS_DIR,
"%s.update-patch" % (pkg_id))
if os.path.exists(dest_patch):
os.unlink(dest_patch)
dest_patch_fd = open(dest_patch, "w")
lines_counted = 0
total_diffs = len(patches)
downl_diffs = 0
for diff_filename, diff_url in patches :
dest_file = os.path.join(NANNY_DAEMON_BLACKLISTS_DIR,
"%s-%s" % (pkg_id, diff_filename))
if os.path.exists(dest_file):
os.unlink(dest_file)
df = open(dest_file, "wb")
url_x = urllib2.urlopen(diff_url)
while True:
x = url_x.read(1024)
if x != '' :
df.write(x)
else:
break
df.close()
df_uc = bz2.BZ2File(dest_file, "r")
for line in df_uc.readlines():
if not line.startswith("#") :
dest_patch_fd.write(line)
lines_counted += 1
df_uc.close()
os.unlink(dest_file)
downl_diffs += 1
fm.pkg_filters_conf[pkg_id]["progress"] = (downl_diffs * 100) / total_diffs
dest_patch_fd.close()
dest_patch_fd = open(dest_patch, "r")
if pkg_id in fm.db_pools.keys():
db = fm.db_pools.pop(pkg_id)
db.close()
dest_db = os.path.join(NANNY_DAEMON_BLACKLISTS_DIR,
"%s.db" % (pkg_id))
db_conn = sqlite3.connect(dest_db)
fm.pkg_filters_conf[pkg_id]["status"]=PKG_STATUS_UPDATING
fm.pkg_filters_conf[pkg_id]["progress"] = 0
lines_inserted = 0
sql = ''
update_ok = True
for line in dest_patch_fd.readlines():
lines_inserted += 1
sql = sql + line
if sqlite3.complete_statement(sql) :
c = db_conn.cursor()
try:
c.execute(sql)
except:
db_conn.rollback()
update_ok = False
break
sql = ''
fm.pkg_filters_conf[pkg_id]["progress"] = (lines_inserted * 100) / lines_counted
if update_ok == True:
c = db_conn.cursor()
c.execute ("UPDATE metadata SET value='%s' WHERE key='release-number'" % on_server_release_n)
db_conn.commit()
print "UPDATED pkg:%s to version:%s" % (pkg_id, on_server_release_n)
db_conn.close()
dest_patch_fd.close()
os.unlink(dest_patch)
if update_ok == True :
fm.pkg_filters_conf[pkg_id]["status"]=PKG_STATUS_READY
fm.pkg_filters_conf[pkg_id]["pkg_info"] = pkg_info
fm.pkg_filters_conf[pkg_id]["progress"] = 0
else:
fm.pkg_filters_conf[pkg_id]["status"]=PKG_STATUS_READY_UPDATE_AVAILABLE
fm.pkg_filters_conf[pkg_id]["progress"] = 0
fm.db_pools[pkg_id] = adbapi.ConnectionPool('sqlite3', dest_db,
check_same_thread=False,
cp_openfun=on_db_connect)
print "Added to db pool -> %s" % pkg_id
threads.blockingCallFromThread(reactor,
fm._save_pkg_filters_conf)
except:
print "Something wrong updating pkg : %s" % pkg_id
fm.pkg_filters_conf[pkg_id]["status"]=PKG_STATUS_READY_UPDATE_AVAILABLE
fm.pkg_filters_conf[pkg_id]["progress"] = 0
threads.blockingCallFromThread(reactor,
fm._save_pkg_filters_conf)
def __update_pkg_checker_timeout(self):
reactor.callInThread(self.__update_pkg_checker, self)
gobject.timeout_add(5*60*1000, self.__update_pkg_checker_timeout)
return False
def __update_pkg_checker(self, fm):
import urllib2
for pkg_id in fm.pkg_filters_conf.keys() :
try:
if fm.pkg_filters_conf[pkg_id]["status"] == PKG_STATUS_READY :
url = fm.pkg_filters_conf[pkg_id]["update_url"]
pkg_info = json.load(urllib2.urlopen(url))
orig_t = fm.pkg_filters_conf[pkg_id]["pkg_info"]["metadata"]["orig-timestamp"]
release_n = fm.pkg_filters_conf[pkg_id]["pkg_info"]["metadata"]["release-number"]
on_server_orig_t = pkg_info["metadata"]["orig-timestamp"]
on_server_release_n = pkg_info["metadata"]["release-number"]
if orig_t == on_server_orig_t and release_n == on_server_release_n :
print "Nothing to update (pkg : %s)!" % pkg_id
else:
print "Seems there is and update (pkg: %s)" % pkg_id
fm.pkg_filters_conf[pkg_id]["status"] = PKG_STATUS_READY_UPDATE_AVAILABLE
threads.blockingCallFromThread(reactor,
fm._save_pkg_filters_conf)
except:
print "I can't update pkgs info (no network conn??? )"
def list_pkg_filter(self):
ret = []
for x in self.pkg_filters_conf.keys():
ret.append(x)
return ret
def get_pkg_filter_metadata(self, pkg_id):
if pkg_id not in self.pkg_filters_conf.keys() :
return {}
try:
if self.pkg_filters_conf[pkg_id]["pkg_info"].has_key("metadata"):
metadata = copy.deepcopy(self.pkg_filters_conf[pkg_id]["pkg_info"]["metadata"])
metadata["status"] = self.pkg_filters_conf[pkg_id]["status"]
metadata["progress"] = self.pkg_filters_conf[pkg_id]["progress"]
return metadata
except:
pass
return {"name" : "Unknown",
"provider" : "Unknown",
"status" : self.pkg_filters_conf[pkg_id]["status"],
"progress" : self.pkg_filters_conf[pkg_id]["progress"]}
def set_pkg_filter_metadata(self, pkg_id, name, description):
#Deprecated !!
return True
def get_pkg_filter_user_categories(self, pkg_id, uid):
try:
return_categories = []
categories = self.__get_categories_from_db(pkg_id)
if self.pkg_filters_conf[pkg_id]["users_info"].has_key(uid) :
user_categories = self.pkg_filters_conf[pkg_id]["users_info"][uid]
else:
user_categories = []
if not set(user_categories).issubset(set(categories)) :
tmp_user_categories = []
for ucat in user_categories :
if ucat in categories:
tmp_user_categories.append(ucat)
user_categories = tmp_user_categories
self.pkg_filters_conf[pkg_id]["users_info"][uid] = user_categories
self._save_pkg_filters_conf()
for category in categories:
if category in user_categories:
return_categories.append ((category, True))
else:
return_categories.append ((category, False))
return return_categories
except:
return []
def set_pkg_filter_user_categories(self, pkg_id, uid, user_categories):
categories = self.__get_categories_from_db(pkg_id)
tmp_user_categories = []
if not set(user_categories).issubset(set(categories)) :
for ucat in user_categories :
if ucat in categories:
tmp_user_categories.append(ucat)
user_categories = tmp_user_categories
self.pkg_filters_conf[pkg_id]["users_info"][uid] = user_categories
self._save_pkg_filters_conf()
return True
#Check methods
#------------------------------------
def check_domain_defer (self, uid, domain):
d = defer.Deferred()
reactor.callLater(0.05, d.callback, self.check_domain(uid, domain))
return d
def check_url_defer(self, uid, host, port, request, rest, pre_check):
d = defer.Deferred()
reactor.callLater(0.05, d.callback, self.check_url(uid, host, port, request, rest, pre_check))
return d
def check_domain(self, uid, domain):
print "Check Domain"
idomain = ''
domain_list = domain.split(".")
domain_list.reverse()
for x in domain_list:
idomain = idomain + x + "."
idomain = idomain[:-1]
print "Idomain : %s" % idomain
blacklisted_categories = []
custom_black=False
#Search in customfilters
sub_query = '('
sub_query += ' gregexp( "(.+\.|)" || regexp || ".*" , "%s") ' % (domain)
sub_query += ' or gregexp( "(.+\.|)" || regexp || ".*" , "%s") ' % ('http://' + domain)
sub_query += ')'
sql_query = 'select distinct is_black from customfilters where uid="%s" and %s ' % (uid, sub_query)
query = self.custom_filters_db.runQuery(sql_query)
block_d = BlockingDeferred(query)
try:
qr = block_d.blockOn()
if len(qr) > 0 :
for x in qr :
if x[0] == 0:
print "Custom WhiteListed"
                        return [[False, False], []]
if x[0] == 1:
custom_black = True
except:
print "Something goes wrong checking Custom Filters"
return [[False, False], []]
if custom_black == True :
print "Custom BlackListed"
return [[True, False], []]
#Search in blacklists
x = self.__split_url(domain)
if x != (None, None, None, None, None):
b_domain = x[1].split(".")[0]
b_etld = x[1][len(b_domain) + 1:]
b_subdomain = x[2]
if b_subdomain == None:
b_subdomain = ''
b_path = ''
for db in self.pkg_filters_conf.keys():
self._refresh_db_categories_cache(db)
for db in self.pkg_filters_conf.keys():
if self.pkg_filters_conf[db]["users_info"].has_key(uid) :
if len(self.pkg_filters_conf[db]["users_info"][uid]) > 0 :
sql = 'SELECT id FROM domain WHERE name="%s"' % b_domain
query = self.db_pools[db].runQuery(sql)
block_d = BlockingDeferred(query)
qr = block_d.blockOn()
if len(qr) == 0 :
continue
sql = ''
sql += 'SELECT categories_list FROM blacklist WHERE '
sql += 'etld_id = (SELECT id FROM etld WHERE name ="%s") AND ' % b_etld
sql += 'domain_id = (SELECT id FROM domain WHERE name ="%s") AND '% b_domain
if b_subdomain != '' :
sql += '( '
sql += 'subdomain_id = (SELECT id FROM subdomain WHERE name ="") OR '
sql += 'subdomain_id = (SELECT id FROM subdomain WHERE name ="%s") ' % b_subdomain
sql += ') AND '
else:
sql += 'subdomain_id = (SELECT id FROM subdomain WHERE name ="") AND '
sql += 'path_id = (SELECT id FROM path WHERE name = "" ) '
query = self.db_pools[db].runQuery(sql)
block_d = BlockingDeferred(query)
qr = block_d.blockOn()
if len(qr) != 0:
for cats in qr :
exec ("cats_list = [%s]" % cats)
for c in cats_list :
if self.db_cat_cache[db][c] in self.pkg_filters_conf[db]["users_info"][uid] :
if self.db_cat_cache[db][c] not in blacklisted_categories :
blacklisted_categories.append(self.db_cat_cache[db][c])
if "may_url_blocked" in blacklisted_categories:
continue
sql = ''
sql += 'SELECT COUNT(id) FROM blacklist WHERE '
sql += 'etld_id = (SELECT id FROM etld WHERE name ="%s") AND ' % b_etld
sql += 'domain_id = (SELECT id FROM domain WHERE name ="%s") AND '% b_domain
if b_subdomain != '' :
sql += '( '
sql += 'subdomain_id = (SELECT id FROM subdomain WHERE name ="") OR '
sql += 'subdomain_id = (SELECT id FROM subdomain WHERE name ="%s") ' % b_subdomain
sql += ')'
else:
sql += 'subdomain_id = (SELECT id FROM subdomain WHERE name ="")'
query = self.db_pools[db].runQuery(sql)
block_d = BlockingDeferred(query)
qr = block_d.blockOn()
if (b_subdomain == '' and int(qr[0][0]) > 1) or (b_subdomain != '' and int(qr[0][0]) > 2) :
blacklisted_categories.append("may_url_blocked")
if len (blacklisted_categories) > 0 :
if "may_url_blocked" in blacklisted_categories :
blacklisted_categories.pop(blacklisted_categories.index("may_url_blocked"))
if len (blacklisted_categories) > 0 :
return [[True, True], blacklisted_categories]
else:
return [[False, True], blacklisted_categories]
else:
return [[True, False], blacklisted_categories]
return [[False, False], []]
def check_url(self, uid, host, port, request, rest, pre_check):
if pre_check[0] == True :
print 'Uri Validation stopped because domain is blocked, %s' % (host + request.uri)
return False, request, rest, host, port
#Search in customfilters
sub_query = '('
sub_query += ' gregexp( "(.+\.|)" || regexp || ".*" , "%s") ' % (host + request.uri)
sub_query += ' or gregexp( "(.+\.|)" || regexp || ".*" , "%s") ' % ('http://' + host + request.uri)
sub_query += ')'
sql_query = 'select distinct is_black from customfilters where uid="%s" and %s ' % (uid, sub_query)
query = self.custom_filters_db.runQuery(sql_query)
block_d = BlockingDeferred(query)
try:
qr = block_d.blockOn()
if len(qr) > 0 :
for x in qr :
if x[0] == 0:
print 'Uri Custom filter Whitelist %s' % (host + request.uri)
return True, request, rest, host, port
if x[0] == 1:
print 'Uri Custom filter Blacklist %s' % (host + request.uri)
return False, request, rest, host, port
except:
print "Something goes wrong checking Custom Filters (check_url)"
return True, request, rest, host, port
if pre_check[1] == False :
print 'Uri validation verified in pre-check %s' % (host + request.uri)
return True, request, rest, host, port
uri = host + request.uri
is_ok = True
blacklisted_categories = []
        x = self.__split_url(uri)
if x != (None, None, None, None, None):
b_domain = x[1].split(".")[0]
b_etld = x[1][len(b_domain) + 1:]
b_subdomain = x[2]
if b_subdomain == None:
b_subdomain = ''
b_path = ''
if x[3] != None:
b_path = b_path + x[3]
if x[4] != None:
b_path = b_path + x[4]
for db in self.pkg_filters_conf.keys():
self._refresh_db_categories_cache(db)
for db in self.pkg_filters_conf.keys():
if self.pkg_filters_conf[db]["users_info"].has_key(uid) :
if len(self.pkg_filters_conf[db]["users_info"][uid]) > 0 :
sql = 'SELECT id FROM domain WHERE name="%s"' % b_domain
query = self.db_pools[db].runQuery(sql)
block_d = BlockingDeferred(query)
qr = block_d.blockOn()
if len(qr) == 0 :
continue
sql = ''
sql += 'SELECT categories_list FROM blacklist WHERE '
sql += 'etld_id = (SELECT id FROM etld WHERE name ="%s") AND ' % b_etld
sql += 'domain_id = (SELECT id FROM domain WHERE name ="%s") AND '% b_domain
if b_subdomain != '' :
sql += '( '
sql += 'subdomain_id = (SELECT id FROM subdomain WHERE name ="") OR '
sql += 'subdomain_id = (SELECT id FROM subdomain WHERE name ="%s") ' % b_subdomain
sql += ') AND '
else:
sql += 'subdomain_id = (SELECT id FROM subdomain WHERE name ="") AND '
sql += '('
sql += 'path_id = (SELECT id FROM path WHERE name = "%s" ) OR ' % b_path
sql += 'path_id = (SELECT id FROM path WHERE "%s" GLOB name || "/*") ' % b_path
sql += ')'
query = self.db_pools[db].runQuery(sql)
block_d = BlockingDeferred(query)
qr = block_d.blockOn()
if len(qr) != 0:
for cats in qr :
exec ("cats_list = [%s]" % cats)
for c in cats_list :
if self.db_cat_cache[db][c] in self.pkg_filters_conf[db]["users_info"][uid] :
if self.db_cat_cache[db][c] not in blacklisted_categories :
blacklisted_categories.append(self.db_cat_cache[db][c])
if len (blacklisted_categories) > 0 :
print 'Uri validation stopped because is blacklisted %s [%s]' % (host + request.uri, blacklisted_categories)
return False, request, rest, host, port
print 'Uri validation passed by default %s' % (host + request.uri)
return True, request, rest, host, port
def __split_url(self, url):
"""Split a url in several pieces, returning a tuple with each of that pieces.
It will also remove the user (http://user:[email protected]) and the port (http://domain.com:8080)
Example: With the url "http://www.google.com/test/extra/index.html", the function will return this pieces:
protocol: The protocol used by the url (in the example, "http").
domain: The domain of the url (in the example, "google.com").
subdomain: The subdomain of the url (in the example, "www").
firstlevel: The first level of the path (in the example, "test").
extra: The part of the URL not contained in the previous pieces (in the example, "extra/index.html").
"""
url = url.lower ()
splitted_url = url.split ("://")
if len (splitted_url) > 1:
protocol = splitted_url[0]
url = splitted_url[1]
else:
protocol = 'http'
if protocol != "http" and protocol != "https":
return (None, None, None, None, None)
parsed_url = urlparse ("%s://%s" % (protocol, url))
domain_string = parsed_url.netloc
path_string = parsed_url.path
if not domain_string:
return (None, None, None, None, None)
else:
if domain_string.find ("@") > -1:
domain_string = domain_string.split ("@")[1]
if domain_string.find (":") > -1:
domain_string = domain_string.split (":")[0]
etld_object = etld.etld()
try:
subdomain, domain = etld_object.parse ("%s://%s" % (protocol, domain_string))
except:
return (None, None, None, None, None)
if subdomain == "":
subdomain = None
if path_string:
path_pieces = path_string.split ("/")
firstlevel = path_pieces[1] if len (path_pieces) > 1 and path_pieces[1] else None
extra = "/".join (path_pieces [2:]) if len (path_pieces) > 2 and path_pieces[2] else None
else:
firstlevel = None
extra = None
return (protocol, domain, subdomain, firstlevel, extra)
| hychen/gnome-nanny | daemon/src/FilterManager.py | Python | gpl-2.0 | 39,019 |
#!/usr/bin/env python
import getopt, sys
import optparse
import string
import re
import os
from optparse import OptionParser
def get_running_time(log_time):
    # Watch out: we only expect time of the form MM:SS.SS
str_time = 'Notime'
with open(log_time, 'r') as l:
for timeline in l:
time_sign = timeline.find('Elapsed (wall clock) time')
if time_sign != -1:
last_colon = timeline.rfind(':')
last_dot = timeline.rfind('.')
str_minutes = timeline[last_colon-2 : last_colon]
str_secs = timeline[last_colon+1 : last_dot]
str_psecs = timeline[last_dot+1 : len(timeline)-1]
minutes = string.atoi(str_minutes)
secs = string.atoi(str_secs)
secs += 60*minutes
str_time = str(secs) + '.' + str_psecs
break
l.closed
return str_time
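# Illustrative note (assuming GNU "time -v" style logs): a line such as
#   "Elapsed (wall clock) time (h:mm:ss or m:ss): 1:23.45"
# is parsed to "83.45" -- the minutes are folded into the seconds and the
# fractional part is kept unchanged.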
def element_compare(element1, element2):
if element1 == element2:
return 0
elif element1 < element2:
return -1
else:
return 1
# 0: if instance1 == instance2
# -1: if instance1 < instance2
# 1: if instance1 > instance2
# []: maximum
def instance_compare(instance1, instance2):
for i in range(5):
if instance1[i] != instance2[i]:
return element_compare(instance1[i], instance2[i])
return 0
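# Illustrative sketch (not called anywhere): the comparison contract described
# above, using assumed config rows of the form [topo, ctx, sys, brs, rel, inst].
def _instance_compare_examples():
    # Only the first five fields are compared, so rows that differ only in the
    # last field are grouped into the same test case by process_test_cases().
    assert instance_compare(['diamond', '10', '5', '5', '4', '1'],
                            ['diamond', '10', '5', '5', '4', '2']) == 0
    assert instance_compare(['diamond', '10', '5', '5', '4', '1'],
                            ['ring', '10', '5', '5', '4', '1']) == -1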
def get_engine_running_time(outputdir, testrun, toponame):
status_filename = outputdir + testrun + '/' + toponame + '-status.log'
time_filename = outputdir + testrun + '/' + toponame + '-time.log'
passed = False
with open(status_filename, 'r') as status_file:
line = status_file.readline()
passed_sign = line.find('PASSED')
if passed_sign != -1:
passed = True
status_file.closed
if passed:
running_time = get_running_time(time_filename)
else:
running_time = '600'
return running_time
def ordered_push(output,
dmcs_running_time,
dmcsopt_running_time,
mcsie_running_time):
if output == []:
output.append([dmcs_running_time, dmcsopt_running_time, mcsie_running_time])
else:
d_time = float(dmcs_running_time)
do_time = float(dmcsopt_running_time)
i = 0
while i < len(output):
d_time1 = float(output[i][0])
do_time1 = float(output[i][1])
if ((d_time1 > d_time) or ((d_time1 == d_time) and (do_time1 > do_time)) ):
break
i = i + 1
output.insert(i, [dmcs_running_time, dmcsopt_running_time, mcsie_running_time])
def process_test_cases(toponame, current_test_cases):
# output is an array of arrays of 3 values: [dmcs_running_time, dmcsopt_running_time, mcsie_running_time]
output = []
for instance in current_test_cases:
testrun = toponame + '/' + toponame + '-' + instance[1] + '-' + instance[2] + '-' + instance[3] + '-' + instance[4] + '-' + instance[5]
dmcs_running_time = get_engine_running_time('output/', testrun + '/all', toponame)
dmcsopt_running_time = get_engine_running_time('output/', testrun + '/opt_all', toponame)
mcsie_running_time = get_engine_running_time('output-mcsie/', testrun, toponame)
        # DMCS' results are sorted in increasing order of the running time of the compute_all + non-streaming mode.
# This order must be respected here so that the gnuplot script will produce correct comparisons.
ordered_push(output, dmcs_running_time, dmcsopt_running_time, mcsie_running_time)
print output
output_filename = 'output-mcsie/' + toponame + '/mcsie-' + toponame + '-' + current_test_cases[0][1] + '-' + current_test_cases[0][2] + '-' + current_test_cases[0][3] + '-' + current_test_cases[0][4] + '.dat'
with open(output_filename, 'w') as ofile:
for i in range(0,len(output)):
ofile.write(output[i][2] + '\n')
ofile.closed
def main(argv):
topos = ['diamond', 'ring', 'tree', 'zigzag' ]
for i in range(0, len(topos)):
filename = 'config/' + topos[i] + '.cfg'
with open(filename, 'r') as config_file:
line = config_file.readline()
line = line[:len(line)-1]
current_instance = re.split(',', line)
current_test_case = [current_instance]
while True:
while True:
line = config_file.readline()
if line == "":
break
line = line[:len(line)-1]
current_instance = re.split(',', line)
if instance_compare(current_instance, current_test_case[0]) == 0:
current_test_case.append(current_instance)
else:
break
if line == "":
break
process_test_cases(topos[i], current_test_case)
current_test_case = [current_instance]
config_file.closed
if __name__ == "__main__":
import sys
main(sys.argv[1:])
| DistributedMCS/dmcs | examples/test/collect-mcsie.py | Python | gpl-3.0 | 5,167 |
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest2
class _Base(object):
PROJECT = 'project'
SOURCE1 = 'http://example.com/source1.csv'
    DS_NAME = 'dataset_name'
TABLE_NAME = 'table_name'
JOB_NAME = 'job_name'
def _makeOne(self, *args, **kw):
return self._getTargetClass()(*args, **kw)
def _setUpConstants(self):
import datetime
from gcloud._helpers import UTC
self.WHEN_TS = 1437767599.006
self.WHEN = datetime.datetime.utcfromtimestamp(self.WHEN_TS).replace(
tzinfo=UTC)
self.ETAG = 'ETAG'
self.JOB_ID = '%s:%s' % (self.PROJECT, self.JOB_NAME)
self.RESOURCE_URL = 'http://example.com/path/to/resource'
self.USER_EMAIL = '[email protected]'
def _makeResource(self, started=False, ended=False):
self._setUpConstants()
resource = {
'configuration': {
self.JOB_TYPE: {
},
},
'statistics': {
'creationTime': self.WHEN_TS * 1000,
self.JOB_TYPE: {
}
},
'etag': self.ETAG,
'id': self.JOB_ID,
'jobReference': {
'projectId': self.PROJECT,
'jobId': self.JOB_NAME,
},
'selfLink': self.RESOURCE_URL,
'user_email': self.USER_EMAIL,
}
if started or ended:
resource['statistics']['startTime'] = self.WHEN_TS * 1000
if ended:
resource['statistics']['endTime'] = (self.WHEN_TS + 1000) * 1000
return resource
def _verifyInitialReadonlyProperties(self, job):
# root elements of resource
self.assertEqual(job.etag, None)
self.assertEqual(job.job_id, None)
self.assertEqual(job.self_link, None)
self.assertEqual(job.user_email, None)
# derived from resource['statistics']
self.assertEqual(job.created, None)
self.assertEqual(job.started, None)
self.assertEqual(job.ended, None)
# derived from resource['status']
self.assertEqual(job.error_result, None)
self.assertEqual(job.errors, None)
self.assertEqual(job.state, None)
def _verifyReadonlyResourceProperties(self, job, resource):
from datetime import timedelta
self.assertEqual(job.job_id, self.JOB_ID)
statistics = resource.get('statistics', {})
if 'creationTime' in statistics:
self.assertEqual(job.created, self.WHEN)
else:
self.assertEqual(job.created, None)
if 'startTime' in statistics:
self.assertEqual(job.started, self.WHEN)
else:
self.assertEqual(job.started, None)
if 'endTime' in statistics:
self.assertEqual(job.ended, self.WHEN + timedelta(seconds=1000))
else:
self.assertEqual(job.ended, None)
if 'etag' in resource:
self.assertEqual(job.etag, self.ETAG)
else:
self.assertEqual(job.etag, None)
if 'selfLink' in resource:
self.assertEqual(job.self_link, self.RESOURCE_URL)
else:
self.assertEqual(job.self_link, None)
if 'user_email' in resource:
self.assertEqual(job.user_email, self.USER_EMAIL)
else:
self.assertEqual(job.user_email, None)
class TestLoadTableFromStorageJob(unittest2.TestCase, _Base):
JOB_TYPE = 'load'
def _getTargetClass(self):
from gcloud.bigquery.job import LoadTableFromStorageJob
return LoadTableFromStorageJob
def _setUpConstants(self):
super(TestLoadTableFromStorageJob, self)._setUpConstants()
self.INPUT_FILES = 2
self.INPUT_BYTES = 12345
self.OUTPUT_BYTES = 23456
self.OUTPUT_ROWS = 345
def _makeResource(self, started=False, ended=False):
resource = super(TestLoadTableFromStorageJob, self)._makeResource(
started, ended)
if ended:
resource['statistics']['load']['inputFiles'] = self.INPUT_FILES
resource['statistics']['load']['inputFileBytes'] = self.INPUT_BYTES
resource['statistics']['load']['outputBytes'] = self.OUTPUT_BYTES
resource['statistics']['load']['outputRows'] = self.OUTPUT_ROWS
return resource
def _verifyBooleanConfigProperties(self, job, config):
if 'allowJaggedRows' in config:
self.assertEqual(job.allow_jagged_rows,
config['allowJaggedRows'])
else:
self.assertTrue(job.allow_jagged_rows is None)
if 'allowQuotedNewlines' in config:
self.assertEqual(job.allow_quoted_newlines,
config['allowQuotedNewlines'])
else:
self.assertTrue(job.allow_quoted_newlines is None)
if 'ignoreUnknownValues' in config:
self.assertEqual(job.ignore_unknown_values,
config['ignoreUnknownValues'])
else:
self.assertTrue(job.ignore_unknown_values is None)
def _verifyEnumConfigProperties(self, job, config):
if 'createDisposition' in config:
self.assertEqual(job.create_disposition,
config['createDisposition'])
else:
self.assertTrue(job.create_disposition is None)
if 'encoding' in config:
self.assertEqual(job.encoding,
config['encoding'])
else:
self.assertTrue(job.encoding is None)
if 'sourceFormat' in config:
self.assertEqual(job.source_format,
config['sourceFormat'])
else:
self.assertTrue(job.source_format is None)
if 'writeDisposition' in config:
self.assertEqual(job.write_disposition,
config['writeDisposition'])
else:
self.assertTrue(job.write_disposition is None)
def _verifyResourceProperties(self, job, resource):
self._verifyReadonlyResourceProperties(job, resource)
config = resource.get('configuration', {}).get('load')
self._verifyBooleanConfigProperties(job, config)
self._verifyEnumConfigProperties(job, config)
if 'fieldDelimiter' in config:
self.assertEqual(job.field_delimiter,
config['fieldDelimiter'])
else:
self.assertTrue(job.field_delimiter is None)
if 'maxBadRecords' in config:
self.assertEqual(job.max_bad_records,
config['maxBadRecords'])
else:
self.assertTrue(job.max_bad_records is None)
if 'quote' in config:
self.assertEqual(job.quote_character,
config['quote'])
else:
self.assertTrue(job.quote_character is None)
if 'skipLeadingRows' in config:
self.assertEqual(job.skip_leading_rows,
config['skipLeadingRows'])
else:
self.assertTrue(job.skip_leading_rows is None)
def test_ctor(self):
client = _Client(self.PROJECT)
table = _Table()
job = self._makeOne(self.JOB_NAME, table, [self.SOURCE1], client)
self.assertTrue(job.destination is table)
self.assertEqual(list(job.source_uris), [self.SOURCE1])
self.assertTrue(job._client is client)
self.assertEqual(
job.path,
'/projects/%s/jobs/%s' % (self.PROJECT, self.JOB_NAME))
self.assertEqual(job.schema, [])
self._verifyInitialReadonlyProperties(job)
# derived from resource['statistics']['load']
self.assertEqual(job.input_file_bytes, None)
self.assertEqual(job.input_files, None)
self.assertEqual(job.output_bytes, None)
self.assertEqual(job.output_rows, None)
# set/read from resource['configuration']['load']
self.assertTrue(job.allow_jagged_rows is None)
self.assertTrue(job.allow_quoted_newlines is None)
self.assertTrue(job.create_disposition is None)
self.assertTrue(job.encoding is None)
self.assertTrue(job.field_delimiter is None)
self.assertTrue(job.ignore_unknown_values is None)
self.assertTrue(job.max_bad_records is None)
self.assertTrue(job.quote_character is None)
self.assertTrue(job.skip_leading_rows is None)
self.assertTrue(job.source_format is None)
self.assertTrue(job.write_disposition is None)
def test_ctor_w_schema(self):
from gcloud.bigquery.table import SchemaField
client = _Client(self.PROJECT)
table = _Table()
full_name = SchemaField('full_name', 'STRING', mode='REQUIRED')
age = SchemaField('age', 'INTEGER', mode='REQUIRED')
job = self._makeOne(self.JOB_NAME, table, [self.SOURCE1], client,
schema=[full_name, age])
self.assertEqual(job.schema, [full_name, age])
def test_schema_setter_non_list(self):
client = _Client(self.PROJECT)
table = _Table()
job = self._makeOne(self.JOB_NAME, table, [self.SOURCE1], client)
with self.assertRaises(TypeError):
job.schema = object()
def test_schema_setter_invalid_field(self):
from gcloud.bigquery.table import SchemaField
client = _Client(self.PROJECT)
table = _Table()
job = self._makeOne(self.JOB_NAME, table, [self.SOURCE1], client)
full_name = SchemaField('full_name', 'STRING', mode='REQUIRED')
with self.assertRaises(ValueError):
job.schema = [full_name, object()]
def test_schema_setter(self):
from gcloud.bigquery.table import SchemaField
client = _Client(self.PROJECT)
table = _Table()
job = self._makeOne(self.JOB_NAME, table, [self.SOURCE1], client)
full_name = SchemaField('full_name', 'STRING', mode='REQUIRED')
age = SchemaField('age', 'INTEGER', mode='REQUIRED')
job.schema = [full_name, age]
self.assertEqual(job.schema, [full_name, age])
def test_props_set_by_server(self):
import datetime
from gcloud._helpers import UTC
from gcloud._helpers import _millis
CREATED = datetime.datetime(2015, 8, 11, 12, 13, 22, tzinfo=UTC)
STARTED = datetime.datetime(2015, 8, 11, 13, 47, 15, tzinfo=UTC)
ENDED = datetime.datetime(2015, 8, 11, 14, 47, 15, tzinfo=UTC)
JOB_ID = '%s:%s' % (self.PROJECT, self.JOB_NAME)
URL = 'http://example.com/projects/%s/jobs/%s' % (
self.PROJECT, self.JOB_NAME)
EMAIL = '[email protected]'
ERROR_RESULT = {'debugInfo': 'DEBUG',
'location': 'LOCATION',
'message': 'MESSAGE',
'reason': 'REASON'}
client = _Client(self.PROJECT)
table = _Table()
job = self._makeOne(self.JOB_NAME, table, [self.SOURCE1], client)
job._properties['etag'] = 'ETAG'
job._properties['id'] = JOB_ID
job._properties['selfLink'] = URL
job._properties['user_email'] = EMAIL
statistics = job._properties['statistics'] = {}
statistics['creationTime'] = _millis(CREATED)
statistics['startTime'] = _millis(STARTED)
statistics['endTime'] = _millis(ENDED)
load_stats = statistics['load'] = {}
load_stats['inputFileBytes'] = 12345
load_stats['inputFiles'] = 1
load_stats['outputBytes'] = 23456
load_stats['outputRows'] = 345
self.assertEqual(job.etag, 'ETAG')
self.assertEqual(job.job_id, JOB_ID)
self.assertEqual(job.self_link, URL)
self.assertEqual(job.user_email, EMAIL)
self.assertEqual(job.created, CREATED)
self.assertEqual(job.started, STARTED)
self.assertEqual(job.ended, ENDED)
self.assertEqual(job.input_file_bytes, 12345)
self.assertEqual(job.input_files, 1)
self.assertEqual(job.output_bytes, 23456)
self.assertEqual(job.output_rows, 345)
status = job._properties['status'] = {}
self.assertEqual(job.error_result, None)
self.assertEqual(job.errors, None)
self.assertEqual(job.state, None)
status['errorResult'] = ERROR_RESULT
status['errors'] = [ERROR_RESULT]
status['state'] = 'STATE'
self.assertEqual(job.error_result, ERROR_RESULT)
self.assertEqual(job.errors, [ERROR_RESULT])
self.assertEqual(job.state, 'STATE')
def test_allow_jagged_rows_setter_bad_value(self):
client = _Client(self.PROJECT)
table = _Table()
job = self._makeOne(self.JOB_NAME, table, [self.SOURCE1], client)
with self.assertRaises(ValueError):
job.allow_jagged_rows = object()
def test_allow_jagged_rows_setter_deleter(self):
client = _Client(self.PROJECT)
table = _Table()
job = self._makeOne(self.JOB_NAME, table, [self.SOURCE1], client)
job.allow_jagged_rows = True
self.assertTrue(job.allow_jagged_rows)
del job.allow_jagged_rows
self.assertTrue(job.allow_jagged_rows is None)
def test_allow_quoted_newlines_setter_bad_value(self):
client = _Client(self.PROJECT)
table = _Table()
job = self._makeOne(self.JOB_NAME, table, [self.SOURCE1], client)
with self.assertRaises(ValueError):
job.allow_quoted_newlines = object()
def test_allow_quoted_newlines_setter_deleter(self):
client = _Client(self.PROJECT)
table = _Table()
job = self._makeOne(self.JOB_NAME, table, [self.SOURCE1], client)
job.allow_quoted_newlines = True
self.assertTrue(job.allow_quoted_newlines)
del job.allow_quoted_newlines
self.assertTrue(job.allow_quoted_newlines is None)
def test_create_disposition_setter_bad_value(self):
client = _Client(self.PROJECT)
table = _Table()
job = self._makeOne(self.JOB_NAME, table, [self.SOURCE1], client)
with self.assertRaises(ValueError):
job.create_disposition = 'BOGUS'
def test_create_disposition_setter_deleter(self):
client = _Client(self.PROJECT)
table = _Table()
job = self._makeOne(self.JOB_NAME, table, [self.SOURCE1], client)
job.create_disposition = 'CREATE_IF_NEEDED'
self.assertEqual(job.create_disposition, 'CREATE_IF_NEEDED')
del job.create_disposition
self.assertTrue(job.create_disposition is None)
def test_encoding_setter_bad_value(self):
client = _Client(self.PROJECT)
table = _Table()
job = self._makeOne(self.JOB_NAME, table, [self.SOURCE1], client)
with self.assertRaises(ValueError):
job.encoding = 'BOGUS'
def test_encoding_setter_deleter(self):
client = _Client(self.PROJECT)
table = _Table()
job = self._makeOne(self.JOB_NAME, table, [self.SOURCE1], client)
job.encoding = 'ISO-8559-1'
self.assertEqual(job.encoding, 'ISO-8559-1')
del job.encoding
self.assertTrue(job.encoding is None)
def test_field_delimiter_setter_bad_value(self):
client = _Client(self.PROJECT)
table = _Table()
job = self._makeOne(self.JOB_NAME, table, [self.SOURCE1], client)
with self.assertRaises(ValueError):
job.field_delimiter = object()
def test_field_delimiter_setter_deleter(self):
client = _Client(self.PROJECT)
table = _Table()
job = self._makeOne(self.JOB_NAME, table, [self.SOURCE1], client)
job.field_delimiter = '|'
self.assertEqual(job.field_delimiter, '|')
del job.field_delimiter
self.assertTrue(job.field_delimiter is None)
def test_ignore_unknown_values_setter_bad_value(self):
client = _Client(self.PROJECT)
table = _Table()
job = self._makeOne(self.JOB_NAME, table, [self.SOURCE1], client)
with self.assertRaises(ValueError):
job.ignore_unknown_values = object()
def test_ignore_unknown_values_setter_deleter(self):
client = _Client(self.PROJECT)
table = _Table()
job = self._makeOne(self.JOB_NAME, table, [self.SOURCE1], client)
job.ignore_unknown_values = True
self.assertTrue(job.ignore_unknown_values)
del job.ignore_unknown_values
self.assertTrue(job.ignore_unknown_values is None)
def test_max_bad_records_setter_bad_value(self):
client = _Client(self.PROJECT)
table = _Table()
job = self._makeOne(self.JOB_NAME, table, [self.SOURCE1], client)
with self.assertRaises(ValueError):
job.max_bad_records = object()
def test_max_bad_records_setter_deleter(self):
client = _Client(self.PROJECT)
table = _Table()
job = self._makeOne(self.JOB_NAME, table, [self.SOURCE1], client)
job.max_bad_records = 100
self.assertEqual(job.max_bad_records, 100)
del job.max_bad_records
self.assertTrue(job.max_bad_records is None)
def test_quote_character_setter_bad_value(self):
client = _Client(self.PROJECT)
table = _Table()
job = self._makeOne(self.JOB_NAME, table, [self.SOURCE1], client)
with self.assertRaises(ValueError):
job.quote_character = object()
def test_quote_character_setter_deleter(self):
client = _Client(self.PROJECT)
table = _Table()
job = self._makeOne(self.JOB_NAME, table, [self.SOURCE1], client)
job.quote_character = "'"
self.assertEqual(job.quote_character, "'")
del job.quote_character
self.assertTrue(job.quote_character is None)
def test_skip_leading_rows_setter_bad_value(self):
client = _Client(self.PROJECT)
table = _Table()
job = self._makeOne(self.JOB_NAME, table, [self.SOURCE1], client)
with self.assertRaises(ValueError):
job.skip_leading_rows = object()
def test_skip_leading_rows_setter_deleter(self):
client = _Client(self.PROJECT)
table = _Table()
job = self._makeOne(self.JOB_NAME, table, [self.SOURCE1], client)
job.skip_leading_rows = 2
self.assertEqual(job.skip_leading_rows, 2)
del job.skip_leading_rows
self.assertTrue(job.skip_leading_rows is None)
def test_source_format_setter_bad_value(self):
client = _Client(self.PROJECT)
table = _Table()
job = self._makeOne(self.JOB_NAME, table, [self.SOURCE1], client)
with self.assertRaises(ValueError):
job.source_format = 'BOGUS'
def test_source_format_setter_deleter(self):
client = _Client(self.PROJECT)
table = _Table()
job = self._makeOne(self.JOB_NAME, table, [self.SOURCE1], client)
job.source_format = 'NEWLINE_DELIMITED_JSON'
self.assertEqual(job.source_format, 'NEWLINE_DELIMITED_JSON')
del job.source_format
self.assertTrue(job.source_format is None)
def test_write_disposition_setter_bad_value(self):
client = _Client(self.PROJECT)
table = _Table()
job = self._makeOne(self.JOB_NAME, table, [self.SOURCE1], client)
with self.assertRaises(ValueError):
job.write_disposition = 'BOGUS'
def test_write_disposition_setter_deleter(self):
client = _Client(self.PROJECT)
table = _Table()
job = self._makeOne(self.JOB_NAME, table, [self.SOURCE1], client)
job.write_disposition = 'WRITE_TRUNCATE'
self.assertEqual(job.write_disposition, 'WRITE_TRUNCATE')
del job.write_disposition
self.assertTrue(job.write_disposition is None)
def test_begin_w_bound_client(self):
PATH = 'projects/%s/jobs' % self.PROJECT
RESOURCE = self._makeResource()
# Ensure None for missing server-set props
del RESOURCE['statistics']['creationTime']
del RESOURCE['etag']
del RESOURCE['selfLink']
del RESOURCE['user_email']
conn = _Connection(RESOURCE)
client = _Client(project=self.PROJECT, connection=conn)
table = _Table()
job = self._makeOne(self.JOB_NAME, table, [self.SOURCE1], client)
job.begin()
self.assertEqual(len(conn._requested), 1)
req = conn._requested[0]
self.assertEqual(req['method'], 'POST')
self.assertEqual(req['path'], '/%s' % PATH)
SENT = {
'jobReference': {
'projectId': self.PROJECT,
'jobId': self.JOB_NAME,
},
'configuration': {
'load': {
'sourceUris': [self.SOURCE1],
'destinationTable': {
'projectId': self.PROJECT,
'datasetId': self.DS_NAME,
'tableId': self.TABLE_NAME,
},
},
},
}
self.assertEqual(req['data'], SENT)
self._verifyResourceProperties(job, RESOURCE)
def test_begin_w_alternate_client(self):
from gcloud.bigquery.table import SchemaField
PATH = 'projects/%s/jobs' % self.PROJECT
RESOURCE = self._makeResource(ended=True)
LOAD_CONFIGURATION = {
'sourceUris': [self.SOURCE1],
'destinationTable': {
'projectId': self.PROJECT,
'datasetId': self.DS_NAME,
'tableId': self.TABLE_NAME,
},
'allowJaggedRows': True,
'allowQuotedNewlines': True,
'createDisposition': 'CREATE_NEVER',
'encoding': 'ISO-8559-1',
'fieldDelimiter': '|',
'ignoreUnknownValues': True,
'maxBadRecords': 100,
'quote': "'",
'skipLeadingRows': 1,
'sourceFormat': 'CSV',
'writeDisposition': 'WRITE_TRUNCATE',
'schema': {'fields': [
{'name': 'full_name', 'type': 'STRING', 'mode': 'REQUIRED'},
{'name': 'age', 'type': 'INTEGER', 'mode': 'REQUIRED'},
]}
}
RESOURCE['configuration']['load'] = LOAD_CONFIGURATION
conn1 = _Connection()
client1 = _Client(project=self.PROJECT, connection=conn1)
conn2 = _Connection(RESOURCE)
client2 = _Client(project=self.PROJECT, connection=conn2)
table = _Table()
full_name = SchemaField('full_name', 'STRING', mode='REQUIRED')
age = SchemaField('age', 'INTEGER', mode='REQUIRED')
job = self._makeOne(self.JOB_NAME, table, [self.SOURCE1], client1,
schema=[full_name, age])
job.allow_jagged_rows = True
job.allow_quoted_newlines = True
job.create_disposition = 'CREATE_NEVER'
job.encoding = 'ISO-8559-1'
job.field_delimiter = '|'
job.ignore_unknown_values = True
job.max_bad_records = 100
job.quote_character = "'"
job.skip_leading_rows = 1
job.source_format = 'CSV'
job.write_disposition = 'WRITE_TRUNCATE'
job.begin(client=client2)
self.assertEqual(len(conn1._requested), 0)
self.assertEqual(len(conn2._requested), 1)
req = conn2._requested[0]
self.assertEqual(req['method'], 'POST')
self.assertEqual(req['path'], '/%s' % PATH)
SENT = {
'jobReference': {
'projectId': self.PROJECT,
'jobId': self.JOB_NAME,
},
'configuration': {
'load': LOAD_CONFIGURATION,
},
}
self.assertEqual(req['data'], SENT)
self._verifyResourceProperties(job, RESOURCE)
def test_exists_miss_w_bound_client(self):
PATH = 'projects/%s/jobs/%s' % (self.PROJECT, self.JOB_NAME)
conn = _Connection()
client = _Client(project=self.PROJECT, connection=conn)
table = _Table()
job = self._makeOne(self.JOB_NAME, table, [self.SOURCE1], client)
self.assertFalse(job.exists())
self.assertEqual(len(conn._requested), 1)
req = conn._requested[0]
self.assertEqual(req['method'], 'GET')
self.assertEqual(req['path'], '/%s' % PATH)
self.assertEqual(req['query_params'], {'fields': 'id'})
def test_exists_hit_w_alternate_client(self):
PATH = 'projects/%s/jobs/%s' % (self.PROJECT, self.JOB_NAME)
conn1 = _Connection()
client1 = _Client(project=self.PROJECT, connection=conn1)
conn2 = _Connection({})
client2 = _Client(project=self.PROJECT, connection=conn2)
table = _Table()
job = self._makeOne(self.JOB_NAME, table, [self.SOURCE1], client1)
self.assertTrue(job.exists(client=client2))
self.assertEqual(len(conn1._requested), 0)
self.assertEqual(len(conn2._requested), 1)
req = conn2._requested[0]
self.assertEqual(req['method'], 'GET')
self.assertEqual(req['path'], '/%s' % PATH)
self.assertEqual(req['query_params'], {'fields': 'id'})
def test_reload_w_bound_client(self):
PATH = 'projects/%s/jobs/%s' % (self.PROJECT, self.JOB_NAME)
RESOURCE = self._makeResource()
conn = _Connection(RESOURCE)
client = _Client(project=self.PROJECT, connection=conn)
table = _Table()
job = self._makeOne(self.JOB_NAME, table, [self.SOURCE1], client)
job.reload()
self.assertEqual(len(conn._requested), 1)
req = conn._requested[0]
self.assertEqual(req['method'], 'GET')
self.assertEqual(req['path'], '/%s' % PATH)
self._verifyResourceProperties(job, RESOURCE)
def test_reload_w_alternate_client(self):
PATH = 'projects/%s/jobs/%s' % (self.PROJECT, self.JOB_NAME)
RESOURCE = self._makeResource()
conn1 = _Connection()
client1 = _Client(project=self.PROJECT, connection=conn1)
conn2 = _Connection(RESOURCE)
client2 = _Client(project=self.PROJECT, connection=conn2)
table = _Table()
job = self._makeOne(self.JOB_NAME, table, [self.SOURCE1], client1)
job.reload(client=client2)
self.assertEqual(len(conn1._requested), 0)
self.assertEqual(len(conn2._requested), 1)
req = conn2._requested[0]
self.assertEqual(req['method'], 'GET')
self.assertEqual(req['path'], '/%s' % PATH)
self._verifyResourceProperties(job, RESOURCE)
def test_cancel_w_bound_client(self):
PATH = 'projects/%s/jobs/%s/cancel' % (self.PROJECT, self.JOB_NAME)
RESOURCE = self._makeResource()
conn = _Connection(RESOURCE)
client = _Client(project=self.PROJECT, connection=conn)
table = _Table()
job = self._makeOne(self.JOB_NAME, table, [self.SOURCE1], client)
job.cancel()
self.assertEqual(len(conn._requested), 1)
req = conn._requested[0]
self.assertEqual(req['method'], 'POST')
self.assertEqual(req['path'], '/%s' % PATH)
self._verifyResourceProperties(job, RESOURCE)
def test_cancel_w_alternate_client(self):
PATH = 'projects/%s/jobs/%s/cancel' % (self.PROJECT, self.JOB_NAME)
RESOURCE = self._makeResource()
conn1 = _Connection()
client1 = _Client(project=self.PROJECT, connection=conn1)
conn2 = _Connection(RESOURCE)
client2 = _Client(project=self.PROJECT, connection=conn2)
table = _Table()
job = self._makeOne(self.JOB_NAME, table, [self.SOURCE1], client1)
job.cancel(client=client2)
self.assertEqual(len(conn1._requested), 0)
self.assertEqual(len(conn2._requested), 1)
req = conn2._requested[0]
self.assertEqual(req['method'], 'POST')
self.assertEqual(req['path'], '/%s' % PATH)
self._verifyResourceProperties(job, RESOURCE)
class TestCopyJob(unittest2.TestCase, _Base):
JOB_TYPE = 'copy'
SOURCE_TABLE = 'source_table'
DESTINATION_TABLE = 'destination_table'
def _getTargetClass(self):
from gcloud.bigquery.job import CopyJob
return CopyJob
def _verifyResourceProperties(self, job, resource):
self._verifyReadonlyResourceProperties(job, resource)
config = resource.get('configuration', {}).get('copy')
if 'createDisposition' in config:
self.assertEqual(job.create_disposition,
config['createDisposition'])
else:
self.assertTrue(job.create_disposition is None)
if 'writeDisposition' in config:
self.assertEqual(job.write_disposition,
config['writeDisposition'])
else:
self.assertTrue(job.write_disposition is None)
def test_ctor(self):
client = _Client(self.PROJECT)
source = _Table(self.SOURCE_TABLE)
destination = _Table(self.DESTINATION_TABLE)
job = self._makeOne(self.JOB_NAME, destination, [source], client)
self.assertTrue(job.destination is destination)
self.assertEqual(job.sources, [source])
self.assertTrue(job._client is client)
self.assertEqual(
job.path,
'/projects/%s/jobs/%s' % (self.PROJECT, self.JOB_NAME))
self._verifyInitialReadonlyProperties(job)
# set/read from resource['configuration']['copy']
self.assertTrue(job.create_disposition is None)
self.assertTrue(job.write_disposition is None)
def test_create_disposition_setter_bad_value(self):
client = _Client(self.PROJECT)
source = _Table(self.SOURCE_TABLE)
destination = _Table(self.DESTINATION_TABLE)
job = self._makeOne(self.JOB_NAME, destination, [source], client)
with self.assertRaises(ValueError):
job.create_disposition = 'BOGUS'
def test_create_disposition_setter_deleter(self):
client = _Client(self.PROJECT)
source = _Table(self.SOURCE_TABLE)
destination = _Table(self.DESTINATION_TABLE)
job = self._makeOne(self.JOB_NAME, destination, [source], client)
job.create_disposition = 'CREATE_IF_NEEDED'
self.assertEqual(job.create_disposition, 'CREATE_IF_NEEDED')
del job.create_disposition
self.assertTrue(job.create_disposition is None)
def test_write_disposition_setter_bad_value(self):
client = _Client(self.PROJECT)
source = _Table(self.SOURCE_TABLE)
destination = _Table(self.DESTINATION_TABLE)
job = self._makeOne(self.JOB_NAME, destination, [source], client)
with self.assertRaises(ValueError):
job.write_disposition = 'BOGUS'
def test_write_disposition_setter_deleter(self):
client = _Client(self.PROJECT)
source = _Table(self.SOURCE_TABLE)
destination = _Table(self.DESTINATION_TABLE)
job = self._makeOne(self.JOB_NAME, destination, [source], client)
job.write_disposition = 'WRITE_TRUNCATE'
self.assertEqual(job.write_disposition, 'WRITE_TRUNCATE')
del job.write_disposition
self.assertTrue(job.write_disposition is None)
def test_begin_w_bound_client(self):
PATH = 'projects/%s/jobs' % self.PROJECT
RESOURCE = self._makeResource()
# Ensure None for missing server-set props
del RESOURCE['statistics']['creationTime']
del RESOURCE['etag']
del RESOURCE['selfLink']
del RESOURCE['user_email']
conn = _Connection(RESOURCE)
client = _Client(project=self.PROJECT, connection=conn)
source = _Table(self.SOURCE_TABLE)
destination = _Table(self.DESTINATION_TABLE)
job = self._makeOne(self.JOB_NAME, destination, [source], client)
job.begin()
self.assertEqual(len(conn._requested), 1)
req = conn._requested[0]
self.assertEqual(req['method'], 'POST')
self.assertEqual(req['path'], '/%s' % PATH)
SENT = {
'jobReference': {
'projectId': self.PROJECT,
'jobId': self.JOB_NAME,
},
'configuration': {
'copy': {
'sourceTables': [{
'projectId': self.PROJECT,
'datasetId': self.DS_NAME,
'tableId': self.SOURCE_TABLE
}],
'destinationTable': {
'projectId': self.PROJECT,
'datasetId': self.DS_NAME,
'tableId': self.DESTINATION_TABLE,
},
},
},
}
self.assertEqual(req['data'], SENT)
self._verifyResourceProperties(job, RESOURCE)
def test_begin_w_alternate_client(self):
PATH = 'projects/%s/jobs' % self.PROJECT
RESOURCE = self._makeResource(ended=True)
COPY_CONFIGURATION = {
'sourceTables': [{
'projectId': self.PROJECT,
'datasetId': self.DS_NAME,
'tableId': self.SOURCE_TABLE,
}],
'destinationTable': {
'projectId': self.PROJECT,
'datasetId': self.DS_NAME,
'tableId': self.DESTINATION_TABLE,
},
'createDisposition': 'CREATE_NEVER',
'writeDisposition': 'WRITE_TRUNCATE',
}
RESOURCE['configuration']['copy'] = COPY_CONFIGURATION
conn1 = _Connection()
client1 = _Client(project=self.PROJECT, connection=conn1)
conn2 = _Connection(RESOURCE)
client2 = _Client(project=self.PROJECT, connection=conn2)
source = _Table(self.SOURCE_TABLE)
destination = _Table(self.DESTINATION_TABLE)
job = self._makeOne(self.JOB_NAME, destination, [source], client1)
job.create_disposition = 'CREATE_NEVER'
job.write_disposition = 'WRITE_TRUNCATE'
job.begin(client=client2)
self.assertEqual(len(conn1._requested), 0)
self.assertEqual(len(conn2._requested), 1)
req = conn2._requested[0]
self.assertEqual(req['method'], 'POST')
self.assertEqual(req['path'], '/%s' % PATH)
SENT = {
'jobReference': {
'projectId': self.PROJECT,
'jobId': self.JOB_NAME,
},
'configuration': {
'copy': COPY_CONFIGURATION,
},
}
self.assertEqual(req['data'], SENT)
self._verifyResourceProperties(job, RESOURCE)
def test_exists_miss_w_bound_client(self):
PATH = 'projects/%s/jobs/%s' % (self.PROJECT, self.JOB_NAME)
conn = _Connection()
client = _Client(project=self.PROJECT, connection=conn)
source = _Table(self.SOURCE_TABLE)
destination = _Table(self.DESTINATION_TABLE)
job = self._makeOne(self.JOB_NAME, destination, [source], client)
self.assertFalse(job.exists())
self.assertEqual(len(conn._requested), 1)
req = conn._requested[0]
self.assertEqual(req['method'], 'GET')
self.assertEqual(req['path'], '/%s' % PATH)
self.assertEqual(req['query_params'], {'fields': 'id'})
def test_exists_hit_w_alternate_client(self):
PATH = 'projects/%s/jobs/%s' % (self.PROJECT, self.JOB_NAME)
conn1 = _Connection()
client1 = _Client(project=self.PROJECT, connection=conn1)
conn2 = _Connection({})
client2 = _Client(project=self.PROJECT, connection=conn2)
source = _Table(self.SOURCE_TABLE)
destination = _Table(self.DESTINATION_TABLE)
job = self._makeOne(self.JOB_NAME, destination, [source], client1)
self.assertTrue(job.exists(client=client2))
self.assertEqual(len(conn1._requested), 0)
self.assertEqual(len(conn2._requested), 1)
req = conn2._requested[0]
self.assertEqual(req['method'], 'GET')
self.assertEqual(req['path'], '/%s' % PATH)
self.assertEqual(req['query_params'], {'fields': 'id'})
def test_reload_w_bound_client(self):
PATH = 'projects/%s/jobs/%s' % (self.PROJECT, self.JOB_NAME)
RESOURCE = self._makeResource()
conn = _Connection(RESOURCE)
client = _Client(project=self.PROJECT, connection=conn)
source = _Table(self.SOURCE_TABLE)
destination = _Table(self.DESTINATION_TABLE)
job = self._makeOne(self.JOB_NAME, destination, [source], client)
job.reload()
self.assertEqual(len(conn._requested), 1)
req = conn._requested[0]
self.assertEqual(req['method'], 'GET')
self.assertEqual(req['path'], '/%s' % PATH)
self._verifyResourceProperties(job, RESOURCE)
def test_reload_w_alternate_client(self):
PATH = 'projects/%s/jobs/%s' % (self.PROJECT, self.JOB_NAME)
RESOURCE = self._makeResource()
conn1 = _Connection()
client1 = _Client(project=self.PROJECT, connection=conn1)
conn2 = _Connection(RESOURCE)
client2 = _Client(project=self.PROJECT, connection=conn2)
source = _Table(self.SOURCE_TABLE)
destination = _Table(self.DESTINATION_TABLE)
job = self._makeOne(self.JOB_NAME, destination, [source], client1)
job.reload(client=client2)
self.assertEqual(len(conn1._requested), 0)
self.assertEqual(len(conn2._requested), 1)
req = conn2._requested[0]
self.assertEqual(req['method'], 'GET')
self.assertEqual(req['path'], '/%s' % PATH)
self._verifyResourceProperties(job, RESOURCE)
class TestExtractTableToStorageJob(unittest2.TestCase, _Base):
JOB_TYPE = 'extract'
SOURCE_TABLE = 'source_table'
DESTINATION_URI = 'gs://bucket_name/object_name'
def _getTargetClass(self):
from gcloud.bigquery.job import ExtractTableToStorageJob
return ExtractTableToStorageJob
def _verifyResourceProperties(self, job, resource):
self._verifyReadonlyResourceProperties(job, resource)
config = resource.get('configuration', {}).get('extract')
if 'compression' in config:
self.assertEqual(job.compression,
config['compression'])
else:
self.assertTrue(job.compression is None)
if 'destinationFormat' in config:
self.assertEqual(job.destination_format,
config['destinationFormat'])
else:
self.assertTrue(job.destination_format is None)
if 'fieldDelimiter' in config:
self.assertEqual(job.field_delimiter,
config['fieldDelimiter'])
else:
self.assertTrue(job.field_delimiter is None)
if 'printHeader' in config:
self.assertEqual(job.print_header,
config['printHeader'])
else:
self.assertTrue(job.print_header is None)
def test_ctor(self):
client = _Client(self.PROJECT)
source = _Table(self.SOURCE_TABLE)
job = self._makeOne(self.JOB_NAME, source, [self.DESTINATION_URI],
client)
self.assertEqual(job.source, source)
self.assertEqual(job.destination_uris, [self.DESTINATION_URI])
self.assertTrue(job._client is client)
self.assertEqual(
job.path,
'/projects/%s/jobs/%s' % (self.PROJECT, self.JOB_NAME))
self._verifyInitialReadonlyProperties(job)
# set/read from resource['configuration']['copy']
self.assertTrue(job.compression is None)
self.assertTrue(job.destination_format is None)
self.assertTrue(job.field_delimiter is None)
self.assertTrue(job.print_header is None)
def test_compression_setter_bad_value(self):
client = _Client(self.PROJECT)
source = _Table(self.SOURCE_TABLE)
job = self._makeOne(self.JOB_NAME, source, [self.DESTINATION_URI],
client)
with self.assertRaises(ValueError):
job.compression = 'BOGUS'
def test_compression_setter_deleter(self):
client = _Client(self.PROJECT)
source = _Table(self.SOURCE_TABLE)
job = self._makeOne(self.JOB_NAME, source, [self.DESTINATION_URI],
client)
job.compression = 'GZIP'
self.assertEqual(job.compression, 'GZIP')
del job.compression
self.assertTrue(job.compression is None)
def test_destination_format_setter_bad_value(self):
client = _Client(self.PROJECT)
source = _Table(self.SOURCE_TABLE)
job = self._makeOne(self.JOB_NAME, source, [self.DESTINATION_URI],
client)
with self.assertRaises(ValueError):
job.destination_format = 'BOGUS'
def test_destination_format_setter_deleter(self):
client = _Client(self.PROJECT)
source = _Table(self.SOURCE_TABLE)
job = self._makeOne(self.JOB_NAME, source, [self.DESTINATION_URI],
client)
job.destination_format = 'AVRO'
self.assertEqual(job.destination_format, 'AVRO')
del job.destination_format
self.assertTrue(job.destination_format is None)
def test_field_delimiter_setter_bad_value(self):
client = _Client(self.PROJECT)
source = _Table(self.SOURCE_TABLE)
job = self._makeOne(self.JOB_NAME, source, [self.DESTINATION_URI],
client)
with self.assertRaises(ValueError):
job.field_delimiter = object()
def test_field_delimiter_setter_deleter(self):
client = _Client(self.PROJECT)
source = _Table(self.SOURCE_TABLE)
job = self._makeOne(self.JOB_NAME, source, [self.DESTINATION_URI],
client)
job.field_delimiter = '|'
self.assertEqual(job.field_delimiter, '|')
del job.field_delimiter
self.assertTrue(job.field_delimiter is None)
def test_print_header_setter_bad_value(self):
client = _Client(self.PROJECT)
source = _Table(self.SOURCE_TABLE)
job = self._makeOne(self.JOB_NAME, source, [self.DESTINATION_URI],
client)
with self.assertRaises(ValueError):
job.print_header = 'BOGUS'
def test_print_header_setter_deleter(self):
client = _Client(self.PROJECT)
source = _Table(self.SOURCE_TABLE)
job = self._makeOne(self.JOB_NAME, source, [self.DESTINATION_URI],
client)
job.print_header = False
self.assertEqual(job.print_header, False)
del job.print_header
self.assertTrue(job.print_header is None)
def test_begin_w_bound_client(self):
PATH = 'projects/%s/jobs' % self.PROJECT
RESOURCE = self._makeResource()
# Ensure None for missing server-set props
del RESOURCE['statistics']['creationTime']
del RESOURCE['etag']
del RESOURCE['selfLink']
del RESOURCE['user_email']
conn = _Connection(RESOURCE)
client = _Client(project=self.PROJECT, connection=conn)
source = _Table(self.SOURCE_TABLE)
job = self._makeOne(self.JOB_NAME, source, [self.DESTINATION_URI],
client)
job.begin()
self.assertEqual(len(conn._requested), 1)
req = conn._requested[0]
self.assertEqual(req['method'], 'POST')
self.assertEqual(req['path'], '/%s' % PATH)
SENT = {
'jobReference': {
'projectId': self.PROJECT,
'jobId': self.JOB_NAME,
},
'configuration': {
'extract': {
'sourceTable': {
'projectId': self.PROJECT,
'datasetId': self.DS_NAME,
'tableId': self.SOURCE_TABLE
},
'destinationUris': [self.DESTINATION_URI],
},
},
}
self.assertEqual(req['data'], SENT)
self._verifyResourceProperties(job, RESOURCE)
def test_begin_w_alternate_client(self):
PATH = 'projects/%s/jobs' % self.PROJECT
RESOURCE = self._makeResource(ended=True)
EXTRACT_CONFIGURATION = {
'sourceTable': {
'projectId': self.PROJECT,
'datasetId': self.DS_NAME,
'tableId': self.SOURCE_TABLE,
},
'destinationUris': [self.DESTINATION_URI],
'compression': 'GZIP',
'destinationFormat': 'NEWLINE_DELIMITED_JSON',
'fieldDelimiter': '|',
'printHeader': False,
}
RESOURCE['configuration']['extract'] = EXTRACT_CONFIGURATION
conn1 = _Connection()
client1 = _Client(project=self.PROJECT, connection=conn1)
conn2 = _Connection(RESOURCE)
client2 = _Client(project=self.PROJECT, connection=conn2)
source = _Table(self.SOURCE_TABLE)
job = self._makeOne(self.JOB_NAME, source, [self.DESTINATION_URI],
client1)
job.compression = 'GZIP'
job.destination_format = 'NEWLINE_DELIMITED_JSON'
job.field_delimiter = '|'
job.print_header = False
job.begin(client=client2)
self.assertEqual(len(conn1._requested), 0)
self.assertEqual(len(conn2._requested), 1)
req = conn2._requested[0]
self.assertEqual(req['method'], 'POST')
self.assertEqual(req['path'], '/%s' % PATH)
SENT = {
'jobReference': {
'projectId': self.PROJECT,
'jobId': self.JOB_NAME,
},
'configuration': {
'extract': EXTRACT_CONFIGURATION,
},
}
self.assertEqual(req['data'], SENT)
self._verifyResourceProperties(job, RESOURCE)
def test_exists_miss_w_bound_client(self):
PATH = 'projects/%s/jobs/%s' % (self.PROJECT, self.JOB_NAME)
conn = _Connection()
client = _Client(project=self.PROJECT, connection=conn)
source = _Table(self.SOURCE_TABLE)
job = self._makeOne(self.JOB_NAME, source, [self.DESTINATION_URI],
client)
self.assertFalse(job.exists())
self.assertEqual(len(conn._requested), 1)
req = conn._requested[0]
self.assertEqual(req['method'], 'GET')
self.assertEqual(req['path'], '/%s' % PATH)
self.assertEqual(req['query_params'], {'fields': 'id'})
def test_exists_hit_w_alternate_client(self):
PATH = 'projects/%s/jobs/%s' % (self.PROJECT, self.JOB_NAME)
conn1 = _Connection()
client1 = _Client(project=self.PROJECT, connection=conn1)
conn2 = _Connection({})
client2 = _Client(project=self.PROJECT, connection=conn2)
source = _Table(self.SOURCE_TABLE)
job = self._makeOne(self.JOB_NAME, source, [self.DESTINATION_URI],
client1)
self.assertTrue(job.exists(client=client2))
self.assertEqual(len(conn1._requested), 0)
self.assertEqual(len(conn2._requested), 1)
req = conn2._requested[0]
self.assertEqual(req['method'], 'GET')
self.assertEqual(req['path'], '/%s' % PATH)
self.assertEqual(req['query_params'], {'fields': 'id'})
def test_reload_w_bound_client(self):
PATH = 'projects/%s/jobs/%s' % (self.PROJECT, self.JOB_NAME)
RESOURCE = self._makeResource()
conn = _Connection(RESOURCE)
client = _Client(project=self.PROJECT, connection=conn)
source = _Table(self.SOURCE_TABLE)
job = self._makeOne(self.JOB_NAME, source, [self.DESTINATION_URI],
client)
job.reload()
self.assertEqual(len(conn._requested), 1)
req = conn._requested[0]
self.assertEqual(req['method'], 'GET')
self.assertEqual(req['path'], '/%s' % PATH)
self._verifyResourceProperties(job, RESOURCE)
def test_reload_w_alternate_client(self):
PATH = 'projects/%s/jobs/%s' % (self.PROJECT, self.JOB_NAME)
RESOURCE = self._makeResource()
conn1 = _Connection()
client1 = _Client(project=self.PROJECT, connection=conn1)
conn2 = _Connection(RESOURCE)
client2 = _Client(project=self.PROJECT, connection=conn2)
source = _Table(self.SOURCE_TABLE)
job = self._makeOne(self.JOB_NAME, source, [self.DESTINATION_URI],
client1)
job.reload(client=client2)
self.assertEqual(len(conn1._requested), 0)
self.assertEqual(len(conn2._requested), 1)
req = conn2._requested[0]
self.assertEqual(req['method'], 'GET')
self.assertEqual(req['path'], '/%s' % PATH)
self._verifyResourceProperties(job, RESOURCE)
class _Client(object):
def __init__(self, project='project', connection=None):
self.project = project
self.connection = connection
class _Table(object):
def __init__(self, name=None):
self._name = name
@property
def name(self):
if self._name is not None:
return self._name
return TestLoadTableFromStorageJob.TABLE_NAME
@property
def project(self):
return TestLoadTableFromStorageJob.PROJECT
@property
def dataset_name(self):
return TestLoadTableFromStorageJob.DS_NAME
class _Connection(object):
def __init__(self, *responses):
self._responses = responses
self._requested = []
def api_request(self, **kw):
from gcloud.exceptions import NotFound
self._requested.append(kw)
try:
response, self._responses = self._responses[0], self._responses[1:]
except IndexError:
raise NotFound('miss')
else:
return response
| EugenePig/gcloud-python | gcloud/bigquery/test_job.py | Python | apache-2.0 | 50,613 |
# Copyright 2007 John Kasunich and Jeff Epler
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import rs274.OpenGLTk, Tkinter, signal
from minigl import *
from math import *
class Collection(object):
def __init__(self, parts):
self.parts = parts
self.vol = 0
def traverse(self):
for p in self.parts:
if hasattr(p, "apply"):
p.apply()
if hasattr(p, "capture"):
p.capture()
if hasattr(p, "draw"):
p.draw()
if hasattr(p, "traverse"):
p.traverse()
if hasattr(p, "unapply"):
p.unapply()
def volume(self):
if hasattr(self, "vol") and self.vol != 0:
vol = self.vol
else:
vol = sum(part.volume() for part in self.parts)
#print "Collection.volume", vol
return vol
# a collection consisting of overlapping parts will have an incorrect
# volume, because overlapping volumes will be counted twice. If the
# correct volume is known, it can be set using this method
def set_volume(self,vol):
self.vol = vol;
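# Example (illustrative sketch, not part of the original file): a Collection of
# overlapping parts double-counts the shared region, so the known true volume
# can be supplied instead. The numbers below are hypothetical.
#
#   c = Collection([Box(0, 0, 0, 2, 2, 2), Box(1, 1, 1, 3, 3, 3)])
#   c.set_volume(15.0)   # 8 + 8 minus the 1x1x1 overlap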
class Translate(Collection):
def __init__(self, parts, x, y, z):
self.parts = parts
self.where = x, y, z
def apply(self):
glPushMatrix()
glTranslatef(*self.where)
def unapply(self):
glPopMatrix()
class Scale(Collection):
def __init__(self, parts, x, y, z):
self.parts = parts
self.scaleby = x, y, z
def apply(self):
glPushMatrix()
glScalef(*self.scaleby)
def unapply(self):
glPopMatrix()
class HalTranslate(Collection):
def __init__(self, parts, comp, var, x, y, z):
self.parts = parts
self.where = x, y, z
self.comp = comp
self.var = var
def apply(self):
x, y, z = self.where
v = self.comp[self.var]
glPushMatrix()
glTranslatef(x*v, y*v, z*v)
def unapply(self):
glPopMatrix()
class HalRotate(Collection):
def __init__(self, parts, comp, var, th, x, y, z):
self.parts = parts
self.where = th, x, y, z
self.comp = comp
self.var = var
def apply(self):
th, x, y, z = self.where
glPushMatrix()
glRotatef(th * self.comp[self.var], x, y, z)
def unapply(self):
glPopMatrix()
class Rotate(Collection):
def __init__(self, parts, th, x, y, z):
self.parts = parts
self.where = th, x, y, z
def apply(self):
th, x, y, z = self.where
glPushMatrix()
glRotatef(th, x, y, z)
def unapply(self):
glPopMatrix()
class Track(Collection):
'''move and rotate an object to point from one capture()'d
coordinate system to another.
we need "world" to convert coordinates from GL_MODELVIEW coordinates
to our coordinate system'''
def __init__(self, parts, position, target, world):
self.parts = parts
self.target = target
self.position = position
self.world2view = world
def angle_to(self,x,y,z):
'''returns polar coordinates in degrees to a point from the origin
azimuth is measured around the z axis; elevation is measured up from the xy plane; r is the distance'''
azimuth = atan2(y, x)*180/pi #longitude
elevation = atan2(z, sqrt(x**2 + y**2))*180/pi
radius = sqrt(x**2+y**2+z**2)
return((azimuth, elevation, radius))
def map_coords(self,tx,ty,tz,transform):
# now we have to transform them to the world frame
wx = tx*transform[0]+ty*transform[4]+tz*transform[8]+transform[12]
wy = tx*transform[1]+ty*transform[5]+tz*transform[9]+transform[13]
wz = tx*transform[2]+ty*transform[6]+tz*transform[10]+transform[14]
return([wx,wy,wz])
def apply(self):
#make sure we have something to work with first
if (self.world2view.t == []):
#something's borkled - give up
print "vismach.py: Track: why am i here? world is not in the scene yet"
glPushMatrix()
return
view2world = invert(self.world2view.t)
px, py, pz = self.position.t[12:15]
px, py, pz = self.map_coords(px,py,pz,view2world)
tx, ty, tz = self.target.t[12:15]
tx, ty, tz = self.map_coords(tx,ty,tz,view2world)
dx = tx - px; dy = ty - py; dz = tz - pz;
(az,el,r) = self.angle_to(dx,dy,dz)
if(hasattr(HUD, "debug_track") and HUD.debug_track == 1):
HUD.strs = []
HUD.strs += ["current coords: %3.4f %3.4f %3.4f " % (px, py, pz)]
HUD.strs += ["target coords: %3.4f %3.4f %3.4f" % (tx, ty, tz)]
HUD.strs += ["az,el,r: %3.4f %3.4f %3.4f" % (az,el,r)]
glPushMatrix()
glTranslatef(px,py,pz)
glRotatef(az-90,0,0,1)
glRotatef(el-90,1,0,0)
def unapply(self):
glPopMatrix()
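# Example (illustrative sketch, not part of the original file): Track is normally
# given three Capture() objects placed elsewhere in the scene graph -- one at the
# part being aimed, one at the target, and one at the world frame. The names and
# dimensions below are hypothetical.
#
#   laser2view = Capture(); target2view = Capture(); world2view = Capture()
#   pointer = Track([CylinderZ(0, 1, 50, 1)], laser2view, target2view, world2view)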
class CoordsBase(object):
def __init__(self, *args):
self._coords = args
self.q = gluNewQuadric()
def coords(self):
return self._coords
# give endpoint X values and radii
# resulting cylinder is on the X axis
class CylinderX(CoordsBase):
def draw(self):
x1, r1, x2, r2 = self.coords()
if x1 > x2:
tmp = x1
x1 = x2
x2 = tmp
tmp = r1
r1 = r2
r2 = tmp
glPushMatrix()
# GL creates cylinders along Z, so need to rotate
z1 = x1
z2 = x2
glRotatef(90,0,1,0)
# need to translate the whole thing to z1
glTranslatef(0,0,z1)
# the cylinder starts out at Z=0
gluCylinder(self.q, r1, r2, z2-z1, 32, 1)
# bottom cap
glRotatef(180,1,0,0)
gluDisk(self.q, 0, r1, 32, 1)
glRotatef(180,1,0,0)
# the top cap needs flipped and translated
glPushMatrix()
glTranslatef(0,0,z2-z1)
gluDisk(self.q, 0, r2, 32, 1)
glPopMatrix()
glPopMatrix()
def volume(self):
x1, r1, x2, r2 = self.coords()
# actually a frustum of a cone
vol = 3.1415927/3.0 * abs(x1-x2)*(r1*r1+r1*r2+r2*r2)
#print "CylinderX.volume", vol
return vol
# give endpoint Y values and radii
# resulting cylinder is on the Y axis
class CylinderY(CoordsBase):
def __init__(self, y1, r1, y2, r2):
self._coords = y1, r1, y2, r2
self.q = gluNewQuadric()
def draw(self):
y1, r1, y2, r2 = self.coords()
if y1 > y2:
tmp = y1
y1 = y2
y2 = tmp
tmp = r1
r1 = r2
r2 = tmp
glPushMatrix()
# GL creates cylinders along Z, so need to rotate
z1 = y1
z2 = y2
glRotatef(-90,1,0,0)
# need to translate the whole thing to z1
glTranslatef(0,0,z1)
# the cylinder starts out at Z=0
gluCylinder(self.q, r1, r2, z2-z1, 32, 1)
# bottom cap
glRotatef(180,1,0,0)
gluDisk(self.q, 0, r1, 32, 1)
glRotatef(180,1,0,0)
# the top cap needs flipped and translated
glPushMatrix()
glTranslatef(0,0,z2-z1)
gluDisk(self.q, 0, r2, 32, 1)
glPopMatrix()
glPopMatrix()
def volume(self):
y1, r1, y2, r2 = self.coords()
# actually a frustum of a cone
vol = 3.1415927/3.0 * abs(y1-y2)*(r1*r1+r1*r2+r2*r2)
#print "CylinderY.volume", vol
return vol
class CylinderZ(CoordsBase):
def draw(self):
z1, r1, z2, r2 = self.coords()
if z1 > z2:
tmp = z1
z1 = z2
z2 = tmp
tmp = r1
r1 = r2
r2 = tmp
# need to translate the whole thing to z1
glPushMatrix()
glTranslatef(0,0,z1)
# the cylinder starts out at Z=0
gluCylinder(self.q, r1, r2, z2-z1, 32, 1)
# bottom cap
glRotatef(180,1,0,0)
gluDisk(self.q, 0, r1, 32, 1)
glRotatef(180,1,0,0)
# the top cap needs flipped and translated
glPushMatrix()
glTranslatef(0,0,z2-z1)
gluDisk(self.q, 0, r2, 32, 1)
glPopMatrix()
glPopMatrix()
def volume(self):
z1, r1, z2, r2 = self.coords()
# actually a frustum of a cone
vol = 3.1415927/3.0 * abs(z1-z2)*(r1*r1+r1*r2+r2*r2)
#print "CylinderZ.volume", vol
return vol
# give center and radius
class Sphere(CoordsBase):
def draw(self):
x, y, z, r = self.coords()
# need to translate the whole thing to x,y,z
glPushMatrix()
glTranslatef(x,y,z)
# the sphere starts out at the origin
gluSphere(self.q, r, 32, 16)
glPopMatrix()
def volume(self):
x, y, z, r = self.coords()
vol = 1.3333333*3.1415927*r*r*r
#print "Sphere.volume", vol
return vol
# triangular plate in XY plane
# specify the corners Z values for each side
class TriangleXY(CoordsBase):
def draw(self):
x1, y1, x2, y2, x3, y3, z1, z2 = self.coords()
x12 = x1-x2
y12 = y1-y2
x13 = x1-x3
y13 = y1-y3
cross = x12*y13 - x13*y12
if cross < 0:
tmp = x2
x2 = x3
x3 = tmp
tmp = y2
y2 = y3
y3 = tmp
if z1 > z2:
tmp = z1
z1 = z2
z2 = tmp
x12 = x1-x2
y12 = y1-y2
x23 = x2-x3
y23 = y2-y3
x31 = x3-x1
y31 = y3-y1
glBegin(GL_QUADS)
# side 1-2
h = hypot(x12,y12)
glNormal3f(-y12/h,x12/h,0)
glVertex3f(x1, y1, z1)
glVertex3f(x2, y2, z1)
glVertex3f(x2, y2, z2)
glVertex3f(x1, y1, z2)
# side 2-3
h = hypot(x23,y23)
glNormal3f(-y23/h,x23/h,0)
glVertex3f(x2, y2, z1)
glVertex3f(x3, y3, z1)
glVertex3f(x3, y3, z2)
glVertex3f(x2, y2, z2)
# side 3-1
h = hypot(x31,y31)
glNormal3f(-y31/h,x31/h,0)
glVertex3f(x3, y3, z1)
glVertex3f(x1, y1, z1)
glVertex3f(x1, y1, z2)
glVertex3f(x3, y3, z2)
glEnd()
glBegin(GL_TRIANGLES)
# upper face
glNormal3f(0,0,1)
glVertex3f(x1, y1, z2)
glVertex3f(x2, y2, z2)
glVertex3f(x3, y3, z2)
# lower face
glNormal3f(0,0,-1)
glVertex3f(x1, y1, z1)
glVertex3f(x3, y3, z1)
glVertex3f(x2, y2, z1)
glEnd()
def volume(self):
x1, y1, x2, y2, x3, y3, z1, z2 = self.coords()
# compute pts 2 and 3 relative to 1 (puts pt1 at origin)
x2 = x2-x1
x3 = x3-x1
y2 = y2-y1
y3 = y3-y1
# compute area of triangle
area = 0.5*abs(x2*y3 - x3*y2)
thk = abs(z1-z2)
vol = area*thk
#print "TriangleXY.volume = area * thickness)",vol, area, thk
return vol
# triangular plate in XZ plane
class TriangleXZ(TriangleXY):
def coords(self):
x1, z1, x2, z2, x3, z3, y1, y2 = TriangleXY.coords(self)
return x1, z1, x2, z2, x3, z3, -y1, -y2
def draw(self):
glPushMatrix()
glRotatef(90,1,0,0)
# create the triangle in XY plane
TriangleXY.draw(self)
# restore the original transform
glPopMatrix()
def volume(self):
vol = TriangleXY.volume(self)
#print " TriangleXZ.volume",vol
return vol
# triangular plate in YZ plane
class TriangleYZ(TriangleXY):
def coords(self):
y1, z1, y2, z2, y3, z3, x1, x2 = TriangleXY.coords(self)
return z1, y1, z2, y2, z3, y3, -x1, -x2
def draw(self):
glPushMatrix()
glRotatef(90,0,-1,0)
# create the triangle in XY plane
TriangleXY.draw(self)
# restore the original transform
glPopMatrix()
def volume(self):
vol = TriangleXY.volume(self)
#print " TriangleYZ.volume",vol
return vol
class ArcX(CoordsBase):
def draw(self):
x1, x2, r1, r2, a1, a2, steps = self.coords()
if x1 > x2:
tmp = x1
x1 = x2
x2 = tmp
if r1 > r2:
tmp = r1
r1 = r2
r2 = tmp
while a1 > a2:
a2 = a2 + 360
astep = ((a2-a1)/steps)*(pi/180)
a1rads = a1 * (pi/180)
# positive X end face
glBegin(GL_QUAD_STRIP)
glNormal3f(1,0,0)
n = 0
while n <= steps:
angle = a1rads+n*astep
s = sin(angle)
c = cos(angle)
glVertex3f(x2, r1*s, r1*c)
glVertex3f(x2, r2*s, r2*c)
n = n + 1
glEnd()
# negative X end face
glBegin(GL_QUAD_STRIP)
glNormal3f(-1,0,0)
n = 0
while n <= steps:
angle = a1rads+n*astep
s = sin(angle)
c = cos(angle)
glVertex3f(x1, r1*s, r1*c)
glVertex3f(x1, r2*s, r2*c)
n = n + 1
glEnd()
# inner diameter
glBegin(GL_QUAD_STRIP)
n = 0
while n <= steps:
angle = a1rads+n*astep
s = sin(angle)
c = cos(angle)
glNormal3f(0,-s, -c)
glVertex3f(x1, r1*s, r1*c)
glVertex3f(x2, r1*s, r1*c)
n = n + 1
glEnd()
# outer diameter
glBegin(GL_QUAD_STRIP)
n = 0
while n <= steps:
angle = a1rads+n*astep
s = sin(angle)
c = cos(angle)
glNormal3f(0, s, c)
glVertex3f(x1, r2*s, r2*c)
glVertex3f(x2, r2*s, r2*c)
n = n + 1
glEnd()
# end plates
glBegin(GL_QUADS)
# first end plate
angle = a1 * (pi/180)
s = sin(angle)
c = cos(angle)
glNormal3f(0, -c, s)
glVertex3f(x1, r2*s, r2*c)
glVertex3f(x2, r2*s, r2*c)
glVertex3f(x2, r1*s, r1*c)
glVertex3f(x1, r1*s, r1*c)
# other end
angle = a2 * (pi/180)
s = sin(angle)
c = cos(angle)
glNormal3f(0, c, -s)
glVertex3f(x1, r2*s, r2*c)
glVertex3f(x2, r2*s, r2*c)
glVertex3f(x2, r1*s, r1*c)
glVertex3f(x1, r1*s, r1*c)
glEnd()
def volume(self):
x1, x2, r1, r2, a1, a2, steps = self.coords()
if x1 > x2:
tmp = x1
x1 = x2
x2 = tmp
if r1 > r2:
tmp = r1
r1 = r2
r2 = tmp
while a1 > a2:
a2 = a2 + 360
height = x2 - x1
angle = a2 - a1
area = (angle/360.0)*pi*(r2*r2-r1*r1)
vol = area * height
#print "Arc.volume = angle * area * height",vol, angle, area, height
return vol
# six coordinate version - specify each side of the box
class Box(CoordsBase):
def draw(self):
x1, y1, z1, x2, y2, z2 = self.coords()
if x1 > x2:
tmp = x1
x1 = x2
x2 = tmp
if y1 > y2:
tmp = y1
y1 = y2
y2 = tmp
if z1 > z2:
tmp = z1
z1 = z2
z2 = tmp
glBegin(GL_QUADS)
# bottom face
glNormal3f(0,0,-1)
glVertex3f(x2, y1, z1)
glVertex3f(x1, y1, z1)
glVertex3f(x1, y2, z1)
glVertex3f(x2, y2, z1)
# positive X face
glNormal3f(1,0,0)
glVertex3f(x2, y1, z1)
glVertex3f(x2, y2, z1)
glVertex3f(x2, y2, z2)
glVertex3f(x2, y1, z2)
# positive Y face
glNormal3f(0,1,0)
glVertex3f(x1, y2, z1)
glVertex3f(x1, y2, z2)
glVertex3f(x2, y2, z2)
glVertex3f(x2, y2, z1)
# negative Y face
glNormal3f(0,-1,0)
glVertex3f(x2, y1, z2)
glVertex3f(x1, y1, z2)
glVertex3f(x1, y1, z1)
glVertex3f(x2, y1, z1)
# negative X face
glNormal3f(-1,0,0)
glVertex3f(x1, y1, z1)
glVertex3f(x1, y1, z2)
glVertex3f(x1, y2, z2)
glVertex3f(x1, y2, z1)
# top face
glNormal3f(0,0,1)
glVertex3f(x1, y2, z2)
glVertex3f(x1, y1, z2)
glVertex3f(x2, y1, z2)
glVertex3f(x2, y2, z2)
glEnd()
def volume(self):
x1, y1, z1, x2, y2, z2 = self.coords()
vol = abs((x1-x2)*(y1-y2)*(z1-z2))
#print "Box.volume", vol
return vol
# specify the width in X and Y, and the height in Z
# the box is centered on the origin
class BoxCentered(Box):
def __init__(self, xw, yw, zw):
Box.__init__(self, -xw/2.0, -yw/2.0, -zw/2.0, xw/2.0, yw/2.0, zw/2.0)
# specify the width in X and Y, and the height in Z
# the box is centered in X and Y, and runs from Z=0 up
# (or down) to the specified Z value
class BoxCenteredXY(Box):
def __init__(self, xw, yw, zw):
Box.__init__(self, -xw/2.0, -yw/2.0, 0, xw/2.0, yw/2.0, zw)
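# Example (illustrative sketch, not part of the original file): primitives are
# combined into a scene graph with the Collection subclasses above; all names
# and dimensions below are hypothetical.
#
#   column = Color([0.5, 0.5, 0.5, 0], [CylinderZ(0, 10, 200, 10)])
#   base   = BoxCentered(100, 100, 20)
#   frame  = Translate([column, base], 0, 0, -20)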
# capture current transformation matrix
# note that this tranforms from the current coordinate system
# to the viewport system, NOT to the world system
class Capture(object):
def __init__(self):
self.t = []
def capture(self):
self.t = glGetDoublev(GL_MODELVIEW_MATRIX)
def volume(self):
return 0.0
# function to invert a transform matrix
# based on http://steve.hollasch.net/cgindex/math/matrix/afforthinv.c
# with simplifications since we don't do scaling
# This function inverts a 4x4 matrix that is affine and orthogonal. In
# other words, the perspective components are [0 0 0 1], and the basis
# vectors are orthogonal to each other. In addition, the matrix must
# not do scaling
def invert(src):
# make a copy
inv=src[:]
# The inverse of the upper 3x3 is the transpose (since the basis
# vectors are orthogonal to each other).
inv[1],inv[4] = inv[4],inv[1]
inv[2],inv[8] = inv[8],inv[2]
inv[6],inv[9] = inv[9],inv[6]
# The inverse of the translation component is just the negation
# of the translation after dotting with the new upper 3x3 rows.
inv[12] = -(src[12]*inv[0] + src[13]*inv[4] + src[14]*inv[8])
inv[13] = -(src[12]*inv[1] + src[13]*inv[5] + src[14]*inv[9])
inv[14] = -(src[12]*inv[2] + src[13]*inv[6] + src[14]*inv[10])
return inv
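# Example (illustrative check, not part of the original file): for a pure
# translation the upper 3x3 is the identity, so invert() just negates the
# translation components.
#
#   m = [1,0,0,0,  0,1,0,0,  0,0,1,0,  5,-2,3,1]
#   invert(m)[12:15] == [-5, 2, -3]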
class Hud(object):
'''head up display - draws a semi-transparent text box.
use HUD.strs for things that must be updated constantly,
and HUD.show("stuff") for one-shot things like error messages'''
def __init__(self, showme=1):
self.app = []
self.strs = []
self.messages = []
self.showme = 0
def show(self, string="xyzzy"):
self.showme = 1
if string != "xyzzy":
self.messages += [str(string)]
def hide(self):
self.showme = 0
def clear(self):
self.messages = []
def draw(self):
drawtext = self.strs + self.messages
self.lines = len(drawtext)
#draw head-up-display
#see axis.py for more font/color configurability
if ((self.showme == 0) or (self.lines == 0)):
return
glMatrixMode(GL_PROJECTION)
glPushMatrix()
glLoadIdentity()
#pointer to font?
fontbase = int(self.app.loadbitmapfont("9x15"))
char_width, char_height = 9, 15
xmargin,ymargin = 5,5
ypos = float(self.app.winfo_height())
glOrtho(0.0, self.app.winfo_width(), 0.0, ypos, -1.0, 1.0)
glMatrixMode(GL_MODELVIEW)
glPushMatrix()
glLoadIdentity()
#draw the text box
maxlen = max([len(p) for p in drawtext])
box_width = maxlen * char_width
glDepthFunc(GL_ALWAYS)
glDepthMask(GL_FALSE)
glDisable(GL_LIGHTING)
glEnable(GL_BLEND)
glEnable(GL_NORMALIZE)
glBlendFunc(GL_ONE, GL_CONSTANT_ALPHA)
glColor3f(0.2,0,0)
glBlendColor(0,0,0,0.5) #rgba
glBegin(GL_QUADS)
glVertex3f(0, ypos, 1) #upper left
glVertex3f(0, ypos - 2*ymargin - char_height*len(drawtext), 1) #lower left
glVertex3f(box_width+2*xmargin, ypos - 2*ymargin - char_height*len(drawtext), 1) #lower right
glVertex3f(box_width+2*xmargin, ypos , 1) #upper right
glEnd()
glDisable(GL_BLEND)
glEnable(GL_LIGHTING)
#fill the box with text
maxlen = 0
ypos -= char_height+ymargin
i=0
glDisable(GL_LIGHTING)
glColor3f(0.9,0.9,0.9)
for string in drawtext:
maxlen = max(maxlen, len(string))
# if i < len(homed) and homed[i]:
# glRasterPos2i(6, ypos)
# glBitmap(13, 16, 0, 3, 17, 0, homeicon)
glRasterPos2i(xmargin, int(ypos))
for char in string:
glCallList(fontbase + ord(char))
# if i < len(homed) and limit[i]:
# glBitmap(13, 16, -5, 3, 17, 0, limiticon)
ypos -= char_height
i = i + 1
glDepthFunc(GL_LESS)
glDepthMask(GL_TRUE)
glEnable(GL_LIGHTING)
glPopMatrix()
glMatrixMode(GL_PROJECTION)
glPopMatrix()
glMatrixMode(GL_MODELVIEW)
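# Example (illustrative sketch, not part of the original file): the HUD is
# attached to the Opengl widget by main() below; typical use is to refresh .strs
# on every redraw and push one-shot messages with show(). The strings are
# hypothetical.
#
#   myhud = Hud()
#   myhud.strs = ["X: %3.2f" % 12.0]
#   myhud.show("tool change requested")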
class O(rs274.OpenGLTk.Opengl):
def __init__(self, *args, **kw):
rs274.OpenGLTk.Opengl.__init__(self, *args, **kw)
self.r_back = self.g_back = self.b_back = 0
self.bind('<Button-4>', self.zoomin)
self.bind('<Button-5>', self.zoomout)
#self.q1 = gluNewQuadric()
#self.q2 = gluNewQuadric()
#self.q3 = gluNewQuadric()
self.plotdata = []
self.plotlen = 4000
#does not show HUD by default
self.hud = Hud()
def set_viewangle(self, lat, lon):
self.lat = lat
self.lon = lon
rs274.OpenGLTk.glRotateScene(self, 0.5, self.xcenter, self.ycenter, self.zcenter, 0, 0, 0, 0)
self.tkRedraw()
def zoomin(self, event):
self.distance = self.distance / 1.1
self.tkRedraw()
def zoomout(self, event):
self.distance = self.distance * 1.1
self.tkRedraw()
def basic_lighting(self):
self.activate()
glLightfv(GL_LIGHT0, GL_POSITION, (1, -1, .5, 0))
glLightfv(GL_LIGHT0, GL_AMBIENT, (.2,.2,.2,0))
glLightfv(GL_LIGHT0, GL_DIFFUSE, (.6,.6,.4,0))
glLightfv(GL_LIGHT0+1, GL_POSITION, (-1, -1, .5, 0))
glLightfv(GL_LIGHT0+1, GL_AMBIENT, (.0,.0,.0,0))
glLightfv(GL_LIGHT0+1, GL_DIFFUSE, (.0,.0,.4,0))
glMaterialfv(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE, (1,1,1,0))
glDisable(GL_CULL_FACE)
glEnable(GL_LIGHTING)
glEnable(GL_LIGHT0)
glEnable(GL_LIGHT0+1)
glDepthFunc(GL_LESS)
glEnable(GL_DEPTH_TEST)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
def redraw(self, *args):
if self.winfo_width() == 1: return
self.model.traverse()
# current coords: world
# the matrices tool2view, work2view, and world2view
# transform from tool/work/world coords to viewport coords
# if we want to draw in tool coords, we need to do
# "tool -> view -> world" (since the current frame is world)
# and if we want to draw in work coords, we need
# "work -> view -> world". For both, we need to invert
# the world2view matrix to do the second step
view2world = invert(self.world2view.t)
# likewise, for backplot, we want to transform the tooltip
# position from tool coords (where it is [0,0,0]) to work
# coords, so we need tool -> view -> work
# so lets also invert the work2view matrix
view2work = invert(self.work2view.t)
# since backplot lines only need vertexes, not orientation,
# and the tooltip is at the origin, getting the tool coords
# is easy
tx, ty, tz = self.tool2view.t[12:15]
# now we have to transform them to the work frame
wx = tx*view2work[0]+ty*view2work[4]+tz*view2work[8]+view2work[12]
wy = tx*view2work[1]+ty*view2work[5]+tz*view2work[9]+view2work[13]
wz = tx*view2work[2]+ty*view2work[6]+tz*view2work[10]+view2work[14]
# wx, wy, wz are the values to use for backplot
# so we save them in a buffer
if len(self.plotdata) == self.plotlen:
del self.plotdata[:self.plotlen / 10]
point = [ wx, wy, wz ]
if not self.plotdata or point != self.plotdata[-1]:
self.plotdata.append(point)
# now lets draw something in the tool coordinate system
#glPushMatrix()
# matrixes take effect in reverse order, so the next
# two lines do "tool -> view -> world"
#glMultMatrixd(view2world)
#glMultMatrixd(self.tool2view.t)
# do drawing here
# cylinder normally goes to +Z, we want it down
#glTranslatef(0,0,-60)
#gluCylinder(self.q1, 20, 20, 60, 32, 16)
# back to world coords
#glPopMatrix()
# we can also draw in the work coord system
glPushMatrix()
# "work -> view -> world"
glMultMatrixd(view2world)
glMultMatrixd(self.work2view.t)
# now we can draw in work coords, and whatever we draw
# will move with the work, (if the work is attached to
# a table or indexer or something that moves with
# respect to the world
# just a test object, sitting on the table
#gluCylinder(self.q2, 40, 20, 60, 32, 16)
#draw head up display
if(hasattr(self.hud, "draw")):
self.hud.draw()
# draw backplot
glDisable(GL_LIGHTING)
glLineWidth(2)
glColor3f(1.0,0.5,0.5)
glBegin(GL_LINE_STRIP)
for p in self.plotdata:
glVertex3f(*p)
glEnd()
glEnable(GL_LIGHTING)
glColor3f(1,1,1)
glLineWidth(1)
glDisable(GL_BLEND)
glDepthFunc(GL_LESS)
# back to world again
glPopMatrix()
class Color(Collection):
def __init__(self, color, parts):
self.color = color
Collection.__init__(self, parts)
def apply(self):
glPushAttrib(GL_LIGHTING_BIT)
glMaterialfv(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE, self.color)
def unapply(self):
glPopAttrib()
class AsciiSTL:
def __init__(self, filename=None, data=None):
if data is None:
data = open(filename, "r")
elif isinstance(data, str):
data = data.split("\n")
self.list = None
t = []
n = [0,0,0]
self.d = d = []
for line in data:
if line.find("normal") != -1:
line = line.split()
x, y, z = map(float, line[-3:])
n = [x,y,z]
elif line.find("vertex") != -1:
line = line.split()
x, y, z = map(float, line[-3:])
t.append([x,y,z])
if len(t) == 3:
if n == [0,0,0]:
dx1 = t[1][0] - t[0][0]
dy1 = t[1][1] - t[0][1]
dz1 = t[1][2] - t[0][2]
dx2 = t[2][0] - t[0][0]
dy2 = t[2][1] - t[0][1]
dz2 = t[2][2] - t[0][2]
n = [dy1*dz2 - dz1*dy2, dz1*dx2 - dx1*dz2, dx1*dy2 - dy1*dx2]
d.append((n, t))
t = []
n = [0,0,0]
def draw(self):
if self.list is None:
# OpenGL isn't ready yet in __init__ so the display list
# is created during the first draw
self.list = glGenLists(1)
glNewList(self.list, GL_COMPILE_AND_EXECUTE)
glBegin(GL_TRIANGLES)
for n, t in self.d:
glNormal3f(*n)
glVertex3f(*t[0])
glVertex3f(*t[1])
glVertex3f(*t[2])
glEnd()
glEndList()
del self.d
else:
glCallList(self.list)
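# Example (illustrative sketch, not part of the original file): an STL mesh can
# be dropped into the scene graph like any other part; the file name is
# hypothetical.
#
#   casting = Color([0.3, 0.3, 0.3, 0], [AsciiSTL(filename="base_casting.stl")])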
class AsciiOBJ:
def __init__(self, filename=None, data=None):
if data is None:
data = open(filename, "r")
elif isinstance(data, str):
data = data.split("\n")
self.v = v = []
self.vn = vn = []
self.f = f = []
for line in data:
if line.startswith("#"): continue
if line.startswith("vn"):
vn.append([float(w) for w in line.split()[1:]])
elif line.startswith("v"):
v.append([float(w) for w in line.split()[1:]])
elif line.startswith("f"):
f.append(self.parse_face(line))
# print v[:5]
# print vn[:5]
# print f[:5]
self.list = None
def parse_int(self, i):
if i == '': return None
return int(i)
def parse_slash(self, word):
return [self.parse_int(i) for i in word.split("/")]
def parse_face(self, line):
return [self.parse_slash(w) for w in line.split()[1:]]
def draw(self):
if self.list is None:
# OpenGL isn't ready yet in __init__ so the display list
# is created during the first draw
self.list = glGenLists(1)
glNewList(self.list, GL_COMPILE)
glDisable(GL_CULL_FACE)
glBegin(GL_TRIANGLES)
#print "obj", len(self.f)
for f in self.f:
for v, t, n in f:
if n:
glNormal3f(*self.vn[n-1])
glVertex3f(*self.v[v-1])
glEnd()
glEndList()
del self.v
del self.vn
del self.f
glCallList(self.list)
def main(model, tool, work, size=10, hud=0, rotation_vectors=None, lat=0, lon=0):
app = Tkinter.Tk()
t = O(app, double=1, depth=1)
# set which axes to rotate around
if rotation_vectors: t.rotation_vectors = rotation_vectors
# we want to be able to see the model from all angles
t.set_latitudelimits(-180, 180)
# set starting viewpoint if desired
t.after(100, lambda: t.set_viewangle(lat, lon))
#there's probably a better way of doing this
global HUD
HUD = 0
if(hud != 0 and hasattr(hud, "app")):
HUD = hud
#point our app at the global
t.hud = HUD
t.hud.app = t #HUD needs to know where to draw
# need to capture the world coordinate system
world = Capture()
t.model = Collection([model, world])
t.distance = size * 3
t.near = size * 0.01
t.far = size * 10.0
t.tool2view = tool
t.world2view = world
t.work2view = work
t.pack(fill="both", expand=1)
def update():
t.tkRedraw()
t.after(100, update)
update()
def quit(*args):
raise SystemExit
signal.signal(signal.SIGTERM, quit)
signal.signal(signal.SIGINT, quit)
app.mainloop()
| yishinli/emc2 | lib/python/vismach.py | Python | lgpl-2.1 | 27,917 |
## begin license ##
#
# "Weightless" is a High Performance Asynchronous Networking Library. See http://weightless.io
#
# Copyright (C) 2006-2011 Seek You Too (CQ2) http://www.cq2.nl
# Copyright (C) 2011-2012, 2018-2021 Seecr (Seek You Too B.V.) https://seecr.nl
#
# This file is part of "Weightless"
#
# "Weightless" is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# "Weightless" is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with "Weightless"; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
## end license ##
from unittest import TestCase
from weightless.http import httpspec
class HttpSpecTest(TestCase):
def testParseHeader(self):
self.assertEqual((b'aap', {}), httpspec.parseHeaderFieldvalue(b'aap'))
self.assertEqual((b'aap', {b'noot': b'mies'}), httpspec.parseHeaderFieldvalue(b'aap; noot=mies'))
self.assertEqual((b'aap', {b'noot': b'mies', b'vis': b'vuur'}), httpspec.parseHeaderFieldvalue(b'aap; noot=mies; vis=vuur'))
def testParseHeaders(self):
self.assertEqual({b'Aap': b'noot'}, httpspec.parseHeaders(b"aap: noot\r\n"))
self.assertEqual({b'Aap': b'noot', b'Vis': b'vuur'}, httpspec.parseHeaders(b"aap: noot\r\nvis: vuur\r\n"))
self.assertEqual({b'Aap': [b'noot', b'vuur']}, httpspec.parseHeaders(b"aap: noot\r\naap: vuur\r\n"))
self.assertEqual({'Aap': 'noot'}, httpspec.parseHeadersString("aap: noot\r\n"))
def testSetCookieAlwaysAsList(self):
self.assertEqual({b'Set-Cookie': [b'noot']}, httpspec.parseHeaders(b"Set-Cookie: noot\r\n"))
self.assertEqual({b'Set-Cookie': [b'noot', b'vuur']}, httpspec.parseHeaders(b"Set-Cookie: noot\r\nSet-Cookie: vuur\r\n"))
def testParseContentDispositionValues(self):
self.assertEqual((b'attachment', {}), httpspec.parseHeaderFieldvalue(b'attachment'))
self.assertEqual((b'attachment', {b'filename': b'document.pdf'}),
httpspec.parseHeaderFieldvalue(b'attachment; filename=document.pdf'))
self.assertEqual((b'attachment', {b'filename': b'with a ;.pdf'}),
httpspec.parseHeaderFieldvalue(b'attachment; filename="with a ;.pdf"'))
self.assertEqual((b'attachment', {b'filename': b'document.pdf', b'filename*': b'another document.pdf'}),
httpspec.parseHeaderFieldvalue(b'attachment; filename=document.pdf; filename*="another document.pdf"'))
self.assertEqual((b'attachment', {b'filename': r'with a \".pdf'.encode()}),
httpspec.parseHeaderFieldvalue(r'attachment; filename="with a \".pdf"'.encode()))
def testBoundary(self):
self.assertEqual((b'multipart/form-data', {b'boundary': b'-=-=-=-=-=-=-=-=TestBoundary1234567890'}),
httpspec.parseHeaderFieldvalue(b'multipart/form-data; boundary="-=-=-=-=-=-=-=-=TestBoundary1234567890"'))
| seecr/weightless-core | test/_http/httpspectest.py | Python | gpl-2.0 | 3,316 |
""" JobRunningWaitingRatioPolicy
Policy that calculates the efficiency following the formula:
( running ) / ( running + waiting + staging )
if the denominator is smaller than 10, it does not take any decision.
"""
from DIRAC import S_OK
from DIRAC.ResourceStatusSystem.PolicySystem.PolicyBase import PolicyBase
__RCSID__ = '$Id: JobRunningWaitingRatioPolicy.py 60769 2013-01-18 11:50:36Z ubeda $'
class JobRunningWaitingRatioPolicy( PolicyBase ):
"""
The JobRunningWaitingRatioPolicy class is a policy that checks the efficiency of the
jobs according to what is on JobDB.
Evaluates the JobRunningWaitingRatioPolicy results given by the JobCommand.JobCommand
"""
@staticmethod
def _evaluate( commandResult ):
""" _evaluate
efficiency < 0.4 :: Banned
efficiency < 0.65 :: Degraded
"""
result = {
'Status' : None,
'Reason' : None
}
if not commandResult[ 'OK' ]:
result[ 'Status' ] = 'Error'
result[ 'Reason' ] = commandResult[ 'Message' ]
return S_OK( result )
commandResult = commandResult[ 'Value' ]
if not commandResult:
result[ 'Status' ] = 'Unknown'
result[ 'Reason' ] = 'No values to take a decision'
return S_OK( result )
commandResult = commandResult[ 0 ]
if not commandResult:
result[ 'Status' ] = 'Unknown'
result[ 'Reason' ] = 'No values to take a decision'
return S_OK( result )
running = float( commandResult[ 'Running' ] )
waiting = float( commandResult[ 'Waiting' ] )
staging = float( commandResult[ 'Staging' ] )
total = running + waiting + staging
#we want a minimum number of jobs to take a decision ( at least 10 jobs )
if total < 10:
result[ 'Status' ] = 'Unknown'
result[ 'Reason' ] = 'Not enough jobs to take a decision'
return S_OK( result )
efficiency = running / total
if efficiency < 0.4:
result[ 'Status' ] = 'Banned'
elif efficiency < 0.65:
result[ 'Status' ] = 'Degraded'
else:
result[ 'Status' ] = 'Active'
result[ 'Reason' ] = 'Job Running / Waiting ratio of %.2f' % efficiency
return S_OK( result )
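# Worked example (illustrative, not part of the policy code): with Running=6,
# Waiting=3 and Staging=1 the total is 10, so a decision is taken; efficiency
# = 6 / 10 = 0.6, which falls in the 0.4 <= efficiency < 0.65 band and
# therefore maps to 'Degraded'.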
#...............................................................................
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF | Sbalbp/DIRAC | ResourceStatusSystem/Policy/JobRunningWaitingRatioPolicy.py | Python | gpl-3.0 | 2,447 |
#!/usr/bin/env python
#
# Copyright 2004,2007 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import audio
from gnuradio.eng_option import eng_option
from optparse import OptionParser
class my_top_block(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
usage="%prog: [options] output_filename"
parser = OptionParser(option_class=eng_option, usage=usage)
parser.add_option("-I", "--audio-input", type="string", default="",
help="pcm input device name. E.g., hw:0,0 or /dev/dsp")
parser.add_option("-r", "--sample-rate", type="eng_float", default=48000,
help="set sample rate to RATE (48000)")
parser.add_option("-N", "--nsamples", type="eng_float", default=None,
help="number of samples to collect [default=+inf]")
(options, args) = parser.parse_args ()
if len(args) != 1:
parser.print_help()
raise SystemExit, 1
filename = args[0]
sample_rate = int(options.sample_rate)
src = audio.source (sample_rate, options.audio_input)
dst = gr.file_sink (gr.sizeof_float, filename)
if options.nsamples is None:
self.connect((src, 0), dst)
else:
head = gr.head(gr.sizeof_float, int(options.nsamples))
self.connect((src, 0), head, dst)
if __name__ == '__main__':
try:
my_top_block().run()
except KeyboardInterrupt:
pass
| owaiskhan/Retransmission-Combining | gr-audio/examples/python/audio_to_file.py | Python | gpl-3.0 | 2,271 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import yaml
from oslo_serialization import jsonutils
from osc_lib.tests import utils
from senlinclient.common import format_utils
columns = ['col1', 'col2', 'col3']
data = ['abcde', ['fg', 'hi', 'jk'], {'lmnop': 'qrstu'}]
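# Shared fixture data: every Show* command below returns these columns/data,
# and each test checks how the corresponding format_utils formatter renders them.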
class ShowJson(format_utils.JsonFormat):
def take_action(self, parsed_args):
return columns, data
class ShowYaml(format_utils.YamlFormat):
def take_action(self, parsed_args):
return columns, data
class ShowShell(format_utils.ShellFormat):
def take_action(self, parsed_args):
return columns, data
class ShowValue(format_utils.ValueFormat):
def take_action(self, parsed_args):
return columns, data
class TestFormats(utils.TestCommand):
def test_json_format(self):
self.cmd = ShowJson(self.app, None)
parsed_args = self.check_parser(self.cmd, [], [])
expected = jsonutils.dumps(dict(zip(columns, data)), indent=2)
self.cmd.run(parsed_args)
self.assertEqual(jsonutils.loads(expected),
jsonutils.loads(self.app.stdout.make_string()))
def test_yaml_format(self):
self.cmd = ShowYaml(self.app, None)
parsed_args = self.check_parser(self.cmd, [], [])
expected = yaml.safe_dump(dict(zip(columns, data)),
default_flow_style=False)
self.cmd.run(parsed_args)
self.assertEqual(expected, self.app.stdout.make_string())
def test_shell_format(self):
self.cmd = ShowShell(self.app, None)
parsed_args = self.check_parser(self.cmd, [], [])
expected = '''\
col1="abcde"
col2="['fg', 'hi', 'jk']"
col3="{'lmnop': 'qrstu'}"
'''
self.cmd.run(parsed_args)
self.assertEqual(expected, self.app.stdout.make_string())
def test_value_format(self):
self.cmd = ShowValue(self.app, None)
parsed_args = self.check_parser(self.cmd, [], [])
expected = '''\
abcde
['fg', 'hi', 'jk']
{'lmnop': 'qrstu'}
'''
self.cmd.run(parsed_args)
self.assertEqual(expected, self.app.stdout.make_string())
| stackforge/python-senlinclient | senlinclient/tests/unit/test_format_utils.py | Python | apache-2.0 | 2,634 |
import tempfile, subprocess, getpass, os, signal, socket, psutil
def get_env():
'''Determines the type of job for the current environment.
Supported environments are Condor/Slurm/Local.
'''
if socket.getfqdn().endswith('tacc.utexas.edu'):
return SlurmJob()
if socket.getfqdn().endswith('cs.utexas.edu'):
output = subprocess.Popen(['condor_config_val', 'DAEMON_LIST'],
stdout=subprocess.PIPE).communicate()[0]
if 'SCHEDD' in output:
return CondorJob()
return Job()
def list_jobs():
'''Lists the jobs currently executing.'''
get_env().list_jobs()
def list_all_jobs():
    '''Lists all the jobs currently executing (not only your own).'''
get_env().list_all_jobs()
def kill_all_jobs():
'''Kills all the currently executing jobs.'''
get_env().kill_all_jobs()
def kill_jobs(pid_list):
'''Kills only the jobs in the provided pid list.'''
get_env().kill_jobs(pid_list)
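# Example usage (illustrative; executable and arguments are hypothetical):
#   job = get_env()                        # returns a Job/CondorJob/SlurmJob for this host
#   job.set_executable('./experiment.sh')
#   job.set_args('--trials 10')
#   job.set_output_prefix('logs/experiment')
#   pid = job.submit()
#   print 'submitted as', pid, 'alive:', job.alive()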
class Job:
'''A locally running job.'''
def __init__(self, executable='', args=''):
self.executable = executable
self.output = 'job.out'
self.error = 'job.err'
self.log = 'job.log'
self.arguments = args
self.use_gpu = False
self.pid = None
self.proc = None
self.username = getpass.getuser()
self.completion_email = None
def set_executable(self, executable):
'''Specify the executable to be run.'''
self.executable = executable
def set_args(self, args):
'''Specify the arguments for the job.'''
self.arguments = args
def set_gpu(self, use_gpu):
'''Toggle gpu usage. Applicable to Condor/Slurm jobs.'''
self.use_gpu = use_gpu
def set_output(self, out):
'''Combine all output/error/log into the provided file.'''
self.output = out
self.error = out
self.log = out
def set_output_prefix(self, prefix):
'''Set a common prefix for the job's output/error/log files.'''
self.error = prefix + '.err'
self.output = prefix + '.out'
self.log = prefix + '.log'
def set_email(self, email):
'''If set, send an email on completion of job.'''
self.completion_email = email
def submit(self):
'''Run the job.'''
cmd = self.executable + ' ' + self.arguments
if self.completion_email:
subject = '[Complete] - ' + self.executable
cmd = '(' + cmd + '; echo ' + cmd + ' | mail -s \"' + \
subject + '\" ' + self.completion_email + ') '
proc = subprocess.Popen(cmd, stdout=open(self.output,'w'),
stderr=open(self.error,'w'),
shell=True, preexec_fn=os.setsid)
parent_pid = proc.pid
self.pid = parent_pid
try:
p = psutil.Process(parent_pid)
children = p.get_children(recursive=True)
if len(children) > 0:
self.pid = children[0].pid
except:
print 'Unable to determine pid of child process. Guessing pid=parent+1.'
self.pid = self.pid + 1
return self.pid
def alive(self):
'''Checks if the job is alive.'''
try:
os.kill(self.pid, 0)
except OSError:
return False
return True
def kill(self):
'''Kills the job.'''
self.kill_jobs([self.pid])
def list_jobs(self):
'''Lists your jobs running in the current environment.'''
subprocess.Popen(['ps','-u',self.username]).wait()
def list_all_jobs(self):
'''Lists all jobs running in the current environment.'''
subprocess.Popen(['ps','-ef']).wait()
def kill_all_jobs(self):
'''Kills all jobs in the current environment.'''
print 'You don\'t want me to do this. Use kill_jobs() instead.'
def kill_jobs(self, pid_list):
'''Kills only the jobs in the provided pid list.'''
import signal
for pid in pid_list:
os.kill(pid, signal.SIGTERM)
class CondorJob(Job):
'''A job to be executed on Condor.'''
def __init__(self, executable='', args=''):
Job.__init__(self, executable, args)
self.infile = None
self.group = 'GRAD'
self.project = 'AI_ROBOTICS'
self.description = 'Research'
self.requirements = None
self.universe = None
self.getenv = True
self.cpus = None
self.disk = None
self.memory = None
def submit(self):
'''Submit the job to Condor.'''
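        # The temporary submit description written below looks roughly like
        # (illustrative values only):
        #   +Group = "GRAD"
        #   +Project = "AI_ROBOTICS"
        #   Executable = ./experiment.sh
        #   Output = job.out
        #   Queue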
f = tempfile.NamedTemporaryFile() #open('condor_submit','w')
f.write('+Group = \"'+self.group+'\"\n')
f.write('+Project = \"'+self.project+'\"\n')
f.write('+ProjectDescription = \"'+self.description+'\"\n')
if self.universe:
f.write('universe = '+self.universe+'\n')
if self.getenv:
f.write('getenv = true\n')
f.write('Executable = '+self.executable+'\n')
if self.arguments:
f.write('Arguments = '+self.arguments+'\n')
if self.requirements:
f.write('Requirements = '+self.requirements+'\n')
if self.infile:
f.write('Input = '+self.infile+'\n')
f.write('Error = '+self.error+'\n')
f.write('Output = '+self.output+'\n')
f.write('Log = '+self.log+'\n')
if self.cpus:
f.write('request_cpus = '+str(self.cpus)+'\n')
if self.disk:
f.write('request_disk = '+str(self.disk)+'\n')
if self.memory:
f.write('request_memory = '+str(self.memory)+'\n')
if self.completion_email:
f.write('Notify_User = '+str(self.completion_email)+'\n')
f.write('Notification = Always\n')
if self.use_gpu:
f.write('+GPUJob = true\n')
f.write('Queue \n')
f.flush()
condorFile = f.name
output = subprocess.Popen(["condor_submit","-verbose",condorFile],
stdout=subprocess.PIPE).communicate()[0]
f.close()
s = output.find('** Proc ')+8
procID = output[s:output.find(':\n',s)]
try:
self.pid = float(procID)
except ValueError:
print output
self.pid = None
return self.pid
def set_gpu(self, use_gpu):
self.use_gpu = use_gpu
if use_gpu == True:
self.add_requirement('TARGET.GPUSlot')
def add_requirement(self, requirement):
'''Add a requirement to the job.'''
if not self.requirements:
self.requirements = requirement
else:
self.requirements += ' && ' + requirement
def hold_after_evict(self):
'''Add a requirement that puts the job on hold if it is evicted.'''
self.add_requirement('NumJobStarts == 0')
def request_cpus(self, requested_cpus):
''' Request a certain number of cpu cores.'''
self.cpus = requested_cpus
def request_disk(self, requested_disk):
''' Request a certain amount of disk space (in MB).'''
self.disk = str(requested_disk) + 'M'
def request_memory(self, requested_memory):
''' Request a certain amount of memory (in MB).'''
self.memory = requested_memory
def alive(self):
output = subprocess.Popen(['condor_q', str(self.pid)],
stdout=subprocess.PIPE).communicate()[0]
return str(self.pid) in output
def list_jobs(self):
print subprocess.Popen(['condor_q', '-wide', self.username],
stdout=subprocess.PIPE).communicate()[0]
def list_all_jobs(self):
print subprocess.Popen(['condor_q', '-wide'],
stdout=subprocess.PIPE).communicate()[0]
def kill_all_jobs(self):
print subprocess.Popen(['condor_rm', self.username],
stdout=subprocess.PIPE).communicate()[0]
def kill_jobs(self, pid_list):
cmd = ['condor_rm']
for pid in pid_list:
pid_str = str(pid)
if type(pid) == int:
pid_str += '.0'
cmd.append(pid_str)
print subprocess.Popen(cmd, stdout=subprocess.PIPE).communicate()[0]
class SlurmJob(Job):
'''A job to be executed on Slurm.'''
def __init__(self, executable='', args=''):
Job.__init__(self, executable, args)
self.hours = 12
self.minutes = 0
self.queue = 'gpu'
self.dep = None
def set_job_time(self, hours, minutes):
'''Specify the expected job runtime in hours and minutes.'''
assert(minutes < 60 and minutes >= 0)
assert(hours >= 0)
self.hours = hours
self.minutes = minutes
def set_depends(self, pid):
'''Specify the pid that this job depends on. This job will not execute
until the specified pid finishes.'''
self.dep = pid
def set_queue(self, queue):
'''Specify the queue that the job should enter.'''
self.queue = queue
def set_gpu(self, use_gpu):
if use_gpu:
self.set_queue('gpu')
def submit(self):
'''Submit the job to Slurm.'''
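        # The generated sbatch script looks roughly like (illustrative values only):
        #   #!/bin/bash
        #   #SBATCH -J ./experiment.sh
        #   #SBATCH -o job.out
        #   #SBATCH -p gpu
        #   #SBATCH -t 12:0:00
        #   ./experiment.sh --trials 10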
f = tempfile.NamedTemporaryFile()
f.write('#!/bin/bash\n')
f.write('#SBATCH -J '+str(self.executable)+'\n')
f.write('#SBATCH -o '+self.output+'\n')
f.write('#SBATCH -e '+self.error+'\n')
f.write('#SBATCH -p '+self.queue+'\n')
f.write('#SBATCH -N 1\n')
f.write('#SBATCH -n 20\n')
f.write('#SBATCH -t '+str(self.hours)+':'+str(self.minutes)+':00\n')
if self.dep:
f.write('#SBATCH -d '+self.dep+'\n')
if self.completion_email:
f.write('#SBATCH --mail-type=end\n')
f.write('#SBATCH --mail-user='+self.completion_email+'\n')
f.write(self.executable+' '+self.arguments+'\n')
f.flush()
jobFile = f.name
output = subprocess.Popen(["sbatch",jobFile],
stdout=subprocess.PIPE).communicate()[0]
f.close()
start = output.find('Submitted batch job ')+len('Submitted batch job ')
procID = output[start:output.find('\n',start)]
try:
self.pid = int(procID)
except ValueError:
print output
self.pid = None
return self.pid
def alive(self):
output = subprocess.Popen(['squeue','-j',str(self.pid)],
stdout=subprocess.PIPE).communicate()[0]
return str(self.pid) in output
def list_jobs(self):
print subprocess.Popen(['squeue','-u',self.username],
stdout=subprocess.PIPE).communicate()[0]
def list_all_jobs(self):
print subprocess.Popen(['squeue','-l'],
stdout=subprocess.PIPE).communicate()[0]
def kill_all_jobs(self):
print subprocess.Popen(['scancel','-u',self.username],
stdout=subprocess.PIPE).communicate()[0]
def kill_jobs(self, pid_list):
cmd = ['scancel'] + [str(pid) for pid in pid_list]
print subprocess.Popen(cmd, stdout=subprocess.PIPE).communicate()[0]
| LARG/cluster | cluster/cluster.py | Python | mit | 10,431 |
# -*- coding: utf-8 -*-
# Copyright (c) 2007 - 2015 Detlev Offenbach <[email protected]>
#
"""
Module implementing the Plugin installation dialog.
"""
from __future__ import unicode_literals
import os
import sys
import shutil
import zipfile
import compileall
import glob
try: # Py3
import urllib.parse as parse
except (ImportError):
import urlparse as parse # __IGNORE_WARNING__
from PyQt5.QtCore import pyqtSlot, Qt, QDir, QFileInfo
from PyQt5.QtWidgets import QWidget, QDialogButtonBox, QAbstractButton, \
QApplication, QDialog, QVBoxLayout
from E5Gui import E5FileDialog
from E5Gui.E5MainWindow import E5MainWindow
from .Ui_PluginInstallDialog import Ui_PluginInstallDialog
import Utilities
import Preferences
from Utilities.uic import compileUiFiles
class PluginInstallWidget(QWidget, Ui_PluginInstallDialog):
"""
Class implementing the Plugin installation dialog.
"""
def __init__(self, pluginManager, pluginFileNames, parent=None):
"""
Constructor
@param pluginManager reference to the plugin manager object
@param pluginFileNames list of plugin files suggested for
installation (list of strings)
@param parent parent of this dialog (QWidget)
"""
super(PluginInstallWidget, self).__init__(parent)
self.setupUi(self)
if pluginManager is None:
# started as external plugin installer
from .PluginManager import PluginManager
self.__pluginManager = PluginManager(doLoadPlugins=False)
self.__external = True
else:
self.__pluginManager = pluginManager
self.__external = False
self.__backButton = self.buttonBox.addButton(
self.tr("< Back"), QDialogButtonBox.ActionRole)
self.__nextButton = self.buttonBox.addButton(
self.tr("Next >"), QDialogButtonBox.ActionRole)
self.__finishButton = self.buttonBox.addButton(
self.tr("Install"), QDialogButtonBox.ActionRole)
self.__closeButton = self.buttonBox.button(QDialogButtonBox.Close)
self.__cancelButton = self.buttonBox.button(QDialogButtonBox.Cancel)
userDir = self.__pluginManager.getPluginDir("user")
if userDir is not None:
self.destinationCombo.addItem(
self.tr("User plugins directory"),
userDir)
globalDir = self.__pluginManager.getPluginDir("global")
if globalDir is not None and os.access(globalDir, os.W_OK):
self.destinationCombo.addItem(
self.tr("Global plugins directory"),
globalDir)
self.__installedDirs = []
self.__installedFiles = []
self.__restartNeeded = False
downloadDir = QDir(Preferences.getPluginManager("DownloadPath"))
for pluginFileName in pluginFileNames:
fi = QFileInfo(pluginFileName)
if fi.isRelative():
pluginFileName = QFileInfo(
downloadDir, fi.fileName()).absoluteFilePath()
self.archivesList.addItem(pluginFileName)
self.archivesList.sortItems()
self.__currentIndex = 0
self.__selectPage()
def restartNeeded(self):
"""
Public method to check, if a restart of the IDE is required.
@return flag indicating a restart is required (boolean)
"""
return self.__restartNeeded
def __createArchivesList(self):
"""
Private method to create a list of plugin archive names.
@return list of plugin archive names (list of strings)
"""
archivesList = []
for row in range(self.archivesList.count()):
archivesList.append(self.archivesList.item(row).text())
return archivesList
def __selectPage(self):
"""
Private method to show the right wizard page.
"""
self.wizard.setCurrentIndex(self.__currentIndex)
if self.__currentIndex == 0:
self.__backButton.setEnabled(False)
self.__nextButton.setEnabled(self.archivesList.count() > 0)
self.__finishButton.setEnabled(False)
self.__closeButton.hide()
self.__cancelButton.show()
elif self.__currentIndex == 1:
self.__backButton.setEnabled(True)
self.__nextButton.setEnabled(self.destinationCombo.count() > 0)
self.__finishButton.setEnabled(False)
self.__closeButton.hide()
self.__cancelButton.show()
else:
self.__backButton.setEnabled(True)
self.__nextButton.setEnabled(False)
self.__finishButton.setEnabled(True)
self.__closeButton.hide()
self.__cancelButton.show()
msg = self.tr(
"Plugin ZIP-Archives:\n{0}\n\nDestination:\n{1} ({2})")\
.format("\n".join(self.__createArchivesList()),
self.destinationCombo.currentText(),
self.destinationCombo.itemData(
self.destinationCombo.currentIndex())
)
self.summaryEdit.setPlainText(msg)
@pyqtSlot()
def on_addArchivesButton_clicked(self):
"""
Private slot to select plugin ZIP-archives via a file selection dialog.
"""
dn = Preferences.getPluginManager("DownloadPath")
archives = E5FileDialog.getOpenFileNames(
self,
self.tr("Select plugin ZIP-archives"),
dn,
self.tr("Plugin archive (*.zip)"))
if archives:
matchflags = Qt.MatchFixedString
if not Utilities.isWindowsPlatform():
matchflags |= Qt.MatchCaseSensitive
for archive in archives:
if len(self.archivesList.findItems(archive, matchflags)) == 0:
# entry not in list already
self.archivesList.addItem(archive)
self.archivesList.sortItems()
self.__nextButton.setEnabled(self.archivesList.count() > 0)
@pyqtSlot()
def on_archivesList_itemSelectionChanged(self):
"""
Private slot called, when the selection of the archives list changes.
"""
self.removeArchivesButton.setEnabled(
len(self.archivesList.selectedItems()) > 0)
@pyqtSlot()
def on_removeArchivesButton_clicked(self):
"""
Private slot to remove archives from the list.
"""
for archiveItem in self.archivesList.selectedItems():
itm = self.archivesList.takeItem(
self.archivesList.row(archiveItem))
del itm
self.__nextButton.setEnabled(self.archivesList.count() > 0)
@pyqtSlot(QAbstractButton)
def on_buttonBox_clicked(self, button):
"""
Private slot to handle the click of a button of the button box.
@param button reference to the button pressed (QAbstractButton)
"""
if button == self.__backButton:
self.__currentIndex -= 1
self.__selectPage()
elif button == self.__nextButton:
self.__currentIndex += 1
self.__selectPage()
elif button == self.__finishButton:
self.__finishButton.setEnabled(False)
self.__installPlugins()
if not Preferences.getPluginManager("ActivateExternal"):
Preferences.setPluginManager("ActivateExternal", True)
self.__restartNeeded = True
self.__closeButton.show()
self.__cancelButton.hide()
def __installPlugins(self):
"""
Private method to install the selected plugin archives.
@return flag indicating success (boolean)
"""
res = True
self.summaryEdit.clear()
for archive in self.__createArchivesList():
self.summaryEdit.append(
self.tr("Installing {0} ...").format(archive))
ok, msg, restart = self.__installPlugin(archive)
res = res and ok
if ok:
self.summaryEdit.append(self.tr(" ok"))
else:
self.summaryEdit.append(msg)
if restart:
self.__restartNeeded = True
self.summaryEdit.append("\n")
if res:
self.summaryEdit.append(self.tr(
"""The plugins were installed successfully."""))
else:
self.summaryEdit.append(self.tr(
"""Some plugins could not be installed."""))
return res
def __installPlugin(self, archiveFilename):
"""
Private slot to install the selected plugin.
@param archiveFilename name of the plugin archive
file (string)
@return flag indicating success (boolean), error message
upon failure (string) and flag indicating a restart
of the IDE is required (boolean)
"""
installedPluginName = ""
archive = archiveFilename
destination = self.destinationCombo.itemData(
self.destinationCombo.currentIndex())
# check if archive is a local url
url = parse.urlparse(archive)
if url[0].lower() == 'file':
archive = url[2]
# check, if the archive exists
if not os.path.exists(archive):
return False, \
self.tr(
"""<p>The archive file <b>{0}</b> does not exist. """
"""Aborting...</p>""").format(archive), \
False
# check, if the archive is a valid zip file
if not zipfile.is_zipfile(archive):
return False, \
self.tr(
"""<p>The file <b>{0}</b> is not a valid plugin """
"""ZIP-archive. Aborting...</p>""").format(archive), \
False
# check, if the destination is writeable
if not os.access(destination, os.W_OK):
return False, \
self.tr(
"""<p>The destination directory <b>{0}</b> is not """
"""writeable. Aborting...</p>""").format(destination), \
False
zip = zipfile.ZipFile(archive, "r")
# check, if the archive contains a valid plugin
pluginFound = False
pluginFileName = ""
for name in zip.namelist():
if self.__pluginManager.isValidPluginName(name):
installedPluginName = name[:-3]
pluginFound = True
pluginFileName = name
break
if not pluginFound:
return False, \
self.tr(
"""<p>The file <b>{0}</b> is not a valid plugin """
"""ZIP-archive. Aborting...</p>""").format(archive), \
False
# parse the plugin module's plugin header
pluginSource = Utilities.decode(zip.read(pluginFileName))[0]
packageName = ""
internalPackages = []
needsRestart = False
pyqtApi = 0
doCompile = True
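        # Illustrative plugin header as parsed by the loop below
        # (field values are hypothetical):
        #   packageName = "MyPluginPackage"
        #   internalPackages = "helpersA, helpersB"
        #   needsRestart = False
        #   pyqtApi = 2
        #   doNotCompile = False
        #   # End-Of-Header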
for line in pluginSource.splitlines():
if line.startswith("packageName"):
tokens = line.split("=")
if tokens[0].strip() == "packageName" and \
tokens[1].strip()[1:-1] != "__core__":
if tokens[1].strip()[0] in ['"', "'"]:
packageName = tokens[1].strip()[1:-1]
else:
if tokens[1].strip() == "None":
packageName = "None"
elif line.startswith("internalPackages"):
tokens = line.split("=")
token = tokens[1].strip()[1:-1]
# it is a comma separated string
internalPackages = [p.strip() for p in token.split(",")]
elif line.startswith("needsRestart"):
tokens = line.split("=")
needsRestart = tokens[1].strip() == "True"
elif line.startswith("pyqtApi"):
tokens = line.split("=")
try:
pyqtApi = int(tokens[1].strip())
except ValueError:
pass
elif line.startswith("doNotCompile"):
tokens = line.split("=")
if tokens[1].strip() == "True":
doCompile = False
elif line.startswith("# End-Of-Header"):
break
if not packageName:
return False, \
self.tr(
"""<p>The plugin module <b>{0}</b> does not contain """
"""a 'packageName' attribute. Aborting...</p>""")\
.format(pluginFileName), \
False
if pyqtApi < 2:
return False, \
self.tr(
"""<p>The plugin module <b>{0}</b> does not conform"""
""" with the PyQt v2 API. Aborting...</p>""")\
.format(pluginFileName), \
False
# check, if it is a plugin, that collides with others
if not os.path.exists(os.path.join(destination, pluginFileName)) and \
packageName != "None" and \
os.path.exists(os.path.join(destination, packageName)):
return False, \
self.tr("""<p>The plugin package <b>{0}</b> exists. """
"""Aborting...</p>""")\
.format(os.path.join(destination, packageName)), \
False
if os.path.exists(os.path.join(destination, pluginFileName)) and \
packageName != "None" and \
not os.path.exists(os.path.join(destination, packageName)):
return False, \
self.tr("""<p>The plugin module <b>{0}</b> exists. """
"""Aborting...</p>""")\
.format(os.path.join(destination, pluginFileName)), \
False
activatePlugin = False
if not self.__external:
activatePlugin = \
not self.__pluginManager.isPluginLoaded(
installedPluginName) or \
(self.__pluginManager.isPluginLoaded(installedPluginName) and
self.__pluginManager.isPluginActive(installedPluginName))
# try to unload a plugin with the same name
self.__pluginManager.unloadPlugin(installedPluginName)
# uninstall existing plugin first to get clean conditions
self.__uninstallPackage(destination, pluginFileName, packageName)
# clean sys.modules
reload_ = self.__pluginManager.removePluginFromSysModules(
installedPluginName, packageName, internalPackages)
# now do the installation
self.__installedDirs = []
self.__installedFiles = []
try:
if packageName != "None":
namelist = sorted(zip.namelist())
tot = len(namelist)
prog = 0
self.progress.setMaximum(tot)
QApplication.processEvents()
for name in namelist:
self.progress.setValue(prog)
QApplication.processEvents()
prog += 1
if name == pluginFileName or \
name.startswith("{0}/".format(packageName)) or \
name.startswith("{0}\\".format(packageName)):
outname = name.replace("/", os.sep)
outname = os.path.join(destination, outname)
if outname.endswith("/") or outname.endswith("\\"):
# it is a directory entry
outname = outname[:-1]
if not os.path.exists(outname):
self.__makedirs(outname)
else:
# it is a file
d = os.path.dirname(outname)
if not os.path.exists(d):
self.__makedirs(d)
f = open(outname, "wb")
f.write(zip.read(name))
f.close()
self.__installedFiles.append(outname)
self.progress.setValue(tot)
# now compile user interface files
compileUiFiles(os.path.join(destination, packageName), True)
else:
outname = os.path.join(destination, pluginFileName)
f = open(outname, "w", encoding="utf-8")
f.write(pluginSource)
f.close()
self.__installedFiles.append(outname)
except os.error as why:
self.__rollback()
return False, \
self.tr(
"Error installing plugin. Reason: {0}").format(str(why)), \
False
except IOError as why:
self.__rollback()
return False, \
self.tr(
"Error installing plugin. Reason: {0}").format(str(why)), \
False
except OSError as why:
self.__rollback()
return False, \
self.tr(
"Error installing plugin. Reason: {0}").format(str(why)), \
False
except:
sys.stderr.write("Unspecific exception installing plugin.\n")
self.__rollback()
return False, \
self.tr("Unspecific exception installing plugin."), \
False
# now compile the plugins
if doCompile:
compileall.compile_dir(
os.path.join(destination, packageName), quiet=True)
compileall.compile_file(
os.path.join(destination, pluginFileName), quiet=True)
if not self.__external:
# now load and activate the plugin
self.__pluginManager.loadPlugin(installedPluginName, destination,
reload_)
if activatePlugin:
self.__pluginManager.activatePlugin(installedPluginName)
return True, "", needsRestart
def __rollback(self):
"""
Private method to rollback a failed installation.
"""
for fname in self.__installedFiles:
if os.path.exists(fname):
os.remove(fname)
for dname in self.__installedDirs:
if os.path.exists(dname):
shutil.rmtree(dname)
def __makedirs(self, name, mode=0o777):
"""
Private method to create a directory and all intermediate ones.
This is an extended version of the Python one in order to
record the created directories.
@param name name of the directory to create (string)
@param mode permission to set for the new directory (integer)
"""
head, tail = os.path.split(name)
if not tail:
head, tail = os.path.split(head)
if head and tail and not os.path.exists(head):
self.__makedirs(head, mode)
if tail == os.curdir:
# xxx/newdir/. exists if xxx/newdir exists
return
os.mkdir(name, mode)
self.__installedDirs.append(name)
def __uninstallPackage(self, destination, pluginFileName, packageName):
"""
Private method to uninstall an already installed plugin to prepare
the update.
@param destination name of the plugin directory (string)
@param pluginFileName name of the plugin file (string)
@param packageName name of the plugin package (string)
"""
if packageName == "" or packageName == "None":
packageDir = None
else:
packageDir = os.path.join(destination, packageName)
pluginFile = os.path.join(destination, pluginFileName)
try:
if packageDir and os.path.exists(packageDir):
shutil.rmtree(packageDir)
fnameo = "{0}o".format(pluginFile)
if os.path.exists(fnameo):
os.remove(fnameo)
fnamec = "{0}c".format(pluginFile)
if os.path.exists(fnamec):
os.remove(fnamec)
pluginDirCache = os.path.join(
os.path.dirname(pluginFile), "__pycache__")
if os.path.exists(pluginDirCache):
pluginFileName = os.path.splitext(
os.path.basename(pluginFile))[0]
for fnameo in glob.glob(
os.path.join(pluginDirCache,
"{0}*.pyo".format(pluginFileName))):
os.remove(fnameo)
for fnamec in glob.glob(
os.path.join(pluginDirCache,
"{0}*.pyc".format(pluginFileName))):
os.remove(fnamec)
os.remove(pluginFile)
except (IOError, OSError, os.error):
# ignore some exceptions
pass
class PluginInstallDialog(QDialog):
"""
Class for the dialog variant.
"""
def __init__(self, pluginManager, pluginFileNames, parent=None):
"""
Constructor
@param pluginManager reference to the plugin manager object
@param pluginFileNames list of plugin files suggested for
installation (list of strings)
@param parent reference to the parent widget (QWidget)
"""
super(PluginInstallDialog, self).__init__(parent)
self.setSizeGripEnabled(True)
self.__layout = QVBoxLayout(self)
self.__layout.setContentsMargins(0, 0, 0, 0)
self.setLayout(self.__layout)
self.cw = PluginInstallWidget(pluginManager, pluginFileNames, self)
size = self.cw.size()
self.__layout.addWidget(self.cw)
self.resize(size)
self.setWindowTitle(self.cw.windowTitle())
self.cw.buttonBox.accepted.connect(self.accept)
self.cw.buttonBox.rejected.connect(self.reject)
def restartNeeded(self):
"""
Public method to check, if a restart of the IDE is required.
@return flag indicating a restart is required (boolean)
"""
return self.cw.restartNeeded()
class PluginInstallWindow(E5MainWindow):
"""
Main window class for the standalone dialog.
"""
def __init__(self, pluginFileNames, parent=None):
"""
Constructor
@param pluginFileNames list of plugin files suggested for
installation (list of strings)
@param parent reference to the parent widget (QWidget)
"""
super(PluginInstallWindow, self).__init__(parent)
self.cw = PluginInstallWidget(None, pluginFileNames, self)
size = self.cw.size()
self.setCentralWidget(self.cw)
self.resize(size)
self.setWindowTitle(self.cw.windowTitle())
self.setStyle(Preferences.getUI("Style"),
Preferences.getUI("StyleSheet"))
self.cw.buttonBox.accepted.connect(self.close)
self.cw.buttonBox.rejected.connect(self.close)
| paulmadore/Eric-IDE | 6-6.0.9/eric/PluginManager/PluginInstallDialog.py | Python | gpl-3.0 | 23,823 |
from __future__ import print_function
import os
import sys
import subprocess as sp
import pytest
import yaml
import tempfile
import requests
import argparse
import json
import re
from argparse import Namespace
from argparse import ArgumentParser
import glob
import contextlib
import tarfile
from helpers import CreateRecipe
from ggd import utils
from ggd import make_meta_recipe
import oyaml
if sys.version_info[0] == 3:
from io import StringIO
elif sys.version_info[0] == 2:
from StringIO import StringIO
#---------------------------------------------------------------------------------------------------------
## enable socket
#---------------------------------------------------------------------------------------------------------
from pytest_socket import enable_socket
def pytest_enable_socket():
enable_socket()
#---------------------------------------------------------------------------------------------------------
## Test Label
#---------------------------------------------------------------------------------------------------------
TEST_LABEL = "ggd-make-meta-recipe-test"
#---------------------------------------------------------------------------------------------------------
## IO redirection
#---------------------------------------------------------------------------------------------------------
## Create a redirect_stdout that works for python 2 and 3. (Similar to contextlib.redirect_stdout in python 3)
@contextlib.contextmanager
def redirect_stdout(target):
original = sys.stdout
sys.stdout = target
yield
sys.stdout = original
## Create a redirect_stderr that works for python 2 and 3. (Similar to contextlib.redirect_stderr in python 3)
@contextlib.contextmanager
def redirect_stderr(target):
original = sys.stderr
sys.stderr = target
yield
sys.stderr = original
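## Example (illustrative) of how the redirection helpers are used in the tests below:
##   temp_stderr = StringIO()
##   with redirect_stderr(temp_stderr):
##       make_meta_recipe.make_bash((), args)
##   output = temp_stderr.getvalue().strip()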
#-----------------------------------------------------------------------------------------------------------------------
# Unit test for ggd make-recipe
#-----------------------------------------------------------------------------------------------------------------------
def test_make_bash_test_bad_summary():
"""
    Test the main method of ggd make-meta-recipe
"""
pytest_enable_socket()
## test that make_bash fails when a bad summary is provided
args = Namespace(authors='me', channel='genomics', command='make-meta-recipe', data_version='27-Apr-2009', data_provider="UCSC",
dependency=[], genome_build='hg19', package_version='1', keyword=['gaps', 'region'],
name='test-gaps', platform='noarch', script='recipe.sh', species='Homo_sapiens', summary='',
coordinate_base="0-based-inclusive", file_type= [],final_file=[], extra_scripts=[])
try:
assert make_meta_recipe.make_bash((),args)
assert False
except AssertionError as e:
        ## Correctly raises an assertion error due to the missing summary
assert "Please provide a thorough summary of the data package" in str(e)
pass
except Exception as e:
print(e)
assert False
## test that make_bash fails when a bad summary is provided
args = Namespace(authors='me', channel='genomics', command='make-meta-recipe', data_version='27-Apr-2009', data_provider="UCSC",
dependency=[], genome_build='hg19', package_version='1', keyword=['gaps', 'region'],
name='test-gaps', platform='noarch', script='recipe.sh', species='Homo_sapiens', summary=' ',
coordinate_base="0-based-inclusive", file_type= [],final_file=[], extra_scripts=[])
try:
assert make_meta_recipe.make_bash((),args)
assert False
except AssertionError as e:
        ## Correctly raises an assertion error due to the missing summary
assert "Please provide a thorough summary of the data package" in str(e)
pass
except Exception as e:
print(e)
assert False
def test_make_bash_test_bad_name():
"""
    Test the main method of ggd make-meta-recipe
"""
pytest_enable_socket()
## test that make_bash fails when a bad name is provided
args = Namespace(authors='me', channel='genomics', command='make-meta-recipe', data_version='27-Apr-2009', data_provider="UCSC",
dependency=[], genome_build='hg19', package_version='1', keyword=['gaps', 'region'],
name='', platform='noarch', script='recipe.sh', species='Homo_sapiens', summary='Assembly gaps from USCS',
coordinate_base="0-based-inclusive", file_type= [],final_file=[], extra_scripts=[])
try:
assert make_meta_recipe.make_bash((),args)
assert False
except AssertionError as e:
        ## Correctly raises an assertion error due to the missing name
assert "The recipe name is required" in str(e)
pass
except Exception as e:
print(e)
assert False
## test that make_bash fails when a bad name is provided
args = Namespace(authors='me', channel='genomics', command='make-meta-recipe', data_version='27-Apr-2009', data_provider="UCSC",
dependency=[], genome_build='hg19', package_version='1', keyword=['gaps', 'region'],
name=' ', platform='noarch', script='recipe.sh', species='Homo_sapiens', summary='Assembly gaps from USCS',
coordinate_base="0-based-inclusive", file_type= [],final_file=[], extra_scripts=[] )
try:
assert make_meta_recipe.make_bash((),args)
assert False
except AssertionError as e:
        ## Correctly raises an assertion error due to the missing name
assert "The recipe name is required" in str(e)
pass
except Exception as e:
print(e)
assert False
def test_make_bash_test_wildcards():
"""
    Test the main method of ggd make-meta-recipe, making sure that a name with a wildcard raises an assertion error
"""
pytest_enable_socket()
## test that make_bash fails when a wild card is added to the name
args = Namespace(authors='me', channel='genomics', command='make-meta-recipe', data_version='27-Apr-2009', data_provider="UCSC",
dependency=[], genome_build='hg19', package_version='1', keyword=['gaps', 'region'],
name='test.gaps', platform='noarch', script='recipe.sh', species='Homo_sapiens', summary='Assembly gaps from UCSC',
coordinate_base="0-based-inclusive", file_type= [],final_file=[], extra_scripts=[])
try:
assert make_meta_recipe.make_bash((),args)
assert False
except AssertionError as e:
        ## Correctly raises an assertion error due to the "." wildcard in the name
assert "\".\" wildcard is not allowed in the recipe name" in str(e)
assert "hg19-test.gaps-ucsc-v1" in str(e)
pass
except Exception as e:
print(e)
assert False
## test that make_bash fails when a wild card is added to the name
args = Namespace(authors='me', channel='genomics', command='make-meta-recipe', data_version='27-Apr-2009', data_provider="UCSC",
dependency=[], genome_build='hg19', package_version='1', keyword=['gaps', 'region'],
name='test?gaps', platform='noarch', script='recipe.sh', species='Homo_sapiens', summary='Assembly gaps from UCSC',
coordinate_base="0-based-inclusive", file_type= [],final_file=[], extra_scripts=[])
try:
assert make_meta_recipe.make_bash((),args)
assert False
except AssertionError as e:
        ## Correctly raises an assertion error due to the "?" wildcard in the name
assert "\"?\" wildcard is not allowed in the recipe name. Please rename the recipe." in str(e)
assert "hg19-test?gaps-ucsc-v1" in str(e)
pass
except Exception as e:
print(e)
assert False
## test that make_bash fails when a wild card is added to the name
args = Namespace(authors='me', channel='genomics', command='make-meta-recipe', data_version='27-Apr-2009', data_provider="UCSC",
dependency=[], genome_build='hg19', package_version='1', keyword=['gaps', 'region'],
name='test*gaps', platform='noarch', script='recipe.sh', species='Homo_sapiens', summary='Assembly gaps from UCSC',
coordinate_base="0-based-inclusive", file_type= [],final_file=[], extra_scripts=[])
try:
assert make_meta_recipe.make_bash((),args)
assert False
except AssertionError as e:
        ## Correctly raises an assertion error due to the "*" wildcard in the name
assert "\"*\" wildcard is not allowed in the recipe name. Please rename the recipe." in str(e)
assert "hg19-test*gaps-ucsc-v1" in str(e)
pass
except Exception as e:
print(e)
assert False
## test that make_bash fails when a wild card is added to the name
args = Namespace(authors='me', channel='genomics', command='make-meta-recipe', data_version='27-Apr-2009', data_provider="UCSC",
dependency=[], genome_build='hg19', package_version='1', keyword=['gaps', 'region'],
name='test[gaps', platform='noarch', script='recipe.sh', species='Homo_sapiens', summary='Assembly gaps from UCSC',
coordinate_base="0-based-inclusive", file_type= [],final_file=[], extra_scripts=[])
try:
assert make_meta_recipe.make_bash((),args)
assert False
except AssertionError as e:
        ## Correctly raises an assertion error due to the "[" wildcard in the name
assert "\"[\" wildcard is not allowed in the recipe name. Please rename the recipe." in str(e)
assert "hg19-test[gaps-ucsc-v1" in str(e)
pass
except Exception as e:
print(e)
assert False
## test that make_bash fails when a wild card is added to the name
args = Namespace(authors='me', channel='genomics', command='make-meta-recipe', data_version='27-Apr-2009', data_provider="UCSC",
dependency=[], genome_build='hg19', package_version='1', keyword=['gaps', 'region'],
name='test]gaps', platform='noarch', script='recipe.sh', species='Homo_sapiens', summary='Assembly gaps from UCSC',
coordinate_base="0-based-inclusive", file_type= [],final_file=[], extra_scripts=[])
try:
assert make_meta_recipe.make_bash((),args)
assert False
except AssertionError as e:
        ## Correctly raises an assertion error due to the "]" wildcard in the name
assert "\"]\" wildcard is not allowed in the recipe name. Please rename the recipe." in str(e)
assert "hg19-test]gaps-ucsc-v1" in str(e)
pass
except Exception as e:
print(e)
assert False
## test that make_bash fails when a wild card is added to the name
args = Namespace(authors='me', channel='genomics', command='make-meta-recipe', data_version='27-Apr-2009', data_provider="UCSC",
dependency=[], genome_build='hg19', package_version='1', keyword=['gaps', 'region'],
name='test{gaps', platform='noarch', script='recipe.sh', species='Homo_sapiens', summary='Assembly gaps from UCSC',
coordinate_base="0-based-inclusive", file_type= [],final_file=[], extra_scripts=[])
try:
assert make_meta_recipe.make_bash((),args)
assert False
except AssertionError as e:
        ## Correctly raises an assertion error due to the "{" wildcard in the name
assert "\"{\" wildcard is not allowed in the recipe name. Please rename the recipe." in str(e)
assert "hg19-test{gaps-ucsc-v1" in str(e)
pass
except Exception as e:
print(e)
assert False
## test that make_bash fails when a wild card is added to the name
args = Namespace(authors='me', channel='genomics', command='make-meta-recipe', data_version='27-Apr-2009', data_provider="UCSC",
dependency=[], genome_build='hg19', package_version='1', keyword=['gaps', 'region'],
name='test}gaps', platform='noarch', script='recipe.sh', species='Homo_sapiens', summary='Assembly gaps from UCSC',
coordinate_base="0-based-inclusive", file_type= [],final_file=[], extra_scripts=[])
try:
assert make_meta_recipe.make_bash((),args)
assert False
except AssertionError as e:
        ## Correctly raises an assertion error due to the "}" wildcard in the name
assert "\"}\" wildcard is not allowed in the recipe name. Please rename the recipe." in str(e)
assert "hg19-test}gaps-ucsc-v1" in str(e)
pass
except Exception as e:
print(e)
assert False
## test that make_bash fails when a wild card is added to the name
args = Namespace(authors='me', channel='genomics', command='make-meta-recipe', data_version='27-Apr-2009', data_provider="UCSC",
dependency=[], genome_build='hg19', package_version='1', keyword=['gaps', 'region'],
name='test!gaps', platform='noarch', script='recipe.sh', species='Homo_sapiens', summary='Assembly gaps from UCSC',
coordinate_base="0-based-inclusive", file_type= [],final_file=[], extra_scripts=[])
try:
assert make_meta_recipe.make_bash((),args)
assert False
except AssertionError as e:
        ## Correctly raises an assertion error due to the "!" wildcard in the name
assert "\"!\" wildcard is not allowed in the recipe name. Please rename the recipe." in str(e)
assert "hg19-test!gaps-ucsc-v1" in str(e)
pass
except Exception as e:
print(e)
assert False
## test that make_bash fails when a wild card is added to the name
args = Namespace(authors='me', channel='genomics', command='make-meta-recipe', data_version='27-Apr-2009', data_provider="UCSC",
dependency=[], genome_build='hg19', package_version='1', keyword=['gaps', 'region'],
name='test+gaps', platform='noarch', script='recipe.sh', species='Homo_sapiens', summary='Assembly gaps from UCSC',
coordinate_base="0-based-inclusive", file_type= [],final_file=[], extra_scripts=[])
try:
assert make_meta_recipe.make_bash((),args)
assert False
except AssertionError as e:
        ## Correctly raises an assertion error due to the "+" wildcard in the name
assert "\"+\" wildcard is not allowed in the recipe name. Please rename the recipe." in str(e)
assert "hg19-test+gaps-ucsc-v1" in str(e)
pass
except Exception as e:
print(e)
assert False
## test that make_bash fails when a wild card is added to the name
args = Namespace(authors='me', channel='genomics', command='make-meta-recipe', data_version='27-Apr-2009', data_provider="UCSC",
dependency=[], genome_build='hg19', package_version='1', keyword=['gaps', 'region'],
name='test^gaps', platform='noarch', script='recipe.sh', species='Homo_sapiens', summary='Assembly gaps from UCSC',
coordinate_base="0-based-inclusive", file_type= [],final_file=[], extra_scripts=[])
try:
assert make_meta_recipe.make_bash((),args)
assert False
except AssertionError as e:
        ## Correctly raises an assertion error due to the "^" wildcard in the name
assert "\"^\" wildcard is not allowed in the recipe name. Please rename the recipe." in str(e)
assert "hg19-test^gaps-ucsc-v1" in str(e)
pass
except Exception as e:
print(e)
assert False
## test that make_bash fails when a wild card is added to the name
args = Namespace(authors='me', channel='genomics', command='make-meta-recipe', data_version='27-Apr-2009', data_provider="UCSC",
dependency=[], genome_build='hg19', package_version='1', keyword=['gaps', 'region'],
name='test$gaps', platform='noarch', script='recipe.sh', species='Homo_sapiens', summary='Assembly gaps from UCSC',
coordinate_base="0-based-inclusive", file_type= [],final_file=[], extra_scripts=[])
try:
assert make_meta_recipe.make_bash((),args)
assert False
except AssertionError as e:
        ## Correctly raises an assertion error due to the "$" wildcard in the name
assert "\"$\" wildcard is not allowed in the recipe name. Please rename the recipe." in str(e)
assert "hg19-test$gaps-ucsc-v1" in str(e)
pass
except Exception as e:
print(e)
assert False
## test that make_bash fails when a wild card is added to the name
args = Namespace(authors='me', channel='genomics', command='make-meta-recipe', data_version='27-Apr-2009', data_provider="UCSC",
dependency=[], genome_build='hg19', package_version='1', keyword=['gaps', 'region'],
name='test(gaps', platform='noarch', script='recipe.sh', species='Homo_sapiens', summary='Assembly gaps from UCSC',
coordinate_base="0-based-inclusive", file_type= [],final_file=[], extra_scripts=[])
try:
assert make_meta_recipe.make_bash((),args)
assert False
except AssertionError as e:
        ## Correctly raises an assertion error due to the "(" wildcard in the name
assert "\"(\" wildcard is not allowed in the recipe name. Please rename the recipe." in str(e)
assert "hg19-test(gaps-ucsc-v1" in str(e)
pass
except Exception as e:
print(e)
assert False
## test that make_bash fails when a wild card is added to the name
args = Namespace(authors='me', channel='genomics', command='make-meta-recipe', data_version='27-Apr-2009', data_provider="UCSC",
dependency=[], genome_build='hg19', package_version='1', keyword=['gaps', 'region'],
name='test)gaps', platform='noarch', script='recipe.sh', species='Homo_sapiens', summary='Assembly gaps from UCSC',
coordinate_base="0-based-inclusive", file_type= [],final_file=[], extra_scripts=[])
try:
assert make_meta_recipe.make_bash((),args)
assert False
except AssertionError as e:
        ## Correctly raises an assertion error due to the ")" wildcard in the name
assert "\")\" wildcard is not allowed in the recipe name. Please rename the recipe." in str(e)
assert "hg19-test)gaps-ucsc-v1" in str(e)
pass
except Exception as e:
print(e)
assert False
def test_make_bash_test_bad_genome_build():
"""
    Test the main method of ggd make-meta-recipe
"""
pytest_enable_socket()
## test that make_bash fails when a bad genome build is provided
args = Namespace(authors='me', channel='genomics', command='make-meta-recipe', data_version='27-Apr-2009', data_provider="UCSC",
dependency=[], genome_build='hg09', package_version='1', keyword=['gaps', 'region'],
name='test-gaps', platform='noarch', script='recipe.sh', species='Homo_sapiens', summary='Assembly gaps from USCS',
coordinate_base="0-based-inclusive", file_type= [],final_file=[], extra_scripts=[])
try:
temp_stderr = StringIO()
with redirect_stderr(temp_stderr):
make_meta_recipe.make_bash((),args)
except Exception as e:
os.rmdir("{}-{}-{}-v{}".format("hg09","test-gaps","ucsc","1"))
output = str(temp_stderr.getvalue().strip())
assert "ERROR: genome-build: hg09 not found in github repo for the Homo_sapiens species" in output
## test that make_bash fails when a bad genome build is provided
args = Namespace(authors='me', channel='genomics', command='make-meta-recipe', data_version='27-Apr-2009', data_provider="UCSC",
dependency=[], genome_build='hgmm10', package_version='1', keyword=['gaps', 'region'],
name='test-gaps', platform='noarch', script='recipe.sh', species='Homo_sapiens', summary='Assembly gaps from USCS',
coordinate_base="0-based-inclusive", file_type= [],final_file=[], extra_scripts=[])
try:
temp_stderr = StringIO()
with redirect_stderr(temp_stderr):
make_meta_recipe.make_bash((),args)
except Exception as e:
os.rmdir("{}-{}-{}-v{}".format("hgmm10","test-gaps","ucsc","1"))
output = temp_stderr.getvalue().strip()
assert "ERROR: genome-build: hgmm10 not found in github repo for the Homo_sapiens species" in output
def test_make_bash_test_bad_recipe():
"""
    Test the main method of ggd make-meta-recipe
"""
pytest_enable_socket()
## test that make_bash fails when a bad recipe is provided
args = Namespace(authors='me', channel='genomics', command='make-meta-recipe', data_version='27-Apr-2009', data_provider="UCSC",
dependency=[], genome_build='hg19', package_version='1', keyword=['gaps', 'region'],
name='test-gaps', platform='noarch', script='bad-recipe.sh', species='Homo_sapiens', summary='Assembly gaps from USCS',
coordinate_base="0-based-inclusive", file_type= [],final_file=[], extra_scripts=[])
with pytest.raises(SystemExit) as pytest_wrapped_e:
make_meta_recipe.make_bash((), args)
os.rmdir("{}-{}-{}-v{}".format("hg19","test-gaps","ucsc","1"))
assert "SystemExit" in str(pytest_wrapped_e.exconly()) ## test that SystemExit was raised by sys.exit()
assert pytest_wrapped_e.match("1") ## Check that the exit code is 1
def test_make_bash_missing_tags():
"""
    Test that there is an error when tags are missing
"""
pytest_enable_socket()
recipe = CreateRecipe(
"""
hg19-test-gaps-ucsc-v1:
recipe.sh: |
genome=https://raw.githubusercontent.com/gogetdata/ggd-recipes/master/genomes/Homo_sapiens/hg19/hg19.genome
wget --quiet -O - http://hgdownload.cse.ucsc.edu/goldenpath/hg19/database/gap.txt.gz \\
| gzip -dc \\
| awk -v OFS="\t" 'BEGIN {print "#chrom\tstart\tend\tsize\ttype\tstrand"} {print $2,$3,$4,$7,$8,"+"}' \\
| gsort /dev/stdin $genome \\
| bgzip -c > gaps.bed.gz
tabix gaps.bed.gz
""", from_string=True)
recipe.write_recipes()
ggd_package = "hg19-test-gaps-ucsc-v1"
recipe_file = os.path.join(recipe.recipe_dirs["hg19-test-gaps-ucsc-v1"],"recipe.sh")
## Bad coordinate
args = Namespace(authors='me', channel='genomics', command='make-meta-recipe', data_version='27-Apr-2009', data_provider="UCSC",
dependency=[], genome_build='hg19', package_version='1', keyword=['gaps', 'region'],
name='test-gaps', platform='noarch', script=recipe_file, species='Homo_sapiens', summary='Assembly gaps from USCS',
coordinate_base="2-based-exclusive", file_type= [],final_file=[], extra_scripts=[])
try:
assert make_meta_recipe.make_bash((),args)
assert False
except AssertionError as e:
assert "2-based-exclusive is not an acceptable genomic coordinate base" in str(e)
print(str(e))
pass
except Exception as e:
print(e)
assert False
## Emtpy data version
args = Namespace(authors='me', channel='genomics', command='make-meta-recipe', data_version='', data_provider="UCSC",
dependency=[], genome_build='hg19', package_version='1', keyword=['gaps', 'region'],
name='test-gaps', platform='noarch', script=recipe_file, species='Homo_sapiens', summary='Assembly gaps from USCS',
coordinate_base="0-based-inclusive", file_type= [],final_file=[],extra_scripts=[])
try:
assert make_meta_recipe.make_bash((),args)
assert False
except AssertionError as e:
assert "Please provide the version of the data this recipe curates" in str(e)
pass
except Exception as e:
print(e)
assert False
## Empty data provider
args = Namespace(authors='me', channel='genomics', command='make-meta-recipe', data_version='27-Apr-2009', data_provider="",
dependency=[], genome_build='hg19', package_version='1', keyword=['gaps', 'region'],
name='test-gaps', platform='noarch', script=recipe_file, species='Homo_sapiens', summary='Assembly gaps from USCS',
coordinate_base="0-based-inclusive", file_type= [],final_file=[], extra_scripts=[])
try:
assert make_meta_recipe.make_bash((),args)
assert False
except AssertionError as e:
assert "The data provider is required" in str(e)
pass
except Exception as e:
print(e)
assert False
if os.path.exists(os.path.join(os.getcwd(), "hg19-test-gaps-ucsc-v1")):
os.rmdir("hg19-test-gaps-ucsc-v1")
def test_make_bash():
"""
    Test the main method of ggd make-meta-recipe
"""
pytest_enable_socket()
recipe = CreateRecipe(
"""
test-meta-recipe-ucsc-v1:
recipe.sh: |
genome=https://raw.githubusercontent.com/gogetdata/ggd-recipes/master/genomes/Homo_sapiens/hg19/hg19.genome
wget --quiet -O - http://hgdownload.cse.ucsc.edu/goldenpath/hg19/database/gap.txt.gz \\
| gzip -dc \\
| awk -v OFS="\t" 'BEGIN {print "#chrom\tstart\tend\tsize\ttype\tstrand"} {print $2,$3,$4,$7,$8,"+"}' \\
| gsort /dev/stdin $genome \\
| bgzip -c > gaps.bed.gz
tabix gaps.bed.gz
""", from_string=True)
recipe.write_recipes()
ggd_package = "meta-recipe-test-metarecipe-ucsc-v1"
recipe_file = os.path.join(recipe.recipe_dirs["test-meta-recipe-ucsc-v1"],"recipe.sh")
args = Namespace(authors='me', channel='genomics', command='make-meta-recipe', data_version='27-Apr-2009', data_provider="UCSC",
dependency=[], genome_build='meta-recipe', package_version='1', keyword=['gaps', 'region'],
name='test-metarecipe', platform='noarch', script=recipe_file, species='meta-recipe', summary='some meta-recipe',
coordinate_base="0-based-inclusive", file_type= [],final_file=[], extra_scripts=[])
assert make_meta_recipe.make_bash((),args)
new_recipe_file = os.path.join("./", ggd_package, "recipe.sh")
assert os.path.exists(new_recipe_file)
assert os.path.isfile(new_recipe_file)
new_meta_recipe_file = os.path.join("./", ggd_package, "metarecipe.sh")
assert os.path.exists(new_recipe_file)
assert os.path.isfile(new_recipe_file)
new_metayaml_file = os.path.join("./", ggd_package, "meta.yaml")
assert os.path.exists(new_metayaml_file)
assert os.path.isfile(new_metayaml_file)
new_postlink_file = os.path.join("./", ggd_package, "post-link.sh")
assert os.path.exists(new_postlink_file)
assert os.path.isfile(new_postlink_file)
new_checksums_file = os.path.join("./", ggd_package, "checksums_file.txt")
assert os.path.exists(new_checksums_file)
assert os.path.isfile(new_checksums_file)
## Test meta.yaml
try:
with open(new_metayaml_file, "r") as mf:
yamldict = yaml.safe_load(mf)
assert yamldict["build"]["noarch"] == "generic"
assert yamldict["build"]["number"] == 0
assert yamldict["extra"]["authors"] == "me"
assert yamldict["package"]["name"] == ggd_package
assert yamldict["package"]["version"] == "1"
assert yamldict["requirements"]["build"] == ['gsort', 'htslib', 'zlib']
assert yamldict["requirements"]["run"] == ['gsort', 'htslib', 'zlib']
assert yamldict["source"]["path"] == "."
assert yamldict["about"]["identifiers"]["genome-build"] == "meta-recipe"
assert yamldict["about"]["identifiers"]["species"] == "meta-recipe"
assert yamldict["about"]["keywords"] == ['gaps','region']
assert yamldict["about"]["summary"] == "some meta-recipe"
assert yamldict["about"]["tags"]["genomic-coordinate-base"] == "0-based-inclusive"
assert yamldict["about"]["tags"]["data-version"] == "27-Apr-2009"
assert yamldict["about"]["tags"]["data-provider"] == "UCSC"
assert yamldict["about"]["tags"]["file-type"] == []
assert yamldict["about"]["tags"]["final-files"] == []
assert yamldict["about"]["tags"]["final-file-sizes"] == {}
assert yamldict["about"]["tags"]["ggd-channel"] == "genomics"
except IOError as e:
print(e)
assert False
## Test post-link.sh
try:
with open(new_postlink_file, "r") as pf:
recipe_dir = False
pkd_dir = False
dir_env_var = False
file_env_var = False
run_recipe_script = False
file_extention = False
rename_data = False
set_new_name = False
for line in pf:
## Check new name
if "new_name=" in line:
assert line.strip() == '''new_name="$GGD_METARECIPE_ID-ucsc-v1"''' or line.strip() == '''new_name="$(echo $new_name | tr '[:upper:]' '[:lower:]')"''' or line.strip() == """#new_name=${new_name,,} Requires bash version >= 4.2"""
set_new_name = True
### Check the assignment of RECIPE_DIR
if "RECIPE_DIR=" in line:
assert line.strip() == """export RECIPE_DIR=$CONDA_ROOT/share/ggd/meta-recipe/meta-recipe/$new_name/1""" or line.strip() == """export RECIPE_DIR=$env_dir/share/ggd/meta-recipe/meta-recipe/$new_name/1"""
recipe_dir = True
                    ### Check the assigning of PKG_DIR to conform with proper file filtering for Linux and macOSX
if "PKG_DIR=" in line:
assert line.strip() == """PKG_DIR=`find "$CONDA_SOURCE_PREFIX/pkgs/" -name "$PKG_NAME-$PKG_VERSION*" | grep -v ".tar.bz2" | grep "$PKG_VERSION.*$PKG_BUILDNUM$"`"""
pkd_dir = True
                    ### Check environment variable setting
if "recipe_env_dir_name=" in line:
assert line.strip() == '''recipe_env_dir_name="ggd_""$new_name""_dir"'''.strip() \
or line.strip() == """recipe_env_dir_name="$(echo "$recipe_env_dir_name" | sed 's/-/_/g' | sed 's/\./_/g')" """.strip() \
or line.strip() == """echo "export $recipe_env_dir_name=$RECIPE_DIR" >> $activate_dir/env_vars.sh"""
dir_env_var = True
if "recipe_env_file_name=" in line:
assert line.strip() == '''recipe_env_file_name="ggd_""$new_name""_file"'''.strip() \
or line.strip() == '''recipe_env_file_name="$(echo "$recipe_env_file_name" | sed 's/-/_/g' | sed 's/\./_/g')"'''.strip() \
or line.strip() == """if [[ ! -z "${recipe_env_file_name:-}" ]] """.strip() \
or line.strip() == '''echo "export $recipe_env_file_name=$file_path" >> $activate_dir/env_vars.sh'''.strip()
file_env_var = True
#### Check that the recipe is being run
##### Ensure that the appropriate env variables are there
###### - $RECIPE_DIR
###### - $SCRIPTS_PATH
###### - $GGD_METARECIPE_ID
###### - $GGD_METARECIPE_ENV_VAR_FILE
###### - $GGD_METARECIPE_FINAL_COMMANDS_FILE
if "bash $SCRIPTS_PATH/metarecipe.sh" in line:
assert line.strip() == """(cd $RECIPE_DIR && bash $SCRIPTS_PATH/metarecipe.sh $GGD_METARECIPE_ID $SCRIPTS_PATH "$GGD_METARECIPE_ENV_VAR_FILE" "$GGD_METARECIPE_FINAL_COMMANDS_FILE")"""
run_recipe_script = True
assert recipe_dir
assert pkd_dir
assert dir_env_var
assert file_env_var
assert run_recipe_script
assert set_new_name
except IOError as e:
print(e)
assert False
os.remove(new_recipe_file)
os.remove(new_meta_recipe_file)
os.remove(new_metayaml_file)
os.remove(new_postlink_file)
os.remove(new_checksums_file)
os.rmdir(ggd_package)
def test_make_meta_recipe_all_params():
"""
Test the main method of ggd make-meta-recipe
"""
pytest_enable_socket()
recipe = CreateRecipe(
"""
test-meta-recipe2-ucsc-v1:
recipe.sh: |
genome=https://raw.githubusercontent.com/gogetdata/ggd-recipes/master/genomes/Homo_sapiens/hg19/hg19.genome
wget --quiet -O - http://hgdownload.cse.ucsc.edu/goldenpath/hg19/database/gap.txt.gz \\
| gzip -dc \\
| awk -v OFS="\t" 'BEGIN {print "#chrom\tstart\tend\tsize\ttype\tstrand"} {print $2,$3,$4,$7,$8,"+"}' \\
| gsort /dev/stdin $genome \\
| bgzip -c > gaps.bed.gz
tabix gaps.bed.gz
""", from_string=True)
recipe.write_recipes()
ggd_package = "meta-recipe-test-metarecipe2-ucsc-v1"
recipe_file = os.path.join(recipe.recipe_dirs["test-meta-recipe2-ucsc-v1"],"recipe.sh")
args = Namespace(authors='me', channel='genomics', command='make-meta-recipe', data_version='27-Apr-2009', data_provider="UCSC",
dependency=['vt','samtools','bedtools'], genome_build='meta-recipe', package_version='1', keyword=['gaps', 'region'],
name='test-metarecipe2', platform='none', script=recipe_file, species='meta-recipe', summary='some meta-recipe',
coordinate_base="1-based-inclusive", file_type= [],final_file=[], extra_scripts=[])
assert make_meta_recipe.make_bash((),args)
new_recipe_file = os.path.join("./", ggd_package, "recipe.sh")
assert os.path.exists(new_recipe_file)
assert os.path.isfile(new_recipe_file)
new_meta_recipe_file = os.path.join("./", ggd_package, "metarecipe.sh")
assert os.path.exists(new_recipe_file)
assert os.path.isfile(new_recipe_file)
new_metayaml_file = os.path.join("./", ggd_package, "meta.yaml")
assert os.path.exists(new_metayaml_file)
assert os.path.isfile(new_metayaml_file)
new_postlink_file = os.path.join("./", ggd_package, "post-link.sh")
assert os.path.exists(new_postlink_file)
assert os.path.isfile(new_postlink_file)
new_checksums_file = os.path.join("./", ggd_package, "checksums_file.txt")
assert os.path.exists(new_checksums_file)
assert os.path.isfile(new_checksums_file)
## Test meta.yaml
try:
with open(new_metayaml_file, "r") as mf:
yamldict = yaml.safe_load(mf)
assert yamldict["build"]["number"] == 0
assert "noarch" not in yamldict["build"].keys()
assert yamldict["extra"]["authors"] == "me"
assert yamldict["package"]["name"] == ggd_package
assert yamldict["package"]["version"] == "1"
assert yamldict["requirements"]["build"] == ['bedtools', 'gsort', 'htslib', 'samtools', 'vt', 'zlib']
assert yamldict["requirements"]["run"] == ['bedtools', 'gsort', 'htslib', 'samtools', 'vt', 'zlib']
assert yamldict["source"]["path"] == "."
assert yamldict["about"]["identifiers"]["genome-build"] == "meta-recipe"
assert yamldict["about"]["identifiers"]["species"] == "meta-recipe"
assert yamldict["about"]["keywords"] == ['gaps','region']
assert yamldict["about"]["summary"] == "some meta-recipe"
assert yamldict["about"]["tags"]["genomic-coordinate-base"] == "1-based-inclusive"
assert yamldict["about"]["tags"]["data-version"] == "27-Apr-2009"
assert yamldict["about"]["tags"]["file-type"] == [] ## Should be converted to lower case
assert yamldict["about"]["tags"]["final-files"] == []
assert yamldict["about"]["tags"]["final-file-sizes"] == {}
assert yamldict["about"]["tags"]["ggd-channel"] == "genomics"
except IOError as e:
print(e)
assert False
os.remove(new_recipe_file)
os.remove(new_meta_recipe_file)
os.remove(new_metayaml_file)
os.remove(new_postlink_file)
os.remove(new_checksums_file)
os.rmdir(ggd_package)
def test_make_meta_recipe_extra_scripts():
"""
    Test the main method of ggd make-meta-recipe when extra scripts are included
"""
pytest_enable_socket()
recipe = CreateRecipe(
"""
test-meta-recipe2-ucsc-v1:
recipe.sh: |
genome=https://raw.githubusercontent.com/gogetdata/ggd-recipes/master/genomes/Homo_sapiens/hg19/hg19.genome
wget --quiet -O - http://hgdownload.cse.ucsc.edu/goldenpath/hg19/database/gap.txt.gz \\
| gzip -dc \\
| awk -v OFS="\t" 'BEGIN {print "#chrom\tstart\tend\tsize\ttype\tstrand"} {print $2,$3,$4,$7,$8,"+"}' \\
| gsort /dev/stdin $genome \\
| bgzip -c > gaps.bed.gz
tabix gaps.bed.gz
extra_script1.py: |
"this is an extra script"
extra_script2.sh: |
"this is another extra script"
""", from_string=True)
recipe.write_recipes()
ggd_package = "meta-recipe-test-metarecipe2-ucsc-v1"
recipe_file = os.path.join(recipe.recipe_dirs["test-meta-recipe2-ucsc-v1"],"recipe.sh")
extra_script = os.path.join(recipe.recipe_dirs["test-meta-recipe2-ucsc-v1"],"extra_script1.py")
extra_script2 = os.path.join(recipe.recipe_dirs["test-meta-recipe2-ucsc-v1"],"extra_script2.sh")
args = Namespace(authors='me', channel='genomics', command='make-meta-recipe', data_version='27-Apr-2009', data_provider="UCSC",
dependency=['vt','samtools','bedtools'], genome_build='meta-recipe', package_version='1', keyword=['gaps', 'region'],
name='test-metarecipe2', platform='none', script=recipe_file, species='meta-recipe', summary='some meta-recipe',
coordinate_base="1-based-inclusive", file_type= [],final_file=[], extra_scripts=[extra_script, extra_script2])
assert make_meta_recipe.make_bash((),args)
new_recipe_file = os.path.join("./", ggd_package, "recipe.sh")
assert os.path.exists(new_recipe_file)
assert os.path.isfile(new_recipe_file)
new_meta_recipe_file = os.path.join("./", ggd_package, "metarecipe.sh")
    assert os.path.exists(new_meta_recipe_file)
    assert os.path.isfile(new_meta_recipe_file)
new_extra_script_file = os.path.join("./", ggd_package, "extra_script1.py")
assert os.path.exists(new_extra_script_file)
assert os.path.isfile(new_extra_script_file)
new_extra_script_file2 = os.path.join("./", ggd_package, "extra_script2.sh")
assert os.path.exists(new_extra_script_file2)
assert os.path.isfile(new_extra_script_file2)
new_metayaml_file = os.path.join("./", ggd_package, "meta.yaml")
assert os.path.exists(new_metayaml_file)
assert os.path.isfile(new_metayaml_file)
new_postlink_file = os.path.join("./", ggd_package, "post-link.sh")
assert os.path.exists(new_postlink_file)
assert os.path.isfile(new_postlink_file)
new_checksums_file = os.path.join("./", ggd_package, "checksums_file.txt")
assert os.path.exists(new_checksums_file)
assert os.path.isfile(new_checksums_file)
os.remove(new_recipe_file)
os.remove(new_meta_recipe_file)
os.remove(new_extra_script_file)
os.remove(new_extra_script_file2)
os.remove(new_metayaml_file)
os.remove(new_postlink_file)
os.remove(new_checksums_file)
os.rmdir(ggd_package)
def test_make_bash_meta_yaml_key_order():
"""
    Test that ggd make-meta-recipe writes the meta.yaml keys in the order conda-build requires
"""
pytest_enable_socket()
recipe = CreateRecipe(
"""
another-meta-recipe-ucsc-v1:
recipe.sh: |
genome=https://raw.githubusercontent.com/gogetdata/ggd-recipes/master/genomes/Homo_sapiens/hg19/hg19.genome
wget --quiet -O - http://hgdownload.cse.ucsc.edu/goldenpath/hg19/database/gap.txt.gz \\
| gzip -dc \\
| awk -v OFS="\t" 'BEGIN {print "#chrom\tstart\tend\tsize\ttype\tstrand"} {print $2,$3,$4,$7,$8,"+"}' \\
| gsort /dev/stdin $genome \\
| bgzip -c > gaps.bed.gz
tabix gaps.bed.gz
""", from_string=True)
recipe.write_recipes()
ggd_package = "meta-recipe-another-metarecipe-ucsc-v1"
recipe_file = os.path.join(recipe.recipe_dirs["another-meta-recipe-ucsc-v1"],"recipe.sh")
args = Namespace(authors='me', channel='genomics', command='make-meta-recipe', data_version='27-Apr-2009', data_provider="UCSC",
dependency=['vt','samtools','bedtools'], genome_build='meta-recipe', package_version='1', keyword=['gaps', 'region'],
name='another-metarecipe', platform='none', script=recipe_file, species='meta-recipe', summary='Assembly gaps from UCSC',
coordinate_base="0-based-inclusive", file_type= ["Bed"], final_file=["hg19-test-gaps3-ucsc-v1.bed.gz", "hg19-test-gaps3-ucsc-v1.bed.gz.tbi"], extra_scripts=[])
assert make_meta_recipe.make_bash((),args)
new_recipe_file = os.path.join("./", ggd_package, "recipe.sh")
assert os.path.exists(new_recipe_file)
assert os.path.isfile(new_recipe_file)
new_metarecipe_file = os.path.join("./", ggd_package, "metarecipe.sh")
    assert os.path.exists(new_metarecipe_file)
    assert os.path.isfile(new_metarecipe_file)
new_metayaml_file = os.path.join("./", ggd_package, "meta.yaml")
assert os.path.exists(new_metayaml_file)
assert os.path.isfile(new_metayaml_file)
new_postlink_file = os.path.join("./", ggd_package, "post-link.sh")
assert os.path.exists(new_postlink_file)
assert os.path.isfile(new_postlink_file)
new_checksums_file = os.path.join("./", ggd_package, "checksums_file.txt")
assert os.path.exists(new_checksums_file)
assert os.path.isfile(new_checksums_file)
## Test that the keys in the meta.yaml file are in the correct order.
## Conda-build requires a strict order: https://github.com/conda/conda-build/issues/3267
try:
ref_keys = ["build","extra","package","requirements","source","about"]
index = 0
with open(new_metayaml_file, "r") as mf:
for item in mf:
item = item.strip().replace(":","")
if item in ref_keys:
assert ref_keys[index] == item
ref_keys[index] = "Done"
index += 1
            assert index-1 == 5 ## index was incremented once more after the final key, so only indices 0-5 (6 keys) were matched
except IOError as e:
print(e)
assert False
os.remove(new_recipe_file)
os.remove(new_metarecipe_file)
os.remove(new_metayaml_file)
os.remove(new_postlink_file)
os.remove(new_checksums_file)
os.rmdir(ggd_package)
def test_make_bash_meta_yaml_ggd_dependency():
"""
    Test that ggd make-meta-recipe places GGD data-package dependencies in the run requirements only
"""
pytest_enable_socket()
recipe = CreateRecipe(
"""
more-meta-recipe-ucsc-v1:
recipe.sh: |
genome=https://raw.githubusercontent.com/gogetdata/ggd-recipes/master/genomes/Homo_sapiens/hg19/hg19.genome
wget --quiet -O - http://hgdownload.cse.ucsc.edu/goldenpath/hg19/database/gap.txt.gz \\
| gzip -dc \\
| awk -v OFS="\t" 'BEGIN {print "#chrom\tstart\tend\tsize\ttype\tstrand"} {print $2,$3,$4,$7,$8,"+"}' \\
| gsort /dev/stdin $genome \\
| bgzip -c > gaps.bed.gz
tabix gaps.bed.gz
""", from_string=True)
recipe.write_recipes()
ggd_package = "meta-recipe-test-gaps4-ucsc-v1"
recipe_file = os.path.join(recipe.recipe_dirs["more-meta-recipe-ucsc-v1"],"recipe.sh")
## grch37-gene-features-ensembl-v1 as a dependency
args = Namespace(authors='me', channel='genomics', command='make-meta-recipe', data_version='27-Apr-2009', data_provider="UCSC",
dependency=['grch37-gene-features-ensembl-v1','hg38-chrom-mapping-ensembl2ucsc-ncbi-v1','vt','samtools','bedtools'], genome_build='meta-recipe', package_version='1', keyword=['gaps', 'region'],
name='test-gaps4', platform='none', script=recipe_file, species='meta-recipe', summary='some meta-recipe',
coordinate_base="0-based-inclusive", file_type= ["Bed"], final_file=["hg19-test-gaps4-ucsc-v1.bed.gz", "hg19-test-gaps4-ucsc-v1.bed.gz.tbi"],extra_scripts=[])
assert make_meta_recipe.make_bash((),args)
new_recipe_file = os.path.join("./", ggd_package, "recipe.sh")
assert os.path.exists(new_recipe_file)
assert os.path.isfile(new_recipe_file)
new_metarecipe_file = os.path.join("./", ggd_package, "metarecipe.sh")
    assert os.path.exists(new_metarecipe_file)
    assert os.path.isfile(new_metarecipe_file)
new_metayaml_file = os.path.join("./", ggd_package, "meta.yaml")
assert os.path.exists(new_metayaml_file)
assert os.path.isfile(new_metayaml_file)
new_postlink_file = os.path.join("./", ggd_package, "post-link.sh")
assert os.path.exists(new_postlink_file)
assert os.path.isfile(new_postlink_file)
new_checksums_file = os.path.join("./", ggd_package, "checksums_file.txt")
assert os.path.exists(new_checksums_file)
assert os.path.isfile(new_checksums_file)
## Test meta.yaml has an ggd dependency in the run requirements and not the build requirements
try:
with open(new_metayaml_file, "r") as mf:
yamldict = yaml.safe_load(mf)
assert yamldict["requirements"]["build"] == ['bedtools', 'gsort', 'htslib', 'samtools', 'vt', 'zlib']
assert "grch37-gene-features-ensembl-v1" not in yamldict["requirements"]["build"]
assert "hg38-chrom-mapping-ensembl2ucsc-ncbi-v1" not in yamldict["requirements"]["build"]
assert yamldict["requirements"]["run"] == ['bedtools', 'grch37-gene-features-ensembl-v1', 'gsort', 'hg38-chrom-mapping-ensembl2ucsc-ncbi-v1', 'htslib', 'samtools', 'vt', 'zlib']
assert "grch37-gene-features-ensembl-v1" in yamldict["requirements"]["run"]
assert "hg38-chrom-mapping-ensembl2ucsc-ncbi-v1" in yamldict["requirements"]["run"]
except IOError as e:
print(e)
assert False
os.remove(new_recipe_file)
os.remove(new_metarecipe_file)
os.remove(new_metayaml_file)
os.remove(new_postlink_file)
os.remove(new_checksums_file)
os.rmdir(ggd_package)
| gogetdata/ggd-cli | tests/test_make_meta_recipe.py | Python | mit | 47,025 |
"""Unit tests for physical_constraints.py."""
import copy
import unittest
import numpy
import pandas
from keras import backend as K
from gewittergefahr.gg_utils import physical_constraints
from gewittergefahr.gg_utils import radar_utils
from gewittergefahr.gg_utils import soundings
from gewittergefahr.deep_learning import cnn
from gewittergefahr.deep_learning import input_examples
from gewittergefahr.deep_learning import deep_learning_utils as dl_utils
from gewittergefahr.deep_learning import training_validation_io as trainval_io
TOLERANCE = 1e-6
# The following constants are used to test _find_constrained_radar_channels.
THESE_FIELD_NAMES = (
[radar_utils.REFL_NAME] * 3 +
[radar_utils.SPECTRUM_WIDTH_NAME] * 3 +
[radar_utils.VORTICITY_NAME] * 3 +
[radar_utils.VORTICITY_NAME] * 3
)
THESE_OPERATION_NAMES = 4 * [
input_examples.MIN_OPERATION_NAME, input_examples.MEAN_OPERATION_NAME,
input_examples.MAX_OPERATION_NAME
]
THESE_MIN_HEIGHTS_M_AGL = numpy.array(
[1000] * 3 + [1000] * 3 + [2000] * 3 + [5000] * 3,
dtype=int
)
THESE_MAX_HEIGHTS_M_AGL = numpy.array(
[3000] * 3 + [3000] * 3 + [4000] * 3 + [8000] * 3,
dtype=int
)
FIRST_LIST_OF_OPERATION_DICTS = [
{
input_examples.RADAR_FIELD_KEY: THESE_FIELD_NAMES[m],
input_examples.OPERATION_NAME_KEY: THESE_OPERATION_NAMES[m],
input_examples.MIN_HEIGHT_KEY: THESE_MIN_HEIGHTS_M_AGL[m],
input_examples.MAX_HEIGHT_KEY: THESE_MAX_HEIGHTS_M_AGL[m]
}
for m in range(len(THESE_MIN_HEIGHTS_M_AGL))
]
FIRST_GREATER_INDICES = numpy.array([1, 2, 4, 5, 7, 8, 10, 11], dtype=int)
FIRST_LESS_INDICES = numpy.array([0, 1, 3, 4, 6, 7, 9, 10], dtype=int)
THESE_FIELD_NAMES = (
[radar_utils.REFL_NAME] * 2 +
[radar_utils.SPECTRUM_WIDTH_NAME] * 2 +
[radar_utils.VORTICITY_NAME] * 2 +
[radar_utils.VORTICITY_NAME] * 2
)
THESE_OPERATION_NAMES = [
input_examples.MEAN_OPERATION_NAME, input_examples.MIN_OPERATION_NAME,
input_examples.MEAN_OPERATION_NAME, input_examples.MAX_OPERATION_NAME,
input_examples.MIN_OPERATION_NAME, input_examples.MEAN_OPERATION_NAME,
input_examples.MAX_OPERATION_NAME, input_examples.MIN_OPERATION_NAME
]
THESE_MIN_HEIGHTS_M_AGL = numpy.array(
[1000] * 2 + [1000] * 2 + [2000] * 2 + [5000] * 2,
dtype=int
)
THESE_MAX_HEIGHTS_M_AGL = numpy.array(
[3000] * 2 + [3000] * 2 + [4000] * 2 + [8000] * 2,
dtype=int
)
SECOND_LIST_OF_OPERATION_DICTS = [
{
input_examples.RADAR_FIELD_KEY: THESE_FIELD_NAMES[m],
input_examples.OPERATION_NAME_KEY: THESE_OPERATION_NAMES[m],
input_examples.MIN_HEIGHT_KEY: THESE_MIN_HEIGHTS_M_AGL[m],
input_examples.MAX_HEIGHT_KEY: THESE_MAX_HEIGHTS_M_AGL[m]
}
for m in range(len(THESE_MIN_HEIGHTS_M_AGL))
]
SECOND_GREATER_INDICES = numpy.array([0, 3, 5, 6], dtype=int)
SECOND_LESS_INDICES = numpy.array([1, 2, 4, 7], dtype=int)
THESE_FIELD_NAMES = (
[radar_utils.REFL_NAME] +
[radar_utils.SPECTRUM_WIDTH_NAME] * 2 +
[radar_utils.VORTICITY_NAME] +
[radar_utils.VORTICITY_NAME]
)
THESE_OPERATION_NAMES = [
input_examples.MEAN_OPERATION_NAME,
input_examples.MEAN_OPERATION_NAME, input_examples.MAX_OPERATION_NAME,
input_examples.MIN_OPERATION_NAME,
input_examples.MAX_OPERATION_NAME
]
THESE_MIN_HEIGHTS_M_AGL = numpy.array(
[1000] + [1000] * 2 + [2000] + [5000],
dtype=int
)
THESE_MAX_HEIGHTS_M_AGL = numpy.array(
[3000] + [3000] * 2 + [4000] + [8000],
dtype=int
)
THIRD_LIST_OF_OPERATION_DICTS = [
{
input_examples.RADAR_FIELD_KEY: THESE_FIELD_NAMES[m],
input_examples.OPERATION_NAME_KEY: THESE_OPERATION_NAMES[m],
input_examples.MIN_HEIGHT_KEY: THESE_MIN_HEIGHTS_M_AGL[m],
input_examples.MAX_HEIGHT_KEY: THESE_MAX_HEIGHTS_M_AGL[m]
}
for m in range(len(THESE_MIN_HEIGHTS_M_AGL))
]
THIRD_GREATER_INDICES = numpy.array([2], dtype=int)
THIRD_LESS_INDICES = numpy.array([1], dtype=int)
THESE_FIELD_NAMES = [
radar_utils.REFL_NAME, radar_utils.SPECTRUM_WIDTH_NAME,
radar_utils.VORTICITY_NAME, radar_utils.VORTICITY_NAME
]
THESE_OPERATION_NAMES = [
input_examples.MEAN_OPERATION_NAME, input_examples.MEAN_OPERATION_NAME,
input_examples.MIN_OPERATION_NAME, input_examples.MAX_OPERATION_NAME
]
THESE_MIN_HEIGHTS_M_AGL = numpy.array([1000, 1000, 2000, 5000], dtype=int)
THESE_MAX_HEIGHTS_M_AGL = numpy.array([3000, 3000, 4000, 8000], dtype=int)
FOURTH_LIST_OF_OPERATION_DICTS = [
{
input_examples.RADAR_FIELD_KEY: THESE_FIELD_NAMES[m],
input_examples.OPERATION_NAME_KEY: THESE_OPERATION_NAMES[m],
input_examples.MIN_HEIGHT_KEY: THESE_MIN_HEIGHTS_M_AGL[m],
input_examples.MAX_HEIGHT_KEY: THESE_MAX_HEIGHTS_M_AGL[m]
}
for m in range(len(THESE_MIN_HEIGHTS_M_AGL))
]
FOURTH_GREATER_INDICES = numpy.array([], dtype=int)
FOURTH_LESS_INDICES = numpy.array([], dtype=int)
# The following constants are used to test radar_constraints_to_loss_fn.
NUM_EXAMPLES = 10
NUM_RADAR_ROWS = 32
NUM_RADAR_COLUMNS = 32
# The following constants are used to test _normalize_minima_and_maxima.
NUM_REFL_HEIGHTS = 12
NUM_SOUNDING_HEIGHTS = 49
MIN_NORMALIZED_VALUE = 0.
MAX_NORMALIZED_VALUE = 1.
AZ_SHEAR_FIELD_NAMES = [
radar_utils.LOW_LEVEL_SHEAR_NAME, radar_utils.MID_LEVEL_SHEAR_NAME
]
SOUNDING_FIELD_NAMES = [
soundings.VIRTUAL_POTENTIAL_TEMPERATURE_NAME,
soundings.SPECIFIC_HUMIDITY_NAME, soundings.RELATIVE_HUMIDITY_NAME,
soundings.U_WIND_NAME, soundings.V_WIND_NAME
]
THIS_DICT = {
radar_utils.REFL_NAME: numpy.array([22, 15, 0, 77.5]),
radar_utils.SPECTRUM_WIDTH_NAME: numpy.array([3, 1.5, 0, 10]),
radar_utils.VORTICITY_NAME: numpy.array([2e-4, 3e-4, 0, 0.02]),
radar_utils.DIVERGENCE_NAME: numpy.array([2e-4, 2e-4, 0, 0.015]),
radar_utils.LOW_LEVEL_SHEAR_NAME: numpy.array([2e-4, 3e-4, 0, 0.02]),
radar_utils.MID_LEVEL_SHEAR_NAME: numpy.array([2e-4, 2e-4, 0, 0.015])
}
RADAR_NORMALIZATION_TABLE = pandas.DataFrame.from_dict(
THIS_DICT, orient='index')
THIS_RENAMING_DICT = {
0: dl_utils.MEAN_VALUE_COLUMN,
1: dl_utils.STANDARD_DEVIATION_COLUMN,
2: dl_utils.MIN_VALUE_COLUMN,
3: dl_utils.MAX_VALUE_COLUMN
}
RADAR_NORMALIZATION_TABLE.rename(columns=THIS_RENAMING_DICT, inplace=True)
THIS_DICT = {
soundings.VIRTUAL_POTENTIAL_TEMPERATURE_NAME:
numpy.array([300, 50, 250, 500.5]),
soundings.SPECIFIC_HUMIDITY_NAME: numpy.array([0.004, 0.003, 0, 0.02]),
soundings.RELATIVE_HUMIDITY_NAME: numpy.array([0.7, 0.2, 0, 1]),
soundings.U_WIND_NAME: numpy.array([-0.5, 5, -30, 30]),
soundings.V_WIND_NAME: numpy.array([0.5, 5, -30, 30])
}
SOUNDING_NORMALIZATION_TABLE = pandas.DataFrame.from_dict(
THIS_DICT, orient='index')
SOUNDING_NORMALIZATION_TABLE.rename(columns=THIS_RENAMING_DICT, inplace=True)
THIS_REFLECTIVITY_TENSOR = K.placeholder(
shape=(NUM_EXAMPLES, NUM_RADAR_ROWS, NUM_RADAR_COLUMNS, NUM_REFL_HEIGHTS,
1),
dtype=float
)
THIS_AZ_SHEAR_TENSOR = K.placeholder(
shape=(
NUM_EXAMPLES, NUM_RADAR_ROWS, NUM_RADAR_COLUMNS,
len(AZ_SHEAR_FIELD_NAMES)
), dtype=float
)
THIS_SOUNDING_TENSOR = K.placeholder(
shape=(NUM_EXAMPLES, NUM_SOUNDING_HEIGHTS, len(SOUNDING_FIELD_NAMES)),
dtype=float
)
FIRST_LIST_OF_INPUT_TENSORS = [
THIS_REFLECTIVITY_TENSOR, THIS_AZ_SHEAR_TENSOR, THIS_SOUNDING_TENSOR
]
FIRST_METADATA_DICT = {
cnn.CONV_2D3D_KEY: True,
cnn.LAYER_OPERATIONS_KEY: None,
cnn.TRAINING_OPTION_DICT_KEY: {
trainval_io.RADAR_FIELDS_KEY: AZ_SHEAR_FIELD_NAMES,
trainval_io.SOUNDING_FIELDS_KEY: SOUNDING_FIELD_NAMES,
trainval_io.MIN_NORMALIZED_VALUE_KEY: MIN_NORMALIZED_VALUE,
trainval_io.MAX_NORMALIZED_VALUE_KEY: MAX_NORMALIZED_VALUE
}
}
FIRST_MIN_VALUES_Z_NORM = [
numpy.array([-22. / 15]),
numpy.full(len(AZ_SHEAR_FIELD_NAMES), numpy.nan),
numpy.array([-6, -4. / 3, -3.5, numpy.nan, numpy.nan])
]
FIRST_MAX_VALUES_Z_NORM = [
numpy.full(1, numpy.nan),
numpy.full(len(AZ_SHEAR_FIELD_NAMES), numpy.nan),
numpy.array([numpy.nan, 332, 1.5, numpy.nan, numpy.nan])
]
FIRST_MIN_VALUES_MINMAX_NORM = [
numpy.array([0.]),
numpy.full(len(AZ_SHEAR_FIELD_NAMES), numpy.nan),
numpy.array([0, 0, 0, numpy.nan, numpy.nan])
]
FIRST_MAX_VALUES_MINMAX_NORM = [
numpy.full(1, numpy.nan),
numpy.full(len(AZ_SHEAR_FIELD_NAMES), numpy.nan),
numpy.array([numpy.nan, 1, 1, numpy.nan, numpy.nan])
]
SECOND_LIST_OF_INPUT_TENSORS = [THIS_AZ_SHEAR_TENSOR, THIS_SOUNDING_TENSOR]
SECOND_METADATA_DICT = {
cnn.CONV_2D3D_KEY: False,
cnn.LAYER_OPERATIONS_KEY: None,
cnn.TRAINING_OPTION_DICT_KEY: {
trainval_io.RADAR_FIELDS_KEY: AZ_SHEAR_FIELD_NAMES,
trainval_io.SOUNDING_FIELDS_KEY: SOUNDING_FIELD_NAMES,
trainval_io.MIN_NORMALIZED_VALUE_KEY: MIN_NORMALIZED_VALUE,
trainval_io.MAX_NORMALIZED_VALUE_KEY: MAX_NORMALIZED_VALUE
}
}
SECOND_MIN_VALUES_Z_NORM = [
numpy.full(len(AZ_SHEAR_FIELD_NAMES), numpy.nan),
numpy.array([-6, -4. / 3, -3.5, numpy.nan, numpy.nan])
]
SECOND_MAX_VALUES_Z_NORM = [
numpy.full(len(AZ_SHEAR_FIELD_NAMES), numpy.nan),
numpy.array([numpy.nan, 332, 1.5, numpy.nan, numpy.nan])
]
SECOND_MIN_VALUES_MINMAX_NORM = [
numpy.full(len(AZ_SHEAR_FIELD_NAMES), numpy.nan),
numpy.array([0, 0, 0, numpy.nan, numpy.nan])
]
SECOND_MAX_VALUES_MINMAX_NORM = [
numpy.full(len(AZ_SHEAR_FIELD_NAMES), numpy.nan),
numpy.array([numpy.nan, 1, 1, numpy.nan, numpy.nan])
]
THIS_LIST_OF_OPERATION_DICTS = copy.deepcopy(FOURTH_LIST_OF_OPERATION_DICTS)
THIS_RADAR_TENSOR = K.placeholder(
shape=(
NUM_EXAMPLES, NUM_RADAR_ROWS, NUM_RADAR_COLUMNS,
len(THIS_LIST_OF_OPERATION_DICTS)
), dtype=float
)
THIRD_LIST_OF_INPUT_TENSORS = [THIS_RADAR_TENSOR, THIS_SOUNDING_TENSOR]
THIRD_METADATA_DICT = {
cnn.CONV_2D3D_KEY: False,
cnn.LAYER_OPERATIONS_KEY: THIS_LIST_OF_OPERATION_DICTS,
cnn.TRAINING_OPTION_DICT_KEY: {
trainval_io.SOUNDING_FIELDS_KEY: SOUNDING_FIELD_NAMES,
trainval_io.MIN_NORMALIZED_VALUE_KEY: MIN_NORMALIZED_VALUE,
trainval_io.MAX_NORMALIZED_VALUE_KEY: MAX_NORMALIZED_VALUE
}
}
THIRD_MIN_VALUES_Z_NORM = [
numpy.array([-22. / 15, -2, numpy.nan, numpy.nan]),
numpy.array([-6, -4. / 3, -3.5, numpy.nan, numpy.nan])
]
THIRD_MAX_VALUES_Z_NORM = [
numpy.full(len(THIS_LIST_OF_OPERATION_DICTS), numpy.nan),
numpy.array([numpy.nan, 332, 1.5, numpy.nan, numpy.nan])
]
THIRD_MIN_VALUES_MINMAX_NORM = [
numpy.array([0, 0, numpy.nan, numpy.nan]),
numpy.array([0, 0, 0, numpy.nan, numpy.nan])
]
THIRD_MAX_VALUES_MINMAX_NORM = [
numpy.full(len(THIS_LIST_OF_OPERATION_DICTS), numpy.nan),
numpy.array([numpy.nan, 1, 1, numpy.nan, numpy.nan])
]
FOURTH_LIST_OF_INPUT_TENSORS = [THIS_RADAR_TENSOR]
FOURTH_METADATA_DICT = {
cnn.CONV_2D3D_KEY: False,
cnn.LAYER_OPERATIONS_KEY: THIS_LIST_OF_OPERATION_DICTS,
cnn.TRAINING_OPTION_DICT_KEY: {
trainval_io.SOUNDING_FIELDS_KEY: None,
trainval_io.MIN_NORMALIZED_VALUE_KEY: MIN_NORMALIZED_VALUE,
trainval_io.MAX_NORMALIZED_VALUE_KEY: MAX_NORMALIZED_VALUE
}
}
FOURTH_MIN_VALUES_Z_NORM = [
numpy.array([-22. / 15, -2, numpy.nan, numpy.nan])
]
FOURTH_MAX_VALUES_Z_NORM = [
numpy.full(len(THIS_LIST_OF_OPERATION_DICTS), numpy.nan)
]
FOURTH_MIN_VALUES_MINMAX_NORM = [
numpy.array([0, 0, numpy.nan, numpy.nan])
]
FOURTH_MAX_VALUES_MINMAX_NORM = [
numpy.full(len(THIS_LIST_OF_OPERATION_DICTS), numpy.nan)
]
def _compare_array_lists(first_array_list, second_array_list):
"""Compares two lists of numpy arrays.
    Each array in both lists must be 1-D.
:param first_array_list: First list.
:param second_array_list: Second list.
:return: are_lists_equal: Boolean flag.
"""
num_first_arrays = len(first_array_list)
num_second_arrays = len(second_array_list)
if num_first_arrays != num_second_arrays:
return False
for i in range(num_first_arrays):
if not numpy.allclose(
first_array_list[i], second_array_list[i], atol=TOLERANCE,
equal_nan=True):
return False
return True
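# Illustrative example (values not from the test data above): lists whose
# arrays match element-wise, including NaN positions, compare equal because
# numpy.allclose is called with equal_nan=True.
#     _compare_array_lists([numpy.array([1., numpy.nan])],
#                          [numpy.array([1., numpy.nan])])  # -> True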
class PhysicalConstraintsTests(unittest.TestCase):
"""Each method is a unit test for physical_constraints.py."""
def test_find_constrained_radar_channels_first(self):
"""Ensures correct output from _find_constrained_radar_channels.
In this case, using first set of channels.
"""
these_greater_indices, these_less_indices = (
physical_constraints._find_constrained_radar_channels(
FIRST_LIST_OF_OPERATION_DICTS)
)
self.assertTrue(numpy.array_equal(
these_greater_indices, FIRST_GREATER_INDICES
))
self.assertTrue(numpy.array_equal(
these_less_indices, FIRST_LESS_INDICES
))
def test_find_constrained_radar_channels_second(self):
"""Ensures correct output from _find_constrained_radar_channels.
In this case, using second set of channels.
"""
these_greater_indices, these_less_indices = (
physical_constraints._find_constrained_radar_channels(
SECOND_LIST_OF_OPERATION_DICTS)
)
self.assertTrue(numpy.array_equal(
these_greater_indices, SECOND_GREATER_INDICES
))
self.assertTrue(numpy.array_equal(
these_less_indices, SECOND_LESS_INDICES
))
def test_find_constrained_radar_channels_third(self):
"""Ensures correct output from _find_constrained_radar_channels.
In this case, using third set of channels.
"""
these_greater_indices, these_less_indices = (
physical_constraints._find_constrained_radar_channels(
THIRD_LIST_OF_OPERATION_DICTS)
)
self.assertTrue(numpy.array_equal(
these_greater_indices, THIRD_GREATER_INDICES
))
self.assertTrue(numpy.array_equal(
these_less_indices, THIRD_LESS_INDICES
))
def test_find_constrained_radar_channels_fourth(self):
"""Ensures correct output from _find_constrained_radar_channels.
In this case, using fourth set of channels.
"""
these_greater_indices, these_less_indices = (
physical_constraints._find_constrained_radar_channels(
FOURTH_LIST_OF_OPERATION_DICTS)
)
self.assertTrue(numpy.array_equal(
these_greater_indices, FOURTH_GREATER_INDICES
))
self.assertTrue(numpy.array_equal(
these_less_indices, FOURTH_LESS_INDICES
))
def test_radar_constraints_to_loss_fn_first(self):
"""Ensures correct output from radar_constraints_to_loss_fn.
In this case, using first set of channels.
"""
these_dimensions = (
NUM_EXAMPLES, NUM_RADAR_ROWS, NUM_RADAR_COLUMNS,
len(FIRST_LIST_OF_OPERATION_DICTS)
)
this_radar_tensor = K.placeholder(shape=these_dimensions, dtype=float)
this_loss_tensor = physical_constraints.radar_constraints_to_loss_fn(
radar_tensor=this_radar_tensor,
list_of_layer_operation_dicts=FIRST_LIST_OF_OPERATION_DICTS)
self.assertTrue(this_loss_tensor is not None)
def test_radar_constraints_to_loss_fn_second(self):
"""Ensures correct output from radar_constraints_to_loss_fn.
In this case, using second set of channels.
"""
these_dimensions = (
NUM_EXAMPLES, NUM_RADAR_ROWS, NUM_RADAR_COLUMNS,
len(SECOND_LIST_OF_OPERATION_DICTS)
)
this_radar_tensor = K.placeholder(shape=these_dimensions, dtype=float)
this_loss_tensor = physical_constraints.radar_constraints_to_loss_fn(
radar_tensor=this_radar_tensor,
list_of_layer_operation_dicts=SECOND_LIST_OF_OPERATION_DICTS)
self.assertTrue(this_loss_tensor is not None)
def test_radar_constraints_to_loss_fn_third(self):
"""Ensures correct output from radar_constraints_to_loss_fn.
In this case, using third set of channels.
"""
these_dimensions = (
NUM_EXAMPLES, NUM_RADAR_ROWS, NUM_RADAR_COLUMNS,
len(THIRD_LIST_OF_OPERATION_DICTS)
)
this_radar_tensor = K.placeholder(shape=these_dimensions, dtype=float)
this_loss_tensor = physical_constraints.radar_constraints_to_loss_fn(
radar_tensor=this_radar_tensor,
list_of_layer_operation_dicts=THIRD_LIST_OF_OPERATION_DICTS)
self.assertTrue(this_loss_tensor is not None)
def test_radar_constraints_to_loss_fn_fourth(self):
"""Ensures correct output from radar_constraints_to_loss_fn.
In this case, using fourth set of channels.
"""
these_dimensions = (
NUM_EXAMPLES, NUM_RADAR_ROWS, NUM_RADAR_COLUMNS,
len(FOURTH_LIST_OF_OPERATION_DICTS)
)
this_radar_tensor = K.placeholder(shape=these_dimensions, dtype=float)
this_loss_tensor = physical_constraints.radar_constraints_to_loss_fn(
radar_tensor=this_radar_tensor,
list_of_layer_operation_dicts=FOURTH_LIST_OF_OPERATION_DICTS)
self.assertTrue(this_loss_tensor is None)
def test_normalize_minima_and_maxima_first_z(self):
"""Ensures correct output from _normalize_minima_and_maxima.
In this case, using first set of inputs and z-score normalization.
"""
FIRST_METADATA_DICT[cnn.TRAINING_OPTION_DICT_KEY][
trainval_io.NORMALIZATION_TYPE_KEY
] = dl_utils.Z_NORMALIZATION_TYPE_STRING
these_min_values_by_tensor, these_max_values_by_tensor = (
physical_constraints._normalize_minima_and_maxima(
list_of_input_tensors=FIRST_LIST_OF_INPUT_TENSORS,
cnn_metadata_dict=FIRST_METADATA_DICT, test_mode=True,
radar_normalization_table=RADAR_NORMALIZATION_TABLE,
sounding_normalization_table=SOUNDING_NORMALIZATION_TABLE)
)
self.assertTrue(_compare_array_lists(
FIRST_MIN_VALUES_Z_NORM, these_min_values_by_tensor
))
self.assertTrue(_compare_array_lists(
FIRST_MAX_VALUES_Z_NORM, these_max_values_by_tensor
))
def test_normalize_minima_and_maxima_first_minmax(self):
"""Ensures correct output from _normalize_minima_and_maxima.
In this case, using first set of inputs and minmax normalization.
"""
FIRST_METADATA_DICT[cnn.TRAINING_OPTION_DICT_KEY][
trainval_io.NORMALIZATION_TYPE_KEY
] = dl_utils.MINMAX_NORMALIZATION_TYPE_STRING
these_min_values_by_tensor, these_max_values_by_tensor = (
physical_constraints._normalize_minima_and_maxima(
list_of_input_tensors=FIRST_LIST_OF_INPUT_TENSORS,
cnn_metadata_dict=FIRST_METADATA_DICT, test_mode=True,
radar_normalization_table=RADAR_NORMALIZATION_TABLE,
sounding_normalization_table=SOUNDING_NORMALIZATION_TABLE)
)
self.assertTrue(_compare_array_lists(
FIRST_MIN_VALUES_MINMAX_NORM, these_min_values_by_tensor
))
self.assertTrue(_compare_array_lists(
FIRST_MAX_VALUES_MINMAX_NORM, these_max_values_by_tensor
))
def test_normalize_minima_and_maxima_second_z(self):
"""Ensures correct output from _normalize_minima_and_maxima.
In this case, using second set of inputs and z-score normalization.
"""
SECOND_METADATA_DICT[cnn.TRAINING_OPTION_DICT_KEY][
trainval_io.NORMALIZATION_TYPE_KEY
] = dl_utils.Z_NORMALIZATION_TYPE_STRING
these_min_values_by_tensor, these_max_values_by_tensor = (
physical_constraints._normalize_minima_and_maxima(
list_of_input_tensors=SECOND_LIST_OF_INPUT_TENSORS,
cnn_metadata_dict=SECOND_METADATA_DICT, test_mode=True,
radar_normalization_table=RADAR_NORMALIZATION_TABLE,
sounding_normalization_table=SOUNDING_NORMALIZATION_TABLE)
)
self.assertTrue(_compare_array_lists(
SECOND_MIN_VALUES_Z_NORM, these_min_values_by_tensor
))
self.assertTrue(_compare_array_lists(
SECOND_MAX_VALUES_Z_NORM, these_max_values_by_tensor
))
def test_normalize_minima_and_maxima_second_minmax(self):
"""Ensures correct output from _normalize_minima_and_maxima.
In this case, using second set of inputs and minmax normalization.
"""
SECOND_METADATA_DICT[cnn.TRAINING_OPTION_DICT_KEY][
trainval_io.NORMALIZATION_TYPE_KEY
] = dl_utils.MINMAX_NORMALIZATION_TYPE_STRING
these_min_values_by_tensor, these_max_values_by_tensor = (
physical_constraints._normalize_minima_and_maxima(
list_of_input_tensors=SECOND_LIST_OF_INPUT_TENSORS,
cnn_metadata_dict=SECOND_METADATA_DICT, test_mode=True,
radar_normalization_table=RADAR_NORMALIZATION_TABLE,
sounding_normalization_table=SOUNDING_NORMALIZATION_TABLE)
)
self.assertTrue(_compare_array_lists(
SECOND_MIN_VALUES_MINMAX_NORM, these_min_values_by_tensor
))
self.assertTrue(_compare_array_lists(
SECOND_MAX_VALUES_MINMAX_NORM, these_max_values_by_tensor
))
def test_normalize_minima_and_maxima_third_z(self):
"""Ensures correct output from _normalize_minima_and_maxima.
In this case, using third set of inputs and z-score normalization.
"""
THIRD_METADATA_DICT[cnn.TRAINING_OPTION_DICT_KEY][
trainval_io.NORMALIZATION_TYPE_KEY
] = dl_utils.Z_NORMALIZATION_TYPE_STRING
these_min_values_by_tensor, these_max_values_by_tensor = (
physical_constraints._normalize_minima_and_maxima(
list_of_input_tensors=THIRD_LIST_OF_INPUT_TENSORS,
cnn_metadata_dict=THIRD_METADATA_DICT, test_mode=True,
radar_normalization_table=RADAR_NORMALIZATION_TABLE,
sounding_normalization_table=SOUNDING_NORMALIZATION_TABLE)
)
self.assertTrue(_compare_array_lists(
THIRD_MIN_VALUES_Z_NORM, these_min_values_by_tensor
))
self.assertTrue(_compare_array_lists(
THIRD_MAX_VALUES_Z_NORM, these_max_values_by_tensor
))
def test_normalize_minima_and_maxima_third_minmax(self):
"""Ensures correct output from _normalize_minima_and_maxima.
In this case, using third set of inputs and minmax normalization.
"""
THIRD_METADATA_DICT[cnn.TRAINING_OPTION_DICT_KEY][
trainval_io.NORMALIZATION_TYPE_KEY
] = dl_utils.MINMAX_NORMALIZATION_TYPE_STRING
these_min_values_by_tensor, these_max_values_by_tensor = (
physical_constraints._normalize_minima_and_maxima(
list_of_input_tensors=THIRD_LIST_OF_INPUT_TENSORS,
cnn_metadata_dict=THIRD_METADATA_DICT, test_mode=True,
radar_normalization_table=RADAR_NORMALIZATION_TABLE,
sounding_normalization_table=SOUNDING_NORMALIZATION_TABLE)
)
self.assertTrue(_compare_array_lists(
THIRD_MIN_VALUES_MINMAX_NORM, these_min_values_by_tensor
))
self.assertTrue(_compare_array_lists(
THIRD_MAX_VALUES_MINMAX_NORM, these_max_values_by_tensor
))
def test_normalize_minima_and_maxima_fourth_z(self):
"""Ensures correct output from _normalize_minima_and_maxima.
In this case, using fourth set of inputs and z-score normalization.
"""
FOURTH_METADATA_DICT[cnn.TRAINING_OPTION_DICT_KEY][
trainval_io.NORMALIZATION_TYPE_KEY
] = dl_utils.Z_NORMALIZATION_TYPE_STRING
these_min_values_by_tensor, these_max_values_by_tensor = (
physical_constraints._normalize_minima_and_maxima(
list_of_input_tensors=FOURTH_LIST_OF_INPUT_TENSORS,
cnn_metadata_dict=FOURTH_METADATA_DICT, test_mode=True,
radar_normalization_table=RADAR_NORMALIZATION_TABLE,
sounding_normalization_table=SOUNDING_NORMALIZATION_TABLE)
)
self.assertTrue(_compare_array_lists(
FOURTH_MIN_VALUES_Z_NORM, these_min_values_by_tensor
))
self.assertTrue(_compare_array_lists(
FOURTH_MAX_VALUES_Z_NORM, these_max_values_by_tensor
))
def test_normalize_minima_and_maxima_fourth_minmax(self):
"""Ensures correct output from _normalize_minima_and_maxima.
In this case, using fourth set of inputs and minmax normalization.
"""
FOURTH_METADATA_DICT[cnn.TRAINING_OPTION_DICT_KEY][
trainval_io.NORMALIZATION_TYPE_KEY
] = dl_utils.MINMAX_NORMALIZATION_TYPE_STRING
these_min_values_by_tensor, these_max_values_by_tensor = (
physical_constraints._normalize_minima_and_maxima(
list_of_input_tensors=FOURTH_LIST_OF_INPUT_TENSORS,
cnn_metadata_dict=FOURTH_METADATA_DICT, test_mode=True,
radar_normalization_table=RADAR_NORMALIZATION_TABLE,
sounding_normalization_table=SOUNDING_NORMALIZATION_TABLE)
)
self.assertTrue(_compare_array_lists(
FOURTH_MIN_VALUES_MINMAX_NORM, these_min_values_by_tensor
))
self.assertTrue(_compare_array_lists(
FOURTH_MAX_VALUES_MINMAX_NORM, these_max_values_by_tensor
))
if __name__ == '__main__':
unittest.main()
| thunderhoser/GewitterGefahr | gewittergefahr/gg_utils/physical_constraints_test.py | Python | mit | 25,777 |
from __future__ import absolute_import, unicode_literals
from . import abase
class Subscription(
abase.CreatableResource, abase.ListableResource, abase.DeletableResource
):
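    """Subscription resource; the abase mixins above provide create, list and delete operations."""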
RESOURCE = "subscription"
RESOURCE_PATH = "subscriptions"
| reincubate/ricloud | ricloud/resources/subscriptions.py | Python | lgpl-3.0 | 246 |
# coding=utf-8
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
# Copyright (c) Mercurial Contributors.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import
import os
import sys
from edenscm.mercurial.pycompat import decodeutf8
from testutil.dott import feature, sh, shlib, testtmp # noqa: F401
sh % "enable commitextras"
sh % "setconfig 'ui.allowemptycommit=1'"
sh % "HGENCODING=utf-8"
sh % "cat" << r'''
import edenscm.mercurial.revset
baseset = edenscm.mercurial.revset.baseset
def r3232(repo, subset, x):
""""simple revset that return [3,2,3,2]
revisions duplicated on purpose.
"""
if 3 not in subset:
if 2 in subset:
return baseset([2,2], repo=repo)
return baseset(repo=repo)
return baseset([3,3,2,2], repo=repo)
edenscm.mercurial.revset.symbols['r3232'] = r3232
''' > "testrevset.py"
(
sh % "cat"
<< r"""
[extensions]
testrevset=$TESTTMP/testrevset.py
"""
>> "$HGRCPATH"
)
def _try(*args):
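    """Run 'hg debugrevspec --debug' with the given arguments and return the output."""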
return sh.hg("debugrevspec", "--debug", *args)
def trylist(*args):
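    """Run 'hg debugrevlistspec --debug' with the given arguments and return the output."""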
return sh.hg("debugrevlistspec", "--debug", *args)
def log(arg):
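    """Return the revision numbers selected by revset 'arg', one per line."""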
return sh.hg("log", "-T", "{rev}\n", "-r", arg)
_currentbranch = None
def setbranch(branch):
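    """Select the branch name that subsequent commit() calls will emulate."""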
global _currentbranch
_currentbranch = branch
def commit(*args):
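    """Commit, recording the emulated branch as a commit extra, moving its bookmark, and exporting its hash to the environment."""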
if _currentbranch:
sh.hg("bookmark", "-i")
sh.hg("commit", "--extra=branch=%s" % _currentbranch, *args)
sh.hg("bookmark", "-f", "--", _currentbranch)
os.environ[_currentbranch] = sh.hg("log", "-r", ".", "-T", "{node}")
else:
sh.hg("commit", *args)
shlib.__dict__.update(
{
"try": _try,
"trylist": trylist,
"log": log,
"setbranch": setbranch,
"commit": commit,
}
)
# extension to build '_intlist()' and '_hexlist()', which is necessary because
# these predicates use '\0' as a separator:
sh % "cat" << r"""
from __future__ import absolute_import
from edenscm.mercurial import (
node as nodemod,
registrar,
revset,
revsetlang,
smartset,
)
cmdtable = {}
command = registrar.command(cmdtable)
@command('debugrevlistspec',
[('', 'optimize', None, 'print parsed tree after optimizing'),
('', 'bin', None, 'unhexlify arguments')])
def debugrevlistspec(ui, repo, fmt, *args, **opts):
if opts['bin']:
args = map(nodemod.bin, args)
expr = revsetlang.formatspec(fmt, list(args))
if ui.verbose:
tree = revsetlang.parse(expr, lookup=repo.__contains__)
ui.note(revsetlang.prettyformat(tree), "\n")
if opts["optimize"]:
opttree = revsetlang.optimize(revsetlang.analyze(tree))
ui.note("* optimized:\n", revsetlang.prettyformat(opttree),
"\n")
func = revset.match(ui, expr, repo)
revs = func(repo)
if ui.verbose:
ui.note("* set:\n", smartset.prettyformat(revs), "\n")
for c in revs:
ui.write("%s\n" % c)
""" > "debugrevlistspec.py"
(
sh % "cat"
<< r"""
[extensions]
debugrevlistspec = $TESTTMP/debugrevlistspec.py
"""
>> "$HGRCPATH"
)
sh % "hg init repo"
sh % "cd repo"
sh % "echo a" > "a"
sh % "setbranch a"
sh % "commit -Aqm0"
sh % "echo b" > "b"
sh % "setbranch b"
sh % "commit -Aqm1"
sh % "rm a"
sh % "setbranch a-b-c-"
sh % "commit -Aqm2 -u Bob"
sh % "hg log -r 'extra('\\''branch'\\'', '\\''a-b-c-'\\'')' --template '{rev}\\n'" == "2"
sh % "hg log -r 'extra('\\''branch'\\'')' --template '{rev}\\n'" == r"""
0
1
2"""
sh % "hg log -r 'extra('\\''branch'\\'', '\\''re:a'\\'')' --template '{rev} {branch}\\n'" == r"""
0 a
2 a-b-c-"""
sh % "hg co 1" == """
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
(leaving bookmark a-b-c-)"""
sh % "setbranch +a+b+c+"
sh % "commit -Aqm3"
sh % "hg co -C 2" == """
0 files updated, 0 files merged, 1 files removed, 0 files unresolved
(leaving bookmark +a+b+c+)"""
sh % "echo bb" > "b"
sh % "setbranch -a-b-c-"
sh % "commit -Aqm4 -d 'May 12 2005 UTC'"
sh % "hg co -C 3" == """
2 files updated, 0 files merged, 0 files removed, 0 files unresolved
(leaving bookmark -a-b-c-)"""
sh % "setbranch '!a/b/c/'"
sh % "commit '-Aqm5 bug'"
sh % "hg merge 4" == r"""
1 files updated, 0 files merged, 1 files removed, 0 files unresolved
(branch merge, don't forget to commit)"""
sh % "setbranch _a_b_c_"
sh % "commit '-Aqm6 issue619'"
sh % "setbranch .a.b.c."
sh % "commit -Aqm7"
sh % "setbranch all"
sh % "hg co 4" == """
0 files updated, 0 files merged, 0 files removed, 0 files unresolved
(leaving bookmark .a.b.c.)"""
sh % decodeutf8(b"setbranch '\xc3\xa9'")
sh % "commit -Aqm9"
sh % "hg book -fr 6 1.0"
sh % "echo 'e0cc66ef77e8b6f711815af4e001a6594fde3ba5 1.0'" >> ".hgtags"
sh % "hg book -i"
sh % "hg commit -Aqm 'add 1.0 tag'"
sh % "hg bookmark -r6 xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
sh % "hg clone --quiet -U -r 7 . ../remote1"
sh % "hg clone --quiet -U -r 8 . ../remote2"
sh % "echo '[paths]'" >> ".hg/hgrc"
sh % "echo 'default = ../remote1'" >> ".hg/hgrc"
# trivial
sh % "try '0:1'" == r"""
(range
(symbol '0')
(symbol '1'))
* set:
<idset+ [0 1]>
0
1"""
sh % "try --optimize ':'" == r"""
(rangeall
None)
* optimized:
(rangeall
None)
* set:
<idset+ [0..=9]>
0
1
2
3
4
5
6
7
8
9"""
sh % "try '3::6'" == r"""
(dagrange
(symbol '3')
(symbol '6'))
* set:
<baseset+ [3, 5, 6]>
3
5
6"""
sh % "try '0|1|2'" == r"""
(or
(list
(symbol '0')
(symbol '1')
(symbol '2')))
* set:
<baseset [0, 1, 2]>
0
1
2"""
# names that should work without quoting
sh % "try a" == r"""
(symbol 'a')
* set:
<baseset [0]>
0"""
sh % "try b-a" == r"""
(minus
(symbol 'b')
(symbol 'a'))
* set:
<filteredset
<baseset [1]>,
<not
<baseset [0]>>>
1"""
sh % "try _a_b_c_" == r"""
(symbol '_a_b_c_')
* set:
<baseset [6]>
6"""
sh % "try _a_b_c_-a" == r"""
(minus
(symbol '_a_b_c_')
(symbol 'a'))
* set:
<filteredset
<baseset [6]>,
<not
<baseset [0]>>>
6"""
sh % "try .a.b.c." == r"""
(symbol '.a.b.c.')
* set:
<baseset [7]>
7"""
sh % "try .a.b.c.-a" == r"""
(minus
(symbol '.a.b.c.')
(symbol 'a'))
* set:
<filteredset
<baseset [7]>,
<not
<baseset [0]>>>
7"""
# names that should be caught by fallback mechanism
sh % "try -- -a-b-c-" == r"""
(symbol '-a-b-c-')
* set:
<baseset [4]>
4"""
sh % "log -a-b-c-" == "4"
sh % "try +a+b+c+" == r"""
(symbol '+a+b+c+')
* set:
<baseset [3]>
3"""
sh % "try '+a+b+c+:'" == r"""
(rangepost
(symbol '+a+b+c+'))
* set:
<idset+ [3..=9]>
3
4
5
6
7
8
9"""
sh % "try ':+a+b+c+'" == r"""
(rangepre
(symbol '+a+b+c+'))
* set:
<idset+ [0..=3]>
0
1
2
3"""
sh % "try -- '-a-b-c-:+a+b+c+'" == r"""
(range
(symbol '-a-b-c-')
(symbol '+a+b+c+'))
* set:
<idset- [3 4]>
4
3"""
sh % "log '-a-b-c-:+a+b+c+'" == r"""
4
3"""
sh % "try -- -a-b-c--a" == r"""
(minus
(minus
(minus
(negate
(symbol 'a'))
(symbol 'b'))
(symbol 'c'))
(negate
(symbol 'a')))
abort: unknown revision '-a'!
[255]"""
if sys.version_info[0] >= 3:
sh % decodeutf8(
b"try '\xc3\xa9'"
) == """
(symbol 'é')
* set:
<baseset [8]>
8"""
else:
sh % "try '\xc3\xa9'" == r"""
(symbol '\xc3\xa9')
* set:
<baseset [8]>
8"""
# no quoting needed
sh % "log '::a-b-c-'" == r"""
0
1
2"""
# quoting needed
sh % "try '\"-a-b-c-\"-a'" == r"""
(minus
(string '-a-b-c-')
(symbol 'a'))
* set:
<filteredset
<baseset [4]>,
<not
<baseset [0]>>>
4"""
sh % "log '1 or 2'" == r"""
1
2"""
sh % "log '1|2'" == r"""
1
2"""
sh % "log '1 and 2'"
sh % "log '1&2'"
sh % "try '1&2|3'" == r"""
(or
(list
(and
(symbol '1')
(symbol '2'))
(symbol '3')))
* set:
<addset
<baseset []>,
<baseset [3]>>
3"""
sh % "try '1|2&3'" == r"""
(or
(list
(symbol '1')
(and
(symbol '2')
(symbol '3'))))
* set:
<addset
<baseset [1]>,
<baseset []>>
1"""
sh % "try '1&2&3'" == r"""
(and
(and
(symbol '1')
(symbol '2'))
(symbol '3'))
* set:
<baseset []>"""
sh % "try '1|(2|3)'" == r"""
(or
(list
(symbol '1')
(group
(or
(list
(symbol '2')
(symbol '3'))))))
* set:
<addset
<baseset [1]>,
<baseset [2, 3]>>
1
2
3"""
sh % "log 1.0" == "6"
sh % "log a" == "0"
sh % "log 2785f51ee" == "0"
sh % "log 'date(2005)'" == "4"
sh % "log 'date(this is a test)'" == r"""
hg: parse error at 10: unexpected token: symbol
(date(this is a test)
^ here)
[255]"""
sh % "log 'date()'" == r"""
hg: parse error: date requires a string
[255]"""
sh % "log date" == r"""
abort: unknown revision 'date'!
[255]"""
sh % "log 'date('" == r"""
hg: parse error at 5: not a prefix: end
(date(
^ here)
[255]"""
sh % "log 'date(\"\\xy\")'" == r"""
hg: parse error: invalid \x escape* (glob)
[255]"""
sh % "log 'date(tip)'" == r"""
hg: parse error: invalid date: 'tip'
[255]"""
sh % "log '0:date'" == r"""
abort: unknown revision 'date'!
[255]"""
sh % "log '::\"date\"'" == r"""
abort: unknown revision 'date'!
[255]"""
sh % "hg book date -r 4"
sh % "log '0:date'" == r"""
0
1
2
3
4"""
sh % "log '::date'" == r"""
0
1
2
4"""
sh % "log '::\"date\"'" == r"""
0
1
2
4"""
sh % "log 'date(2005) and 1::'" == "4"
sh % "hg book -d date"
# function name should be a symbol
sh % "log '\"date\"(2005)'" == r"""
hg: parse error: not a symbol
[255]"""
# keyword arguments
sh % "log 'extra(branch, value=a)'" == "0"
sh % "log 'extra(branch, a, b)'" == r"""
hg: parse error: extra takes at most 2 positional arguments
[255]"""
sh % "log 'extra(a, label=b)'" == r"""
hg: parse error: extra got multiple values for keyword argument 'label'
[255]"""
sh % "log 'extra(label=branch, default)'" == r"""
hg: parse error: extra got an invalid argument
[255]"""
sh % "log 'extra(branch, foo+bar=baz)'" == r"""
hg: parse error: extra got an invalid argument
[255]"""
sh % "log 'extra(unknown=branch)'" == r"""
hg: parse error: extra got an unexpected keyword argument 'unknown'
[255]"""
sh % "try 'foo=bar|baz'" == r"""
(keyvalue
(symbol 'foo')
(or
(list
(symbol 'bar')
(symbol 'baz'))))
hg: parse error: can't use a key-value pair in this context
[255]"""
# right-hand side should be optimized recursively
sh % "try --optimize 'foo=(not public())'" == r"""
(keyvalue
(symbol 'foo')
(group
(not
(func
(symbol 'public')
None))))
* optimized:
(keyvalue
(symbol 'foo')
(func
(symbol '_notpublic')
None))
hg: parse error: can't use a key-value pair in this context
[255]"""
# relation-subscript operator has the highest binding strength (as function call):
sh % "hg debugrevspec -p parsed 'tip:tip^#generations[-1]'" == r"""
* parsed:
(range
(symbol 'tip')
(relsubscript
(parentpost
(symbol 'tip'))
(symbol 'generations')
(negate
(symbol '1'))))
9
8
7
6
5
4"""
sh % "hg debugrevspec -p parsed --no-show-revs 'not public()#generations[0]'" == r"""
* parsed:
(not
(relsubscript
(func
(symbol 'public')
None)
(symbol 'generations')
(symbol '0')))"""
# left-hand side of relation-subscript operator should be optimized recursively:
sh % "hg debugrevspec -p analyzed -p optimized --no-show-revs '(not public())#generations[0]'" == r"""
* analyzed:
(relsubscript
(not
(func
(symbol 'public')
None))
(symbol 'generations')
(symbol '0'))
* optimized:
(relsubscript
(func
(symbol '_notpublic')
None)
(symbol 'generations')
(symbol '0'))"""
# resolution of subscript and relation-subscript ternary operators:
sh % "hg debugrevspec -p analyzed 'tip[0]'" == r"""
* analyzed:
(subscript
(symbol 'tip')
(symbol '0'))
hg: parse error: can't use a subscript in this context
[255]"""
sh % "hg debugrevspec -p analyzed 'tip#rel[0]'" == r"""
* analyzed:
(relsubscript
(symbol 'tip')
(symbol 'rel')
(symbol '0'))
hg: parse error: unknown identifier: rel
[255]"""
sh % "hg debugrevspec -p analyzed '(tip#rel)[0]'" == r"""
* analyzed:
(subscript
(relation
(symbol 'tip')
(symbol 'rel'))
(symbol '0'))
hg: parse error: can't use a subscript in this context
[255]"""
sh % "hg debugrevspec -p analyzed 'tip#rel[0][1]'" == r"""
* analyzed:
(subscript
(relsubscript
(symbol 'tip')
(symbol 'rel')
(symbol '0'))
(symbol '1'))
hg: parse error: can't use a subscript in this context
[255]"""
sh % "hg debugrevspec -p analyzed 'tip#rel0#rel1[1]'" == r"""
* analyzed:
(relsubscript
(relation
(symbol 'tip')
(symbol 'rel0'))
(symbol 'rel1')
(symbol '1'))
hg: parse error: unknown identifier: rel1
[255]"""
sh % "hg debugrevspec -p analyzed 'tip#rel0[0]#rel1[1]'" == r"""
* analyzed:
(relsubscript
(relsubscript
(symbol 'tip')
(symbol 'rel0')
(symbol '0'))
(symbol 'rel1')
(symbol '1'))
hg: parse error: unknown identifier: rel1
[255]"""
# parse errors of relation, subscript and relation-subscript operators:
sh % "hg debugrevspec '[0]'" == r"""
hg: parse error at 0: not a prefix: [
([0]
^ here)
[255]"""
sh % "hg debugrevspec '.#'" == r"""
hg: parse error at 2: not a prefix: end
(.#
^ here)
[255]"""
sh % "hg debugrevspec '#rel'" == r"""
hg: parse error at 0: not a prefix: #
(#rel
^ here)
[255]"""
sh % "hg debugrevspec '.#rel[0'" == r"""
hg: parse error at 7: unexpected token: end
(.#rel[0
^ here)
[255]"""
sh % "hg debugrevspec '.]'" == r"""
hg: parse error at 1: invalid token
(.]
^ here)
[255]"""
sh % "hg debugrevspec '.#generations[a]'" == r"""
hg: parse error: relation subscript must be an integer
[255]"""
sh % "hg debugrevspec '.#generations[1-2]'" == r"""
hg: parse error: relation subscript must be an integer
[255]"""
# parsed tree at stages:
sh % "hg debugrevspec -p all '()'" == r"""
* parsed:
(group
None)
* expanded:
(group
None)
* concatenated:
(group
None)
* analyzed:
None
* optimized:
None
hg: parse error: missing argument
[255]"""
sh % "hg debugrevspec --no-optimized -p all '()'" == r"""
* parsed:
(group
None)
* expanded:
(group
None)
* concatenated:
(group
None)
* analyzed:
None
hg: parse error: missing argument
[255]"""
sh % "hg debugrevspec -p parsed -p analyzed -p optimized '(0|1)-1'" == r"""
* parsed:
(minus
(group
(or
(list
(symbol '0')
(symbol '1'))))
(symbol '1'))
* analyzed:
(and
(or
(list
(symbol '0')
(symbol '1')))
(not
(symbol '1')))
* optimized:
(difference
(func
(symbol '_list')
(string '0\x001'))
(symbol '1'))
0"""
sh % "hg debugrevspec -p unknown 0" == r"""
abort: invalid stage name: unknown
[255]"""
sh % "hg debugrevspec -p all --optimize 0" == r"""
abort: cannot use --optimize with --show-stage
[255]"""
# verify optimized tree:
sh % "hg debugrevspec --verify '0|1'"
sh % "hg debugrevspec --verify -v -p analyzed -p optimized 'r3232() & 2'" == r"""
* analyzed:
(and
(func
(symbol 'r3232')
None)
(symbol '2'))
* optimized:
(andsmally
(func
(symbol 'r3232')
None)
(symbol '2'))
* analyzed set:
<baseset [2]>
* optimized set:
<baseset [2, 2]>
--- analyzed
+++ optimized
2
+2
[1]"""
sh % "hg debugrevspec --no-optimized --verify-optimized 0" == r"""
abort: cannot use --verify-optimized with --no-optimized
[255]"""
# Test that symbols only get parsed as functions if there's an opening
# parenthesis.
sh % "hg book only -r 9"
sh % "log 'only(only)'" == r"""
8
9"""
# ':y' behaves like '0:y', but can't be rewritten as such since the revision '0'
# may be hidden (issue5385)
sh % "try -p parsed -p analyzed ':'" == r"""
* parsed:
(rangeall
None)
* analyzed:
(rangeall
None)
* set:
<idset+ [0..=9]>
0
1
2
3
4
5
6
7
8
9"""
sh % "try -p analyzed ':1'" == r"""
* analyzed:
(rangepre
(symbol '1'))
* set:
<idset+ [0 1]>
0
1"""
sh % "try -p analyzed ':(1|2)'" == r"""
* analyzed:
(rangepre
(or
(list
(symbol '1')
(symbol '2'))))
* set:
<idset+ [0 1 2]>
0
1
2"""
sh % "try -p analyzed ':(1&2)'" == r"""
* analyzed:
(rangepre
(and
(symbol '1')
(symbol '2')))
* set:
<baseset []>"""
# infix/suffix resolution of ^ operator (issue2884):
# x^:y means (x^):y
sh % "try '1^:2'" == r"""
(range
(parentpost
(symbol '1'))
(symbol '2'))
* set:
<idset+ [0 1 2]>
0
1
2"""
sh % "try '1^::2'" == r"""
(dagrange
(parentpost
(symbol '1'))
(symbol '2'))
* set:
<baseset+ [0, 1, 2]>
0
1
2"""
sh % "try '9^:'" == r"""
(rangepost
(parentpost
(symbol '9')))
* set:
<idset+ [8 9]>
8
9"""
# x^:y should be resolved before omitting group operators
sh % "try '1^(:2)'" == r"""
(parent
(symbol '1')
(group
(rangepre
(symbol '2'))))
hg: parse error: ^ expects a number 0, 1, or 2
[255]"""
# x^:y should be resolved recursively
sh % "try 'sort(1^:2)'" == r"""
(func
(symbol 'sort')
(range
(parentpost
(symbol '1'))
(symbol '2')))
* set:
<idset+ [0 1 2]>
0
1
2"""
sh % "try '(3^:4)^:2'" == r"""
(range
(parentpost
(group
(range
(parentpost
(symbol '3'))
(symbol '4'))))
(symbol '2'))
* set:
<idset+ [0 1 2]>
0
1
2"""
sh % "try '(3^::4)^::2'" == r"""
(dagrange
(parentpost
(group
(dagrange
(parentpost
(symbol '3'))
(symbol '4'))))
(symbol '2'))
* set:
<baseset+ [0, 1, 2]>
0
1
2"""
sh % "try '(9^:)^:'" == r"""
(rangepost
(parentpost
(group
(rangepost
(parentpost
(symbol '9'))))))
* set:
<idset+ [4..=9]>
4
5
6
7
8
9"""
# x^ in alias should also be resolved
sh % "try A --config 'revsetalias.A=1^:2'" == r"""
(symbol 'A')
* expanded:
(range
(parentpost
(symbol '1'))
(symbol '2'))
* set:
<idset+ [0 1 2]>
0
1
2"""
sh % "try 'A:2' --config 'revsetalias.A=1^'" == r"""
(range
(symbol 'A')
(symbol '2'))
* expanded:
(range
(parentpost
(symbol '1'))
(symbol '2'))
* set:
<idset+ [0 1 2]>
0
1
2"""
# but not beyond the boundary of alias expansion, because the resolution should
# be made at the parsing stage
sh % "try '1^A' --config 'revsetalias.A=:2'" == r"""
(parent
(symbol '1')
(symbol 'A'))
* expanded:
(parent
(symbol '1')
(rangepre
(symbol '2')))
hg: parse error: ^ expects a number 0, 1, or 2
[255]"""
# ancestor can accept 0 or more arguments
sh % "log 'ancestor()'"
sh % "log 'ancestor(1)'" == "1"
sh % "log 'ancestor(4,5)'" == "1"
sh % "log 'ancestor(4,5) and 4'"
sh % "log 'ancestor(0,0,1,3)'" == "0"
sh % "log 'ancestor(3,1,5,3,5,1)'" == "1"
sh % "log 'ancestor(0,1,3,5)'" == "0"
sh % "log 'ancestor(1,2,3,4,5)'" == "1"
# test ancestors
sh % "hg log -G -T '{rev}\\n' --config 'experimental.graphshorten=True'" == r"""
@ 9
o 8
│ o 7
│ o 6
╭─┤
│ o 5
o │ 4
│ o 3
o │ 2
├─╯
o 1
o 0"""
sh % "log 'ancestors(5)'" == r"""
0
1
3
5"""
sh % "log 'ancestor(ancestors(5))'" == "0"
sh % "log '::r3232()'" == r"""
0
1
2
3"""
# test ancestors with depth limit
# (depth=0 selects the node itself)
sh % "log 'reverse(ancestors(9, depth=0))'" == "9"
# (interleaved: '4' would be missing if heap queue were higher depth first)
sh % "log 'reverse(ancestors(8:9, depth=1))'" == r"""
9
8
4"""
# (interleaved: '2' would be missing if heap queue were higher depth first)
sh % "log 'reverse(ancestors(7+8, depth=2))'" == r"""
8
7
6
5
4
2"""
# (walk example above by separate queries)
sh % "log 'reverse(ancestors(8, depth=2)) + reverse(ancestors(7, depth=2))'" == r"""
8
4
2
7
6
5"""
# (walk 2nd and 3rd ancestors)
sh % "log 'reverse(ancestors(7, depth=3, startdepth=2))'" == r"""
5
4
3
2"""
# (interleaved: '4' would be missing if higher-depth ancestors weren't scanned)
sh % "log 'reverse(ancestors(7+8, depth=2, startdepth=2))'" == r"""
5
4
2"""
# (note that 'ancestors(x, depth=y, startdepth=z)' is not identical to
# 'ancestors(x, depth=y) - ancestors(x, depth=z-1)' because a node may have
# multiple depths)
sh % "log 'reverse(ancestors(7+8, depth=2) - ancestors(7+8, depth=1))'" == r"""
5
2"""
# test bad arguments passed to ancestors()
sh % "log 'ancestors(., depth=-1)'" == r"""
hg: parse error: negative depth
[255]"""
sh % "log 'ancestors(., depth=foo)'" == r"""
hg: parse error: ancestors expects an integer depth
[255]"""
# test descendants
sh % "hg log -G -T '{rev}\\n' --config 'experimental.graphshorten=True'" == r"""
@ 9
o 8
│ o 7
│ o 6
╭─┤
│ o 5
o │ 4
│ o 3
o │ 2
├─╯
o 1
o 0"""
# (null is ultimate root and has optimized path)
sh % "log 'null:4 & descendants(null)'" == r"""
-1
0
1
2
3
4"""
# (including merge)
sh % "log ':8 & descendants(2)'" == r"""
2
4
6
7
8"""
# (multiple roots)
sh % "log ':8 & descendants(2+5)'" == r"""
2
4
5
6
7
8"""
# test descendants with depth limit
# (depth=0 selects the node itself)
sh % "log 'descendants(0, depth=0)'" == "0"
sh % "log 'null: & descendants(null, depth=0)'" == "-1"
# (p2 = null should be ignored)
sh % "log 'null: & descendants(null, depth=2)'" == r"""
-1
0
1"""
# (multiple paths: depth(6) = (2, 3))
sh % "log 'descendants(1+3, depth=2)'" == r"""
1
2
3
4
5
6"""
# (multiple paths: depth(5) = (1, 2), depth(6) = (2, 3))
sh % "log 'descendants(3+1, depth=2, startdepth=2)'" == r"""
4
5
6"""
# (multiple depths: depth(6) = (0, 2, 4), search for depth=2)
sh % "log 'descendants(0+3+6, depth=3, startdepth=1)'" == r"""
1
2
3
4
5
6
7"""
# (multiple depths: depth(6) = (0, 4), no match)
sh % "log 'descendants(0+6, depth=3, startdepth=1)'" == r"""
1
2
3
4
5
7"""
# test ancestors/descendants relation subscript:
sh % "log 'tip#generations[0]'" == "9"
sh % "log '.#generations[-1]'" == "8"
sh % "log '.#g[(-1)]'" == "8"
sh % "hg debugrevspec -p parsed 'roots(:)#g[2]'" == r"""
* parsed:
(relsubscript
(func
(symbol 'roots')
(rangeall
None))
(symbol 'g')
(symbol '2'))
2
3"""
# test author
sh % "log 'author(bob)'" == "2"
sh % "log 'author(\"re:bob|test\")'" == r"""
0
1
2
3
4
5
6
7
8
9"""
sh % "log 'author(r\"re:\\S\")'" == r"""
0
1
2
3
4
5
6
7
8
9"""
sh % "log 'children(ancestor(4,5))'" == r"""
2
3"""
sh % "log 'children(4)'" == r"""
6
8"""
sh % "log 'children(null)'" == "0"
sh % "log 'closed()'"
sh % "log 'contains(a)'" == r"""
0
1
3
5"""
sh % "log 'contains(\"../repo/a\")'" == r"""
0
1
3
5"""
sh % "log 'desc(B)'" == "5"
sh % "hg log -r 'desc(r\"re:S?u\")' --template '{rev} {desc|firstline}\\n'" == r"""
5 5 bug
6 6 issue619"""
sh % "log 'descendants(2 or 3)'" == r"""
2
3
4
5
6
7
8
9"""
sh % "log 'file(\"b*\")'" == r"""
1
4"""
sh % "log 'filelog(\"b\")'" == r"""
1
4"""
sh % "log 'filelog(\"../repo/b\")'" == r"""
1
4"""
sh % "log 'follow()'" == r"""
0
1
2
4
8
9"""
sh % "log 'grep(\"issue\\\\d+\")'" == "6"
sh % "try 'grep(\"(\")'" == r"""
(func
(symbol 'grep')
(string '('))
hg: parse error: invalid match pattern: * (glob)
[255]"""
sh % "try 'grep(\"\\bissue\\\\d+\")'" == r"""
(func
(symbol 'grep')
(string '\x08issue\\d+'))
* set:
<filteredset
<fullreposet+ [0..=9]>,
<grep '\x08issue\\d+'>>"""
sh % "try 'grep(r\"\\bissue\\d+\")'" == r"""
(func
(symbol 'grep')
(string '\\bissue\\d+'))
* set:
<filteredset
<fullreposet+ [0..=9]>,
<grep '\\bissue\\d+'>>
6"""
sh % "try 'grep(r\"\\\")'" == r"""
hg: parse error at 7: unterminated string
(grep(r"\")
^ here)
[255]"""
sh % "log 'head()'" == r"""
7
9"""
sh % "log 'heads(6::)'" == "7"
sh % "log 'keyword(issue)'" == "6"
sh % "log 'keyword(\"test a\")'"
# Test first (=limit) and last
sh % "log 'limit(head(), 1)'" == "7"
sh % "log 'limit(author(\"re:bob|test\"), 3, 5)'" == r"""
5
6
7"""
sh % "log 'limit(author(\"re:bob|test\"), offset=6)'" == "6"
sh % "log 'limit(author(\"re:bob|test\"), offset=10)'"
sh % "log 'limit(all(), 1, -1)'" == r"""
hg: parse error: negative offset
[255]"""
sh % "log 'limit(all(), -1)'" == r"""
hg: parse error: negative number to select
[255]"""
sh % "log 'limit(all(), 0)'"
sh % "log 'last(all(), -1)'" == r"""
hg: parse error: negative number to select
[255]"""
sh % "log 'last(all(), 0)'"
sh % "log 'last(all(), 1)'" == "9"
sh % "log 'last(all(), 2)'" == r"""
8
9"""
# Test smartset.slice() by first/last()
# (using unoptimized set, filteredset as example)
sh % "hg debugrevspec --no-show-revs -s '0:7 & all()'" == r"""
* set:
<idset+ [0..=7]>"""
sh % "log 'limit(0:7 & all(), 3, 4)'" == r"""
4
5
6"""
sh % "log 'limit(7:0 & all(), 3, 4)'" == r"""
3
2
1"""
sh % "log 'last(0:7 & all(), 2)'" == r"""
6
7"""
# (using baseset)
sh % "hg debugrevspec --no-show-revs -s 0+1+2+3+4+5+6+7" == r"""
* set:
<baseset [0, 1, 2, 3, 4, 5, 6, 7]>"""
sh % "hg debugrevspec --no-show-revs -s '0::7'" == r"""
* set:
<baseset+ [0, 1, 2, 3, 4, 5, 6, 7]>"""
sh % "log 'limit(0+1+2+3+4+5+6+7, 3, 4)'" == r"""
4
5
6"""
sh % "log 'limit(sort(0::7, rev), 3, 4)'" == r"""
4
5
6"""
sh % "log 'limit(sort(0::7, -rev), 3, 4)'" == r"""
3
2
1"""
sh % "log 'last(sort(0::7, rev), 2)'" == r"""
6
7"""
sh % "hg debugrevspec -s 'limit(sort(0::7, rev), 3, 6)'" == r"""
* set:
<baseset+ [6, 7]>
6
7"""
sh % "hg debugrevspec -s 'limit(sort(0::7, rev), 3, 9)'" == r"""
* set:
<baseset+ []>"""
sh % "hg debugrevspec -s 'limit(sort(0::7, -rev), 3, 6)'" == r"""
* set:
<baseset- [0, 1]>
1
0"""
sh % "hg debugrevspec -s 'limit(sort(0::7, -rev), 3, 9)'" == r"""
* set:
<baseset- []>"""
sh % "hg debugrevspec -s 'limit(0::7, 0)'" == r"""
* set:
<baseset+ []>"""
# (using spanset)
sh % "hg debugrevspec --no-show-revs -s '0:7'" == r"""
* set:
<idset+ [0..=7]>"""
sh % "log 'limit(0:7, 3, 4)'" == r"""
4
5
6"""
sh % "log 'limit(7:0, 3, 4)'" == r"""
3
2
1"""
sh % "log 'limit(0:7, 3, 6)'" == r"""
6
7"""
sh % "log 'limit(7:0, 3, 6)'" == r"""
1
0"""
sh % "log 'last(0:7, 2)'" == r"""
6
7"""
sh % "hg debugrevspec -s 'limit(0:7, 3, 6)'" == r"""
* set:
<baseset slice=6:9
<idset+ [0..=7]>>
6
7"""
sh % "hg debugrevspec -s 'limit(0:7, 3, 9)'" == r"""
* set:
<baseset slice=9:12
<idset+ [0..=7]>>"""
sh % "hg debugrevspec -s 'limit(7:0, 3, 6)'" == r"""
* set:
<baseset slice=6:9
<idset- [0..=7]>>
1
0"""
sh % "hg debugrevspec -s 'limit(7:0, 3, 9)'" == r"""
* set:
<baseset slice=9:12
<idset- [0..=7]>>"""
sh % "hg debugrevspec -s 'limit(0:7, 0)'" == r"""
* set:
<baseset slice=0:0
<idset+ [0..=7]>>"""
# Test order of first/last revisions
sh % "hg debugrevspec -s 'first(4:0, 3) & 3:'" == r"""
* set:
<filteredset
<baseset slice=0:3
<idset- [0..=4]>>,
<idset+ [3..=9]>>
4
3"""
sh % "hg debugrevspec -s '3: & first(4:0, 3)'" == r"""
* set:
<idset+ [3 4]>
3
4"""
sh % "hg debugrevspec -s 'last(4:0, 3) & :1'" == r"""
* set:
<filteredset
<baseset slice=0:3
<idset+ [0..=4]>>,
<idset+ [0 1]>>
1
0"""
sh % "hg debugrevspec -s ':1 & last(4:0, 3)'" == r"""
* set:
<idset+ [0 1]>
0
1"""
# Test scmutil.revsingle() should return the last revision
sh % "hg debugrevspec -s 'last(0::)'" == r"""
* set:
<baseset slice=0:1
<generatorset->>
9"""
sh % "hg identify -r '0::' --num" == "9"
# Test matching
sh % "log 'matching(6)'" == "6"
sh % "log 'matching(6:7, \"phase parents user date branch summary files description\")'" == r"""
6
7"""
# Testing min and max
# max: simple
sh % "log 'max(contains(a))'" == "5"
# max: simple on unordered set
sh % "log 'max((4+0+2+5+7) and contains(a))'" == "5"
# max: no result
sh % "log 'max(contains(stringthatdoesnotappearanywhere))'"
# max: no result on unordered set
sh % "log 'max((4+0+2+5+7) and contains(stringthatdoesnotappearanywhere))'"
# min: simple
sh % "log 'min(contains(a))'" == "0"
# min: simple on unordered set
sh % "log 'min((4+0+2+5+7) and contains(a))'" == "0"
# min: empty
sh % "log 'min(contains(stringthatdoesnotappearanywhere))'"
# min: empty on unordered set
sh % "log 'min((4+0+2+5+7) and contains(stringthatdoesnotappearanywhere))'"
sh % "log 'merge()'" == "6"
sh % "log 'modifies(b)'" == "4"
sh % "log 'modifies(\"path:b\")'" == "4"
sh % "log 'modifies(\"*\")'" == r"""
4
6"""
sh % "log 'modifies(\"set:modified()\")'" == "4"
sh % "log 'id(5)'" == "2"
sh % "log 'only(9)'" == r"""
8
9"""
sh % "log 'only(8)'" == "8"
sh % "log 'only(9, 5)'" == r"""
2
4
8
9"""
sh % "log 'only(7 + 9, 5 + 2)'" == r"""
4
6
7
8
9"""
# Test empty set input
sh % "log 'only(p2())'"
sh % "log 'only(p1(), p2())'" == r"""
0
1
2
4
8
9"""
# Test '%' operator
sh % "log '9%'" == r"""
8
9"""
sh % "log '9%5'" == r"""
2
4
8
9"""
sh % "log '(7 + 9)%(5 + 2)'" == r"""
4
6
7
8
9"""
# Test operand of '%' is optimized recursively (issue4670)
sh % "try --optimize '8:9-8%'" == r"""
(onlypost
(minus
(range
(symbol '8')
(symbol '9'))
(symbol '8')))
* optimized:
(func
(symbol 'only')
(difference
(range
(symbol '8')
(symbol '9'))
(symbol '8')))
* set:
<baseset+ [8, 9]>
8
9"""
sh % "try --optimize '(9)%(5)'" == r"""
(only
(group
(symbol '9'))
(group
(symbol '5')))
* optimized:
(func
(symbol 'only')
(list
(symbol '9')
(symbol '5')))
* set:
<baseset+ [2, 4, 8, 9]>
2
4
8
9"""
# Test the order of operations
sh % "log '7 + 9%5 + 2'" == r"""
7
2
4
8
9"""
# Test explicit numeric revision
sh % "log 'rev(-2)'"
sh % "log 'rev(-1)'" == "-1"
sh % "log 'rev(0)'" == "0"
sh % "log 'rev(9)'" == "9"
sh % "log 'rev(10)'"
sh % "log 'rev(tip)'" == r"""
hg: parse error: rev expects a number
[255]"""
# Test hexadecimal revision
sh % "log 'id(2)'" == r"""
abort: 00changelog.i@2: ambiguous identifier!
[255]"""
sh % "log 'id(23268)'" == "4"
sh % "log 'id(2785f51eece)'" == "0"
sh % "log 'id(d5d0dcbdc4d9ff5dbb2d336f32f0bb561c1a532c)'" == "8"
sh % "log 'id(d5d0dcbdc4a)'"
sh % "log 'id(d5d0dcbdc4w)'"
sh % "log 'id(d5d0dcbdc4d9ff5dbb2d336f32f0bb561c1a532d)'"
sh % "log 'id(d5d0dcbdc4d9ff5dbb2d336f32f0bb561c1a532q)'"
sh % "log 'id(1.0)'"
sh % "log 'id(xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx)'"
# Test null revision
sh % "log '(null)'" == "-1"
sh % "log '(null:0)'" == r"""
-1
0"""
sh % "log '(0:null)'" == r"""
0
-1"""
sh % "log 'null::0'" == r"""
-1
0"""
sh % "log 'null:tip - 0:'" == "-1"
sh % "log 'null: and null::'" | "head -1" == "-1"
sh % "log 'null: or 0:'" | "head -2" == r"""
-1
0"""
sh % "log 'ancestors(null)'" == "-1"
sh % "log 'reverse(null:)'" | "tail -2" == r"""
0
-1"""
sh % "log 'first(null:)'" == "-1"
sh % "log 'min(null:)'"
# BROKEN: should be '-1'
sh % "log 'tip:null and all()'" | "tail -2" == r"""
1
0"""
# Test working-directory revision
sh % "hg debugrevspec 'wdir()'" == "2147483647"
sh % "hg debugrevspec 'wdir()^'" == "9"
sh % "hg up 7" == "0 files updated, 0 files merged, 1 files removed, 0 files unresolved"
sh % "hg debugrevspec 'wdir()^'" == "7"
sh % "hg debugrevspec 'wdir()^0'" == "2147483647"
sh % "hg debugrevspec 'wdir()~3'" == "5"
sh % "hg debugrevspec 'ancestors(wdir())'" == r"""
0
1
2
3
4
5
6
7
2147483647"""
sh % "hg debugrevspec 'wdir()~0'" == "2147483647"
sh % "hg debugrevspec 'p1(wdir())'" == "7"
sh % "hg debugrevspec 'p2(wdir())'"
sh % "hg debugrevspec 'parents(wdir())'" == "7"
sh % "hg debugrevspec 'wdir()^1'" == "7"
sh % "hg debugrevspec 'wdir()^2'"
sh % "hg debugrevspec 'wdir()^3'" == r"""
hg: parse error: ^ expects a number 0, 1, or 2
[255]"""
# DAG ranges with wdir()
sh % "hg debugrevspec 'wdir()::1'"
sh % "hg debugrevspec 'wdir()::wdir()'" == "2147483647"
sh % "hg debugrevspec 'wdir()::(1+wdir())'" == "2147483647"
sh % "hg debugrevspec '6::wdir()'" == r"""
6
7
2147483647"""
sh % "hg debugrevspec '5::(wdir()+7)'" == r"""
5
6
7
2147483647"""
sh % "hg debugrevspec '(1+wdir())::(2+wdir())'" == r"""
1
2
3
4
5
6
7
2147483647"""
# For tests consistency
sh % "hg up 9" == "1 files updated, 0 files merged, 0 files removed, 0 files unresolved"
sh % "hg debugrevspec 'tip or wdir()'" == r"""
9
2147483647"""
sh % "hg debugrevspec '0:tip and wdir()'"
sh % "log '0:wdir()'" | "tail -3" == r"""
8
9
2147483647"""
sh % "log 'wdir():0'" | "head -3" == r"""
2147483647
9
8"""
sh % "log 'wdir():wdir()'" == "2147483647"
sh % "log '(all() + wdir()) & min(. + wdir())'" == "9"
sh % "log '(all() + wdir()) & max(. + wdir())'" == "2147483647"
sh % "log 'first(wdir() + .)'" == "2147483647"
sh % "log 'last(. + wdir())'" == "2147483647"
# Test working-directory integer revision and node id
# (BUG: '0:wdir()' is still needed to populate wdir revision)
sh % "hg debugrevspec '0:wdir() & 2147483647'" == "2147483647"
sh % "hg debugrevspec '0:wdir() & rev(2147483647)'" == "2147483647"
sh % "hg debugrevspec '0:wdir() & ffffffffffffffffffffffffffffffffffffffff'" == "2147483647"
sh % "hg debugrevspec '0:wdir() & ffffffffffff'" == "2147483647"
sh % "hg debugrevspec '0:wdir() & id(ffffffffffffffffffffffffffffffffffffffff)'" == "2147483647"
sh % "hg debugrevspec '0:wdir() & id(ffffffffffff)'" == "2147483647"
sh % "cd .."
# Test short 'ff...' hash collision
# (BUG: '0:wdir()' is still needed to populate wdir revision)
sh % "hg init wdir-hashcollision"
sh % "cd wdir-hashcollision"
(
sh % "cat"
<< r"""
[experimental]
evolution.createmarkers=True
"""
>> ".hg/hgrc"
)
sh % "echo 0" > "a"
sh % "hg ci -qAm 0"
for i in [2463, 2961, 6726, 78127]:
sh.hg("up", "-q", "0")
with open("a", "wb") as f:
f.write(b"%i\n" % i)
sh.hg("ci", "-qm", "%s" % i)
sh % "hg up -q null"
sh % "hg log -r '0:wdir()' -T '{rev}:{node} {shortest(node, 3)}\\n'" == r"""
0:b4e73ffab476aa0ee32ed81ca51e07169844bc6a b4e
1:fffbae3886c8fbb2114296380d276fd37715d571 fffba
2:fffb6093b00943f91034b9bdad069402c834e572 fffb6
3:fff48a9b9de34a4d64120c29548214c67980ade3 fff4
4:ffff85cff0ff78504fcdc3c0bc10de0c65379249 ffff8
2147483647:ffffffffffffffffffffffffffffffffffffffff fffff"""
sh % "hg debugobsolete fffbae3886c8fbb2114296380d276fd37715d571" == ""
sh % "hg debugrevspec '0:wdir() & fff'" == r"""
abort: 00changelog.i@fff: ambiguous identifier!
[255]"""
sh % "hg debugrevspec '0:wdir() & ffff'" == r"""
abort: 00changelog.i@ffff: ambiguous identifier!
[255]"""
sh % "hg debugrevspec '0:wdir() & fffb'" == r"""
abort: 00changelog.i@fffb: ambiguous identifier!
[255]"""
# BROKEN should be '2' (node lookup uses unfiltered repo since dc25ed84bee8)
sh % "hg debugrevspec '0:wdir() & id(fffb)'" == r"""
abort: 00changelog.i@fffb: ambiguous identifier!
[255]"""
sh % "hg debugrevspec '0:wdir() & ffff8'" == "4"
sh % "hg debugrevspec '0:wdir() & fffff'" == "2147483647"
sh % "cd .."
sh % "cd repo"
sh % "log 'outgoing()'" == r"""
8
9"""
sh % "log 'outgoing(\"../remote1\")'" == r"""
8
9"""
sh % "log 'outgoing(\"../remote2\")'" == r"""
3
5
6
7
9"""
sh % "log 'p1(merge())'" == "5"
sh % "log 'p2(merge())'" == "4"
sh % "log 'parents(merge())'" == r"""
4
5"""
sh % "log 'removes(a)'" == r"""
2
6"""
sh % "log 'roots(all())'" == "0"
sh % "log 'reverse(2 or 3 or 4 or 5)'" == r"""
5
4
3
2"""
sh % "log 'reverse(all())'" == r"""
9
8
7
6
5
4
3
2
1
0"""
sh % "log 'reverse(all()) & filelog(b)'" == r"""
4
1"""
sh % "log 'rev(5)'" == "5"
sh % "log 'sort(limit(reverse(all()), 3))'" == r"""
7
8
9"""
sh % "log 'sort(2 or 3 or 4 or 5, date)'" == r"""
2
3
5
4"""
# Test order of revisions in compound expression
# ----------------------------------------------
# The general rule is that only the outermost (= leftmost) predicate can
# enforce its ordering requirement. The other predicates should take the
# ordering defined by it.
# 'A & B' should follow the order of 'A':
sh % "log '2:0 & 0::2'" == r"""
2
1
0"""
# 'head()' combines sets in right order:
sh % "log '9:7 & head()'" == r"""
9
7"""
# 'x:y' takes ordering parameter into account:
sh % "try -p optimized '3:0 & 0:3 & not 2:1'" == r"""
* optimized:
(difference
(and
(range
(symbol '3')
(symbol '0'))
(range
(symbol '0')
(symbol '3')))
(range
(symbol '2')
(symbol '1')))
* set:
<idset- [0 3]>
3
0"""
# 'a + b', which is optimized to '_list(a b)', should take the ordering of
# the left expression:
sh % "try --optimize '2:0 & (0 + 1 + 2)'" == r"""
(and
(range
(symbol '2')
(symbol '0'))
(group
(or
(list
(symbol '0')
(symbol '1')
(symbol '2')))))
* optimized:
(and
(range
(symbol '2')
(symbol '0'))
(func
(symbol '_list')
(string '0\x001\x002')))
* set:
<idset- [0 1 2]>
2
1
0"""
# 'A + B' should take the ordering of the left expression:
sh % "try --optimize '2:0 & (0:1 + 2)'" == r"""
(and
(range
(symbol '2')
(symbol '0'))
(group
(or
(list
(range
(symbol '0')
(symbol '1'))
(symbol '2')))))
* optimized:
(and
(range
(symbol '2')
(symbol '0'))
(or
(list
(range
(symbol '0')
(symbol '1'))
(symbol '2'))))
* set:
<idset- [0 1 2]>
2
1
0"""
# '_intlist(a b)' should behave like 'a + b':
sh % "trylist --optimize '2:0 & %ld' 0 1 2" == r"""
(and
(range
(symbol '2')
(symbol '0'))
(func
(symbol '_intlist')
(string '0\x001\x002')))
* optimized:
(andsmally
(range
(symbol '2')
(symbol '0'))
(func
(symbol '_intlist')
(string '0\x001\x002')))
* set:
<idset- [0 1 2]>
2
1
0"""
sh % "trylist --optimize '%ld & 2:0' 0 2 1" == r"""
(and
(func
(symbol '_intlist')
(string '0\x002\x001'))
(range
(symbol '2')
(symbol '0')))
* optimized:
(and
(func
(symbol '_intlist')
(string '0\x002\x001'))
(range
(symbol '2')
(symbol '0')))
* set:
<filteredset
<baseset [0, 2, 1]>,
<idset- [0 1 2]>>
0
2
1"""
# '_hexlist(a b)' should behave like 'a + b':
args = sh.hg("log", "-T", "{node} ", "-r0:2")
sh % (
"trylist --optimize --bin '2:0 & %%ln' %s" % args
) == r"""
(and
(range
(symbol '2')
(symbol '0'))
(func
(symbol '_hexlist')
(string '*'))) (glob)
* optimized:
(and
(range
(symbol '2')
(symbol '0'))
(func
(symbol '_hexlist')
(string '*'))) (glob)
* set:
<idset- [0 1 2]>
2
1
0"""
args = sh.hg("log", "-T", "{node} ", "-r0+2+1")
sh % (
"trylist --optimize --bin '%%ln & 2:0' %s" % args
) == r"""
(and
(func
(symbol '_hexlist')
(string '*')) (glob)
(range
(symbol '2')
(symbol '0')))
* optimized:
(andsmally
(func
(symbol '_hexlist')
(string '*')) (glob)
(range
(symbol '2')
(symbol '0')))
* set:
<baseset [0, 2, 1]>
0
2
1"""
# '_list' should not go through the slow follow-order path if order doesn't
# matter:
sh % "try -p optimized '2:0 & not (0 + 1)'" == r"""
* optimized:
(difference
(range
(symbol '2')
(symbol '0'))
(func
(symbol '_list')
(string '0\x001')))
* set:
<idset- [2]>
2"""
sh % "try -p optimized '2:0 & not (0:2 & (0 + 1))'" == r"""
* optimized:
(difference
(range
(symbol '2')
(symbol '0'))
(and
(range
(symbol '0')
(symbol '2'))
(func
(symbol '_list')
(string '0\x001'))))
* set:
<idset- [2]>
2"""
# because 'present()' does nothing other than suppressing an error, the
# ordering requirement should be forwarded to the nested expression
sh % "try -p optimized 'present(2 + 0 + 1)'" == r"""
* optimized:
(func
(symbol 'present')
(func
(symbol '_list')
(string '2\x000\x001')))
* set:
<baseset [2, 0, 1]>
2
0
1"""
sh % "try --optimize '2:0 & present(0 + 1 + 2)'" == r"""
(and
(range
(symbol '2')
(symbol '0'))
(func
(symbol 'present')
(or
(list
(symbol '0')
(symbol '1')
(symbol '2')))))
* optimized:
(and
(range
(symbol '2')
(symbol '0'))
(func
(symbol 'present')
(func
(symbol '_list')
(string '0\x001\x002'))))
* set:
<idset- [0 1 2]>
2
1
0"""
# 'reverse()' should take effect only if it is the outermost expression:
sh % "try --optimize '0:2 & reverse(all())'" == r"""
(and
(range
(symbol '0')
(symbol '2'))
(func
(symbol 'reverse')
(func
(symbol 'all')
None)))
* optimized:
(and
(range
(symbol '0')
(symbol '2'))
(func
(symbol 'reverse')
(func
(symbol 'all')
None)))
* set:
<idset+ [0 1 2]>
0
1
2"""
# 'sort()' should take effect only if it is the outermost expression:
sh % "try --optimize '0:2 & sort(all(), -rev)'" == r"""
(and
(range
(symbol '0')
(symbol '2'))
(func
(symbol 'sort')
(list
(func
(symbol 'all')
None)
(negate
(symbol 'rev')))))
* optimized:
(and
(range
(symbol '0')
(symbol '2'))
(func
(symbol 'sort')
(list
(func
(symbol 'all')
None)
(string '-rev'))))
* set:
<idset+ [0 1 2]>
0
1
2"""
# invalid argument passed to noop sort():
sh % "log '0:2 & sort()'" == r"""
hg: parse error: sort requires one or two arguments
[255]"""
sh % "log '0:2 & sort(all(), -invalid)'" == r"""
hg: parse error: unknown sort key '-invalid'
[255]"""
# for 'A & f(B)', 'B' should not be affected by the order of 'A':
sh % "try --optimize '2:0 & first(1 + 0 + 2)'" == r"""
(and
(range
(symbol '2')
(symbol '0'))
(func
(symbol 'first')
(or
(list
(symbol '1')
(symbol '0')
(symbol '2')))))
* optimized:
(and
(range
(symbol '2')
(symbol '0'))
(func
(symbol 'first')
(func
(symbol '_list')
(string '1\x000\x002'))))
* set:
<filteredset
<baseset [1]>,
<idset- [0 1 2]>>
1"""
sh % "try --optimize '2:0 & not last(0 + 2 + 1)'" == r"""
(and
(range
(symbol '2')
(symbol '0'))
(not
(func
(symbol 'last')
(or
(list
(symbol '0')
(symbol '2')
(symbol '1'))))))
* optimized:
(difference
(range
(symbol '2')
(symbol '0'))
(func
(symbol 'last')
(func
(symbol '_list')
(string '0\x002\x001'))))
* set:
<idset- [0 2]>
2
0"""
# for 'A & (op)(B)', 'B' should not be affected by the order of 'A':
sh % "try --optimize '2:0 & (1 + 0 + 2):(0 + 2 + 1)'" == r"""
(and
(range
(symbol '2')
(symbol '0'))
(range
(group
(or
(list
(symbol '1')
(symbol '0')
(symbol '2'))))
(group
(or
(list
(symbol '0')
(symbol '2')
(symbol '1'))))))
* optimized:
(and
(range
(symbol '2')
(symbol '0'))
(range
(func
(symbol '_list')
(string '1\x000\x002'))
(func
(symbol '_list')
(string '0\x002\x001'))))
* set:
<idset- [1]>
1"""
# 'A & B' can be rewritten as 'flipand(B, A)' by weight.
sh % "try --optimize 'contains(\"glob:*\") & (2 + 0 + 1)'" == r"""
(and
(func
(symbol 'contains')
(string 'glob:*'))
(group
(or
(list
(symbol '2')
(symbol '0')
(symbol '1')))))
* optimized:
(andsmally
(func
(symbol 'contains')
(string 'glob:*'))
(func
(symbol '_list')
(string '2\x000\x001')))
* set:
<filteredset
<baseset+ [0, 1, 2]>,
<contains 'glob:*'>>
0
1
2"""
# and in this example, 'A & B' is rewritten as 'B & A', but 'A' overrides
# the order appropriately:
sh % "try --optimize 'reverse(contains(\"glob:*\")) & (0 + 2 + 1)'" == r"""
(and
(func
(symbol 'reverse')
(func
(symbol 'contains')
(string 'glob:*')))
(group
(or
(list
(symbol '0')
(symbol '2')
(symbol '1')))))
* optimized:
(andsmally
(func
(symbol 'reverse')
(func
(symbol 'contains')
(string 'glob:*')))
(func
(symbol '_list')
(string '0\x002\x001')))
* set:
<filteredset
<baseset- [0, 1, 2]>,
<contains 'glob:*'>>
2
1
0"""
# test sort revset
# --------------------------------------------
# test when adding two unordered revsets
sh % "log 'sort(keyword(issue) or modifies(b))'" == r"""
4
6"""
# test when sorting a reversed collection in the same way it is
sh % "log 'sort(reverse(all()), -rev)'" == r"""
9
8
7
6
5
4
3
2
1
0"""
# test when sorting a reversed collection
sh % "log 'sort(reverse(all()), rev)'" == r"""
0
1
2
3
4
5
6
7
8
9"""
# test sorting two sorted collections in different orders
sh % "log 'sort(outgoing() or reverse(removes(a)), rev)'" == r"""
2
6
8
9"""
# test sorting two sorted collections in different orders backwards
sh % "log 'sort(outgoing() or reverse(removes(a)), -rev)'" == r"""
9
8
6
2"""
# test empty sort key which is noop
sh % "log 'sort(0 + 2 + 1, \"\")'" == r"""
0
2
1"""
# test invalid sort keys
sh % "log 'sort(all(), -invalid)'" == r"""
hg: parse error: unknown sort key '-invalid'
[255]"""
sh % "cd .."
# test sorting by multiple keys including variable-length strings
sh % "hg init sorting"
sh % "cd sorting"
(
sh % "cat"
<< r"""
[ui]
logtemplate = '{rev} {branch|p5}{desc|p5}{author|p5}{date|hgdate}\n'
[templatealias]
p5(s) = pad(s, 5)
"""
>> ".hg/hgrc"
)
sh % "setbranch b12"
sh % "commit -m m111 -u u112 -d '111 10800'"
sh % "setbranch b11"
sh % "commit -m m12 -u u111 -d '112 7200'"
sh % "setbranch b111"
sh % "commit -m m11 -u u12 -d '111 3600'"
sh % "setbranch b112"
sh % "commit -m m111 -u u11 -d '120 0'"
# compare revisions (has fast path):
sh % "hg log -r 'sort(all(), rev)'" == r"""
0 b12 m111 u112 111 10800
1 b11 m12 u111 112 7200
2 b111 m11 u12 111 3600
3 b112 m111 u11 120 0"""
sh % "hg log -r 'sort(all(), -rev)'" == r"""
3 b112 m111 u11 120 0
2 b111 m11 u12 111 3600
1 b11 m12 u111 112 7200
0 b12 m111 u112 111 10800"""
# compare variable-length strings (issue5218):
sh % "hg log -r 'sort(all(), branch)'" == r"""
1 b11 m12 u111 112 7200
2 b111 m11 u12 111 3600
3 b112 m111 u11 120 0
0 b12 m111 u112 111 10800"""
sh % "hg log -r 'sort(all(), -branch)'" == r"""
0 b12 m111 u112 111 10800
3 b112 m111 u11 120 0
2 b111 m11 u12 111 3600
1 b11 m12 u111 112 7200"""
sh % "hg log -r 'sort(all(), desc)'" == r"""
2 b111 m11 u12 111 3600
0 b12 m111 u112 111 10800
3 b112 m111 u11 120 0
1 b11 m12 u111 112 7200"""
sh % "hg log -r 'sort(all(), -desc)'" == r"""
1 b11 m12 u111 112 7200
0 b12 m111 u112 111 10800
3 b112 m111 u11 120 0
2 b111 m11 u12 111 3600"""
sh % "hg log -r 'sort(all(), user)'" == r"""
3 b112 m111 u11 120 0
1 b11 m12 u111 112 7200
0 b12 m111 u112 111 10800
2 b111 m11 u12 111 3600"""
sh % "hg log -r 'sort(all(), -user)'" == r"""
2 b111 m11 u12 111 3600
0 b12 m111 u112 111 10800
1 b11 m12 u111 112 7200
3 b112 m111 u11 120 0"""
# compare dates (tz offset should have no effect):
sh % "hg log -r 'sort(all(), date)'" == r"""
0 b12 m111 u112 111 10800
2 b111 m11 u12 111 3600
1 b11 m12 u111 112 7200
3 b112 m111 u11 120 0"""
sh % "hg log -r 'sort(all(), -date)'" == r"""
3 b112 m111 u11 120 0
1 b11 m12 u111 112 7200
0 b12 m111 u112 111 10800
2 b111 m11 u12 111 3600"""
# be aware that 'sort(x, -k)' is not exactly the same as 'reverse(sort(x, k))'
# because '-k' reverses the comparison, not the list itself:
sh % "hg log -r 'sort(0 + 2, date)'" == r"""
0 b12 m111 u112 111 10800
2 b111 m11 u12 111 3600"""
sh % "hg log -r 'sort(0 + 2, -date)'" == r"""
0 b12 m111 u112 111 10800
2 b111 m11 u12 111 3600"""
sh % "hg log -r 'reverse(sort(0 + 2, date))'" == r"""
2 b111 m11 u12 111 3600
0 b12 m111 u112 111 10800"""
# sort by multiple keys:
sh % "hg log -r 'sort(all(), \"branch -rev\")'" == r"""
1 b11 m12 u111 112 7200
2 b111 m11 u12 111 3600
3 b112 m111 u11 120 0
0 b12 m111 u112 111 10800"""
sh % "hg log -r 'sort(all(), \"-desc -date\")'" == r"""
1 b11 m12 u111 112 7200
3 b112 m111 u11 120 0
0 b12 m111 u112 111 10800
2 b111 m11 u12 111 3600"""
sh % "hg log -r 'sort(all(), \"user -branch date rev\")'" == r"""
3 b112 m111 u11 120 0
1 b11 m12 u111 112 7200
0 b12 m111 u112 111 10800
2 b111 m11 u12 111 3600"""
# toposort prioritises graph branches
sh % "hg up 2" == """
0 files updated, 0 files merged, 0 files removed, 0 files unresolved
(leaving bookmark b112)"""
sh % "touch a"
sh % "hg addremove" == "adding a"
sh % "hg ci -m t1 -u tu -d '130 0'"
sh % "echo a" >> "a"
sh % "hg ci -m t2 -u tu -d '130 0'"
sh % "hg book book1"
sh % "hg up 4" == r"""
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
(leaving bookmark book1)"""
sh % "touch a"
sh % "hg addremove"
sh % "hg ci -m t3 -u tu -d '130 0'"
sh % "hg log -r 'sort(all(), topo)'" == r"""
6 b111 t3 tu 130 0
5 b111 t2 tu 130 0
4 b111 t1 tu 130 0
3 b112 m111 u11 120 0
2 b111 m11 u12 111 3600
1 b11 m12 u111 112 7200
0 b12 m111 u112 111 10800"""
sh % "hg log -r 'sort(all(), -topo)'" == r"""
0 b12 m111 u112 111 10800
1 b11 m12 u111 112 7200
2 b111 m11 u12 111 3600
3 b112 m111 u11 120 0
4 b111 t1 tu 130 0
5 b111 t2 tu 130 0
6 b111 t3 tu 130 0"""
sh % "hg log -r 'sort(all(), topo, topo.firstbranch=book1)'" == r"""
5 b111 t2 tu 130 0
6 b111 t3 tu 130 0
4 b111 t1 tu 130 0
3 b112 m111 u11 120 0
2 b111 m11 u12 111 3600
1 b11 m12 u111 112 7200
0 b12 m111 u112 111 10800"""
# topographical sorting can't be combined with other sort keys, and you can't
# use the topo.firstbranch option when topo sort is not active:
sh % "hg log -r 'sort(all(), \"topo user\")'" == r"""
hg: parse error: topo sort order cannot be combined with other sort keys
[255]"""
sh % "hg log -r 'sort(all(), user, topo.firstbranch=book1)'" == r"""
hg: parse error: topo.firstbranch can only be used when using the topo sort key
[255]"""
# topo.firstbranch should accept any kind of expressions:
sh % "hg log -r 'sort(0, topo, topo.firstbranch=(book1))'" == "0 b12 m111 u112 111 10800"
sh % "cd .."
sh % "cd repo"
# test multiline revset with errors
sh % "hg log -r '\n. +\n.^ +'" == r"""
hg: parse error at 9: not a prefix: end
( . + .^ +
^ here)
[255]"""
| facebookexperimental/eden | eden/hg-server/tests/test-revset-t.py | Python | gpl-2.0 | 55,679 |
# coding:utf-8
import numpy as np
from chainer import cuda, Function, gradient_check, report, training, utils, Variable
from chainer import datasets, iterators, optimizers, serializers
import chainer.functions as F
import chainer.links as L
import sys
import argparse
import _pickle as pickle
import MeCab
from LSTM import LSTM
BOS_INDEX = 0
EOS_INDEX = 1
# arguments
parser = argparse.ArgumentParser()
parser.add_argument('--unit_size', type=int, default=100)
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--gpu', type=int, default=-1)
args = parser.parse_args()
xp = cuda.cupy if args.gpu >= 0 else np
xp.random.seed(args.seed)
mecab = MeCab.Tagger ("-Ochasen")
m = MeCab.Tagger ("-O wakati")
vocab = pickle.load(open('data/vocab.bin','rb'))
train_data = pickle.load(open('data/train_data.bin', 'rb'))
rnn = LSTM(len(vocab),args.unit_size)
model = L.Classifier(rnn)
if args.gpu >= 0:
print('use GPU!')
cuda.get_device(args.gpu).use()
model.to_gpu()
serializers.load_npz('data/latest.model',model)
# ivocab maps indices back to words (vocab with keys and values swapped)
ivocab = {}
for c, i in vocab.items():
ivocab[i] = c
def get_index_a(_model):
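    """Sample one sentence (as a list of vocab indices) from the model.

    Starting from the beginning-of-sentence index, the LSTM output is turned
    into a probability distribution with softmax and the next index is drawn
    at random, until the end-of-sentence index is produced.
    """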
_model.predictor.reset_state()
_sentence_index_a = []
index = BOS_INDEX
while index != EOS_INDEX:
y = _model.predictor(xp.array([index], dtype=xp.int32))
probability = F.softmax(y)
probability.data[0] /= sum(probability.data[0])
try:
            # Randomly pick one word according to the probability distribution
#index = np.argmax(probability.data[0])
index = xp.random.choice(range(len(probability.data[0])), p=probability.data[0])
if index!=EOS_INDEX:
                # If it is not the end-of-sentence token <EOS>
_sentence_index_a.append(index)
except Exception as e:
print('probability error')
break
return _sentence_index_a
def get_next_word_prob(_model, word, next_word, needModelStateReset=False):
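    """Score how plausible `next_word` is as a continuation of `word`.

    The softmax output for `word` is sorted, and the position of `next_word`
    in that ranking yields `order_prob` (rank expressed as a fraction of the
    vocabulary size). Words missing from the vocabulary yield (0.0, 0.0).
    """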
if needModelStateReset:
_model.predictor.reset_state()
_sentence_index_a = []
index = vocab[word]
while index != EOS_INDEX:
y = _model.predictor(xp.array([index], dtype=xp.int32))
probability = F.softmax(y)
next_probs = probability.data[0]
m = np.argsort(probability.data[0])
break
# In this case, the input could be an unknow word.
if next_word not in vocab:
return (0.0, 0.0)
next_index = vocab[next_word]
k, = np.where(m == next_index)
order_prob = k[0] / len(m)
next_prob = next_probs[k[0]]
return (order_prob, next_prob, k[0])
def suggest_corrections(order_prob, next_prob, index, num=5):
suggestions = []
# Step1: If it's lower than 25% of the over-all probs
if order_prob < 0.25:
count = 0
for ind in order_prob[::-1]:
w = ivocab[ind]
suggestions.append(w)
count += 1
if count >= num:
break
return suggestions
def text_correction(_model, text):
tokens = m.parse(text).split()
for i in range(len(tokens)):
if i == len(tokens) - 1:
break
word = tokens[i]
next_word = tokens[i + 1]
needModelStateReset = True if i == 0 else False
(order_prob, next_prob, index) = get_next_word_prob(_model, word, next_word, needModelStateReset)
suggestions = suggest_corrections(order_prob, next_prob, index)
if len(suggestions) > 0:
print("low prob detected", order_prob, next_word, suggestions)
print('\n-=-=-=-=-=-=-=-')
#for i in range(1):
#sentence_index_a = get_index_a(model)
order_prob, next_prob, index = get_next_word_prob(model, "最大", "の", needModelStateReset=True)
print(order_prob, next_prob, index)
order_prob, next_prob, index = get_next_word_prob(model, "の", "害悪")
print(order_prob, next_prob, index)
'''
for index in sentence_index_a:
sys.stdout.write( ivocab[index].split("::")[0] )
print('\n-=-=-=-=-=-=-=-')
'''
print('generated!')
| SPJ-AI/lesson | text_generator/sc_generate.py | Python | gpl-3.0 | 4,136 |
# -*- encoding: utf-8 -*-
import os
import os.path
from .common import BaseTest
import kids.file as kf
class OemConfigTest(BaseTest):
COMMAND = 'config'
def test_get_empty(self):
out, err, errlvl = self.cmd(
'$tprog get')
self.assertEqual(
errlvl, 0,
msg=("should succeed. (errlvl=%r)\n%s"
% (errlvl, err)))
self.assertEqual(
out, "",
msg="Should display empty config. "
"Current stdout:\n%s" % out)
self.assertEqual(
err, "",
msg="Should not output anything on stderr. "
"Current stderr:\n%s" % err)
self.assertTrue(
all(not kf.chk.exists(f)
for f in self.cfg_files))
def test_global_get_empty(self):
out, err, errlvl = self.cmd(
'$tprog get --global')
self.assertEqual(
errlvl, 0,
msg=("should succeed. (errlvl=%r)\n%s"
% (errlvl, err)))
self.assertEqual(
out, "",
msg="Should display empty config. "
"Current stdout:\n%s" % out)
self.assertEqual(
err, "",
msg="Should not output anything on stderr. "
"Current stderr:\n%s" % err)
self.assertTrue(
all(not kf.chk.exists(f)
for f in [self.system_filename,
self.global_filename]))
def test_set(self):
out, err, errlvl = self.cmd(
'$tprog set a.b.c.d 2')
self.assertEqual(
errlvl, 0,
msg=("Should succeed. (errlvl=%r)\n%s"
% (errlvl, err)))
self.assertEqual(
out, "",
msg="Should not display anything on stdout. "
"Current stdout:\n%s" % out)
self.assertEqual(
err, "",
msg="Should not display anything on stderr. "
"Current stderr:\n%s" % err)
def test_global_set(self):
out, err, errlvl = self.cmd(
'$tprog set --global a.b.c.d 2')
self.assertEqual(
errlvl, 0,
msg=("Should succeed. (errlvl=%r)\n%s"
% (errlvl, err)))
self.assertEqual(
out, "",
msg="Should not display anything on stdout. "
"Current stdout:\n%s" % out)
self.assertEqual(
err, "",
msg="Should not display anything on stderr. "
"Current stderr:\n%s" % err)
self.assertFalse(kf.chk.exists(self.system_filename))
self.assertTrue(kf.chk.exists(self.global_filename))
class NoLocalPathOemConfigTest(OemConfigTest):
def test_local_get_empty(self):
out, err, errlvl = self.cmd(
'$tprog get --local')
self.assertNotEqual(
errlvl, 0,
msg="Should fail.")
self.assertEqual(
out, "",
msg="Should not display anything on stdout. "
"Current stdout:\n%s" % out)
self.assertContains(
err, "local",
msg="Should output an error message containing 'local' on stderr. "
"Current stderr:\n%s" % err)
self.assertTrue(
all(not kf.chk.exists(f)
for f in [self.system_filename,
self.global_filename]))
def test_set(self):
super(NoLocalPathOemConfigTest, self).test_set()
self.assertFalse(kf.chk.exists(self.system_filename))
self.assertTrue(kf.chk.exists(self.global_filename))
def test_local_set(self):
out, err, errlvl = self.cmd(
'$tprog set --local a.b.c.d 2')
self.assertNotEqual(
errlvl, 0,
msg=("Should fail. (errlvl=%r)\n%s"
% (errlvl, err)))
self.assertEqual(
out, "",
msg="Should not display anything on stdout. "
"Current stdout:\n%s" % out)
self.assertContains(
err, "local",
msg="Should output an error message containing 'local' on stderr. "
"Current stderr:\n%s" % err)
self.assertTrue(
all(not kf.chk.exists(f)
for f in [self.system_filename,
self.global_filename]))
def test_rm_get(self):
out = self.w('$tprog set a.b.c.d 2')
self.assertEqual(
out, "",
msg="Should display nothing. "
"Current stdout:\n%s" % out)
out, err, errlvl = self.cmd(
'$tprog rm a.b.c.d')
self.assertEqual(
errlvl, 0,
msg=("Set should succeed. (errlvl=%r)\n%s"
% (errlvl, err)))
self.assertEqual(
err, "",
            msg="There should be no standard error displayed. "
            "Current stderr:\n%r" % err)
        self.assertEqual(
            out, "",
            msg="There should be no standard output displayed. "
            "Current stdout:\n%r" % out)
out = self.w('$tprog get')
self.assertEqual(
out, "",
msg="Should display empty config. "
"Current stdout:\n%s" % out)
self.assertFalse(kf.chk.exists(self.system_filename))
self.assertTrue(kf.chk.exists(self.global_filename))
class LocalPathOemConfigTest(OemConfigTest):
def setUp(self):
super(LocalPathOemConfigTest, self).setUp()
kf.mkdir("myaddon")
os.chdir("myaddon")
kf.touch("__openerp__.py")
self.local_filename = os.path.join(self.tmpdir, "myaddon", ".oem.rc")
self.cfg_files.append(self.local_filename)
def test_local_get_empty(self):
out, err, errlvl = self.cmd(
'$tprog get')
self.assertEqual(
errlvl, 0,
msg="Should succeed.")
self.assertEqual(
out, "",
msg="Should display empty config. "
"Current stdout:\n%s" % out)
self.assertEqual(
err, "",
msg="Should not output anything on stderr. "
"Current stderr:\n%s" % err)
self.assertTrue(
all(not kf.chk.exists(f)
for f in self.cfg_files))
def test_global_set(self):
super(LocalPathOemConfigTest, self).test_global_set()
self.assertFalse(kf.chk.exists(self.local_filename))
def test_local_set(self):
out, err, errlvl = self.cmd(
'$tprog set --local a.b.c.d 2')
self.assertEqual(
errlvl, 0,
msg=("Should succeed. (errlvl=%r)\n%s"
% (errlvl, err)))
self.assertEqual(
out, "",
msg="Should not display anything on stdout. "
"Current stdout:\n%s" % out)
self.assertEqual(
err, "",
msg="Should not display anything on stderr. "
"Current stderr:\n%s" % err)
self.assertFalse(kf.chk.exists(self.system_filename))
self.assertFalse(kf.chk.exists(self.global_filename))
self.assertTrue(kf.chk.exists(self.local_filename))
def test_rm_get(self):
out = self.w('$tprog set a.b.c.d 2')
self.assertEqual(
out, "",
msg="Should display nothing. "
"Current stdout:\n%s" % out)
self.assertFalse(kf.chk.exists(self.system_filename))
self.assertFalse(kf.chk.exists(self.global_filename))
self.assertTrue(kf.chk.exists(self.local_filename))
out = self.w('$tprog set --global a.x 3')
self.assertEqual(
out, "",
msg="Should display nothing. "
"Current stdout:\n%s" % out)
self.assertFalse(kf.chk.exists(self.system_filename))
self.assertTrue(kf.chk.exists(self.global_filename))
self.assertTrue(kf.chk.exists(self.local_filename))
out = self.w('$tprog set --global a.y 3 && $tprog set a.y 3')
self.assertEqual(
out, "",
msg="Should display nothing. "
"Current stdout:\n%s" % out)
self.assertFalse(kf.chk.exists(self.system_filename))
self.assertTrue(kf.chk.exists(self.global_filename))
self.assertTrue(kf.chk.exists(self.local_filename))
out, err, errlvl = self.cmd(
'$tprog rm a.b.c.d')
self.assertEqual(
errlvl, 0,
msg=("Should succeed. (errlvl=%r)\n%s"
% (errlvl, err)))
self.assertEqual(
err, "",
            msg="There should be no standard error displayed. "
            "Current stderr:\n%r" % err)
        self.assertEqual(
            out, "",
            msg="There should be no standard output displayed. "
            "Current stdout:\n%r" % out)
out = self.w('$tprog get a.b.c.d', ignore_errlvls=[1])
self.assertEqual(
out, "",
msg="Should not display anything. "
"Current stdout:\n%s" % out)
self.assertFalse(kf.chk.exists(self.system_filename))
self.assertTrue(kf.chk.exists(self.global_filename))
self.assertTrue(kf.chk.exists(self.local_filename))
out, err, errlvl = self.cmd(
'$tprog rm a.x')
self.assertEqual(
errlvl, 0,
msg=("Should succeed. (errlvl=%r)\n%s"
% (errlvl, err)))
self.assertEqual(
err, "",
            msg="There should be no standard error displayed. "
            "Current stderr:\n%r" % err)
        self.assertEqual(
            out, "",
            msg="There should be no standard output displayed. "
            "Current stdout:\n%r" % out)
out = self.w('$tprog get a.x', ignore_errlvls=[1])
self.assertEqual(
out, "",
msg="Should not display anything. "
"Current stdout:\n%s" % out)
self.assertFalse(kf.chk.exists(self.system_filename))
self.assertTrue(kf.chk.exists(self.global_filename))
self.assertTrue(kf.chk.exists(self.local_filename))
out, err, errlvl = self.cmd(
'$tprog rm a.y')
self.assertEqual(
errlvl, 0,
msg=("Should succeed. (errlvl=%r)\n%s"
% (errlvl, err)))
self.assertEqual(
err, "",
            msg="There should be no standard error displayed. "
            "Current stderr:\n%r" % err)
        self.assertEqual(
            out, "",
            msg="There should be no standard output displayed. "
            "Current stdout:\n%r" % out)
out = self.w('$tprog get a.y', ignore_errlvls=[0])
self.assertEqual(
out, "3",
msg="Should be displaying 3. "
"Current stdout:\n%s" % out)
self.assertFalse(kf.chk.exists(self.system_filename))
self.assertTrue(kf.chk.exists(self.global_filename))
self.assertTrue(kf.chk.exists(self.local_filename))
| 0k/oem | src/oem/tests/test_oem_config.py | Python | bsd-2-clause | 10,898 |
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from __future__ import absolute_import
# Standard imports
from future import standard_library
standard_library.install_aliases()
from builtins import str
from builtins import *
from past.utils import old_div
from pymongo import MongoClient
from uuid import UUID
import time
# Our imports
import emission.analysis.modelling.tour_model as eamt
# NOTE: assumed location of calDistance(), which the DTW-based metrics below rely on
import emission.core.common as common
def pipeline(groundTruth, cluster_func, diff_metric, K_option = 'manual'):
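    """Run the route-matching pipeline for one user's ground truth.

    Fetches the track points of every section, builds the pairwise
    difference matrix with the chosen metric, clusters the routes (with
    K set to the number of ground-truth clusters when K_option is
    'manual') and returns the cluster correctness rate.
    """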
routeDict = getRouteDict(groundTruth)
differenceDict = getDifferenceDict(routeDict, diff_metric)
if K_option == 'manual':
K = len(groundTruth)
medoids, clusters = cluster_func(routeDict, K, differenceDict)
return CCR(clusters, medoids, groundTruth)
def getRouteDict(routeDB):
#print 'getting route track points ... '
routeDict = {}
for cluster in routeDB:
for _id in cluster['list']:
routeDict[_id] = eamt.route_matching.getRoute(_id)
return routeDict
def getDifferenceDict(routeDict, diff_metric = 'DTW'):
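    """Build the pairwise difference matrix for all routes.

    Returns a dict of dicts keyed by section id; each pair is evaluated
    once with the selected metric and stored in both directions so the
    clustering step can look up differences symmetrically.
    """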
#print 'calculating difference matrix ... '
ids = list(routeDict.keys())
differences = {}
for _id in ids:
differences[_id] = {}
for _id in ids:
for key in ids:
try:
differences[_id][key]
differences[key][_id]
except KeyError:
if diff_metric == 'DTW':
value = eamt.DTW.Dtw(routeDict[_id], routeDict[key], common.calDistance)
value = value.calculate_distance()
differences[_id][key] = value
differences[key][_id] = value
if diff_metric == 'newDTW':
value = eamt.DTW.dynamicTimeWarp(routeDict[_id], routeDict[key])
differences[_id][key] = value
differences[key][_id] = value
if diff_metric == 'DtwSym':
value = eamt.DTW.DtwSym(routeDict[_id], routeDict[key], common.calDistance)
value = value.calculate_distance()
differences[_id][key] = value
differences[key][_id] = value
if diff_metric == 'DtwAsym':
value = eamt.DTW.DtwAsym(routeDict[_id], routeDict[key], common.calDistance)
value = value.calculate_distance()
differences[_id][key] = value
differences[key][_id] = value
if diff_metric == 'LCS':
value = eamt.LCS.lcsScore(routeDict[_id], routeDict[key], 2000)
differences[_id][key] = value
differences[key][_id] = value
if diff_metric == 'Frechet':
value = eamt.Frechet.Frechet(routeDict[_id], routeDict[key])
differences[_id][key] = value
differences[key][_id] = value
return differences
def listCompare(list1, list2):
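    """Count the elements the two lists have in common (as sets)."""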
fst = set(list1)
Snd = set(list2)
count = 0
while len(fst) != 0 and len(Snd) != 0:
elem = fst.pop()
if elem in Snd:
count += 1
Snd.remove(elem)
return count
def CCR(testClusters, medoids, groundTruthClusters):
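    """Cluster correctness rate: for each ground-truth cluster, take the
    test cluster sharing the most sections with it, sum those best
    overlaps and divide by the total number of clustered sections."""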
N = sum([len(testClusters[i]) for i in medoids])
count = 0
clusterList = [testClusters[i] for i in medoids]
for cluster in groundTruthClusters:
maxcount = 0
for currList in clusterList:
currCount = listCompare(currList, cluster['list'])
if currCount > maxcount:
maxcount = currCount
count+= maxcount
return old_div(float(count),float(N))
if __name__ == '__main__':
usercluster = MongoClient().Routes.user_groundTruth
difference_metric = ['DTW'
,'newDTW'
,'DtwSym'
, 'DtwAsym'
#, 'LCS'
#, 'Frechet'
]
K_value_option = 'manual'
print('######')
for user in usercluster.find():
print('USER ID: ' + str(user['user_id']))
print('NUMBER OF CLUSTERS: '+ str(len(user['clusters'])))
print('NUMBER OF SECTIONS: '+ str(user['size']))
print('')
if user == 'artificialRoutes':
#ccr = pipeline(groundTruth, routeType, K_medoid.find_centers, diff_metric = difference_metric, K_option = K_value_option)
ccr = 0
else:
for metric in difference_metric:
start = time.time()
ccr = pipeline(user['clusters'], eamt.kmedoid.find_centers, metric, K_value_option)
end = time.time()
print('...DIFFERENCE METRIC: ' + metric)
print('...CLUSTER CORRECTNESS RATE: ' + str(round(ccr,2)))
print('...PIPELINE TIME ElAPSED: ' + str(round((end - start)*100, 2)) + 'ms')
print('')
print('######')
| shankari/e-mission-server | emission/analysis/modelling/tour_model/trajectory_matching/DifferenceMetricPipeline.py | Python | bsd-3-clause | 5,060 |
class A:
def <weak_warning descr="Function name should be lowercase">fooBar</weak_warning>(self): pass
class B(A):
    def fooBar(self): pass
| kdwink/intellij-community | python/testData/inspections/PyPep8NamingInspection/overridden.py | Python | apache-2.0 | 142 |
"""test_django URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
from django.contrib.auth import views as auth_views
from django.contrib.auth.views import login, logout, password_reset, password_reset_done,password_reset_confirm
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
url(r'^$', include('login.urls')),
# url(r'^login/$',login,{'template_name':'login/login_form.html'}),
url(r'^login/', include('login.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^music/', include('music.urls')),
url(r'^reset-password/$', password_reset, name='password_reset'),
url(r'^reset-password/done/$', password_reset_done, name='password_reset_done'),
    url(r'^reset-password/confirm/(?P<uidb64>[0-9A-Za-z]+)-(?P<token>.+)/$', password_reset_confirm, name='password_reset_confirm')
]
if settings.DEBUG:
urlpatterns += static(settings.STATIC_URL,document_root=settings.STATIC_ROOT)
    urlpatterns += static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT)
| sawardekar/django | test_django/urls.py | Python | mit | 1,660 |
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Dispatches the uiautomator tests."""
import logging
import os
from pylib import android_commands
from pylib.base import base_test_result
from pylib.base import shard
import test_package
import test_runner
def Dispatch(options):
"""Dispatches uiautomator tests onto connected device(s).
If possible, this method will attempt to shard the tests to
all connected devices. Otherwise, dispatch and run tests on one device.
Args:
options: Command line options.
Returns:
A TestRunResults object holding the results of the Java tests.
Raises:
Exception: when there are no attached devices.
"""
test_pkg = test_package.TestPackage(
options.uiautomator_jar, options.uiautomator_info_jar)
tests = test_pkg._GetAllMatchingTests(
options.annotations, options.test_filter)
if not tests:
logging.warning('No uiautomator tests to run with current args.')
return base_test_result.TestRunResults()
attached_devices = android_commands.GetAttachedDevices()
if not attached_devices:
raise Exception('There are no devices online.')
if options.device:
assert options.device in attached_devices
attached_devices = [options.device]
def TestRunnerFactory(device, shard_index):
return test_runner.TestRunner(
options, device, shard_index, test_pkg, [])
return shard.ShardAndRunTests(TestRunnerFactory, attached_devices, tests,
options.build_type)
| plxaye/chromium | src/build/android/pylib/uiautomator/dispatch.py | Python | apache-2.0 | 1,621 |
import tempfile
import unittest
from hamcrest import *
from tests.functional import command
from tests.functional.http_stub import HttpStub
class TestErrorHandling(unittest.TestCase):
def setUp(self):
HttpStub.start()
def tearDown(self):
HttpStub.stop()
def test_it_fails_if_neither_file_nor_stdin_provided(self):
cmd = command.do("./backdrop-send "
"--url http://localhost:8000/data_set "
"--token data_set-auth-token")
assert_that(cmd.exit_status, is_not(0))
def test_it_reports_http_errors(self):
HttpStub.set_response_codes(500)
cmd = command.do("./backdrop-send "
"--url http://localhost:8000/data_set "
"--token data_set-auth-token", stdin='{"key": "value"}')
assert_that(cmd.exit_status, is_not(0))
assert_that(cmd.stderr, contains_string("Unable to send to backdrop"))
assert_that(cmd.stderr, contains_string("500"))
def test_it_reports_connection_errors(self):
cmd = command.do("./backdrop-send "
"--url http://non-existent-url "
"--token data_set-auth-token", stdin='{"key": "value"}')
assert_that(cmd.exit_status, is_not(0))
assert_that(cmd.stderr, contains_string("Unable to send to backdrop"))
def test_it_reports_authorization_errors(self):
HttpStub.set_response_codes(403)
cmd = command.do("./backdrop-send "
"--url http://localhost:8000/data_set "
"--token wrong-token", stdin='{"key": "value"}')
assert_that(cmd.exit_status, is_not(0))
assert_that(cmd.stderr, contains_string(
"Unable to send to backdrop. "
"Unauthorised: check your access token."))
| alphagov/backdropsend | tests/functional/test_error_handling.py | Python | mit | 1,857 |
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Debug middleware"""
from __future__ import print_function
import sys
import webob.dec
from openstack.common.middleware import base
class Debug(base.Middleware):
"""Helper class that returns debug information.
Can be inserted into any WSGI application chain to get information about
the request and response.
"""
@webob.dec.wsgify
def __call__(self, req):
print(("*" * 40) + " REQUEST ENVIRON")
for key, value in req.environ.items():
print(key, "=", value)
print()
resp = req.get_response(self.application)
print(("*" * 40) + " RESPONSE HEADERS")
for (key, value) in resp.headers.iteritems():
print(key, "=", value)
print()
resp.app_iter = self.print_generator(resp.app_iter)
return resp
@staticmethod
def print_generator(app_iter):
"""Prints the contents of a wrapper string iterator when iterated."""
print(("*" * 40) + " BODY")
for part in app_iter:
sys.stdout.write(part)
sys.stdout.flush()
yield part
print()
| JioCloud/oslo-incubator | openstack/common/middleware/debug.py | Python | apache-2.0 | 1,760 |
# -*- coding: utf-8 -*-
from openerp.osv import fields, osv
import openerp.addons.decimal_precision as dp
from openerp.tools.translate import _
import datetime
import time
from dateutil.relativedelta import relativedelta
class product_uom(osv.osv):
_inherit = "product.uom"
_columns = {
'min_quantity': fields.float('Quantité minimum'),
'amount': fields.float('Montant', digits_compute= dp.get_precision('Product Price')),
}
class product_pricelist(osv.osv):
_inherit = "product.pricelist"
def _price_rule_get_multi(self, cr, uid, pricelist, products_by_qty_by_partner, context=None):
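        """Compute the price and the matched rule for each (product, qty, partner).

        Adapted pricelist computation: sale pricelists resolve quantities and
        prices in the sale unit of measure, while purchase pricelists use the
        purchase unit of measure (uom_po_id).
        """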
context = context or {}
date = context.get('date') or time.strftime('%Y-%m-%d')
date = date[0:10]
products = map(lambda x: x[0], products_by_qty_by_partner)
currency_obj = self.pool.get('res.currency')
product_obj = self.pool.get('product.template')
product_uom_obj = self.pool.get('product.uom')
price_type_obj = self.pool.get('product.price.type')
for product in products:
if product.id==False:
return {}
#if not products:
# return {}
version = False
for v in pricelist.version_id:
if ((v.date_start is False) or (v.date_start <= date)) and ((v.date_end is False) or (v.date_end >= date)):
version = v
break
if not version:
raise osv.except_osv(_('Warning!'), u"Au moins une liste de prix n'a pas de version active !\n \
Créez ou activez en une.\nListe de prix="+str(_(pricelist.name))+' : date='+str(date) + ' : Article='+str(products[0].is_code))
categ_ids = {}
for p in products:
categ = p.categ_id
while categ:
categ_ids[categ.id] = True
categ = categ.parent_id
categ_ids = categ_ids.keys()
is_product_template = products[0]._name == "product.template"
if is_product_template:
prod_tmpl_ids = [tmpl.id for tmpl in products]
# all variants of all products
prod_ids = [p.id for p in
list(chain.from_iterable([t.product_variant_ids for t in products]))]
else:
prod_ids = [product.id for product in products]
prod_tmpl_ids = [product.product_tmpl_id.id for product in products]
#Added date filter
cr.execute(
'SELECT i.id '
'FROM product_pricelist_item AS i '
'WHERE (product_tmpl_id IS NULL OR product_tmpl_id = any(%s)) '
'AND (product_id IS NULL OR (product_id = any(%s))) '
'AND ((categ_id IS NULL) OR (categ_id = any(%s))) '
'AND (price_version_id = %s) '
'AND (date_start IS NULL OR date_start <= %s) '
'AND (date_end IS NULL OR date_end >= %s) '
'ORDER BY sequence, min_quantity desc',
(prod_tmpl_ids, prod_ids, categ_ids, version.id, date, date))
item_ids = [x[0] for x in cr.fetchall()]
items = self.pool.get('product.pricelist.item').browse(cr, uid, item_ids, context=context)
price_types = {}
results = {}
for product, qty, partner in products_by_qty_by_partner:
            # TODO: works around a floating point artifact (e.g. 0.99999999) by rounding
qty=round(qty,5)
results[product.id] = 0.0
rule_id = False
price = False
            # Sale pricelists work in the product's unit of measure
            # Purchase pricelists work in the purchase unit of measure
if pricelist.type=='sale':
qty_uom_id = context.get('uom') or product.uom_id.id
price_uom_id = product.uom_id.id
else:
qty_uom_id = context.get('uom') or product.uom_po_id.id
price_uom_id = product.uom_po_id.id
qty_in_product_uom = qty
product_qty = qty
if qty_uom_id != price_uom_id:
try:
if pricelist.type=='sale':
qty_in_product_uom = product_uom_obj._compute_qty(
cr, uid, context['uom'], qty, product.uom_id.id or product.uos_id.id)
else:
qty_in_product_uom = product_uom_obj._compute_qty(
cr, uid, context['uom'], qty, product.uom_po_id.id or product.uos_id.id)
except except_orm:
# Ignored - incompatible UoM in context, use default product UoM
pass
for rule in items:
if rule.min_quantity and qty_in_product_uom < rule.min_quantity:
continue
if is_product_template:
if rule.product_tmpl_id and product.id != rule.product_tmpl_id.id:
continue
if rule.product_id and \
(product.product_variant_count > 1 or product.product_variant_ids[0].id != rule.product_id.id):
# product rule acceptable on template if has only one variant
continue
else:
if rule.product_tmpl_id and product.product_tmpl_id.id != rule.product_tmpl_id.id:
continue
if rule.product_id and product.id != rule.product_id.id:
continue
if rule.categ_id:
cat = product.categ_id
while cat:
if cat.id == rule.categ_id.id:
break
cat = cat.parent_id
if not cat:
continue
if rule.base == -1:
if rule.base_pricelist_id:
price_tmp = self._price_get_multi(cr, uid,
rule.base_pricelist_id, [(product,
qty, partner)], context=context)[product.id]
ptype_src = rule.base_pricelist_id.currency_id.id
price_uom_id = qty_uom_id
price = currency_obj.compute(cr, uid,
ptype_src, pricelist.currency_id.id,
price_tmp, round=False,
context=context)
elif rule.base == -2:
seller = False
for seller_id in product.seller_ids:
if (not partner) or (seller_id.name.id != partner):
continue
seller = seller_id
if not seller and product.seller_ids:
seller = product.seller_ids[0]
if seller:
qty_in_seller_uom = qty
seller_uom = seller.product_uom.id
if qty_uom_id != seller_uom:
qty_in_seller_uom = product_uom_obj._compute_qty(cr, uid, qty_uom_id, qty, to_uom_id=seller_uom)
price_uom_id = seller_uom
for line in seller.pricelist_ids:
if line.min_quantity <= qty_in_seller_uom:
price = line.price
else:
if rule.base not in price_types:
price_types[rule.base] = price_type_obj.browse(cr, uid, int(rule.base))
price_type = price_types[rule.base]
# price_get returns the price in the context UoM, i.e. qty_uom_id
price_uom_id = qty_uom_id
price = currency_obj.compute(
cr, uid,
price_type.currency_id.id, pricelist.currency_id.id,
product_obj._price_get(cr, uid, [product], price_type.field, context=context)[product.id],
round=False, context=context)
if price is not False:
price_limit = price
price = price * (1.0+(rule.price_discount or 0.0))
if rule.price_round:
price = tools.float_round(price, precision_rounding=rule.price_round)
if pricelist.type=='sale':
convert_to_price_uom = (lambda price: product_uom_obj._compute_price(
cr, uid, product.uom_id.id,
price, price_uom_id))
else:
convert_to_price_uom = (lambda price: product_uom_obj._compute_price(
cr, uid, product.uom_po_id.id,
price, price_uom_id))
if rule.price_surcharge and rule.min_quantity <= product_qty:
price_surcharge = convert_to_price_uom(rule.price_surcharge)
price += price_surcharge
if rule.price_min_margin:
price_min_margin = convert_to_price_uom(rule.price_min_margin)
price = max(price, price_limit + price_min_margin)
if rule.price_max_margin:
price_max_margin = convert_to_price_uom(rule.price_max_margin)
price = min(price, price_limit + price_max_margin)
rule_id = rule.id
break
# Final price conversion to target UoM
price = product_uom_obj._compute_price(cr, uid, price_uom_id, price, qty_uom_id)
results[product.id] = (price, rule_id)
return results
#class product_pricelist_item(osv.osv):
# _inherit = "product.pricelist.item"
# def create(self, cr, uid, vals, context=None):
# res=super(product_pricelist_item, self).create(cr, uid, vals, context)
# return res
| tonygalmiche/is_plastigray | product_pricelist.py | Python | mit | 10,140 |
# Copyright (C) 2013 by Clearcode <http://clearcode.cc>
# and associates (see AUTHORS).
# This file is part of pytest-redis.
# pytest-redis is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# pytest-redis is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License
# along with pytest-redis. If not, see <http://www.gnu.org/licenses/>.
"""Plugin configuration module for pytest-redis."""
from tempfile import gettempdir
from pytest_redis import factories
# pylint:disable=invalid-name
_help_exec = "Redis server executable"
_help_host = "Host at which Redis will accept connections"
_help_port = "Port at which Redis will accept connections"
_help_timeout = "Client's connection timeout in seconds"
_help_loglevel = "Redis log verbosity level"
_help_db_count = "Number of redis databases"
_help_compress = "Turn on redis dump files compression."
_help_rdbchecksum = "Whether to add checksum to the rdb files"
_help_syslog = "Whether to enable logging to the system logger"
_help_save = "Redis persistence frequency configuration - seconds keys"
_help_decode = (
    "Client: to decode response or not. " "See redis.StrictRedis decode_responses client parameter."
)
_help_datadir = "Directory where test Redis instance data files will be stored"
def pytest_addoption(parser):
"""Define configuration options."""
parser.addini(name="redis_exec", help=_help_exec, default="/usr/bin/redis-server")
parser.addini(name="redis_host", help=_help_host, default="127.0.0.1")
parser.addini(
name="redis_port",
help=_help_port,
default=None,
)
parser.addini(
name="redis_timeout",
help=_help_timeout,
default=30,
)
parser.addini(
name="redis_loglevel",
help=_help_loglevel,
default="notice",
)
parser.addini(
name="redis_db_count",
help=_help_db_count,
default=8,
)
parser.addini(
name="redis_save",
help=_help_save,
default=None,
)
parser.addini(name="redis_compression", type="bool", help=_help_compress)
parser.addini(name="redis_rdbchecksum", type="bool", help=_help_rdbchecksum)
parser.addini(name="redis_syslog", type="bool", help=_help_syslog)
parser.addini(name="redis_decode", type="bool", help=_help_decode, default=False)
parser.addini(name="redis_datadir", help=_help_datadir, default=None)
parser.addoption(
"--redis-exec",
action="store",
dest="redis_exec",
help=_help_exec,
)
parser.addoption(
"--redis-host",
action="store",
dest="redis_host",
help=_help_host,
)
parser.addoption("--redis-port", action="store", dest="redis_port", help=_help_port)
parser.addoption("--redis-timeout", action="store", dest="redis_timeout", help=_help_timeout)
parser.addoption("--redis-loglevel", action="store", dest="redis_loglevel", help=_help_loglevel)
parser.addoption("--redis-db-count", action="store", dest="redis_db_count", help=_help_db_count)
parser.addoption("--redis-save", action="store", dest="redis_save", help=_help_save)
parser.addoption(
"--redis-compression", action="store_true", dest="redis_compression", help=_help_compress
)
parser.addoption(
"--redis-rdbchecksum", action="store_true", dest="redis_rdbchecksum", help=_help_rdbchecksum
)
parser.addoption("--redis-syslog", action="store_true", dest="redis_syslog", help=_help_syslog)
parser.addoption(
"--redis-client-decode", action="store_true", dest="redis_decode", help=_help_decode
)
parser.addoption("--redis-datadir", action="store", dest="redis_datadir", help=_help_datadir)
redis_proc = factories.redis_proc()
redis_nooproc = factories.redis_noproc()
redisdb = factories.redisdb("redis_proc")
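# Minimal usage sketch (assuming a test module in the same project): the
# `redis_proc` fixture starts a disposable Redis server and `redisdb` yields a
# client connected to it, e.g.
#
#     def test_counter(redisdb):
#         redisdb.set("hits", 1)
#         assert int(redisdb.get("hits")) == 1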
# pylint:enable=invalid-name
| ClearcodeHQ/pytest-redis | src/pytest_redis/plugin.py | Python | lgpl-3.0 | 4,289 |
"""
The scramblesuit module implements the ScrambleSuit obfuscation protocol.
The paper discussing the design and evaluation of the ScrambleSuit pluggable
transport protocol is available here:
http://www.cs.kau.se/philwint/scramblesuit/
"""
from twisted.internet import error
from twisted.internet import reactor
import obfsproxy.transports.base as base
import obfsproxy.common.serialize as pack
import obfsproxy.common.log as logging
import random
import base64
import probdist
import mycrypto
import message
import const
import util
import packetmorpher
import ticket
import uniformdh
import state
log = logging.get_obfslogger()
class ScrambleSuitTransport( base.BaseTransport ):
"""
Implement the ScrambleSuit protocol.
    The class implements the ScrambleSuit protocol. A large part of the
    protocol's functionality is outsourced to different modules.
"""
def __init__( self, transportConfig ):
"""
Initialise a ScrambleSuitTransport object.
"""
log.error("\n\n################################################\n"
"Do NOT rely on ScrambleSuit for strong security!\n"
"################################################\n")
log.debug("Initialising %s." % const.TRANSPORT_NAME)
util.setStateLocation(transportConfig.getStateLocation())
# Load the server's persistent state from file.
if self.weAreServer:
self.srvState = state.load()
# Initialise the protocol's state machine.
log.debug("Switching to state ST_WAIT_FOR_AUTH.")
self.protoState = const.ST_WAIT_FOR_AUTH
# Buffers for incoming and outgoing data.
self.sendBuf = self.recvBuf = ""
# Buffer for inter-arrival time obfuscation.
self.choppingBuf = ""
# AES instances to decrypt incoming and encrypt outgoing data.
self.sendCrypter = mycrypto.PayloadCrypter()
self.recvCrypter = mycrypto.PayloadCrypter()
# Packet morpher to modify the protocol's packet length distribution.
self.pktMorpher = packetmorpher.new(self.srvState.pktDist
if self.weAreServer else None)
# Inter-arrival time morpher to obfuscate inter arrival times.
self.iatMorpher = self.srvState.iatDist if self.weAreServer else \
probdist.new(lambda: random.random() %
const.MAX_PACKET_DELAY)
if self.weAreServer:
# `True' if the ticket is already decrypted but not yet
# authenticated.
self.decryptedTicket = False
if not hasattr(self, 'uniformDHSecret'):
# As the server, we get the shared secret from the constructor.
cfg = transportConfig.getServerTransportOptions()
self.uniformDHSecret = base64.b32decode(cfg["password"])
self.uniformDHSecret = self.uniformDHSecret.strip()
else:
# As the client, we get the shared secret from obfsproxy calling
# `handle_socks_args()'.
if not hasattr(self, 'uniformDHSecret'):
self.uniformDHSecret = None
self.uniformdh = uniformdh.new(self.uniformDHSecret, self.weAreServer)
# Variables used to unpack protocol messages.
self.totalLen = self.payloadLen = self.flags = None
def deriveSecrets( self, masterKey ):
"""
Derive various session keys from the given `masterKey'.
The argument `masterKey' is used to derive two session keys and nonces
for AES-CTR and two HMAC keys. The derivation is done using
HKDF-SHA256.
"""
assert len(masterKey) == const.MASTER_KEY_LENGTH
log.debug("Deriving session keys from %d-byte master key." %
len(masterKey))
# We need key material for two symmetric AES-CTR keys, nonces and
# HMACs. In total, this equals 144 bytes of key material.
hkdf = mycrypto.HKDF_SHA256(masterKey, "", (32 * 4) + (8 * 2))
okm = hkdf.expand()
assert len(okm) >= ((32 * 4) + (8 * 2))
# Set AES-CTR keys and nonces for our two AES instances.
self.sendCrypter.setSessionKey(okm[0:32], okm[32:40])
self.recvCrypter.setSessionKey(okm[40:72], okm[72:80])
# Set the keys for the two HMACs protecting our data integrity.
self.sendHMAC = okm[80:112]
self.recvHMAC = okm[112:144]
if self.weAreServer:
self.sendHMAC, self.recvHMAC = util.swap(self.sendHMAC,
self.recvHMAC)
self.sendCrypter, self.recvCrypter = util.swap(self.sendCrypter,
self.recvCrypter)
def handshake( self, circuit ):
"""
Initiate a ScrambleSuit handshake over `circuit'.
This method is only relevant for clients since servers never initiate
handshakes. If a session ticket is available, it is redeemed.
Otherwise, a UniformDH handshake is conducted.
"""
# The server handles the handshake passively.
if self.weAreServer:
return
# The preferred authentication mechanism is a session ticket.
bridge = circuit.downstream.transport.getPeer()
storedTicket = ticket.findStoredTicket(bridge)
if storedTicket is not None:
log.debug("Redeeming stored session ticket.")
(masterKey, rawTicket) = storedTicket
self.deriveSecrets(masterKey)
circuit.downstream.write(ticket.createTicketMessage(rawTicket,
self.sendHMAC))
# We switch to ST_CONNECTED opportunistically since we don't know
# yet whether the server accepted the ticket.
log.debug("Switching to state ST_CONNECTED.")
self.protoState = const.ST_CONNECTED
self.flushSendBuffer(circuit)
# Conduct an authenticated UniformDH handshake if there's no ticket.
else:
log.debug("No session ticket to redeem. Running UniformDH.")
circuit.downstream.write(self.uniformdh.createHandshake())
def sendRemote( self, circuit, data, flags=const.FLAG_PAYLOAD ):
"""
Send data to the remote end after a connection was established.
The given `data' is first encapsulated in protocol messages. Then, the
protocol message(s) are sent over the wire using the given `circuit'.
The argument `flags' specifies the protocol message flags with the
default flags signalling payload.
"""
log.debug("Processing %d bytes of outgoing data." % len(data))
# Wrap the application's data in ScrambleSuit protocol messages.
messages = message.createProtocolMessages(data, flags=flags)
# Let the packet morpher tell us how much we should pad.
paddingLen = self.pktMorpher.calcPadding(sum([len(msg) for
msg in messages]))
# If padding > header length, a single message will do...
if paddingLen > const.HDR_LENGTH:
messages.append(message.new("", paddingLen=paddingLen -
const.HDR_LENGTH))
# ...otherwise, we use two padding-only messages.
else:
messages.append(message.new("", paddingLen=const.MPU -
const.HDR_LENGTH))
messages.append(message.new("", paddingLen=paddingLen))
blurb = "".join([msg.encryptAndHMAC(self.sendCrypter,
self.sendHMAC) for msg in messages])
# Flush data chunk for chunk to obfuscate inter arrival times.
if len(self.choppingBuf) == 0:
self.choppingBuf += blurb
reactor.callLater(self.iatMorpher.randomSample(),
self.flushPieces, circuit)
else:
# flushPieces() is still busy processing the chopping buffer.
self.choppingBuf += blurb
def flushPieces( self, circuit ):
"""
Write the application data in chunks to the wire.
The cached data is written in chunks to `circuit'. After every write
call, control is given back to the Twisted reactor so it has a chance
to flush the data. Shortly thereafter, this function is called again
to write the next chunk of data. The delays in between subsequent
write calls are controlled by the inter arrival time obfuscator.
"""
# Drain and send an MTU-sized chunk from the chopping buffer.
if len(self.choppingBuf) > const.MTU:
circuit.downstream.write(self.choppingBuf[0:const.MTU])
self.choppingBuf = self.choppingBuf[const.MTU:]
# Drain and send whatever is left in the output buffer.
else:
circuit.downstream.write(self.choppingBuf)
self.choppingBuf = ""
return
reactor.callLater(self.iatMorpher.randomSample(),
self.flushPieces, circuit)
def extractMessages( self, data, aes ):
"""
Unpacks (i.e., decrypts and authenticates) protocol messages.
The raw `data' coming directly from the wire is decrypted using `aes'
and authenticated. The payload (be it a session ticket or actual
payload) is then returned as unencrypted protocol messages. In case of
invalid headers or HMACs, an exception is raised.
"""
assert aes and (data is not None)
self.recvBuf += data
msgs = []
# Keep trying to unpack as long as there is at least a header.
while len(self.recvBuf) >= const.HDR_LENGTH:
# If necessary, extract the header fields.
if self.totalLen == self.payloadLen == self.flags == None:
self.totalLen = pack.ntohs(aes.decrypt(self.recvBuf[16:18]))
self.payloadLen = pack.ntohs(aes.decrypt(self.recvBuf[18:20]))
self.flags = ord(aes.decrypt(self.recvBuf[20]))
if not message.isSane(self.totalLen,
self.payloadLen, self.flags):
raise base.PluggableTransportError("Invalid header.")
# Parts of the message are still on the wire; waiting.
if (len(self.recvBuf) - const.HDR_LENGTH) < self.totalLen:
break
rcvdHMAC = self.recvBuf[0:const.HMAC_SHA256_128_LENGTH]
vrfyHMAC = mycrypto.HMAC_SHA256_128(self.recvHMAC,
self.recvBuf[const.HMAC_SHA256_128_LENGTH:
(self.totalLen + const.HDR_LENGTH)])
if rcvdHMAC != vrfyHMAC:
raise base.PluggableTransportError("Invalid message HMAC.")
# Decrypt the message and remove it from the input buffer.
extracted = aes.decrypt(self.recvBuf[const.HDR_LENGTH:
(self.totalLen + const.HDR_LENGTH)])[:self.payloadLen]
msgs.append(message.new(payload=extracted, flags=self.flags))
self.recvBuf = self.recvBuf[const.HDR_LENGTH + self.totalLen:]
# Protocol message processed; now reset length fields.
self.totalLen = self.payloadLen = self.flags = None
return msgs
def processMessages( self, circuit, data ):
"""
Acts on extracted protocol messages based on header flags.
After the incoming `data' is decrypted and authenticated, this method
processes the received data based on the header flags. Payload is
written to the local application using `circuit', new tickets are
stored or keys are added to the replay table.
"""
assert circuit
if (data is None) or (len(data) == 0):
return
# Try to extract protocol messages from the encrypted blurb.
msgs = self.extractMessages(data, self.recvCrypter)
if (msgs is None) or (len(msgs) == 0):
return
for msg in msgs:
# Forward data to the application.
if msg.flags & const.FLAG_PAYLOAD:
circuit.upstream.write(msg.payload)
# Store newly received ticket and send ACK to the server.
elif self.weAreClient and msg.flags == const.FLAG_NEW_TICKET:
assert len(msg) == (const.HDR_LENGTH + const.TICKET_LENGTH +
const.MASTER_KEY_LENGTH)
peer = circuit.downstream.transport.getPeer()
ticket.storeNewTicket(msg.payload[0:const.MASTER_KEY_LENGTH],
msg.payload[const.MASTER_KEY_LENGTH:
const.MASTER_KEY_LENGTH +
const.TICKET_LENGTH], peer)
# Use the PRNG seed to generate the same probability distributions
# as the server. That's where the polymorphism comes from.
elif self.weAreClient and msg.flags == const.FLAG_PRNG_SEED:
assert len(msg.payload) == const.PRNG_SEED_LENGTH
log.debug("Obtained PRNG seed.")
prng = random.Random(msg.payload)
pktDist = probdist.new(lambda: prng.randint(const.HDR_LENGTH,
const.MTU),
seed=msg.payload)
self.pktMorpher = packetmorpher.new(pktDist)
self.iatMorpher = probdist.new(lambda: prng.random() %
const.MAX_PACKET_DELAY,
seed=msg.payload)
else:
log.warning("Invalid message flags: %d." % msg.flags)
def flushSendBuffer( self, circuit ):
"""
Flush the application's queued data.
The application could have sent data while we were busy authenticating
the remote machine. Using `circuit', this method flushes the data
which could have been queued in the meanwhile in `self.sendBuf'.
"""
assert circuit
if len(self.sendBuf) == 0:
return
# Flush the buffered data, the application is so eager to send.
log.debug("Flushing %d bytes of buffered application data." %
len(self.sendBuf))
self.sendRemote(circuit, self.sendBuf)
self.sendBuf = ""
def receiveTicket( self, data ):
"""
Extract and verify a potential session ticket.
The given `data' is treated as a session ticket. The ticket is being
decrypted and authenticated (yes, in that order). If all these steps
succeed, `True' is returned. Otherwise, `False' is returned.
"""
if len(data) < (const.TICKET_LENGTH + const.MARK_LENGTH +
const.HMAC_SHA256_128_LENGTH):
return False
potentialTicket = data.peek()
# Now try to decrypt and parse the ticket. We need the master key
# inside to verify the HMAC in the next step.
if not self.decryptedTicket:
newTicket = ticket.decrypt(potentialTicket[:const.TICKET_LENGTH],
self.srvState)
if newTicket != None and newTicket.isValid():
self.deriveSecrets(newTicket.masterKey)
self.decryptedTicket = True
else:
return False
# First, find the mark to efficiently locate the HMAC.
mark = mycrypto.HMAC_SHA256_128(self.recvHMAC,
potentialTicket[:const.TICKET_LENGTH])
index = util.locateMark(mark, potentialTicket)
if not index:
return False
# Now, verify if the HMAC is valid.
existingHMAC = potentialTicket[index + const.MARK_LENGTH:
index + const.MARK_LENGTH +
const.HMAC_SHA256_128_LENGTH]
myHMAC = mycrypto.HMAC_SHA256_128(self.recvHMAC,
potentialTicket[0:
index + const.MARK_LENGTH] +
util.getEpoch())
if not util.isValidHMAC(myHMAC, existingHMAC, self.recvHMAC):
log.warning("The HMAC is invalid: `%s' vs. `%s'." %
(myHMAC.encode('hex'), existingHMAC.encode('hex')))
return False
# Do nothing if the ticket is replayed. Immediately closing the
# connection would be suspicious.
if self.srvState.isReplayed(existingHMAC):
log.warning("The HMAC was already present in the replay table.")
return False
data.drain(index + const.MARK_LENGTH + const.HMAC_SHA256_128_LENGTH)
log.debug("Adding the HMAC authenticating the ticket message to the " \
"replay table: %s." % existingHMAC.encode('hex'))
self.srvState.registerKey(existingHMAC)
log.debug("Switching to state ST_CONNECTED.")
self.protoState = const.ST_CONNECTED
return True
def receivedUpstream( self, data, circuit ):
"""
Sends data to the remote machine or queues it to be sent later.
Depending on the current protocol state, the given `data' is either
directly sent to the remote machine using `circuit' or queued. The
        buffer is then flushed once a connection is established.
"""
if self.protoState == const.ST_CONNECTED:
self.sendRemote(circuit, data.read())
# Buffer data we are not ready to transmit yet.
else:
self.sendBuf += data.read()
log.debug("Buffered %d bytes of outgoing data." %
len(self.sendBuf))
def receivedDownstream( self, data, circuit ):
"""
Receives and processes data coming from the remote machine.
The incoming `data' is dispatched depending on the current protocol
state and whether we are the client or the server. The data is either
payload or authentication data.
"""
if self.weAreServer and (self.protoState == const.ST_WAIT_FOR_AUTH):
# First, try to interpret the incoming data as session ticket.
if self.receiveTicket(data):
log.debug("Ticket authentication succeeded.")
self.flushSendBuffer(circuit)
self.sendRemote(circuit,
ticket.issueTicketAndKey(self.srvState),
flags=const.FLAG_NEW_TICKET)
self.sendRemote(circuit, self.srvState.prngSeed,
flags=const.FLAG_PRNG_SEED)
# Second, interpret the data as a UniformDH handshake.
elif self.uniformdh.receivePublicKey(data, self.deriveSecrets,
self.srvState):
# Now send the server's UniformDH public key to the client.
handshakeMsg = self.uniformdh.createHandshake()
newTicket = ticket.issueTicketAndKey(self.srvState)
log.debug("Sending %d bytes of UniformDH handshake and "
"session ticket." % len(handshakeMsg))
circuit.downstream.write(handshakeMsg)
log.debug("UniformDH authentication succeeded.")
self.sendRemote(circuit, newTicket,
flags=const.FLAG_NEW_TICKET)
self.sendRemote(circuit, self.srvState.prngSeed,
flags=const.FLAG_PRNG_SEED)
log.debug("Switching to state ST_CONNECTED.")
self.protoState = const.ST_CONNECTED
self.flushSendBuffer(circuit)
else:
log.debug("Authentication unsuccessful so far. "
"Waiting for more data.")
return
if self.weAreClient and (self.protoState == const.ST_WAIT_FOR_AUTH):
if not self.uniformdh.receivePublicKey(data, self.deriveSecrets):
log.debug("Unable to finish UniformDH handshake just yet.")
return
log.debug("Switching to state ST_CONNECTED.")
self.protoState = const.ST_CONNECTED
self.flushSendBuffer(circuit)
if self.protoState == const.ST_CONNECTED:
self.processMessages(circuit, data.read())
@classmethod
def register_external_mode_cli( cls, subparser ):
"""
        Register CLI arguments to pass a secret or ticket to ScrambleSuit.
Two options are made available over the command line interface: one to
specify a ticket file and one to specify a UniformDH shared secret.
"""
subparser.add_argument("--shared-secret",
type=str,
help="Shared secret for UniformDH",
dest="uniformDHSecret")
super(ScrambleSuitTransport, cls).register_external_mode_cli(subparser)
@classmethod
def validate_external_mode_cli( cls, args ):
"""
Assign the given command line arguments to local variables.
"""
uniformDHSecret = None
try:
uniformDHSecret = base64.b32decode(args.uniformDHSecret)
except (TypeError, AttributeError) as error:
log.error(error.message)
raise base.PluggableTransportError(
"UniformDH shared secret, '%s', isn't valid base32!"
                % args.uniformDHSecret)
parentalApproval = super(
ScrambleSuitTransport, cls).validate_external_mode_cli(args)
if not parentalApproval:
# XXX not very descriptive nor helpful, but the parent class only
# returns a boolean without telling us what's wrong.
raise base.PluggableTransportError(
"Pluggable Transport args invalid: %s" % args )
if uniformDHSecret:
rawLength = len(uniformDHSecret)
if rawLength != const.SHARED_SECRET_LENGTH:
raise base.PluggableTransportError(
"The UniformDH shared secret must be %d bytes in length,",
"but %d bytes are given."
% (const.SHARED_SECRET_LENGTH, rawLength))
else:
cls.uniformDHSecret = uniformDHSecret
def handle_socks_args( self, args ):
"""
Receive arguments `args' passed over a SOCKS connection.
The SOCKS authentication mechanism is (ab)used to pass arguments to
pluggable transports. This method receives these arguments and parses
them. As argument, we only expect a UniformDH shared secret.
"""
log.debug("Received the following arguments over SOCKS: %s." % args)
if len(args) != 1:
raise base.SOCKSArgsError("Too many SOCKS arguments "
"(expected 1 but got %d)." % len(args))
# The ScrambleSuit specification defines that the shared secret is
# called "password".
if not args[0].startswith("password="):
raise base.SOCKSArgsError("The SOCKS argument must start with "
"`password='.")
# A shared secret might already be set if obfsproxy is in external
# mode.
if self.uniformDHSecret:
log.warning("A UniformDH shared secret was already specified over "
"the command line. Using the SOCKS secret instead.")
self.uniformDHSecret = base64.b32decode(args[0].split('=')[1].strip())
rawLength = len(self.uniformDHSecret)
if rawLength != const.SHARED_SECRET_LENGTH:
raise base.PluggableTransportError("The UniformDH shared secret "
"must be %d bytes in length but %d bytes are given." %
(const.SHARED_SECRET_LENGTH, rawLength))
self.uniformdh = uniformdh.new(self.uniformDHSecret, self.weAreServer)
class ScrambleSuitClient( ScrambleSuitTransport ):
"""
Extend the ScrambleSuit class.
"""
def __init__( self, transportConfig ):
"""
Initialise a ScrambleSuitClient object.
"""
self.weAreClient = True
self.weAreServer = False
ScrambleSuitTransport.__init__(self, transportConfig)
class ScrambleSuitServer( ScrambleSuitTransport ):
"""
Extend the ScrambleSuit class.
"""
def __init__( self, transportConfig ):
"""
Initialise a ScrambleSuitServer object.
"""
self.weAreServer = True
self.weAreClient = False
ScrambleSuitTransport.__init__(self, transportConfig)
| isislovecruft/scramblesuit | scramblesuit.py | Python | bsd-3-clause | 25,113 |
#!/usr/bin/python
'''This utility prints out a mapping string that is syntactically correct
to be used in the schemas/spl_mapping.json file.
'''
import logging
import os
import simplejson as json
import sys
# TODO(hansnelsen): Add pretty printing for pleasant-looking JSON
# TODO(hansnelsen): Add writing directly to schemas/spl_mapping.json once it is
# pretty
def generate_mapping():
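  '''Build the SPL mapping as a JSON string.
  Reads ../openfda/spl/data/sections.csv (assumed to hold the section name in
  its second comma-separated column, as used below), emits one analyzed field
  and one "<name>_table" field per section, and returns the assembled mapping
  string, or None if the result is not valid JSON.
  '''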
sections = open('../openfda/spl/data/sections.csv', 'r')
mapping_list = []
openfda = '''
"openfda": {
"properties": {
"application_number": {
"type": "string",
"index": "analyzed"
},
"application_number_exact": {
"type": "string",
"index": "not_analyzed"
},
"brand_name": {
"type": "string",
"index": "analyzed"
},
"brand_name_exact": {
"type": "string",
"index": "not_analyzed"
},
"substance_name": {
"type": "string",
"index": "analyzed"
},
"substance_name_exact": {
"type": "string",
"index": "not_analyzed"
},
"dosage_form": {
"type": "string",
"index": "analyzed"
},
"dosage_form_exact": {
"type": "string",
"index": "not_analyzed"
},
"generic_name": {
"type": "string",
"index": "analyzed"
},
"generic_name_exact": {
"type": "string",
"index": "not_analyzed"
},
"manufacturer_name": {
"type": "string",
"index": "analyzed"
},
"manufacturer_name_exact": {
"type": "string",
"index": "not_analyzed"
},
"product_ndc": {
"type": "string",
"index": "analyzed"
},
"product_ndc_exact": {
"type": "string",
"index": "not_analyzed"
},
"product_type": {
"type": "string",
"index": "analyzed"
},
"product_type_exact": {
"type": "string",
"index": "not_analyzed"
},
"route": {
"type": "string",
"index": "analyzed"
},
"route_exact": {
"type": "string",
"index": "not_analyzed"
},
"rxcui": {
"type": "string",
"index": "analyzed"
},
"rxcui_exact": {
"type": "string",
"index": "not_analyzed"
},
"rxstring": {
"type": "string",
"index": "analyzed"
},
"rxstring_exact": {
"type": "string",
"index": "not_analyzed"
},
"rxtty": {
"type": "string",
"index": "analyzed"
},
"rxtty_exact": {
"type": "string",
"index": "not_analyzed"
},
"spl_id": {
"type": "string",
"index": "analyzed"
},
"spl_id_exact": {
"type": "string",
"index": "not_analyzed"
},
"package_ndc": {
"type": "string",
"index": "analyzed"
},
"package_ndc_exact": {
"type": "string",
"index": "not_analyzed"
},
"spl_set_id": {
"type": "string",
"index": "analyzed"
},
"spl_set_id_exact": {
"type": "string",
"index": "not_analyzed"
},
"unii": {
"type": "string",
"index": "analyzed"
},
"unii_exact": {
"type": "string",
"index": "not_analyzed"
},
"pharm_class_moa": {
"type": "string",
"index": "analyzed"
},
"pharm_class_moa_exact": {
"type": "string",
"index": "not_analyzed"
},
"pharm_class_pe": {
"type": "string",
"index": "analyzed"
},
"pharm_class_pe_exact": {
"type": "string",
"index": "not_analyzed"
},
"pharm_class_cs": {
"type": "string",
"index": "analyzed"
},
"pharm_class_cs_exact": {
"type": "string",
"index": "not_analyzed"
},
"pharm_class_epc": {
"type": "string",
"index": "analyzed"
},
"pharm_class_epc_exact": {
"type": "string",
"index": "not_analyzed"
},
"nui": {
"type": "string",
"index": "analyzed"
},
"nui_exact": {
"type": "string",
"index": "not_analyzed"
},
"upc": {
"type": "string",
"index": "analyzed"
},
"upc_exact": {
"type": "string",
"index": "not_analyzed"
},
"is_original_packager": {
"type": "boolean"
},
"is_original_packager_exact": {
"type": "boolean"
}
}
}'''
mapping_header = '''{
"spl": {
"_source": {
"includes": [
"*"
],
"excludes": [
"openfda.*_exact"
]
},
"properties": {
"set_id": {
"type": "string",
"index": "not_analyzed"
},
"id": {
"type": "string",
"index": "not_analyzed"
},
"version": {
"type": "string",
"index": "analyzed"
},
"effective_time": {
"type": "date",
"format": "basic_date||date"
},
'''
mapping_footer = ''',
"@timestamp": {
"type": "date",
"format": "basic_date||date"
}
}
}
}'''
mapping_list.append(mapping_header)
for row in sections:
name = row.split(',')[1]\
.replace(':', '')\
.replace(' & ', ' and ')\
.replace('/', ' or ')\
.replace(' ', '_')\
.lower()\
.replace('spl_unclassified', 'spl_unclassified_section')\
.strip()
row_string = ''' "%(name)s": {
"type": "string",
"index": "analyzed"
},
"%(name)s_table": {
"type": "string",
"index": "no",
"include_in_all": false
},''' % locals()
mapping_list.append(row_string)
mapping_list.append(openfda)
mapping_list.append(mapping_footer)
try:
mapping_string = '\n'.join(mapping_list)
json.loads(mapping_string)
return mapping_string
except:
logging.info('It appears that something is wrong with your json string')
if __name__ == '__main__':
fmt_string = '%(created)f %(filename)s:%(lineno)s [%(funcName)s] %(message)s'
logging.basicConfig(stream=sys.stderr,
format=fmt_string,
level=logging.DEBUG)
print generate_mapping() | HiTechIronMan/openfda | scripts/generate_mapping_from_sections.py | Python | cc0-1.0 | 10,264 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import pandas.testing as pdt
import pytest
from recordlinkage.preprocessing import clean
from recordlinkage.preprocessing import phonenumbers
from recordlinkage.preprocessing import phonetic
from recordlinkage.preprocessing import phonetic_algorithms
from recordlinkage.preprocessing import value_occurence
class TestCleaningStandardise(object):
def test_clean(self):
values = pd.Series([
'Mary-ann', 'Bob :)', 'Angel', 'Bob (alias Billy)', 'Mary ann',
'John', np.nan
])
expected = pd.Series(
['mary ann', 'bob', 'angel', 'bob', 'mary ann', 'john', np.nan])
clean_series = clean(values)
# Check if series are identical.
pdt.assert_series_equal(clean_series, expected)
clean_series_nothing = clean(
values,
lowercase=False,
replace_by_none=False,
replace_by_whitespace=False,
strip_accents=False,
remove_brackets=False)
        # Check if nothing happened.
pdt.assert_series_equal(clean_series_nothing, values)
def test_clean_empty(self):
""" Test the cleaning of an empty Series"""
# Check empty series
pdt.assert_series_equal(clean(pd.Series()), pd.Series())
def test_clean_unicode(self):
values = pd.Series([
u'Mary-ann', u'Bob :)', u'Angel', u'Bob (alias Billy)',
u'Mary ann', u'John', np.nan
])
expected = pd.Series([
u'mary ann', u'bob', u'angel', u'bob', u'mary ann', u'john', np.nan
])
clean_series = clean(values)
# Check if series are identical.
pdt.assert_series_equal(clean_series, expected)
def test_clean_parameters(self):
values = pd.Series([
u'Mary-ann', u'Bob :)', u'Angel', u'Bob (alias Billy)',
u'Mary ann', u'John', np.nan
])
expected = pd.Series([
u'mary ann', u'bob', u'angel', u'bob', u'mary ann', u'john', np.nan
])
clean_series = clean(
values,
lowercase=True,
replace_by_none=r'[^ \-\_A-Za-z0-9]+',
replace_by_whitespace=r'[\-\_]',
remove_brackets=True)
# Check if series are identical.
pdt.assert_series_equal(clean_series, expected)
def test_clean_lower(self):
values = pd.Series([np.nan, 'LowerHigher', 'HIGHERLOWER'])
expected = pd.Series([np.nan, 'lowerhigher', 'higherlower'])
clean_series = clean(values, lowercase=True)
# Check if series are identical.
pdt.assert_series_equal(clean_series, expected)
def test_clean_brackets(self):
values = pd.Series([np.nan, 'bra(cke)ts', 'brackets with (brackets)'])
expected = pd.Series([np.nan, 'brats', 'brackets with'])
clean_series = clean(values, remove_brackets=True)
# Check if series are identical.
pdt.assert_series_equal(clean_series, expected)
def test_clean_accent_stripping(self):
values = pd.Series(['ősdfésdfë', 'without'])
expected = pd.Series(['osdfesdfe', 'without'])
values_unicode = pd.Series([u'ősdfésdfë', u'without'])
expected_unicode = pd.Series([u'osdfesdfe', u'without'])
# values_callable = pd.Series([u'ősdfésdfë', u'without'])
# expected_callable = pd.Series([u'ősdfésdfë', u'without'])
# # Callable.
# pdt.assert_series_equal(
# clean(values_callable, strip_accents=lambda x: x),
# expected_callable)
# Check if series are identical.
pdt.assert_series_equal(
clean(values, strip_accents='unicode'), expected)
# Check if series are identical.
pdt.assert_series_equal(clean(values, strip_accents='ascii'), expected)
# Check if series are identical.
pdt.assert_series_equal(
clean(values_unicode, strip_accents='unicode'), expected_unicode)
# Check if series are identical.
pdt.assert_series_equal(
clean(values_unicode, strip_accents='ascii'), expected_unicode)
with pytest.raises(ValueError):
clean(values, strip_accents='unknown_algorithm')
def test_clean_phonenumbers(self):
values = pd.Series(
[np.nan, '0033612345678', '+1 201 123 4567', '+336-123 45678'])
expected = pd.Series(
[np.nan, '0033612345678', '+12011234567', '+33612345678'])
clean_series = phonenumbers(values)
# Check if series are identical.
pdt.assert_series_equal(clean_series, expected)
def test_value_occurence(self):
values = pd.Series([
np.nan, np.nan, 'str1', 'str1', 'str1', 'str1', 'str2', 'str3',
'str3', 'str1'
])
expected = pd.Series([2, 2, 5, 5, 5, 5, 1, 2, 2, 5])
pdt.assert_series_equal(value_occurence(values), expected)
class TestEncodingStandardise(object):
def test_encode_soundex(self):
values = pd.Series([
np.nan, u'John', u'Mary Ann', u'billy', u'Jonathan', u'Gretha',
u'Micheal', u'Sjors'
])
expected = pd.Series([
np.nan, u'J500', u'M650', u'B400', u'J535', u'G630', u'M240',
u'S620'
])
phon = phonetic(values, 'soundex')
pdt.assert_series_equal(phon, expected)
def test_encode_nysiis(self):
values = pd.Series([
np.nan, u'John', u'Mary Ann', u'billy', u'Jonathan', u'Gretha',
u'Micheal', u'Sjors'
])
expected = pd.Series([
np.nan, u'JAN', u'MARYAN', u'BALY', u'JANATAN', u'GRAT', u'MACAL',
u'SJAR'
])
phon = phonetic(values, 'nysiis')
pdt.assert_series_equal(phon, expected)
def test_encode_metaphone(self):
values = pd.Series([
np.nan, u'John', u'Mary Ann', u'billy', u'Jonathan', u'Gretha',
u'Micheal', u'Sjors'
])
expected = pd.Series(
[np.nan, u'JN', u'MRYN', u'BL', u'JN0N', u'KR0', u'MXL', u'SJRS'])
phon = phonetic(values, method='metaphone')
pdt.assert_series_equal(phon, expected)
def test_encode_match_rating(self):
values = pd.Series([
np.nan, u'John', u'Mary Ann', u'billy', u'Jonathan', u'Gretha',
u'Micheal', u'Sjors'
])
expected = pd.Series([
np.nan, u'JHN', u'MRYNN', u'BLLY', u'JNTHN', u'GRTH', u'MCHL',
u'SJRS'
])
phon = phonetic(values, method='match_rating')
pdt.assert_series_equal(phon, expected)
def test_phonetic_does_not_exist(self):
values = pd.Series([
np.nan, u'John', u'Mary Ann', u'billy', u'Jonathan', u'Gretha',
u'Micheal', u'Sjors'
])
with pytest.raises(ValueError):
phonetic(values, 'unknown_algorithm')
def test_list_of_algorithms(self):
algorithms = phonetic_algorithms
assert isinstance(algorithms, list)
assert 'soundex' in algorithms
assert 'nysiis' in algorithms
assert 'metaphone' in algorithms
assert 'match_rating' in algorithms
| J535D165/recordlinkage | tests/test_preprocessing.py | Python | bsd-3-clause | 7,315 |
#!/usr/bin/env python
import json
import threading
import time
import Tkinter as tk
import ttk
import tkFileDialog as filedialog
import urllib
import urllib2
class JSONText(tk.Text):
def __init__(self, *args, **kwargs):
tk.Text.__init__(self, *args, **kwargs)
self.tag_configure('number', foreground='#009999')
self.tag_configure('boolean', font='bold')
self.tag_configure('string', foreground='#dd1144')
self.tag_configure('keystring', foreground='#000080')
def highlight(self):
self.set_tags('(0|[1-9])[0-9]*(\.[0-9]*)?', 'number')
self.set_tags('(true|false|null)', 'boolean')
self.set_tags('"[^":]*"', 'string')
self.set_tags('"[^":]*"(?=\:)', 'keystring')
def set_tags(self, pattern, tag):
start = self.index('1.0')
self.mark_set('matchStart', start)
self.mark_set('matchEnd', start)
self.mark_set('searchLimit', self.index('end'))
count = tk.IntVar()
while True:
index = self.search(pattern, 'matchEnd', 'searchLimit',
count=count, regexp=True)
if not index:
break
self.mark_set('matchStart', index)
self.mark_set('matchEnd', '{}+{}c'.format(index, count.get()))
self.tag_add(tag, 'matchStart', 'matchEnd')
class App(ttk.Frame):
def __init__(self, master=None):
ttk.Frame.__init__(self, master)
self.grid(sticky='nswe')
def init(self):
self.master.title('ShnergleDevClient')
self.master.option_add('*tearOff', tk.FALSE)
menu = tk.Menu(self.master)
menu_protocol = tk.Menu(menu)
self.url_protocol = tk.StringVar()
self.url_protocol.set('https')
menu_protocol.add_radiobutton(label='HTTP', variable=self.url_protocol,
value='http')
menu_protocol.add_radiobutton(label='HTTPS',
variable=self.url_protocol,
value='https')
menu_server = tk.Menu(menu)
self.url_server = tk.StringVar()
self.url_server.set('shnergle-api.azurewebsites.net')
menu_server.add_radiobutton(label='shnergle-api.azurewebsites.net',
variable=self.url_server,
value='shnergle-api.azurewebsites.net')
menu_server.add_radiobutton(label='localhost',
variable=self.url_server,
value='localhost')
menu_port = tk.Menu(menu)
self.url_port = tk.StringVar()
self.url_port.set('default')
menu_port.add_radiobutton(label='Default', variable=self.url_port,
value='default')
menu_port.add_separator()
menu_port.add_radiobutton(label='80', variable=self.url_port,
value='80')
menu_port.add_radiobutton(label='443', variable=self.url_port,
value='443')
menu_port.add_radiobutton(label='8080', variable=self.url_port,
value='8080')
menu_version = tk.Menu(menu)
self.url_version = tk.StringVar()
self.url_version.set('latest')
menu_version.add_radiobutton(label='Latest', variable=self.url_version,
value='latest')
menu_version.add_separator()
menu_version.add_radiobutton(label='1', variable=self.url_version,
                                     value='1')
menu_wrap = tk.Menu(menu)
self.wrap_mode = tk.StringVar()
self.wrap_mode.set('none')
menu_wrap.add_radiobutton(label='None', variable=self.wrap_mode,
value='none', command=self.set_wrap)
menu_wrap.add_radiobutton(label='Character', variable=self.wrap_mode,
value='char', command=self.set_wrap)
menu_wrap.add_radiobutton(label='Word', variable=self.wrap_mode,
value='word', command=self.set_wrap)
menu_clear_history = tk.Menu(menu)
menu_clear_history.add_command(label='Facebook ID',
command=self.clear_history_facebook)
menu_clear_history.add_command(label='Other Parameters',
command=self.clear_history_params)
menu_clear_history.add_command(label='Image',
command=self.clear_history_image)
menu_clear_history.add_separator()
menu_clear_history.add_command(label='All',
command=self.clear_history)
menu_window = tk.Menu(menu, name='file')
menu_help = tk.Menu(menu, name='help')
menu.add_cascade(menu=menu_protocol, label='Protocol')
menu.add_cascade(menu=menu_server, label='Server')
menu.add_cascade(menu=menu_port, label='Port')
menu.add_cascade(menu=menu_version, label='API Version')
menu.add_cascade(menu=menu_wrap, label='Editor Wrap')
menu.add_cascade(menu=menu_clear_history, label='Clear History')
menu.add_cascade(menu=menu_window, label='Window')
menu.add_cascade(menu=menu_help, label='Help')
self.master['menu'] = menu
main_bar = ttk.Frame(self)
self.url_method = tk.StringVar()
ttk.Combobox(main_bar, textvariable=self.url_method,
values=['images',
'rankings',
'users',
'user_searches']).grid(sticky='nswe')
self.url_action = tk.StringVar()
self.url_action.set('get')
ttk.Radiobutton(main_bar, text='get', variable=self.url_action,
value='get').grid(row=0, column=1)
ttk.Radiobutton(main_bar, text='set', variable=self.url_action,
value='set').grid(row=0, column=2)
ttk.Button(main_bar, text='Send', command=self.retrieve).grid(row=0,
column=3)
main_bar.grid(columnspan=4, sticky='nswe')
main_bar.columnconfigure(0, weight=1)
ttk.Label(self, text='Facebook ID:').grid()
self.post_facebook = tk.StringVar()
self.post_facebook.set('test')
self.combo_facebook = ttk.Combobox(self,
textvariable=self.post_facebook)
self.combo_facebook.grid(row=1, column=1, columnspan=2, sticky='we')
ttk.Button(self, text='Clear', command=self.clear_facebook).grid(
row=1, column=3)
ttk.Label(self, text='Other Parameters:').grid()
self.post_params = tk.StringVar()
self.post_params.set('key=value&variable=content')
self.combo_params = ttk.Combobox(self, textvariable=self.post_params)
self.combo_params.grid(row=2, column=1, columnspan=2, sticky='we')
ttk.Button(self, text='Clear', command=self.clear_params).grid(
row=2, column=3)
ttk.Label(self, text='Image:').grid()
self.post_image = tk.StringVar()
self.combo_image = ttk.Combobox(self, textvariable=self.post_image)
self.combo_image.grid(row=3, column=1, sticky='we')
ttk.Button(self, text='Browse', command=self.browse_image).grid(
row=3, column=2)
ttk.Button(self, text='Clear', command=self.clear_image).grid(
row=3, column=3)
editor = ttk.Frame(self)
self.output = JSONText(editor, state='disabled')
self.output.grid(sticky='nswe')
output_scroll_y = ttk.Scrollbar(editor, orient=tk.VERTICAL,
command=self.output.yview)
output_scroll_y.grid(row=0, column=1, sticky='ns')
output_scroll_x = ttk.Scrollbar(editor, orient=tk.HORIZONTAL,
command=self.output.xview)
output_scroll_x.grid(sticky='we')
self.output.configure(xscrollcommand=output_scroll_x.set,
yscrollcommand=output_scroll_y.set,
wrap=self.wrap_mode.get())
ttk.Sizegrip(editor).grid(row=1, column=1, sticky='se')
editor.rowconfigure(0, weight=1)
editor.columnconfigure(0, weight=1)
editor.grid(columnspan=4, sticky='nswe')
self.winfo_toplevel().rowconfigure(0, weight=1)
self.winfo_toplevel().columnconfigure(0, weight=1)
self.rowconfigure(4, weight=1)
self.columnconfigure(1, weight=1)
return self
def retrieve(self):
self.dialog = tk.Toplevel(self)
self.dialog.resizable(tk.FALSE, tk.FALSE)
self.dialog.title('Loading...')
progress = ttk.Progressbar(self.dialog, orient=tk.HORIZONTAL,
length=250, mode='indeterminate')
progress.pack()
progress.start()
RetrievalThread(self).start()
@property
def address(self):
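        '''Assemble the request URL from the selected protocol, server, port,
        API version, method and action. With the default settings this yields,
        for example, https://shnergle-api.azurewebsites.net/users/get
        (assuming the "users" method and "get" action are selected).'''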
port = ''
if self.url_port.get() != 'default':
port = ':' + self.url_port.get()
version = ''
if self.url_version.get() != 'latest':
version = '/v' + self.url_version.get()
return (self.url_protocol.get() + '://' + self.url_server.get() +
port + version + '/' + self.url_method.get() + '/' +
self.url_action.get())
@property
def data(self):
res = dict(i.split('=') for i in self.post_params.get().split('&'))
res['app_secret'] = 'FCuf65iuOUDCjlbiyyer678Coutyc64v655478VGvgh76'
if self.post_facebook.get():
res['facebook_id'] = self.post_facebook.get()
if self.post_image.get():
res['image'] = open(self.post_image.get())
return urllib.urlencode(res).encode('utf8')
def escape(self, jsonstr):
jsonstr = jsonstr.replace('\\n', '\n')
jsonstr = jsonstr.replace('\\"', "'")
jsonstr = jsonstr.replace('\\\\', '\\')
return jsonstr
def pretty_print(self, jsonstr):
try:
return self.escape(json.dumps(json.loads(jsonstr), sort_keys=True,
indent=4, separators=(',', ': ')))
except Exception:
return jsonstr
def clear_facebook(self):
self.post_facebook.set('')
def clear_params(self):
self.post_params.set('')
def clear_image(self):
self.post_image.set('')
def clear_history(self):
self.clear_history_facebook()
self.clear_history_params()
self.clear_history_image()
def clear_history_facebook(self):
self.combo_facebook['values'] = ()
def clear_history_params(self):
self.combo_params['values'] = ()
def clear_history_image(self):
self.combo_image['values'] = ()
def browse_image(self):
self.post_image.set(filedialog.askopenfilename())
def set_wrap(self):
self.output['wrap'] = self.wrap_mode.get()
class RetrievalThread(threading.Thread):
def __init__(self, main):
threading.Thread.__init__(self)
self.main = main
def run(self):
self.main.output['state'] = 'normal'
for combo in (self.main.combo_facebook, self.main.combo_params,
self.main.combo_image):
combo_params = []
combo_params.append(combo.get())
for item in combo['values']:
if item and item != combo.get():
combo_params.append(item)
combo['values'] = combo_params
try:
self.main.output.delete('1.0', tk.END)
except Exception:
pass
result = ''
loadtime = None
try:
loadtime = time.time()
result = urllib2.urlopen(self.main.address, self.main.data)
result = result.read()
loadtime = time.time() - loadtime
self.main.master.title('ShnergleDevClient - ' + str(loadtime) + 's')
result = self.main.pretty_print(result.decode('utf8'))
except urllib2.URLError as e:
if hasattr(e, 'read'):
result = self.main.pretty_print(e.read().decode('utf8'))
else:
result = e
except Exception as e:
result = e
self.main.output.insert(tk.END, result)
self.main.output.highlight()
self.main.output['state'] = 'disabled'
self.main.dialog.destroy()
if __name__ == '__main__':
App().init().mainloop()
| shnergle/ShnergleDevClient | main.py | Python | mit | 12,831 |
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import testpy
def GetConfiguration(context, root):
return testpy.AsyncHooksTestConfiguration(context, root, 'async-hooks')
| MTASZTAKI/ApertusVR | plugins/languageAPI/jsAPI/3rdParty/nodejs/10.1.0/source/test/async-hooks/testcfg.py | Python | mit | 204 |
from django import forms
from django.core.exceptions import ValidationError
from cyder.models import Ctnr
from cyder.cydns.address_record.models import AddressRecord
from cyder.cydns.view.models import View
from cyder.cydns.forms import DNSForm
from cyder.cydns.nameserver.models import Nameserver
from cyder.cydhcp.interface.static_intr.models import StaticInterface
from cyder.base.mixins import UsabilityFormMixin
class NameserverForm(DNSForm, UsabilityFormMixin):
glue_ip_str = forms.CharField(label="Glue's IP Address", required=False)
glue_ctnr = forms.ModelChoiceField(
queryset=Ctnr.objects.all(),
required=False,
label="Glue's Container")
class Meta:
model = Nameserver
fields = ('domain', 'server', 'views', 'ttl', 'glue_ip_str',
'glue_ctnr', 'description')
exclude = ('addr_glue', 'intr_glue')
widgets = {'views': forms.CheckboxSelectMultiple}
def __init__(self, *args, **kwargs):
super(NameserverForm, self).__init__(*args, **kwargs)
if not self.instance:
return
if not self.instance.glue:
# If it doesn't have glue, it doesn't need it.
return
addr_glue = AddressRecord.objects.filter(
label=self.instance.glue.label,
domain=self.instance.glue.domain)
intr_glue = StaticInterface.objects.filter(
label=self.instance.glue.label,
domain=self.instance.glue.domain)
glue_choices = []
for glue in addr_glue:
glue_choices.append(("addr_{0}".format(glue.pk), str(glue)))
for glue in intr_glue:
glue_choices.append(("intr_{0}".format(glue.pk), str(glue)))
if isinstance(self.instance.glue, AddressRecord):
initial = "addr_{0}".format(self.instance.glue.pk)
elif isinstance(self.instance.glue, StaticInterface):
initial = "intr_{0}".format(self.instance.glue.pk)
self.fields['glue'] = forms.ChoiceField(choices=glue_choices,
initial=initial)
def clean(self, *args, **kwargs):
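        """Validate the nameserver and, when the zone is delegated and the
        server name falls inside it, build or reuse the glue address record
        from the glue IP and container fields.
        (Summary inferred from the validation logic below.)
        """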
self.glue = None
if self.instance.pk is None:
domain = self.cleaned_data['domain']
glue_ip_str, glue_ctnr = (self.cleaned_data['glue_ip_str'],
self.cleaned_data['glue_ctnr'])
server = self.cleaned_data['server'].strip('.')
if '.' in server:
if server.lower() != domain.name.lower():
glue_label, glue_domain = tuple(server.split('.', 1))
glue_domain = glue_domain.strip('.').lower()
else:
glue_label, glue_domain = "", server.lower()
else:
raise ValidationError(
"Please provide a fully qualified server name.")
if domain.delegated:
if glue_domain and glue_domain != domain.name.lower():
if glue_ip_str or glue_ctnr:
raise ValidationError(
"This record does not need glue, so "
"please leave the glue fields blank.")
else:
if not (glue_ip_str and glue_ctnr):
raise ValidationError(
"This zone is delegated, so "
"please provide information for glue.")
gluekwargs = {'domain': domain, 'label': glue_label,
'ip_str': glue_ip_str, 'ctnr': glue_ctnr}
try:
self.glue = AddressRecord.objects.get(**gluekwargs)
except AddressRecord.DoesNotExist:
self.glue = AddressRecord(**gluekwargs)
self.glue.set_is_glue()
self.glue.save()
for v in View.objects.all():
self.glue.views.add(v)
elif not domain.delegated and (glue_ip_str or glue_ctnr):
raise ValidationError("This zone is not delegated, so please "
"leave the glue fields blank.")
cleaned_data = super(NameserverForm, self).clean(*args, **kwargs)
return cleaned_data
def save(self, *args, **kwargs):
try:
super(NameserverForm, self).save(*args, **kwargs)
except Exception, e:
if self.glue and self.glue.pk is not None:
self.glue.delete(validate_glue=False)
raise ValidationError(e)
class NSDelegated(forms.Form):
server = forms.CharField()
server_ip_address = forms.CharField()
| akeym/cyder | cyder/cydns/nameserver/forms.py | Python | bsd-3-clause | 4,749 |
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf import settings
from django.http import HttpResponseRedirect # noqa
from django.utils import timezone
from horizon import exceptions
from horizon import middleware
from horizon.test import helpers as test
class MiddlewareTests(test.TestCase):
def setUp(self):
self._timezone_backup = timezone.get_current_timezone_name()
return super(MiddlewareTests, self).setUp()
def tearDown(self):
timezone.activate(self._timezone_backup)
return super(MiddlewareTests, self).tearDown()
def test_redirect_login_fail_to_login(self):
url = settings.LOGIN_URL
request = self.factory.post(url)
mw = middleware.HorizonMiddleware()
resp = mw.process_exception(request, exceptions.NotAuthorized())
resp.client = self.client
self.assertRedirects(resp, url)
def test_process_response_redirect_on_ajax_request(self):
url = settings.LOGIN_URL
mw = middleware.HorizonMiddleware()
request = self.factory.post(url,
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
request.META['HTTP_X_REQUESTED_WITH'] = 'XMLHttpRequest'
request.horizon = {'async_messages':
[('error', 'error_msg', 'extra_tag')]}
response = HttpResponseRedirect(url)
response.client = self.client
resp = mw.process_response(request, response)
self.assertEqual(200, resp.status_code)
self.assertEqual(url, resp['X-Horizon-Location'])
def test_timezone_awareness(self):
url = settings.LOGIN_REDIRECT_URL
mw = middleware.HorizonMiddleware()
request = self.factory.get(url)
request.session['django_timezone'] = 'America/Chicago'
mw.process_request(request)
self.assertEqual(
timezone.get_current_timezone_name(), 'America/Chicago')
request.session['django_timezone'] = 'Europe/Paris'
mw.process_request(request)
self.assertEqual(timezone.get_current_timezone_name(), 'Europe/Paris')
request.session['django_timezone'] = 'UTC'
mw.process_request(request)
self.assertEqual(timezone.get_current_timezone_name(), 'UTC')
| ankur-gupta91/horizon-net-ip | horizon/test/tests/middleware.py | Python | apache-2.0 | 2,862 |
# Copyright 2016-2021 Peppy Player [email protected]
#
# This file is part of Peppy Player.
#
# Peppy Player is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Peppy Player is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Peppy Player. If not, see <http://www.gnu.org/licenses/>.
import pygame
import os
from ui.component import Component
from svg import Parser, Rasterizer
from pyowm import OWM
from pyowm.utils.config import get_default_config
from datetime import datetime
LOCATION = "location"
WIND_LABEL = "wind"
WIND = "wnd"
ASTRONOMY = "astronomy"
CONDITION = "condition"
CHILL = "chill"
DIRECTION = "direction"
SPEED = "speed"
TEMPERATURE = "temp"
HUMIDITY = "humidity"
PRESSURE = "pressure"
PRESS = "press"
VISIBILITY = "visibility"
SUNRISE = "sunrise"
SUNSET = "sunset"
CODE = "code"
IMAGE_CODE = "weather_icon_name"
TEXT = "text"
DAY = "day"
HIGH = "high"
LOW = "low"
UNIT = "unit"
UNITS = "units"
ICONS_FOLDER = "icons"
CODE_UNKNOWN = "3200"
STATUS = "status"
DEG = "deg"
BLACK = (0, 0, 0)
DEGREE_SYMBOL = "\u00B0"
UNKNOWN = "?"
MPH = "mph"
GENERATED_IMAGE = "generated.img."
class WeatherUtil(object):
""" Utility class """
def __init__(self, app_key, weather_config, labels, unit, path):
""" Initializer
:param app_key: OpenWeather API key
:param weather_config: weather config object
:param labels: labels
:param unit: unit
:param path: base path
"""
self.app_key = app_key
self.weather_config = weather_config
self.labels = labels
self.base_path = path
self.unit_config = unit
config_dict = get_default_config()
config_dict['language'] = weather_config["language"]
owm = OWM(app_key, config_dict)
self.weather_manager = owm.weather_manager()
if unit.lower() == "f":
self.unit = "imperial"
else:
self.unit = "metric"
self.image_cache = {}
self.code_image_map = {}
self.code_image_map["01d"] = "01d.svg"
self.code_image_map["01n"] = "01n.svg"
self.code_image_map["02d"] = "02d.svg"
self.code_image_map["02n"] = "02n.svg"
self.code_image_map["03d"] = "03d.svg"
self.code_image_map["03n"] = "03n.svg"
self.code_image_map["04d"] = "04d.svg"
self.code_image_map["04n"] = "04n.svg"
self.code_image_map["09d"] = "09d.svg"
self.code_image_map["09n"] = "09n.svg"
self.code_image_map["10d"] = "10d.svg"
self.code_image_map["10n"] = "10n.svg"
self.code_image_map["11d"] = "11d.svg"
self.code_image_map["11n"] = "11n.svg"
self.code_image_map["13d"] = "13d.svg"
self.code_image_map["13n"] = "13n.svg"
self.code_image_map["50d"] = "50d.svg"
self.code_image_map["50n"] = "50n.svg"
self.weather_json = None
self.last_load_timestamp = None
def load_json(self, latitude, longitude, force=False):
""" Load weather json object from OpenWeather
:param latitude: latitude
:param longitude: longitude
:param force: enforce loading
:return: weather object
"""
if not latitude and not longitude:
self.city = None
return None
now = datetime.now()
if self.last_load_timestamp and not force:
diff = now.minute - self.last_load_timestamp.minute
if diff <= 10:
return self.weather
self.weather = self.current_observation = self.forecasts = None
try:
self.weather = self.weather_manager.one_call(lat=float(latitude), lon=float(longitude), exclude='minutely,hourly,alerts', units=self.unit)
except:
return None
if self.weather and self.weather.current and self.weather.forecast_daily:
self.current_observation = self.weather.current
self.forecasts = self.weather.forecast_daily
else:
self.weather = None
self.last_load_timestamp = now
return self.weather
def get_units(self):
""" Get weather units
:return: units
"""
return self.unit_config.upper()
def get_wind(self):
""" Get wind section
:return: wind section
"""
return {
SPEED: str(self.current_observation.wnd[SPEED]),
}
def get_atmosphere(self):
""" Get atmosphere section
:return: atmosphere section
"""
return {
HUMIDITY: str(self.current_observation.humidity)
}
def get_astronomy(self):
""" Get astronomy section (sunrise/sunset)
:return: astronomy section
"""
astronomy = {
SUNRISE: self.current_observation.srise_time,
SUNSET: self.current_observation.sset_time
}
return astronomy
def get_condition(self):
""" Get condition section
:return: condition section
"""
condition = {
TEMPERATURE: self.get_temperature(self.current_observation.temp[TEMPERATURE]),
IMAGE_CODE: self.current_observation.weather_icon_name,
STATUS: self.current_observation.detailed_status.capitalize()
}
return condition
def get_temperature(self, t):
""" Create temperature string from the float number
:param t: temperature as a float number
        :return: temperature as integer string
"""
temp = str(t)
index = temp.find(".")
if index == -1:
index = temp.find(",")
if index != -1:
temp_current = temp[0 : index]
else:
temp_current = temp
return temp_current
def get_forecast(self):
""" Get forecast section
:return: forecast section
"""
return self.forecasts
def load_multi_color_svg_icon(self, folder, image_name, bounding_box=None):
""" Load SVG image
:param folder: icon folder
:param image_name: svg image file name
:param bounding_box: image bounding box
:return: bitmap image rasterized from svg image
"""
name = self.code_image_map[image_name]
path = os.path.join(self.base_path, folder, name)
cache_path = path + "." + str(bounding_box.w) + "." + str(bounding_box.h)
try:
i = self.image_cache[cache_path]
return (cache_path, i)
except KeyError:
pass
try:
svg_image = Parser.parse_file(path)
except:
return None
w = svg_image.width + 2
h = svg_image.height + 2
k_w = bounding_box.w / w
k_h = bounding_box.h / h
scale_factor = min(k_w, k_h)
w_final = int(w * scale_factor)
h_final = int(h * scale_factor)
r = Rasterizer()
buff = r.rasterize(svg_image, w_final, h_final, scale_factor)
image = pygame.image.frombuffer(buff, (w_final, h_final), 'RGBA')
self.image_cache[cache_path] = image
return (cache_path, image)
def get_text_width(self, text, fgr, font_height):
""" Calculate text width
:param text: text
:param fgr: text color
:param font_height: font height
:return: text width
"""
self.font = self.get_font(font_height)
size = self.font.size(text)
label = self.font.render(text, 1, fgr)
return label.get_size()[0]
def get_text_component(self, text, fgr, font_height):
""" Create text component using supplied parameters
:param text: text
:param fgr: text color
:param font_height: font height
:return: text component
"""
self.font = self.get_font(font_height)
label = self.font.render(text, 1, fgr)
comp = Component(self, label)
comp.text = text
comp.text_size = font_height
comp.fgr = fgr
return comp
def draw_image(self, image, x, y, container, rect, name):
""" Draw background defined by input parameters
:param image: image to draw
:param x: x coordinate
:param y: y coordinate
:param container: container to which image will be added
:param rect: bounding box
:param name: component name
"""
c = Component(self)
c.name = name
c.content = image
c.content_x = x
c.content_y = y
c.image_filename = c.name
c.bounding_box = rect
container.add_component(c)
return c
def get_weekday(self, ms):
""" Get weekday from milliseconds
:param ms: milliseconds
:return: weekday
"""
dt = datetime.fromtimestamp(ms)
wd = dt.weekday()
return self.labels["weekday." + str(wd)]
def get_time(self, t):
""" Get time
:param t: time input string
:return: formatted time
"""
dt = datetime.fromtimestamp(t)
return dt.strftime("%H") + ":" + dt.strftime("%M")
| project-owner/Peppy | screensaver/peppyweather/weatherutil.py | Python | gpl-3.0 | 9,893 |
'''
=========================
Infusionsoft Experiment
=========================
[](https://travis-ci.org/BMJHayward/infusionsoft_xpmt)
DESCRIPTION
=============
API wrapper for Infusionsoft CRM. Infusionsoft a.k.a. 'IS' from here on. Intended use is specific reporting not found in IS itself.
Extract, transform, load data from IS, send to excel, csv, pandas, matplotlib, numpy etc.
dataserv and pandaserv are main files of interest for the moment.
For target reports see classes inheriting from LocalDB, Query and Extract in dataserv.py.
TODO:
========
+ use pandas or matplotlib for IO/dataviz
'''
import glob
import sys
import os, os.path
import sqlite3
import csv
import time
import statistics
from datetime import datetime, timedelta, date
from infusionsoft.library import Infusionsoft
import json
import locale
import pickle
from collections import OrderedDict
from shutil import move
from dateutil import parser
RAW_DATA_DIR = 'rawdata'
RESULT_DATA_DIR = 'resultdata'
DB_DIR = 'databases'
try:
os.mkdir( RAW_DATA_DIR )
except FileExistsError as fexe:
print('using existing data folder: ', RAW_DATA_DIR)
try:
os.mkdir( RESULT_DATA_DIR )
except FileExistsError as fexe:
print('using existing results folder: ', RESULT_DATA_DIR)
try:
os.mkdir( DB_DIR )
except FileExistsError as fexe:
print('using existing database folder: ', DB_DIR)
class LocalDB:
''' Methods for operating on local sqlite database.
Would like report classes to be able use either Query or LocalDB in the same way. Maybe.
'''
@staticmethod
def sendto_sqlite(query_array, newtable, db='dataserv.db'):
'''Use sqlite3 module to output to local DB. Saves API calls. Using text datatypes
in tables to avoid type conversion for datetime objects.
'''
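        # Illustrative usage (table and column names are made up):
        #   LocalDB.sendto_sqlite([('Id', 'Name'), (1, 'Jane')], 'contacts_copy')
        # A dict input is written to a two-column (key, value) table instead.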
conn = sqlite3.connect(db, detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)
c = conn.cursor()
if isinstance(query_array, dict):
create_table = 'CREATE TABLE ' + newtable + ' (key, value);'
c.execute(create_table)
insert_into_table = 'INSERT INTO ' + newtable + ' values (?,?);'
for item in query_array.items():
c.execute(insert_into_table, (item[0], str(item[1])))
elif isinstance(query_array, list):
create_table = 'CREATE TABLE ' + newtable + str(tuple(query_array.pop(0))) + ' ;'
c.execute(create_table)
questionmarks = '('+''.join(['?,' for i in range(len(query_array[0])-1)])+'?)'
insert_into_table = 'INSERT INTO ' + newtable + ' values ' + questionmarks + ';'
c.executemany(insert_into_table, query_array)
else:
raise TypeError('Need to pass list or dict')
conn.commit()
conn.close()
@staticmethod
def sendto_json(query_array, filename):
'''Use json to store entire query as json file.'''
with open(filename, 'x') as file:
for item in query_array:
json.dumps(item, file)
@staticmethod
def get_db_table(db_name, db_table):
''' Pass in database name and table as string, get back the table.'''
conn = sqlite3.connect(db_name)
c = conn.cursor()
c.execute('SELECT * FROM {}'.format(db_table))
db_tbl = c.fetchall()
return db_tbl
@staticmethod
def get_db_column(dbname, dbtbl, dbcol):
''' Pass in name, table and column as string, get back the column.'''
conn = sqlite3.connect(dbname)
cur = conn.cursor()
cur.execute('SELECT {0} FROM {1}'.format(dbcol, dbtbl))
returncolumn = cur.fetchall()
return returncolumn
@staticmethod
def get_csv(filename):
''' Give local csv file as string, returns a list of lists of that file. '''
csvdata = []
csvfile = csv.reader(open(filename))
for row in csvfile:
csvdata.extend(row)
csvfile.close()
return csvdata
@staticmethod
def convert_currencystring(dbname, dbtbl, dbcol):
'''Converts currency column in AUD to float.'''
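        # Illustrative transformation (assuming an Australian-style locale):
        #   'AUD1,234.50' -> 1234.5; totals that cannot be parsed fall back to 0.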
locale.setlocale(locale.LC_ALL, '')
conn = sqlite3.connect(dbname)
c = conn.cursor()
getdata = 'SELECT [' + dbcol + '], rowid FROM ' + dbtbl + ';'
c.execute(getdata)
transactions = c.fetchall()
for row in transactions:
transactions[transactions.index(row)] = list(row)
try:
for trxn in transactions:
trxn[0] = trxn[0].strip('AUD')
trxn[0] = trxn[0].strip('-AUD')
trxn[0] = trxn[0].strip('N/')
trxn[0] = locale.atof(trxn[0])
except AttributeError:
pass
except ValueError:
trxn[0] = 0 # Because some contacts have orders with no total recorded in IS. Not sure why.
for row in transactions:
transactions[transactions.index(row)] = tuple(row)
updatedata = 'UPDATE ' + dbtbl + ' SET [' + dbcol + ']=? WHERE rowid=?;'
c.executemany(updatedata, transactions)
conn.commit()
conn.close()
def db_iterate(self, dbname):
currencycolumns = {
'sales': ['Order Total'],
'contactsales': ['invamount'],
'leadsource_ROI':
('Expenses','Revenue','Cost Per Visitor','Cost Per Contact','Cost Per Customer'),
'products':['price']
}
for dbtbl in currencycolumns.keys():
print('Going through ', dbtbl, ' table.')
for dbcol in currencycolumns[dbtbl]:
print('Going through ', dbcol, ' in ', dbtbl, '.')
self.convert_currencystring(dbname, dbtbl, dbcol)
def stripcurrencycodes(self):
        ''' Iterates through databases in the local directory, removes currency codes,
        converts columns to float.
'''
for file in os.listdir():
ext = os.path.splitext(file)[1]
if (ext == '.db') or (ext == '.sqlite'):
print('Going through: ', file)
try:
self.db_iterate(file)
except Exception as e:
print(e, sys.exc_info()[2])
print('Done with: ', file)
@staticmethod
def create_joinlisttable(dbname):
        ''' Creates join of two tables. Currently only uses the sales and contacts tables.
Might open this to other tables later.
'''
conn = sqlite3.connect(dbname)
c = conn.cursor()
join_contacts_invoices = '''\
SELECT contacts.Id, contacts.[Date Created], contacts.[Lead Source],\
sales.[Order Total], sales.[Order Date] \
FROM contacts INNER JOIN sales \
ON contacts.Id = sales.ContactId;\
'''
c.execute(join_contacts_invoices)
joinlist = c.fetchall()
joinlist.sort(key = lambda x: x[0])
c.execute('''CREATE TABLE contactsales(
contactid text, entrydate text, leadsource text, invamount text, invdate text);''')
c.executemany('INSERT INTO contactsales VALUES (?,?,?,?,?);', joinlist)
conn.commit()
conn.close()
@staticmethod
def get_invoicedates(dbname):
''' Returns list of purchase dates for each contact. '''
conn = sqlite3.connect(dbname)
c = conn.cursor()
conn.text_factory = int
c.execute('SELECT Id FROM contacts;')
contact_idlist = c.fetchall()
contact_invlist = dict()
conn.text_factory = str
for cid in contact_idlist:
c.execute('SELECT Date FROM sales where sales.ContactId = (?);', cid)
contact_invlist[cid] = c.fetchall()
conn.close()
return contact_invlist
@staticmethod
def str2dateconv(datept):
'''Use to take date columns from db to create datetime objects.'''
datepoint = parser.parse(datept,dayfirst=True)
return datepoint
@staticmethod
def date2strconv(datept):
'''Use to write datetime objects back to db in format dd/mm/yyyy.'''
return datept.strftime('%d/%m/%Y')
@staticmethod
def datecolumncheck(columnname):
''' Columns below contain dates in infusionsoft.
Some of these are app specific. You will need to update
to match your infusionsoft account.
'''
datecolumns = \
{'Date Created',
'Last Updated',
'Program Started date',
'Assessment Booked Date',
'Date Sent',
'Initial Enquiry Date',
'Date Equipment was received',
'PlusThis New lead date',
'Referred date',
'Order Date',
'entrydate',
'invdate'
}
if columnname in datecolumns:
return True
@staticmethod
def currencycolumncheck(columnname):
''' Columns below contain currency transactions in infusionsoft.
Some of these are app specific. You will need to update
to match your infusionsoft account.
'''
currencycolumns = \
{'Price',
'Expenses',
'Revenue',
'Cost Per Visitor',
'Cost Per Contact',
'Cost Per Customer',
'Order Total',
}
if columnname in currencycolumns:
return True
class Query:
'''Create connection to API and run basic queries.'''
def __init__(self):
''' Instantiate Infusionsoft object and create connection to
account app.
'''
self.key = os.environ['INFUSION_APIKEY']
self.app_name = os.environ['INFUSION_APPNAME']
self.infusionsoft = Infusionsoft(self.app_name, self.key)
def _basequery(self, **kwargs):
'''Query contact table by default. Overwrite search parameters with
kwargs. Combine with _getpages() to return whole database.
'''
self.default = dict(
table='Contact',
limit=10,
page=0,
queryData={'ContactType': '%'},
returnData=['City','State','Country']
)
if kwargs is not None:
self.default.update(kwargs)
try:
self.data = self.infusionsoft.DataService(
'query', self.default['table'],
self.default['limit'], self.default['page'],
self.default['queryData'], self.default['returnData']
)
return self.data
except Exception as exc:
print('Error running query: ', exc)
def _count(self, table, field):
'''Return number of entries in table to retrieve all data.
Return an int to use as limit in queries, append to list results.
'''
self.count = self.infusionsoft.DataService('count', table, {field: '%'})
return self.count
def _getpages(self, table, field):
'''Calculate number of pages to search through using dataservice.'''
self.totalrecords = self._count(table, field)
self.pages = (self.totalrecords//999 + 1)
return self.pages
class Extract(Query):
'''Pull mass data for analysis using Query() as base. Intended as layer
between direct queries and each report class.
'''
def __init__(self):
'''Use super() to create API connection.
Timeframes for reported data.'''
self.thirtydays = None
self.month = None
self.quarter = None
self.year = None
self.alltime = None
super(Extract, self).__init__()
def tags(self, **kwargs):
'''Return tags for target contact.'''
self.tagargs = dict(
table='ContactGroupAssign',
queryData={'ContactId': '154084'},
returnData=['GroupId']
)
if kwargs is not None:
self.tagargs.update(kwargs)
self.tag = self._basequery(**self.tagargs)
return self.tag
def dates(self, **kwargs):
'''Return list of date created for all contact types.'''
self.dateargs = dict(
table='Contact',
queryData={'ContactType': '%'},
returnData=['DateCreated']
)
if kwargs is not None:
self.dateargs.update(kwargs)
self.date = self._basequery(**self.dateargs)
return self.date
def leadsources(self, **kwargs):
        '''Return leadsource for contacts. The number of contacts returned is set by the limit key.'''
self.sourceargs = dict(
table='Contact',
queryData={'ContactType': '%'},
returnData=['Leadsource']
)
if kwargs is not None:
self.sourceargs.update(kwargs)
self.leadsource = self._basequery(**self.sourceargs)
return self.leadsource
def invoices(self, target_id=None, **kwargs):
''' Returns list of dicts, key is 'DateCreated'.
USAGE:Iterate over list from contact_idanddate() to get target_id.
'''
if type(target_id) is str:
pass
elif (target_id is not None and type(target_id) is int):
target_id = str(target_id)
else:
print("Input on invoices() failed, check target_id")
self.inv_args = dict(
table='Invoice',
queryData={'ContactId': target_id},
returnData=['DateCreated']
)
if kwargs is not None:
self.inv_args.update(kwargs)
self.inv_dates = self._basequery(**self.inv_args)
return self.inv_dates
class Leadtime(LocalDB):
'''
Use local database to calculate leadtime instead of API.
    Just call stats_LT() with the database name, returns dict with everything.
'''
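    # Usage sketch (the database name is hypothetical):
    #
    #   stats = Leadtime().stats_LT('myapp.db')
    #   print(stats['average_leadtime'], stats['median'], stats['eightypercent'])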
def stats_LT(self, dbname, INCLUDE_LIST = False):
''' Main entry point for database form of Leadtime class.
        Pass it the database name, get back a dictionary with the mean, median, quintile
        and std deviation. Component functions listed below in order of appearance.
'''
lt = self.get_leadtime(dbname)
average_leadtime = statistics.mean(lt)
std_dev = statistics.pstdev(lt)
        lt.sort()
        quintile_5 = int(0.8 * len(lt))
        eightypercentofsales = lt[quintile_5]
median_leadtime = statistics.median(lt)
if INCLUDE_LIST == True:
stats = dict(average_leadtime = average_leadtime,
standard_deviation = std_dev,
eightypercent = eightypercentofsales,
median = median_leadtime,
fulllist = lt)
else:
stats = dict(average_leadtime = average_leadtime,
standard_deviation = std_dev,
eightypercent = eightypercentofsales,
median = median_leadtime)
return stats
def get_leadtime(self, dbname):
leadtime = [row['leadtime'] for row in self.get_data(dbname).values()]
leadtime = [i for i in leadtime if i >= 0]
return leadtime
def get_data(self,dbname):
data = self.get_db_table(dbname, 'contactsales')
data = self.list_convert(data)
data = self.leadtime_from_db(data)
return data
def list_convert(self, targetlist):
newlist = [list(row) for row in targetlist]
for newrow in newlist:
newrow[1] = self.convert_datestring(newrow[1])
newrow[4] = self.convert_datestring(newrow[4])
return newlist
def leadtime_from_db(self, targetlist):
newlist = dict()
for row in targetlist:
if row[0] not in newlist.keys():
newlist[row[0]] = dict(entrydate = row[1], invdates = [row[4]])
else:
newlist[row[0]]['invdates'].append(row[4])
leadtime = min(newlist[row[0]]['invdates']) - newlist[row[0]]['entrydate']
newlist[row[0]]['leadtime'] = leadtime.days
return newlist
def convert_datestring(self, targetdate):
newdate = targetdate.split()[0]
newdate = newdate.split('/')
newdate = [int(n) for n in newdate]
newdate.reverse()
newdate = date(newdate[0], newdate[1], newdate[2])
return newdate
class LeadtimetoSale(Extract):
'''
Return length of time from gaining a lead to making first sale.
HOW TO USE:
>>> leadtime = LeadtimetoSale().leadtime()
>>> Output().ascsvdict(leadtime)
'''
def leadtime(self, **kwargs):
''' Use extract() to get data, use process() to make it sensible.
        Return an object useful for visualisation.
'''
self.idd = self.iddates(**kwargs)
for i in self.idd:
idarg = i['Id']
i['Invoices'] = (self.get_inv(idarg))
self.first_inv_date(i)
self.get_daystosale(i)
return self.idd
def iddates(self, **kwargs):
'''Returns Id and DateCreated from Contact table as dict.'''
self.id = dict(returnData = ['Id', 'DateCreated'])
if kwargs is not None:
self.id.update(kwargs)
return self._basequery(**self.id)
def get_inv(self, idarg):
'''Returns DateCreated of invoices of id arg.'''
self.xinf=self.invoices(target_id = idarg)
return self.xinf
def first_inv_date(self, dct):
'''Pass in dict with Invoices key, returns earliest invoice date.'''
if 'Invoices' in dct.keys():
inv_dates = dct['Invoices']
for date in range(0, len(inv_dates)):
inv_dates[date] = inv_dates[date]['DateCreated']
if len(inv_dates) == 0:
first_sale = 0
elif len(inv_dates) != 0:
first_sale = min(inv_dates)
dct['FirstSale'] = first_sale
else:
print("Need to give me a dictionary with an 'Invoices' key.")
def created_minus_sale(self, dct):
'''Gives number of days between date of lead and date of sale.'''
leadtime = dct['FirstSale'] - dct['DateCreated']
dct['LeadTime'] = leadtime
def get_daystosale(self, leadtimedict):
'''Pass in dict from LeadtimetoSale(), returns number of days from
lead generation to first purchase for that contact.
'''
        if 'DateCreated' in leadtimedict and 'FirstSale' in leadtimedict:
self.created = leadtimedict['DateCreated']
self.firstsale = leadtimedict['FirstSale']
self.days = self.datecompare(self.created, self.firstsale)
leadtimedict['LeadTime'] = self.days
else:
print('Need to know FirstSale to do this.')
def datecompare(self, xmlrpcDateCreated, xmlrpcFirstSale):
'''Calc days between 2 dates returned from IS.
Dates passed in must be xmlrpc.client.DateTime if python3.x or
xmlrpclib.DateTime if python2.x. Can also use DateTime-like
objects which have the timetuple() method.
'''
# need to handle int values of 0 for dates here
self.date1 = xmlrpcDateCreated.timetuple()
if type(xmlrpcFirstSale) != int:
self.date2 = xmlrpcFirstSale.timetuple()
self.days = time.mktime(self.date2) - time.mktime(self.date1)
seconds_per_day = 60*60*24
self.days = self.days // seconds_per_day
else:
self.days = 999999 # create outlier to filter or review
return self.days
class CostSaleLeadsource(LocalDB):
'''Return a cost per sale per leadsource dictionary.'''
def stats_CSL(self, dbname):
'''
+get expenses per leadsource via API
+get number of sales per leadsource via API
+combine the two
^OR^
+run leadsource ROI report
'''
try:
self.convert_currencystring(dbname, 'leadsource_ROI', 'Expenses')
self.convert_currencystring(dbname, 'leadsource_ROI', 'Revenue')
except AttributeError as att_e:
print(att_e)
self.leadsource_ROI = self.get_db_table(dbname, 'leadsource_ROI')
CSL = OrderedDict()
CSL['Leadsource'] = ('Percent profit', 'Dollar profit', 'Revenue', 'Expenses')
for entry in self.leadsource_ROI:
entry = list(entry)
self.destring_leadsourceROI_table(entry)
self.leadsrc = entry[2]
self.leadsrc_stats = self.ROI_stats(entry)
CSL[self.leadsrc] = self.leadsrc_stats
return CSL
def destring_leadsourceROI_table(self, row):
''' Might want to use named constants in to_float and to_int, but
I will probably only use this here and nowhere else.
'''
to_float = {4,5,6,8,10,13,14} # Uses set because I hardly ever use them and they are cool
to_int = {0,1,7,9,12} # I also like looking at them
for x in to_float:
try:
row[x] = float(row[x])
except:
row[x] = 0
for y in to_int:
try:
row[y] = int(row[y])
except:
row[y] = 0
def ROI_stats(self, leadsource_row):
''' Used to create a dict of dicts with stats for each leadsource. '''
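        # Worked example with illustrative numbers: expenses = 50.0 and revenue = 200.0
        # give percent_profit = (1 - 50/200) * 100 = 75.0 and dollar_profit = 150.0.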
try:
expns = leadsource_row[4]
revnu = leadsource_row[5]
percent_profit = (1 - (expns / revnu)) * 100
if hasattr(percent_profit, 'index'):
percent_profit = percent_profit[0]
except ZeroDivisionError:
percent_profit = 0
dollar_profit = leadsource_row[5] - leadsource_row[4]
revenue = leadsource_row[5]
expenses = leadsource_row[4]
stat_list = [percent_profit, dollar_profit, revenue, expenses]
return stat_list
class AverageTransactionValue:
'''Return average amount of transaction across all products.
TODO: segment by time period, leadsource, product etc.
+Wouldn't mind breaking this down for each leadsource
'''
def stats_ATV(self, dbname):
'''
+get all sales
+get number of sales
+do arithmetic mean
+ e.g: in SQL: SELECT AVG([Order Total]) FROM sales;
'''
LocalDB.convert_currencystring(dbname, 'sales', 'Order Total')
conn = sqlite3.connect(dbname)
c = conn.cursor()
c.execute('SELECT [Order Total] FROM sales;')
atv = c.fetchall()
atv = [float(i[0]) for i in atv]
atv = statistics.mean(atv)
atv = {'ATV': atv} # output as dict for same formatting as other report classes
conn.close()
return atv
class CustomerLifetimeValue(LocalDB):
'''Calculate how much any given customer spends on average long term.'''
def __init__(self, dbname):
SQL_QUERY = 'SELECT ContactId, SUM([Order Total]) FROM sales \
GROUP BY ContactId \
ORDER BY ContactId;'
conn = sqlite3.connect(dbname)
c = conn.cursor()
c.execute(SQL_QUERY)
CLV_DATA = c.fetchall()
self.spend = [row[1] for row in CLV_DATA]
conn.close()
def stats_CLV(self):
'''
+get target contact invoices
+sum value of all invoices
+repeat for all contacts who have purchased
+get average of all contacts lifetimevalue
+wouldn't mind breaking down CLV by leadsource
'''
average_CLV = statistics.mean(self.spend)
median_CLV = statistics.median(self.spend)
mode_CLV = statistics.mode(self.spend)
std_dev = statistics.pstdev(self.spend)
        spend_sorted = sorted(self.spend)
        quintile_5 = int(0.8 * len(spend_sorted))
        eighty_percent = spend_sorted[quintile_5]
stats = dict(average_CLV = average_CLV,
standard_deviation = std_dev,
eightypercent = eighty_percent,
median = median_CLV,
mode = mode_CLV)
return stats
class Process:
'''Raw query data processed here for target output.
This class is really only useful when subclassing Extract.
Kept here for reference for now.
'''
def procarray(self, array):
for dictionary in range(0, len(array)):
if type(array[dictionary]) is list:
self.procarray(array[dictionary])
elif type(array[dictionary]) is dict:
self.procdict(array[dictionary])
def procdict(self, dictionary):
for key in dictionary.keys():
if key == 'DateCreated':
self.procdate(key, dictionary)
elif key == 'FirstSale' and type(dictionary[key]) != int:
self.procdate(key,dictionary)
elif key == 'Invoices':
# self.procarray(dictionary[key])
invlist = dictionary[key]
for inv in range(0,len(invlist)):
invlist[inv] = self.convert_date(invlist[inv])
dictionary[key] = invlist
else:
pass
def procdate(self, key, dictionary):
IS_date = dictionary[key]
newdate = self.convert_date(IS_date)
dictionary[key] = newdate
def convert_date(self, IS_dateobject):
try:
convdate = IS_dateobject.timetuple()
convdate = datetime(convdate.tm_year, convdate.tm_mon, convdate.tm_mday)
return convdate
except TypeError as te:
print("wrong type ", te)
def combine_list(self, *lists):
ziplist = zip(*lists)
ziplist = list(ziplist)
return ziplist
class Output:
'''Take data ready for output. Methods to write to file.'''
@staticmethod
def stats_outputall(dbname):
allstats = Output().stats_getall(dbname)
for report in allstats:
Output().ascsv([allstats[report]], report + '.csv')
print("Report: ", report, " saved to file successfully.")
Output().strap_csvfiles()
@staticmethod
def strap_csvfiles():
        try:
            import xlwt
        except ImportError:
            print('Python installation needs xlwt library. Try pip install xlwt on the command line.')
            raise
wb = xlwt.Workbook()
reportfiles = ['ATV.csv', 'LT.csv', 'CLV.csv', 'CSL.csv']
for filename in reportfiles:
(f_path, f_name) = os.path.split(filename)
(f_short_name, f_extension) = os.path.splitext(f_name)
ws = wb.add_sheet(f_short_name)
spamReader = csv.reader(open(filename, 'r'))
for rowx, row in enumerate(spamReader):
for colx, value in enumerate(row):
ws.write(rowx, colx, value)
wb.save('allstats.xls')
move('allstats.xls', RESULT_DATA_DIR)
for filename in reportfiles:
move(filename, RESULT_DATA_DIR)
print('All done! Your file is named \"allstats.xls\", in: ' + RESULT_DATA_DIR)
@staticmethod
def stats_getall(dbname):
''' Get return data from all report classes,
return dict of reportname:data pairs.
'''
allstats = {
'LT': Leadtime().stats_LT(dbname),
'CSL': CostSaleLeadsource().stats_CSL(dbname),
'ATV': AverageTransactionValue().stats_ATV(dbname),
'CLV': CustomerLifetimeValue(dbname).stats_CLV()
}
return allstats
@staticmethod
def asfile(target=None, query=None, filename='dataserv.csv'):
        ''' Primarily to send to a spreadsheet. The target and query arguments do the same
        thing; having both made calls against the Infusionsoft API read more naturally.
'''
data = None
if target is not None:
data = target
elif query is not None:
data = query
else:
msg = "No data to output"
return msg
with open(filename, 'a+') as tempfile:
try:
for line in data:
tempfile.write(repr(line))
tempfile.write(",")
tempfile.write("\n")
print(line)
except TypeError:
tempfile.write(data)
@staticmethod
def ascsv(targlist, outfile):
'''
Pass in result of query as list of dicts from query. Alternately, use
elif to pass result objects in different forms to the one function,
or to several similar functions contained in Output class.
'''
with open(outfile, 'w', newline='') as datafile:
writer = csv.writer(datafile)
for item in targlist:
for key, value in item.items():
writer.writerow([key, value])
@staticmethod
def ascsvdict(item, outfile):
'''Item arg is list of dicts. Like ascsv but with DictWriter class.'''
names = item[0].keys()
with open(outfile,'w', newline='') as data:
writer = csv.DictWriter(data, fieldnames=names)
writer.writeheader()
writer.writerows(item)
@staticmethod
def datetime_to_excel(dateobj):
xldate = dateobj.strftime('%x')
return xldate
@staticmethod
    def ashtml(queryfunc, filename):
'''Put in data, intended to save as valid static webpage.'''
pass
@staticmethod
    def asimage(queryfunc, filename):
'''If you just want the visual form of your data.'''
pass
@staticmethod
    def as3rdparty(queryfunc, filename):
        ''' To send to pandas, matplotlib, etc. '''
pass
@staticmethod
def to_picklejar(data_to_save, name):
'''Give whatever object you have to pickle, save it for your next session with given name.'''
if type(name) != str:
name = str(name)
with open(name, 'wb') as picklejar:
pickle.dump(data_to_save, picklejar)
def importer():
    ''' Prompts for a database name and the Infusionsoft csv exports, then imports them into sqlite. '''
dbname = input('please enter database name: ')
dbname = os.path.join(DB_DIR, dbname)
datafiles = make_tablename()
importer = LocalDB()
for table, filename in datafiles.items():
tblname = table
tbldata = importer.get_csv(filename)
new_headerrow = tbldata[0]
remove_duplicates(new_headerrow)
tbldata[0] = new_headerrow
importer.sendto_sqlite(tbldata, tblname, db=dbname)
move(filename, RAW_DATA_DIR)
importer.create_joinlisttable(dbname)
return dbname
def remove_duplicates(headerrow):
''' Infusionsoft csv files often have duplicate strings as header row.
When importing to sql, this raises sqlite3.OperationalError. Pass in the
first row of your csv file to fix this. importer() calls this for you as well.
'''
for item in headerrow:
if headerrow.count(item) > 1:
idx = headerrow.index(item)
for col in range(idx + 1, len(headerrow)):
if headerrow[col] == item:
headerrow[col] = '_' + headerrow[col]
print(item, ':', headerrow.count(item))
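# For example, a header row of ['Id', 'Email', 'Email'] is rewritten in place as
# ['Id', 'Email', '_Email'], which sqlite3 will then accept as column names.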
def make_tablename():
'''takes user input at command line for csv files exported from IS'''
filetypes = {'contacts': '', 'sales': '', 'products': '', 'leadsource_ROI': ''}
for filetype in filetypes.keys():
filetypes[filetype] = input('please enter filename for {0} data: '.format(filetype))
if not os.path.isfile(filetypes[filetype]):
raise FileNotFoundError('File not in this directory. Please check and rerun the program.')
return filetypes
def main():
dbname = importer()
Output.stats_outputall(dbname)
if __name__ == "__main__":
main()
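# Example console session (filenames are hypothetical; prompt order may vary):
#
#   $ python dataserv.py
#   please enter database name: myapp.db
#   please enter filename for contacts data: contacts.csv
#   please enter filename for sales data: sales.csv
#   please enter filename for products data: products.csv
#   please enter filename for leadsource_ROI data: leadsource_ROI.csv
#
# The combined report is saved as allstats.xls inside the resultdata folder.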
| BMJHayward/infusionsoft_xpmt | src/dataserv.py | Python | mit | 31,807 |
# -*- coding: utf-8 -*-
from . import test_export, test_import, test_load
| vileopratama/vitech | src/openerp/addons/test_impex/tests/__init__.py | Python | mit | 76 |
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
#
# Created by Martin J. Laubach on 2011-08-01
# Copyright (c) 2011 Martin J. Laubach. All rights reserved.
#
# ------------------------------------------------------------------------
from django.dispatch import Signal
# ------------------------------------------------------------------------
# This signal is sent when an item editor managed object is completely
# saved, especially including all foreign or manytomany dependencies.
itemeditor_post_save_related = Signal()
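# Example receiver (names are illustrative); the exact keyword arguments depend
# on how the item editor sends this signal, so receivers should accept **kwargs:
#
#   from feincms.signals import itemeditor_post_save_related
#
#   def rebuild_caches(sender, **kwargs):
#       pass  # invalidate caches etc. -- all related objects are saved by now
#
#   itemeditor_post_save_related.connect(rebuild_caches)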
# ------------------------------------------------------------------------
| feincms/feincms | feincms/signals.py | Python | bsd-3-clause | 709 |
# Copyright (c) 2010 Spotify AB
# Copyright (c) 2010-2011 Yelp
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Represents a connection to the EMR service
"""
import types
import boto
import boto.utils
from boto.ec2.regioninfo import RegionInfo
from boto.emr.emrobject import AddInstanceGroupsResponse, BootstrapActionList, \
Cluster, ClusterSummaryList, HadoopStep, \
InstanceGroupList, InstanceList, JobFlow, \
JobFlowStepList, \
ModifyInstanceGroupsResponse, \
RunJobFlowResponse, StepSummaryList
from boto.emr.step import JarStep
from boto.connection import AWSQueryConnection
from boto.exception import EmrResponseError
class EmrConnection(AWSQueryConnection):
APIVersion = boto.config.get('Boto', 'emr_version', '2009-03-31')
DefaultRegionName = boto.config.get('Boto', 'emr_region_name', 'us-east-1')
DefaultRegionEndpoint = boto.config.get('Boto', 'emr_region_endpoint',
'elasticmapreduce.us-east-1.amazonaws.com')
ResponseError = EmrResponseError
# Constants for AWS Console debugging
DebuggingJar = 's3n://us-east-1.elasticmapreduce/libs/script-runner/script-runner.jar'
DebuggingArgs = 's3n://us-east-1.elasticmapreduce/libs/state-pusher/0.1/fetch'
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, debug=0,
https_connection_factory=None, region=None, path='/',
security_token=None, validate_certs=True, profile_name=None):
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
self.region = region
super(EmrConnection, self).__init__(aws_access_key_id,
aws_secret_access_key,
is_secure, port, proxy, proxy_port,
proxy_user, proxy_pass,
self.region.endpoint, debug,
https_connection_factory, path,
security_token,
validate_certs=validate_certs,
profile_name=profile_name)
# Many of the EMR hostnames are of the form:
# <region>.<service_name>.amazonaws.com
# rather than the more common:
# <service_name>.<region>.amazonaws.com
# so we need to explicitly set the region_name and service_name
# for the SigV4 signing.
self.auth_region_name = self.region.name
self.auth_service_name = 'elasticmapreduce'
def _required_auth_capability(self):
return ['hmac-v4']
def describe_cluster(self, cluster_id):
"""
Describes an Elastic MapReduce cluster
:type cluster_id: str
:param cluster_id: The cluster id of interest
"""
params = {
'ClusterId': cluster_id
}
return self.get_object('DescribeCluster', params, Cluster)
def describe_jobflow(self, jobflow_id):
"""
Describes a single Elastic MapReduce job flow
:type jobflow_id: str
:param jobflow_id: The job flow id of interest
"""
jobflows = self.describe_jobflows(jobflow_ids=[jobflow_id])
if jobflows:
return jobflows[0]
def describe_jobflows(self, states=None, jobflow_ids=None,
created_after=None, created_before=None):
"""
Retrieve all the Elastic MapReduce job flows on your account
:type states: list
:param states: A list of strings with job flow states wanted
:type jobflow_ids: list
:param jobflow_ids: A list of job flow IDs
:type created_after: datetime
:param created_after: Bound on job flow creation time
:type created_before: datetime
:param created_before: Bound on job flow creation time
"""
params = {}
if states:
self.build_list_params(params, states, 'JobFlowStates.member')
if jobflow_ids:
self.build_list_params(params, jobflow_ids, 'JobFlowIds.member')
if created_after:
params['CreatedAfter'] = created_after.strftime(
boto.utils.ISO8601)
if created_before:
params['CreatedBefore'] = created_before.strftime(
boto.utils.ISO8601)
return self.get_list('DescribeJobFlows', params, [('member', JobFlow)])
def describe_step(self, cluster_id, step_id):
"""
Describe an Elastic MapReduce step
:type cluster_id: str
:param cluster_id: The cluster id of interest
:type step_id: str
:param step_id: The step id of interest
"""
params = {
'ClusterId': cluster_id,
'StepId': step_id
}
return self.get_object('DescribeStep', params, HadoopStep)
def list_bootstrap_actions(self, cluster_id, marker=None):
"""
Get a list of bootstrap actions for an Elastic MapReduce cluster
:type cluster_id: str
:param cluster_id: The cluster id of interest
:type marker: str
:param marker: Pagination marker
"""
params = {
'ClusterId': cluster_id
}
if marker:
params['Marker'] = marker
return self.get_object('ListBootstrapActions', params, BootstrapActionList)
def list_clusters(self, created_after=None, created_before=None,
cluster_states=None, marker=None):
"""
List Elastic MapReduce clusters with optional filtering
:type created_after: datetime
:param created_after: Bound on cluster creation time
:type created_before: datetime
:param created_before: Bound on cluster creation time
:type cluster_states: list
:param cluster_states: Bound on cluster states
:type marker: str
:param marker: Pagination marker
"""
params = {}
if created_after:
params['CreatedAfter'] = created_after.strftime(
boto.utils.ISO8601)
if created_before:
params['CreatedBefore'] = created_before.strftime(
boto.utils.ISO8601)
if marker:
params['Marker'] = marker
if cluster_states:
self.build_list_params(params, cluster_states, 'ClusterStates.member')
return self.get_object('ListClusters', params, ClusterSummaryList)
def list_instance_groups(self, cluster_id, marker=None):
"""
List EC2 instance groups in a cluster
:type cluster_id: str
:param cluster_id: The cluster id of interest
:type marker: str
:param marker: Pagination marker
"""
params = {
'ClusterId': cluster_id
}
if marker:
params['Marker'] = marker
return self.get_object('ListInstanceGroups', params, InstanceGroupList)
def list_instances(self, cluster_id, instance_group_id=None,
instance_group_types=None, marker=None):
"""
List EC2 instances in a cluster
:type cluster_id: str
:param cluster_id: The cluster id of interest
:type instance_group_id: str
:param instance_group_id: The EC2 instance group id of interest
:type instance_group_types: list
:param instance_group_types: Filter by EC2 instance group type
:type marker: str
:param marker: Pagination marker
"""
params = {
'ClusterId': cluster_id
}
if instance_group_id:
params['InstanceGroupId'] = instance_group_id
if marker:
params['Marker'] = marker
if instance_group_types:
self.build_list_params(params, instance_group_types,
'InstanceGroupTypeList.member')
return self.get_object('ListInstances', params, InstanceList)
def list_steps(self, cluster_id, step_states=None, marker=None):
"""
List cluster steps
:type cluster_id: str
:param cluster_id: The cluster id of interest
:type step_states: list
:param step_states: Filter by step states
:type marker: str
:param marker: Pagination marker
"""
params = {
'ClusterId': cluster_id
}
if marker:
params['Marker'] = marker
if step_states:
self.build_list_params(params, step_states, 'StepStateList.member')
return self.get_object('ListSteps', params, StepSummaryList)
def add_tags(self, resource_id, tags):
"""
Create new metadata tags for the specified resource id.
:type resource_id: str
:param resource_id: The cluster id
:type tags: dict
:param tags: A dictionary containing the name/value pairs.
If you want to create only a tag name, the
value for that tag should be the empty string
(e.g. '') or None.
"""
assert isinstance(resource_id, basestring)
params = {
'ResourceId': resource_id,
}
params.update(self._build_tag_list(tags))
return self.get_status('AddTags', params, verb='POST')
def remove_tags(self, resource_id, tags):
"""
Remove metadata tags for the specified resource id.
:type resource_id: str
:param resource_id: The cluster id
:type tags: list
:param tags: A list of tag names to remove.
"""
params = {
'ResourceId': resource_id,
}
params.update(self._build_string_list('TagKeys', tags))
return self.get_status('RemoveTags', params, verb='POST')
def terminate_jobflow(self, jobflow_id):
"""
Terminate an Elastic MapReduce job flow
:type jobflow_id: str
:param jobflow_id: A jobflow id
"""
self.terminate_jobflows([jobflow_id])
def terminate_jobflows(self, jobflow_ids):
"""
Terminate an Elastic MapReduce job flow
:type jobflow_ids: list
:param jobflow_ids: A list of job flow IDs
"""
params = {}
self.build_list_params(params, jobflow_ids, 'JobFlowIds.member')
return self.get_status('TerminateJobFlows', params, verb='POST')
def add_jobflow_steps(self, jobflow_id, steps):
"""
Adds steps to a jobflow
:type jobflow_id: str
:param jobflow_id: The job flow id
:type steps: list(boto.emr.Step)
:param steps: A list of steps to add to the job
"""
if not isinstance(steps, types.ListType):
steps = [steps]
params = {}
params['JobFlowId'] = jobflow_id
# Step args
step_args = [self._build_step_args(step) for step in steps]
params.update(self._build_step_list(step_args))
return self.get_object(
'AddJobFlowSteps', params, JobFlowStepList, verb='POST')
def add_instance_groups(self, jobflow_id, instance_groups):
"""
Adds instance groups to a running cluster.
:type jobflow_id: str
:param jobflow_id: The id of the jobflow which will take the
new instance groups
:type instance_groups: list(boto.emr.InstanceGroup)
:param instance_groups: A list of instance groups to add to the job
"""
if not isinstance(instance_groups, types.ListType):
instance_groups = [instance_groups]
params = {}
params['JobFlowId'] = jobflow_id
params.update(self._build_instance_group_list_args(instance_groups))
return self.get_object('AddInstanceGroups', params,
AddInstanceGroupsResponse, verb='POST')
def modify_instance_groups(self, instance_group_ids, new_sizes):
"""
Modify the number of nodes and configuration settings in an
instance group.
:type instance_group_ids: list(str)
:param instance_group_ids: A list of the ID's of the instance
groups to be modified
:type new_sizes: list(int)
:param new_sizes: A list of the new sizes for each instance group
"""
if not isinstance(instance_group_ids, types.ListType):
instance_group_ids = [instance_group_ids]
if not isinstance(new_sizes, types.ListType):
new_sizes = [new_sizes]
instance_groups = zip(instance_group_ids, new_sizes)
params = {}
for k, ig in enumerate(instance_groups):
# could be wrong - the example amazon gives uses
# InstanceRequestCount, while the api documentation
# says InstanceCount
params['InstanceGroups.member.%d.InstanceGroupId' % (k+1) ] = ig[0]
params['InstanceGroups.member.%d.InstanceCount' % (k+1) ] = ig[1]
return self.get_object('ModifyInstanceGroups', params,
ModifyInstanceGroupsResponse, verb='POST')
def run_jobflow(self, name, log_uri=None, ec2_keyname=None,
availability_zone=None,
master_instance_type='m1.small',
slave_instance_type='m1.small', num_instances=1,
action_on_failure='TERMINATE_JOB_FLOW', keep_alive=False,
enable_debugging=False,
hadoop_version=None,
steps=[],
bootstrap_actions=[],
instance_groups=None,
additional_info=None,
ami_version=None,
api_params=None,
visible_to_all_users=None,
job_flow_role=None):
"""
Runs a job flow
:type name: str
:param name: Name of the job flow
:type log_uri: str
:param log_uri: URI of the S3 bucket to place logs
:type ec2_keyname: str
:param ec2_keyname: EC2 key used for the instances
:type availability_zone: str
:param availability_zone: EC2 availability zone of the cluster
:type master_instance_type: str
:param master_instance_type: EC2 instance type of the master
:type slave_instance_type: str
:param slave_instance_type: EC2 instance type of the slave nodes
:type num_instances: int
:param num_instances: Number of instances in the Hadoop cluster
:type action_on_failure: str
:param action_on_failure: Action to take if a step terminates
:type keep_alive: bool
:param keep_alive: Denotes whether the cluster should stay
alive upon completion
:type enable_debugging: bool
:param enable_debugging: Denotes whether AWS console debugging
should be enabled.
:type hadoop_version: str
:param hadoop_version: Version of Hadoop to use. This no longer
defaults to '0.20' and now uses the AMI default.
:type steps: list(boto.emr.Step)
:param steps: List of steps to add with the job
:type bootstrap_actions: list(boto.emr.BootstrapAction)
:param bootstrap_actions: List of bootstrap actions that run
before Hadoop starts.
:type instance_groups: list(boto.emr.InstanceGroup)
:param instance_groups: Optional list of instance groups to
use when creating this job.
NB: When provided, this argument supersedes num_instances
and master/slave_instance_type.
:type ami_version: str
:param ami_version: Amazon Machine Image (AMI) version to use
for instances. Values accepted by EMR are '1.0', '2.0', and
'latest'; EMR currently defaults to '1.0' if you don't set
'ami_version'.
:type additional_info: JSON str
:param additional_info: A JSON string for selecting additional features
:type api_params: dict
:param api_params: a dictionary of additional parameters to pass
directly to the EMR API (so you don't have to upgrade boto to
use new EMR features). You can also delete an API parameter
by setting it to None.
:type visible_to_all_users: bool
:param visible_to_all_users: Whether the job flow is visible to all IAM
users of the AWS account associated with the job flow. If this
value is set to ``True``, all IAM users of that AWS
account can view and (if they have the proper policy permissions
set) manage the job flow. If it is set to ``False``, only
the IAM user that created the job flow can view and manage
it.
:type job_flow_role: str
:param job_flow_role: An IAM role for the job flow. The EC2
instances of the job flow assume this role. The default role is
``EMRJobflowDefault``. In order to use the default role,
you must have already created it using the CLI.
:rtype: str
:return: The jobflow id
"""
params = {}
if action_on_failure:
params['ActionOnFailure'] = action_on_failure
if log_uri:
params['LogUri'] = log_uri
params['Name'] = name
# Common instance args
common_params = self._build_instance_common_args(ec2_keyname,
availability_zone,
keep_alive,
hadoop_version)
params.update(common_params)
# NB: according to the AWS API's error message, we must
# "configure instances either using instance count, master and
# slave instance type or instance groups but not both."
#
# Thus we switch here on the truthiness of instance_groups.
if not instance_groups:
# Instance args (the common case)
instance_params = self._build_instance_count_and_type_args(
master_instance_type,
slave_instance_type,
num_instances)
params.update(instance_params)
else:
            # Instance group args (for spot instances or a heterogeneous cluster)
list_args = self._build_instance_group_list_args(instance_groups)
instance_params = dict(
('Instances.%s' % k, v) for k, v in list_args.iteritems()
)
params.update(instance_params)
# Debugging step from EMR API docs
if enable_debugging:
debugging_step = JarStep(name='Setup Hadoop Debugging',
action_on_failure='TERMINATE_JOB_FLOW',
main_class=None,
jar=self.DebuggingJar,
step_args=self.DebuggingArgs)
steps.insert(0, debugging_step)
# Step args
if steps:
step_args = [self._build_step_args(step) for step in steps]
params.update(self._build_step_list(step_args))
if bootstrap_actions:
bootstrap_action_args = [self._build_bootstrap_action_args(bootstrap_action) for bootstrap_action in bootstrap_actions]
params.update(self._build_bootstrap_action_list(bootstrap_action_args))
if ami_version:
params['AmiVersion'] = ami_version
if additional_info is not None:
params['AdditionalInfo'] = additional_info
if api_params:
for key, value in api_params.iteritems():
if value is None:
params.pop(key, None)
else:
params[key] = value
if visible_to_all_users is not None:
if visible_to_all_users:
params['VisibleToAllUsers'] = 'true'
else:
params['VisibleToAllUsers'] = 'false'
if job_flow_role is not None:
params['JobFlowRole'] = job_flow_role
response = self.get_object(
'RunJobFlow', params, RunJobFlowResponse, verb='POST')
return response.jobflowid
def set_termination_protection(self, jobflow_id,
termination_protection_status):
"""
Set termination protection on specified Elastic MapReduce job flows
:type jobflow_ids: list or str
:param jobflow_ids: A list of job flow IDs
:type termination_protection_status: bool
:param termination_protection_status: Termination protection status
"""
assert termination_protection_status in (True, False)
params = {}
params['TerminationProtected'] = (termination_protection_status and "true") or "false"
self.build_list_params(params, [jobflow_id], 'JobFlowIds.member')
return self.get_status('SetTerminationProtection', params, verb='POST')
def set_visible_to_all_users(self, jobflow_id, visibility):
"""
Set whether specified Elastic Map Reduce job flows are visible to all IAM users
:type jobflow_ids: list or str
:param jobflow_ids: A list of job flow IDs
:type visibility: bool
:param visibility: Visibility
"""
assert visibility in (True, False)
params = {}
params['VisibleToAllUsers'] = (visibility and "true") or "false"
self.build_list_params(params, [jobflow_id], 'JobFlowIds.member')
return self.get_status('SetVisibleToAllUsers', params, verb='POST')
def _build_bootstrap_action_args(self, bootstrap_action):
bootstrap_action_params = {}
bootstrap_action_params['ScriptBootstrapAction.Path'] = bootstrap_action.path
try:
bootstrap_action_params['Name'] = bootstrap_action.name
except AttributeError:
pass
args = bootstrap_action.args()
if args:
self.build_list_params(bootstrap_action_params, args, 'ScriptBootstrapAction.Args.member')
return bootstrap_action_params
def _build_step_args(self, step):
step_params = {}
step_params['ActionOnFailure'] = step.action_on_failure
step_params['HadoopJarStep.Jar'] = step.jar()
main_class = step.main_class()
if main_class:
step_params['HadoopJarStep.MainClass'] = main_class
args = step.args()
if args:
self.build_list_params(step_params, args, 'HadoopJarStep.Args.member')
step_params['Name'] = step.name
return step_params
def _build_bootstrap_action_list(self, bootstrap_actions):
if not isinstance(bootstrap_actions, types.ListType):
bootstrap_actions = [bootstrap_actions]
params = {}
for i, bootstrap_action in enumerate(bootstrap_actions):
for key, value in bootstrap_action.iteritems():
params['BootstrapActions.member.%s.%s' % (i + 1, key)] = value
return params
def _build_step_list(self, steps):
if not isinstance(steps, types.ListType):
steps = [steps]
params = {}
for i, step in enumerate(steps):
for key, value in step.iteritems():
params['Steps.member.%s.%s' % (i+1, key)] = value
return params
def _build_string_list(self, field, items):
if not isinstance(items, types.ListType):
items = [items]
params = {}
for i, item in enumerate(items):
params['%s.member.%s' % (field, i + 1)] = item
return params
def _build_tag_list(self, tags):
assert isinstance(tags, dict)
params = {}
for i, key_value in enumerate(sorted(tags.iteritems()), start=1):
key, value = key_value
current_prefix = 'Tags.member.%s' % i
params['%s.Key' % current_prefix] = key
if value:
params['%s.Value' % current_prefix] = value
return params
def _build_instance_common_args(self, ec2_keyname, availability_zone,
keep_alive, hadoop_version):
"""
Takes a number of parameters used when starting a jobflow (as
specified in run_jobflow() above). Returns a comparable dict for
use in making a RunJobFlow request.
"""
params = {
'Instances.KeepJobFlowAliveWhenNoSteps': str(keep_alive).lower(),
}
if hadoop_version:
params['Instances.HadoopVersion'] = hadoop_version
if ec2_keyname:
params['Instances.Ec2KeyName'] = ec2_keyname
if availability_zone:
params['Instances.Placement.AvailabilityZone'] = availability_zone
return params
def _build_instance_count_and_type_args(self, master_instance_type,
slave_instance_type, num_instances):
"""
Takes a master instance type (string), a slave instance type
(string), and a number of instances. Returns a comparable dict
for use in making a RunJobFlow request.
"""
params = {'Instances.MasterInstanceType': master_instance_type,
'Instances.SlaveInstanceType': slave_instance_type,
'Instances.InstanceCount': num_instances}
return params
def _build_instance_group_args(self, instance_group):
"""
Takes an InstanceGroup; returns a dict that, when its keys are
properly prefixed, can be used for describing InstanceGroups in
RunJobFlow or AddInstanceGroups requests.
"""
params = {'InstanceCount': instance_group.num_instances,
'InstanceRole': instance_group.role,
'InstanceType': instance_group.type,
'Name': instance_group.name,
'Market': instance_group.market}
if instance_group.market == 'SPOT':
params['BidPrice'] = instance_group.bidprice
return params
def _build_instance_group_list_args(self, instance_groups):
"""
Takes a list of InstanceGroups, or a single InstanceGroup. Returns
a comparable dict for use in making a RunJobFlow or AddInstanceGroups
request.
"""
if not isinstance(instance_groups, types.ListType):
instance_groups = [instance_groups]
params = {}
for i, instance_group in enumerate(instance_groups):
ig_dict = self._build_instance_group_args(instance_group)
for key, value in ig_dict.iteritems():
params['InstanceGroups.member.%d.%s' % (i+1, key)] = value
return params
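# A minimal usage sketch (credentials, bucket names and jar path are hypothetical):
#
#   from boto.emr.connection import EmrConnection
#   from boto.emr.step import JarStep
#
#   conn = EmrConnection('<aws_access_key_id>', '<aws_secret_access_key>')
#   step = JarStep(name='wordcount', jar='s3n://my-bucket/wordcount.jar',
#                  step_args=['s3n://my-bucket/input', 's3n://my-bucket/output'])
#   jobflow_id = conn.run_jobflow(name='example flow',
#                                 log_uri='s3://my-bucket/logs', steps=[step])
#   conn.describe_jobflow(jobflow_id).state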
| harshilasu/LinkurApp | y/google-cloud-sdk/platform/gsutil/third_party/boto/boto/emr/connection.py | Python | gpl-3.0 | 28,351 |
#! /usr/bin/python3
import json
from web3 import Web3, RPCProvider
from operator import itemgetter
import time
import sys
import datetime
import math
precision = 1000000000000000000
dust = 10000000000000
mkr_addr = "0xc66ea802717bfb9833400264dd12c2bceaa34a6d"
weth_addr = "0xecf8f87f810ecf450940c9f60066b4a7a501d6a7"
geth_addr = "0xa74476443119A942dE498590Fe1f2454d7D4aC0d"
market_addr = "0xC350eBF34B6d83B64eA0ee4E39b6Ebe18F02aD2F"
#market_addr = "0x454e4f5bb176a54638f727b3314c709cb4f66dae"
acct_owner = "0x6E39564ecFD4B5b0bA36CD944a46bCA6063cACE5"
web3rpc = Web3(RPCProvider(port=8545))
web3rpc.eth.defaultAccount = acct_owner
web3rpc.eth.defaultBlock = "latest"
logFile = open('maker-matcher.json', 'a+')
def print_log( log_type, entry ):
ts = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
entry = '[{date:' + ts + ',"' + log_type + '":' + entry + '}],\n'
logFile.write( entry )
def fix_books(precision, buy_book_amount, sell_book_amount, bid_id, ask_id):
print('"ETH":%f,"MKR":%f}' % (buy_book_amount/precision, sell_book_amount/precision))
print_log('log','{"ETH":%f,"MKR":%f}' % (buy_book_amount/precision, sell_book_amount/precision))
try:
print("Submitting Buy Book order", end='', flush=True )
try:
if market_contract.call().buy(bid_id, buy_book_amount):
try:
result_bb = market_contract.transact().buy(bid_id, buy_book_amount)
print_log('log','{"buy_tx":"%s"}' % (result_bb))
while web3rpc.eth.getTransactionReceipt(result_bb) is None:
print(".", end='', flush=True)
time.sleep(2)
print("")
except:
print("")
print_log('ERR','"Failed Buy Book transaction"')
return False
else:
print("")
print_log('ERR','"Failed Buy Book transaction"')
return False
except:
print("")
print("Buy checked failed to check.")
except:
print_log('ERR','"Failed pre Buy Book check, trying Sell Book"')
return False
try:
print("Submitting Sell Book Order", end='', flush=True)
if market_contract.call().buy(ask_id, sell_book_amount):
try:
result_sb = market_contract.transact().buy(ask_id, sell_book_amount)
print_log('log','{"sell_tx":"%s"}' % (result_sb))
while web3rpc.eth.getTransactionReceipt(result_sb) is None:
print(".", end='', flush=True)
time.sleep(2)
print("")
return True
except:
print_log('ERR','"Failed Sell Book transaction"')
return False
else:
print_log('ERR','"Failed Sell Book transaction"')
return False
except:
print_log('ERR','"Failed Sell Book order"')
return False
with open('simple_market.abi', 'r') as abi_file:
abi_json = abi_file.read().replace('\n','')
abi = json.loads(abi_json)
market_contract = web3rpc.eth.contract(abi=abi, address=market_addr)
with open('erc20.abi', 'r') as abi_file:
abi_json = abi_file.read().replace('\n','')
abi = json.loads(abi_json)
weth_contract = web3rpc.eth.contract(abi=abi, address=weth_addr)
mkr_contract = web3rpc.eth.contract(abi=abi, address=mkr_addr)
match_found = False
while not match_found:
time.sleep(5)
weth_balance = float(weth_contract.call().balanceOf(acct_owner))/precision
mkr_balance = float(mkr_contract.call().balanceOf(acct_owner))/precision
last_offer_id = market_contract.call().last_offer_id()
id = 0
offers = []
while id < last_offer_id + 1:
offers.append(market_contract.call().offers(id))
id = id + 1
print("\nBalances: %0.5f WETH - %0.5f MKR\n" % (weth_balance, mkr_balance))
#print("There are %i offers" % last_offer_id)
id=0
buy_orders = []
sell_orders = []
for offer in offers:
valid = offer[5]
if valid :
sell_how_much = float(offer[0]) / precision
sell_which_token = offer[1]
buy_how_much = float(offer[2]) / precision
buy_which_token = offer[3]
owner = offer[4][2:8]
bid = float(offer[0])/float(offer[2])
ask = float(offer[2])/float(offer[0])
if sell_which_token == mkr_addr and buy_which_token == weth_addr:
sell_orders.append([id, sell_how_much, ask, buy_how_much, owner])
if sell_which_token == weth_addr and buy_which_token == mkr_addr:
buy_orders.append([id, buy_how_much, bid, buy_how_much, owner])
id = id + 1
#Sort the order books
if len(buy_orders) > 0:
depth = len(buy_orders)
#find highest non dust bid
bid_qty = 0
bid_id = 0
bid = 0
current_depth = 0
buy_orders.sort(key=itemgetter(2), reverse=True)
        while current_depth < len(buy_orders):
bid_qty = float(buy_orders[current_depth][1])
if bid_qty > 0.0001:
bid = float(buy_orders[current_depth][2])
bid_id = int(buy_orders[current_depth][0])
print ("Highest bid is for %f MKR @ %f ETH/MKR" % (bid_qty,bid))
break
else:
current_depth = current_depth + 1
else:
print ("Buy book is empty")
continue
if len(sell_orders) > 0:
depth = len(sell_orders)
sell_orders.sort(key=itemgetter(2), reverse=False)
#find lowest non dust ask
ask_qty = 0
ask_id = 0
ask = 0
current_depth = 0
        while current_depth < len(sell_orders):
ask_qty = float(sell_orders[current_depth][1])
if ask_qty > 0.0001:
ask_id = int(sell_orders[current_depth][0])
ask = float(sell_orders[current_depth][2])
print ("Lowest ask is for %f MKR @ %f ETH/MKR" % (ask_qty,ask))
break
else:
current_depth = current_depth + 1
else:
print ("Sell book is empty")
continue
#Make sure we have enough allowance
allowance = float(weth_contract.call().allowance(acct_owner, market_addr))/precision
if allowance < 100:
print("Out of WETH allowance")
print_log("ERR", "Out of WETH allowance")
continue
# result = weth_contract.transact().approve(acct_owner, int(10000*precision))
# print ("Update weth allowance: %s -> 10000" % (allowance))
# while web3rpc.eth.getTransactionReceipt(result) is None:
# print(".", end='', flush=True)
# time.sleep(2)
# print("")
    allowance = float(mkr_contract.call().allowance(acct_owner, market_addr))/precision
    if allowance < 100:
        print("Out of MKR allowance")
        print_log("ERR", "Out of MKR allowance")
continue
# result = mkr_contract.transact().approve(acct_owner, int(10000*precision))
# print ("Update mkr allowance: %s -> 10000" % (allowance))
# while web3rpc.eth.getTransactionReceipt(result) is None:
# print(".", end='', flush=True)
# time.sleep(2)
# print("")
if round(bid*100000) >= round(ask*100000):
match_found = True
print("Match found")
#print("\nAction needed!")
if weth_balance < ask_qty:
ask_qty = weth_balance
if mkr_balance < bid_qty:
bid_qty = mkr_balance
if bid_qty < ask_qty:
qty = bid_qty
else:
qty = ask_qty
qty = round(qty, 18)
bid = round(bid, 5)
ask = round(ask, 5)
if qty <= 0.001:
#print_log("ERR", "Order is too small to process")
print("Order is too small.")
continue
        buy_book_amount = int(qty*bid*precision)
        sell_book_amount = int(qty*precision)
#print("buy_book_amount: %s sell_book_amount %s bid_id %s ask_id %s" % (buy_book_amount/precision, sell_book_amount/precision, bid_id, ask_id))
if not fix_books(precision, buy_book_amount, sell_book_amount, bid_id, ask_id):
print("Something went wrong, aborting")
print("buy_book_amount: %s sell_book_amount %s bid_id %s ask_id %s" % (buy_book_amount, sell_book_amount, bid_id, ask_id))
print_log('ERR','"Something went wrong, aborting"')
logFile.close()
sys.exit()
print("Settled order for %f MKR @ %f ETH/MKR" % (float(qty), float(bid)))
        profit = round((bid - ask) * qty, 4)
print_log('log',"Settled order for " + str(float(qty)) + "MKR. Profit/Loss: " + str(profit) + " MKR" )
break
time.sleep(5)
logFile.close()
time.sleep(10) #Give things a chance to settle out
| Riverhead/maker-trade-fixer | fix_books.py | Python | mit | 8,494 |
#!/usr/bin/python
from domain.client import Client
from filerepository import FileRepository
from repository import RepositoryException
class FileClientRepository(FileRepository):
def _validate_instance(self, client):
if not isinstance(client, Client):
raise RepositoryException("Not Client")
def _get_table_header(self):
return "ID\t\t\tName\t\t\tCNP"
| leyyin/university | fundamentals-of-programming/labs/lab_5-11/repository/fileclient.py | Python | mit | 392 |
dia=input("Enter your day of birth: ")
mes=input("Enter your month of birth: ")
ano=input("Enter your year of birth: ")
print("You were born on day", dia, "of", mes, "of", ano)
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
def execute():
tables = frappe.db.sql_list("show tables")
for old_dt, new_dt in [["Sales BOM Item", "Product Bundle Item"],
["Sales BOM", "Product Bundle"]]:
if "tab"+new_dt not in tables:
frappe.rename_doc("DocType", old_dt, new_dt, force=True)
| mahabuber/erpnext | erpnext/patches/v5_1/sales_bom_rename.py | Python | agpl-3.0 | 440 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.exceptions import UserError, AccessError
from test_sale_common import TestSale
class TestSaleOrder(TestSale):
def test_sale_order(self):
""" Test the sale order flow (invoicing and quantity updates)
        - Invoice repeatedly while varying delivered quantities and check that invoices are always what we expect
"""
# DBO TODO: validate invoice and register payments
inv_obj = self.env['account.invoice']
so = self.env['sale.order'].create({
'partner_id': self.partner.id,
'partner_invoice_id': self.partner.id,
'partner_shipping_id': self.partner.id,
'order_line': [(0, 0, {'name': p.name, 'product_id': p.id, 'product_uom_qty': 2, 'product_uom': p.uom_id.id, 'price_unit': p.list_price}) for (_, p) in self.products.iteritems()],
'pricelist_id': self.env.ref('product.list0').id,
})
self.assertEqual(so.amount_total, sum([2 * p.list_price for (k, p) in self.products.iteritems()]), 'Sale: total amount is wrong')
# send quotation
so.force_quotation_send()
self.assertTrue(so.state == 'sent', 'Sale: state after sending is wrong')
# confirm quotation
so.action_confirm()
self.assertTrue(so.state == 'sale')
self.assertTrue(so.invoice_status == 'to invoice')
# create invoice: only 'invoice on order' products are invoiced
inv_id = so.action_invoice_create()
inv = inv_obj.browse(inv_id)
self.assertEqual(len(inv.invoice_line_ids), 2, 'Sale: invoice is missing lines')
self.assertEqual(inv.amount_total, sum([2 * p.list_price if p.invoice_policy == 'order' else 0 for (k, p) in self.products.iteritems()]), 'Sale: invoice total amount is wrong')
self.assertTrue(so.invoice_status == 'no', 'Sale: SO status after invoicing should be "nothing to invoice"')
self.assertTrue(len(so.invoice_ids) == 1, 'Sale: invoice is missing')
# deliver lines except 'time and material' then invoice again
for line in so.order_line:
line.qty_delivered = 2 if line.product_id.expense_policy=='no' else 0
self.assertTrue(so.invoice_status == 'to invoice', 'Sale: SO status after delivery should be "to invoice"')
inv_id = so.action_invoice_create()
inv = inv_obj.browse(inv_id)
self.assertEqual(len(inv.invoice_line_ids), 2, 'Sale: second invoice is missing lines')
self.assertEqual(inv.amount_total, sum([2 * p.list_price if p.invoice_policy == 'delivery' else 0 for (k, p) in self.products.iteritems()]), 'Sale: second invoice total amount is wrong')
self.assertTrue(so.invoice_status == 'invoiced', 'Sale: SO status after invoicing everything should be "invoiced"')
self.assertTrue(len(so.invoice_ids) == 2, 'Sale: invoice is missing')
# go over the sold quantity
for line in so.order_line:
if line.product_id == self.products['serv_order']:
line.qty_delivered = 10
self.assertTrue(so.invoice_status == 'upselling', 'Sale: SO status after increasing delivered qty higher than ordered qty should be "upselling"')
# upsell and invoice
for line in so.order_line:
if line.product_id == self.products['serv_order']:
line.product_uom_qty = 10
inv_id = so.action_invoice_create()
inv = inv_obj.browse(inv_id)
self.assertEqual(len(inv.invoice_line_ids), 1, 'Sale: third invoice is missing lines')
        self.assertEqual(inv.amount_total, 8 * self.products['serv_order'].list_price, 'Sale: third invoice total amount is wrong')
        self.assertTrue(so.invoice_status == 'invoiced', 'Sale: SO status after invoicing everything (including the upsell) should be "invoiced"')
def test_unlink_cancel(self):
""" Test deleting and cancelling sale orders depending on their state and on the user's rights """
so = self.env['sale.order'].create({
'partner_id': self.partner.id,
'partner_invoice_id': self.partner.id,
'partner_shipping_id': self.partner.id,
'order_line': [(0, 0, {'name': p.name, 'product_id': p.id, 'product_uom_qty': 2, 'product_uom': p.uom_id.id, 'price_unit': p.list_price}) for (_, p) in self.products.iteritems()],
'pricelist_id': self.env.ref('product.list0').id,
})
# SO in state 'draft' can be deleted
so_copy = so.copy()
with self.assertRaises(AccessError):
so_copy.sudo(self.user).unlink()
self.assertTrue(so_copy.sudo(self.manager).unlink(), 'Sale: deleting a quotation should be possible')
# SO in state 'cancel' can be deleted
so_copy = so.copy()
so_copy.action_confirm()
self.assertTrue(so_copy.state == 'sale', 'Sale: SO should be in state "sale"')
so_copy.action_cancel()
self.assertTrue(so_copy.state == 'cancel', 'Sale: SO should be in state "cancel"')
with self.assertRaises(AccessError):
so_copy.sudo(self.user).unlink()
self.assertTrue(so_copy.sudo(self.manager).unlink(), 'Sale: deleting a cancelled SO should be possible')
# SO in state 'sale' or 'done' cannot be deleted
so.action_confirm()
self.assertTrue(so.state == 'sale', 'Sale: SO should be in state "sale"')
with self.assertRaises(UserError):
so.sudo(self.manager).unlink()
so.action_done()
self.assertTrue(so.state == 'done', 'Sale: SO should be in state "done"')
with self.assertRaises(UserError):
so.sudo(self.manager).unlink()
    def test_cost_invoicing(self):
        """ Test confirming a vendor invoice to re-invoice costs on the sale order """
serv_cost = self.env.ref('product.service_cost_01')
prod_gap = self.env.ref('product.product_product_1')
so = self.env['sale.order'].create({
'partner_id': self.partner.id,
'partner_invoice_id': self.partner.id,
'partner_shipping_id': self.partner.id,
'order_line': [(0, 0, {'name': prod_gap.name, 'product_id': prod_gap.id, 'product_uom_qty': 2, 'product_uom': prod_gap.uom_id.id, 'price_unit': prod_gap.list_price})],
'pricelist_id': self.env.ref('product.list0').id,
})
so.action_confirm()
so._create_analytic_account()
inv_partner = self.env.ref('base.res_partner_2')
company = self.env.ref('base.main_company')
journal = self.env['account.journal'].create({'name': 'Purchase Journal - Test', 'code': 'STPJ', 'type': 'purchase', 'company_id': company.id})
account_payable = self.env['account.account'].create({'code': 'X1111', 'name': 'Sale - Test Payable Account', 'user_type_id': self.env.ref('account.data_account_type_payable').id, 'reconcile': True})
account_income = self.env['account.account'].create({'code': 'X1112', 'name': 'Sale - Test Account', 'user_type_id': self.env.ref('account.data_account_type_direct_costs').id})
invoice_vals = {
'name': '',
'type': 'in_invoice',
'partner_id': inv_partner.id,
'invoice_line_ids': [(0, 0, {'name': serv_cost.name, 'product_id': serv_cost.id, 'quantity': 2, 'uom_id': serv_cost.uom_id.id, 'price_unit': serv_cost.standard_price, 'account_analytic_id': so.project_id.id, 'account_id': account_income.id})],
'account_id': account_payable.id,
'journal_id': journal.id,
'currency_id': company.currency_id.id,
}
inv = self.env['account.invoice'].create(invoice_vals)
inv.signal_workflow('invoice_open')
sol = so.order_line.filtered(lambda l: l.product_id == serv_cost)
self.assertTrue(sol, 'Sale: cost invoicing does not add lines when confirming vendor invoice')
self.assertTrue(sol.price_unit == 160 and sol.qty_delivered == 2 and sol.product_uom_qty == sol.qty_invoiced == 0, 'Sale: line is wrong after confirming vendor invoice')
| ayepezv/GAD_ERP | addons/sale/tests/test_sale_order.py | Python | gpl-3.0 | 8,136 |
# encoding: utf-8
# pylint: disable=missing-docstring,redefined-outer-name
import pytest
from flask_login import current_user, login_user, logout_user
from tests import utils
from app.modules.users import models
@pytest.yield_fixture()
def patch_User_password_scheme():
# pylint: disable=invalid-name,protected-access
"""
By default, the application uses ``bcrypt`` to store passwords securely.
However, ``bcrypt`` is a slow hashing algorithm (by design), so it is
better to downgrade it to ``plaintext`` while testing, since it will save
us quite some time.
"""
    # NOTE: This seems hacky, but monkeypatching is a hack anyway.
password_field_context = models.User.password.property.columns[0].type.context
# NOTE: This is used here to forcefully resolve the LazyCryptContext
password_field_context.context_kwds
password_field_context._config._init_scheme_list(('plaintext', ))
password_field_context._config._init_records()
password_field_context._config._init_default_schemes()
yield
password_field_context._config._init_scheme_list(('bcrypt', ))
password_field_context._config._init_records()
password_field_context._config._init_default_schemes()
@pytest.fixture()
def user_instance(patch_User_password_scheme):
# pylint: disable=unused-argument,invalid-name
user_id = 1
_user_instance = utils.generate_user_instance(user_id=user_id)
_user_instance.get_id = lambda: user_id
return _user_instance
@pytest.yield_fixture()
def authenticated_user_instance(flask_app, user_instance):
with flask_app.test_request_context('/'):
login_user(user_instance)
yield current_user
logout_user()
@pytest.yield_fixture()
def anonymous_user_instance(flask_app):
with flask_app.test_request_context('/'):
yield current_user
| frol/flask-restplus-server-example | tests/modules/users/conftest.py | Python | mit | 1,851 |
import pytest
import os
import shutil
import core
virtuallinks = core.import_package('virtuallinks')
def setup_function(function):
shutil.rmtree('temporary', ignore_errors=True)
os.mkdir('temporary')
os.chdir('temporary')
def teardown_function(function):
os.chdir('..')
shutil.rmtree('temporary', ignore_errors=True)
def test_unmonitor_fail():
with pytest.raises(KeyError):
virtuallinks.unmonitor('open')
def test_monitor_double_unmonitor():
assert virtuallinks.nregistered() == 0
virtuallinks.monitor('open')
virtuallinks.monitor('open')
virtuallinks.unmonitor('open')
assert virtuallinks.nregistered() == 0
def test_monitor_unmonitor_double():
assert virtuallinks.nregistered() == 0
virtuallinks.monitor('open')
assert virtuallinks.nregistered() == 1
virtuallinks.unmonitor('open')
assert virtuallinks.nregistered() == 0
virtuallinks.monitor('open')
assert virtuallinks.nregistered() == 1
virtuallinks.unmonitor('open')
assert virtuallinks.nregistered() == 0
def test_monitor_after_inspector(capsys):
virtuallinks.enable_inspector()
virtuallinks.monitor('open')
out, err = capsys.readouterr()
assert out == ''
assert err == ''
virtuallinks.unmonitor('open')
virtuallinks.disable_inspector()
def _test_monitor_inspector_interleaved_0(capsys):
virtuallinks.monitor('open')
virtuallinks.enable_inspector()
virtuallinks.unmonitor('open')
virtuallinks.disable_inspector()
with open('file.txt', 'w') as f:
f.write('')
assert os.path.exists('file.txt')
assert os.path.isfile('file.txt')
def test_monitor_inspector_interleaved_1(capsys):
virtuallinks.monitor('open')
virtuallinks.enable_inspector()
virtuallinks.unmonitor('open')
with open('file.txt', 'w') as f:
f.write('')
virtuallinks.disable_inspector()
assert os.path.exists('file.txt')
assert os.path.isfile('file.txt')
def test_monitor_inspector_interleaved_2(capsys):
virtuallinks.monitor('open')
virtuallinks.enable_inspector()
with open('file.txt', 'w') as f:
f.write('')
virtuallinks.unmonitor('open')
virtuallinks.disable_inspector()
assert os.path.exists('file.txt')
assert os.path.isfile('file.txt')
def test_monitor_inspector_interleaved_3(capsys):
virtuallinks.monitor('open')
with open('file.txt', 'w') as f:
f.write('')
virtuallinks.enable_inspector()
virtuallinks.unmonitor('open')
virtuallinks.disable_inspector()
assert os.path.exists('file.txt')
assert os.path.isfile('file.txt')
virtuallinks.unmonitor_all()
virtuallinks.unlink_all()
| ffunenga/virtuallinks | tests/core/test_installing.py | Python | mit | 2,693 |
#! flask/bin/python
from os.path import abspath
from flask import current_app
from flask.ext.script import Manager
from flask.ext.assets import ManageAssets
from flask.ext.migrate import Migrate, MigrateCommand
from bluespot import create_app
from bluespot.extensions import db
app = create_app(mode='development')
manager = Manager(app)
manager.add_command('assets', ManageAssets())
migrate = Migrate(app, db)
manager.add_command('db', MigrateCommand)
manager.run()
#app.run(host='0.0.0.0',debug = True)
| unifispot/unifispot-free | manage.py | Python | mit | 510 |
from django.contrib import admin
from access.admin import *
from someapp.models import *
class ChildInline(AccessTabularInline):
model = SomeChild
# Register your models here.
class ObjectAdmin(AccessModelAdmin):
inlines = [
ChildInline,
]
# Register your models here.
admin.site.register(SomeObject,ObjectAdmin)
class ChildAdmin(AccessModelAdmin):
pass
admin.site.register(SomeChild,ChildAdmin)
| nnseva/django-access | example/someapp/admin.py | Python | lgpl-3.0 | 426 |
#########################################################
# Generator for skeletons of .cmd USE file with graphs #
#########################################################
# Variables :
# USE Program
prog = 'rhododendrone.use'
# Size of grid :
x_size = 3
y_size = 3
nodes_num = x_size * y_size
# Drone :
drone_num = 1 # not used yet
dcap = 5
dbat = 2
# Receptacle :
recp_num = 1 # not used yet
rcap = 6
# Output file
cmd_file = open('test.cmd', 'w')
# Header of file :
cmd_file.write("reset\nopen "+prog+"\n")
# Creation of nodes :
for i in range(1,nodes_num+1):
cmd_file.write("!create n"+str(i)+" : Node\n")
# Instantiate System
cmd_file.write("!create S : System\n")
cmd_file.write("!set S.grid := Set{")
for i in range(1, nodes_num):
cmd_file.write("n"+str(i)+",")
cmd_file.write("n"+str(nodes_num)+"}\n")
# Instantiate Drone:
cmd_file.write("!create d1 : Drone\n")
cmd_file.write("!set d1.DCAP := "+str(dcap)+"\n")
cmd_file.write("!set d1.DBAT := "+str(dbat)+"\n")
# Instantiate Commande:
cmd_file.write("!create c1 : Commande\n")
cmd_file.write("!set c1.produits := 4\n")
# Instantiate Receptacle:
cmd_file.write("!create r1 : Receptacle\n")
cmd_file.write("!set r1.RCAP := "+str(rcap)+"\n")
# Set relations :
#North :
for i in range(1,x_size):
for j in range(1, y_size+1):
cmd_file.write("!insert (n"+str(j+y_size+(i-1)*y_size)+",n"+str(j+(i-1)*y_size)+") into North\n")
# South :
for i in range(1,x_size):
for j in range(1, y_size+1):
cmd_file.write("!insert (n"+str(j+(i-1)*y_size)+",n"+str(j+y_size+(i-1)*y_size)+") into South\n")
# East :
for i in range(1,x_size+1):
for j in range(1, y_size):
cmd_file.write("!insert (n"+str(j+y_size*(i-1))+",n"+str((j+1)+y_size*(i-1))+") into East\n")
# West :
for i in range(1,x_size+1):
for j in range(1, y_size):
cmd_file.write("!insert (n"+str((j+1)+y_size*(i-1))+",n"+str((j+y_size*(i-1)))+") into West\n")
# Insertion of other object (static for the moment) :
cmd_file.write("!insert (d1,n1) into Loc\n")
cmd_file.write("!insert (c1,r1) into Destination\n")
cmd_file.write("!insert (d1,c1) into Livraison\n")
cmd_file.write("!insert (n3,r1) into GeoLoc\n")
cmd_file.write("!insert (S,n5) into Fleuriste\n")
cmd_file.write("check\n")
# Close the generated command file so all content is flushed to disk
cmd_file.close()
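# Illustrative excerpt (for orientation only, derived from the loops above):
# with the default 3x3 grid the generated test.cmd begins roughly like this:
#
#   reset
#   open rhododendrone.use
#   !create n1 : Node
#   ...
#   !create n9 : Node
#   !create S : System
#   !set S.grid := Set{n1,n2,n3,n4,n5,n6,n7,n8,n9}
#   !create d1 : Drone
#   !set d1.DCAP := 5
#   !set d1.DBAT := 2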
| ToshRaka/Rhododendrone | utils_py/graph.py | Python | gpl-2.0 | 2,265 |
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import geocoder
import psycopg2
def createTable(db_name, user, password, table_name, overwrite = False):
try:
con = psycopg2.connect("dbname={} user={} password={}".format(db_name, user, password))
cur = con.cursor()
    except psycopg2.Error as error:
        # Without a connection the cursor below would be undefined, so fail loudly here
        print "could not connect to the database:", error
        raise
if overwrite == True:
del_table_query = """DROP TABLE IF EXISTS {table_name};""".format(table_name = table_name)
cur.execute(del_table_query)
insert_query = """CREATE TABLE IF NOT EXISTS {table_name} (
id bigint,
time varchar(50),
latitude decimal,
longitude decimal,
selfrepcity varchar(500),
lang varchar(10),
source varchar(250),
countrycode varchar(10),
countryname varchar(250),
location varchar(250),
url varchar(100),
text varchar(500),
loclat decimal,
loclong decimal);
""".format(table_name = table_name)
cur.execute(insert_query)
con.commit()
cur.close()
con.close()
createTable(db_name="tweets", user="user", password="user", table_name = "trumptweets", overwrite = True) | SatoshiNakamotoGeoscripting/SatoshiNakamotoGeoscripting | Tweets project/createTable.py | Python | mit | 1,465 |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
This module is the Python connector for the peinjector patching service; it is part of the peinjector project.
https://github.com/JonDoNym/peinjector
Provides de-serialization and in-stream patch-applying capabilities for PE files.
"""
__author__ = 'A.A.'
# Unpack binary data
from struct import unpack_from
# Holds a single patch part
class PePatchPart(object):
# Constructor
def __init__(self, mem, position, insert):
self.mem = mem
self.next = None
self.position = position
self.insert = insert
self.finished = False
# Deserializes and applies patches to PE files
class PePatch(object):
# Sentinel size
    pepatch_sentinelsize = 9
# First Patch part
first = None
# Constructor
def __init__(self, serialized_memory):
serialized_mem_size = len(serialized_memory)
current_position = 0
current = None
patch = None
# Deserialize data
while (serialized_mem_size - current_position) >= self.pepatch_sentinelsize:
mem_size, position, insert = unpack_from("<II?", serialized_memory, current_position)
# 2*sizeof(uint32_t) + sizeof(uint8_t)
current_position += 9
# Length Error
if (serialized_mem_size - current_position) < mem_size:
return
# Extract Data
patch_data = serialized_memory[current_position:current_position + mem_size]
# Change Position
current_position += mem_size
# Add Patch
if mem_size > 0:
patch = PePatchPart(patch_data, position, insert)
else:
patch = None
# Build chain
if current is not None:
current.next = patch
if self.first is None:
self.first = patch
current = patch
# Length Error
if (serialized_mem_size - current_position) > 0:
self.first = None
return
# Patch is ok
def patch_ok(self):
return self.first is not None
# Apply patch on stream data
def apply_patch(self, mem, position):
all_finished = True
# Nothing to patch
if self.first is None:
return mem
# Apply each patch part
current = self.first
while current is not None:
# Finished, no need to check
if current.finished:
current = current.next
continue
# Patch starts inside memory
if position <= current.position < (position + len(mem)):
delta_position = current.position - position
# Insert
if current.insert:
mem = mem[:delta_position] + current.mem + mem[delta_position:]
# Patch part finished
current.finished = True
# Overwrite
else:
mem = mem[:delta_position] + current.mem[:len(mem)-delta_position] \
+ mem[delta_position+len(current.mem):]
# Patch applied
all_finished = False
# Append after current mem part (important if current part is the last part)
elif current.insert and (current.position == (position + len(mem))):
# Append patch
mem = mem + current.mem
# Patch part finished
current.finished = True
# Patch applied
all_finished = False
# Patch starts before memory
elif (not current.insert) and ((current.position + len(current.mem)) > position)\
and (current.position < position):
delta_position = position - current.position
mem = current.mem[delta_position:delta_position+len(mem)] + mem[len(current.mem)-delta_position:]
# Patch applied
all_finished = False
# Patch finished
elif (current.position + len(current.mem)) < position:
current.finished = True
# Reset total finished
else:
# Patch waiting
all_finished = False
# Next patch part
current = current.next
# Patch finished
if all_finished:
self.first = None
# Return patched memory
return mem
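# --------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original
# service code). The file names "patch.bin"/"target.exe"/"patched.exe" and
# the 4 KiB chunk size are assumptions; the point is only to show how a
# serialized patch is deserialized once and then applied chunk by chunk to a
# streamed PE file.
if __name__ == "__main__":
    with open("patch.bin", "rb") as f:          # assumed serialized patch blob
        pe_patch = PePatch(f.read())
    if pe_patch.patch_ok():
        position = 0
        with open("target.exe", "rb") as source, open("patched.exe", "wb") as target:
            while True:
                chunk = source.read(4096)
                if not chunk:
                    break
                # apply_patch() may insert bytes, so track the offset of the
                # source stream rather than of the written output
                target.write(pe_patch.apply_patch(chunk, position))
                position += len(chunk)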
| Esser420/EvilTwinFramework | core/MITMCore/MITMPluginsAux/libPePatch.py | Python | gpl-2.0 | 4,758 |
from frappe import _
def get_data():
return {
'heatmap': True,
		'heatmap_message': _('Member Activity'),
'fieldname': 'member',
'transactions': [
{
'label': _('Membership Details'),
'items': ['Membership']
}
]
} | indictranstech/erpnext | erpnext/non_profit/doctype/member/member_dashboard.py | Python | agpl-3.0 | 238 |
#Copyright ReportLab Europe Ltd. 2000-2004
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/platypus/__init__.py
__version__=''' $Id: __init__.py 3047 2007-02-23 17:45:16Z rgbecker $ '''
__doc__=''
from reportlab.platypus.flowables import Flowable, Image, Macro, PageBreak, Preformatted, Spacer, XBox, \
CondPageBreak, KeepTogether, TraceInfo, FailOnWrap, FailOnDraw, PTOContainer, \
KeepInFrame, ParagraphAndImage, ImageAndFlowables
from reportlab.platypus.paragraph import Paragraph, cleanBlockQuotedText, ParaLines
from reportlab.platypus.paraparser import ParaFrag
from reportlab.platypus.tables import Table, TableStyle, CellStyle, LongTable
from reportlab.platypus.frames import Frame
from reportlab.platypus.doctemplate import BaseDocTemplate, NextPageTemplate, PageTemplate, ActionFlowable, \
SimpleDocTemplate, FrameBreak, PageBegin, Indenter, NotAtTopPageBreak
from xpreformatted import XPreformatted
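# Hedged usage sketch (illustrative only): the usual Platypus flow is to build
# a list of flowables and hand it to a SimpleDocTemplate. The output file name
# and the sample style sheet lookup below are assumptions.
#
#     from reportlab.lib.styles import getSampleStyleSheet
#     styles = getSampleStyleSheet()
#     doc = SimpleDocTemplate("hello.pdf")
#     doc.build([Paragraph("Hello Platypus", styles["Normal"]), Spacer(1, 12)])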
| alexissmirnov/donomo | donomo_archive/lib/reportlab/platypus/__init__.py | Python | bsd-3-clause | 1,060 |
# This file is part of Androguard.
#
# Copyright (C) 2012, Anthony Desnos <desnos at t0t0.fr>
# All rights reserved.
#
# Androguard is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Androguard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Androguard. If not, see <http://www.gnu.org/licenses/>.
from androguard.core import bytecode
from androguard.core import androconf
from androguard.core.bytecodes.dvm_permissions import DVM_PERMISSIONS
import StringIO
from struct import pack, unpack
from xml.sax.saxutils import escape
from zlib import crc32
import re
from xml.dom import minidom
# 0: chilkat
# 1: default python zipfile module
# 2: patch zipfile module
ZIPMODULE = 1
import sys
if sys.hexversion < 0x2070000 :
try :
import chilkat
ZIPMODULE = 0
# UNLOCK : change it with your valid key !
try :
CHILKAT_KEY = open("key.txt", "rb").read()
except Exception :
CHILKAT_KEY = "testme"
except ImportError :
ZIPMODULE = 1
else :
ZIPMODULE = 1
################################################### CHILKAT ZIP FORMAT #####################################################
class ChilkatZip :
def __init__(self, raw) :
self.files = []
self.zip = chilkat.CkZip()
self.zip.UnlockComponent( CHILKAT_KEY )
self.zip.OpenFromMemory( raw, len(raw) )
filename = chilkat.CkString()
e = self.zip.FirstEntry()
while e != None :
e.get_FileName(filename)
self.files.append( filename.getString() )
e = e.NextEntry()
def delete(self, patterns) :
el = []
filename = chilkat.CkString()
e = self.zip.FirstEntry()
while e != None :
e.get_FileName(filename)
if re.match(patterns, filename.getString()) != None :
el.append( e )
e = e.NextEntry()
for i in el :
self.zip.DeleteEntry( i )
def remplace_file(self, filename, buff) :
entry = self.zip.GetEntryByName(filename)
if entry != None :
obj = chilkat.CkByteData()
obj.append( buff, len(buff) )
return entry.ReplaceData( obj )
return False
def write(self) :
obj = chilkat.CkByteData()
self.zip.WriteToMemory( obj )
return obj.getBytes()
def namelist(self) :
return self.files
def read(self, elem) :
e = self.zip.GetEntryByName( elem )
s = chilkat.CkByteData()
e.Inflate( s )
return s.getBytes()
def sign_apk(filename, keystore, storepass):
from subprocess import Popen, PIPE, STDOUT
compile = Popen([androconf.CONF["PATH_JARSIGNER"],
"-sigalg",
"MD5withRSA",
"-digestalg",
"SHA1",
"-storepass",
storepass,
"-keystore",
keystore,
filename,
"alias_name"],
stdout=PIPE, stderr=STDOUT)
stdout, stderr = compile.communicate()
######################################################## APK FORMAT ########################################################
class APK:
"""
        This class gives access to all elements in an APK file
:param filename: specify the path of the file, or raw data
:param raw: specify if the filename is a path or raw data (optional)
:param mode: specify the mode to open the file (optional)
:param magic_file: specify the magic file (optional)
:param zipmodule: specify the type of zip module to use (0:chilkat, 1:zipfile, 2:patch zipfile)
:type filename: string
:type raw: boolean
:type mode: string
:type magic_file: string
:type zipmodule: int
:Example:
APK("myfile.apk")
APK(open("myfile.apk", "rb").read(), raw=True)
"""
def __init__(self, filename, raw=False, mode="r", magic_file=None, zipmodule=ZIPMODULE):
self.filename = filename
self.xml = {}
self.axml = {}
self.arsc = {}
self.package = ""
self.androidversion = {}
self.permissions = []
self.valid_apk = False
self.files = {}
self.files_crc32 = {}
self.magic_file = magic_file
if raw == True:
self.__raw = filename
else:
fd = open(filename, "rb")
self.__raw = fd.read()
fd.close()
self.zipmodule = zipmodule
if zipmodule == 0:
self.zip = ChilkatZip(self.__raw)
elif zipmodule == 2:
from androguard.patch import zipfile
self.zip = zipfile.ZipFile(StringIO.StringIO(self.__raw), mode=mode)
else:
import zipfile
self.zip = zipfile.ZipFile(StringIO.StringIO(self.__raw), mode=mode)
for i in self.zip.namelist():
if i == "AndroidManifest.xml":
self.axml[i] = AXMLPrinter(self.zip.read(i))
try:
self.xml[i] = minidom.parseString(self.axml[i].get_buff())
except:
self.xml[i] = None
if self.xml[i] != None:
self.package = self.xml[i].documentElement.getAttribute("package")
self.androidversion["Code"] = self.xml[i].documentElement.getAttribute("android:versionCode")
self.androidversion["Name"] = self.xml[i].documentElement.getAttribute("android:versionName")
for item in self.xml[i].getElementsByTagName('uses-permission'):
self.permissions.append(str(item.getAttribute("android:name")))
self.valid_apk = True
self.get_files_types()
def get_AndroidManifest(self):
"""
Return the Android Manifest XML file
:rtype: xml object
"""
return self.xml["AndroidManifest.xml"]
def is_valid_APK(self):
"""
Return true if the APK is valid, false otherwise
:rtype: boolean
"""
return self.valid_apk
def get_filename(self):
"""
Return the filename of the APK
:rtype: string
"""
return self.filename
def get_package(self):
"""
Return the name of the package
:rtype: string
"""
return self.package
def get_androidversion_code(self):
"""
Return the android version code
:rtype: string
"""
return self.androidversion["Code"]
def get_androidversion_name(self):
"""
Return the android version name
:rtype: string
"""
return self.androidversion["Name"]
def get_files(self):
"""
Return the files inside the APK
:rtype: a list of strings
"""
return self.zip.namelist()
def get_files_types(self):
"""
Return the files inside the APK with their associated types (by using python-magic)
        :rtype: a dictionary
"""
try:
import magic
except ImportError:
# no lib magic !
for i in self.get_files():
buffer = self.zip.read(i)
self.files_crc32[i] = crc32(buffer)
self.files[i] = "Unknown"
return self.files
if self.files != {}:
return self.files
builtin_magic = 0
try:
getattr(magic, "MagicException")
except AttributeError:
builtin_magic = 1
if builtin_magic:
ms = magic.open(magic.MAGIC_NONE)
ms.load()
for i in self.get_files():
buffer = self.zip.read(i)
self.files[i] = ms.buffer(buffer)
self.files[i] = self._patch_magic(buffer, self.files[i])
self.files_crc32[i] = crc32(buffer)
else:
m = magic.Magic(magic_file=self.magic_file)
for i in self.get_files():
buffer = self.zip.read(i)
self.files[i] = m.from_buffer(buffer)
self.files[i] = self._patch_magic(buffer, self.files[i])
self.files_crc32[i] = crc32(buffer)
return self.files
def _patch_magic(self, buffer, orig):
if ("Zip" in orig) or ("DBase" in orig):
val = androconf.is_android_raw(buffer)
if val == "APK":
if androconf.is_valid_android_raw(buffer):
return "Android application package file"
elif val == "AXML":
return "Android's binary XML"
return orig
def get_files_crc32(self):
if self.files_crc32 == {}:
self.get_files_types()
return self.files_crc32
def get_files_information(self):
"""
Return the files inside the APK with their associated types and crc32
:rtype: string, string, int
"""
if self.files == {}:
self.get_files_types()
for i in self.get_files():
try:
yield i, self.files[i], self.files_crc32[i]
except KeyError:
yield i, "", ""
def get_raw(self):
"""
Return raw bytes of the APK
:rtype: string
"""
return self.__raw
def get_file(self, filename):
"""
Return the raw data of the specified filename
:rtype: string
"""
try:
return self.zip.read(filename)
except KeyError:
return ""
def get_dex(self):
"""
Return the raw data of the classes dex file
:rtype: string
"""
return self.get_file("classes.dex")
def get_elements(self, tag_name, attribute):
"""
        Return elements in the xml files which match the tag name and the specific attribute
        :param tag_name: a string which specifies the tag name
        :param attribute: a string which specifies the attribute
"""
l = []
for i in self.xml :
for item in self.xml[i].getElementsByTagName(tag_name) :
value = item.getAttribute(attribute)
value = self.format_value( value )
l.append( str( value ) )
return l
def format_value(self, value) :
if len(value) > 0 :
if value[0] == "." :
value = self.package + value
else :
v_dot = value.find(".")
if v_dot == 0 :
value = self.package + "." + value
elif v_dot == -1 :
value = self.package + "." + value
return value
def get_element(self, tag_name, attribute):
"""
        Return the first element in the xml files which matches the tag name and the specific attribute
:param tag_name: specify the tag name
:type tag_name: string
:param attribute: specify the attribute
:type attribute: string
:rtype: string
"""
for i in self.xml :
for item in self.xml[i].getElementsByTagName(tag_name) :
value = item.getAttribute(attribute)
if len(value) > 0 :
return value
return None
def get_main_activity(self) :
"""
Return the name of the main activity
:rtype: string
"""
for i in self.xml :
x = set()
y = set()
for item in self.xml[i].getElementsByTagName("activity") :
for sitem in item.getElementsByTagName( "action" ) :
val = sitem.getAttribute( "android:name" )
if val == "android.intent.action.MAIN" :
x.add( item.getAttribute( "android:name" ) )
for sitem in item.getElementsByTagName( "category" ) :
val = sitem.getAttribute( "android:name" )
if val == "android.intent.category.LAUNCHER" :
y.add( item.getAttribute( "android:name" ) )
z = x.intersection(y)
if len(z) > 0 :
return self.format_value(z.pop())
return None
def get_activities(self) :
"""
Return the android:name attribute of all activities
:rtype: a list of string
"""
return self.get_elements("activity", "android:name")
def get_services(self) :
"""
Return the android:name attribute of all services
:rtype: a list of string
"""
return self.get_elements("service", "android:name")
def get_receivers(self) :
"""
Return the android:name attribute of all receivers
:rtype: a list of string
"""
return self.get_elements("receiver", "android:name")
def get_providers(self) :
"""
Return the android:name attribute of all providers
:rtype: a list of string
"""
return self.get_elements("provider", "android:name")
def get_permissions(self) :
"""
Return permissions
:rtype: list of string
"""
return self.permissions
def get_details_permissions(self) :
"""
Return permissions with details
:rtype: list of string
"""
l = {}
for i in self.permissions :
perm = i
pos = i.rfind(".")
if pos != -1 :
perm = i[pos+1:]
try :
l[ i ] = DVM_PERMISSIONS["MANIFEST_PERMISSION"][ perm ]
except KeyError :
l[ i ] = [ "dangerous", "Unknown permission from android reference", "Unknown permission from android reference" ]
return l
def get_max_sdk_version(self):
"""
Return the android:maxSdkVersion attribute
:rtype: string
"""
return self.get_element("uses-sdk", "android:maxSdkVersion")
def get_min_sdk_version(self):
"""
Return the android:minSdkVersion attribute
:rtype: string
"""
return self.get_element("uses-sdk", "android:minSdkVersion")
def get_target_sdk_version(self) :
"""
Return the android:targetSdkVersion attribute
:rtype: string
"""
return self.get_element( "uses-sdk", "android:targetSdkVersion" )
def get_libraries(self) :
"""
Return the android:name attributes for libraries
:rtype: list
"""
return self.get_elements( "uses-library", "android:name" )
def get_certificate(self, filename) :
"""
        Return a certificate object for the given file name inside the APK
"""
import chilkat
cert = chilkat.CkCert()
f = self.get_file( filename )
success = cert.LoadFromBinary(f, len(f))
return success, cert
def new_zip(self, filename, deleted_files=None, new_files={}) :
"""
Create a new zip file
:param filename: the output filename of the zip
        :param deleted_files: a regex pattern matching files to remove
        :param new_files: a dictionary of new files
        :type filename: string
        :type deleted_files: None or a string
        :type new_files: a dictionary (key: filename, value: content of the file)
"""
if self.zipmodule == 2:
from androguard.patch import zipfile
zout = zipfile.ZipFile(filename, 'w')
else:
import zipfile
zout = zipfile.ZipFile(filename, 'w')
for item in self.zip.infolist():
if deleted_files != None:
if re.match(deleted_files, item.filename) == None:
if item.filename in new_files:
zout.writestr(item, new_files[item.filename])
else:
buffer = self.zip.read(item.filename)
zout.writestr(item, buffer)
zout.close()
def get_android_manifest_axml(self):
"""
Return the :class:`AXMLPrinter` object which corresponds to the AndroidManifest.xml file
:rtype: :class:`AXMLPrinter`
"""
try:
return self.axml["AndroidManifest.xml"]
except KeyError:
return None
def get_android_manifest_xml(self):
"""
Return the xml object which corresponds to the AndroidManifest.xml file
:rtype: object
"""
try:
return self.xml["AndroidManifest.xml"]
except KeyError:
return None
def get_android_resources(self):
"""
Return the :class:`ARSCParser` object which corresponds to the resources.arsc file
:rtype: :class:`ARSCParser`
"""
try:
return self.arsc["resources.arsc"]
except KeyError:
try:
self.arsc["resources.arsc"] = ARSCParser(self.zip.read("resources.arsc"))
return self.arsc["resources.arsc"]
except KeyError:
return None
def show(self):
self.get_files_types()
print "FILES: "
for i in self.get_files():
try:
print "\t", i, self.files[i], "%x" % self.files_crc32[i]
except KeyError:
print "\t", i, "%x" % self.files_crc32[i]
print "PERMISSIONS: "
details_permissions = self.get_details_permissions()
for i in details_permissions:
print "\t", i, details_permissions[i]
print "MAIN ACTIVITY: ", self.get_main_activity()
print "ACTIVITIES: ", self.get_activities()
print "SERVICES: ", self.get_services()
print "RECEIVERS: ", self.get_receivers()
print "PROVIDERS: ", self.get_providers()
def show_Certificate(cert):
print "Issuer: C=%s, CN=%s, DN=%s, E=%s, L=%s, O=%s, OU=%s, S=%s" % (cert.issuerC(), cert.issuerCN(), cert.issuerDN(), cert.issuerE(), cert.issuerL(), cert.issuerO(), cert.issuerOU(), cert.issuerS())
print "Subject: C=%s, CN=%s, DN=%s, E=%s, L=%s, O=%s, OU=%s, S=%s" % (cert.subjectC(), cert.subjectCN(), cert.subjectDN(), cert.subjectE(), cert.subjectL(), cert.subjectO(), cert.subjectOU(), cert.subjectS())
######################################################## AXML FORMAT ########################################################
# Translated from http://code.google.com/p/android4me/source/browse/src/android/content/res/AXmlResourceParser.java
UTF8_FLAG = 0x00000100
class StringBlock:
def __init__(self, buff):
self.start = buff.get_idx()
self._cache = {}
self.header = unpack('<h', buff.read(2))[0]
self.header_size = unpack('<h', buff.read(2))[0]
self.chunkSize = unpack('<i', buff.read(4))[0]
self.stringCount = unpack('<i', buff.read(4))[0]
self.styleOffsetCount = unpack('<i', buff.read(4))[0]
self.flags = unpack('<i', buff.read(4))[0]
self.m_isUTF8 = ((self.flags & UTF8_FLAG) != 0)
self.stringsOffset = unpack('<i', buff.read(4))[0]
self.stylesOffset = unpack('<i', buff.read(4))[0]
self.m_stringOffsets = []
self.m_styleOffsets = []
self.m_strings = []
self.m_styles = []
for i in range(0, self.stringCount):
self.m_stringOffsets.append(unpack('<i', buff.read(4))[0])
for i in range(0, self.styleOffsetCount):
self.m_styleOffsets.append(unpack('<i', buff.read(4))[0])
size = self.chunkSize - self.stringsOffset
if self.stylesOffset != 0:
size = self.stylesOffset - self.stringsOffset
# FIXME
if (size % 4) != 0:
androconf.warning("ooo")
for i in range(0, size):
self.m_strings.append(unpack('=b', buff.read(1))[0])
if self.stylesOffset != 0:
size = self.chunkSize - self.stylesOffset
# FIXME
if (size % 4) != 0:
androconf.warning("ooo")
for i in range(0, size / 4):
self.m_styles.append(unpack('<i', buff.read(4))[0])
def getString(self, idx):
if idx in self._cache:
return self._cache[idx]
if idx < 0 or not self.m_stringOffsets or idx >= len(self.m_stringOffsets):
return ""
offset = self.m_stringOffsets[idx]
if not self.m_isUTF8:
length = self.getShort2(self.m_strings, offset)
offset += 2
self._cache[idx] = self.decode(self.m_strings, offset, length)
else:
offset += self.getVarint(self.m_strings, offset)[1]
varint = self.getVarint(self.m_strings, offset)
offset += varint[1]
length = varint[0]
self._cache[idx] = self.decode2(self.m_strings, offset, length)
return self._cache[idx]
def getStyle(self, idx):
print idx
print idx in self.m_styleOffsets, self.m_styleOffsets[idx]
print self.m_styles[0]
def decode(self, array, offset, length):
length = length * 2
length = length + length % 2
data = ""
for i in range(0, length):
t_data = pack("=b", self.m_strings[offset + i])
data += unicode(t_data, errors='ignore')
if data[-2:] == "\x00\x00":
break
end_zero = data.find("\x00\x00")
if end_zero != -1:
data = data[:end_zero]
return data.decode("utf-16", 'replace')
def decode2(self, array, offset, length):
data = ""
for i in range(0, length):
t_data = pack("=b", self.m_strings[offset + i])
data += unicode(t_data, errors='ignore')
return data.decode("utf-8", 'replace')
def getVarint(self, array, offset):
val = array[offset]
more = (val & 0x80) != 0
val &= 0x7f
if not more:
return val, 1
return val << 8 | array[offset + 1] & 0xff, 2
def getShort(self, array, offset):
value = array[offset / 4]
if ((offset % 4) / 2) == 0:
return value & 0xFFFF
else:
return value >> 16
def getShort2(self, array, offset):
return (array[offset + 1] & 0xff) << 8 | array[offset] & 0xff
def show(self):
print "StringBlock", hex(self.start), hex(self.header), hex(self.header_size), hex(self.chunkSize), hex(self.stringsOffset), self.m_stringOffsets
for i in range(0, len(self.m_stringOffsets)):
print i, repr(self.getString(i))
ATTRIBUTE_IX_NAMESPACE_URI = 0
ATTRIBUTE_IX_NAME = 1
ATTRIBUTE_IX_VALUE_STRING = 2
ATTRIBUTE_IX_VALUE_TYPE = 3
ATTRIBUTE_IX_VALUE_DATA = 4
ATTRIBUTE_LENGHT = 5
CHUNK_AXML_FILE = 0x00080003
CHUNK_RESOURCEIDS = 0x00080180
CHUNK_XML_FIRST = 0x00100100
CHUNK_XML_START_NAMESPACE = 0x00100100
CHUNK_XML_END_NAMESPACE = 0x00100101
CHUNK_XML_START_TAG = 0x00100102
CHUNK_XML_END_TAG = 0x00100103
CHUNK_XML_TEXT = 0x00100104
CHUNK_XML_LAST = 0x00100104
START_DOCUMENT = 0
END_DOCUMENT = 1
START_TAG = 2
END_TAG = 3
TEXT = 4
class AXMLParser:
def __init__(self, raw_buff):
self.reset()
self.buff = bytecode.BuffHandle(raw_buff)
self.buff.read(4)
self.buff.read(4)
self.sb = StringBlock(self.buff)
self.m_resourceIDs = []
self.m_prefixuri = {}
self.m_uriprefix = {}
self.m_prefixuriL = []
self.visited_ns = []
def reset(self):
self.m_event = -1
self.m_lineNumber = -1
self.m_name = -1
self.m_namespaceUri = -1
self.m_attributes = []
self.m_idAttribute = -1
self.m_classAttribute = -1
self.m_styleAttribute = -1
def next(self):
self.doNext()
return self.m_event
def doNext(self):
if self.m_event == END_DOCUMENT:
return
event = self.m_event
self.reset()
while True:
chunkType = -1
# Fake END_DOCUMENT event.
if event == END_TAG:
pass
# START_DOCUMENT
if event == START_DOCUMENT:
chunkType = CHUNK_XML_START_TAG
else:
if self.buff.end():
self.m_event = END_DOCUMENT
break
chunkType = unpack('<L', self.buff.read(4))[0]
if chunkType == CHUNK_RESOURCEIDS:
chunkSize = unpack('<L', self.buff.read(4))[0]
# FIXME
if chunkSize < 8 or chunkSize % 4 != 0:
androconf.warning("ooo")
for i in range(0, chunkSize / 4 - 2):
self.m_resourceIDs.append(unpack('<L', self.buff.read(4))[0])
continue
# FIXME
if chunkType < CHUNK_XML_FIRST or chunkType > CHUNK_XML_LAST:
androconf.warning("ooo")
# Fake START_DOCUMENT event.
if chunkType == CHUNK_XML_START_TAG and event == -1:
self.m_event = START_DOCUMENT
break
self.buff.read(4) # /*chunkSize*/
lineNumber = unpack('<L', self.buff.read(4))[0]
self.buff.read(4) # 0xFFFFFFFF
if chunkType == CHUNK_XML_START_NAMESPACE or chunkType == CHUNK_XML_END_NAMESPACE:
if chunkType == CHUNK_XML_START_NAMESPACE:
prefix = unpack('<L', self.buff.read(4))[0]
uri = unpack('<L', self.buff.read(4))[0]
self.m_prefixuri[prefix] = uri
self.m_uriprefix[uri] = prefix
self.m_prefixuriL.append((prefix, uri))
self.ns = uri
else:
self.ns = -1
self.buff.read(4)
self.buff.read(4)
(prefix, uri) = self.m_prefixuriL.pop()
#del self.m_prefixuri[ prefix ]
#del self.m_uriprefix[ uri ]
continue
self.m_lineNumber = lineNumber
if chunkType == CHUNK_XML_START_TAG:
self.m_namespaceUri = unpack('<L', self.buff.read(4))[0]
self.m_name = unpack('<L', self.buff.read(4))[0]
# FIXME
self.buff.read(4) # flags
attributeCount = unpack('<L', self.buff.read(4))[0]
self.m_idAttribute = (attributeCount >> 16) - 1
attributeCount = attributeCount & 0xFFFF
self.m_classAttribute = unpack('<L', self.buff.read(4))[0]
self.m_styleAttribute = (self.m_classAttribute >> 16) - 1
self.m_classAttribute = (self.m_classAttribute & 0xFFFF) - 1
for i in range(0, attributeCount * ATTRIBUTE_LENGHT):
self.m_attributes.append(unpack('<L', self.buff.read(4))[0])
for i in range(ATTRIBUTE_IX_VALUE_TYPE, len(self.m_attributes), ATTRIBUTE_LENGHT):
self.m_attributes[i] = self.m_attributes[i] >> 24
self.m_event = START_TAG
break
if chunkType == CHUNK_XML_END_TAG:
self.m_namespaceUri = unpack('<L', self.buff.read(4))[0]
self.m_name = unpack('<L', self.buff.read(4))[0]
self.m_event = END_TAG
break
if chunkType == CHUNK_XML_TEXT:
self.m_name = unpack('<L', self.buff.read(4))[0]
# FIXME
self.buff.read(4)
self.buff.read(4)
self.m_event = TEXT
break
def getPrefixByUri(self, uri):
try:
return self.m_uriprefix[uri]
except KeyError:
return -1
def getPrefix(self):
try:
return self.sb.getString(self.m_uriprefix[self.m_namespaceUri])
except KeyError:
return u''
def getName(self):
if self.m_name == -1 or (self.m_event != START_TAG and self.m_event != END_TAG) :
return u''
return self.sb.getString(self.m_name)
def getText(self) :
if self.m_name == -1 or self.m_event != TEXT :
return u''
return self.sb.getString(self.m_name)
def getNamespacePrefix(self, pos):
prefix = self.m_prefixuriL[pos][0]
return self.sb.getString(prefix)
def getNamespaceUri(self, pos):
uri = self.m_prefixuriL[pos][1]
return self.sb.getString(uri)
def getXMLNS(self):
buff = ""
for i in self.m_uriprefix:
if i not in self.visited_ns:
buff += "xmlns:%s=\"%s\"\n" % (self.sb.getString(self.m_uriprefix[i]), self.sb.getString(self.m_prefixuri[self.m_uriprefix[i]]))
self.visited_ns.append(i)
return buff
def getNamespaceCount(self, pos) :
pass
def getAttributeOffset(self, index):
# FIXME
if self.m_event != START_TAG:
androconf.warning("Current event is not START_TAG.")
offset = index * 5
# FIXME
if offset >= len(self.m_attributes):
androconf.warning("Invalid attribute index")
return offset
def getAttributeCount(self):
if self.m_event != START_TAG:
return -1
return len(self.m_attributes) / ATTRIBUTE_LENGHT
def getAttributePrefix(self, index):
offset = self.getAttributeOffset(index)
uri = self.m_attributes[offset + ATTRIBUTE_IX_NAMESPACE_URI]
prefix = self.getPrefixByUri(uri)
if prefix == -1:
return ""
return self.sb.getString(prefix)
def getAttributeName(self, index) :
offset = self.getAttributeOffset(index)
name = self.m_attributes[offset+ATTRIBUTE_IX_NAME]
if name == -1 :
return ""
return self.sb.getString( name )
def getAttributeValueType(self, index) :
offset = self.getAttributeOffset(index)
return self.m_attributes[offset+ATTRIBUTE_IX_VALUE_TYPE]
def getAttributeValueData(self, index) :
offset = self.getAttributeOffset(index)
return self.m_attributes[offset+ATTRIBUTE_IX_VALUE_DATA]
def getAttributeValue(self, index) :
offset = self.getAttributeOffset(index)
valueType = self.m_attributes[offset+ATTRIBUTE_IX_VALUE_TYPE]
if valueType == TYPE_STRING :
valueString = self.m_attributes[offset+ATTRIBUTE_IX_VALUE_STRING]
return self.sb.getString( valueString )
# WIP
return ""
#int valueData=m_attributes[offset+ATTRIBUTE_IX_VALUE_DATA];
#return TypedValue.coerceToString(valueType,valueData);
TYPE_ATTRIBUTE = 2
TYPE_DIMENSION = 5
TYPE_FIRST_COLOR_INT = 28
TYPE_FIRST_INT = 16
TYPE_FLOAT = 4
TYPE_FRACTION = 6
TYPE_INT_BOOLEAN = 18
TYPE_INT_COLOR_ARGB4 = 30
TYPE_INT_COLOR_ARGB8 = 28
TYPE_INT_COLOR_RGB4 = 31
TYPE_INT_COLOR_RGB8 = 29
TYPE_INT_DEC = 16
TYPE_INT_HEX = 17
TYPE_LAST_COLOR_INT = 31
TYPE_LAST_INT = 31
TYPE_NULL = 0
TYPE_REFERENCE = 1
TYPE_STRING = 3
RADIX_MULTS = [ 0.00390625, 3.051758E-005, 1.192093E-007, 4.656613E-010 ]
DIMENSION_UNITS = [ "px","dip","sp","pt","in","mm" ]
FRACTION_UNITS = [ "%", "%p" ]
COMPLEX_UNIT_MASK = 15
def complexToFloat(xcomplex):
return (float)(xcomplex & 0xFFFFFF00) * RADIX_MULTS[(xcomplex >> 4) & 3]
class AXMLPrinter:
def __init__(self, raw_buff):
self.axml = AXMLParser(raw_buff)
self.xmlns = False
self.buff = u''
while True:
_type = self.axml.next()
# print "tagtype = ", _type
if _type == START_DOCUMENT:
self.buff += u'<?xml version="1.0" encoding="utf-8"?>\n'
elif _type == START_TAG:
self.buff += u'<' + self.getPrefix(self.axml.getPrefix()) + self.axml.getName() + u'\n'
self.buff += self.axml.getXMLNS()
for i in range(0, self.axml.getAttributeCount()):
self.buff += "%s%s=\"%s\"\n" % ( self.getPrefix(
self.axml.getAttributePrefix(i) ), self.axml.getAttributeName(i), self._escape( self.getAttributeValue( i ) ) )
self.buff += u'>\n'
elif _type == END_TAG :
self.buff += "</%s%s>\n" % ( self.getPrefix( self.axml.getPrefix() ), self.axml.getName() )
elif _type == TEXT :
self.buff += "%s\n" % self.axml.getText()
elif _type == END_DOCUMENT :
break
# pleed patch
def _escape(self, s):
s = s.replace("&", "&")
s = s.replace('"', """)
s = s.replace("'", "'")
s = s.replace("<", "<")
s = s.replace(">", ">")
return escape(s)
def get_buff(self):
return self.buff.encode('utf-8')
def get_xml(self):
return minidom.parseString(self.get_buff()).toprettyxml()
def get_xml_obj(self):
return minidom.parseString(self.get_buff())
def getPrefix(self, prefix):
if prefix == None or len(prefix) == 0:
return u''
return prefix + u':'
def getAttributeValue(self, index):
_type = self.axml.getAttributeValueType(index)
_data = self.axml.getAttributeValueData(index)
if _type == TYPE_STRING:
return self.axml.getAttributeValue(index)
elif _type == TYPE_ATTRIBUTE:
return "?%s%08X" % (self.getPackage(_data), _data)
elif _type == TYPE_REFERENCE:
return "@%s%08X" % (self.getPackage(_data), _data)
elif _type == TYPE_FLOAT:
return "%f" % unpack("=f", pack("=L", _data))[0]
elif _type == TYPE_INT_HEX:
return "0x%08X" % _data
elif _type == TYPE_INT_BOOLEAN:
if _data == 0:
return "false"
return "true"
elif _type == TYPE_DIMENSION:
return "%f%s" % (complexToFloat(_data), DIMENSION_UNITS[_data & COMPLEX_UNIT_MASK])
elif _type == TYPE_FRACTION:
return "%f%s" % (complexToFloat(_data) * 100, FRACTION_UNITS[_data & COMPLEX_UNIT_MASK])
elif _type >= TYPE_FIRST_COLOR_INT and _type <= TYPE_LAST_COLOR_INT:
return "#%08X" % _data
elif _type >= TYPE_FIRST_INT and _type <= TYPE_LAST_INT:
return "%d" % androconf.long2int(_data)
return "<0x%X, type 0x%02X>" % (_data, _type)
def getPackage(self, id):
if id >> 24 == 1:
return "android:"
return ""
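# Hedged usage sketch: converting the binary AndroidManifest.xml of an APK
# into readable XML. The path below is an assumption for illustration only;
# APK and AXMLPrinter are the classes defined in this module.
#
#     apk = APK("/path/to/example.apk")
#     axml = apk.get_android_manifest_axml()    # AXMLPrinter instance
#     print axml.get_xml()                      # pretty-printed manifest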
RES_NULL_TYPE = 0x0000
RES_STRING_POOL_TYPE = 0x0001
RES_TABLE_TYPE = 0x0002
RES_XML_TYPE = 0x0003
# Chunk types in RES_XML_TYPE
RES_XML_FIRST_CHUNK_TYPE = 0x0100
RES_XML_START_NAMESPACE_TYPE= 0x0100
RES_XML_END_NAMESPACE_TYPE = 0x0101
RES_XML_START_ELEMENT_TYPE = 0x0102
RES_XML_END_ELEMENT_TYPE = 0x0103
RES_XML_CDATA_TYPE = 0x0104
RES_XML_LAST_CHUNK_TYPE = 0x017f
# This contains a uint32_t array mapping strings in the string
# pool back to resource identifiers. It is optional.
RES_XML_RESOURCE_MAP_TYPE = 0x0180
# Chunk types in RES_TABLE_TYPE
RES_TABLE_PACKAGE_TYPE = 0x0200
RES_TABLE_TYPE_TYPE = 0x0201
RES_TABLE_TYPE_SPEC_TYPE = 0x0202
class ARSCParser:
def __init__(self, raw_buff):
self.analyzed = False
self.buff = bytecode.BuffHandle(raw_buff)
#print "SIZE", hex(self.buff.size())
self.header = ARSCHeader(self.buff)
self.packageCount = unpack('<i', self.buff.read(4))[0]
#print hex(self.packageCount)
self.stringpool_main = StringBlock(self.buff)
self.next_header = ARSCHeader(self.buff)
self.packages = {}
self.values = {}
for i in range(0, self.packageCount):
current_package = ARSCResTablePackage(self.buff)
package_name = current_package.get_name()
self.packages[package_name] = []
mTableStrings = StringBlock(self.buff)
mKeyStrings = StringBlock(self.buff)
#self.stringpool_main.show()
#self.mTableStrings.show()
#self.mKeyStrings.show()
self.packages[package_name].append(current_package)
self.packages[package_name].append(mTableStrings)
self.packages[package_name].append(mKeyStrings)
pc = PackageContext(current_package, self.stringpool_main, mTableStrings, mKeyStrings)
current = self.buff.get_idx()
while not self.buff.end():
header = ARSCHeader(self.buff)
self.packages[package_name].append(header)
if header.type == RES_TABLE_TYPE_SPEC_TYPE:
self.packages[package_name].append(ARSCResTypeSpec(self.buff, pc))
elif header.type == RES_TABLE_TYPE_TYPE:
a_res_type = ARSCResType(self.buff, pc)
self.packages[package_name].append(a_res_type)
entries = []
for i in range(0, a_res_type.entryCount):
current_package.mResId = current_package.mResId & 0xffff0000 | i
entries.append((unpack('<i', self.buff.read(4))[0], current_package.mResId))
self.packages[package_name].append(entries)
for entry, res_id in entries:
if self.buff.end():
break
if entry != -1:
ate = ARSCResTableEntry(self.buff, res_id, pc)
self.packages[package_name].append(ate)
elif header.type == RES_TABLE_PACKAGE_TYPE:
break
else:
androconf.warning("unknown type")
break
current += header.size
self.buff.set_idx(current)
def _analyse(self):
if self.analyzed:
return
self.analyzed = True
for package_name in self.packages:
self.values[package_name] = {}
nb = 3
for header in self.packages[package_name][nb:]:
if isinstance(header, ARSCHeader):
if header.type == RES_TABLE_TYPE_TYPE:
a_res_type = self.packages[package_name][nb + 1]
if a_res_type.config.get_language() not in self.values[package_name]:
self.values[package_name][a_res_type.config.get_language()] = {}
self.values[package_name][a_res_type.config.get_language()]["public"] = []
c_value = self.values[package_name][a_res_type.config.get_language()]
entries = self.packages[package_name][nb + 2]
nb_i = 0
for entry, res_id in entries:
if entry != -1:
ate = self.packages[package_name][nb + 3 + nb_i]
#print ate.is_public(), a_res_type.get_type(), ate.get_value(), hex(ate.mResId)
if ate.get_index() != -1:
c_value["public"].append((a_res_type.get_type(), ate.get_value(), ate.mResId))
if a_res_type.get_type() not in c_value:
c_value[a_res_type.get_type()] = []
if a_res_type.get_type() == "string":
c_value["string"].append(self.get_resource_string(ate))
elif a_res_type.get_type() == "id":
if not ate.is_complex():
c_value["id"].append(self.get_resource_id(ate))
elif a_res_type.get_type() == "bool":
if not ate.is_complex():
c_value["bool"].append(self.get_resource_bool(ate))
elif a_res_type.get_type() == "integer":
c_value["integer"].append(self.get_resource_integer(ate))
elif a_res_type.get_type() == "color":
c_value["color"].append(self.get_resource_color(ate))
elif a_res_type.get_type() == "dimen":
c_value["dimen"].append(self.get_resource_dimen(ate))
#elif a_res_type.get_type() == "style":
# c_value["style"].append(self.get_resource_style(ate))
nb_i += 1
nb += 1
def get_resource_string(self, ate):
return [ate.get_value(), ate.get_key_data()]
def get_resource_id(self, ate):
x = [ate.get_value()]
if ate.key.get_data() == 0:
x.append("false")
elif ate.key.get_data() == 1:
x.append("true")
return x
def get_resource_bool(self, ate):
x = [ate.get_value()]
if ate.key.get_data() == 0:
x.append("false")
elif ate.key.get_data() == -1:
x.append("true")
return x
def get_resource_integer(self, ate):
return [ate.get_value(), ate.key.get_data()]
def get_resource_color(self, ate):
entry_data = ate.key.get_data()
return [ate.get_value(), "#%02x%02x%02x%02x" % (((entry_data >> 24) & 0xFF), ((entry_data >> 16) & 0xFF), ((entry_data >> 8) & 0xFF), (entry_data & 0xFF))]
def get_resource_dimen(self, ate):
try:
return [ate.get_value(), "%s%s" % (complexToFloat(ate.key.get_data()), DIMENSION_UNITS[ate.key.get_data() & COMPLEX_UNIT_MASK])]
except Exception, why:
androconf.warning(why.__str__())
return [ate.get_value(), ate.key.get_data()]
# FIXME
def get_resource_style(self, ate):
return ["", ""]
def get_packages_names(self):
return self.packages.keys()
def get_locales(self, package_name):
self._analyse()
return self.values[package_name].keys()
def get_types(self, package_name, locale):
self._analyse()
return self.values[package_name][locale].keys()
def get_public_resources(self, package_name, locale='\x00\x00'):
self._analyse()
buff = '<?xml version="1.0" encoding="utf-8"?>\n'
buff += '<resources>\n'
try:
for i in self.values[package_name][locale]["public"]:
buff += '<public type="%s" name="%s" id="0x%08x" />\n' % (i[0], i[1], i[2])
except KeyError:
pass
buff += '</resources>\n'
return buff.encode('utf-8')
def get_string_resources(self, package_name, locale='\x00\x00'):
self._analyse()
buff = '<?xml version="1.0" encoding="utf-8"?>\n'
buff += '<resources>\n'
try:
for i in self.values[package_name][locale]["string"]:
buff += '<string name="%s">%s</string>\n' % (i[0], i[1])
except KeyError:
pass
buff += '</resources>\n'
return buff.encode('utf-8')
def get_strings_resources(self):
self._analyse()
buff = '<?xml version="1.0" encoding="utf-8"?>\n'
buff += "<packages>\n"
for package_name in self.get_packages_names():
buff += "<package name=\"%s\">\n" % package_name
for locale in self.get_locales(package_name):
buff += "<locale value=%s>\n" % repr(locale)
buff += '<resources>\n'
try:
for i in self.values[package_name][locale]["string"]:
buff += '<string name="%s">%s</string>\n' % (i[0], i[1])
except KeyError:
pass
buff += '</resources>\n'
buff += '</locale>\n'
buff += "</package>\n"
buff += "</packages>\n"
return buff.encode('utf-8')
def get_id_resources(self, package_name, locale='\x00\x00'):
self._analyse()
buff = '<?xml version="1.0" encoding="utf-8"?>\n'
buff += '<resources>\n'
try:
for i in self.values[package_name][locale]["id"]:
if len(i) == 1:
buff += '<item type="id" name="%s"/>\n' % (i[0])
else:
buff += '<item type="id" name="%s">%s</item>\n' % (i[0], i[1])
except KeyError:
pass
buff += '</resources>\n'
return buff.encode('utf-8')
def get_bool_resources(self, package_name, locale='\x00\x00'):
self._analyse()
buff = '<?xml version="1.0" encoding="utf-8"?>\n'
buff += '<resources>\n'
try:
for i in self.values[package_name][locale]["bool"]:
buff += '<bool name="%s">%s</bool>\n' % (i[0], i[1])
except KeyError:
pass
buff += '</resources>\n'
return buff.encode('utf-8')
def get_integer_resources(self, package_name, locale='\x00\x00'):
self._analyse()
buff = '<?xml version="1.0" encoding="utf-8"?>\n'
buff += '<resources>\n'
try:
for i in self.values[package_name][locale]["integer"]:
buff += '<integer name="%s">%s</integer>\n' % (i[0], i[1])
except KeyError:
pass
buff += '</resources>\n'
return buff.encode('utf-8')
def get_color_resources(self, package_name, locale='\x00\x00'):
self._analyse()
buff = '<?xml version="1.0" encoding="utf-8"?>\n'
buff += '<resources>\n'
try:
for i in self.values[package_name][locale]["color"]:
buff += '<color name="%s">%s</color>\n' % (i[0], i[1])
except KeyError:
pass
buff += '</resources>\n'
return buff.encode('utf-8')
def get_dimen_resources(self, package_name, locale='\x00\x00'):
self._analyse()
buff = '<?xml version="1.0" encoding="utf-8"?>\n'
buff += '<resources>\n'
try:
for i in self.values[package_name][locale]["dimen"]:
buff += '<dimen name="%s">%s</dimen>\n' % (i[0], i[1])
except KeyError:
pass
buff += '</resources>\n'
return buff.encode('utf-8')
def get_id(self, package_name, rid, locale='\x00\x00'):
self._analyse()
try:
for i in self.values[package_name][locale]["public"]:
if i[2] == rid:
return i
except KeyError:
return None
def get_string(self, package_name, name, locale='\x00\x00'):
self._analyse()
try:
for i in self.values[package_name][locale]["string"]:
if i[0] == name:
return i
except KeyError:
return None
def get_items(self, package_name):
self._analyse()
return self.packages[package_name]
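# Hedged usage sketch: dumping the string resources found in resources.arsc.
# The APK path is an assumption; get_android_resources() returns an ARSCParser
# instance (or None when the APK contains no resources.arsc).
#
#     arsc = APK("/path/to/example.apk").get_android_resources()
#     if arsc:
#         for package_name in arsc.get_packages_names():
#             print arsc.get_string_resources(package_name)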
class PackageContext:
def __init__(self, current_package, stringpool_main, mTableStrings, mKeyStrings):
self.stringpool_main = stringpool_main
self.mTableStrings = mTableStrings
self.mKeyStrings = mKeyStrings
self.current_package = current_package
def get_mResId(self):
return self.current_package.mResId
def set_mResId(self, mResId):
self.current_package.mResId = mResId
class ARSCHeader:
def __init__(self, buff):
self.start = buff.get_idx()
self.type = unpack('<h', buff.read(2))[0]
self.header_size = unpack('<h', buff.read(2))[0]
self.size = unpack('<i', buff.read(4))[0]
#print "ARSCHeader", hex(self.start), hex(self.type), hex(self.header_size), hex(self.size)
class ARSCResTablePackage:
def __init__(self, buff):
self.start = buff.get_idx()
self.id = unpack('<i', buff.read(4))[0]
self.name = buff.readNullString(256)
self.typeStrings = unpack('<i', buff.read(4))[0]
self.lastPublicType = unpack('<i', buff.read(4))[0]
self.keyStrings = unpack('<i', buff.read(4))[0]
self.lastPublicKey = unpack('<i', buff.read(4))[0]
self.mResId = self.id << 24
#print "ARSCResTablePackage", hex(self.start), hex(self.id), hex(self.mResId), repr(self.name.decode("utf-16", errors='replace')), hex(self.typeStrings), hex(self.lastPublicType), hex(self.keyStrings), hex(self.lastPublicKey)
def get_name(self):
name = self.name.decode("utf-16", 'replace')
name = name[:name.find("\x00")]
return name
class ARSCResTypeSpec:
def __init__(self, buff, parent=None):
self.start = buff.get_idx()
self.parent = parent
self.id = unpack('<b', buff.read(1))[0]
self.res0 = unpack('<b', buff.read(1))[0]
self.res1 = unpack('<h', buff.read(2))[0]
self.entryCount = unpack('<i', buff.read(4))[0]
#print "ARSCResTypeSpec", hex(self.start), hex(self.id), hex(self.res0), hex(self.res1), hex(self.entryCount), "table:" + self.parent.mTableStrings.getString(self.id - 1)
self.typespec_entries = []
for i in range(0, self.entryCount):
self.typespec_entries.append(unpack('<i', buff.read(4))[0])
class ARSCResType:
def __init__(self, buff, parent=None):
self.start = buff.get_idx()
self.parent = parent
self.id = unpack('<b', buff.read(1))[0]
self.res0 = unpack('<b', buff.read(1))[0]
self.res1 = unpack('<h', buff.read(2))[0]
self.entryCount = unpack('<i', buff.read(4))[0]
self.entriesStart = unpack('<i', buff.read(4))[0]
self.mResId = (0xff000000 & self.parent.get_mResId()) | self.id << 16
self.parent.set_mResId(self.mResId)
#print "ARSCResType", hex(self.start), hex(self.id), hex(self.res0), hex(self.res1), hex(self.entryCount), hex(self.entriesStart), hex(self.mResId), "table:" + self.parent.mTableStrings.getString(self.id - 1)
self.config = ARSCResTableConfig(buff)
def get_type(self):
return self.parent.mTableStrings.getString(self.id - 1)
class ARSCResTableConfig:
def __init__(self, buff):
self.start = buff.get_idx()
self.size = unpack('<i', buff.read(4))[0]
self.imsi = unpack('<i', buff.read(4))[0]
self.locale = unpack('<i', buff.read(4))[0]
self.screenType = unpack('<i', buff.read(4))[0]
self.input = unpack('<i', buff.read(4))[0]
self.screenSize = unpack('<i', buff.read(4))[0]
self.version = unpack('<i', buff.read(4))[0]
self.screenConfig = 0
self.screenSizeDp = 0
if self.size >= 32:
self.screenConfig = unpack('<i', buff.read(4))[0]
if self.size >= 36:
self.screenSizeDp = unpack('<i', buff.read(4))[0]
self.exceedingSize = self.size - 36
if self.exceedingSize > 0:
androconf.warning("too much bytes !")
self.padding = buff.read(self.exceedingSize)
#print "ARSCResTableConfig", hex(self.start), hex(self.size), hex(self.imsi), hex(self.locale), repr(self.get_language()), repr(self.get_country()), hex(self.screenType), hex(self.input), hex(self.screenSize), hex(self.version), hex(self.screenConfig), hex(self.screenSizeDp)
def get_language(self):
x = self.locale & 0x0000ffff
return chr(x & 0x00ff) + chr((x & 0xff00) >> 8)
def get_country(self):
x = (self.locale & 0xffff0000) >> 16
return chr(x & 0x00ff) + chr((x & 0xff00) >> 8)
class ARSCResTableEntry:
def __init__(self, buff, mResId, parent=None):
self.start = buff.get_idx()
self.mResId = mResId
self.parent = parent
self.size = unpack('<h', buff.read(2))[0]
self.flags = unpack('<h', buff.read(2))[0]
self.index = unpack('<i', buff.read(4))[0]
#print "ARSCResTableEntry", hex(self.start), hex(self.mResId), hex(self.size), hex(self.flags), hex(self.index), self.is_complex()#, hex(self.mResId)
if self.flags & 1:
self.item = ARSCComplex(buff, parent)
else:
self.key = ARSCResStringPoolRef(buff, self.parent)
def get_index(self):
return self.index
def get_value(self):
return self.parent.mKeyStrings.getString(self.index)
def get_key_data(self):
return self.key.get_data_value()
def is_public(self):
return self.flags == 0 or self.flags == 2
def is_complex(self):
return (self.flags & 1) == 1
class ARSCComplex:
def __init__(self, buff, parent=None):
self.start = buff.get_idx()
self.parent = parent
self.id_parent = unpack('<i', buff.read(4))[0]
self.count = unpack('<i', buff.read(4))[0]
self.items = []
for i in range(0, self.count):
self.items.append((unpack('<i', buff.read(4))[0], ARSCResStringPoolRef(buff, self.parent)))
#print "ARSCComplex", hex(self.start), self.id_parent, self.count, repr(self.parent.mKeyStrings.getString(self.id_parent))
class ARSCResStringPoolRef:
def __init__(self, buff, parent=None):
self.start = buff.get_idx()
self.parent = parent
self.skip_bytes = buff.read(3)
self.data_type = unpack('<b', buff.read(1))[0]
self.data = unpack('<i', buff.read(4))[0]
#print "ARSCResStringPoolRef", hex(self.start), hex(self.data_type), hex(self.data)#, "key:" + self.parent.mKeyStrings.getString(self.index), self.parent.stringpool_main.getString(self.data)
def get_data_value(self):
return self.parent.stringpool_main.getString(self.data)
def get_data(self):
return self.data
def get_data_type(self):
return self.data_type
| thanatoskira/AndroGuard | build/lib.linux-x86_64-2.7/androguard/core/bytecodes/apk.py | Python | lgpl-3.0 | 55,407 |
# -*- coding: utf-8 -*-
################################################################################
# Module: utils.py
# Description: Test imports and network extraction
# License: GPL3, see full license in LICENSE.txt
# Web: https://github.com/DavidBreuer/CytoSeg
################################################################################
#%%############################################################################# imports
import itertools
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import os
import pandas as pd
import random
import scipy as sp
import scipy.misc
import scipy.ndimage
import scipy.optimize
import scipy.spatial
import scipy.stats
import scipy.cluster
import skimage
import skimage.filters
import skimage.morphology
import skimage.feature
import skimage.segmentation
import skimage.draw # needed by connect_graph and boundary_graph below
import skimage.measure # needed by mask2poly below
import shapely
import shapely.geometry
import sys
import xml
import xml.dom
import xml.dom.minidom
import utils
#%%############################################################################# help functions
def xmlread(name,threed=0):
"""Read Fiji-Trackmate xml file to Python list of lists.
Parameters
----------
name : name and directory of xml file
threed : set to 1 for three-dimensional data
Returns
-------
T : list of tracks
"""
xmldoc=xml.dom.minidom.parse(name)
spots=xmldoc.getElementsByTagName('Spot')
tracs=xmldoc.getElementsByTagName('Track')
S=[]
N=[]
for spot in spots:
n=int(spot.attributes['ID'].value)
t=float(spot.attributes['POSITION_T'].value)
x=float(spot.attributes['POSITION_X'].value)
y=float(spot.attributes['POSITION_Y'].value)
if(threed): z=float(spot.attributes['POSITION_Z'].value)
else: z=0
mi=float(spot.attributes['MEAN_INTENSITY'].value)
mt=float(spot.attributes['TOTAL_INTENSITY'].value)
mq=float(spot.attributes['QUALITY'].value)
md=float(spot.attributes['ESTIMATED_DIAMETER'].value)
N.append(n)
S.append([n,t,x,y,z,mi,mt,mq,md])
T=[]
for trac in tracs:
n=int(trac.attributes['TRACK_ID'].value)
dur=int(float(trac.attributes['TRACK_DURATION'].value))
dis=float(trac.attributes['TRACK_DISPLACEMENT'].value)
edges=trac.getElementsByTagName('Edge')
E=[]
for edge in edges:
id0=int(edge.attributes['SPOT_SOURCE_ID'].value)
id1=float(edge.attributes['SPOT_TARGET_ID'].value)
vel=float(edge.attributes['VELOCITY'].value)
n0=N.index(id0)
n1=N.index(id1)
m0,t0,x0,y0,z0,mi0,mt0,mq0,md0=S[n0]
m1,t1,x1,y1,z1,mi1,mt1,mq1,md1=S[n1]
E.append([t0,x0,y0,z0,mi0,mt0,mq0,md0,t1,x1,y1,z1,mi1,mt1,mq1,md1])
E=np.array(E)
if(len(E)>0):
E=E[E[:,0].argsort()]
T.append(E)
return T
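# Usage sketch (file name is hypothetical; assumes a TrackMate XML export):
# T = xmlread('tracks.xml', threed=1)
# T[i] is then an array with one row per edge of track i:
# [t0,x0,y0,z0,mi0,mt0,mq0,md0, t1,x1,y1,z1,mi1,mt1,mq1,md1]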
def angle360(dxy):
"""Compute angle of two-dimensional vector relative to y-axis in degrees.
Parameters
----------
dxy : two-dimensional vector
Returns
-------
angle : angle in degrees
"""
dx,dy=dxy
rad2deg=180.0/np.pi
angle=np.mod(np.arctan2(-dx,-dy)*rad2deg+180.0,360.0)
return angle
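# Quick sanity checks (follow directly from the formula above):
# angle360(np.array([0.0, 1.0])) -> 0.0 (vector along +y)
# angle360(np.array([1.0, 0.0])) -> 90.0 (vector along +x)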
def im2d3d(im):
"""Convert two-dimensional array to three-dimensional array.
Parameters
----------
im : array or image
Returns
-------
im : array or image
"""
if(len(im.shape)==2):
im=im[:,:,np.newaxis]
else:
im=im
return im
def remove_duplicates(points):
"""Remove duplicates from list.
Parameters
----------
points : list
Returns
-------
pointz : list without duplicates
"""
pointz=pd.DataFrame(points).drop_duplicates().values
return pointz
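# Example: remove_duplicates([[0, 0], [1, 2], [0, 0]]) -> array([[0, 0], [1, 2]])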
def tube_filter(imO,sigma):
"""Apply tubeness filter to image.
Parameters
----------
imO : original two-dimensional image
sigma : width parameter of tube-like structures
Returns
-------
imT : filtered and rescaled image
"""
imH=skimage.feature.hessian_matrix(imO,sigma=sigma,mode='reflect')
imM=skimage.feature.hessian_matrix_eigvals(imH[0],imH[1],imH[2])
imR=-1.0*imM[1]
imT=255.0*(imR-imR.min())/(imR.max()-imR.min())
return imT
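# Usage sketch (assumes `im` is a 2D grayscale numpy array; sigma value is illustrative):
# imT = tube_filter(im, sigma=2.0) # ridge-enhanced copy of `im`, rescaled to 0..255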
def cell_sample(mask,R):
"""Sample random points uniformly across masked area.
Parameters
----------
mask : sampling area
R : number of sampling points
Returns
-------
coords : sampled random points
"""
wh=np.array(np.where(mask)).T
W=len(wh)
idx=sp.random.randint(0,W,R)
coords=wh[idx]+sp.rand(R,2)
return coords
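# Usage sketch (assumes `mask` is a 2D boolean array):
# pts = cell_sample(mask, 100) # 100 random (row, col) points inside the mask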
def multi_line_intersect(seg,segs):
"""Check intersections of line segments.
Parameters
----------
seg : single line segment
segs : multiple line segments
Returns
-------
intersects : Boolean array indicating intersects
"""
intersects=np.array([False])
if(len(segs)>0):
d3=segs[:,1,:]-segs[:,0,:]
d1=seg[1,:]-seg[0,:]
c1x=np.cross(d3,seg[0,:]-segs[:,0,:])
c1y=np.cross(d3,seg[1,:]-segs[:,0,:])
c3x=np.cross(d1,segs[:,0,:]-seg[0,:])
c3y=np.cross(d1,segs[:,1,:]-seg[0,:])
intersects=np.logical_and(c1x*c1y<0,c3x*c3y<0)
return intersects
def bounds(x,xmin,xmax):
"""Restrict number to interval.
Parameters
----------
x : number
xmin : lower bound
xmax : upper bound
Returns
-------
x : bounded number
"""
if(x<xmin):
x=xmin
elif(x>xmax):
x=xmax
return x
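# Examples: bounds(-3, 0, 10) -> 0; bounds(4, 0, 10) -> 4; bounds(99, 0, 10) -> 10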
def node_condense(imM,imG,ones):
"""Condense neighboring to single node located at center of mass.
Parameters
----------
imM : binary node array (0 = background; 1 = nodes)
imG : gray-scale intensity image
ones : array defining neighborhood structure
Returns
-------
imL : condensed and labeled node array (0 = background; 1-N = nodes)
"""
imL,N=sp.ndimage.label(imM,structure=ones) # label nodes
sizes=sp.ndimage.sum(imL>0,imL,range(1,N+1)) # compute size of nodes (clusters)
coms=sp.ndimage.center_of_mass(imG,imL,range(1,N+1)) # compute center of mass of nodes (clusters)
for n in range(N): # for each node...
if(sizes[n]>1): # if cluster...
idx=(imL==n+1) # get cluster coordinates
idm=tuple(np.add(coms[n],0.5).astype('int')) # get center of mass coordinates
imL[idx]=0 # remove node cluster
imL[idm]=n+1 # set node at center of mass
imL,N=sp.ndimage.label(imL>0,structure=ones) # label nodes
imL=imL.astype('int')
return imL
def node_find(im):
"""Find nodes in binary filament image.
Parameters
----------
im : section of binary filament image
Returns
-------
val : central pixel of image section (0 = not a node; 1 = node)
"""
ims=np.reshape(im,(3,3,3)) # convert image section to a 3x3x3 array
val=0
if(ims[1,1,1]==1): # if central pixel lies on filament...
ims[1,1,1]=0 # remove central pixel
iml,L=sp.ndimage.label(ims) # label remaining filaments
if(L!=0 and L!=2): # if there is one (set end node) or more than two filaments (set crossing node)...
val=1 # set node
return val
def connected_components(graph):
"""Compute connected components of graph after removal of edges with capacities below 50th percentile.
Parameters
----------
graph : original graph
Returns
-------
ca : list of sizes of connected components
"""
gc=graph.copy()
edges=gc.edges(data=True)
ec=1.0*np.array([d['capa'] for u,v,d in edges])
perc=np.percentile(ec,50.0)
for u,v,d in edges:
if d['capa']<=perc:
gc.remove_edge(u,v)
cc=nx.connected_components(gc)
ca=np.array([len(c) for c in cc])
return ca
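# Usage sketch (assumes every edge carries a 'capa' attribute, as the graphs built below do):
# sizes = connected_components(graph) # component sizes after dropping the weakest 50% of edges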
def path_lengths(graph):
"""Compute shortest path lengths.
Parameters
----------
graph : original graph
Returns
-------
dist : array of shortest path lengths
"""
dists=nx.all_pairs_dijkstra_path_length(graph,weight='lgth')
dist=np.array([[v for v in u.values()] for u in dists.values()])
dist=np.tril(dist)
dist[dist==0]=np.nan
return dist
def edge_angles(graph,pos,mask):
"""Compute distribution of angles between network edges and cell axis.
Parameters
----------
graph : original graph
pos : node positions
mask : binary array of cellular region of interest
Returns
-------
degs : list of angles between edges and cell axis
"""
c0,c1,vc,vd,an,rot=utils.mask2rot(mask) # compute angle of cell axis
degs=[]
for u,v,d in graph.edges(data=True): # for each edge...
degs.append(np.mod(utils.angle360(1.0*(pos[u]-pos[v]))+360.0-an,180.0)) # compute angle between edge and cell axis
return degs
def crossing_number(graph,pos):
"""Compute number of edge intersections per edge.
Parameters
----------
graph : original graph
pos : node positions
Returns
-------
cns : list of edge crossing numbers
"""
ee=np.array(graph.edges()) # get edges as an array of node pairs
edges=[]
cns=[]
for i,(n1,n2) in enumerate(graph.edges_iter()): # for each edge...
edge=np.array([[pos[n1][0],pos[n1][1]],[pos[n2][0],pos[n2][1]]]) # append edge as line segment
edges.append(edge)
for i,(n1,n2) in enumerate(graph.edges_iter()): # for each edge...
idx=(ee[:,0]!=n1)*(ee[:,1]!=n1)*(ee[:,0]!=n2)*(ee[:,1]!=n2) # exclude edge that share a node with the selected edge
idx[i]=False # exclude selected edge itself
edge=np.array([[pos[n1][0],pos[n1][1]],[pos[n2][0],pos[n2][1]]]) # treat edge as line segment
cross=utils.multi_line_intersect(np.array(edge),np.array(edges)[idx]) # check intersections of selected edge with remaining edges
cns.append(cross.sum()) # append crossing number of selected edge
return cns
#%%############################################################################# graph functions
def skeletonize_graph(imO,mask,sigma,block,small,factr):
"""Filter and skeletonize image of filament structures.
Parameters
----------
imO : original image
mask : binary array of cellular region of interest
sigma : width of tubeness filter and filament structures
block : block size of adaptive median filter
small : size of smallest components
factr : fraction of average intensity below which components are removed
Returns
-------
imR : image after application of tubeness filter
imA : filtered and skeletonized image
"""
imO-=imO[mask].min()
imO*=255.0/imO.max()
ly,lx,lz=imO.shape
imR=imO.copy()*0
imT=imO.copy()*0
for z in range(lz):
imR[:,:,z]=tube_filter(imO[:,:,z],sigma)
imT[:,:,z]=skimage.filters.threshold_adaptive(imR[:,:,z],block)
imS=skimage.morphology.skeletonize_3d(imT>0)
ones=np.ones((3,3,3))
imC=skimage.morphology.remove_small_objects(imS,small,connectivity=2)>0
for z in range(lz):
imC[:,:,z]=imC[:,:,z]*mask
imC=imC>0
imL,N=sp.ndimage.label(imC,structure=ones)
mean=imO[imC].mean()
means=[np.mean(imO[imL==n]) for n in range(1,N+1)]
imA=1.0*imC.copy()
for n in range(1,N+1):
if(means[n-1]<mean*factr):
imA[imL==n]=0
imA=skimage.morphology.remove_small_objects(imA>0,2,connectivity=8)
return imR,imA
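# Usage sketch (parameter values are illustrative, not prescribed by this module):
# imR, imA = skeletonize_graph(imO, mask, sigma=2.0, block=101, small=25, factr=0.5)
# imR is the tubeness-filtered stack, imA the binary skeleton restricted to the mask.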
def node_graph(imA,imG):
"""Construct image indicating background (=0), filaments (=1), and labeled nodes (>1).
Parameters
----------
imA : skeletonized image of filament structures
imG : Gaussian filtered image of filament structures
Returns
-------
imE : image indicating background, filaments, and nodes
"""
ones=np.ones((3,3,3)) # neighborhood structure of pixel
imM=sp.ndimage.generic_filter(imA,utils.node_find,footprint=ones,mode='constant',cval=0) # find nodes as endpoints or crossings of filaments
imN=utils.node_condense(imM,imG,ones) # condense neighboring nodes
imL=skimage.segmentation.relabel_sequential(imN)[0] # relabel nodes
imB,B=sp.ndimage.label(imA,structure=ones) # label components of skeletonized image
for b in range(1,B+1): # for each component...
no=np.max((imB==b)*(imL>0)) # if component does not contain node...
if(no==0):
imA[imB==b]=0 # remove component
imE=1*((imA+imL)>0)+imL # construct image indicating background (=0) filaments (=1) and labeled nodes (>1).
return imE
def make_graph(imE,imG):
"""Construct network representation from image of filament structures.
Parameters
----------
imE : image indicating background (=0), filaments (=1), and labeled nodes (>1)
imG : Gaussian filtered image of filament structures
Returns
-------
graph : network representation of filament structures
pos : node positions
"""
N=imE.max()-1 # number of nodes
sq2=np.sqrt(2.0) # distance between diagonal pixels
sq3=np.sqrt(3.0) # distance between space-diagonal (corner) pixels
diag=np.array([[[sq3,sq2,sq3],[sq2,1,sq2],[sq3,sq2,sq3]],[[sq2,1,sq2],[1,0,1],[sq2,1,sq2]],[[sq3,sq2,sq3],[sq2,1,sq2],[sq3,sq2,sq3]]]) # distance matrix of 3x3x3 neighborhood
pos=np.array(np.where(imE>1)).T[:,::-1].astype('int') # node positions
pos=pos[:,[1,2,0]] # change order of node positions (x,y,z)
imY=imE.copy() # array to propagate nodes
imL=1.0*(imE.copy()>0) # array to remember summed length of filament up to current position
imS=1.0*(imE.copy()>0) # array to remember summed intensity of filament up to current position
ly,lx,lz=imE.shape # get image dimensions
ys=(imY==1).sum() # get points in image which are neither background (=0), nor nodes (>1), but filament (=1)
while(ys>0): # while there is still "filament" in the image
c=np.transpose(np.where(imY>1)) # positions of node pixels (>1)
for y,x,z in c: # for each node pixel (>1)...
xmin,xmax=utils.bounds(x-1,0,lx),utils.bounds(x+2,0,lx) # consider 3x3x3 neighborhood around our pixel of interest which is cropped at the borders of the image
ymin,ymax=utils.bounds(y-1,0,ly),utils.bounds(y+2,0,ly)
zmin,zmax=utils.bounds(z-1,0,lz),utils.bounds(z+2,0,lz)
sec=imY[ymin:ymax,xmin:xmax,zmin:zmax] # get 3x3x3 neighborhood of node array
lgt=imL[ymin:ymax,xmin:xmax,zmin:zmax] # get 3x3x3 neighborhood of filament length array
stg=imS[ymin:ymax,xmin:xmax,zmin:zmax] # get 3x3x3 neighborhood of filament intensity array
imY[ymin:ymax,xmin:xmax,zmin:zmax]=np.where(sec==1,imY[y,x,z],sec) # if 3x3x3 neighborhood contains node (>1) set all filament pixels to this node index
imL[ymin:ymax,xmin:xmax,zmin:zmax]=np.where(lgt==1,diag[0:ymax-ymin,0:xmax-xmin,0:zmax-zmin]+imL[y,x,z],lgt) # if 3x3x3 neighborhood contains filament, increase straight/diagonal/room diagonal surrounding pixels in length array by 1/sqrt(2)/sqrt(3), respectively
imS[ymin:ymax,xmin:xmax,zmin:zmax]=np.where(stg==1,imG[y,x,z]+imS[y,x,z],stg) # if 3x3x3 neighborhood contains filament, increase intensity array by intensity of the original image
ys=(imY==1).sum() # compute remaining amount of filament
graph=nx.empty_graph(N,nx.MultiGraph()) # create empty multi graph
ys,xs,zs=np.where(imY>1) # get all labeled filament pixels
for y,x,z in zip(ys,xs,zs): # for each labeled filament pixel...
xy=imY[y,x,z] # get node index
xmin,xmax=utils.bounds(x-1,0,lx),utils.bounds(x+2,0,lx) # consider 3x3x3 neighborhood around our pixel of interest which is cropped at the borders of the image
ymin,ymax=utils.bounds(y-1,0,ly),utils.bounds(y+2,0,ly)
zmin,zmax=utils.bounds(z-1,0,lz),utils.bounds(z+2,0,lz)
sec=imY[ymin:ymax,xmin:xmax,zmin:zmax].flatten() # get 3x3x3 neighborhood of filament image
lgt=imL[ymin:ymax,xmin:xmax,zmin:zmax].flatten()
stg=imS[ymin:ymax,xmin:xmax,zmin:zmax].flatten()
for idx,i in enumerate(sec): # check all pixels in 3x3x3 neighborhood...
if(i!=xy and i>1): # if the center and neighboring pixels have different labels...
u,v=np.sort([xy-2,i-2]) # sort nodes to avoid adding bidirectional edges (A->B and B->A)
edist=sp.linalg.norm(pos[u]-pos[v]) # compute Euclidean distance between the corresponding nodes
fdist=imL[y,x,z]+lgt[idx] # compute sum of the two partial filament lengths
weight=imS[y,x,z]+stg[idx] # compute sum of the two partial filament intensities
weight=max(1e-9,weight) # set minimum edge weight
capa=1.0*weight/fdist # compute edge capacity as ratio of filament weight and length
lgth=1.0*fdist/weight # compute edge length as inverse capacity
conn=0 # set edge connectivity variable indicating that edge belongs to original, non-connected network
jump=0 # set edge jump variable indicating that edge belongs to original, non-periodic network
graph.add_edge(u,v,edist=edist,fdist=fdist,weight=weight,capa=capa,lgth=lgth,conn=conn,jump=jump) # add edge to network
return graph,pos
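# End-to-end extraction sketch (assumes imO and imG are 3D stacks and mask a 2D region
# of interest; arguments follow the functions above, values are not prescribed here):
# imR, imA = skeletonize_graph(imO, mask, sigma, block, small, factr)
# imE = node_graph(imA, imG)
# graph, pos = make_graph(imE, imG)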
def unify_graph(graph):
"""Project multigraph to simple graph.
Parameters
----------
graph : original graph
Returns
-------
graphz : simple graph
"""
graphz=nx.empty_graph(graph.number_of_nodes()) # construct new empty graph with the same number of nodes
for u,v,d in graph.edges(data=True): # for each edge in the multigraph...
edist=d['edist'] # get edge properties
fdist=d['fdist']
weight=d['weight']
capa=d['capa']
lgth=d['lgth']
conn=d['conn']
jump=d['jump']
multi=1 # set edge multiplicity to one
if graphz.has_edge(u,v): # if simple graph already contains the edge in question...
graphz[u][v]['multi']+=1.0 # increase edge multiplicity by one
graphz[u][v]['capa']+=capa # compute sum of edge capacities
if(graphz[u][v]['lgth']>lgth): # compute minimum of edge lengths
graphz[u][v]['lgth']=lgth
else:
graphz.add_edge(u,v,edist=edist,fdist=fdist,weight=weight,capa=capa,lgth=lgth,conn=conn,jump=jump,multi=multi) # add edge to simple graph otherwise
return graphz
def connect_graph(graph,pos,imG):
"""Connect graph by adding edges of minimum edge length.
Parameters
----------
graph : original graph
pos : node positions
imG : Gaussian filtered image of filament structures
Returns
-------
graphz : connected graph
"""
dists=sp.spatial.distance_matrix(pos,pos) # compute distance matrix between all node positions
graphz=graph.copy() # copy original graph
N=graphz.number_of_nodes() # get number of nodes
comp=nx.connected_components(graphz) # compute connected components
comp=sorted(comp,key=len)[::-1] # sort connected components in descending order according to size
while len(comp)>1: # while network is disconnected...
compo=comp[0] # get nodes in largest component
compl=list(compo)
compi=list(set(range(N)).difference(compo)) # get remaining nodes
dist=dists[compl][:,compi] # get distance matrix between nodes of largest component and remaining network
n0,ni=np.unravel_index(dist.argmin(),dist.shape) # find pair of nodes with minimum distance
p0,pi=pos[compl][n0],pos[compi][ni]
edist=sp.linalg.norm(p0-pi) # compute distance between nodes
edist=max(1.0,edist) # set minimum distance between nodes
fdist=1.0*np.ceil(edist) # approximate filament length by rounding node distance
aa=np.array([p0[0],p0[1],pi[0],pi[1]]) # draw line between nodes
yy,xx=skimage.draw.line(*aa.astype('int'))
zz=(np.linspace(p0[2],pi[2],len(xx))).astype('int')
weight=np.sum(imG[xx,yy,zz]) # compute edge weight as image intensity along line
weight=max(1e-9,weight) # set minimum edge weight
capa=1.0*weight/fdist # compute edge capacity as ratio of filament weight and length
lgth=1.0*fdist/weight # compute edge length as inverse capacity
conn=1 # set edge connectivity variable indicating that edge belongs to new, connected network
jump=0 # set edge jump variable indicating that edge belongs to original, non-periodic network
multi=1 # set edge multiplicity variable
graphz.add_edge(compi[ni],compl[n0],edist=edist,fdist=fdist,weight=weight,capa=capa,lgth=lgth,conn=conn,jump=jump,multi=multi) # add edge to network
comp=nx.connected_components(graphz) # compute connected components
comp=sorted(comp,key=len)[::-1] # sort connected components in descending order according to size
return graphz
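# Usage sketch (continues the extraction sketch above):
# cgraph = connect_graph(unify_graph(graph), pos, imG) # guarantees a single connected component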
def randomize_graph(graph,pos,mask,planar=0,weights=0,iterations=1000):
"""Randomize graph by shuffling node positions and edges or edge capacities only.
Parameters
----------
graph : original graph
pos : node positions
mask : binary array of cellular region of interest
planar : ignore edge crossings (=0) or favor planar graph by reducing number of edge crossings (=1)
weights : shuffle only edge capacities (=0) or node positions and edges (=1)
iterations : number of iterations before returning original graph
Returns
-------
graphz : randomized graph
poz : randomized node positions
"""
if(weights==0): # if shuffling of edge capacities only...
ec=np.array([d for u,v,d in graph.edges(data=True)]) # get edge properties
random.shuffle(ec) # shuffle edge capacities
graphz=graph.copy() # copy graph
for j,(u,v,d) in enumerate(graphz.edges(data=True)): # for each edge...
for k in d.keys(): # copy shuffled edge properties
d[k]=ec[j][k]
poz=pos # copy node positions
else: # shuffling of node positions and edges otherwise
N=graph.number_of_nodes() # get node number
E=graph.number_of_edges() # get edge number
graphz=nx.empty_graph(N,nx.MultiGraph()) # create new, empty multigraph
diste=np.array([d['edist'] for u,v,d in graph.edges(data=True)]) # get Euclidean edge lengths
bins=[0,10,20,30,40,50,60,70,80,90,100,110,120,130,140,150,160,170,180,190,200,9999] # set bin boundaries for edge lengths
B=len(bins)-1 # get number of bins
dibse=np.zeros(E).astype('int') # create array for assigning bin numbers to edges
for i,(b1,b2) in enumerate(zip(bins[:-1],bins[1:])): # for each bin...
ide=(diste>=b1)*(diste<b2) # get edges with Euclidean lengths in the given bin
dibse[ide]=i # assign bin number to edges
eweight=np.array([d['weight'] for u,v,d in graph.edges(data=True)]) # get edge weights
ecapa=np.array([d['capa'] for u,v,d in graph.edges(data=True)]) # get edge capacities
redo=1 # variable indicating that no suitable randomization was obtained yet
iteration=0 # number of iterations
while(redo==1 and iteration<iterations): # while neither a suitable randomization nor the number of allowed iterations were reached yet...
iteration+=1 # increase iteration by one
poz=utils.cell_sample(mask,N)[:,::-1].astype('int') # shuffle xy-components of node positions
zzz=pos[:,2] # keep z-component of node positions
poz=np.vstack([poz.T,zzz]).T # merge xyz-components of node positions
dista=scipy.spatial.distance_matrix(poz,poz) # compute distance matrix between new node positions
dibsa=np.zeros((N,N)).astype('int') # assign bin numbers to all new, potential edges
for i,(b1,b2) in enumerate(zip(bins[:-1],bins[1:])):
ida=(dista>=b1)*(dista<b2)
dibsa[ida]=i
dibsa[np.tri(N)>0]=-9999 # set lower part of the bin number matrix to a negative number to exclude loops (A->A) and bidirectional edges (A->B and B->A)
redo=1*np.max([(dibsa==b).sum()<(dibse==b).sum() for b in range(B)]) # check that each original edge can be accommodated given the new node positions
if(iteration<iterations): # if the number of allowed iterations was not reached yet...
isort=np.argsort(diste)[::-1] # sort bin assignments, edge weights, and edge capacities by Euclidean length
diste=diste[isort]
dibse=dibse[isort]
eweight=eweight[isort]
ecapa=ecapa[isort]
edges=[] # list of added edges
for e in range(E): # for each edge...
candidates=np.where(dibsa==dibse[e]) # get candidate pairs of new nodes whose distance matches the Euclidean length of the selected edge
C=len(candidates[0]) # get number of candidate pairs
cromm=9999 # dummy variable for number of edge crossings
ii=random.sample(range(C),min(50,C)) # select up to 50 candidate pairs
for i in ii: # for each candidate pair...
n1=candidates[0][i] # get nodes
n2=candidates[1][i]
edge=np.array([[poz[n1][0],poz[n2][0]],[poz[n1][1],poz[n2][1]]]).T # create line segment between candidate nodes
cross=planar*utils.multi_line_intersect(np.array(edge),np.array(edges)).sum() # compute number of line segment crossings with existing edges
if(cross<cromm and dibsa[n1,n2]>=0): # if edge is allowed and number of crossings is smaller than for previous candidates...
cromm=cross # store crossing number
edgem=edge # store edge
m1,m2=n1,n2 # store nodes
edges.append(edgem) # add edge to list of edges
edist=dista[m1,m2] # set Euclidean distance
fdist=1.0*np.ceil(edist) # approximate filament length by rounding node distance
weight=eweight[e] # set edge weight
capa=ecapa[e] # set edge capacity
lgth=1.0*fdist/weight # compute edge length as inverse capacity
conn=0 # set edge connectivity variable indicating that edge belongs to randomized, non-connected network
jump=0 # set edge jump variable indicating that edge belongs to randomized, non-periodic network
multi=1 # set edge multiplicity variable
graphz.add_edge(m1,m2,edist=edist,fdist=fdist,weight=weight,capa=capa,lgth=lgth,conn=conn,jump=jump,multi=multi) # add edge to network
dibsa[m1,m2]=-9999 # remove edge from allowed edges
dibsa[m2,m1]=-9999
else:
graphz,poz=graph,pos # copy original network and node positions otherwise
return graphz,poz
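# Usage sketch: build a null-model network for comparison (arguments as documented above):
# rgraph, rpos = randomize_graph(cgraph, pos, mask, planar=1, weights=1)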
def centralize_graph(graph,epb='lgth',efb='capa',ndg='capa',nec='capa',npr='capa'):
"""Compute edge centralities.
Parameters
----------
graph : original graph
epb : edge property used for computation of edge path betweenness
efb : " flow betweenness
ndg : " degree centrality
nec : " eigenvector centrality
npr : " page rank
Returns
-------
graphz : graph with computed edge centralities
"""
graphz=graph.copy() # copy graph
edges=graphz.edges(data=True) # get edge capacities
ec=1.0*np.array([d['capa'] for u,v,d in edges])
ec/=ec.sum() # normalize edge capacities
el=1.0/ec
for i,(u,v,d) in enumerate(edges): # update edge capacities and lengths
d['capa']=ec[i]
d['lgth']=el[i]
epb=nx.edge_betweenness_centrality(graphz,weight=epb) # compute edge path betweenness
efb=nx.edge_current_flow_betweenness_centrality(graphz,weight=efb) # compute edge flow betweenness
lineg=nx.line_graph(graphz) # compute line graph
degree=graphz.degree(weight=ndg) # get capacity weighted edge degree
for u,v,d in lineg.edges(data=True): # set edge capacity of linegraph to node degree of original graph
n=list(set(u).intersection(v))[0]
d[ndg]=degree[n]
nec=nx.eigenvector_centrality_numpy(lineg,weight=ndg) # compute edge degree, eigenvector, and page rank centrality
npr=nx.pagerank(lineg,weight=ndg)
ndg=lineg.degree(weight=ndg)
for i,(u,v,d) in enumerate(edges): # set edge centralities
e=(u,v)
if(e in epb.keys()):
d['epb']=epb[e]
else:
d['epb']=epb[e[::-1]]
if(e in efb.keys()):
d['efb']=efb[e]
else:
d['efb']=efb[e[::-1]]
if(e in ndg.keys()):
d['ndg']=ndg[e]
else:
d['ndg']=ndg[e[::-1]]
if(e in nec.keys()):
d['nec']=nec[e]
else:
d['nec']=nec[e[::-1]]
if(e in npr.keys()):
d['npr']=npr[e]
else:
d['npr']=npr[e[::-1]]
return graphz
def normalize_graph(graph):
"""Normalize edge properties.
Parameters
----------
graph : original graph
Returns
-------
graph : graph with normalized edge properties
"""
ec=1.0*np.array([d['capa'] for u,v,d in graph.edges(data=True)])
ec/=ec.sum()
el=1.0/ec
el/=el.sum()
epb=1.0*np.array([d['epb'] for u,v,d in graph.edges(data=True)])
epb/=epb.sum()
efb=1.0*np.array([d['efb'] for u,v,d in graph.edges(data=True)])
efb/=efb.sum()
ndg=1.0*np.array([d['ndg'] for u,v,d in graph.edges(data=True)])
ndg/=ndg.sum()
nec=1.0*np.array([d['nec'] for u,v,d in graph.edges(data=True)])
nec/=nec.sum()
npr=1.0*np.array([d['npr'] for u,v,d in graph.edges(data=True)])
npr/=npr.sum()
for i,(u,v,d) in enumerate(graph.edges(data=True)):
d['capa']=ec[i]
d['lgth']=el[i]
d['epb']=epb[i]
d['efb']=efb[i]
d['ndg']=ndg[i]
d['nec']=nec[i]
d['npr']=npr[i]
return graph
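# Usage sketch: annotate edges with centralities, then normalize the edge properties.
# ggraph = normalize_graph(centralize_graph(cgraph))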
def boundary_graph(jnet,graph,pos,SP,SL,JV,JH,imG,dthres=10.0,jthres=2.5):
"""Generate graph with periodic boundary conditions.
Parameters
----------
jnet : jump network
graph : original graph
pos : node positions
SP : shortest paths
SL : shortest path lengths
JV : number of vertical jumps along shortest path
JH : number of horizontal jumps along shortest path
imG : Gaussian filtered image of filament structures
Returns
-------
graphz : graph with periodic boundary conditions
"""
B=jnet.number_of_nodes() # get number of nodes of jump network
C=np.tril((SL<dthres)*((JV+JH)>0)*((JV+JH)<jthres))[B:,B:] # get pairs of nodes in the jump network that are less than dthres apart and whose shortest path uses more than zero but fewer than jthres jumps
wh=np.array(np.where(C)).T
graphz=nx.MultiGraph(graph.copy()) # create new, empty multigraph
for idx,(w1,w2) in enumerate(wh): # for each pair of nodes, i.e., each potential edge...
path=SP[B+w1][B+w2] # get shortest path between selected nodes
pairs=zip(path[0:],path[1:])
weight=0.0
for n0,n1 in pairs: # for each edge along path...
if(jnet[n0][n1]['jump']==0): # if it is not a jump edge...
rr,cc=skimage.draw.line(pos[n0][1],pos[n0][0],pos[n1][1],pos[n1][0]) # draw line along edge
weight+=imG[cc,rr].sum() # add edge weight as sum of intensities in the underlying image along the line
edist=SL[B+w1,B+w2] # set edge Euclidean length
edist=max(1.0,edist)
fdist=1.0*np.ceil(edist) # approximate filament arc length
weight=max(1e-9,weight)
capa=1.0*weight/fdist # compute edge capacity as ratio of filament weight and length
lgth=1.0*fdist/weight # compute edge length as inverse capacity
conn=0 # set edge connectivity variable indicating that edge belongs to periodic, non-connected network
jump=1 # set edge jump variable indicating that edge belongs to periodic network
multi=1 # set edge multiplicity variable
graphz.add_edge(w2,w1,edist=edist,fdist=fdist,weight=weight,capa=capa,lgth=lgth,conn=conn,jump=jump,multi=multi) # add edge
return graphz
def compute_graph(graph,pos,mask):
"""Compute graph properties.
Parameters
----------
graph : original graph
pos : node positions
mask : binary array of cellular region of interest
Returns
-------
quanta : list of graph properties
"""
N=graph.number_of_nodes() # number of nodes
E=graph.number_of_edges() # number of edges
ca=utils.connected_components(graph) # compute sizes of connected components
C=len(ca) # number of connected components
ec=1.0*np.array([d['capa'] for u,v,d in graph.edges(data=True)]) # get edge capacities
bund=np.nanmean(ec) # compute average edge capacity ('bundling')
assort=nx.degree_pearson_correlation_coefficient(graph,weight='capa') # compute assortativity ('heterogeneity')
dist=utils.path_lengths(graph) # compute shortest path lengths
distMU=np.nanmean(dist) # compute average path length ('reachability')
distSD=np.nanstd(dist) # compute standard deviation of path lengths
distCV=1.0*distSD/distMU # compute coefficient of variation of path lengths ('dispersal')
ac=np.sort(nx.laplacian_spectrum(graph,weight='capa'))[1] # compute algebraic connectivity ('robustness')
degs=utils.edge_angles(graph,pos[:,:2],mask) # compute edge angles relative to cell axis
angleMU=np.nanmean(degs) # compute average angle
angleSD=np.nanstd(degs) # compute standard deviation of angles
angleCV=1.0*angleSD/angleMU # compute coefficient of variation of angles ('contortion')
cns=utils.crossing_number(graph,pos[:,:2]) # compute number of edge crossings per edge
crossing=np.nanmean(cns) # compute average crossing number
quants=['# nodes','# edges','# connected components','avg. edge capacity','assortativity','avg. path length','CV path length','algebraic connectivity','CV edge angles','crossing number'] # list of graph property names
quanta=[N,E,C,bund,assort,distMU,distCV,ac,angleCV,crossing] # list of graph properties
return quanta
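# Usage sketch: the returned list lines up with the `quants` labels defined inside the
# function, so a labeled summary could be built as (illustrative only):
# quanta = compute_graph(ggraph, pos, mask)
# summary = dict(zip(['# nodes','# edges','# connected components','avg. edge capacity',
# 'assortativity','avg. path length','CV path length','algebraic connectivity',
# 'CV edge angles','crossing number'], quanta))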
#%%############################################################################# periodic functions
def mask2rot(mask):
"""Compute main axis of cellular region of interest.
Parameters
----------
mask : binary array of cellular region of interest
Returns
-------
c0,c1 : coordinates along cell axis
vc,vd : center point and direction vector of cell axis
angle : angle between y-axis and main cell axis
rot : rotation matrix
"""
line=skimage.morphology.skeletonize(mask) # skeletonize mask
co=np.array(np.where(line>0)).T[:,::-1] # get coordinates of skeleton line
L=int(len(co)*0.2) # get points 20% and 80% along the cell axis
c0=co[L]
c1=co[-L]
vc=co[int(len(co)*0.5)] # get center point and direction vector of cell axis
vd=c0-c1
angle=utils.angle360(vd) # compute angle of cell axis
angli=angle*np.pi/180.0 # convert angle to radian
rot=np.array([[np.cos(angli),-np.sin(angli)],[np.sin(angli),np.cos(angli)]]) # compute rotation matrix
return c0,c1,vc,vd,angle,rot
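# Usage sketch (assumes `mask` is a 2D boolean cell mask):
# c0, c1, vc, vd, angle, rot = mask2rot(mask) # axis end points, center, direction, angle, rotation matrix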
def mask2poly(mask):
"""Convert cellular region of interest to polygon.
Parameters
----------
mask : binary array of cellular region of interest
Returns
-------
polya : original polygon
polyn : rotated polygon aligned with y-axis
"""
maski=sp.ndimage.minimum_filter(mask,3,mode='constant',cval=0) # shrink mask
polya=skimage.measure.find_contours(maski,0)[0] # find contours
polya=skimage.measure.approximate_polygon(polya,tolerance=0.0) # approximate polygon
polya=1.0*remove_duplicates(polya) # remove duplicate points
c0,c1,vc,vd,an,rot=mask2rot(maski) # compute cell axis
polyn=np.dot(polya,rot) # rotate polygon
return polya[:,::-1],polyn[:,::-1]
def pbc_jnet_border(polyn):
"""Compute border of jump network.
Parameters
----------
polyn : rotated polygon of cellular region of interest
Returns
-------
graph : border of jump network
"""
polyi=1.0*polyn.astype('int') # convert coordinates to integers
polys=shapely.geometry.Polygon(polyi) # convert polygon to shapely polygon
B=len(polyi) # get number of polygon points
graph=nx.empty_graph(B) # create new, empty graph
for i in range(2): # for both x- and y-components...
bx=polyi[:,i] # get coordinate
for idx,x in enumerate(set(bx)): # for each distinct coordinate value
yy=np.sort(np.where(x==bx)[0]) # get other points with same coordinate
Y=len(yy)
for y in range(Y-1): # for each other point with same coordinate
y1,y2=yy[y],yy[y+1]
line=shapely.geometry.LineString([polyi[y1],polyi[y2]]) # draw line between the two selected points
if(line.within(polys)): # if the line is fully contained within the polygon...
graph.add_edge(y1,y2,weight=0.0,jump=0.001**i) # add the line to the network (jump=0.001**i, i.e. 1.0 for i=0 and 0.001 for i=1, encoding the axis of the jump)
distb=sp.spatial.distance_matrix(polyn,polyn) # compute distance matrix between point of polygon
for b1 in range(B): # for each point along polygon
b2=np.mod(b1+1,B)
graph.add_edge(b1,b2,weight=distb[b1,b2],jump=0.0) # add edge to neighboring point along the polygon
return graph
def pbc_jnet_interior(pos,polya,jborder,cthres=10.0):
"""Compute interier of jump network.
Parameters
----------
pos : node positions
polya : original polygon of cellular region of interest
jborder : border of jump network
cthres : maximum edge length between nodes of original network and border of jump network
Returns
-------
jnet : complete jump network
SP : dictionary of shortest paths
SL : array of shortest path lengths
JV : number of vertical jumps along shortest paths
JH : number of horizontal jumps along shortest paths
"""
jnet=jborder.copy() # copy border of jump network
B=jnet.number_of_nodes() # get number of nodes
distn=sp.spatial.distance_matrix(pos,polya) # compute distances between node positions and border of jump network
for n in range(len(pos)): # for each node...
jnet.add_node(B+n) # add node to jump network
for e in np.where(distn[n]<cthres)[0]: # add edge if node is close enough to border of jump network
jnet.add_edge(B+n,e,weight=distn[n,e],jump=0.0)
for n in range(len(pos)): # for each node...
if(jnet.degree(B+n)==0): # add dummy edge to make network connected if node is disconnected
jnet.add_edge(B+n,0,weight=9999.0,jump=0.0)
SP=utils.all_pairs_dijkstra_path(jnet,weight='weight',jump='jump') # compute all shortest path in jump network
SX=utils.all_pairs_dijkstra_path_length(jnet,weight='weight',jump='jump') # compute all shortest path lengths in jump network
SL=1.0*np.array([[d1 for d1 in d2[0].values()] for d2 in SX.values()]) # array of shortest path lengths
SJ=1.0*np.array([[d1 for d1 in d2[1].values()] for d2 in SX.values()]) # array of jump sizes
JV=np.floor(SJ+0.5) # get number of vertical jumps
JH=np.floor(np.mod(SJ,1.0)*1000.0+0.5) # get number of horizontal jumps
return jnet,SP,SL,JV,JH
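# Usage sketch for the periodic-boundary pipeline (continues the sketches above; the
# pos[:, :2] slice is an assumption to pass 2D node positions to the distance matrix):
# polya, polyn = mask2poly(mask)
# jborder = pbc_jnet_border(polyn)
# jnet, SP, SL, JV, JH = pbc_jnet_interior(pos[:, :2], polya, jborder)
# pgraph = boundary_graph(jnet, cgraph, pos, SP, SL, JV, JH, imG)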
#%%############################################################################# NetworkX: shortest path algorithms for weighed graphs
# -*- coding: utf-8 -*-
#"""
#Shortest path algorithms for weighed graphs.
#"""
#__author__ = """\n""".join(['Aric Hagberg <[email protected]>',
# 'Loïc Séguin-C. <[email protected]>',
# 'Dan Schult <[email protected]>'])
## Copyright (C) 2004-2011 by
## Aric Hagberg <[email protected]>
## Dan Schult <[email protected]>
## Pieter Swart <[email protected]>
## All rights reserved.
## BSD license.
#
#__all__ = ['dijkstra_path',
# 'dijkstra_path_length',
# 'bidirectional_dijkstra',
# 'single_source_dijkstra',
# 'single_source_dijkstra_path',
# 'single_source_dijkstra_path_length',
# 'all_pairs_dijkstra_path',
# 'all_pairs_dijkstra_path_length',
# 'dijkstra_predecessor_and_distance',
# 'bellman_ford','negative_edge_cycle']
import heapq
import networkx as nx
from networkx.utils import generate_unique_node
def dijkstra_path(G, source, target, weight='weight',jump= 'jump'):
"""Returns the shortest path from source to target in a weighted graph G.
Parameters
----------
G : NetworkX graph
source : node
Starting node
target : node
Ending node
weight: string, optional (default='weight')
Edge data key corresponding to the edge weight
Returns
-------
path : list
List of nodes in a shortest path.
Raises
------
NetworkXNoPath
If no path exists between source and target.
Examples
--------
>>> G=nx.path_graph(5)
>>> print(nx.dijkstra_path(G,0,4))
[0, 1, 2, 3, 4]
Notes
------
Edge weight attributes must be numerical.
Distances are calculated as sums of weighted edges traversed.
See Also
--------
bidirectional_dijkstra()
"""
(length,path)=single_source_dijkstra(G, source, target=target,
weight=weight,jump=jump)
try:
return path[target]
except KeyError:
raise nx.NetworkXNoPath("node %s not reachable from %s"%(source,target))
def dijkstra_path_length(G, source, target, weight='weight',jump= 'jump'):
"""Returns the shortest path length from source to target
in a weighted graph.
Parameters
----------
G : NetworkX graph
source : node label
starting node for path
target : node label
ending node for path
weight: string, optional (default='weight')
Edge data key corresponding to the edge weight
Returns
-------
length : number
Shortest path length.
Raises
------
NetworkXNoPath
If no path exists between source and target.
Examples
--------
>>> G=nx.path_graph(5)
>>> print(nx.dijkstra_path_length(G,0,4))
4
Notes
-----
Edge weight attributes must be numerical.
Distances are calculated as sums of weighted edges traversed.
See Also
--------
bidirectional_dijkstra()
"""
length=single_source_dijkstra_path_length(G, source, weight=weight,jump= jump)
try:
return length[target]
except KeyError:
raise nx.NetworkXNoPath("node %s not reachable from %s"%(source,target))
def single_source_dijkstra_path(G,source, cutoff=None, weight='weight',jump= 'jump'):
"""Compute shortest path between source and all other reachable
nodes for a weighted graph.
Parameters
----------
G : NetworkX graph
source : node
Starting node for path.
weight: string, optional (default='weight')
Edge data key corresponding to the edge weight
cutoff : integer or float, optional
Depth to stop the search. Only paths of length <= cutoff are returned.
Returns
-------
paths : dictionary
Dictionary of shortest path lengths keyed by target.
Examples
--------
>>> G=nx.path_graph(5)
>>> path=nx.single_source_dijkstra_path(G,0)
>>> path[4]
[0, 1, 2, 3, 4]
Notes
-----
Edge weight attributes must be numerical.
Distances are calculated as sums of weighted edges traversed.
See Also
--------
single_source_dijkstra()
"""
(length,path)=single_source_dijkstra(G,source, weight = weight,jump= jump)
return path
def single_source_dijkstra_path_length(G, source, cutoff= None,
weight= 'weight',jump= 'jump'):
"""Compute the shortest path length between source and all other
reachable nodes for a weighted graph.
Parameters
----------
G : NetworkX graph
source : node label
Starting node for path
weight: string, optional (default='weight')
Edge data key corresponding to the edge weight.
cutoff : integer or float, optional
Depth to stop the search. Only paths of length <= cutoff are returned.
Returns
-------
(length, jumps) : tuple of dictionaries
Shortest path lengths and accumulated jump counts, each keyed by target
(this modified variant returns a tuple, unlike the stock NetworkX function).
Examples
--------
>>> G=nx.path_graph(5)
>>> length=nx.single_source_dijkstra_path_length(G,0)
>>> length[4]
4
>>> print(length)
{0: 0, 1: 1, 2: 2, 3: 3, 4: 4}
Notes
-----
Edge weight attributes must be numerical.
Distances are calculated as sums of weighted edges traversed.
See Also
--------
single_source_dijkstra()
"""
dist = {} # dictionary of final distances
jumq={}
seen = {source:0}
fringe=[] # use heapq with (distance,label) tuples
heapq.heappush(fringe,(0,source,0))
while fringe:
(d,v,j)=heapq.heappop(fringe)
if v in dist:
continue # already searched this node.
dist[v] = d
jumq[v] = j#jumq[v]+vw_jumq
#for ignore,w,edgedata in G.edges_iter(v,data=True):
#is about 30% slower than the following
if G.is_multigraph():
edata=[]
for w,keydata in G[v].items():
minweight=min((dd.get(weight,1)
for k,dd in keydata.items()))
edata.append((w,{weight:minweight}))
else:
edata=iter(G[v].items())
for w,edgedata in edata:
vw_jumq = jumq[v] + edgedata.get(jump,1)
ddist=edgedata.get(weight,1)
vw_dist = dist[v] + ddist
if(vw_dist<9999.0):
if(int(vw_jumq)>1 or int(vw_jumq%1.0*1000.0+0.5)>1):
ddist=9999.0
vw_dist = dist[v] + ddist
if cutoff is not None:
if vw_dist>cutoff:
continue
if w in dist:
if vw_dist < dist[w]:
raise ValueError('Contradictory paths found:',
'negative weights?')
elif w not in seen or vw_dist < seen[w]:
seen[w] = vw_dist
heapq.heappush(fringe,(vw_dist,w,vw_jumq))
return dist,jumq
def single_source_dijkstra(G,source,target=None,cutoff=None,weight='weight',jump='jump'):
"""Compute shortest paths and lengths in a weighted graph G.
Uses Dijkstra's algorithm for shortest paths.
Parameters
----------
G : NetworkX graph
source : node label
Starting node for path
target : node label, optional
Ending node for path
cutoff : integer or float, optional
Depth to stop the search. Only paths of length <= cutoff are returned.
Returns
-------
distance,path : dictionaries
Returns a tuple of two dictionaries keyed by node.
The first dictionary stores distance from the source.
The second stores the path from the source to that node.
Examples
--------
>>> G=nx.path_graph(5)
>>> length,path=nx.single_source_dijkstra(G,0)
>>> print(length[4])
4
>>> print(length)
{0: 0, 1: 1, 2: 2, 3: 3, 4: 4}
>>> path[4]
[0, 1, 2, 3, 4]
Notes
---------
Edge weight attributes must be numerical.
Distances are calculated as sums of weighted edges traversed.
Based on the Python cookbook recipe (119466) at
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/119466
This algorithm is not guaranteed to work if edge weights
are negative or are floating point numbers
(overflows and roundoff errors can cause problems).
See Also
--------
single_source_dijkstra_path()
single_source_dijkstra_path_length()
"""
if source==target:
return ({source:0}, {source:[source]})
dist = {} # dictionary of final distances
paths = {source:[source]} # dictionary of paths
seen = {source:0}
fringe=[] # use heapq with (distance,label) tuples
heapq.heappush(fringe,(0,source))
while fringe:
(d,v)=heapq.heappop(fringe)
if v in dist:
continue # already searched this node.
dist[v] = d
if v == target:
break
#for ignore,w,edgedata in G.edges_iter(v,data=True):
#is about 30% slower than the following
if G.is_multigraph():
edata=[]
for w,keydata in G[v].items():
minweight=min((dd.get(weight,1)
for k,dd in keydata.items()))
edata.append((w,{weight:minweight}))
else:
edata=iter(G[v].items())
for w,edgedata in edata:
vw_dist = dist[v] + edgedata.get(weight,1)
if cutoff is not None:
if vw_dist>cutoff:
continue
if w in dist:
if vw_dist < dist[w]:
raise ValueError('Contradictory paths found:',
'negative weights?')
elif w not in seen or vw_dist < seen[w]:
seen[w] = vw_dist
heapq.heappush(fringe,(vw_dist,w))
paths[w] = paths[v]+[w]
return (dist,paths)
def dijkstra_predecessor_and_distance(G,source, cutoff=None, weight='weight'):
"""Compute shortest path length and predecessors on shortest paths
in weighted graphs.
Parameters
----------
G : NetworkX graph
source : node label
Starting node for path
weight: string, optional (default='weight')
Edge data key corresponding to the edge weight
cutoff : integer or float, optional
Depth to stop the search. Only paths of length <= cutoff are returned.
Returns
-------
pred,distance : dictionaries
Returns two dictionaries representing a list of predecessors
of a node and the distance to each node.
Notes
-----
Edge weight attributes must be numerical.
Distances are calculated as sums of weighted edges traversed.
The list of predecessors contains more than one element only when
there are more than one shortest paths to the key node.
"""
push=heapq.heappush
pop=heapq.heappop
dist = {} # dictionary of final distances
pred = {source:[]} # dictionary of predecessors
seen = {source:0}
fringe=[] # use heapq with (distance,label) tuples
push(fringe,(0,source))
while fringe:
(d,v)=pop(fringe)
if v in dist: continue # already searched this node.
dist[v] = d
if G.is_multigraph():
edata=[]
for w,keydata in G[v].items():
minweight=min((dd.get(weight,1)
for k,dd in keydata.items()))
edata.append((w,{weight:minweight}))
else:
edata=iter(G[v].items())
for w,edgedata in edata:
vw_dist = dist[v] + edgedata.get(weight,1)
if cutoff is not None:
if vw_dist>cutoff:
continue
if w in dist:
if vw_dist < dist[w]:
raise ValueError('Contradictory paths found:',
'negative weights?')
elif w not in seen or vw_dist < seen[w]:
seen[w] = vw_dist
push(fringe,(vw_dist,w))
pred[w] = [v]
elif vw_dist==seen[w]:
pred[w].append(v)
return (pred,dist)
def all_pairs_dijkstra_path_length(G, cutoff=None, weight='weight',jump= 'jump'):
""" Compute shortest path lengths between all nodes in a weighted graph.
Parameters
----------
G : NetworkX graph
weight: string, optional (default='weight')
Edge data key corresponding to the edge weight
cutoff : integer or float, optional
Depth to stop the search. Only paths of length <= cutoff are returned.
Returns
-------
distance : dictionary
Dictionary, keyed by source and target, of shortest path lengths.
Examples
--------
>>> G=nx.path_graph(5)
>>> length=nx.all_pairs_dijkstra_path_length(G)
>>> print(length[1][4])
3
>>> length[1]
{0: 1, 1: 0, 2: 1, 3: 2, 4: 3}
Notes
-----
Edge weight attributes must be numerical.
Distances are calculated as sums of weighted edges traversed.
The dictionary returned only has keys for reachable node pairs.
"""
paths={}
for n in G:
paths[n]=single_source_dijkstra_path_length(G,n, cutoff=cutoff,
weight=weight,jump=jump)
return paths
def all_pairs_dijkstra_path(G, cutoff=None, weight='weight',jump='jump'):
""" Compute shortest paths between all nodes in a weighted graph.
Parameters
----------
G : NetworkX graph
weight: string, optional (default='weight')
Edge data key corresponding to the edge weight
cutoff : integer or float, optional
Depth to stop the search. Only paths of length <= cutoff are returned.
Returns
-------
distance : dictionary
Dictionary, keyed by source and target, of shortest paths.
Examples
--------
>>> G=nx.path_graph(5)
>>> path=nx.all_pairs_dijkstra_path(G)
>>> print(path[0][4])
[0, 1, 2, 3, 4]
Notes
-----
Edge weight attributes must be numerical.
Distances are calculated as sums of weighted edges traversed.
See Also
--------
floyd_warshall()
"""
paths={}
for n in G:
paths[n]=single_source_dijkstra_path(G, n, cutoff=cutoff,
weight=weight,jump=jump)
return paths
def bellman_ford(G, source, weight = 'weight'):
"""Compute shortest path lengths and predecessors on shortest paths
in weighted graphs.
The algorithm has a running time of O(mn) where n is the number of
nodes and m is the number of edges. It is slower than Dijkstra but
can handle negative edge weights.
Parameters
----------
G : NetworkX graph
The algorithm works for all types of graphs, including directed
graphs and multigraphs.
source: node label
Starting node for path
weight: string, optional (default='weight')
Edge data key corresponding to the edge weight
Returns
-------
pred, dist : dictionaries
Returns two dictionaries keyed by node to predecessor in the
path and to the distance from the source respectively.
Raises
------
NetworkXUnbounded
If the (di)graph contains a negative cost (di)cycle, the
algorithm raises an exception to indicate the presence of the
negative cost (di)cycle. Note: any negative weight edge in an
undirected graph is a negative cost cycle.
Examples
--------
>>> import networkx as nx
>>> G = nx.path_graph(5, create_using = nx.DiGraph())
>>> pred, dist = nx.bellman_ford(G, 0)
>>> pred
{0: None, 1: 0, 2: 1, 3: 2, 4: 3}
>>> dist
{0: 0, 1: 1, 2: 2, 3: 3, 4: 4}
>>> from nose.tools import assert_raises
>>> G = nx.cycle_graph(5, create_using = nx.DiGraph())
>>> G[1][2]['weight'] = -7
>>> assert_raises(nx.NetworkXUnbounded, nx.bellman_ford, G, 0)
Notes
-----
Edge weight attributes must be numerical.
Distances are calculated as sums of weighted edges traversed.
The dictionaries returned only have keys for nodes reachable from
the source.
In the case where the (di)graph is not connected, if a component
not containing the source contains a negative cost (di)cycle, it
will not be detected.
"""
if source not in G:
raise KeyError("Node %s is not found in the graph"%source)
numb_nodes = len(G)
dist = {source: 0}
pred = {source: None}
if numb_nodes == 1:
return pred, dist
if G.is_multigraph():
def get_weight(edge_dict):
return min([eattr.get(weight,1) for eattr in edge_dict.values()])
else:
def get_weight(edge_dict):
return edge_dict.get(weight,1)
for i in range(numb_nodes):
no_changes=True
# Only need edges from nodes in dist b/c all others have dist==inf
for u, dist_u in list(dist.items()): # get all edges from nodes in dist
for v, edict in G[u].items(): # double loop handles undirected too
dist_v = dist_u + get_weight(edict)
if v not in dist or dist[v] > dist_v:
dist[v] = dist_v
pred[v] = u
no_changes = False
if no_changes:
break
else:
raise nx.NetworkXUnbounded("Negative cost cycle detected.")
return pred, dist
def negative_edge_cycle(G, weight = 'weight'):
"""Return True if there exists a negative edge cycle anywhere in G.
Parameters
----------
G : NetworkX graph
weight: string, optional (default='weight')
Edge data key corresponding to the edge weight
Returns
-------
negative_cycle : bool
True if a negative edge cycle exists, otherwise False.
Examples
--------
>>> import networkx as nx
>>> G = nx.cycle_graph(5, create_using = nx.DiGraph())
>>> print(nx.negative_edge_cycle(G))
False
>>> G[1][2]['weight'] = -7
>>> print(nx.negative_edge_cycle(G))
True
Notes
-----
Edge weight attributes must be numerical.
Distances are calculated as sums of weighted edges traversed.
This algorithm uses bellman_ford() but finds negative cycles
on any component by first adding a new node connected to
every node, and starting bellman_ford on that node. It then
removes that extra node.
"""
newnode = generate_unique_node()
G.add_edges_from([ (newnode,n) for n in G])
try:
bellman_ford(G, newnode, weight)
except nx.NetworkXUnbounded:
G.remove_node(newnode)
return True
G.remove_node(newnode)
return False
def bidirectional_dijkstra(G, source, target, weight = 'weight'):
"""Dijkstra's algorithm for shortest paths using bidirectional search.
Parameters
----------
G : NetworkX graph
source : node
Starting node.
target : node
Ending node.
weight: string, optional (default='weight')
Edge data key corresponding to the edge weight
Returns
-------
length : number
Shortest path length.
Returns a tuple of two dictionaries keyed by node.
The first dictionary stores distance from the source.
The second stores the path from the source to that node.
Raises
------
NetworkXNoPath
If no path exists between source and target.
Examples
--------
>>> G=nx.path_graph(5)
>>> length,path=nx.bidirectional_dijkstra(G,0,4)
>>> print(length)
4
>>> print(path)
[0, 1, 2, 3, 4]
Notes
-----
Edge weight attributes must be numerical.
Distances are calculated as sums of weighted edges traversed.
In practice bidirectional Dijkstra is much more than twice as fast as
ordinary Dijkstra.
Ordinary Dijkstra expands nodes in a sphere-like manner from the
source. The radius of this sphere will eventually be the length
of the shortest path. Bidirectional Dijkstra will expand nodes
from both the source and the target, making two spheres of half
this radius. Volume of the first sphere is pi*r*r while the
others are 2*pi*r/2*r/2, making up half the volume.
This algorithm is not guaranteed to work if edge weights
are negative or are floating point numbers
(overflows and roundoff errors can cause problems).
See Also
--------
shortest_path
shortest_path_length
"""
if source == target: return (0, [source])
#Init: Forward Backward
dists = [{}, {}]# dictionary of final distances
paths = [{source:[source]}, {target:[target]}] # dictionary of paths
fringe = [[], []] #heap of (distance, node) tuples for extracting next node to expand
seen = [{source:0}, {target:0} ]#dictionary of distances to nodes seen
#initialize fringe heap
heapq.heappush(fringe[0], (0, source))
heapq.heappush(fringe[1], (0, target))
#neighs for extracting correct neighbor information
if G.is_directed():
neighs = [G.successors_iter, G.predecessors_iter]
else:
neighs = [G.neighbors_iter, G.neighbors_iter]
#variables to hold shortest discovered path
#finaldist = 1e30000
finalpath = []
dir = 1
while fringe[0] and fringe[1]:
# choose direction
# dir == 0 is forward direction and dir == 1 is back
dir = 1-dir
# extract closest to expand
(dist, v )= heapq.heappop(fringe[dir])
if v in dists[dir]:
# Shortest path to v has already been found
continue
# update distance
dists[dir][v] = dist #equal to seen[dir][v]
if v in dists[1-dir]:
# if we have scanned v in both directions we are done
# we have now discovered the shortest path
return (finaldist,finalpath)
for w in neighs[dir](v):
if(dir==0): #forward
if G.is_multigraph():
minweight=min((dd.get(weight,1)
for k,dd in G[v][w].items()))
else:
minweight=G[v][w].get(weight,1)
vwLength = dists[dir][v] + minweight #G[v][w].get(weight,1)
else: #back, must remember to change v,w->w,v
if G.is_multigraph():
minweight=min((dd.get(weight,1)
for k,dd in G[w][v].items()))
else:
minweight=G[w][v].get(weight,1)
vwLength = dists[dir][v] + minweight #G[w][v].get(weight,1)
if w in dists[dir]:
if vwLength < dists[dir][w]:
raise ValueError("Contradictory paths found: negative weights?")
elif w not in seen[dir] or vwLength < seen[dir][w]:
# relaxing
seen[dir][w] = vwLength
heapq.heappush(fringe[dir], (vwLength,w))
paths[dir][w] = paths[dir][v]+[w]
if w in seen[0] and w in seen[1]:
#see if this path is better than than the already
#discovered shortest path
totaldist = seen[0][w] + seen[1][w]
if finalpath == [] or finaldist > totaldist:
finaldist = totaldist
revpath = paths[1][w][:]
revpath.reverse()
finalpath = paths[0][w] + revpath[1:]
raise nx.NetworkXNoPath("No path between %s and %s." % (source, target))
| DavidBreuer/CytoSeg | CytoSeg/utils.py | Python | gpl-3.0 | 74,513 |
#! /usr/bin/env python
#
# blend.py
#
"""
Example usage of Conceptual Blending API.
Instantiates blender with a simple dataset stored in an AtomSpace
and learns a new concept.
For complete documentation on how to pass additional parameters to
blender, refer to the documentation at the following link:
https://github.com/opencog/opencog/tree/master/opencog/python/blending/doc/blend-config-format.md
"""
__author__ = 'DongMin Kim'
from opencog.utilities import initialize_opencog
from opencog.type_constructors import *
from opencog.atomspace import AtomSpace
from blending.blend import ConceptualBlending
"""
Second Example:
- Blend with custom config.
- Give focus atom manually.
- Atoms that have STI value above 12 will be considered to blend.
- Force to start blend, and choose 2 nodes randomly.
"""
print "--------Start second example--------"
a = AtomSpace()
initialize_opencog(a)
# Make custom concept network.
car = ConceptNode("car")
man = ConceptNode("man")
block = ConceptNode("block")
build = ConceptNode("build")
a.set_av(car.h, 17)
a.set_av(man.h, 13)
a.set_av(block.h, 5)
a.set_av(build.h, 5)
focus_atoms = [car, man, block, build]
print "Source data:\n" + \
str(focus_atoms) + "\n"
# Make custom config.
InheritanceLink(
ConceptNode("my-config"),
ConceptNode("BLEND")
)
ListLink(
SchemaNode("BLEND:atoms-chooser"),
ConceptNode("my-config"),
ConceptNode("ChooseInSTIRange")
)
ListLink(
SchemaNode("BLEND:choose-sti-min"),
ConceptNode("my-config"),
ConceptNode("12")
)
ListLink(
SchemaNode("BLEND:blending-decider"),
ConceptNode("my-config"),
ConceptNode("DecideRandom")
)
ListLink(
SchemaNode("BLEND:decide-result-atoms-count"),
ConceptNode("my-config"),
ConceptNode("2")
)
# Start Conceptual Blending.
result = ConceptualBlending(a).run(
focus_atoms,
ConceptNode("my-config")
)
print "Newly blended node: \n" + \
str(result)
| kim135797531/opencog | examples/python/conceptual_blending/2_blend_with_config.py | Python | agpl-3.0 | 1,929 |
from django.shortcuts import render, get_object_or_404 as go404, redirect
from django.views.decorators.http import require_POST
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django import forms
from .models import Problem, Code, Upvote, Hint, HintUpvote
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
import oj
# Create your views here.
class SearchForm(forms.Form):
oj_id = forms.IntegerField(label='Problem ID', widget=forms.TextInput)
def index(request):
if request.GET:
form = SearchForm(request.GET)
if form.is_valid():
try:
problem = Problem.objects.get(**form.cleaned_data)
except Problem.DoesNotExist:
if oj.test_problem_existence(form.cleaned_data['oj_id']):
return redirect('problem:problem', oj_id=form.cleaned_data['oj_id'])
messages.warning(request, 'The problem id={} does not exist'.format(form.cleaned_data['oj_id']))
else:
return redirect(problem)
else:
form = SearchForm()
return render(
request,
'index.html',
{
'form': form
}
)
def about(request):
return render(request, 'about.html')
def history(request):
return render(request, 'history.html')
def problem(request, oj_id):
problem, created = Problem.objects.get_or_create(oj_id=oj_id)
if (
request.user.is_authenticated() and
Code.objects.filter(problem=problem, user=request.user).exists()
):
shared = True
hinted = Hint.objects.filter(problem=problem, user=request.user).exists()
else:
shared = False
hinted = False
code_list = Code.objects.filter(problem=problem).order_by('-upvotes', 'pk')
hint_list = Hint.objects.filter(problem=problem).order_by('-upvotes', 'pk')
# pagination
    paginator = Paginator(code_list, 10)  # Show 10 code entries per page
page = request.GET.get('page')
try:
code_list_pagination = paginator.page(page)
except PageNotAnInteger:
        if page == 'last':
code_list_pagination = paginator.page(paginator.num_pages)
else:
code_list_pagination = paginator.page(1)
except EmptyPage:
code_list_pagination = paginator.page(paginator.num_pages)
return render(
request,
'problem.html',
{
'problem': problem,
'problem_id': oj_id,
'shared': shared,
'hinted': hinted,
'code_list_count': code_list.count(),
'code_list_pagination': code_list_pagination,
'hint_list': hint_list,
'hint_form': HintForm()
}
)
@login_required
def problem_share(request, oj_id):
problem = go404(Problem, oj_id=oj_id)
try:
thecodeitself = oj.fetch_ac_code(request.user.username, request.session['password'], oj_id)
except oj.YouNoAc:
messages.warning(request, "You have not AC'd this problem.")
return redirect(problem)
code, created = Code.objects.get_or_create(problem=problem, user=request.user)
code.text = thecodeitself
code.save()
messages.info(request, 'Successfully shared code of problem {}'.format(oj_id))
return redirect(problem)
@login_required
def code_upvote(request, pk):
code = go404(Code, pk=pk)
upvote, created = Upvote.objects.get_or_create(user=request.user, code=code)
if created:
code.upvotes += 1
code.save()
messages.info(request, "upvoted #{}".format(pk))
else:
messages.warning(request, "You've upvoted this already!")
return redirect(code.problem)
class HintForm(forms.Form):
hint = forms.CharField(widget=forms.Textarea)
@login_required
@require_POST
def hint_share(request, oj_id):
problem = go404(Problem, oj_id=oj_id)
if not Code.objects.filter(problem=problem, user=request.user).exists():
messages.warning(request, "You cannot share a hint until you share your AC code")
return redirect(problem)
form = HintForm(request.POST)
if form.is_valid():
hint, created = Hint.objects.get_or_create(user=request.user, problem=problem)
hint.text = form.cleaned_data['hint']
hint.save()
else:
messages.error(request, 'invalid input.')
return redirect(problem)
@login_required
def hint_upvote(request, pk):
hint = go404(Hint, pk=pk)
upvote, created = HintUpvote.objects.get_or_create(user=request.user, hint=hint)
if created:
hint.upvotes += 1
hint.save()
messages.info(request, "upvoted #{}".format(pk))
else:
messages.warning(request, "You've upvoted this already!")
return redirect(hint.problem)
| afg984/happycoding | problem/views.py | Python | mit | 4,814 |
import tweepy
import sqlite3
import time
conn = sqlite3.connect("twitter_data.db")
consumer_key = "4kTL89hW5YqX4GPUUpzJ6lYbF"
consumer_secret = "uEAmYCALLO5CSKlm0Yql39RTeF8PeRByYt3G8kASM8JjVRzrS1"
def authenticate(access_token,access_token_secret):
try:
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token,access_token_secret)
return tweepy.API(auth)
except:
return None
def follow_back(api):
# return tweepy.Cursor(api.followers).items()
ff = api.followers_ids()
frnd = api.friends_ids()
for i in ff:
if i in frnd:
print "Already following"
else:
u = api.get_user(i)
u.follow()
print "Followed {0}".format(u.name)
def retrieve_users():
with conn:
cur = conn.cursor()
cur.execute("SELECT user_name,latest_tweet FROM USERS")
rows = cur.fetchall()
return rows
def retweet(user_tup,api):
user_name = user_tup[0]
latest_tweet = user_tup[1]
user = api.get_user(user_name)
new_tweets = user.timeline(since_id=latest_tweet)
new_tweets = new_tweets[::-1]
failed = False
if new_tweets:
for i in new_tweets:
try:
api.retweet(i.id)
print "ReTweeting.."
except:
if not failed:
latest_tweet = new_tweets[0].id
with conn:
t = (latest_tweet,user_name)
cur = conn.cursor()
cur.execute("UPDATE USERS SET latest_tweet=? WHERE user_name=?",t)
failed = True
print "RT Failed :("
def getapis():
with conn:
cur = conn.cursor()
cur.execute("SELECT access_token,secret FROM ACCOUNTS")
rows = cur.fetchall()
apis = []
for i in rows:
apis.append(authenticate(i[0],i[1]))
return apis
def main_procedure(api):
    users = None
    try:
        users = retrieve_users()
    except Exception,e:
        # if retrieval fails, users stays None and the loop below is skipped
        pass
    if users:
for i in users:
try:
retweet(i,api)
except Exception,e:
pass
try:
follow_back(api)
except Exception,e:
pass
while True:
apis = getapis()
for api in apis:
main_procedure(api)
| privatecaptain/twitterbot | main.py | Python | mit | 1,931 |
# -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2019 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Test Zenodo deposit REST API."""
from __future__ import absolute_import, print_function, unicode_literals
import pytest
from helpers import login_user_via_session
from zenodo.modules.deposit.resolvers import deposit_resolver
@pytest.mark.parametrize('user_email,status,use_scope', [
# anonymous user
(None, 403, False),
# owner
('[email protected]', 403, False),
# owner with scope headers
('[email protected]', 200, True),
# not owner
('[email protected]', 403, False),
# admin user
('[email protected]', 200, False),
])
def test_extra_formats_permissions(
api, api_client, db, users, deposit, extra_auth_headers,
user_email, status, use_scope):
if use_scope:
user_headers = extra_auth_headers
else:
user_headers = []
if user_email:
# Login as user
login_user_via_session(api_client, email=user_email)
response = api_client.options(
'/deposit/depositions/{0}/formats'.format(deposit['recid']),
headers=user_headers)
assert response.status_code == status
@pytest.mark.parametrize('user_email,status', [
# anonymous user
(None, 404),
# owner
('[email protected]', 404),
# not owner
('[email protected]', 404),
# admin user
('[email protected]', 200),
])
def test_extra_formats_buckets_permissions(
api, api_client, minimal_deposit, deposit_url, db, es, users,
locations, json_extra_auth_headers, extra_auth_headers, license_record,
user_email, status
):
"""Test Files-REST permissions for the extra formats bucket and files."""
# Create deposit
response = api_client.post(
deposit_url, json=minimal_deposit, headers=json_extra_auth_headers)
data = response.json
# Get identifier and links
depid = data['record_id']
links = data['links']
    # Upload 1 file
response = api_client.put(
links['bucket'] + '/test1.txt',
data='ctx',
headers=extra_auth_headers,
)
# Add extra_formats bucket with a file
response = api_client.put(
'/deposit/depositions/{0}/formats'.format(depid),
data='foo file',
headers=[('Content-Type', 'application/foo+xml')] +
extra_auth_headers
)
dep_uuid, deposit = deposit_resolver.resolve(depid)
if user_email:
# Login as user
login_user_via_session(api_client, email=user_email)
response = api_client.get(
'/files/' + str(deposit.extra_formats.bucket.id)
)
assert response.status_code == status
response = api_client.put(
'/files/' + str(deposit.extra_formats.bucket.id) +
'/application/foo+xml',
data='ctx'
)
assert response.status_code == status
# Publish deposition
response = api_client.post(links['publish'], headers=extra_auth_headers)
if user_email:
# Login as user
login_user_via_session(api_client, email=user_email)
response = api_client.get(
'/files/' + str(deposit.extra_formats.bucket.id)
)
assert response.status_code == status
response = api_client.put(
'/files/' + str(deposit.extra_formats.bucket.id) +
'/application/foo+xml',
data='ctx'
)
assert response.status_code == status
| slint/zenodo | tests/unit/deposit/test_extra_buckets_permissions.py | Python | gpl-2.0 | 4,275 |
## @package enhanced_grid
"""
Written by Daniel M. Aukes and CONTRIBUTORS
Email: danaukes<at>asu.edu.
Please see LICENSE for full license.
"""
## @brief Contains 2D and 3D Grid containers that supports extended slicing syntax.
##
# These classes are provided for rapid prototyping, the methods defined on them might be slow.
# @code
# grid1 = Grid3D(10, 10, 10, 0)
# grid2 = Grid3D(10, 10, 10, 1)
# grid1[0, 0, 0] = grid2[0, 0, 0]
# grid1[0, 0, 2:6:2] = grid2[0, 0, 4:5]
# grid1[0, 0, ...] = grid2[0, 0, ...]
# grid1[0, ..., 0] = grid2[..., 0, 0]
# grid1[..., ..., ...] = grid2[..., ..., ...]
# @endcode
#
# Slicing does not copy elements - an auxiliary window container is created that delegates
# further operations to the underlying container.
# Note that assignments to slices from the same object might not behave as expected.
# Parallel assignment also does not always work as expected.
# For example:
#@code
# grid[..., 0], grid[..., 1] = grid[..., 1], grid[..., 0]
#@endcode
# does not correctly swap two rows, but the following does:
#@code
# grid[..., 0], grid[..., 1] = grid[..., 1].clone(), grid[..., 0].clone()
#@endcode
#Strictly speaking, it is necessary only to clone the one object,
#but it is hard to remember which, so it is better to clone both (?).
#
#@todo Implement a way to use copy containers instead of window containers
from __future__ import division
from random import randint
from math import floor
from math import ceil
from array import array
##@brief A class that works just like a queue or a stack, except
## that a randomly selected element is returned.
##
# This class is useful for implementing algorithms that gather
# elements, and need to process them randomly. Something along the
# lines of:
#
# @code
# while not rqueue.empty():
# #generates 3 new elements to process
# for i in range(3):
# rqueue.push(process(rqueue.pop()))
# @endcode
class RandomQueue:
## Constructs a new empty RandomQueue
def __init__(self):
## The internal list to store objects.
self.array = []
##Returns True if this RandomQueue is empty.
def empty(self):
return len(self.array) <= 0
## Push a new element into the RandomQueue.
def push(self, x):
self.array.append(x)
## @brief Pops a randomly selected element from the queue.
##
# All elements can be selected equiprobably
def pop(self):
n = len(self.array)
if n <= 0:
            raise IndexError('Cannot pop from empty container!')
elif n == 1:
return self.array.pop()
else:
i = randint(0, n - 1)
j = n - 1
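            # swap the chosen element to the end so list.pop() can remove it in O(1)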
self.array[i], self.array[j] = self.array[j], self.array[i]
return self.array.pop()
## @brief Class that represents a 2D array.
##
# The following convenient syntax is supported:
# @code
# p = 2, 3 # a coordinate in the grid
# grid[p] = 5
# print grid[p]
# print grid[2, 3]
# @endcode
#
def signum(x):
if x > 0:
return 1
elif x < 0:
return -1
else:
return 0
## Truncates a point to integer coordinates.
def int_point_2d(p):
x, y = p
return int(x), int(y)
## Truncates a point to integer coordinates.
def int_point_3d(p):
x, y, z = p
return int(x), int(y), int(z)
## Converts a list of points to a 2D grid.
# Every point in the sample set is represented with
# a 1 in the grid; all other points are represented with 0.
# The returned grid is useful for producing image data.
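# For example (a small sketch with made-up values): points_to_grid([(0.2, 1.7), (3, 0)], (4, 4))
# yields a 4x4 Grid2D holding 1 at cells (0, 1) and (3, 0) and 0 everywhere else.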
def points_to_grid(points, dimensions):
grid = Grid2D(dimensions, 0)
for point in points:
grid[int_point_2d(point)] = 1
return grid
## Converts a list of points to a 3D grid.
# Every point in the sample set is represented with
# a 1 in the grid; all other points are represented with 0.
# The returned grid is useful for producing image data.
def points_to_grid_3d(points, dimensions):
grid = Grid3D(dimensions, 0)
for point in points:
grid[int_point_3d(point)] = 1
return grid
def make_grid_1d(width, initial_item=None):
grid = [initial_item] * width
return grid
## @brief Makes a 2D grid as a list of lists.
def make_grid_2d(width, height, initial_item=None):
grid = [None] * width
for i in xrange(width):
grid[i] = [None] * height
for j in xrange(height):
grid[i][j] = initial_item
return grid
## @brief Makes a 3D grid as a list of lists of lists.
def make_grid_3d(width, height, depth, initial_item):
grid = [None] * width
for i in xrange(width):
grid[i] = [None] * height
for j in xrange(height):
grid[i][j] = [None] * depth
for k in xrange(depth):
grid[i][j][k] = initial_item
return grid
## @brief Returns an xrange that can be used to iterate over
## the slice of the container.
#
# The following snippets are equivalent
#@code
# s = slice(3, 18, 3)
# for i in srange(s):
# print list[i]
#@endcode
#
#@code
# for item in list[s]:
# print item
#@endcode
def srange(s, length):
if s == Ellipsis:
return xrange(length)
else:
b, e, s = s.indices(length)
return xrange(b, e, s)
## @brief Returns true if s is a slice or an Ellipsis.
def is_slice(s):
return type(s) is slice or s == Ellipsis
## @brief Returns the number of elements this slice will return, provided the
## container is large enough.
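##
# For example (a small sketch of the intended behaviour):
#@code
# slice_len(slice(3, 18, 3), 20) # -> 5 (elements at indices 3, 6, 9, 12, 15)
# slice_len(Ellipsis, 20)        # -> 20
#@endcode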
def slice_len(s, length):
if s == Ellipsis:
return length
b, e, s, = s.indices(length)
tmp = int(ceil((e - b) / s))
if tmp < 0:
return 0
else:
return min(tmp, length)
if s.stop > s.start and s.step > 0:
return (s.stop - s.start) // s.step
elif s.stop < s.start and s.step < 0:
return (s.start - s.stop) // -s.step
else:
return 0
## @brief Returns a slice that is equivalent to the two slices combined.
##
# The following snippets are equivalent:
#@code
# list[s1][s2]
#@endcode
#@code
# list[slice_mul(s1, s2)]
#@endcode
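##
# A concrete sketch: with a container of length 12, slice_mul(slice(2, 10, 2), slice(1, 3, 1), 12)
# gives slice(4, 8, 2), selecting indices 4 and 6, the same elements as list[2:10:2][1:3].
#@code
# list(range(12))[2:10:2][1:3] == list(range(12))[4:8:2] # both give [4, 6]
#@endcode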
def slice_mul(slice1, slice2, length):
if type(slice2) is int:
if type(slice1) is type(Ellipsis):
return slice2
b1, e1, s1 = slice1.indices(length)
s2 = slice2
if s2 < 0:
s2 += length
if s2 < 0:
s2 = 0
return b1 + s2*s1
elif type(slice2) is slice:
if type(slice1) is type(Ellipsis):
return slice2
else:
b1, e1, s1 = slice1.indices(length)
b2, e2, s2 = slice2.indices(length)
b = b1 + b2*s1
s = s1*s2
e = min(b1 + e2*s1, e1)
if e < 0 and s < 0:
return slice(b, None, s)
else:
return slice(b, e, s)
b = slice1.start + slice2.start*slice1.step
s = slice1.step*slice2.step
return slice(b, min(slice1.start + slice2.stop*slice1.step, slice1.stop), s)
elif slice2 == Ellipsis:
return slice1
## @brief Completes this slice for a given length.
##
# The resulting slice will give the same elements for a container of the given length, but
# none of the start, stop, or step attributes will be None. If s is the Ellipses, then
# the slice (0, length, 1) is returned.
# @deprecated
def complete_slice(s, length):
return s
## @brief Sub-classes of this container can be used directly.
##
## A principle container will do assignment on a 1D point.
## Assignment of larger chunks is delegated to the AuxilaryContainer
## with the right dimensions.
##
## The inheritance is merely for documentation purposes.
class PrincipleContainer:
pass
## @brief Sub-classes of this container is used as windows by a
##PrincipleContainer, and should not be used directly!
##
## An AuxiliaryContainer will do assignment of a chunk with the same
## dimensionality as itself, otherwise it delegates it to the underlying
## principle container, which will, in turn, construct the correct
## AuxiliaryContainer to perform the assignment, or perform it if it is
## a 1D point.
##
## The inheritance is merely for documentation purposes.
class AuxiliaryContainer:
pass
##Abstract super class of all grid-like containers.
##These containers are static, that is, once they are created,
##their dimensions cannot change.
#
# Children of this class must implement
# the attribute __clonetype__, which must
# hold a callable type that can be constructed
# for clones.
#
# Children must also implement the iterators
# cell_iter() and index_iter().
class Container:
##
##
# @param dim
# The dimensions of this grid
def __init__(self, dims):
self.dims = dims
count = 1
for dim in dims:
count *= dim
self.count = count
##Test whether two containers have the same dimensions
##and the same items at equal indices.
def __eq__(self, other):
if other == None:
return False
if self.dims != other.dims:
return False
for cell1, cell2 in zip(self.cell_iter(), other.cell_iter()):
if cell1 != cell2:
return False
return True
##Equivalent to @code not (self == other) @endcode .
def __ne__(self, other):
return not (self == other)
## Returns the length (1D) or width (nD) of this container.
#
# The length of a container is defined such that the length behaves as it would
# for a list of lists.
def __len__(self):
return self.dims[0]
## @brief Returns the minimum and maximum elements
## of this grid as a tuple.
##
#This method assumes the grid is filled.
def min_max(self):
cell_iter = self.cell_iter()
min = max = cell_iter.next()
for cell in cell_iter:
if cell > max:
max = cell
elif cell < min:
min = cell
return min, max
def copy_from(self, other):
for index in self.index_iter():
self[index] = other[index]
##Makes a shallow copy of this container.
#
#This method constructs an instance of
#this instance's __clonetype__. In general,
#if this class is an AuxiliaryContainer,
#the clone will be a PrincipleContainer
#of the same dimension.
def clone(self):
new_grid = self.__clonetype__(self.dims)
new_grid.copy_from(self)
return new_grid
## Class that implements __str__ and __iter__.
class Container1D (Container):
def __init__(self, length):
Container.__init__(self, (length,))
self.length = length
self.__clonetype__ = Grid1D
def __str__(self):
#slow...
glst = []
for i in xrange(self.length):
glst.append(self[i])
return glst.__repr__()
def __iter__(self):
for i in xrange(self.length):
yield self[i]
raise StopIteration
## @brief Returns the same iterator as __iter__.
#
# Provided so that all containers have consistent cell_iter methods.
def cell_iter(self):
return self.__iter__()
## @brief Returns an iterator that iterates over a subgrid of this grid.
##
    # The iterator will iterate over all cells x in the grid
    # such that
    #@code
    # x0 <= x < x1
    #@endcode
# The iterator does not iterate outside the grid.
    def window_iter(self, x0, x1):
for i in xrange(max(0, x0), min(x1, self.length)):
yield self[i]
raise StopIteration
## @brief Returns an iterator that iterates over a subgrid of this grid.
##
    # The iterator will iterate over all cells x in the grid
    # such that
    #@code
    # x0 <= x < x1
    #@endcode
    # The iterator wraps over the grid. For example, if x is one unit too high
    # (it is outside the grid to the right), the iterator wraps around and
    # returns the first cell of the grid.
    def wrapped_window_iter(self, x0, x1):
for i in xrange(x0, x1):
yield self[i % self.length]
raise StopIteration
## @brief Returns an iterator that iterates over all cells in the square
## surrounding the given point.
##
#The square is 2*n + 1 units.
def square_iter(self, x, n):
return self.window_iter(x - n, x + n + 1)
def wrapped_square_iter(self, x, n):
return self.wrapped_window_iter(x - n, x + n + 1)
    ## @brief Returns an iterator that iterates over the indices of this
    ## container.
    ##
    # If grid is a container of length 2, then:
    # @code
    # for p in index_iter(grid):
    #     print p
    # @endcode
    # will produce
    # @code
    # 0
    # 1
    # @endcode
# This iterator is useful for assigning elements of grids:
# @code
# for p in index_iter(grid):
# grid[p] = random()
# @endcode
def index_iter(self):
for i in xrange(self.length):
yield i
raise StopIteration
## Class that implements __str__ and __iter__.
class Container2D (Container):
def __init__(self, width, height):
Container.__init__(self, (width, height))
self.width = width
self.height = height
self.__clonetype__ = Grid2D
def __str__(self):
#slow...
glst = []
for i in xrange(self.width):
gcol = []
for j in xrange(self.height):
gcol.append(self[i, j])
glst.append(gcol)
return glst.__repr__()
## @brief Returns an iterator that iterates over columns.
##
# This iterator is provided so that a Grid2D better emulates a list of
# lists, as in the following example:
#@code
# for col in grid:
# for item in col:
# process(item)
#@endcode
# Use of this iterator is discouraged - it is slow
def __iter__(self):
for i in xrange(self.width):
yield self[i, ...]
raise StopIteration
## @brief Returns an iterator that iterates over all cells in the grid.
##
# This allows you to write:
#@code
# for cell in cell_iter(grid):
# process(cell)
#@endcode
def cell_iter(self):
for i in xrange(self.width):
for j in xrange(self.height):
yield self[i, j]
raise StopIteration
## @brief Returns an iterator that iterates over a subgrid of this grid.
##
# The iterator will iterate over all cells x, y in the grid
# such that
#@code
# x0 <= x < x1
# y0 <= y < y1
#@endcode
#
def window_index_iter(self, p0, p1):
x0, y0 = p0
x1, y1 = p1
for i in xrange(max(0, x0), min(x1, self.width)):
for j in xrange(max(0, y0), min(y1, self.height)):
yield (i, j)
raise StopIteration
## @brief Returns an iterator that iterates over a subgrid of this grid.
##
# The iterator will iterate over all cells x, y in the grid
# such that
#@code
# x0 <= x < x1
# y0 <= y < y1
#@endcode
#
# The iterator wraps over the grid. For example, if x is one unit too high
# (it is outside the grid to the right), the iterator will return the index of the
# first cell in that row.
def wrapped_window_index_iter(self, p0, p1):
x0, y0 = p0
x1, y1 = p1
for i in xrange(x0, x1):
for j in xrange(y0, y1):
yield (i % self.width, j % self.height)
raise StopIteration
## @brief Returns an iterator that iterates over a subgrid of this grid.
##
# The iterator will iterate over all cells x, y in the grid
# such that
#@code
# x0 <= x < x1
# y0 <= y < y1
#@endcode
def window_iter(self, p0, p1):
x0, y0 = p0
x1, y1 = p1
for i in xrange(max(0, x0), min(x1, self.width)):
for j in xrange(max(0, y0), min(y1, self.height)):
yield self[i, j]
raise StopIteration
## @brief Returns an iterator that iterates over a subgrid of this grid.
##
# The iterator will iterate over all cells x, y in the grid
# such that
#@code
# x0 <= x < x1
# y0 <= y < y1
#@endcode
#
# The iterator wraps over the grid. For example, if x is one unit too high
# (it is outside the grid to the right), the iterator will return first
# cell in that row.
def wrapped_window_iter(self, p0, p1):
x0, y0 = p0
x1, y1 = p1
for i in xrange(x0, x1):
for j in xrange(y0, y1):
yield self[i % self.width, j % self.height]
raise StopIteration
## @brief Returns an iterator that iterates over all cells in the square
## surrounding the given point.
##
#The square is 2*n + 1 units.
def square_index_iter(self, p, n):
x, y = p
return self.window_index_iter((x - n, y - n), (x + n + 1, y + n +1))
## @brief Returns an iterator that iterates over all cells in the square
## surrounding the given point.
##
#The square is 2*n + 1 units.
# The iterator wraps over the grid. For example, if x is one unit too high
# (it is outside the grid to the right), the iterator will return first
# cell in that row.
def wrapped_square_index_iter(self, p, n):
x, y = p
return self.wrapped_window_index_iter((x - n, y - n), (x + n + 1, y + n +1))
## @brief Returns an iterator that iterates over all cells in the square
## surrounding the given point.
##
#The square is 2*n + 1 units.
def square_iter(self, p, n):
x, y = p
return self.window_iter((x - n, y - n), (x + n + 1, y + n +1))
## @brief Returns an iterator that iterates over all cells in the square
## surrounding the given point.
##
#The square is 2*n + 1 units.
# The iterator wraps over the grid. For example, if x is one unit too high
# (it is outside the grid to the right), the iterator will return first
# cell in that row.
def wrapped_square_iter(self, p, n):
x, y = p
return self.wrapped_window_iter((x - n, y - n), (x + n + 1, y + n +1))
    ## @brief Returns an iterator that iterates over the indices of this
## grid as tuples.
##
# If grid is a 2 by 2 grid, then:
# @code
# for p in index_iter(grid):
# print p
# @endcode
# will produce
# @code
# 0, 0
# 0, 1
# 1, 0
# 1, 1
# @endcode
# This iterator is useful for assigning elements of grids:
# @code
# for p in index_iter(grid):
# grid[p] = random()
# @endcode
def index_iter(self):
for i in xrange(self.width):
for j in xrange(self.height):
yield i, j
raise StopIteration
## Class that implements __str__ and __iter__.
class Container3D (Container):
def __init__(self, width, height, depth):
Container.__init__(self, (width, height, depth))
self.width = width
self.height = height
self.depth = depth
self.__clonetype__ = Grid3D
def __str__(self):
#slow...
glst = []
for i in xrange(self.width):
gcol = []
for j in xrange(self.height):
gslice = []
for k in xrange(self.depth):
gslice.append(self[i, j, k])
gcol.append(gslice)
glst.append(gcol)
return glst.__repr__()
def __iter__(self):
for i in xrange(self.width):
yield self[i, ..., ...]
raise StopIteration
## Returns an iterator that iterates over all cells in the grid
def cell_iter(self):
for i in xrange(self.width):
for j in xrange(self.height):
for k in xrange(self.depth):
yield self[i, j, k]
raise StopIteration
    ## @brief Returns an iterator that iterates over the indices of this
    ## grid as tuples.
    ##
    # If grid is a 2 by 2 by 2 grid, then:
    # @code
    # for p in index_iter(grid):
    #     print p
    # @endcode
    # will produce
    # @code
    # 0, 0, 0
    # 0, 0, 1
    # 0, 1, 0
    # ...
    # 1, 1, 1
    # @endcode
# This iterator is useful for assigning elements of grids:
# @code
# for p in index_iter(grid):
# grid[p] = random()
# @endcode
def index_iter(self):
for i in xrange(self.width):
for j in xrange(self.height):
for k in xrange (self.depth):
yield i, j, k
raise StopIteration
## @brief Returns an iterator that iterates over a subgrid of this grid.
##
# The iterator will iterate over all cells x, y, z in the grid
# such that
#@code
# x0 <= x < x1
# y0 <= y < y1
# z0 <= z < z1
#@endcode
#
def window_iter(self, p0, p1):
x0, y0, z0 = p0
x1, y1, z1 = p1
for i in xrange(max(0, x0), min(x1, self.width)):
for j in xrange(max(0, y0), min(y1, self.height)):
for k in xrange(max(0, z0), min(z1, self.depth)):
yield self[i, j, k]
raise StopIteration
## @brief Returns an iterator that iterates over a subgrid of this grid.
##
# The iterator will iterate over all cells x, y, z in the grid
# such that
#@code
# x0 <= x < x1
# y0 <= y < y1
# z0 <= z < z1
#@endcode
#wrapping around the edges as necessary.
def wrapped_window_iter(self, p0, p1):
x0, y0, z0 = p0
x1, y1, z1 = p1
for i in xrange(x0, x1):
for j in xrange(y0, y1):
for k in xrange(z0, z1):
yield self[i % self.width, j % self.height, k % self.depth]
raise StopIteration
## @brief Returns an iterator that iterates over all cells in the square
## surrounding the given point.
##
#The cube is 2*n + 1 units.
def square_iter(self, p, n):
x, y, z = p
return self.window_iter(
(x - n, y - n, z - n),
(x + n + 1, y + n +1, z + n +1))
## @brief Returns an iterator that iterates over all cells in the square
## surrounding the given point, wrapping around as necessary.
##
#The cube is 2*n + 1 units.
def wrapped_square_iter(self, p, n):
x, y, z = p
return self.wrapped_window_iter(
(x - n, y - n, z - n),
(x + n + 1, y + n +1, z + n +1))
class GridWindow1D (Container1D, AuxiliaryContainer): # window into a 1D grid
def __init__(self, grid, col_slice):
self.grid = grid
self.x = complete_slice(col_slice, grid.width)
        Container1D.__init__(self, slice_len(self.x, grid.width))
def __getitem__(self, x):
new_x = slice_mul(self.x, x, self.grid.width)
return self.grid[new_x]
def __setitem__(self, x, item):
new_x = slice_mul(self.x, x, self.grid.width)
if type(x) is int:
self.grid[new_x] = item
else: #slice!
for i, item_i in zip(srange(new_x, self.grid.width), item):
self.grid[i] = item_i
## Class that represents a 1D grid, with enhanced slicing notation.
class Grid1D (Container1D, PrincipleContainer):
def __init__(self, dims, initial_item = None):
(width,) = dims
Container1D.__init__(self, width)
self.grid = make_grid_1d(width, initial_item)
self.width = width
## @brief Returns an iterator that iterates over all cells in the grid.
##
# This allows you to write:
#@code
# for cell in cell_iter(grid):
# process(cell)
#@endcode
def cell_iter(self):
for i in xrange(self.width):
yield self.grid[i]
raise StopIteration
def __getitem__(self, x):
if isinstance(x, int):
return self.grid[x]
elif is_slice(x):
            return GridWindow1D(self, x)
raise TypeError
def __setitem__(self, x, item):
if type(x) is int:
self.grid[x] = item
elif is_slice(x):
g = GridWindow1D(self, x)
g[...] = item
else:
raise TypeError
class GridRow2D (Container1D, AuxiliaryContainer): #Constant y
def __init__(self, grid, col_slice, row):
self.grid = grid
self.x = complete_slice(col_slice, grid.width)
self.y = row
Container1D.__init__(self, slice_len(self.x, grid.width))
def __getitem__(self, x):
new_x = slice_mul(self.x, x, self.grid.width)
return self.grid[new_x, self.y]
def __setitem__(self, x, item):
new_x = slice_mul(self.x, x, self.grid.width)
if type(x) is int:
            self.grid[new_x, self.y] = item
else: #slice!
for i, item_i in zip(srange(new_x, self.grid.width), item):
self.grid[i, self.y] = item_i
class GridCol2D (Container1D, AuxiliaryContainer): #Constant x
def __init__(self, grid, col, row_slice):
self.grid = grid
self.x = col
self.y = complete_slice(row_slice, grid.height)
Container1D.__init__(self, slice_len(self.y, grid.height))
def __getitem__(self, y):
new_y = slice_mul(self.y, y, self.grid.height)
return self.grid[self.x, new_y]
def __setitem__(self, y, item):
new_y = slice_mul(self.y, y, self.grid.height)
if type(y) is int:
self.grid[self.x, new_y] = item
else: #slice!
for i, item_i in zip(srange(new_y, self.grid.height), item):
self.grid[self.x, i] = item_i
class GridWindow2D (Container2D, AuxiliaryContainer):
def __init__(self, grid, x, y):
self.grid = grid
self.x = complete_slice(x, grid.width)
self.y = complete_slice(y, grid.height)
Container2D.__init__(self, slice_len(self.x, grid.width), slice_len(self.y, grid.height))
def __getitem__(self, p):
if isinstance(p, int):
return self[p, ...]
x, y = p
new_x = slice_mul(self.x, x, self.grid.width)
new_y = slice_mul(self.y, y, self.grid.height)
return self.grid[new_x, new_y]
def __setitem__(self, p, item):
        if isinstance(p, int):
            self[p, ...] = item
            return
x, y = p
new_x = slice_mul(self.x, x, self.grid.width)
new_y = slice_mul(self.y, y, self.grid.height)
if type(x) is int or type(y) is int:
#delegate!
self.grid[new_x, new_y] = item
else: #slice!
for i, item_i in zip(srange(new_x, self.grid.width), item):
for j, item_j in zip(srange(new_y, self.grid.height), item_i):
self.grid[i, j] = item_j
def __repr__(self):
#slow...
glst = []
for i in xrange(slice_len(self.x, self.grid.width)):
gcol = []
for j in xrange(slice_len(self.y, self.grid.height)):
gcol.append(self[i, j])
glst.append(gcol)
return glst.__repr__()
## Class that represents a 2D grid, with enhanced slicing notation.
class Grid2D (Container2D, PrincipleContainer):
def __init__(self, dims, initial_item = None):
(width, height) = dims
Container2D.__init__(self, width, height)
self.grid = make_grid_2d(width, height, initial_item)
## @brief Returns an iterator that iterates over all cells in the grid.
##
# This allows you to write:
#@code
# for cell in cell_iter(grid):
# process(cell)
#@endcode
def cell_iter(self):
for i in xrange(self.width):
for j in xrange(self.height):
yield self.grid[i][j]
raise StopIteration
def __getitem__(self, p):
if isinstance(p, int):
return self[p, ...]
x, y = p
if isinstance(x, int):
if isinstance(y, int):
return self.grid[x][y]
elif is_slice(y):
return GridCol2D(self, x, y)
elif is_slice(x):
if isinstance(y, int):
return GridRow2D(self, x, y)
elif is_slice(y):
return GridWindow2D(self, x, y)
raise TypeError
def __setitem__(self, p, item):
x, y = p
if type(x) is int:
if type(y) is int:
self.grid[x][y] = item
elif is_slice(y):
g = GridCol2D(self, x, y)
g[...] = item
elif is_slice(x):
if type(y) is int:
g = GridRow2D(self, x, y)
g[...] = item
elif is_slice(y):
g = GridWindow2D(self, x, y)
g[..., ...] = item
else:
raise TypeError
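## A small usage sketch (hypothetical values) of Grid2D and the window/square iterators above:
#@code
# g = Grid2D((4, 4), 0)
# g[1, 2] = 7
# print g[..., 2] # the 1D slice with constant y=2: [0, 7, 0, 0]
# block = list(g.window_iter((0, 0), (2, 2))) # the four cells with 0 <= x < 2, 0 <= y < 2
# around = list(g.square_iter((1, 2), 1)) # the 3x3 neighbourhood centred on (1, 2), clipped to the grid
#@endcode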
class GridBar3D (Container1D, AuxiliaryContainer): #constant x, y
def __init__(self, grid, x, y, z):
self.grid = grid
self.x = x
self.y = y
self.z = complete_slice(z, grid.depth)
Container1D.__init__(self, slice_len(self.z, grid.depth))
def __getitem__(self, z):
new_z = slice_mul(self.z, z, self.grid.depth)
return self.grid[self.x, self.y, new_z]
def __setitem__(self, z, item):
new_z = slice_mul(self.z, z, self.grid.depth)
if type(z) is int:
            self.grid[self.x, self.y, new_z] = item
else: #slice!
for i, item_i in zip(srange(new_z, self.grid.depth), item):
self.grid[self.x, self.y, i] = item_i
class GridCol3D (Container1D, AuxiliaryContainer): #constant x, z
def __init__(self, grid, x, y, z):
self.grid = grid
self.x = x
self.y = complete_slice(y, grid.height)
self.z = z
Container1D.__init__(self, slice_len(self.y, grid.height))
def __getitem__(self, y):
new_y = slice_mul(self.y, y, self.grid.height)
return self.grid[self.x, new_y, self.z]
def __setitem__(self, y, item):
new_y = slice_mul(self.y, y, self.grid.height)
if type(y) is int:
            self.grid[self.x, new_y, self.z] = item
else: #slice!
for i, item_i in zip(srange(new_y, self.grid.height), item):
self.grid[self.x, i, self.z] = item_i
class GridRow3D (Container1D, AuxiliaryContainer): #constant y, z
def __init__(self, grid, x, y, z):
self.grid = grid
self.x = complete_slice(x, grid.width)
self.y = y
self.z = z
Container1D.__init__(self, slice_len(self.x, grid.width))
def __getitem__(self, x):
new_x = slice_mul(self.x, x, self.grid.width)
return self.grid[new_x, self.y, self.z]
def __setitem__(self, x, item):
new_x = slice_mul(self.x, x, self.grid.width)
if type(x) is int:
            self.grid[new_x, self.y, self.z] = item
else: #slice!
for i, item_i in zip(srange(new_x, self.grid.width), item):
                self.grid[i, self.y, self.z] = item_i
class GridSliceXY (Container2D, AuxiliaryContainer): #constant z
def __init__(self, grid, x, y, z):
self.grid = grid
self.x = complete_slice(x, grid.width)
self.y = complete_slice(y, grid.height)
self.z = z
Container2D.__init__(self, slice_len(self.x, grid.width), slice_len(self.y, grid.height))
def __getitem__(self, p):
if isinstance(p, int):
return self[p, ...]
x, y = p
new_x = slice_mul(self.x, x, self.grid.width)
new_y = slice_mul(self.y, y, self.grid.height)
return self.grid[new_x, new_y, self.z]
def __setitem__(self, p, item):
        if isinstance(p, int):
            self[p, ...] = item
            return
x, y = p
new_x = slice_mul(self.x, x, self.grid.width)
new_y = slice_mul(self.y, y, self.grid.height)
if type(x) is int or type(y) is int:
#delegate!
self.grid[new_x, new_y, self.z] = item
else: #slice!
for i, item_i in zip(srange(new_x, self.grid.width), item):
for j, item_j in zip(srange(new_y, self.grid.height), item_i):
self.grid[i, j, self.z] = item_j
class GridSliceXZ (Container2D, AuxiliaryContainer): #constant Y
def __init__(self, grid, x, y, z):
self.grid = grid
self.x = complete_slice(x, grid.width)
self.y = y
self.z = complete_slice(z, grid.depth)
Container2D.__init__(self, slice_len(self.x, grid.width), slice_len(self.z, grid.depth))
def __getitem__(self, p):
if isinstance(p, int):
return self[p, ...]
x, z = p
new_x = slice_mul(self.x, x, self.grid.width)
new_z = slice_mul(self.z, z, self.grid.depth)
return self.grid[new_x, self.y, new_z]
def __setitem__(self, p, item):
        if isinstance(p, int):
            self[p, ...] = item
            return
x, z = p
new_x = slice_mul(self.x, x, self.grid.width)
new_z = slice_mul(self.z, z, self.grid.depth)
if type(x) is int or type(z) is int:
#delegate!
self.grid[new_x, self.y, new_z] = item
else: #slice!
for i, item_i in zip(srange(new_x, self.grid.width), item):
for j, item_j in zip(srange(new_z, self.grid.depth), item_i):
self.grid[i, self.y, j] = item_j
class GridSliceYZ (Container2D, AuxiliaryContainer): #constant x
def __init__(self, grid, x, y, z):
self.grid = grid
self.x = x
self.y = complete_slice(y, grid.height)
self.z = complete_slice(z, grid.depth)
Container2D.__init__(self, slice_len(self.y, grid.height), slice_len(self.z, grid.depth))
def __getitem__(self, p):
if isinstance(p, int):
return self[p, ...]
y, z = p
new_y = slice_mul(self.y, y, self.grid.height)
new_z = slice_mul(self.z, z, self.grid.depth)
return self.grid[self.x, new_y, new_z]
def __setitem__(self, p, item):
        if isinstance(p, int):
            self[p, ...] = item
            return
y, z = p
new_y = slice_mul(self.y, y, self.grid.height)
new_z = slice_mul(self.z, z, self.grid.depth)
if type(y) is int or type(z) is int:
#delegate!
self.grid[self.x, new_y, new_z] = item
else: #slice!
            for i, item_i in zip(srange(new_y, self.grid.height), item):
                for j, item_j in zip(srange(new_z, self.grid.depth), item_i):
self.grid[self.x, i, j] = item_j
class GridWindow3D (Container3D, AuxiliaryContainer):
def __init__(self, grid, x, y, z):
self.grid = grid
self.x = complete_slice(x, grid.width)
self.y = complete_slice(y, grid.height)
        self.z = complete_slice(z, grid.depth)
Container3D.__init__(self, slice_len(self.x, grid.width), slice_len(self.y, grid.height), slice_len(self.z, grid.depth))
def __getitem__(self, p):
if isinstance(p, int):
return self[p, ..., ...]
x, y, z = p
new_x = slice_mul(self.x, x, self.grid.width)
new_y = slice_mul(self.y, y, self.grid.height)
new_z = slice_mul(self.z, z, self.grid.depth)
return self.grid[new_x, new_y, new_z]
def __setitem__(self, p, item):
        if isinstance(p, int):
            self[p, ..., ...] = item
            return
x, y, z = p
new_x = slice_mul(self.x, x, self.grid.width)
new_y = slice_mul(self.y, y, self.grid.height)
new_z = slice_mul(self.z, z, self.grid.depth)
if type(x) is int or type(y) is int or type(z) is int:
#delegate!
            self.grid[new_x, new_y, new_z] = item
else: #slice!
for i, item_i in zip(srange(new_x, self.grid.width), item):
for j, item_j in zip(srange(new_y, self.grid.height), item_i):
for k, item_k in zip(srange(new_z, self.grid.depth), item_j):
self.grid[i, j, k] = item_k
## Class that represents a 3D grid, with enhanced slicing notation.
class Grid3D (Container3D, PrincipleContainer):
def __init__(self, dims, initial_item = None):
(width, height, depth) = dims
Container3D.__init__(self, width, height, depth)
self.grid = make_grid_3d(width, height, depth, initial_item)
def __getitem__(self, p):
if isinstance(p, int):
return self[p, ..., ...]
x, y, z = p
if type(x) is int:
if type(y) is int:
if type(z) is int:
return self.grid[x][y][z]
elif is_slice(z):
return GridBar3D(self, x, y, z)
elif is_slice(y):
if type(z) is int:
return GridCol3D(self, x, y, z)
elif is_slice(z):
return GridSliceYZ(self, x, y, z)
elif is_slice(x):
if type(y) is int:
if type(z) is int:
return GridRow3D(self, x, y, z)
elif is_slice(z):
return GridSliceXZ(self, x, y, z)
elif is_slice(y):
if type(z) is int:
return GridSliceXY(self, x, y, z)
elif is_slice(z):
return GridWindow3D(self, x, y, z)
raise TypeError
def __setitem__(self, p, item):
(x, y, z) = p
if type(x) is int:
if type(y) is int:
if type(z) is int:
self.grid[x][y][z] = item
elif is_slice(z):
g = GridBar3D(self, x, y, z)
g[...] = item
elif is_slice(y):
if type(z) is int:
g = GridCol3D(self, x, y, z)
g[...] = item
elif is_slice(z):
g = GridSliceYZ(self, x, y, z)
g[..., ...] = item
elif is_slice(x):
if type(y) is int:
if type(z) is int:
g = GridRow3D(self, x, y, z)
g[...] = item
elif is_slice(z):
g = GridSliceXZ(self, x, y, z)
g[..., ...] = item
elif is_slice(y):
if type(z) is int:
g = GridSliceXY(self, x, y, z)
g[..., ...] = item
elif is_slice(z):
g = GridWindow3D(self, x, y, z)
g[..., ..., ...] = item
else:
raise TypeError
class ListGrid3D (Grid3D):
def __init__(self, dims):
(width, height, depth) = dims
Grid3D.__init__(self, (width, height, depth))
for index in self.index_iter():
self[index] = []
    ## @brief Appends an item to the list at x, y, z.
    ##
    # Use a tuple (x, y, z) to access the item.
def additem(self, p, b):
(x, y, z) = p
self[x, y, z].append(b)
class ListGrid2D (Grid2D):
def __init__(self, dims):
(width, height) = dims
Grid2D.__init__(self, (width, height))
for index in self.index_iter():
self[index] = []
    ## @brief Appends an item to the list at x, y.
    ##
    # Use a tuple (x, y) to access the item.
def additem(self, p, b):
(x, y) = p
self[x, y].append(b)
| danaukes/popupcad | popupcad_gazebo/enhanced_grid.py | Python | mit | 40,856 |
import hashlib
import os.path
from contextlib import contextmanager
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.dispatch import Signal
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from taggit.managers import TaggableManager
from wagtail.admin.utils import get_object_usage
from wagtail.core.models import CollectionMember
from wagtail.search import index
from wagtail.search.queryset import SearchableQuerySetMixin
class DocumentQuerySet(SearchableQuerySetMixin, models.QuerySet):
pass
class AbstractDocument(CollectionMember, index.Indexed, models.Model):
title = models.CharField(max_length=255, verbose_name=_('title'))
file = models.FileField(upload_to='documents', verbose_name=_('file'))
created_at = models.DateTimeField(verbose_name=_('created at'), auto_now_add=True)
uploaded_by_user = models.ForeignKey(
settings.AUTH_USER_MODEL,
verbose_name=_('uploaded by user'),
null=True,
blank=True,
editable=False,
on_delete=models.SET_NULL
)
tags = TaggableManager(help_text=None, blank=True, verbose_name=_('tags'))
file_size = models.PositiveIntegerField(null=True, editable=False)
# A SHA-1 hash of the file contents
file_hash = models.CharField(max_length=40, blank=True, editable=False)
objects = DocumentQuerySet.as_manager()
search_fields = CollectionMember.search_fields + [
index.SearchField('title', partial_match=True, boost=10),
index.AutocompleteField('title'),
index.FilterField('title'),
index.RelatedFields('tags', [
index.SearchField('name', partial_match=True, boost=10),
index.AutocompleteField('name'),
]),
index.FilterField('uploaded_by_user'),
]
def is_stored_locally(self):
"""
        Returns True if the document file is hosted on the local filesystem
"""
try:
self.file.path
return True
except NotImplementedError:
return False
@contextmanager
def open_file(self):
# Open file if it is closed
close_file = False
f = self.file
if f.closed:
# Reopen the file
if self.is_stored_locally():
f.open('rb')
else:
# Some external storage backends don't allow reopening
# the file. Get a fresh file instance. #1397
storage = self._meta.get_field('file').storage
f = storage.open(f.name, 'rb')
close_file = True
# Seek to beginning
f.seek(0)
try:
yield f
finally:
if close_file:
f.close()
def get_file_size(self):
if self.file_size is None:
try:
self.file_size = self.file.size
except Exception:
# File doesn't exist
return
self.save(update_fields=['file_size'])
return self.file_size
def _set_file_hash(self, file_contents):
self.file_hash = hashlib.sha1(file_contents).hexdigest()
def get_file_hash(self):
if self.file_hash == '':
with self.open_file() as f:
self._set_file_hash(f.read())
self.save(update_fields=['file_hash'])
return self.file_hash
def __str__(self):
return self.title
@property
def filename(self):
return os.path.basename(self.file.name)
@property
def file_extension(self):
return os.path.splitext(self.filename)[1][1:]
@property
def url(self):
return reverse('wagtaildocs_serve', args=[self.id, self.filename])
def get_usage(self):
return get_object_usage(self)
@property
def usage_url(self):
return reverse('wagtaildocs:document_usage',
args=(self.id,))
def is_editable_by_user(self, user):
from wagtail.documents.permissions import permission_policy
return permission_policy.user_has_permission_for_instance(user, 'change', self)
class Meta:
abstract = True
verbose_name = _('document')
verbose_name_plural = _('documents')
class Document(AbstractDocument):
admin_form_fields = (
'title',
'file',
'collection',
'tags'
)
def get_document_model():
"""
Get the document model from the ``WAGTAILDOCS_DOCUMENT_MODEL`` setting.
    Defaults to the standard :class:`~wagtail.documents.models.Document` model
if no custom model is defined.
"""
from django.conf import settings
from django.apps import apps
try:
app_label, model_name = settings.WAGTAILDOCS_DOCUMENT_MODEL.split('.')
except AttributeError:
return Document
except ValueError:
raise ImproperlyConfigured("WAGTAILDOCS_DOCUMENT_MODEL must be of the form 'app_label.model_name'")
document_model = apps.get_model(app_label, model_name)
if document_model is None:
raise ImproperlyConfigured(
"WAGTAILDOCS_DOCUMENT_MODEL refers to model '%s' that has not been installed" %
settings.WAGTAILDOCS_DOCUMENT_MODEL
)
return document_model
document_served = Signal(providing_args=['request'])
| nealtodd/wagtail | wagtail/documents/models.py | Python | bsd-3-clause | 5,407 |
#!/usr/bin/python
import cv
import errno
import getopt
import math
import os
import sys
from PIL import Image
########################################################################
# CONFIG VARIABLES #
########################################################################
# 0 = top of head, 1 = chin
MUSTACHE_VERTICAL_POSITION_RATIO = 0.71
# Vertical offset - use for fine adjustments
# 0 = no adjust, 1 = move up 1 nose height
MUSTACHE_VERTICAL_POSITION_FINE_OFFSET = 0.08
# 0 = point, 1 = width of face
MUSTACHE_TO_FACE_SIZE_RATIO = 0.6
DEFAULT_MUSTACHE_WIDTH = 50
DEFAULT_MUSTACHE_IMAGE_FILE = "mustache_03.png"
########################################################################
# END CONFIG VARIABLES #
########################################################################
# XML Haar cascade file paths
EYES = "cascades/haarcascade_eye.xml"
NOSE = "cascades/haarcascade_mcs_nose.xml"
FACE = "cascades/haarcascade_frontalface_default.xml"
DEBUG_MODE = False
def usage():
print """mustachify.py [-i input file] [-o output file] [options]
Options:
-h, --help Display usage information
-i, --inputfile Input image file
-o, --outputfile Output image file
-m, --mustache Mustache image file
    -d, --debug       Print debugging information and overlay
                      debugging rectangles on image"""
def detect_features(image, cascade, minsize):
features = []
storage = cv.CreateMemStorage()
loaded_cascade = cv.Load(cascade)
detected = cv.HaarDetectObjects(image, loaded_cascade, storage, 1.2, 2,
cv.CV_HAAR_DO_CANNY_PRUNING, minsize)
if DEBUG_MODE:
print "\t\tFound: " + str(detected)
if detected:
for (x, y, w, h), n in detected:
features.append((x, y, w, h))
return features
def draw_rectangle(image, (x, y, w, h), color):
cv.Rectangle(image, (x, y), (x+w, y+h), color)
def main(argv=None):
# Process arguments
if argv is None:
argv = sys.argv
input_file = ""
output_file = ""
mustache_file = DEFAULT_MUSTACHE_IMAGE_FILE
    global DEBUG_MODE
    DEBUG_MODE = False
try:
opts, args = getopt.getopt(argv[1:], "hi:o:m:d", ["help", "inputfile=", "outputfile=", "mustache=", "debug"])
except getopt.GetoptError:
usage()
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
sys.exit()
elif opt in ("-i", "--inputfile"):
input_file = arg
try:
with open(input_file):
pass
except IOError:
print "Error: File " + input_file + " does not exist."
sys.exit(errno.ENOENT)
elif opt in ("-o", "--outputfile"):
output_file = arg
elif opt in ("-m", "--mustache"):
mustache_file = arg
try:
with open(mustache_file):
pass
except IOError:
print "Error: File " + mustache_file + " does not exist."
sys.exit(errno.ENOENT)
elif opt in ("-d", "--debug"):
DEBUG_MODE = True
else:
usage()
sys.exit()
if input_file == "" or output_file == "":
print "Error: Specify an input and output file."
usage()
# End argument processing
if DEBUG_MODE:
print "Processing " + input_file
pil_image = Image.open(input_file)
cv_image = cv.CreateImageHeader(pil_image.size, cv.IPL_DEPTH_8U, 3)
cv.SetData(cv_image, pil_image.tostring())
mustache = Image.open(os.path.join("mustaches", mustache_file))
# calculate mustache image aspect ratio so proportions are preserved
# when scaling it to face
mustache_aspect_ratio = 1.0*mustache.size[0]/mustache.size[1]
if DEBUG_MODE:
print "\tFinding eyes:"
eyes = detect_features(cv_image, EYES, (10, 10))
if DEBUG_MODE:
print "\tFinding nose:"
noses = detect_features(cv_image, NOSE, (10, 10))
if DEBUG_MODE:
print "\tFinding face:"
faces = detect_features(cv_image, FACE, (70, 70))
if DEBUG_MODE:
print "\tFound " + str(len(eyes)) + " eye(s), " + str(len(noses)) + \
" nose(s), and " + str(len(faces)) + " face(s)."
# mustache_x and mustache_y represent the top left the mustache
mustache_w = mustache_h = mustache_x = mustache_y = mustache_angle = 0
if len(eyes) == 2:
#order eyes from left to right
if eyes[0][0] > eyes[1][0]:
temp = eyes[1]
eyes[1] = eyes[0]
eyes[0] = temp
eye_L_x = eyes[0][0]
eye_L_y = eyes[0][1]
eye_L_w = eyes[0][2]
eye_L_h = eyes[0][3]
eye_R_x = eyes[1][0]
eye_R_y = eyes[1][1]
eye_R_w = eyes[1][2]
eye_R_h = eyes[1][3]
# Redefine x and y coordinates as center of eye
eye_L_x = int(1.0 * eye_L_x + (eye_L_w / 2))
eye_L_y = int(1.0 * eye_L_y + (eye_L_h / 2))
eye_R_x = int(1.0 * eye_R_x + (eye_R_w / 2))
eye_R_y = int(1.0 * eye_R_y + (eye_R_h / 2))
mustache_angle = math.degrees(math.atan(-1.0 *
(eye_R_y-eye_L_y)/(eye_R_x-eye_L_x)))
# Don't rotate mustache if it looks like one of the eyes
# was misplaced
if math.fabs(mustache_angle) > 25:
mustache_angle = 0
if DEBUG_MODE:
draw_rectangle(cv_image, eyes[0], (0, 255, 0))
draw_rectangle(cv_image, eyes[1], (0, 255, 0))
print "\tMustache angle = " + str(mustache_angle)
else:
mustache_angle = 0
if DEBUG_MODE:
print "\tTwo eyes not found - using mustache angle of 0"
if len(faces) > 0:
face_x = faces[0][0]
face_y = faces[0][1]
face_w = faces[0][2]
face_h = faces[0][3]
# Scale mustache
# Change MUSTACHE_TO_FACE_SIZE_RATIO to adjust mustache size
# relative to face
mustache_w = int(1.0 * face_w * MUSTACHE_TO_FACE_SIZE_RATIO)
mustache_h = int(1.0 * mustache_w / mustache_aspect_ratio)
if DEBUG_MODE:
draw_rectangle(cv_image, faces[0], (0, 0, 255))
print "\tMustache width = " + str(mustache_w)
else:
# If for some reason a face wasn't found, guess a mustache width
# and scale
mustache_w = DEFAULT_MUSTACHE_WIDTH
mustache_h = int(1.0 * mustache_w / mustache_aspect_ratio)
if DEBUG_MODE:
print "\tNo face found - using default mustache width (" + \
str(mustache_w) + ")"
# Guess location of mustache based on face
# Change MUSTACHE_VERTICAL_POSITION_RATIO to ajust
# vertical positioning of mustache
def place_on_face_guessed():
mustache_x = int(1.0 * face_x + (face_w/2)) - \
int(1.0 * mustache_w / 2)
mustache_y = int(1.0 * face_y +
(MUSTACHE_VERTICAL_POSITION_RATIO * face_h)) - \
int(1.0 * mustache_h / 2)
return (mustache_x, mustache_y)
if DEBUG_MODE:
print "\tNo nose found - guessing nose center of (" + \
str(mustache_x) + ", " + str(mustache_y) + ")"
if len(noses) > 0:
# If more than one nose found, take noses[0] (the one with the
# highest confidence value)
nose_x = noses[0][0]
nose_y = noses[0][1]
nose_w = noses[0][2]
nose_h = noses[0][3]
# Redefine x and y as center of bottom of nose
nose_x = int(1.0 * nose_x + (nose_w / 2))
nose_y = int(1.0 * nose_y + (nose_h / 2))
# Check that nose is inside face
use_nose = True
if len(faces) > 0:
if not (face_x < nose_x and face_x + face_w > nose_x and face_y +
int(face_h / 3.0) < nose_y and face_y +
int((2.0 / 3.0) * face_h) > nose_y):
use_nose = False
mustache_x, mustache_y = place_on_face_guessed()
if use_nose:
mustache_x = nose_x - int(1.0 * mustache_w / 2)
mustache_y = nose_y + int(1.0 * mustache_h / 2 -
(MUSTACHE_VERTICAL_POSITION_FINE_OFFSET *
nose_w))
if DEBUG_MODE:
draw_rectangle(cv_image, noses[0], (255, 0, 0))
print "\tMustache center = (" + str(mustache_x) + ", " + \
str(mustache_y) + ")"
else:
if len(faces) > 0:
mustache_x, mustache_y = place_on_face_guessed()
else:
# Don't apply a mustache - not enough information available
# Save original image and exit
pil_image.save(output_file, "JPEG")
sys.exit()
# Convert image to a format that supports image overlays with alpha
pil_image = Image.fromstring("RGB", cv.GetSize(cv_image), cv_image.tostring())
# Redefine mustache x and y to be center of mustache
mustache_x += int(mustache_w / 2.0)
mustache_y += int(mustache_h / 2.0)
# Rotate and resize the mustache
# Rotate first so the final filter applied is ANTIALIAS
mustache = mustache.rotate(mustache_angle, Image.BICUBIC, True)
mrad = math.fabs(math.radians(mustache_angle))
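    # Axis-aligned bounding box of the rotated mustache:
    # width = w*cos(a) + h*sin(a), height = w*sin(a) + h*cos(a)
    # (both terms are non-negative here because the angle is limited to +/-25 degrees above)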
rotated_width = int(math.fabs(mustache_w * math.cos(mrad) + mustache_h *
math.sin(mrad)))
rotated_height = int(math.fabs(mustache_w * math.sin(mrad) + mustache_h *
math.cos(mrad)))
mustache = mustache.resize((rotated_width, rotated_height),
Image.ANTIALIAS)
# Superimpose the mustache on the face
pil_image.paste(mustache, (mustache_x-int(mustache.size[0] / 2.0),
mustache_y-int(mustache.size[1] / 2.0)), mustache)
# Save the image into the output file
pil_image.save(output_file, "JPEG")
if __name__ == "__main__":
sys.exit(main())
| ravikiranj/rkjanardhana-dot-com | mustachify/mustachify.py | Python | bsd-2-clause | 10,141 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
# Copyright (c) 2015 ACSONE SA/NV (<http://acsone.eu>)
from openerp.tests.common import TransactionCase
class TestResPartnerBank(TransactionCase):
"""Tests acc_number
"""
def test_sanitized_acc_number(self):
partner_bank_model = self.env['res.partner.bank']
acc_number = " BE-001 2518823 03 "
vals = partner_bank_model.search([('acc_number', '=', acc_number)])
self.assertEquals(0, len(vals))
partner_bank = partner_bank_model.create({
'acc_number': acc_number,
'partner_id': self.ref('base.res_partner_2'),
'acc_type': 'bank',
})
vals = partner_bank_model.search([('acc_number', '=', acc_number)])
self.assertEquals(1, len(vals))
self.assertEquals(partner_bank, vals[0])
vals = partner_bank_model.search([('acc_number', 'in', [acc_number])])
self.assertEquals(1, len(vals))
self.assertEquals(partner_bank, vals[0])
self.assertEqual(partner_bank.acc_number, acc_number)
        # sanitize the acc_number
sanitized_acc_number = 'BE001251882303'
vals = partner_bank_model.search(
[('acc_number', '=', sanitized_acc_number)])
self.assertEquals(1, len(vals))
self.assertEquals(partner_bank, vals[0])
vals = partner_bank_model.search(
[('acc_number', 'in', [sanitized_acc_number])])
self.assertEquals(1, len(vals))
self.assertEquals(partner_bank, vals[0])
self.assertEqual(partner_bank.sanitized_acc_number,
sanitized_acc_number)
# search is case insensitive
vals = partner_bank_model.search(
[('acc_number', '=', sanitized_acc_number.lower())])
self.assertEquals(1, len(vals))
vals = partner_bank_model.search(
[('acc_number', '=', acc_number.lower())])
self.assertEquals(1, len(vals))
| vileopratama/vitech | src/openerp/addons/base/tests/test_res_partner_bank.py | Python | mit | 2,017 |
import zipfile
import imghdr
from django import forms
from .models import Image, ImageBatchUpload, Album
class AlbumAdminForm(forms.ModelForm):
class Meta:
model = Album
fields = '__all__'
def clean(self):
cleaned_data = self.cleaned_data
if cleaned_data.get('authorized_users') is None:
pass
else:
if cleaned_data.get('all_users') and cleaned_data.get('authorized_users').count() != 0:
cleaned_data['all_users'] = False
return cleaned_data
class ImageAdminForm(forms.ModelForm):
class Meta:
model = Image
fields = ('public', 'title', 'image', 'albums', 'user')
def clean_image(self):
image = self.cleaned_data['image']
if image is None:
return image
elif not imghdr.what(image):
raise forms.ValidationError(u"The file is not an image file")
else:
return image
class ImageBatchUploadAdminForm(forms.ModelForm):
class Meta:
model = ImageBatchUpload
fields = ('public', 'title', 'zip_file', 'albums', 'user')
def clean_zip_file(self):
image_zip = self.cleaned_data['zip_file']
if image_zip is None:
return image_zip
elif not zipfile.is_zipfile(image_zip):
raise forms.ValidationError(u"The file is not a zip file")
else:
return image_zip
| dsimandl/teamsurmandl | gallery/forms.py | Python | mit | 1,431 |
import atexit
import os
import sqlite3
import LCEngine4 as LCEngine
from Code import AperturasStd
from Code import Books
from Code import ControlPosicion
from Code import PGNreader
from Code.QT import QTUtil2
from Code.QT import QTVarios
import Code.SQL.DBF as SQLDBF
from Code import Util
from Code import VarGen
class UnMove:
def __init__(self, bookGuide, father):
self.bookGuide = bookGuide
self.dbAnalisis = bookGuide.dbAnalisis
self._rowid = None
self._father = father
self._pv = ""
self._xpv = ""
self._nag = 0
self._adv = 0
self._comment = ""
self._pos = 0
self._xdata = {}
self._graphics = ""
self._mark = ""
self._children = []
self._item = None
if father:
self._siBlancas = not father.siBlancas()
self._numJugada = father.numJugada() + (1 if self._siBlancas else 0)
else: # root
self._siBlancas = False
self._numJugada = 0
self._fen = ""
self._pgn = "" # set al crear el pv
self.readedEXT = False
def __str__(self):
return "%s %s %s %s %s %s %s" % (
self.rowid(), self.father().rowid(), self.pv(), self.xpv(), self.siBlancas(), self.numJugada(), self.fen())
def rowid(self, valor=None):
if valor is not None:
self._rowid = valor
return self._rowid
def siBlancas(self):
return self._siBlancas
def numJugada(self):
return self._numJugada
def father(self):
return self._father
def pv(self, valor=None):
if valor is not None:
self._pv = valor
return self._pv
def xpv(self, valor=None):
if valor is not None:
self._xpv = valor
return self._xpv
def mark(self, valor=None, siLeyendo=False):
if valor is not None:
ant = self._mark
self._mark = valor
if not siLeyendo:
if ant != valor:
self.bookGuide.pteGrabar(self)
self.bookGuide.actualizaBookmark(self, len(valor) > 0)
return self._mark
def graphics(self, valor=None, siLeyendo=False):
if valor is None:
if not self.readedEXT:
self.readEXT()
else:
ant = self._graphics
self._graphics = valor
if not siLeyendo and ant != valor:
self.bookGuide.pteGrabar(self)
return self._graphics
def nag(self, valor=None, siLeyendo=False):
if valor is not None:
ant = self._nag
self._nag = valor
if not siLeyendo and ant != valor:
self.bookGuide.pteGrabar(self)
return self._nag
def adv(self, valor=None, siLeyendo=False):
if valor is not None:
ant = self._adv
self._adv = valor
if not siLeyendo and ant != valor:
self.bookGuide.pteGrabar(self)
return self._adv
def comment(self, valor=None, siLeyendo=False):
if valor is None:
self.readEXT()
else:
ant = self._comment
self._comment = valor
if not siLeyendo and ant != valor:
self.bookGuide.pteGrabar(self)
return self._comment
def commentLine(self):
c = self.comment()
if c:
li = c.split("\n")
c = li[0]
if len(li) > 1:
c += "..."
return c
def xdata(self, valor=None, siLeyendo=False):
if valor is None:
self.readEXT()
else:
ant = self._xdata
self._xdata = valor
if not siLeyendo and ant != valor:
self.bookGuide.pteGrabar(self)
self._xdata = valor
return self._xdata
def pos(self, valor=None, siLeyendo=False):
if valor is not None:
ant = self._pos
self._pos = valor
if not siLeyendo and ant != valor:
self.bookGuide.pteGrabar(self)
return self._pos
def fen(self, valor=None):
if valor is not None:
self._fen = valor
self.bookGuide.setTransposition(self)
return self._fen
def fenM2(self):
fen = self._fen
sp2 = fen.rfind(" ", 0, fen.rfind(" "))
return fen[:sp2]
def transpositions(self):
return self.bookGuide.getTranspositions(self)
def fenBase(self):
return self._father._fen
def pgn(self):
if not self._pgn:
pv = self._pv
d, h, c = pv[:2], pv[2:4], pv[4:]
cp = ControlPosicion.ControlPosicion()
cp.leeFen(self._father.fen())
self._pgn = cp.pgnSP(d, h, c)
cp.mover(d, h, c)
self._fen = cp.fen()
return self._pgn
def pgnEN(self):
cp = ControlPosicion.ControlPosicion()
cp.leeFen(self._father.fen())
pv = self._pv
d, h, c = pv[:2], pv[2:4], pv[4:]
return cp.pgn(d, h, c)
def pgnNum(self):
if self._siBlancas:
return "%d.%s" % (self._numJugada, self.pgn())
return self.pgn()
def item(self, valor=None):
if valor is not None:
self._item = valor
return self._item
def children(self):
return self._children
def addChildren(self, move):
self._children.append(move)
self._children = sorted(self._children, key=lambda uno: uno._pos)
def delChildren(self, move):
for n, mv in enumerate(self._children):
if move == mv:
del self._children[n]
return
def brothers(self):
li = []
for mv in self._father.children():
if mv.pv() != self.pv():
li.append(mv)
return li
def analisis(self):
return self.dbAnalisis.move(self)
def etiPuntos(self):
rm = self.dbAnalisis.move(self)
if rm:
return rm.abrTextoBase()
else:
return ""
def historia(self):
li = []
p = self
while True:
li.insert(0, p)
if not p.father():
break
p = p.father()
return li
def allPV(self):
return LCEngine.xpv2pv(self._xpv)
def allPGN(self):
li = []
for mv in self.historia():
if mv._pv:
if mv.siBlancas():
li.append("%d." % mv.numJugada())
li.append(mv.pgn())
return " ".join(li)
def readEXT(self):
if self.readedEXT:
return
self.readedEXT = True
self.bookGuide.readEXT(self)
class OpeningGuide:
def __init__(self, wowner, nomFichero=None):
self.configuracion = VarGen.configuracion
siGenerarStandard = False
if nomFichero is None:
nomFichero = self.configuracion.ficheroBookGuide
if not os.path.isfile(nomFichero):
siGenerarStandard = "Standard opening guide" in nomFichero
self.name = os.path.basename(nomFichero)[:-4]
self.ultPos = 0
self.dicPtes = {}
self.nomFichero = nomFichero
self.conexion = sqlite3.connect(nomFichero)
self.conexion.text_factory = lambda x: unicode(x, "utf-8", "ignore")
atexit.register(self.cerrar)
self.tablaDatos = "GUIDE"
self.checkInitBook(wowner, siGenerarStandard)
self.transpositions = {}
self.bookmarks = []
self.dbAnalisis = DBanalisis()
def pathGuide(self, nameGuide):
return Util.dirRelativo(os.path.join(self.configuracion.carpeta, nameGuide + ".pgo"))
def getOtras(self):
li = Util.listdir(self.configuracion.carpeta)
lwbase = self.name.lower()
liresp = []
for uno in li:
lw = uno.name.lower()
if lw.endswith(".pgo"):
if lwbase != lw[:-4]:
liresp.append(uno.name[:-4])
return liresp
def getTodas(self):
li = self.getOtras()
li.append(self.name)
return li
def changeTo(self, wowner, nomGuide):
self.grabar()
nomFichero = self.pathGuide(nomGuide)
self.name = nomGuide
self.ultPos = 0
self.dicPtes = {}
self.nomFichero = nomFichero
self.conexion.close()
self.conexion = sqlite3.connect(nomFichero)
self.conexion.text_factory = lambda x: unicode(x, "utf-8", "ignore")
atexit.register(self.cerrar)
self.checkInitBook(wowner, False)
self.transpositions = {}
self.bookmarks = []
self.root = UnMove(self, None)
self.root._fen = ControlPosicion.FEN_INICIAL
self.readAllDB()
self.configuracion.ficheroBookGuide = nomFichero
self.configuracion.graba()
def copyTo(self, otraGuide):
self.grabar()
otroFichero = self.pathGuide(otraGuide)
Util.copiaFichero(self.nomFichero, otroFichero)
def renameTo(self, wowner, otraGuide):
self.grabar()
self.conexion.close()
otroFichero = self.pathGuide(otraGuide)
Util.renombraFichero(self.nomFichero, otroFichero)
self.changeTo(wowner, otraGuide)
def removeOther(self, otraGuide):
self.grabar()
otroFichero = self.pathGuide(otraGuide)
Util.borraFichero(otroFichero)
def appendFrom(self, wowner, otraGuide):
self.grabar()
otroFichero = self.pathGuide(otraGuide)
otraConexion = sqlite3.connect(otroFichero)
otraConexion.text_factory = lambda x: unicode(x, "utf-8", "ignore")
cursor = otraConexion.cursor()
cursor.execute("pragma table_info(%s)" % self.tablaDatos)
liCamposOtra = cursor.fetchall()
cursor.close()
if not liCamposOtra:
return False
st = set()
for x in liCamposOtra:
st.add(x[1]) # nombre
liselect = ("XPV", "PV", "NAG", "ADV", "COMMENT", "FEN", "MARK", "GRAPHICS", "XDATA")
libasic = ("XPV", "PV", "FEN")
li = []
for x in liselect:
if x not in st:
if x in libasic:
otraConexion.close()
QTUtil2.mensError(wowner, _("This guide file is not valid"))
return False
else:
li.append(x)
select = ",".join(li)
dbfOtra = SQLDBF.DBF(otraConexion, self.tablaDatos, select)
dbfOtra.leer()
reccount = dbfOtra.reccount()
bp = QTUtil2.BarraProgreso(wowner, otraGuide, "", reccount).mostrar()
liReg = []
for recno in range(reccount):
bp.pon(recno)
if bp.siCancelado():
break
dbfOtra.goto(recno)
reg = dbfOtra.registroActual()
liReg.append(reg)
dbfOtra.cerrar()
otraConexion.close()
def dispatch(recno):
bp.pon(recno)
if liReg:
dbf = SQLDBF.DBF(self.conexion, self.tablaDatos, select)
dbf.insertarLista(liReg, dispatch)
dbf.cerrar()
bp.cerrar()
return len(liReg) > 0
def generarStandard(self, ventana, siBasic):
oLista = AperturasStd.apTrain
dic = oLista.dic
titulo = _("Openings")
tmpBP2 = QTUtil2.BarraProgreso2(ventana, titulo)
tf = len(dic)
tmpBP2.ponTotal(1, tf)
tmpBP2.ponRotulo(1, "1. " + _X(_("Reading %1"), titulo))
tmpBP2.ponTotal(2, tf)
tmpBP2.ponRotulo(2, "")
tmpBP2.mostrar()
liRegs = []
dRegs = {} # se guarda una lista con los pv, para determinar el padre
for nR, k in enumerate(oLista.dic):
tmpBP2.pon(1, nR)
tmpBP2.siCancelado()
ae = oLista.dic[k]
if siBasic and not ae.siBasic:
continue
liPV = ae.a1h8.split(" ")
ult = len(liPV) - 1
seqFather = ""
cp = ControlPosicion.ControlPosicion()
cp.posInicial()
for pos, pv in enumerate(liPV):
desde, hasta, coronacion = pv[:2], pv[2:4], pv[4:]
seq = seqFather + LCEngine.pv2xpv(pv)
cp.mover(desde, hasta, coronacion)
if seq not in dRegs:
reg = SQLDBF.Almacen()
reg.XPV = seq
reg.PV = pv
reg.FEN = cp.fen()
reg.COMMENT = ae.trNombre if pos == ult else ""
self.ultPos += 1
reg.POS = self.ultPos
liRegs.append(reg)
dRegs[seq] = reg
seqFather = seq
tmpBP2.ponRotulo(2, "2. " + _X(_("Converting %1"), titulo))
tmpBP2.ponTotal(2, len(liRegs))
select = "XPV,PV,COMMENT,FEN,POS"
dbf = SQLDBF.DBF(self.conexion, self.tablaDatos, select)
def dispatch(num):
tmpBP2.pon(2, num)
tmpBP2.siCancelado()
dbf.insertarLista(liRegs, dispatch)
dbf.cerrar()
tmpBP2.cerrar()
def creaTabla(self):
cursor = self.conexion.cursor()
sql = ("CREATE TABLE %s( XPV TEXT UNIQUE,PV VARCHAR(5),NAG INTEGER,ADV INTEGER,COMMENT TEXT,"
"FEN VARCHAR,MARK VARCHAR, POS INTEGER,GRAPHICS TEXT,XDATA BLOB);") % self.tablaDatos
cursor.execute(sql)
self.conexion.commit()
cursor.close()
def checkInitBook(self, wowner, siGenerarStandard):
cursor = self.conexion.cursor()
cursor.execute("pragma table_info(%s)" % self.tablaDatos)
liCampos = cursor.fetchall()
cursor.close()
if not liCampos:
self.creaTabla()
if siGenerarStandard:
self.generarStandard(wowner, True)
def grabarPGN(self, ventana, ficheroPGN, maxDepth):
select = "XPV,PV,COMMENT,NAG,ADV,FEN,POS"
SQLDBF.DBF(self.conexion, self.tablaDatos, select)
erroneos = duplicados = importados = 0
dlTmp = QTVarios.ImportarFicheroPGN(ventana)
dlTmp.hideDuplicados()
dlTmp.show()
select = "XPV,PV,COMMENT,NAG,ADV,FEN,POS"
dbf = SQLDBF.DBF(self.conexion, self.tablaDatos, select)
dnag = {"!!": 3, "!": 1, "?": 2, "??": 4, "!?": 5, "?!": 6}
n = 0
liReg = []
for n, g in enumerate(PGNreader.readGames(ficheroPGN), 1):
if not dlTmp.actualiza(n, erroneos, duplicados, importados):
break
if g.erroneo:
erroneos += 1
continue
if not g.moves:
erroneos += 1
continue
liReg = []
def addMoves(moves, depth, seq):
for mv in moves.liMoves:
if depth > maxDepth:
break
seqM1 = seq
pv = mv.pv
seq += LCEngine.pv2xpv(pv)
reg = SQLDBF.Almacen()
reg.PV = pv
reg.XPV = seq
reg.COMMENT = "\n".join(mv.comentarios)
reg.FEN = mv.fen
reg.NAG = 0
reg.ADV = 0
self.ultPos += 1
reg.POS = self.ultPos
for critica in mv.criticas:
if critica.isdigit():
t = int(critica)
if t in (4, 2, 1, 3, 5, 6):
reg.NAG = t
elif t in (11, 14, 15, 16, 17, 18, 19):
reg.ADV = t
else:
if critica in dnag:
reg.NAG = dnag[critica]
liReg.append(reg)
if mv.variantes:
for variante in mv.variantes:
addMoves(variante, depth, seqM1)
depth += 1
addMoves(g.moves, 1, "")
if liReg:
dbf.insertarLista(liReg, None)
dbf.cerrar()
dlTmp.actualiza(n, erroneos, duplicados, importados)
dlTmp.ponContinuar()
return len(liReg) > 0
def grabarPolyglot(self, ventana, ficheroBIN, depth, whiteBest, blackBest):
titulo = _("Import a polyglot book")
tmpBP2 = QTUtil2.BarraProgreso2(ventana, titulo)
tmpBP2.ponTotal(1, 1)
tmpBP2.ponRotulo(1, "1. " + _X(_("Reading %1"), os.path.basename(ficheroBIN)))
tmpBP2.ponTotal(2, 1)
tmpBP2.ponRotulo(2, "")
tmpBP2.mostrar()
basePos = self.ultPos
book = Books.Libro("P", ficheroBIN, ficheroBIN, True)
book.polyglot()
cp = ControlPosicion.ControlPosicion()
lireg = []
stFenM2 = set() # para que no se produzca un circulo vicioso
def hazFEN(fen, ply, seq):
plyN = ply + 1
siWhite = " w " in fen
siMax = False
if whiteBest:
siMax = siWhite
if blackBest:
siMax = siMax or not siWhite
liPV = book.miraListaPV(fen, siMax)
for pv in liPV:
cp.leeFen(fen)
cp.mover(pv[:2], pv[2:4], pv[4:])
fenN = cp.fen()
reg = SQLDBF.Almacen()
lireg.append(reg)
reg.PV = pv
seqN = seq + LCEngine.pv2xpv(pv)
reg.XPV = seqN
reg.COMMENT = ""
reg.NAG = 0
reg.FEN = fenN
reg.ADV = 0
self.ultPos += 1
reg.POS = self.ultPos
tmpBP2.ponTotal(1, self.ultPos - basePos)
tmpBP2.pon(1, self.ultPos - basePos)
if plyN < depth:
fenM2 = cp.fenM2()
if fenM2 not in stFenM2:
stFenM2.add(fenM2)
hazFEN(fenN, plyN, seqN)
hazFEN(ControlPosicion.FEN_INICIAL, 0, "")
select = "XPV,PV,COMMENT,NAG,ADV,FEN,POS"
dbf = SQLDBF.DBF(self.conexion, self.tablaDatos, select)
tmpBP2.ponTotal(2, len(lireg))
tmpBP2.ponRotulo(2, _("Writing..."))
def dispatch(num):
tmpBP2.pon(2, num)
dbf.insertarLista(lireg, dispatch)
dbf.cerrar()
tmpBP2.cerrar()
return len(lireg) > 0
def reset(self):
self.grabar()
self.dicPtes = {}
self.root = UnMove(self, None)
self.root._fen = ControlPosicion.FEN_INICIAL
self.readAllDB()
def readAllDB(self):
self.transpositions = {}
self.bookmarks = []
self.ultPos = 0
select = "ROWID,XPV,PV,NAG,ADV,FEN,MARK,POS"
orden = "XPV"
condicion = ""
dbf = SQLDBF.DBFT(self.conexion, self.tablaDatos, select, condicion, orden)
dbf.leer()
dicMoves = {}
dicMoves[""] = self.root
for recno in range(dbf.reccount()):
dbf.goto(recno)
xpv = dbf.XPV
pv = dbf.PV
if not pv:
self.root.rowid(dbf.ROWID)
continue
xpvfather = xpv[:-2 if len(pv) == 4 else -3]
if xpvfather in dicMoves:
father = dicMoves[xpvfather]
mv = UnMove(self, father)
mv.pv(pv)
mv.xpv(xpv)
mv.fen(dbf.FEN)
mv.rowid(dbf.ROWID)
mv.nag(dbf.NAG, True)
mv.adv(dbf.ADV, True)
mark = dbf.MARK
if mark:
self.bookmarks.append(mv)
mv.mark(dbf.MARK, True)
mv.pos(dbf.POS, True)
if dbf.POS >= self.ultPos:
self.ultPos = dbf.POS
dicMoves[xpv] = mv
father.addChildren(mv)
dbf.cerrar()
def setTransposition(self, move):
fenM2 = move.fenM2()
if fenM2 not in self.transpositions:
self.transpositions[fenM2] = [move]
else:
li = self.transpositions[fenM2]
if move not in li:
li.append(move)
def getTranspositions(self, move):
li = self.transpositions.get(move.fenM2(), [])
if len(li) <= 1:
return []
n = li.index(move)
li = li[:]
del li[n]
return li
def getMovesFenM2(self, fenM2):
return self.transpositions.get(fenM2, None)
def actualizaBookmark(self, move, siPoner):
siEsta = move in self.bookmarks
if siEsta:
if not siPoner:
del self.bookmarks[self.bookmarks.index(move)]
else:
if siPoner:
self.bookmarks.append(move)
def readEXT(self, move):
select = "COMMENT,GRAPHICS,XDATA"
condicion = "XPV='%s'" % move.xpv()
dbf = SQLDBF.DBFT(self.conexion, self.tablaDatos, select, condicion)
dbf.leer()
if dbf.reccount():
dbf.goto(0)
move.comment(dbf.COMMENT, True)
move.graphics(dbf.GRAPHICS, True)
move.xdata(Util.blob2var(dbf.XDATA), True)
dbf.cerrar()
def pteGrabar(self, move):
huella = move.xpv()
if huella not in self.dicPtes:
self.dicPtes[huella] = move
if len(self.dicPtes) > 5:
self.grabar()
def mixTable(self, tableFrom, tableTo):
self.grabar()
nameFrom = "DATA%d" % tableFrom
nameTo = "DATA%d" % tableTo
cursor = self.conexion.cursor()
cursor.execute("SELECT ROWID,XPV FROM %s" % nameFrom)
liValores = cursor.fetchall()
for rowid, xpv in liValores:
cursor.execute('SELECT ROWID FROM %s WHERE XPV="%s"' % (nameTo, xpv))
li = cursor.fetchone()
if li:
rowidTo = li[0]
sql = "DELETE FROM %s WHERE rowid = %d" % (nameTo, rowidTo)
cursor.execute(sql)
sql = "INSERT INTO %s SELECT * FROM %s WHERE %s.ROWID = %d;" % (nameTo, nameFrom, nameFrom, rowid)
cursor.execute(sql)
self.conexion.commit()
cursor.close()
def grabarFichSTAT(self, nomGuide, fich):
# Para convertir datos de games a bookGuide
self.changeTo(None, nomGuide)
f = open(fich, "rb")
liRegs = []
for linea in f:
linea = linea.strip()
xpv, pv, fen = linea.split("|")
reg = SQLDBF.Almacen()
reg.XPV = xpv
reg.PV = pv
reg.FEN = fen
self.ultPos += 1
reg.POS = self.ultPos
liRegs.append(reg)
select = "XPV,PV,FEN,POS"
dbf = SQLDBF.DBFT(self.conexion, self.tablaDatos, select)
def dispatch(num):
pass
dbf.insertarLista(liRegs, dispatch)
dbf.cerrar()
def cerrar(self):
if self.conexion:
self.conexion.close()
self.conexion = None
self.dbAnalisis.cerrar()
def grabar(self):
if len(self.dicPtes) == 0:
return
dic = self.dicPtes
self.dicPtes = {}
# Creamos una tabla de trabajo
dbf = SQLDBF.DBF(self.conexion, self.tablaDatos, "")
for k, uno in dic.items():
reg = SQLDBF.Almacen()
reg.XPV = uno.xpv()
reg.PV = uno.pv()
reg.NAG = uno.nag()
reg.ADV = uno.adv()
reg.COMMENT = uno.comment()
reg.POS = uno.pos()
reg.FEN = uno.fen()
reg.MARK = uno.mark()
reg.GRAPHICS = uno.graphics()
reg.XDATA = Util.var2blob(uno.xdata())
if uno.rowid() is None:
xid = dbf.insertarSoloReg(reg)
uno.rowid(xid)
else:
dbf.modificarROWID(uno.rowid(), reg)
dbf.cerrar()
def dameMovimiento(self, father, pv):
mv = UnMove(self, father)
xpv = father.xpv() + LCEngine.pv2xpv(pv)
mv.xpv(xpv)
mv.pv(pv)
cp = ControlPosicion.ControlPosicion()
cp.leeFen(father.fen())
cp.moverPV(pv)
mv.fen(cp.fen())
self.ultPos += 1
mv.pos(self.ultPos)
father.addChildren(mv)
self.pteGrabar(mv)
return mv
def borrar(self, uno):
liBorrados = [uno]
def allChildren(li, uno):
for x in uno.children():
li.append(x.rowid())
liBorrados.append(x)
allChildren(li, x)
liRowid = []
if uno.rowid():
liRowid.append(uno.rowid())
allChildren(liRowid, uno)
if liRowid:
dbf = SQLDBF.DBF(self.conexion, self.tablaDatos, "")
dbf.borrarListaRaw(liRowid)
if len(liRowid) > 10:
dbf.pack()
dbf.cerrar()
liQuitarTrasposition = []
for mov in liBorrados:
if mov in self.bookmarks:
del self.bookmarks[self.bookmarks.index(mov)]
fenM2 = mov.fenM2()
li = self.transpositions[fenM2]
if len(li) <= 1:
del self.transpositions[fenM2]
else:
del li[li.index(mov)]
if len(li) == 1:
xm = li[0]
if xm not in liBorrados:
liQuitarTrasposition.append(xm)
return liBorrados, liQuitarTrasposition
def allLines(self):
rt = self.root
liT = []
def uno(mv):
li = mv.children()
if li:
for mv in li:
uno(mv)
else:
liT.append(mv.historia())
uno(rt)
return liT
class DBanalisis:
def __init__(self):
self.db = Util.DicSQL(VarGen.configuracion.ficheroAnalisisBookGuide, tabla="analisis", maxCache=1024)
def cerrar(self):
self.db.close()
def lista(self, fenM2):
dic = self.db[fenM2]
if dic:
lista = dic.get("LISTA", None)
activo = dic.get("ACTIVO", None)
else:
lista = None
activo = None
return lista, activo
def mrm(self, fenM2):
dic = self.db[fenM2]
if dic:
lista = dic.get("LISTA", None)
if lista:
nactive = dic.get("ACTIVO", None)
if nactive is not None:
return lista[nactive]
return None
def move(self, move):
fenM2 = move.father().fenM2()
dic = self.db[fenM2]
if dic:
numactivo = dic.get("ACTIVO", None)
if numactivo is not None:
lista = dic.get("LISTA", None)
if lista:
if 0 < numactivo >= len(lista):
numactivo = 0
dic["ACTIVO"] = 0
self.db[fenM2] = dic
analisis = lista[numactivo]
if analisis:
rm, k = analisis.buscaRM(move.pv())
return rm
return None
def getAnalisis(self, fenM2):
dic = self.db[fenM2]
if not dic:
dic = {"ACTIVO": None, "LISTA": []}
return dic
def nuevo(self, fenM2, analisis):
dic = self.getAnalisis(fenM2)
li = dic["LISTA"]
li.append(analisis)
dic["ACTIVO"] = len(li) - 1
self.db[fenM2] = dic
def pon(self, fenM2, numActivo):
dic = self.getAnalisis(fenM2)
dic["ACTIVO"] = numActivo
self.db[fenM2] = dic
def activo(self, fenM2):
dic = self.getAnalisis(fenM2)
return dic["ACTIVO"]
def quita(self, fenM2, num):
dic = self.getAnalisis(fenM2)
li = dic["LISTA"]
del li[num]
numActivo = dic["ACTIVO"]
if numActivo is not None:
if numActivo == num:
numActivo = None
elif numActivo > num:
numActivo -= 1
dic["ACTIVO"] = numActivo
self.db[fenM2] = dic
| lukasmonk/lucaschess | Code/OpeningGuide.py | Python | gpl-2.0 | 28,407 |
"""
Representing one movie.
.. module:: movie
:platform: Unix, Windows
:synopis: providing data for a movie
.. moduleauthor:: Thomas Lehmann <[email protected]>
=======
License
=======
Copyright (c) 2017 Thomas Lehmann
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons
to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies
or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from concept.tools.serialize import Serializable
from concept.data.movies.director import Director
from concept.data.movies.actor import Actor
from concept.data.movies.composer import Composer
from concept.data.movies.tag import Tag
from concept.data.movies.purchase import Purchase
from concept.tools.compatible import TextType
# R0902 = about too many fields
# pylint: disable=R0902
class Movie(Serializable):
"""Representing one movie (DVD, Blu-ray, ...)."""
def __init__(self, title=TextType("")):
"""Initializing fields only (defaults)."""
super(Movie, self).__init__()
self.title = title # german title (or your language)
self.original = "" # original title
self.url = "" # german url to Wikipedia (or your language)
self.directors = [] # list of directors
self.actors = [] # list of actors
self.composers = [] # list of composers
self.runtime = 0 # runtime in minutes
self.aspect_ratio = "" # aspect ratio
self.publication = 0 # year of publication
self.tags = [] # list of tags (strings) to allows grouping by category
self.purchase = Purchase() # purchase information
def is_enabled_for_attributes(self):
"""
Adjusted to true for writing some fields as XML attributes.
:rtype: True for writing the fields as attributes of the tag
"""
return True
def add_director(self, director):
"""
Adding a new director.
:param: director: another director for given movie
:rtype: True if the director has been successfully added otherwise false.
"""
if not isinstance(director, Director):
return False
if director not in self.directors:
self.directors.append(director)
return True
return False
def add_actor(self, actor):
"""
Adding a new actor.
:param: actor: another actor for given movie
:rtype: True if the actor has been successfully added otherwise false.
"""
if not isinstance(actor, Actor):
return False
if actor not in self.actors:
self.actors.append(actor)
return True
return False
def add_composer(self, composer):
"""
Adding a new composer.
:param: composer: another composer for given movie
:rtype: True if the composer has been successfully added otherwise false.
"""
if not isinstance(composer, Composer):
return False
if composer not in self.composers:
self.composers.append(composer)
return True
return False
def add_tag(self, tag):
"""
Adding a new tag.
:param: tag: another tag for given movie
:rtype: True if the tag has been successfully added otherwise false.
"""
if not isinstance(tag, Tag):
return False
if tag not in self.tags:
self.tags.append(tag)
return True
return False
def __eq__(self, other):
"""
Compare this object to be equal with another in type and data.
:param: other: another movie instance (expected)
:rtype: True if movies are identical
"""
if not isinstance(other, Movie):
return False
if not self.title == other.title:
return False
if not self.directors == other.directors:
return False
# we can assume that same title is not directed by same director(s)
# for two or more different movies
return True
def to_xml(self):
"""
Provide this movie instance as XML string.
:return: Movie as an XML string with a final line break
"""
return super(Movie, self).to_xml() + "\n"
| Nachtfeuer/concept-py | concept/data/movies/movie.py | Python | mit | 5,329 |
from fastapi.testclient import TestClient
from docs_src.path_params.tutorial004 import app
client = TestClient(app)
openapi_schema = {
"openapi": "3.0.2",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/files/{file_path}": {
"get": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
"summary": "Read File",
"operationId": "read_file_files__file_path__get",
"parameters": [
{
"required": True,
"schema": {"title": "File Path", "type": "string"},
"name": "file_path",
"in": "path",
}
],
}
}
},
"components": {
"schemas": {
"ValidationError": {
"title": "ValidationError",
"required": ["loc", "msg", "type"],
"type": "object",
"properties": {
"loc": {
"title": "Location",
"type": "array",
"items": {"type": "string"},
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
},
"HTTPValidationError": {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": "#/components/schemas/ValidationError"},
}
},
},
}
},
}
def test_openapi():
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == openapi_schema
def test_file_path():
response = client.get("/files/home/johndoe/myfile.txt")
print(response.content)
assert response.status_code == 200, response.text
assert response.json() == {"file_path": "home/johndoe/myfile.txt"}
def test_root_file_path():
response = client.get("/files//home/johndoe/myfile.txt")
print(response.content)
assert response.status_code == 200, response.text
assert response.json() == {"file_path": "/home/johndoe/myfile.txt"}
| tiangolo/fastapi | tests/test_tutorial/test_path_params/test_tutorial004.py | Python | mit | 3,043 |
"""Class for `mapping' equilibrium coverages and rates through
descriptor space. This class acts as a base class to be inherited
by other mapper classes, but is not functional on its own.
get_rxn_parameter_map(descriptor_ranges,resolution): Uses a
scaler object to determine the reaction parameters as a function of
descriptor space. May be useful for debugging or providing
intuition about rate determining steps. Should return a list of
the form
[[descriptor_1,descriptor_2,...],[rxn_parameter1, rxn_parameter2, ...]]
save_map(map,map_file): creates a pickle of the "map" list and dumps it
to the map_file
load_map(map_file): loads a "map" list by loading a pickle from
the map_file
A functional derived mapper class must also contain the methods:
get_coverage_map(descriptor_ranges,resolution): a function which
returns a list of the form
[[descriptor_1,descriptor_2,...], [cvg_ads1,cvg_ads2,...]]
get_rates_map(descriptor_ranges,resolution): a function which returns
a list of the form
[[descriptor_1,descriptor_2,...], [rate_rxn1,rate_rxn2,...]]
"""
from catmap import griddata
import numpy as np
import mpmath as mp
try:
import cPickle as pickle
except:
import _pickle as pickle
import os
from copy import copy
from catmap.model import ReactionModel
from catmap import ReactionModelWrapper
from catmap import plt
class MapperBase(ReactionModelWrapper):
# XXX : Having an instantiated object as default parameter
# may have side-effects since every instance of MapperBase will have
# the identical instance of ReactionModel as its attribute
# Unless this is deliberately so, one should better use e.g. None
# as the default value and then instantiate ReactionModel in the
# function body of __init__ .
def __init__(self,reaction_model=None):
if reaction_model is None:
reaction_model = ReactionModel()
self._rxm = reaction_model
self._solver_output = ['coverage','rate', #outputs requiring solver
'turnover_frequency','selectivity','rate_control',
'noninteracting_coverages']
def get_point_output(self,descriptors,*args,**kwargs):
self.solver.compile()
self._output_variables = [v for v in self.output_variables]
self._descriptors = descriptors
params = self.scaler.get_rxn_parameters(descriptors)
self._params = params
if True in [v in self._solver_output for v in self.output_variables]:
if 'coverage' not in self._output_variables:
self._output_variables = ['coverage'] + self._output_variables
elif self._output_variables[0] != 'coverage':
self._output_variables.remove('coverage')
self._output_variables = ['coverage'] + self._output_variables
self.output_labels['coverage'] = self.adsorbate_names
self.output_labels['rate'] = self.elementary_rxns
# Need coverages for solver vars
for out in self._output_variables:
if getattr(self,'get_point_'+out):
val = getattr(self,'get_point_'+out)(descriptors,*args,**kwargs)
setattr(self,'_'+out,val)
self.solver.set_output_attrs(params)
self.scaler.set_output_attrs(descriptors)
for out in self.output_variables:
mapp = getattr(self,'_'+out+'_temp',{})
mapp[repr(descriptors)] = getattr(self,'_'+out)
setattr(self,'_'+out+'_temp',mapp)
def get_output_map(self,descriptor_ranges,resolution,*args,**kwargs):
self.solver.compile()
self._output_variables = [v for v in self.output_variables]
if True in [v in self._solver_output for v in self.output_variables]:
#determines whether or not solver is needed
if 'coverage' not in self._output_variables:
self._output_variables = ['coverage'] + self._output_variables
self._coverage_map = None
elif self._output_variables[0] != 'coverage':
self._output_variables.remove('coverage')
self._output_variables = ['coverage'] + self._output_variables
# Need coverages for solver vars
ismapped = False
for out in self._output_variables:
if getattr(self,'get_'+out+'_map'):
val = getattr(self,'get_'+out+'_map')(
descriptor_ranges,resolution,*args,**kwargs)
setattr(self,out+'_map',val)
ismapped = True
if ismapped == False:
d1Vals, d2Vals = self.process_resolution()
for d1V in d1Vals:
for d2V in d2Vals:
self._descriptors = [d1V,d2V]
self.get_point_output(self._descriptors)
for out in self.output_variables:
# if getattr(self,out+'_map'):
# mapp = getattr(self,out+'_map')
# else:
map_dict = getattr(self,'_'+out+'_temp',[])
mapp = []
for key in map_dict:
mapp.append([eval(key),map_dict[key]])
setattr(self,out+'_map',mapp)
if getattr(self,out+'_map_file'):
outfile = getattr(self,out+'_map_file')
self.save_map(mapp,outfile)
def process_resolution(self, descriptor_ranges = None, resolution = None):
if not descriptor_ranges:
descriptor_ranges = self.descriptor_ranges
if resolution is None:
resolution = self.resolution
resolution = np.array(resolution)
if resolution.size == 1:
resx = resy = float(resolution)
elif resolution.size ==2:
resx = resolution[0]
resy = resolution[1]
else:
raise ValueError('Resolution is not the correct shape')
d1min, d1max = descriptor_ranges[0]
d2min, d2max = descriptor_ranges[1]
d1Vals = np.linspace(d1min, d1max, resx)
d2Vals = np.linspace(d2min, d2max, resy)
return d1Vals, d2Vals
| mieand/catmap | catmap/mappers/mapper_base.py | Python | gpl-3.0 | 6,097 |
"""Defaults to call CP2K.
Index
-----
.. currentmodule:: nanoqm.workflows.templates
.. autosummary::
create_settings_from_template
"""
__all__ = ["create_settings_from_template"]
import json
import os
from os.path import join
import pkg_resources as pkg
import yaml
from scm.plams import Molecule
from qmflows.settings import Settings
from qmflows.type_hints import PathLike
from nanoqm.common import UniqueSafeLoader
from typing import Any, Dict, Iterable, FrozenSet
path_valence_electrons = pkg.resource_filename(
"nanoqm", "basis/valence_electrons.json")
path_aux_fit = pkg.resource_filename("nanoqm", "basis/aux_fit.json")
with open(path_valence_electrons, 'r') as f1, open(path_aux_fit, 'r') as f2:
valence_electrons = json.load(f1)
aux_fit = json.load(f2)
def generate_auxiliar_basis(
sett: Settings, auxiliar_basis: str, quality: str) -> Settings:
"""Generate the `auxiliar_basis` for all the atoms in the `sett`.
Use the`quality` of the auxiliar basis provided by the user.
"""
quality_to_number = {"low": 0, "medium": 1,
"good": 2, "verygood": 3, "excellent": 4}
kind = sett.cp2k.force_eval.subsys.kind
for atom in kind.keys():
index = quality_to_number[quality.lower()]
cfit = aux_fit[atom][index]
kind[atom]["basis_set"].append(f"AUX_FIT CFIT{cfit}")
return sett
#: Settings for a PBE calculation to compute a guess wave function
cp2k_pbe_guess = Settings(yaml.load("""
cp2k:
global:
run_type:
energy
force_eval:
subsys:
cell:
periodic: "None"
dft:
xc:
xc_functional pbe: {}
scf:
eps_scf: 1e-6
added_mos: 0
scf_guess: "restart"
ot:
minimizer: "DIIS"
n_diis: 7
preconditioner: "FULL_SINGLE_INVERSE"
""", Loader=UniqueSafeLoader))
#: Settings for a PBE calculation to compute the Molecular orbitals
cp2k_pbe_main = Settings(yaml.load("""
cp2k:
global:
run_type:
energy
force_eval:
subsys:
cell:
periodic: "None"
dft:
xc:
xc_functional pbe: {}
scf:
eps_scf: 5e-4
max_scf: 200
scf_guess: "restart"
""", Loader=UniqueSafeLoader))
#: Settings for a PBE0 calculation to compute a guess wave function
cp2k_pbe0_guess = Settings(yaml.load("""
cp2k:
global:
run_type:
energy
force_eval:
subsys:
cell:
periodic: "None"
dft:
auxiliary_density_matrix_method:
method: "basis_projection"
admm_purification_method: "none"
qs:
method: "gpw"
eps_pgf_orb: 1E-8
xc:
xc_functional:
pbe:
scale_x: 0.75
scale_c: 1.00
hf:
fraction: 0.25
screening:
eps_schwarz: 1.0E-6
screen_on_initial_p: "True"
interaction_potential:
potential_type: "truncated"
cutoff_radius: 2.5
memory:
max_memory: 5000
eps_storage_scaling: "0.1"
scf:
eps_scf: 1e-6
added_mos: 0
scf_guess: "restart"
ot:
minimizer: "DIIS"
n_diis: 7
preconditioner: "FULL_SINGLE_INVERSE"
""", Loader=UniqueSafeLoader))
#: Settings for a PBE0 calculation to compute the Molecular orbitals
cp2k_pbe0_main = Settings(yaml.load("""
cp2k:
global:
run_type:
energy
force_eval:
subsys:
cell:
periodic: "None"
dft:
auxiliary_density_matrix_method:
method: "basis_projection"
admm_purification_method: "none"
qs:
method: "gpw"
eps_pgf_orb: "1.0E-8"
xc:
xc_functional:
pbe:
scale_x: "0.75"
scale_c: "1.00"
hf:
fraction: "0.25"
screening:
eps_schwarz: 1.0E-6
screen_on_initial_p: "True"
interaction_potential:
potential_type: "truncated"
cutoff_radius: 2.5
memory:
max_memory: "5000"
eps_storage_scaling: "0.1"
scf:
eps_scf: 5e-4
max_scf: 200
scf_guess: "restart"
""", Loader=UniqueSafeLoader))
#: Settings for a HSE06 calculation to compute a guess wave function
cp2k_hse06_guess = Settings(yaml.load("""
cp2k:
global:
run_type:
energy
force_eval:
subsys:
cell:
periodic: "None"
dft:
auxiliary_density_matrix_method:
method: "basis_projection"
admm_purification_method: "none"
qs:
method: "gpw"
eps_pgf_orb: 1E-8
xc:
xc_functional:
pbe:
scale_x: 0.00
scale_c: 1.00
xwpbe:
scale_x: -0.25
scale_x0: 1.00
omega: 0.11
hf:
fraction: 0.25
screening:
eps_schwarz: 1.0E-6
screen_on_initial_p: "True"
interaction_potential:
potential_type: "shortrange"
omega: 0.11
memory:
max_memory: 5000
eps_storage_scaling: "0.1"
scf:
eps_scf: 1e-6
added_mos: 0
scf_guess: "restart"
ot:
minimizer: "DIIS"
n_diis: 7
preconditioner: "FULL_SINGLE_INVERSE"
""", Loader=UniqueSafeLoader))
#: Settings for a HSE06 calculation to compute the Molecular orbitals
cp2k_hse06_main = Settings(yaml.load("""
cp2k:
global:
run_type:
energy
force_eval:
subsys:
cell:
periodic: "None"
dft:
auxiliary_density_matrix_method:
method: "basis_projection"
admm_purification_method: "none"
qs:
method: "gpw"
eps_pgf_orb: "1.0E-8"
xc:
xc_functional:
pbe:
scale_x: 0.00
scale_c: 1.00
xwpbe:
scale_x: -0.25
scale_x0: 1.00
omega: 0.11
hf:
fraction: 0.25
screening:
eps_schwarz: 1.0E-6
screen_on_initial_p: "True"
interaction_potential:
potential_type: "shortrange"
omega: 0.11
memory:
max_memory: 5000
eps_storage_scaling: "0.1"
scf:
eps_scf: 1e-6
max_scf: 200
scf_guess: "restart"
""", Loader=UniqueSafeLoader))
#: Settings for a B3LYP calculation to compute a guess wave function
cp2k_b3lyp_guess = Settings(yaml.load("""
cp2k:
global:
run_type:
energy
force_eval:
subsys:
cell:
periodic: "None"
dft:
xc:
xc_functional b3lyp: {}
scf:
eps_scf: 1e-6
added_mos: 0
scf_guess: "restart"
ot:
minimizer: "DIIS"
n_diis: 7
preconditioner: "FULL_SINGLE_INVERSE"
""", Loader=UniqueSafeLoader))
#: Settings for a B3LYP calculation to compute the Molecular orbitals
cp2k_b3lyp_main = Settings(yaml.load("""
cp2k:
global:
run_type:
energy
force_eval:
subsys:
cell:
periodic: "None"
dft:
xc:
xc_functional b3lyp: {}
scf:
eps_scf: 5e-4
max_scf: 200
scf_guess: "restart"
""", Loader=UniqueSafeLoader))
#: Settings to add the CP2K kinds for each atom
kinds_template = Settings(yaml.load("""
cp2k:
force_eval:
subsys:
kind:
C:
basis_set: DZVP-MOLOPT-SR-GTH-q4
potential: GTH-PBE-q4
""", Loader=UniqueSafeLoader))
#: available templates
templates_dict = {
"pbe_guess": cp2k_pbe_guess, "pbe_main": cp2k_pbe_main,
"pbe0_guess": cp2k_pbe0_guess, "pbe0_main": cp2k_pbe0_main,
"hse06_guess": cp2k_hse06_guess, "hse06_main": cp2k_hse06_main,
"b3lyp_guess": cp2k_b3lyp_guess, "b3lyp_main": cp2k_b3lyp_main}
def create_settings_from_template(
general: Dict[str, Any], template_name: str, path_traj_xyz: PathLike) -> Settings:
"""Create a job Settings using the name provided by the user."""
setts = templates_dict[template_name]
if 'pbe0' in template_name:
s = Settings()
return generate_auxiliar_basis(setts + s, general['basis'], general['aux_fit'])
elif 'hse06' in template_name:
return generate_auxiliar_basis(setts, general['basis'], general['aux_fit'])
else:
return setts
def read_unique_atomic_labels(path_traj_xyz: PathLike) -> FrozenSet[str]:
"""Return the unique atomic labels."""
mol = Molecule(path_traj_xyz, 'xyz')
return frozenset(at.symbol for at in mol.atoms)
| SCM-NV/qmworks-namd | nanoqm/workflows/templates.py | Python | mit | 8,739 |
# -*- coding: utf8 -*-
# Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# 操作失败。
FAILEDOPERATION = 'FailedOperation'
# API网关触发器创建失败。
FAILEDOPERATION_APIGATEWAY = 'FailedOperation.ApiGateway'
# 创建触发器失败。
FAILEDOPERATION_APIGW = 'FailedOperation.Apigw'
# 获取Apm InstanceId失败。
FAILEDOPERATION_APMCONFIGINSTANCEID = 'FailedOperation.ApmConfigInstanceId'
# 当前异步事件状态不支持此操作,请稍后重试。
FAILEDOPERATION_ASYNCEVENTSTATUS = 'FailedOperation.AsyncEventStatus'
# 复制函数失败。
FAILEDOPERATION_COPYFAILED = 'FailedOperation.CopyFailed'
# 不支持复制到该地域。
FAILEDOPERATION_COPYFUNCTION = 'FailedOperation.CopyFunction'
# 操作COS资源失败。
FAILEDOPERATION_COS = 'FailedOperation.Cos'
# 创建别名失败。
FAILEDOPERATION_CREATEALIAS = 'FailedOperation.CreateAlias'
# 操作失败。
FAILEDOPERATION_CREATEFUNCTION = 'FailedOperation.CreateFunction'
# 创建命名空间失败。
FAILEDOPERATION_CREATENAMESPACE = 'FailedOperation.CreateNamespace'
# 当前函数状态无法进行此操作。
FAILEDOPERATION_CREATETRIGGER = 'FailedOperation.CreateTrigger'
# 当前调试状态无法执行此操作。
FAILEDOPERATION_DEBUGMODESTATUS = 'FailedOperation.DebugModeStatus'
# 调试状态下无法更新执行超时时间。
FAILEDOPERATION_DEBUGMODEUPDATETIMEOUTFAIL = 'FailedOperation.DebugModeUpdateTimeOutFail'
# 删除别名失败。
FAILEDOPERATION_DELETEALIAS = 'FailedOperation.DeleteAlias'
# 当前函数状态无法进行此操作,请在函数状态正常时重试。
FAILEDOPERATION_DELETEFUNCTION = 'FailedOperation.DeleteFunction'
# 删除layer版本失败。
FAILEDOPERATION_DELETELAYERVERSION = 'FailedOperation.DeleteLayerVersion'
# 无法删除默认Namespace。
FAILEDOPERATION_DELETENAMESPACE = 'FailedOperation.DeleteNamespace'
# 删除触发器失败。
FAILEDOPERATION_DELETETRIGGER = 'FailedOperation.DeleteTrigger'
# 当前函数状态无法更新代码,请在状态为正常时更新。
FAILEDOPERATION_FUNCTIONNAMESTATUSERROR = 'FailedOperation.FunctionNameStatusError'
# 函数在部署中,无法做此操作。
FAILEDOPERATION_FUNCTIONSTATUSERROR = 'FailedOperation.FunctionStatusError'
# 当前函数版本状态无法进行此操作,请在版本状态为正常时重试。
FAILEDOPERATION_FUNCTIONVERSIONSTATUSNOTACTIVE = 'FailedOperation.FunctionVersionStatusNotActive'
# 获取别名信息失败。
FAILEDOPERATION_GETALIAS = 'FailedOperation.GetAlias'
# 获取函数代码地址失败。
FAILEDOPERATION_GETFUNCTIONADDRESS = 'FailedOperation.GetFunctionAddress'
# 当前账号或命名空间处于欠费状态,请在可用时重试。
FAILEDOPERATION_INSUFFICIENTBALANCE = 'FailedOperation.InsufficientBalance'
# 调用函数失败。
FAILEDOPERATION_INVOKEFUNCTION = 'FailedOperation.InvokeFunction'
# 命名空间已存在,请勿重复创建。
FAILEDOPERATION_NAMESPACE = 'FailedOperation.Namespace'
# 服务开通失败。
FAILEDOPERATION_OPENSERVICE = 'FailedOperation.OpenService'
# 操作冲突。
FAILEDOPERATION_OPERATIONCONFLICT = 'FailedOperation.OperationConflict'
# 创建定时预置任务失败。
FAILEDOPERATION_PROVISIONCREATETIMER = 'FailedOperation.ProvisionCreateTimer'
# 删除定时预置任务失败。
FAILEDOPERATION_PROVISIONDELETETIMER = 'FailedOperation.ProvisionDeleteTimer'
# 当前函数版本已有预置任务处于进行中,请稍后重试。
FAILEDOPERATION_PROVISIONEDINPROGRESS = 'FailedOperation.ProvisionedInProgress'
# 发布layer版本失败。
FAILEDOPERATION_PUBLISHLAYERVERSION = 'FailedOperation.PublishLayerVersion'
# 当前函数状态无法发布版本,请在状态为正常时发布。
FAILEDOPERATION_PUBLISHVERSION = 'FailedOperation.PublishVersion'
# 角色不存在。
FAILEDOPERATION_QCSROLENOTFOUND = 'FailedOperation.QcsRoleNotFound'
# 当前函数已有保留并发设置任务处于进行中,请稍后重试。
FAILEDOPERATION_RESERVEDINPROGRESS = 'FailedOperation.ReservedInProgress'
# Topic不存在。
FAILEDOPERATION_TOPICNOTEXIST = 'FailedOperation.TopicNotExist'
# 用户并发内存配额设置任务处于进行中,请稍后重试。
FAILEDOPERATION_TOTALCONCURRENCYMEMORYINPROGRESS = 'FailedOperation.TotalConcurrencyMemoryInProgress'
# 指定的服务未开通,可以提交工单申请开通服务。
FAILEDOPERATION_UNOPENEDSERVICE = 'FailedOperation.UnOpenedService'
# 更新别名失败。
FAILEDOPERATION_UPDATEALIAS = 'FailedOperation.UpdateAlias'
# 当前函数状态无法更新代码,请在状态为正常时更新。
FAILEDOPERATION_UPDATEFUNCTIONCODE = 'FailedOperation.UpdateFunctionCode'
# UpdateFunctionConfiguration操作失败。
FAILEDOPERATION_UPDATEFUNCTIONCONFIGURATION = 'FailedOperation.UpdateFunctionConfiguration'
# 内部错误。
INTERNALERROR = 'InternalError'
# 创建apigw触发器内部错误。
INTERNALERROR_APIGATEWAY = 'InternalError.ApiGateway'
# ckafka接口失败。
INTERNALERROR_CKAFKA = 'InternalError.Ckafka'
# 删除cmq触发器失败。
INTERNALERROR_CMQ = 'InternalError.Cmq'
# 更新触发器失败。
INTERNALERROR_COS = 'InternalError.Cos'
# ES错误。
INTERNALERROR_ES = 'InternalError.ES'
# 内部服务异常。
INTERNALERROR_EXCEPTION = 'InternalError.Exception'
# 内部服务错误。
INTERNALERROR_GETROLEERROR = 'InternalError.GetRoleError'
# 内部系统错误。
INTERNALERROR_SYSTEM = 'InternalError.System'
# 内部服务错误。
INTERNALERROR_SYSTEMERROR = 'InternalError.SystemError'
# FunctionName取值与规范不符,请修正后再试。可参考:https://tencentcs.com/5jXKFnBW。
INVALIDPARAMETER_FUNCTIONNAME = 'InvalidParameter.FunctionName'
# 请求参数不合法。
INVALIDPARAMETER_PAYLOAD = 'InvalidParameter.Payload'
# RoutingConfig参数传入错误。
INVALIDPARAMETER_ROUTINGCONFIG = 'InvalidParameter.RoutingConfig'
# 参数取值错误。
INVALIDPARAMETERVALUE = 'InvalidParameterValue'
# Action取值与规范不符,请修正后再试。可参考:https://tencentcs.com/5jXKFnBW。
INVALIDPARAMETERVALUE_ACTION = 'InvalidParameterValue.Action'
# AdditionalVersionWeights参数传入错误。
INVALIDPARAMETERVALUE_ADDITIONALVERSIONWEIGHTS = 'InvalidParameterValue.AdditionalVersionWeights'
# 不支持删除默认别名,请修正后重试。
INVALIDPARAMETERVALUE_ALIAS = 'InvalidParameterValue.Alias'
# ApiGateway参数错误。
INVALIDPARAMETERVALUE_APIGATEWAY = 'InvalidParameterValue.ApiGateway'
# ApmConfig参数传入错误。
INVALIDPARAMETERVALUE_APMCONFIG = 'InvalidParameterValue.ApmConfig'
# ApmConfigInstanceId参数传入错误。
INVALIDPARAMETERVALUE_APMCONFIGINSTANCEID = 'InvalidParameterValue.ApmConfigInstanceId'
# ApmConfigRegion参数传入错误。
INVALIDPARAMETERVALUE_APMCONFIGREGION = 'InvalidParameterValue.ApmConfigRegion'
# Args 参数值有误。
INVALIDPARAMETERVALUE_ARGS = 'InvalidParameterValue.Args'
# 函数异步重试配置参数无效。
INVALIDPARAMETERVALUE_ASYNCTRIGGERCONFIG = 'InvalidParameterValue.AsyncTriggerConfig'
# Cdn传入错误。
INVALIDPARAMETERVALUE_CDN = 'InvalidParameterValue.Cdn'
# cfs配置项重复。
INVALIDPARAMETERVALUE_CFSPARAMETERDUPLICATE = 'InvalidParameterValue.CfsParameterDuplicate'
# cfs配置项取值与规范不符。
INVALIDPARAMETERVALUE_CFSPARAMETERERROR = 'InvalidParameterValue.CfsParameterError'
# cfs参数格式与规范不符。
INVALIDPARAMETERVALUE_CFSSTRUCTIONERROR = 'InvalidParameterValue.CfsStructionError'
# Ckafka传入错误。
INVALIDPARAMETERVALUE_CKAFKA = 'InvalidParameterValue.Ckafka'
# 运行函数时的参数传入有误。
INVALIDPARAMETERVALUE_CLIENTCONTEXT = 'InvalidParameterValue.ClientContext'
# Cls传入错误。
INVALIDPARAMETERVALUE_CLS = 'InvalidParameterValue.Cls'
# 修改Cls配置需要传入Role参数,请修正后重试。
INVALIDPARAMETERVALUE_CLSROLE = 'InvalidParameterValue.ClsRole'
# Cmq传入错误。
INVALIDPARAMETERVALUE_CMQ = 'InvalidParameterValue.Cmq'
# Code传入错误。
INVALIDPARAMETERVALUE_CODE = 'InvalidParameterValue.Code'
# CodeSecret传入错误。
INVALIDPARAMETERVALUE_CODESECRET = 'InvalidParameterValue.CodeSecret'
# CodeSource传入错误。
INVALIDPARAMETERVALUE_CODESOURCE = 'InvalidParameterValue.CodeSource'
# Command[Entrypoint] 参数值有误。
INVALIDPARAMETERVALUE_COMMAND = 'InvalidParameterValue.Command'
# CompatibleRuntimes参数传入错误。
INVALIDPARAMETERVALUE_COMPATIBLERUNTIMES = 'InvalidParameterValue.CompatibleRuntimes'
# Content参数传入错误。
INVALIDPARAMETERVALUE_CONTENT = 'InvalidParameterValue.Content'
# Cos传入错误。
INVALIDPARAMETERVALUE_COS = 'InvalidParameterValue.Cos'
# CosBucketName不符合规范。
INVALIDPARAMETERVALUE_COSBUCKETNAME = 'InvalidParameterValue.CosBucketName'
# CosBucketRegion取值与规范不符,请修正后再试。可参考:https://tencentcs.com/5jXKFnBW。
INVALIDPARAMETERVALUE_COSBUCKETREGION = 'InvalidParameterValue.CosBucketRegion'
# CosObjectName不符合规范。
INVALIDPARAMETERVALUE_COSOBJECTNAME = 'InvalidParameterValue.CosObjectName'
# CustomArgument参数长度超限。
INVALIDPARAMETERVALUE_CUSTOMARGUMENT = 'InvalidParameterValue.CustomArgument'
# DateTime传入错误。
INVALIDPARAMETERVALUE_DATETIME = 'InvalidParameterValue.DateTime'
# DeadLetterConfig取值与规范不符,请修正后再试。可参考:https://tencentcs.com/5jXKFnBW。
INVALIDPARAMETERVALUE_DEADLETTERCONFIG = 'InvalidParameterValue.DeadLetterConfig'
# 默认Namespace无法创建。
INVALIDPARAMETERVALUE_DEFAULTNAMESPACE = 'InvalidParameterValue.DefaultNamespace'
# Description传入错误。
INVALIDPARAMETERVALUE_DESCRIPTION = 'InvalidParameterValue.Description'
# 环境变量DNS[OS_NAMESERVER]配置有误。
INVALIDPARAMETERVALUE_DNSINFO = 'InvalidParameterValue.DnsInfo'
# EipConfig参数错误。
INVALIDPARAMETERVALUE_EIPCONFIG = 'InvalidParameterValue.EipConfig'
# Enable取值与规范不符,请修正后再试。可参考:https://tencentcs.com/5jXKFnBW。
INVALIDPARAMETERVALUE_ENABLE = 'InvalidParameterValue.Enable'
# Environment传入错误。
INVALIDPARAMETERVALUE_ENVIRONMENT = 'InvalidParameterValue.Environment'
# 环境变量大小超限,请保持在 4KB 以内。
INVALIDPARAMETERVALUE_ENVIRONMENTEXCEEDEDLIMIT = 'InvalidParameterValue.EnvironmentExceededLimit'
# 不支持修改函数系统环境变量和运行环境变量。
INVALIDPARAMETERVALUE_ENVIRONMENTSYSTEMPROTECT = 'InvalidParameterValue.EnvironmentSystemProtect'
# Filters参数错误。
INVALIDPARAMETERVALUE_FILTERS = 'InvalidParameterValue.Filters'
# Function取值与规范不符,请修正后再试。可参考:https://tencentcs.com/5jXKFnBW。
INVALIDPARAMETERVALUE_FUNCTION = 'InvalidParameterValue.Function'
# 函数不存在。
INVALIDPARAMETERVALUE_FUNCTIONNAME = 'InvalidParameterValue.FunctionName'
# GitBranch不符合规范。
INVALIDPARAMETERVALUE_GITBRANCH = 'InvalidParameterValue.GitBranch'
# GitCommitId取值与规范不符,请修正后再试。可参考:https://tencentcs.com/5jXKFnBW。
INVALIDPARAMETERVALUE_GITCOMMITID = 'InvalidParameterValue.GitCommitId'
# GitDirectory不符合规范。
INVALIDPARAMETERVALUE_GITDIRECTORY = 'InvalidParameterValue.GitDirectory'
# GitPassword不符合规范。
INVALIDPARAMETERVALUE_GITPASSWORD = 'InvalidParameterValue.GitPassword'
# GitUrl不符合规范。
INVALIDPARAMETERVALUE_GITURL = 'InvalidParameterValue.GitUrl'
# GitUserName不符合规范。
INVALIDPARAMETERVALUE_GITUSERNAME = 'InvalidParameterValue.GitUserName'
# Handler传入错误。
INVALIDPARAMETERVALUE_HANDLER = 'InvalidParameterValue.Handler'
# IdleTimeOut参数传入错误。
INVALIDPARAMETERVALUE_IDLETIMEOUT = 'InvalidParameterValue.IdleTimeOut'
# imageUri 传入有误。
INVALIDPARAMETERVALUE_IMAGEURI = 'InvalidParameterValue.ImageUri'
# InlineZipFile非法。
INVALIDPARAMETERVALUE_INLINEZIPFILE = 'InvalidParameterValue.InlineZipFile'
# InvokeType取值与规范不符,请修正后再试。
INVALIDPARAMETERVALUE_INVOKETYPE = 'InvalidParameterValue.InvokeType'
# L5Enable取值与规范不符,请修正后再试。
INVALIDPARAMETERVALUE_L5ENABLE = 'InvalidParameterValue.L5Enable'
# LayerName参数传入错误。
INVALIDPARAMETERVALUE_LAYERNAME = 'InvalidParameterValue.LayerName'
# Layers参数传入错误。
INVALIDPARAMETERVALUE_LAYERS = 'InvalidParameterValue.Layers'
# Limit传入错误。
INVALIDPARAMETERVALUE_LIMIT = 'InvalidParameterValue.Limit'
# 参数超出长度限制。
INVALIDPARAMETERVALUE_LIMITEXCEEDED = 'InvalidParameterValue.LimitExceeded'
# Memory取值与规范不符,请修正后再试。可参考:https://tencentcs.com/5jXKFnBW。
INVALIDPARAMETERVALUE_MEMORY = 'InvalidParameterValue.Memory'
# MemorySize错误。
INVALIDPARAMETERVALUE_MEMORYSIZE = 'InvalidParameterValue.MemorySize'
# MinCapacity 参数传入错误。
INVALIDPARAMETERVALUE_MINCAPACITY = 'InvalidParameterValue.MinCapacity'
# Name参数传入错误。
INVALIDPARAMETERVALUE_NAME = 'InvalidParameterValue.Name'
# Namespace参数传入错误。
INVALIDPARAMETERVALUE_NAMESPACE = 'InvalidParameterValue.Namespace'
# 规则不正确,Namespace为英文字母、数字、-_ 符号组成,长度30。
INVALIDPARAMETERVALUE_NAMESPACEINVALID = 'InvalidParameterValue.NamespaceInvalid'
# NodeSpec 参数传入错误。
INVALIDPARAMETERVALUE_NODESPEC = 'InvalidParameterValue.NodeSpec'
# NodeType 参数传入错误。
INVALIDPARAMETERVALUE_NODETYPE = 'InvalidParameterValue.NodeType'
# 偏移量不合法。
INVALIDPARAMETERVALUE_OFFSET = 'InvalidParameterValue.Offset'
# Order传入错误。
INVALIDPARAMETERVALUE_ORDER = 'InvalidParameterValue.Order'
# OrderBy取值与规范不符,请修正后再试。可参考:https://tencentcs.com/5jXKFnBW。
INVALIDPARAMETERVALUE_ORDERBY = 'InvalidParameterValue.OrderBy'
# 入参不是标准的json。
INVALIDPARAMETERVALUE_PARAM = 'InvalidParameterValue.Param'
# ProtocolType参数传入错误。
INVALIDPARAMETERVALUE_PROTOCOLTYPE = 'InvalidParameterValue.ProtocolType'
# 定时预置的cron配置重复。
INVALIDPARAMETERVALUE_PROVISIONTRIGGERCRONCONFIGDUPLICATE = 'InvalidParameterValue.ProvisionTriggerCronConfigDuplicate'
# TriggerName参数传入错误。
INVALIDPARAMETERVALUE_PROVISIONTRIGGERNAME = 'InvalidParameterValue.ProvisionTriggerName'
# TriggerName重复。
INVALIDPARAMETERVALUE_PROVISIONTRIGGERNAMEDUPLICATE = 'InvalidParameterValue.ProvisionTriggerNameDuplicate'
# ProvisionType 参数传入错误。
INVALIDPARAMETERVALUE_PROVISIONTYPE = 'InvalidParameterValue.ProvisionType'
# PublicNetConfig参数错误。
INVALIDPARAMETERVALUE_PUBLICNETCONFIG = 'InvalidParameterValue.PublicNetConfig'
# 不支持的函数版本。
INVALIDPARAMETERVALUE_QUALIFIER = 'InvalidParameterValue.Qualifier'
# 企业版镜像实例ID[RegistryId]传值错误。
INVALIDPARAMETERVALUE_REGISTRYID = 'InvalidParameterValue.RegistryId'
# RetCode不合法。
INVALIDPARAMETERVALUE_RETCODE = 'InvalidParameterValue.RetCode'
# RoutingConfig取值与规范不符,请修正后再试。可参考:https://tencentcs.com/5jXKFnBW。
INVALIDPARAMETERVALUE_ROUTINGCONFIG = 'InvalidParameterValue.RoutingConfig'
# Runtime传入错误。
INVALIDPARAMETERVALUE_RUNTIME = 'InvalidParameterValue.Runtime'
# searchkey 不是 Keyword,Tag 或者 Runtime。
INVALIDPARAMETERVALUE_SEARCHKEY = 'InvalidParameterValue.SearchKey'
# SecretInfo错误。
INVALIDPARAMETERVALUE_SECRETINFO = 'InvalidParameterValue.SecretInfo'
# ServiceName命名不规范。
INVALIDPARAMETERVALUE_SERVICENAME = 'InvalidParameterValue.ServiceName'
# Stamp取值与规范不符,请修正后再试。
INVALIDPARAMETERVALUE_STAMP = 'InvalidParameterValue.Stamp'
# 起始时间传入错误。
INVALIDPARAMETERVALUE_STARTTIME = 'InvalidParameterValue.StartTime'
# 需要同时指定开始日期与结束日期。
INVALIDPARAMETERVALUE_STARTTIMEORENDTIME = 'InvalidParameterValue.StartTimeOrEndTime'
# Status取值与规范不符,请修正后再试。
INVALIDPARAMETERVALUE_STATUS = 'InvalidParameterValue.Status'
# 系统环境变量错误。
INVALIDPARAMETERVALUE_SYSTEMENVIRONMENT = 'InvalidParameterValue.SystemEnvironment'
# 非法的TempCosObjectName。
INVALIDPARAMETERVALUE_TEMPCOSOBJECTNAME = 'InvalidParameterValue.TempCosObjectName'
# TraceEnable取值与规范不符,请修正后再试。
INVALIDPARAMETERVALUE_TRACEENABLE = 'InvalidParameterValue.TraceEnable'
# TrackingTarget 参数输入错误。
INVALIDPARAMETERVALUE_TRACKINGTARGET = 'InvalidParameterValue.TrackingTarget'
# TriggerCronConfig参数传入错误。
INVALIDPARAMETERVALUE_TRIGGERCRONCONFIG = 'InvalidParameterValue.TriggerCronConfig'
# TriggerCronConfig参数定时触发间隔小于指定值。
INVALIDPARAMETERVALUE_TRIGGERCRONCONFIGTIMEINTERVAL = 'InvalidParameterValue.TriggerCronConfigTimeInterval'
# TriggerDesc传入参数错误。
INVALIDPARAMETERVALUE_TRIGGERDESC = 'InvalidParameterValue.TriggerDesc'
# TriggerName传入错误。
INVALIDPARAMETERVALUE_TRIGGERNAME = 'InvalidParameterValue.TriggerName'
# TriggerProvisionedConcurrencyNum参数传入错误。
INVALIDPARAMETERVALUE_TRIGGERPROVISIONEDCONCURRENCYNUM = 'InvalidParameterValue.TriggerProvisionedConcurrencyNum'
# Type传入错误。
INVALIDPARAMETERVALUE_TYPE = 'InvalidParameterValue.Type'
# 开启cfs配置的同时必须开启vpc。
INVALIDPARAMETERVALUE_VPCNOTSETWHENOPENCFS = 'InvalidParameterValue.VpcNotSetWhenOpenCfs'
# WebSocketsParams参数传入错误。
INVALIDPARAMETERVALUE_WEBSOCKETSPARAMS = 'InvalidParameterValue.WebSocketsParams'
# 检测到不是标准的zip文件,请重新压缩后再试。
INVALIDPARAMETERVALUE_ZIPFILE = 'InvalidParameterValue.ZipFile'
# 压缩文件base64解码失败: `Incorrect padding`,请修正后再试。
INVALIDPARAMETERVALUE_ZIPFILEBASE64BINASCIIERROR = 'InvalidParameterValue.ZipFileBase64BinasciiError'
# 别名个数超过最大限制。
LIMITEXCEEDED_ALIAS = 'LimitExceeded.Alias'
# Cdn使用超过最大限制。
LIMITEXCEEDED_CDN = 'LimitExceeded.Cdn'
# eip资源超限。
LIMITEXCEEDED_EIP = 'LimitExceeded.Eip'
# 函数数量超出最大限制 ,可通过[提交工单](https://cloud.tencent.com/act/event/Online_service?from=scf%7Cindex)申请提升限制。
LIMITEXCEEDED_FUNCTION = 'LimitExceeded.Function'
# 同一个主题下的函数超过最大限制。
LIMITEXCEEDED_FUNCTIONONTOPIC = 'LimitExceeded.FunctionOnTopic'
# FunctionProvisionedConcurrencyMemory数量达到限制,可提交工单申请提升限制:https://tencentcs.com/7Fixwt63。
LIMITEXCEEDED_FUNCTIONPROVISIONEDCONCURRENCYMEMORY = 'LimitExceeded.FunctionProvisionedConcurrencyMemory'
# 函数保留并发内存超限。
LIMITEXCEEDED_FUNCTIONRESERVEDCONCURRENCYMEMORY = 'LimitExceeded.FunctionReservedConcurrencyMemory'
# FunctionTotalProvisionedConcurrencyMemory达到限制,可提交工单申请提升限制:https://tencentcs.com/7Fixwt63。
LIMITEXCEEDED_FUNCTIONTOTALPROVISIONEDCONCURRENCYMEMORY = 'LimitExceeded.FunctionTotalProvisionedConcurrencyMemory'
# 函数预置并发总数达到限制。
LIMITEXCEEDED_FUNCTIONTOTALPROVISIONEDCONCURRENCYNUM = 'LimitExceeded.FunctionTotalProvisionedConcurrencyNum'
# InitTimeout达到限制,可提交工单申请提升限制:https://tencentcs.com/7Fixwt63。
LIMITEXCEEDED_INITTIMEOUT = 'LimitExceeded.InitTimeout'
# layer版本数量超出最大限制。
LIMITEXCEEDED_LAYERVERSIONS = 'LimitExceeded.LayerVersions'
# layer数量超出最大限制。
LIMITEXCEEDED_LAYERS = 'LimitExceeded.Layers'
# 内存超出最大限制。
LIMITEXCEEDED_MEMORY = 'LimitExceeded.Memory'
# 函数异步重试配置消息保留时间超过限制。
LIMITEXCEEDED_MSGTTL = 'LimitExceeded.MsgTTL'
# 命名空间数量超过最大限制,可通过[提交工单](https://cloud.tencent.com/act/event/Online_service?from=scf%7Cindex)申请提升限制。
LIMITEXCEEDED_NAMESPACE = 'LimitExceeded.Namespace'
# Offset超出限制。
LIMITEXCEEDED_OFFSET = 'LimitExceeded.Offset'
# 定时预置数量超过最大限制。
LIMITEXCEEDED_PROVISIONTRIGGERACTION = 'LimitExceeded.ProvisionTriggerAction'
# 定时触发间隔小于最大限制。
LIMITEXCEEDED_PROVISIONTRIGGERINTERVAL = 'LimitExceeded.ProvisionTriggerInterval'
# 配额超限。
LIMITEXCEEDED_QUOTA = 'LimitExceeded.Quota'
# 函数异步重试配置异步重试次数超过限制。
LIMITEXCEEDED_RETRYNUM = 'LimitExceeded.RetryNum'
# Timeout超出最大限制。
LIMITEXCEEDED_TIMEOUT = 'LimitExceeded.Timeout'
# 用户并发内存配额超限。
LIMITEXCEEDED_TOTALCONCURRENCYMEMORY = 'LimitExceeded.TotalConcurrencyMemory'
# 触发器数量超出最大限制,可通过[提交工单](https://cloud.tencent.com/act/event/Online_service?from=scf%7Cindex)申请提升限制。
LIMITEXCEEDED_TRIGGER = 'LimitExceeded.Trigger'
# UserTotalConcurrencyMemory达到限制,可提交工单申请提升限制:https://tencentcs.com/7Fixwt63。
LIMITEXCEEDED_USERTOTALCONCURRENCYMEMORY = 'LimitExceeded.UserTotalConcurrencyMemory'
# 缺少参数错误。
MISSINGPARAMETER = 'MissingParameter'
# Code没有传入。
MISSINGPARAMETER_CODE = 'MissingParameter.Code'
# 缺失 Runtime 字段。
MISSINGPARAMETER_RUNTIME = 'MissingParameter.Runtime'
# 资源被占用。
RESOURCEINUSE = 'ResourceInUse'
# Alias已被占用。
RESOURCEINUSE_ALIAS = 'ResourceInUse.Alias'
# Cdn已被占用。
RESOURCEINUSE_CDN = 'ResourceInUse.Cdn'
# Cmq已被占用。
RESOURCEINUSE_CMQ = 'ResourceInUse.Cmq'
# Cos已被占用。
RESOURCEINUSE_COS = 'ResourceInUse.Cos'
# 函数已存在。
RESOURCEINUSE_FUNCTION = 'ResourceInUse.Function'
# FunctionName已存在。
RESOURCEINUSE_FUNCTIONNAME = 'ResourceInUse.FunctionName'
# Layer版本正在使用中。
RESOURCEINUSE_LAYERVERSION = 'ResourceInUse.LayerVersion'
# Namespace已存在。
RESOURCEINUSE_NAMESPACE = 'ResourceInUse.Namespace'
# TriggerName已存在。
RESOURCEINUSE_TRIGGER = 'ResourceInUse.Trigger'
# TriggerName已存在。
RESOURCEINUSE_TRIGGERNAME = 'ResourceInUse.TriggerName'
# COS资源不足。
RESOURCEINSUFFICIENT_COS = 'ResourceInsufficient.COS'
# 资源不存在。
RESOURCENOTFOUND = 'ResourceNotFound'
# 别名不存在。
RESOURCENOTFOUND_ALIAS = 'ResourceNotFound.Alias'
# 未找到指定的AsyncEvent,请创建后再试。
RESOURCENOTFOUND_ASYNCEVENT = 'ResourceNotFound.AsyncEvent'
# Cdn不存在。
RESOURCENOTFOUND_CDN = 'ResourceNotFound.Cdn'
# 指定的cfs下未找到您所指定的挂载点。
RESOURCENOTFOUND_CFSMOUNTINSNOTMATCH = 'ResourceNotFound.CfsMountInsNotMatch'
# 检测cfs状态为不可用。
RESOURCENOTFOUND_CFSSTATUSERROR = 'ResourceNotFound.CfsStatusError'
# cfs与云函数所处vpc不一致。
RESOURCENOTFOUND_CFSVPCNOTMATCH = 'ResourceNotFound.CfsVpcNotMatch'
# Ckafka不存在。
RESOURCENOTFOUND_CKAFKA = 'ResourceNotFound.Ckafka'
# Cmq不存在。
RESOURCENOTFOUND_CMQ = 'ResourceNotFound.Cmq'
# Cos不存在。
RESOURCENOTFOUND_COS = 'ResourceNotFound.Cos'
# The Demo does not exist.
RESOURCENOTFOUND_DEMO = 'ResourceNotFound.Demo'
# The function does not exist.
RESOURCENOTFOUND_FUNCTION = 'ResourceNotFound.Function'
# The function does not exist.
RESOURCENOTFOUND_FUNCTIONNAME = 'ResourceNotFound.FunctionName'
# The function version does not exist.
RESOURCENOTFOUND_FUNCTIONVERSION = 'ResourceNotFound.FunctionVersion'
# Error getting cfs mount point information.
RESOURCENOTFOUND_GETCFSMOUNTINSERROR = 'ResourceNotFound.GetCfsMountInsError'
# Error getting cfs information.
RESOURCENOTFOUND_GETCFSNOTMATCH = 'ResourceNotFound.GetCfsNotMatch'
# The specified ImageConfig was not found. Please create it and try again.
RESOURCENOTFOUND_IMAGECONFIG = 'ResourceNotFound.ImageConfig'
# The layer does not exist.
RESOURCENOTFOUND_LAYER = 'ResourceNotFound.Layer'
# The layer version does not exist.
RESOURCENOTFOUND_LAYERVERSION = 'ResourceNotFound.LayerVersion'
# The Namespace does not exist.
RESOURCENOTFOUND_NAMESPACE = 'ResourceNotFound.Namespace'
# The version (qualifier) does not exist.
RESOURCENOTFOUND_QUALIFIER = 'ResourceNotFound.Qualifier'
# The role does not exist.
RESOURCENOTFOUND_ROLE = 'ResourceNotFound.Role'
# The Role does not exist.
RESOURCENOTFOUND_ROLECHECK = 'ResourceNotFound.RoleCheck'
# The Timer does not exist.
RESOURCENOTFOUND_TIMER = 'ResourceNotFound.Timer'
# The concurrency memory quota resource was not found.
RESOURCENOTFOUND_TOTALCONCURRENCYMEMORY = 'ResourceNotFound.TotalConcurrencyMemory'
# The trigger does not exist.
RESOURCENOTFOUND_TRIGGER = 'ResourceNotFound.Trigger'
# The version does not exist.
RESOURCENOTFOUND_VERSION = 'ResourceNotFound.Version'
# The VPC or subnet does not exist.
RESOURCENOTFOUND_VPC = 'ResourceNotFound.Vpc'
# Insufficient balance. Please top up your account first.
RESOURCEUNAVAILABLE_INSUFFICIENTBALANCE = 'ResourceUnavailable.InsufficientBalance'
# The Namespace is unavailable.
RESOURCEUNAVAILABLE_NAMESPACE = 'ResourceUnavailable.Namespace'
# Unauthorized operation.
UNAUTHORIZEDOPERATION = 'UnauthorizedOperation'
# CAM authentication failed.
UNAUTHORIZEDOPERATION_CAM = 'UnauthorizedOperation.CAM'
# No permission to access the code.
UNAUTHORIZEDOPERATION_CODESECRET = 'UnauthorizedOperation.CodeSecret'
# No permission.
UNAUTHORIZEDOPERATION_CREATETRIGGER = 'UnauthorizedOperation.CreateTrigger'
# The operation is not permitted.
UNAUTHORIZEDOPERATION_DELETEFUNCTION = 'UnauthorizedOperation.DeleteFunction'
# No permission.
UNAUTHORIZEDOPERATION_DELETETRIGGER = 'UnauthorizedOperation.DeleteTrigger'
# The API was not called from the console.
UNAUTHORIZEDOPERATION_NOTMC = 'UnauthorizedOperation.NotMC'
# Region error.
UNAUTHORIZEDOPERATION_REGION = 'UnauthorizedOperation.Region'
# No permission to access your Cos resources.
UNAUTHORIZEDOPERATION_ROLE = 'UnauthorizedOperation.Role'
# The Appid of TempCos does not match the APPID of the requesting account.
UNAUTHORIZEDOPERATION_TEMPCOSAPPID = 'UnauthorizedOperation.TempCosAppid'
# This operation cannot be performed.
UNAUTHORIZEDOPERATION_UPDATEFUNCTIONCODE = 'UnauthorizedOperation.UpdateFunctionCode'
# The operation is not supported.
UNSUPPORTEDOPERATION = 'UnsupportedOperation'
# The resource still has an alias bound, so the current operation is not supported. Please unbind the alias and try again.
UNSUPPORTEDOPERATION_ALIASBIND = 'UnsupportedOperation.AliasBind'
# The specified AsyncRunEnable configuration is not supported yet. Please correct it and try again.
UNSUPPORTEDOPERATION_ASYNCRUNENABLE = 'UnsupportedOperation.AsyncRunEnable'
# Cdn is not supported.
UNSUPPORTEDOPERATION_CDN = 'UnsupportedOperation.Cdn'
# The Cos operation is not supported.
UNSUPPORTEDOPERATION_COS = 'UnsupportedOperation.Cos'
# The specified EipFixed configuration is not supported yet.
UNSUPPORTEDOPERATION_EIPFIXED = 'UnsupportedOperation.EipFixed'
# This region is not supported.
UNSUPPORTEDOPERATION_REGION = 'UnsupportedOperation.Region'
# The Trigger operation is not supported.
UNSUPPORTEDOPERATION_TRIGGER = 'UnsupportedOperation.Trigger'
# The specified configuration is not supported yet. Please correct it and try again.
UNSUPPORTEDOPERATION_UPDATEFUNCTIONEVENTINVOKECONFIG = 'UnsupportedOperation.UpdateFunctionEventInvokeConfig'
# The specified VpcConfig configuration is not supported yet.
UNSUPPORTEDOPERATION_VPCCONFIG = 'UnsupportedOperation.VpcConfig'
| tzpBingo/github-trending | codespace/python/tencentcloud/scf/v20180416/errorcodes.py | Python | mit | 27,390 |
#!/usr/bin/env python
import sys
try:
    import fabricate  # needed for the fabricate.main() call at the bottom of this script
    from RuntimeBuilder import *
    from Sim import *
except ImportError, e:
print "Couldn't find project-utils modules."
sys.exit(1)
MAXFILES = ['Fragmenter.max']
sources = ['fragmenter.c']
target = 'fragmenter'
includes = []
my_cflags = []
my_ldflags = []
b = MaxRuntimeBuilder(maxfiles=MAXFILES)
s = MaxCompilerSim(dfeModel="ISCA")
e = Executor(logPrefix="[%s] " % (target))
def build():
compile()
link()
def compile():
b.slicCompile()
b.compile(sources, extra_cflags=my_cflags)
def link():
b.link(sources, target, extra_ldflags=my_ldflags)
def clean():
b.clean()
def start_sim():
s.start()
def stop_sim():
s.stop()
def restart_sim():
s.start()
def run_sim():
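    # Build, start the simulator, run the target binary, wait for it to finish, then stop the simulator.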
build()
s.start(netConfig=[])
e.execCommand([ "./" + target ])
e.wait()
s.stop()
def maxdebug():
s.maxdebug(MAXFILES)
if __name__ == '__main__':
fabricate.main()
| maxeler/NetworkingCodeExamples | PacketProcessing/Fragmenter/runtime/build.py | Python | bsd-2-clause | 893 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import six
from neutron.api import extensions
from neutron.db import servicetype_db as sdb
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.services import provider_configuration as pconf
LOG = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class ServicePluginBase(extensions.PluginInterface):
"""Define base interface for any Advanced Service plugin."""
supported_extension_aliases = []
@abc.abstractmethod
def get_plugin_type(self):
"""Return one of predefined service types.
See neutron/plugins/common/constants.py
"""
pass
@abc.abstractmethod
def get_plugin_name(self):
"""Return a symbolic name for the plugin.
Each service plugin should have a symbolic name. This name
will be used, for instance, by service definitions in service types
"""
pass
@abc.abstractmethod
def get_plugin_description(self):
"""Return string description of the plugin."""
pass
def load_drivers(service_type, plugin):
"""Loads drivers for specific service.
Passes plugin instance to driver's constructor
"""
service_type_manager = sdb.ServiceTypeManager.get_instance()
providers = (service_type_manager.
get_service_providers(
None,
filters={'service_type': [service_type]})
)
if not providers:
msg = (_("No providers specified for '%s' service, exiting") %
service_type)
LOG.error(msg)
raise SystemExit(msg)
drivers = {}
for provider in providers:
try:
drivers[provider['name']] = importutils.import_object(
provider['driver'], plugin
)
LOG.debug(_("Loaded '%(provider)s' provider for service "
"%(service_type)s"),
{'provider': provider['driver'],
'service_type': service_type})
except ImportError:
LOG.exception(_("Error loading provider '%(provider)s' for "
"service %(service_type)s"),
{'provider': provider['driver'],
'service_type': service_type})
raise
default_provider = None
try:
provider = service_type_manager.get_default_service_provider(
None, service_type)
default_provider = provider['name']
except pconf.DefaultServiceProviderNotFound:
LOG.info(_("Default provider is not specified for service type %s"),
service_type)
return drivers, default_provider
| ntt-sic/neutron | neutron/services/service_base.py | Python | apache-2.0 | 3,414 |
def extractEternalpath(item):
"""
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol or frag) or 'preview' in item['title'].lower():
return None
return False
| fake-name/ReadableWebProxy | WebMirror/management/rss_parser_funcs/feed_parse_extractEternalpath.py | Python | bsd-3-clause | 212 |
import os
import math
import time
import numpy as np
import paddle
import paddle.fluid as fluid
from paddle.fluid.initializer import NormalInitializer
import reader
def load_reverse_dict(dict_path):
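    """Load a vocabulary file and map each line index to its first
    tab-separated field (the token)."""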
return dict((idx, line.strip().split("\t")[0])
for idx, line in enumerate(open(dict_path, "r").readlines()))
def to_lodtensor(data, place):
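    """Pack a batch of variable-length integer sequences into a single
    level-1 LoDTensor on the given place."""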
seq_lens = [len(seq) for seq in data]
cur_len = 0
lod = [cur_len]
for l in seq_lens:
cur_len += l
lod.append(cur_len)
flattened_data = np.concatenate(data, axis=0).astype("int64")
flattened_data = flattened_data.reshape([len(flattened_data), 1])
res = fluid.LoDTensor()
res.set(flattened_data, place)
res.set_lod([lod])
return res
def ner_net(word_dict_len, label_dict_len):
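    """Build the NER network: word/mention embeddings feed forward and
    reverse GRU layers whose merged output drives a linear-chain CRF.
    Returns the average CRF cost, the emission layer and the input layers."""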
IS_SPARSE = False
word_dim = 32
mention_dict_len = 57
mention_dim = 20
grnn_hidden = 36
emb_lr = 5
init_bound = 0.1
    def _net_conf(word, mention, target):
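        """Wire up embeddings, bidirectional GRUs and the CRF cost for one batch."""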
word_embedding = fluid.layers.embedding(
input=word,
size=[word_dict_len, word_dim],
dtype='float32',
is_sparse=IS_SPARSE,
param_attr=fluid.ParamAttr(
learning_rate=emb_lr,
name="word_emb",
initializer=fluid.initializer.Uniform(
low=-init_bound, high=init_bound)))
mention_embedding = fluid.layers.embedding(
input=mention,
size=[mention_dict_len, mention_dim],
dtype='float32',
is_sparse=IS_SPARSE,
param_attr=fluid.ParamAttr(
learning_rate=emb_lr,
name="mention_emb",
initializer=fluid.initializer.Uniform(
low=-init_bound, high=init_bound)))
word_embedding_r = fluid.layers.embedding(
input=word,
size=[word_dict_len, word_dim],
dtype='float32',
is_sparse=IS_SPARSE,
param_attr=fluid.ParamAttr(
learning_rate=emb_lr,
name="word_emb_r",
initializer=fluid.initializer.Uniform(
low=-init_bound, high=init_bound)))
mention_embedding_r = fluid.layers.embedding(
input=mention,
size=[mention_dict_len, mention_dim],
dtype='float32',
is_sparse=IS_SPARSE,
param_attr=fluid.ParamAttr(
learning_rate=emb_lr,
name="mention_emb_r",
initializer=fluid.initializer.Uniform(
low=-init_bound, high=init_bound)))
word_mention_vector = fluid.layers.concat(
input=[word_embedding, mention_embedding], axis=1)
word_mention_vector_r = fluid.layers.concat(
input=[word_embedding_r, mention_embedding_r], axis=1)
pre_gru = fluid.layers.fc(
input=word_mention_vector,
size=grnn_hidden * 3,
param_attr=fluid.ParamAttr(
initializer=fluid.initializer.Uniform(
low=-init_bound, high=init_bound),
regularizer=fluid.regularizer.L2DecayRegularizer(
regularization_coeff=1e-4)))
gru = fluid.layers.dynamic_gru(
input=pre_gru,
size=grnn_hidden,
param_attr=fluid.ParamAttr(
initializer=fluid.initializer.Uniform(
low=-init_bound, high=init_bound),
regularizer=fluid.regularizer.L2DecayRegularizer(
regularization_coeff=1e-4)))
pre_gru_r = fluid.layers.fc(
input=word_mention_vector_r,
size=grnn_hidden * 3,
param_attr=fluid.ParamAttr(
initializer=fluid.initializer.Uniform(
low=-init_bound, high=init_bound),
regularizer=fluid.regularizer.L2DecayRegularizer(
regularization_coeff=1e-4)))
gru_r = fluid.layers.dynamic_gru(
input=pre_gru_r,
size=grnn_hidden,
is_reverse=True,
param_attr=fluid.ParamAttr(
initializer=fluid.initializer.Uniform(
low=-init_bound, high=init_bound),
regularizer=fluid.regularizer.L2DecayRegularizer(
regularization_coeff=1e-4)))
gru_merged = fluid.layers.concat(input=[gru, gru_r], axis=1)
emission = fluid.layers.fc(
size=label_dict_len,
input=gru_merged,
param_attr=fluid.ParamAttr(
initializer=fluid.initializer.Uniform(
low=-init_bound, high=init_bound),
regularizer=fluid.regularizer.L2DecayRegularizer(
regularization_coeff=1e-4)))
crf_cost = fluid.layers.linear_chain_crf(
input=emission,
label=target,
param_attr=fluid.ParamAttr(
name='crfw',
learning_rate=0.2, ))
avg_cost = fluid.layers.mean(x=crf_cost)
return avg_cost, emission
word = fluid.layers.data(name='word', shape=[1], dtype='int64', lod_level=1)
mention = fluid.layers.data(
name='mention', shape=[1], dtype='int64', lod_level=1)
target = fluid.layers.data(
name="target", shape=[1], dtype='int64', lod_level=1)
avg_cost, emission = _net_conf(word, mention, target)
return avg_cost, emission, word, mention, target
def test2(exe, chunk_evaluator, inference_program, test_data, place,
cur_fetch_list):
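    """Evaluate on test_data with the plain Executor and return (precision, recall, f1)."""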
chunk_evaluator.reset()
for data in test_data():
word = to_lodtensor(map(lambda x: x[0], data), place)
mention = to_lodtensor(map(lambda x: x[1], data), place)
target = to_lodtensor(map(lambda x: x[2], data), place)
result_list = exe.run(
inference_program,
feed={"word": word,
"mention": mention,
"target": target},
fetch_list=cur_fetch_list)
number_infer = np.array(result_list[0])
number_label = np.array(result_list[1])
number_correct = np.array(result_list[2])
chunk_evaluator.update(number_infer[0], number_label[0],
number_correct[0])
return chunk_evaluator.eval()
def test(test_exe, chunk_evaluator, inference_program, test_data, place,
cur_fetch_list):
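    """Evaluate on test_data with the ParallelExecutor and return (precision, recall, f1)."""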
chunk_evaluator.reset()
for data in test_data():
word = to_lodtensor(map(lambda x: x[0], data), place)
mention = to_lodtensor(map(lambda x: x[1], data), place)
target = to_lodtensor(map(lambda x: x[2], data), place)
result_list = test_exe.run(
fetch_list=cur_fetch_list,
feed={"word": word,
"mention": mention,
"target": target})
number_infer = np.array(result_list[0])
number_label = np.array(result_list[1])
number_correct = np.array(result_list[2])
chunk_evaluator.update(number_infer.sum(),
number_label.sum(), number_correct.sum())
return chunk_evaluator.eval()
def main(train_data_file, test_data_file, model_save_dir, num_passes):
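    """Build the training program, train for num_passes passes, evaluate after
    each pass and save an inference model snapshot per pass."""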
if not os.path.exists(model_save_dir):
os.mkdir(model_save_dir)
BATCH_SIZE = 256
word_dict_len = 1942563
label_dict_len = 49
main = fluid.Program()
startup = fluid.Program()
with fluid.program_guard(main, startup):
avg_cost, feature_out, word, mention, target = ner_net(word_dict_len,
label_dict_len)
crf_decode = fluid.layers.crf_decoding(
input=feature_out, param_attr=fluid.ParamAttr(name='crfw'))
sgd_optimizer = fluid.optimizer.SGD(learning_rate=1e-3)
sgd_optimizer.minimize(avg_cost)
(precision, recall, f1_score, num_infer_chunks, num_label_chunks,
num_correct_chunks) = fluid.layers.chunk_eval(
input=crf_decode,
label=target,
chunk_scheme="IOB",
num_chunk_types=int(math.ceil((label_dict_len - 1) / 2.0)))
chunk_evaluator = fluid.metrics.ChunkEvaluator()
inference_program = fluid.default_main_program().clone()
with fluid.program_guard(inference_program):
inference_program = fluid.io.get_inference_program(
[num_infer_chunks, num_label_chunks, num_correct_chunks])
train_reader = paddle.batch(
paddle.reader.shuffle(
reader.file_reader(train_data_file), buf_size=2000000),
batch_size=BATCH_SIZE)
test_reader = paddle.batch(
paddle.reader.shuffle(
reader.file_reader(test_data_file), buf_size=2000000),
batch_size=BATCH_SIZE)
place = fluid.CUDAPlace(0)
feeder = fluid.DataFeeder(
feed_list=[word, mention, target], place=place)
exe = fluid.Executor(place)
exe.run(startup)
train_exe = fluid.ParallelExecutor(
loss_name=avg_cost.name, use_cuda=True)
test_exe = fluid.ParallelExecutor(
use_cuda=True,
main_program=inference_program,
share_vars_from=train_exe)
batch_id = 0
for pass_id in xrange(num_passes):
chunk_evaluator.reset()
train_reader_iter = train_reader()
start_time = time.time()
while True:
try:
cur_batch = next(train_reader_iter)
cost, nums_infer, nums_label, nums_correct = train_exe.run(
fetch_list=[
avg_cost.name, num_infer_chunks.name,
num_label_chunks.name, num_correct_chunks.name
],
feed=feeder.feed(cur_batch))
chunk_evaluator.update(
np.array(nums_infer).sum(),
np.array(nums_label).sum(),
np.array(nums_correct).sum())
cost_list = np.array(cost)
batch_id += 1
except StopIteration:
break
end_time = time.time()
print("pass_id:" + str(pass_id) + ", time_cost:" + str(
end_time - start_time) + "s")
precision, recall, f1_score = chunk_evaluator.eval()
print("[Train] precision:" + str(precision) + ", recall:" + str(
recall) + ", f1:" + str(f1_score))
p, r, f1 = test2(
exe, chunk_evaluator, inference_program, test_reader, place,
[num_infer_chunks, num_label_chunks, num_correct_chunks])
print("[Test] precision:" + str(p) + ", recall:" + str(r) + ", f1:"
+ str(f1))
save_dirname = os.path.join(model_save_dir,
"params_pass_%d" % pass_id)
fluid.io.save_inference_model(save_dirname, ['word', 'mention'],
[crf_decode], exe)
if __name__ == "__main__":
main(
train_data_file="./data/train_files",
test_data_file="./data/test_files",
model_save_dir="./output",
num_passes=1000)
| qingqing01/models | fluid/chinese_ner/train.py | Python | apache-2.0 | 11,344 |
# -*- coding: utf-8 -*-
from navmazing import NavigateToAttribute
from widgetastic.widget import Text, Checkbox, Table, View
from widgetastic_patternfly import Tab, BootstrapSelect
from widgetastic_manageiq import TimelinesChart
from utils.update import Updateable
from utils.pretty import Pretty
from utils.appliance import Navigatable
from utils.appliance.implementations.ui import navigator, CFMENavigateStep
from . import BottlenecksView
class BottlenecksTabsView(BottlenecksView):
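    """View for the region Bottlenecks page with its Summary and Report tabs."""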
title = Text("#explorer_title_text")
# TODO: add chart widget
@property
def is_displayed(self):
return (
            super(BottlenecksTabsView, self).is_displayed and
self.title.text == 'Region "Region {}" Bottlenecks Summary'
.format(self.browser.appliance.server_region()) and
self.bottlenecks.is_opened and
self.bottlenecks.tree.currently_selected == ["Bottlenecks"])
@View.nested
class summary(Tab): # noqa
TAB_NAME = 'Summary'
event_groups = BootstrapSelect('tl_summ_fl_grp1')
show_host_events = Checkbox(locator='//input[@name="tl_summ_hosts"]')
time_zone = BootstrapSelect("tl_summ_tz")
chart = TimelinesChart(locator='//div/*[@class="timeline-pf-chart"]')
@View.nested
class report(Tab): # noqa
TAB_NAME = 'Report'
event_details = Table("//div[@id='bottlenecks_report_div']/table")
event_groups = BootstrapSelect('tl_report_fl_grp1')
show_host_events = Checkbox(locator='//input[@name="tl_report_hosts"]')
time_zone = BootstrapSelect("tl_report_tz")
class Bottlenecks(Updateable, Pretty, Navigatable):
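    """Navigable entity representing the Bottlenecks page."""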
def __init__(self, appliance=None):
Navigatable.__init__(self, appliance)
@navigator.register(Bottlenecks, 'All')
class All(CFMENavigateStep):
prerequisite = NavigateToAttribute('appliance.server', 'Bottlenecks')
VIEW = BottlenecksTabsView
def resetter(self):
""" Set values to default """
self.view.report.event_groups.fill('<ALL>')
self.view.report.show_host_events.fill(False)
self.view.report.time_zone.fill('(GMT+00:00) UTC')
self.view.summary.event_groups.fill('<ALL>')
self.view.summary.show_host_events.fill(False)
self.view.summary.time_zone.fill('(GMT+00:00) UTC')
| dajohnso/cfme_tests | cfme/optimize/bottlenecks.py | Python | gpl-2.0 | 2,335 |