code stringlengths 3 to 1.05M | repo_name stringlengths 5 to 104 | path stringlengths 4 to 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 to 1.05M |
---|---|---|---|---|---|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from __future__ import unicode_literals
from setuptools import setup
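# Environment-marker extra: install the enum34 backport only on interpreters
# older than Python 3.4, where the standard-library enum module is missing.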
setup(extras_require={':python_version < "3.4"': ['enum34']})
| madsmtm/fbchat | setup.py | Python | bsd-3-clause | 180 |
"""
sentry.models.tagvalue
~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
from django.core.urlresolvers import reverse
from django.db import models
from django.utils import timezone
from sentry.constants import MAX_TAG_KEY_LENGTH, MAX_TAG_VALUE_LENGTH
from sentry.db.models import (
Model, BoundedPositiveIntegerField, FlexibleForeignKey, GzippedDictField,
BaseManager, sane_repr
)
from sentry.utils.http import absolute_uri
class TagValue(Model):
"""
Stores references to available filters.
"""
__core__ = False
project = FlexibleForeignKey('sentry.Project', null=True)
key = models.CharField(max_length=MAX_TAG_KEY_LENGTH)
value = models.CharField(max_length=MAX_TAG_VALUE_LENGTH)
data = GzippedDictField(blank=True, null=True)
times_seen = BoundedPositiveIntegerField(default=0)
last_seen = models.DateTimeField(
default=timezone.now, db_index=True, null=True)
first_seen = models.DateTimeField(
default=timezone.now, db_index=True, null=True)
objects = BaseManager()
class Meta:
app_label = 'sentry'
db_table = 'sentry_filtervalue'
unique_together = (('project', 'key', 'value'),)
__repr__ = sane_repr('project_id', 'key', 'value')
def get_label(self):
# HACK(dcramer): quick and dirty way to hack in better display states
if self.key == 'sentry:user':
return self.data.get('email') or self.value
elif self.key == 'sentry:function':
return '%s in %s' % (self.data['function'], self.data['filename'])
elif self.key == 'sentry:filename':
return self.data['filename']
elif self.key == 'sentry:release' and len(self.value) == 40:
return self.value[:12]
return self.value
def get_absolute_url(self):
# HACK(dcramer): quick and dirty way to support code/users
if self.key == 'sentry:user':
url_name = 'sentry-user-details'
elif self.key == 'sentry:filename':
url_name = 'sentry-explore-code-details'
elif self.key == 'sentry:function':
url_name = 'sentry-explore-code-details-by-function'
        else:
            url_name = 'sentry-explore-tag-value'
            return absolute_uri(reverse(url_name, args=[
                self.project.organization.slug, self.project.slug, self.key, self.id]))
        return absolute_uri(reverse(url_name, args=[
            self.project.organization.slug, self.project.slug, self.id]))
| imankulov/sentry | src/sentry/models/tagvalue.py | Python | bsd-3-clause | 2,667 |
"""
sentry.plugins.base.structs
~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2013 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
__all__ = ['ReleaseHook']
from sentry.models import Commit, Release
from sentry.plugins import ReleaseHook
from sentry.testutils import TestCase
class StartReleaseTest(TestCase):
def test_minimal(self):
project = self.create_project()
version = 'bbee5b51f84611e4b14834363b8514c2'
hook = ReleaseHook(project)
hook.start_release(version)
release = Release.objects.get(
project=project,
version=version,
)
assert release.date_started
assert release.organization
def test_update_release(self):
project = self.create_project()
version = 'bbee5b51f84611e4b14834363b8514c2'
Release.objects.create(project=project, version=version)
hook = ReleaseHook(project)
hook.start_release(version)
release = Release.objects.get(
project=project,
version=version,
)
assert release.date_started
assert release.organization == project.organization
class FinishReleaseTest(TestCase):
def test_minimal(self):
project = self.create_project()
version = 'bbee5b51f84611e4b14834363b8514c2'
hook = ReleaseHook(project)
hook.finish_release(version)
release = Release.objects.get(
project=project,
version=version,
)
assert release.date_released
assert release.organization
def test_update_release(self):
project = self.create_project()
version = 'bbee5b51f84611e4b14834363b8514c2'
Release.objects.create(project=project, version=version)
hook = ReleaseHook(project)
hook.start_release(version)
release = Release.objects.get(
project=project,
version=version,
)
assert release.date_started
assert release.organization == project.organization
class SetCommitsTest(TestCase):
def test_minimal(self):
project = self.create_project()
version = 'bbee5b51f84611e4b14834363b8514c2'
data_list = [
{
'id': 'c7155651831549cf8a5e47889fce17eb',
'message': 'foo',
'author_email': '[email protected]',
},
{
'id': 'bbee5b51f84611e4b14834363b8514c2',
'message': 'bar',
'author_name': 'Joe^^',
},
]
hook = ReleaseHook(project)
hook.set_commits(version, data_list)
release = Release.objects.get(
project=project,
version=version,
)
commit_list = list(Commit.objects.filter(
releasecommit__release=release,
).select_related(
'author',
).order_by('releasecommit__order'))
assert len(commit_list) == 2
assert commit_list[0].key == 'c7155651831549cf8a5e47889fce17eb'
assert commit_list[0].message == 'foo'
assert commit_list[0].author.name is None
assert commit_list[0].author.email == '[email protected]'
assert commit_list[1].key == 'bbee5b51f84611e4b14834363b8514c2'
assert commit_list[1].message == 'bar'
assert commit_list[1].author.name == 'Joe^^'
assert commit_list[1].author.email == 'joe@localhost'
| zenefits/sentry | tests/sentry/plugins/interfaces/test_releasehook.py | Python | bsd-3-clause | 3,557 |
from django.core.exceptions import PermissionDenied
from django.utils.translation import ugettext as _
def set_publish(modeladmin, request, queryset):
if not request.user.is_staff:
raise PermissionDenied
queryset.update(publish=True)
set_publish.short_description = _("Set the publish flag for the selected site paths")
def clear_publish(modeladmin, request, queryset):
if not request.user.is_staff:
raise PermissionDenied
queryset.update(publish=False)
clear_publish.short_description = _("Clear the publish flag for the selected site paths")
def set_sitemap(modeladmin, request, queryset):
if not request.user.is_staff:
raise PermissionDenied
queryset.update(sitemap=True)
set_sitemap.short_description = _("Set the sitemap flag for the selected site paths")
def clear_sitemap(modeladmin, request, queryset):
if not request.user.is_staff:
raise PermissionDenied
queryset.update(sitemap=False)
clear_sitemap.short_description = _("Clear the sitemap flag for the selected site paths")
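# These action functions are intended to be listed in a ModelAdmin's
# ``actions`` attribute, e.g. actions = [set_publish, clear_publish,
# set_sitemap, clear_sitemap] (illustrative; the admin registration lives
# outside this module).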
| StuartMacKay/django-sitepaths | sitepaths/actions.py | Python | bsd-3-clause | 1,062 |
import urllib.request
from user_define_cookie import UserDefineCookie
class XueqiuApi(object):
def __init__(self, name=None):
self.name = name
self.__req_url = 'https://xueqiu.com/stock/screener/screen.json?category=SH&exchange=&areacode=&indcode=&_=1425870008963'
    # Trailing (dynamic) P/E ratio
def append_pettm(self, start='0', end='20'):
self.__req_url = self.__req_url + '&pettm=' + start + '_' + end
    # Price-to-book ratio
def append_pb(self, start='0', end='30'):
self.__req_url = self.__req_url + '&pb=' + start + '_' + end
    # Dividend yield
def append_dy(self, start='0', end='100'):
self.__req_url = self.__req_url + '&dy=' + start + '_' + end
    # Return on equity (diluted)
def append_roediluted(self, time, start='5', end='100', is_order_by_this=False):
key_name = 'roediluted.' + time
self.__req_url = self.__req_url + '&' + key_name + '=' + start + '_' + end
if is_order_by_this:
self.__req_url = self.__req_url + '&orderby=' + key_name + '&order=desc'
    # Gross profit margin
def append_gross(self, time, start='0', end='100'):
key_name = 'sgpr.' + time
self.__req_url = self.__req_url + '&' + key_name + '=' + start + '_' + end
    # Net profit margin
def append_interest(self, time, start='0', end='100'):
key_name = 'snpr.' + time
self.__req_url = self.__req_url + '&' + key_name + '=' + start + '_' + end
    # Revenue growth
def append_income_grow(self, time, start='-30', end='500'):
key_name = 'mbig.' + time
self.__req_url = self.__req_url + '&' + key_name + '=' + start + '_' + end
    # Profit growth
def append_profie_grow(self, time, start='-30', end='500'):
key_name = 'nig.' + time
self.__req_url = self.__req_url + '&' + key_name + '=' + start + '_' + end
    # Debt-to-asset ratio
def append_debt_assert_rate(self, time, start='0', end='100'):
key_name = 'dar.' + time
self.__req_url = self.__req_url + '&' + key_name + '=' + start + '_' + end
    # Price change within one year
def append_pct_rate(self, duration='1y', start='-100', end='100'):
key_name = 'pct' + duration
self.__req_url = self.__req_url + '&' + key_name + '=' + start + '_' + end
def get_req_url(self):
return self.__req_url
def submit_req(self, page=1):
headers = XueqiuApi.get_req_headers()
url = self.get_req_url()
print('---req:\n' + url)
if page > 1:
url += '&page=%d' % page
req = urllib.request.Request(url, headers=headers)
content = urllib.request.urlopen(req).read().decode("utf8")
return content
@staticmethod
def get_req_headers():
return {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.98 Safari/537.36',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Cookie': UserDefineCookie.get_xueqiu_cookie(),
}
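# Hypothetical usage sketch (not part of the original module): chain a few
# filter builders, then fetch the first page of screener results. Assumes a
# valid Xueqiu cookie is returned by UserDefineCookie.get_xueqiu_cookie().
if __name__ == '__main__':
    api = XueqiuApi('low_pe_screen')
    api.append_pettm('0', '15')    # trailing P/E between 0 and 15
    api.append_pb('0', '2')        # price-to-book below 2
    api.append_roediluted('20161231', start='10', is_order_by_this=True)
    print(api.submit_req(page=1))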
| pinguo-chexing/stock_discover | xueqiu_api.py | Python | apache-2.0 | 3,047 |
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 29 20:44:27 2017
@author: finn
"""
import cv2
import sys
import cvutil as u
cap = u.source_video(sys.argv)
while(True):
_, frame = cap.read()
cv2.imshow('frame',frame)
k = cv2.waitKey(5) & 0xFF
if k == ord('q'):
break
cv2.destroyAllWindows()
| finnhacks42/opencv-play | templates/display_video.py | Python | mit | 328 |
# -*- coding: utf-8 -*-
"""
Module for the generation of docx format documents.
---
type:
python_module
validation_level:
v00_minimum
protection:
k00_public
copyright:
"Copyright 2016 High Integrity Artificial Intelligence Systems"
license:
"Licensed under the Apache License, Version 2.0 (the License);
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an AS IS BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."
...
"""
import jinja2
# -----------------------------------------------------------------------------
def build(_, section_list, filepath):
"""
Build and save the specified document.
"""
environment = jinja2.Environment(
loader = jinja2.PackageLoader(
'da.report', 'templates'),
trim_blocks = True,
lstrip_blocks = True)
template = environment.get_template('engineering_document.template.html')
# Filter out empty sections
filtered_list = []
for section in section_list:
if section['level'] != 1 and len(section['para']) == 0:
continue
filtered_list.append(section)
html = template.render( # pylint: disable=E1101
section_list = filtered_list)
with open(filepath, 'wt') as file:
file.write(html)
# _add_title_section(document, doc_data['_metadata'])
# _add_toc_section(document)
# for item in sorted(_generate_content_items(doc_data),
# key = _doc_data_sortkey):
# if item['section_level'] == 1:
# _add_content_section(document)
# if 0 < len(item['paragraph_list']):
# _add_content_para(document,
# level = item['section_level'],
# title = item['section_title'],
# type = item['section_type'],
# content = item['paragraph_list'])
# else:
# print('Skipping section: ' + item['section_title'])
# # Save the document.
# da.util.ensure_dir_exists(os.path.dirname(filepath))
# document.save(filepath)
| wtpayne/hiai | a3_src/h70_internal/da/report/html_builder.py | Python | apache-2.0 | 2,607 |
from nose.tools import *
from networkx.utils import reverse_cuthill_mckee_ordering
import networkx as nx
def test_reverse_cuthill_mckee():
# example graph from
# http://www.boost.org/doc/libs/1_37_0/libs/graph/example/cuthill_mckee_ordering.cpp
G = nx.Graph([(0,3),(0,5),(1,2),(1,4),(1,6),(1,9),(2,3),
(2,4),(3,5),(3,8),(4,6),(5,6),(5,7),(6,7)])
rcm = list(reverse_cuthill_mckee_ordering(G))
assert_equal(rcm,[0, 8, 5, 7, 3, 6, 4, 2, 1, 9])
def test_rcm_alternate_heuristic():
# example from
G = nx.Graph([(0, 0),
(0, 4),
(1, 1),
(1, 2),
(1, 5),
(1, 7),
(2, 2),
(2, 4),
(3, 3),
(3, 6),
(4, 4),
(5, 5),
(5, 7),
(6, 6),
(7, 7)])
answer = [6,3,7,5,1,2,4,0]
def smallest_degree(G):
node,deg = sorted(G.degree().items(), key = lambda x:x[1])[0]
return node
rcm = list(reverse_cuthill_mckee_ordering(G, heuristic=smallest_degree))
assert_equal(rcm, answer)
| ReganBell/QReview | networkx/utils/tests/test_rcm.py | Python | bsd-3-clause | 1,186 |
from datetime import datetime, timedelta
import logging
import traceback
from openerp import api, models, fields, tools, SUPERUSER_ID
from openerp.addons.booking_calendar.models import SLOT_START_DELAY_MINS, SLOT_DURATION_MINS
_logger = logging.getLogger(__name__)
class pitch_booking_venue(models.Model):
_name = 'pitch_booking.venue'
name = fields.Char('Name')
company_id = fields.Many2one('res.company', 'Company')
class pitch_booking_pitch(models.Model):
_name = 'pitch_booking.pitch'
_inherits = {'resource.resource': 'resource_id'}
_defaults = {
'to_calendar': True,
}
venue_id = fields.Many2one('pitch_booking.venue', required=True)
resource_id = fields.Many2one('resource.resource', ondelete='cascade', required=True)
class sale_order_line(models.Model):
_inherit = 'sale.order.line'
venue_id = fields.Many2one('pitch_booking.venue', string='Venue', related='product_id.venue_id')
pitch_id = fields.Many2one('pitch_booking.pitch', string='Pitch')
resource_id = fields.Many2one('resource.resource', 'Resource', related='pitch_id.resource_id', store=True)
@api.onchange('resource_id')
def _on_change_resource(self):
if self.resource_id:
pitch = self.env['pitch_booking.pitch'].search([('resource_id','=',self.resource_id.id)])
if pitch:
self.pitch_id = pitch[0].id
@api.onchange('pitch_id')
def _on_change_pitch(self):
if self.pitch_id:
self.venue_id = self.pitch_id.venue_id.id
@api.model
def _prepare_order_line_invoice_line(self, line, account_id=False):
res = super(sale_order_line, self)._prepare_order_line_invoice_line(line, account_id)
res.update({
'venue_id': line.venue_id.id,
'pitch_id': line.pitch_id.id,
'booking_start': line.booking_start,
'booking_end': line.booking_end
})
return res
@api.model
def get_resources(self, venue_id, pitch_id):
pitch_obj = self.env['pitch_booking.pitch'].sudo()
venue_obj = self.env['pitch_booking.venue'].sudo()
if not venue_id:
venues = venue_obj.search([])
venue_id = venues[0].id if venues else None
resources = []
if pitch_id:
resources = [pitch_obj.browse(int(pitch_id)).resource_id]
elif venue_id:
resources = [p.resource_id for p in pitch_obj.search([('venue_id','=',int(venue_id))])]
return [{
'name': r.name,
'id': r.id,
'color': r.color
} for r in resources]
@api.model
def get_free_slots(self, start, end, offset, domain):
start_dt = datetime.strptime(start, '%Y-%m-%d %H:%M:%S') - timedelta(minutes=offset)
fixed_start_dt = start_dt
end_dt = datetime.strptime(end, '%Y-%m-%d %H:%M:%S') - timedelta(minutes=offset)
pitch_domain = []
for cond in domain:
if type(cond) in (tuple, list):
                if cond[0] == 'venue_id':
                    pitch_domain.append(tuple(cond))
                elif cond[0] == 'pitch_id':
                    pitch_domain.append(('name', cond[1], cond[2]))
        pitch_domain.append(('to_calendar', '=', True))
resources = self.env['pitch_booking.pitch'].search(pitch_domain)
slots = {}
now = datetime.now() - timedelta(minutes=SLOT_START_DELAY_MINS) - timedelta(minutes=offset)
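        # First pass: pre-populate a free slot per resource for every
        # SLOT_DURATION_MINS interval in the requested window; slots covered
        # by existing bookings are removed in the second pass below.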
while start_dt < end_dt:
if start_dt < now:
start_dt += timedelta(minutes=SLOT_DURATION_MINS)
continue
for r in resources:
if not r.id in slots:
slots[r.id] = {}
slots[r.id][start_dt.strftime('%Y-%m-%d %H:%M:%S')] = {
'start': start_dt.strftime('%Y-%m-%d %H:%M:%S'),
'end': (start_dt + timedelta(minutes=SLOT_DURATION_MINS)).strftime('%Y-%m-%d %H:%M:%S'),
'title': r.name,
'color': r.color,
'className': 'free_slot resource_%s' % r.id,
'editable': False,
'resource_id': r.resource_id.id
}
start_dt += timedelta(minutes=SLOT_DURATION_MINS)
lines = self.search_booking_lines(start, end, [('pitch_id', 'in', [r['id'] for r in resources])])
for l in lines:
line_start_dt = datetime.strptime(l.booking_start, '%Y-%m-%d %H:%M:00') - timedelta(minutes=offset)
line_start_dt -= timedelta(minutes=divmod(line_start_dt.minute, SLOT_DURATION_MINS)[1])
line_end_dt = datetime.strptime(l.booking_end, '%Y-%m-%d %H:%M:%S') - timedelta(minutes=offset)
while line_start_dt < line_end_dt:
if line_start_dt >= end_dt:
break
elif line_start_dt < fixed_start_dt or line_start_dt < now:
line_start_dt += timedelta(minutes=SLOT_DURATION_MINS)
continue
try:
del slots[l.pitch_id.id][line_start_dt.strftime('%Y-%m-%d %H:%M:%S')]
except:
_logger.warning('cannot free slot %s %s' % (
l.pitch_id.id,
line_start_dt.strftime('%Y-%m-%d %H:%M:%S')
))
line_start_dt += timedelta(minutes=SLOT_DURATION_MINS)
res = []
for slot in slots.values():
for pitch in slot.values():
res.append(pitch)
return res
class account_invoice_line(models.Model):
_inherit = 'account.invoice.line'
venue_id = fields.Many2one('pitch_booking.venue', string='Venue')
pitch_id = fields.Many2one('pitch_booking.pitch', string='Pitch')
booking_start = fields.Datetime(string="Date start")
booking_end = fields.Datetime(string="Date end")
class product_template(models.Model):
_inherit = 'product.template'
venue_id = fields.Many2one('pitch_booking.venue', string='Venue')
class sale_order(models.Model):
_inherit = 'sale.order'
@api.multi
def _add_booking_line(self, product_id, resource, start, end):
if resource:
for rec in self:
line = super(sale_order, rec)._add_booking_line(product_id, resource, start, end)
sol = rec.env['sale.order.line'].sudo()
pitch_obj = rec.env['pitch_booking.pitch'].sudo()
pitchs = pitch_obj.search([('resource_id','=',resource)], limit=1)
if pitchs:
line.write({
'pitch_id': pitchs[0].id,
'venue_id': pitchs[0].venue_id.id
})
return line
| ufaks/addons-yelizariev | pitch_booking/models.py | Python | lgpl-3.0 | 6,779 |
"""
Utilities for validating inputs to user-facing API functions.
"""
from textwrap import dedent
from types import CodeType
from functools import wraps
from inspect import getargspec
from uuid import uuid4
from toolz.curried.operator import getitem
from six import viewkeys, exec_, PY3
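# Positional argument order expected by the CodeType constructor; Python 3
# adds co_kwonlyargcount immediately after co_argcount.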
_code_argorder = (
('co_argcount', 'co_kwonlyargcount') if PY3 else ('co_argcount',)
) + (
'co_nlocals',
'co_stacksize',
'co_flags',
'co_code',
'co_consts',
'co_names',
'co_varnames',
'co_filename',
'co_name',
'co_firstlineno',
'co_lnotab',
'co_freevars',
'co_cellvars',
)
NO_DEFAULT = object()
def preprocess(*_unused, **processors):
"""
Decorator that applies pre-processors to the arguments of a function before
calling the function.
Parameters
----------
**processors : dict
Map from argument name -> processor function.
A processor function takes three arguments: (func, argname, argvalue).
`func` is the the function for which we're processing args.
`argname` is the name of the argument we're processing.
`argvalue` is the value of the argument we're processing.
Examples
--------
>>> def _ensure_tuple(func, argname, arg):
... if isinstance(arg, tuple):
    ...         return arg
... try:
... return tuple(arg)
... except TypeError:
... raise TypeError(
... "%s() expected argument '%s' to"
... " be iterable, but got %s instead." % (
... func.__name__, argname, arg,
... )
... )
...
>>> @preprocess(arg=_ensure_tuple)
... def foo(arg):
... return arg
...
>>> foo([1, 2, 3])
(1, 2, 3)
>>> foo("a")
('a',)
>>> foo(2)
Traceback (most recent call last):
...
TypeError: foo() expected argument 'arg' to be iterable, but got 2 instead.
"""
if _unused:
raise TypeError("preprocess() doesn't accept positional arguments")
def _decorator(f):
args, varargs, varkw, defaults = argspec = getargspec(f)
if defaults is None:
defaults = ()
no_defaults = (NO_DEFAULT,) * (len(args) - len(defaults))
args_defaults = list(zip(args, no_defaults + defaults))
if varargs:
args_defaults.append((varargs, NO_DEFAULT))
if varkw:
args_defaults.append((varkw, NO_DEFAULT))
argset = set(args) | {varargs, varkw} - {None}
# Arguments can be declared as tuples in Python 2.
if not all(isinstance(arg, str) for arg in args):
raise TypeError(
"Can't validate functions using tuple unpacking: %s" %
(argspec,)
)
# Ensure that all processors map to valid names.
bad_names = viewkeys(processors) - argset
if bad_names:
raise TypeError(
"Got processors for unknown arguments: %s." % bad_names
)
return _build_preprocessed_function(
f, processors, args_defaults, varargs, varkw,
)
return _decorator
def call(f):
"""
Wrap a function in a processor that calls `f` on the argument before
passing it along.
Useful for creating simple arguments to the `@preprocess` decorator.
Parameters
----------
f : function
Function accepting a single argument and returning a replacement.
Examples
--------
>>> @preprocess(x=call(lambda x: x + 1))
... def foo(x):
... return x
...
>>> foo(1)
2
"""
@wraps(f)
def processor(func, argname, arg):
return f(arg)
return processor
def _build_preprocessed_function(func,
processors,
args_defaults,
varargs,
varkw):
"""
Build a preprocessed function with the same signature as `func`.
Uses `exec` internally to build a function that actually has the same
    signature as `func`.
"""
format_kwargs = {'func_name': func.__name__}
def mangle(name):
return 'a' + uuid4().hex + name
format_kwargs['mangled_func'] = mangled_funcname = mangle(func.__name__)
def make_processor_assignment(arg, processor_name):
template = "{arg} = {processor}({func}, '{arg}', {arg})"
return template.format(
arg=arg,
processor=processor_name,
func=mangled_funcname,
)
exec_globals = {mangled_funcname: func, 'wraps': wraps}
defaults_seen = 0
default_name_template = 'a' + uuid4().hex + '_%d'
signature = []
call_args = []
assignments = []
star_map = {
varargs: '*',
varkw: '**',
}
def name_as_arg(arg):
return star_map.get(arg, '') + arg
for arg, default in args_defaults:
if default is NO_DEFAULT:
signature.append(name_as_arg(arg))
else:
default_name = default_name_template % defaults_seen
exec_globals[default_name] = default
signature.append('='.join([name_as_arg(arg), default_name]))
defaults_seen += 1
if arg in processors:
procname = mangle('_processor_' + arg)
exec_globals[procname] = processors[arg]
assignments.append(make_processor_assignment(arg, procname))
call_args.append(name_as_arg(arg))
exec_str = dedent(
"""\
@wraps({wrapped_funcname})
def {func_name}({signature}):
{assignments}
return {wrapped_funcname}({call_args})
"""
).format(
func_name=func.__name__,
signature=', '.join(signature),
assignments='\n '.join(assignments),
wrapped_funcname=mangled_funcname,
call_args=', '.join(call_args),
)
compiled = compile(
exec_str,
func.__code__.co_filename,
mode='exec',
)
exec_locals = {}
exec_(compiled, exec_globals, exec_locals)
new_func = exec_locals[func.__name__]
code = new_func.__code__
args = {
attr: getattr(code, attr)
for attr in dir(code)
if attr.startswith('co_')
}
# Copy the firstlineno out of the underlying function so that exceptions
# get raised with the correct traceback.
# This also makes dynamic source inspection (like IPython `??` operator)
# work as intended.
try:
# Try to get the pycode object from the underlying function.
original_code = func.__code__
except AttributeError:
try:
# The underlying callable was not a function, try to grab the
# `__func__.__code__` which exists on method objects.
original_code = func.__func__.__code__
except AttributeError:
# The underlying callable does not have a `__code__`. There is
# nothing for us to correct.
return new_func
args['co_firstlineno'] = original_code.co_firstlineno
new_func.__code__ = CodeType(*map(getitem(args), _code_argorder))
return new_func
| bartosh/zipline | zipline/utils/preprocess.py | Python | apache-2.0 | 7,205 |
import os.path
class ProjectReport(object):
def __init__(self, project: 'Project'):
self.project = {
"target": project.target,
"location": os.path.abspath(project.location),
"name": project.name,
"templates": [{"name": t.name, "version": t.version, "origin": t.origin} for t in
project.templates.values()]
}
def __str__(self):
import tabulate
        s = f'PROS Project for {self.project["target"]} at: {self.project["location"]}'
        if self.project["name"]:
            s += f' ({self.project["name"]})'
s += '\n'
rows = [t.values() for t in self.project["templates"]]
headers = [h.capitalize() for h in self.project["templates"][0].keys()]
s += tabulate.tabulate(rows, headers=headers)
return s
def __getstate__(self):
return self.__dict__
| purduesigbots/pros-cli | pros/conductor/project/ProjectReport.py | Python | mpl-2.0 | 905 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Document'
db.create_table('projects_document', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=100)),
('description', self.gf('django.db.models.fields.TextField')()),
('file', self.gf('django.db.models.fields.files.FileField')(max_length=100)),
('project', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['projects.Project'])),
('uploaded_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
))
db.send_create_signal('projects', ['Document'])
def backwards(self, orm):
# Deleting model 'Document'
db.delete_table('projects_document')
models = {
'projects.document': {
'Meta': {'object_name': 'Document'},
'description': ('django.db.models.fields.TextField', [], {}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['projects.Project']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'projects.project': {
'Meta': {'object_name': 'Project'},
'description': ('django.db.models.fields.TextField', [], {}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'sponsor': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'start_date': ('django.db.models.fields.DateField', [], {}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['projects']
| nsi-iff/nsi_site | apps/projects/migrations/0003_auto__add_document.py | Python | mit | 2,496 |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat.common.i18n import _
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.engine import support
class BayModel(resource.Resource):
"""A resource for the BayModel in Magnum."""
support_status = support.SupportStatus(version='5.0.0')
PROPERTIES = (
NAME, IMAGE, FLAVOR, MASTER_FLAVOR, KEYPAIR,
EXTERNAL_NETWORK, FIXED_NETWORK, DNS_NAMESERVER,
DOCKER_VOLUME_SIZE, SSH_AUTHORIZED_KEY, COE, NETWORK_DRIVER,
HTTP_PROXY, HTTPS_PROXY, NO_PROXY, LABELS, TLS_DISABLED
) = (
'name', 'image', 'flavor', 'master_flavor', 'keypair',
'external_network', 'fixed_network', 'dns_nameserver',
'docker_volume_size', 'ssh_authorized_key', 'coe', 'network_driver',
'http_proxy', 'https_proxy', 'no_proxy', 'labels', 'tls_disabled'
)
properties_schema = {
NAME: properties.Schema(
properties.Schema.STRING,
_('The bay model name.'),
),
IMAGE: properties.Schema(
properties.Schema.STRING,
_('The image name or UUID to use as a base image for this '
'baymodel.'),
constraints=[
constraints.CustomConstraint('glance.image')
],
required=True
),
FLAVOR: properties.Schema(
properties.Schema.STRING,
_('The flavor of this bay model.'),
constraints=[
constraints.CustomConstraint('nova.flavor')
]
),
MASTER_FLAVOR: properties.Schema(
properties.Schema.STRING,
_('The flavor of the master node for this bay model.'),
constraints=[
constraints.CustomConstraint('nova.flavor')
]
),
KEYPAIR: properties.Schema(
properties.Schema.STRING,
_('The name or id of the nova ssh keypair.'),
constraints=[
constraints.CustomConstraint('nova.keypair')
],
required=True
),
EXTERNAL_NETWORK: properties.Schema(
properties.Schema.STRING,
_('The external network to attach the Bay.'),
constraints=[
constraints.CustomConstraint('neutron.network')
],
required=True
),
FIXED_NETWORK: properties.Schema(
properties.Schema.STRING,
_('The fixed network to attach the Bay.'),
constraints=[
constraints.CustomConstraint('neutron.network')
]
),
DNS_NAMESERVER: properties.Schema(
properties.Schema.STRING,
_('The DNS nameserver address.'),
constraints=[
constraints.CustomConstraint('ip_addr')
]
),
DOCKER_VOLUME_SIZE: properties.Schema(
properties.Schema.INTEGER,
_('The size in GB of the docker volume.'),
constraints=[
constraints.Range(min=1),
]
),
SSH_AUTHORIZED_KEY: properties.Schema(
properties.Schema.STRING,
_('The SSH Authorized Key.'),
),
COE: properties.Schema(
properties.Schema.STRING,
_('The Container Orchestration Engine for this bay model.'),
constraints=[
constraints.AllowedValues(['kubernetes', 'swarm'])
],
required=True
),
NETWORK_DRIVER: properties.Schema(
properties.Schema.STRING,
_('The name of the driver used for instantiating '
'container networks. By default, Magnum will choose the '
'pre-configured network driver based on COE type.'),
support_status=support.SupportStatus(version='6.0.0')
),
HTTP_PROXY: properties.Schema(
properties.Schema.STRING,
_('The http_proxy address to use for nodes in bay.'),
support_status=support.SupportStatus(version='6.0.0')
),
HTTPS_PROXY: properties.Schema(
properties.Schema.STRING,
_('The https_proxy address to use for nodes in bay.'),
support_status=support.SupportStatus(version='6.0.0')
),
NO_PROXY: properties.Schema(
properties.Schema.STRING,
_('A comma separated list of addresses for which proxies should '
'not be used in the bay.'),
support_status=support.SupportStatus(version='6.0.0')
),
LABELS: properties.Schema(
properties.Schema.MAP,
_('Arbitrary labels in the form of key=value pairs to '
'associate with a baymodel.'),
support_status=support.SupportStatus(version='6.0.0')
),
TLS_DISABLED: properties.Schema(
properties.Schema.BOOLEAN,
_('Disable TLS in the Bay.'),
default=False,
support_status=support.SupportStatus(version='6.0.0')
),
}
default_client_name = 'magnum'
entity = 'baymodels'
def handle_create(self):
args = {
'name': self.properties[self.NAME],
'image_id': self.properties[self.IMAGE],
'flavor_id': self.properties[self.FLAVOR],
'master_flavor_id': self.properties[self.MASTER_FLAVOR],
'keypair_id': self.properties[self.KEYPAIR],
'external_network_id': self.properties[self.EXTERNAL_NETWORK],
'fixed_network': self.properties[self.FIXED_NETWORK],
'dns_nameserver': self.properties[self.DNS_NAMESERVER],
'docker_volume_size': self.properties[self.DOCKER_VOLUME_SIZE],
'ssh_authorized_key': self.properties[self.SSH_AUTHORIZED_KEY],
'coe': self.properties[self.COE],
}
if self.properties[self.NETWORK_DRIVER]:
args['network_driver'] = self.properties[self.NETWORK_DRIVER]
if self.properties[self.HTTP_PROXY]:
            args['http_proxy'] = self.properties[self.HTTP_PROXY]
if self.properties[self.HTTPS_PROXY]:
args['https_proxy'] = self.properties[self.HTTPS_PROXY]
if self.properties[self.NO_PROXY]:
args['no_proxy'] = self.properties[self.NO_PROXY]
if self.properties[self.LABELS]:
args['labels'] = self.properties[self.LABELS]
if self.properties[self.TLS_DISABLED]:
args['tls_disabled'] = self.properties[self.TLS_DISABLED]
bm = self.client().baymodels.create(**args)
self.resource_id_set(bm.uuid)
def resource_mapping():
return {
'OS::Magnum::BayModel': BayModel
}
| dragorosson/heat | heat/engine/resources/openstack/magnum/baymodel.py | Python | apache-2.0 | 7,315 |
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate LTC2309 voltage/current monitor configuration.
This module looks for an 'ltc2309_config' tuple in the configuration file
(specified on command line). The ltc2309_config tuple should contain the
hardware revision EnumHelper and a dictionary that maps the board's hardware
revision to a list of dictionaries. Each dictionary in this list specifies
the configuration for each LTC2309 chip.
"""
import sys
import textwrap
from makani.avionics.firmware.drivers import ltc2309_types
from makani.avionics.firmware.monitors import generate_monitor_base
from makani.lib.python import c_helpers
select_helper = c_helpers.EnumHelper('Ltc2309Select', ltc2309_types)
mode_helper = c_helpers.EnumHelper('Ltc2309ConversionMode', ltc2309_types)
power_helper = c_helpers.EnumHelper('Ltc2309PowerSavingMode', ltc2309_types)
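# Illustrative only (not taken from a real Makani board config): a config
# module consumed by this generator might expose an 'ltc2309_config' tuple
# pairing the hardware-revision EnumHelper with a map from revision to a list
# of per-chip monitor dictionaries, using the keys listed in
# Ltc2309DeviceConfig.expected_parameters below. All values here are made up.
#
# ltc2309_config = (hw_rev_helper, {
#     'rev_01': [
#         {'name': '12v_bus', 'address': 0x18, 'channel': 0,
#          'conversion_mode': 0, 'power_saving': 0,
#          'input_divider': 0.25, 'input_offset': 0.0,
#          'nominal': 12.0, 'min': 11.5, 'max': 12.5},
#     ],
# })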
class Ltc2309DeviceConfig(generate_monitor_base.DeviceConfigBase):
"""Generate LTC2309 voltage/current monitor configuration."""
# TODO: Add unit tests.
def __init__(self, config):
expected_parameters = {
'name',
'address',
'channel',
'conversion_mode',
'power_saving',
'input_divider',
'input_offset',
'nominal',
'min',
'max'}
super(Ltc2309DeviceConfig, self).__init__(config, expected_parameters)
def CheckParameterValues(self, config):
name = config['name']
if config['input_divider'] == 0.0:
      raise ValueError('Invalid input_divider specified for %s.' % name)
def ComputeParameters(self, config):
"""Update per-input configuration with computed data."""
input_divider = config['input_divider']
config['volts_per_count'] = 4.096 / 2**12 / input_divider
config['offset'] = config['input_offset'] / input_divider
config['binary'] = ltc2309_types.Ltc2309BuildCommand(
config['channel'], config['conversion_mode'], config['power_saving'])
def GetConfigAsString(self, config, enum_values, index):
"""Generate an initialization array for the Ltc2309Monitors structure."""
string = textwrap.dedent("""\
[{index}] = {{
.monitor = {monitor_name},
.config = {{
.addr = 0x{address:02X},
.command = 0x{binary:02X}}},
.volts_per_count = {volts_per_count}f,
.offset = {offset}f,
.nominal = {nominal}f,
.min = {min}f,
.max = {max}f}},
""").format(
index=index,
monitor_name=enum_values['monitor'],
**config)
return string
def GetHeaderFiles(self):
return ['avionics/firmware/monitors/ltc2309_types.h']
def Main(argv):
"""Entry point."""
flags = generate_monitor_base.ParseFlags(argv)
config_module = generate_monitor_base.GetConfigModule(flags)
gen = generate_monitor_base.GenerateMonitorConfig(
'ltc2309', flags.prefix, Ltc2309DeviceConfig)
gen.LoadConfig(config_module.ltc2309_config,
multiple_configs_per_device=True)
generate_monitor_base.WriteOutputFiles(flags, gen)
if __name__ == '__main__':
Main(sys.argv)
| google/makani | avionics/firmware/monitors/generate_ltc2309_monitor.py | Python | apache-2.0 | 3,670 |
class Solution(object):
def deleteDuplicates(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
if head is None:
return None
p = head
while p.next is not None:
if p.val == p.next.val:
p.next = p.next.next
else:
p = p.next
return head
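# Context sketch (not part of the original snippet): LeetCode normally
# supplies ListNode at judge time; a minimal stand-in and example call follow.
class ListNode(object):
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next
# Example: the list 1 -> 1 -> 2 collapses to 1 -> 2
# head = Solution().deleteDuplicates(ListNode(1, ListNode(1, ListNode(2))))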
| lunabox/leetcode | python/problems/s83_Remove_Duplicates_from_Sorted_List.py | Python | apache-2.0 | 387 |
# -*- coding: utf-8 -*-
'''
Exodus Add-on
Copyright (C) 2016 Exodus
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import json,re,urllib,urlparse
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import directstream
class source:
def __init__(self):
self.domains = ['hdmovie14.net']
self.base_link = 'http://hdmovie14.net'
self.search_link = 'aHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vY3VzdG9tc2VhcmNoL3YxZWxlbWVudD9rZXk9QUl6YVN5Q1ZBWGlVelJZc01MMVB2NlJ3U0cxZ3VubU1pa1R6UXFZJnJzej1maWx0ZXJlZF9jc2UmbnVtPTEwJmhsPWVuJmN4PTAwNjkxOTYxOTI2MzYxNzgyMDM4ODpkYmljLTZweGt4cyZnb29nbGVob3N0PXd3dy5nb29nbGUuY29tJnE9JXM='
self.moviesearch_link = '/watch/%s-%s'
self.tvsearch_link = '/watch/%s-%s-season-%s/%s'
def movie(self, imdb, title, year):
try:
t = cleantitle.get(title)
q = '%s %s' % (title, year)
q = self.search_link.decode('base64') % urllib.quote_plus(q)
r = client.request(q)
r = json.loads(r)['results']
r = [(i['url'], i['titleNoFormatting']) for i in r]
r = [(i[0].split('%')[0], re.findall('(?:^Watch |)(.+?)(?:\(|)(\d{4})', i[1])) for i in r]
r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if i[1]]
r = [i for i in r if '/watch/' in i[0] and not '-season-' in i[0]]
r = [i for i in r if t == cleantitle.get(i[1]) and year == i[2]]
r = r[0][0]
url = re.findall('(?://.+?|)(/.+)', r)[0]
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
except:
pass
try:
url = re.sub('[^A-Za-z0-9]', '-', title).lower()
url = self.moviesearch_link % (url, year)
r = urlparse.urljoin(self.base_link, url)
r = client.request(r, output='geturl')
if not year in r: raise Exception()
return url
except:
return
def tvshow(self, imdb, tvdb, tvshowtitle, year):
try:
url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
url = urllib.urlencode(url)
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
year = re.findall('(\d{4})', premiered)[0]
if int(year) >= 2016: raise Exception()
url = re.sub('[^A-Za-z0-9]', '-', data['tvshowtitle']).lower()
url = self.tvsearch_link % (url, data['year'], '%01d' % int(season), '%01d' % int(episode))
r = urlparse.urljoin(self.base_link, url)
r = client.request(r, output='geturl')
if not data['year'] in r: raise Exception()
return url
except:
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
if url == None: return sources
url = urlparse.urljoin(self.base_link, url)
r = client.request(url)
r = client.parseDOM(r, 'div', attrs = {'class': 'player_wraper'})
r = client.parseDOM(r, 'iframe', ret='src')
for u in r:
try:
u = urlparse.urljoin(self.base_link, u)
u = client.request(u, referer=url)
u = re.findall('"(?:url|src)"\s*:\s*"(.+?)"', u)
for i in u:
try: sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'provider': 'Movies14', 'url': i, 'direct': True, 'debridonly': False})
except: pass
except:
pass
return sources
except:
return sources
def resolve(self, url):
return directstream.googlepass(url)
| JamesLinEngineer/RKMC | addons/plugin.video.phstreams/resources/lib/sources/movies14_mv_tv.py | Python | gpl-2.0 | 4,810 |
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Nova common internal object model"""
import contextlib
import copy
import datetime
import functools
import traceback
import netaddr
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_utils import timeutils
from oslo_utils import versionutils
from oslo_versionedobjects import base as ovoo_base
import six
from nova import context
from nova import exception
from nova.i18n import _, _LE
from nova import objects
from nova.objects import fields as obj_fields
from nova import utils
LOG = logging.getLogger('object')
def get_attrname(name):
"""Return the mangled name of the attribute's underlying storage."""
# FIXME(danms): This is just until we use o.vo's class properties
# and object base.
return '_obj_' + name
class NovaObjectRegistry(ovoo_base.VersionedObjectRegistry):
def registration_hook(self, cls, index):
# NOTE(danms): Set the *latest* version of this class
newest = self._registry._obj_classes[cls.obj_name()][0]
setattr(objects, cls.obj_name(), newest)
# These are decorators that mark an object's method as remotable.
# If the metaclass is configured to forward object methods to an
# indirection service, these will result in making an RPC call
# instead of directly calling the implementation in the object. Instead,
# the object implementation on the remote end will perform the
# requested action and the result will be returned here.
def remotable_classmethod(fn):
"""Decorator for remotable classmethods."""
@functools.wraps(fn)
def wrapper(cls, context, *args, **kwargs):
if NovaObject.indirection_api:
result = NovaObject.indirection_api.object_class_action(
context, cls.obj_name(), fn.__name__, cls.VERSION,
args, kwargs)
else:
result = fn(cls, context, *args, **kwargs)
if isinstance(result, NovaObject):
result._context = context
return result
# NOTE(danms): Make this discoverable
wrapper.remotable = True
wrapper.original_fn = fn
return classmethod(wrapper)
# See comment above for remotable_classmethod()
#
# Note that this will use either the provided context, or the one
# stashed in the object. If neither are present, the object is
# "orphaned" and remotable methods cannot be called.
def remotable(fn):
"""Decorator for remotable object methods."""
@functools.wraps(fn)
def wrapper(self, *args, **kwargs):
if args and isinstance(args[0], context.RequestContext):
raise exception.ObjectActionError(
action=fn.__name__,
reason='Calling remotables with context is deprecated')
if self._context is None:
raise exception.OrphanedObjectError(method=fn.__name__,
objtype=self.obj_name())
if NovaObject.indirection_api:
updates, result = NovaObject.indirection_api.object_action(
self._context, self, fn.__name__, args, kwargs)
for key, value in six.iteritems(updates):
if key in self.fields:
field = self.fields[key]
# NOTE(ndipanov): Since NovaObjectSerializer will have
# deserialized any object fields into objects already,
# we do not try to deserialize them again here.
if isinstance(value, NovaObject):
setattr(self, key, value)
else:
setattr(self, key,
field.from_primitive(self, key, value))
self.obj_reset_changes()
self._changed_fields = set(updates.get('obj_what_changed', []))
return result
else:
return fn(self, *args, **kwargs)
wrapper.remotable = True
wrapper.original_fn = fn
return wrapper
class NovaObject(object):
"""Base class and object factory.
This forms the base of all objects that can be remoted or instantiated
via RPC. Simply defining a class that inherits from this base class
will make it remotely instantiatable. Objects should implement the
necessary "get" classmethod routines as well as "save" object methods
as appropriate.
"""
# Object versioning rules
#
# Each service has its set of objects, each with a version attached. When
# a client attempts to call an object method, the server checks to see if
# the version of that object matches (in a compatible way) its object
# implementation. If so, cool, and if not, fail.
#
# This version is allowed to have three parts, X.Y.Z, where the .Z element
# is reserved for stable branch backports. The .Z is ignored for the
# purposes of triggering a backport, which means anything changed under
# a .Z must be additive and non-destructive such that a node that knows
# about X.Y can consider X.Y.Z equivalent.
VERSION = '1.0'
# The fields present in this object as key:field pairs. For example:
#
# fields = { 'foo': fields.IntegerField(),
# 'bar': fields.StringField(),
# }
fields = {}
obj_extra_fields = []
# Table of sub-object versioning information
#
# This contains a list of version mappings, by the field name of
# the subobject. The mappings must be in order of oldest to
# newest, and are tuples of (my_version, subobject_version). A
# request to backport this object to $my_version will cause the
# subobject to be backported to $subobject_version.
#
# obj_relationships = {
# 'subobject1': [('1.2', '1.1'), ('1.4', '1.2')],
# 'subobject2': [('1.2', '1.0')],
# }
#
# In the above example:
#
# - If we are asked to backport our object to version 1.3,
# subobject1 will be backported to version 1.1, since it was
# bumped to version 1.2 when our version was 1.4.
# - If we are asked to backport our object to version 1.5,
# no changes will be made to subobject1 or subobject2, since
# they have not changed since version 1.4.
# - If we are asked to backlevel our object to version 1.1, we
# will remove both subobject1 and subobject2 from the primitive,
# since they were not added until version 1.2.
obj_relationships = {}
# Temporary until we inherit from o.vo.base.VersionedObject
indirection_api = None
def __init__(self, context=None, **kwargs):
self._changed_fields = set()
self._context = context
for key in kwargs.keys():
setattr(self, key, kwargs[key])
def __repr__(self):
return '%s(%s)' % (
self.obj_name(),
','.join(['%s=%s' % (name,
(self.obj_attr_is_set(name) and
field.stringify(getattr(self, name)) or
'<?>'))
for name, field in sorted(self.fields.items())]))
@classmethod
def obj_name(cls):
"""Return a canonical name for this object which will be used over
the wire for remote hydration.
"""
return cls.__name__
@classmethod
def obj_class_from_name(cls, objname, objver):
"""Returns a class from the registry based on a name and version."""
if objname not in NovaObjectRegistry.obj_classes():
LOG.error(_LE('Unable to instantiate unregistered object type '
'%(objtype)s'), dict(objtype=objname))
raise exception.UnsupportedObjectError(objtype=objname)
# NOTE(comstud): If there's not an exact match, return the highest
# compatible version. The objects stored in the class are sorted
# such that highest version is first, so only set compatible_match
# once below.
compatible_match = None
obj_classes = NovaObjectRegistry.obj_classes()
for objclass in obj_classes[objname]:
if objclass.VERSION == objver:
return objclass
if (not compatible_match and
versionutils.is_compatible(objver, objclass.VERSION)):
compatible_match = objclass
if compatible_match:
return compatible_match
# As mentioned above, latest version is always first in the list.
latest_ver = obj_classes[objname][0].VERSION
raise exception.IncompatibleObjectVersion(objname=objname,
objver=objver,
supported=latest_ver)
@classmethod
def _obj_from_primitive(cls, context, objver, primitive):
self = cls()
self._context = context
self.VERSION = objver
objdata = primitive['nova_object.data']
changes = primitive.get('nova_object.changes', [])
for name, field in self.fields.items():
if name in objdata:
setattr(self, name, field.from_primitive(self, name,
objdata[name]))
self._changed_fields = set([x for x in changes if x in self.fields])
return self
@classmethod
def obj_from_primitive(cls, primitive, context=None):
"""Object field-by-field hydration."""
if primitive['nova_object.namespace'] != 'nova':
# NOTE(danms): We don't do anything with this now, but it's
# there for "the future"
raise exception.UnsupportedObjectError(
objtype='%s.%s' % (primitive['nova_object.namespace'],
primitive['nova_object.name']))
objname = primitive['nova_object.name']
objver = primitive['nova_object.version']
objclass = cls.obj_class_from_name(objname, objver)
return objclass._obj_from_primitive(context, objver, primitive)
def __deepcopy__(self, memo):
"""Efficiently make a deep copy of this object."""
# NOTE(danms): A naive deepcopy would copy more than we need,
# and since we have knowledge of the volatile bits of the
# object, we can be smarter here. Also, nested entities within
# some objects may be uncopyable, so we can avoid those sorts
# of issues by copying only our field data.
nobj = self.__class__()
nobj._context = self._context
for name in self.fields:
if self.obj_attr_is_set(name):
nval = copy.deepcopy(getattr(self, name), memo)
setattr(nobj, name, nval)
nobj._changed_fields = set(self._changed_fields)
return nobj
def obj_clone(self):
"""Create a copy."""
return copy.deepcopy(self)
def obj_calculate_child_version(self, target_version, child):
"""Calculate the appropriate version for a child object.
This is to be used when backporting an object for an older client.
A sub-object will need to be backported to a suitable version for
the client as well, and this method will calculate what that
version should be, based on obj_relationships.
:param target_version: Version this object is being backported to
:param child: The child field for which the appropriate version
is to be calculated
:returns: None if the child should be omitted from the backport,
otherwise, the version to which the child should be
backported
"""
target_version = utils.convert_version_to_tuple(target_version)
for index, versions in enumerate(self.obj_relationships[child]):
my_version, child_version = versions
my_version = utils.convert_version_to_tuple(my_version)
if target_version < my_version:
if index == 0:
# We're backporting to a version from before this
# subobject was added: delete it from the primitive.
return None
else:
# We're in the gap between index-1 and index, so
# backport to the older version
return self.obj_relationships[child][index - 1][1]
elif target_version == my_version:
# This is the first mapping that satisfies the
# target_version request: backport the object.
return child_version
# No need to backport, as far as we know, so return the latest
# version of the sub-object we know about
return self.obj_relationships[child][-1][1]
def _obj_make_obj_compatible(self, primitive, target_version, field):
"""Backlevel a sub-object based on our versioning rules.
This is responsible for backporting objects contained within
this object's primitive according to a set of rules we
maintain about version dependencies between objects. This
requires that the obj_relationships table in this object is
correct and up-to-date.
:param:primitive: The primitive version of this object
:param:target_version: The version string requested for this object
:param:field: The name of the field in this object containing the
sub-object to be backported
"""
def _do_backport(to_version):
obj = getattr(self, field)
if obj is None:
return
if isinstance(obj, NovaObject):
if to_version != primitive[field]['nova_object.version']:
obj.obj_make_compatible(
primitive[field]['nova_object.data'],
to_version)
primitive[field]['nova_object.version'] = to_version
elif isinstance(obj, list):
for i, element in enumerate(obj):
element.obj_make_compatible(
primitive[field][i]['nova_object.data'],
to_version)
primitive[field][i]['nova_object.version'] = to_version
child_version = self.obj_calculate_child_version(target_version, field)
if child_version is None:
del primitive[field]
else:
_do_backport(child_version)
def obj_make_compatible(self, primitive, target_version):
"""Make an object representation compatible with a target version.
This is responsible for taking the primitive representation of
an object and making it suitable for the given target_version.
This may mean converting the format of object attributes, removing
attributes that have been added since the target version, etc. In
general:
- If a new version of an object adds a field, this routine
should remove it for older versions.
- If a new version changed or restricted the format of a field, this
should convert it back to something a client knowing only of the
older version will tolerate.
- If an object that this object depends on is bumped, then this
object should also take a version bump. Then, this routine should
backlevel the dependent object (by calling its obj_make_compatible())
if the requested version of this object is older than the version
where the new dependent object was added.
:param:primitive: The result of self.obj_to_primitive()
:param:target_version: The version string requested by the recipient
of the object
:raises: nova.exception.UnsupportedObjectError if conversion
is not possible for some reason
"""
for key, field in self.fields.items():
if not isinstance(field, (obj_fields.ObjectField,
obj_fields.ListOfObjectsField)):
continue
if not self.obj_attr_is_set(key):
continue
if key not in self.obj_relationships:
# NOTE(danms): This is really a coding error and shouldn't
# happen unless we miss something
raise exception.ObjectActionError(
action='obj_make_compatible',
reason='No rule for %s' % key)
self._obj_make_obj_compatible(primitive, target_version, key)
def obj_to_primitive(self, target_version=None):
"""Simple base-case dehydration.
This calls to_primitive() for each item in fields.
"""
primitive = dict()
for name, field in self.fields.items():
if self.obj_attr_is_set(name):
primitive[name] = field.to_primitive(self, name,
getattr(self, name))
if target_version:
self.obj_make_compatible(primitive, target_version)
obj = {'nova_object.name': self.obj_name(),
'nova_object.namespace': 'nova',
'nova_object.version': target_version or self.VERSION,
'nova_object.data': primitive}
if self.obj_what_changed():
obj['nova_object.changes'] = list(self.obj_what_changed())
return obj
def obj_set_defaults(self, *attrs):
if not attrs:
attrs = [name for name, field in self.fields.items()
if field.default != obj_fields.UnspecifiedDefault]
for attr in attrs:
default = copy.deepcopy(self.fields[attr].default)
if default is obj_fields.UnspecifiedDefault:
raise exception.ObjectActionError(
action='set_defaults',
reason='No default set for field %s' % attr)
if not self.obj_attr_is_set(attr):
setattr(self, attr, default)
def obj_load_attr(self, attrname):
"""Load an additional attribute from the real object.
This should use self._conductor, and cache any data that might
be useful for future load operations.
"""
raise NotImplementedError(
_("Cannot load '%s' in the base class") % attrname)
def save(self, context):
"""Save the changed fields back to the store.
This is optional for subclasses, but is presented here in the base
class for consistency among those that do.
"""
raise NotImplementedError(_('Cannot save anything in the base class'))
def obj_what_changed(self):
"""Returns a set of fields that have been modified."""
changes = set(self._changed_fields)
for field in self.fields:
if (self.obj_attr_is_set(field) and
isinstance(getattr(self, field), NovaObject) and
getattr(self, field).obj_what_changed()):
changes.add(field)
return changes
def obj_get_changes(self):
"""Returns a dict of changed fields and their new values."""
changes = {}
for key in self.obj_what_changed():
changes[key] = getattr(self, key)
return changes
def obj_reset_changes(self, fields=None, recursive=False):
"""Reset the list of fields that have been changed.
:param fields: List of fields to reset, or "all" if None.
:param recursive: Call obj_reset_changes(recursive=True) on
any sub-objects within the list of fields
being reset.
NOTE: This is NOT "revert to previous values"
NOTE: Specifying fields on recursive resets will only be
honored at the top level. Everything below the top
will reset all.
"""
if recursive:
for field in self.obj_get_changes():
# Ignore fields not in requested set (if applicable)
if fields and field not in fields:
continue
# Skip any fields that are unset
if not self.obj_attr_is_set(field):
continue
value = getattr(self, field)
# Don't reset nulled fields
if value is None:
continue
# Reset straight Object and ListOfObjects fields
if isinstance(self.fields[field], obj_fields.ObjectField):
value.obj_reset_changes(recursive=True)
elif isinstance(self.fields[field],
obj_fields.ListOfObjectsField):
for thing in value:
thing.obj_reset_changes(recursive=True)
if fields:
self._changed_fields -= set(fields)
else:
self._changed_fields.clear()
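    # NOTE(editor): typical usage sketch (hypothetical subclass and db call,
    # not from the original source):
    #   def save(self, context):
    #       updates = self.obj_get_changes()
    #       db_api.thing_update(context, self.id, updates)  # hypothetical
    #       self.obj_reset_changes()
    # after which obj_what_changed() stays empty until the next assignment.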
def obj_attr_is_set(self, attrname):
"""Test object to see if attrname is present.
Returns True if the named attribute has a value set, or
False if not. Raises AttributeError if attrname is not
a valid attribute for this object.
"""
if attrname not in self.obj_fields:
raise AttributeError(
_("%(objname)s object has no attribute '%(attrname)s'") %
{'objname': self.obj_name(), 'attrname': attrname})
return hasattr(self, get_attrname(attrname))
@property
def obj_fields(self):
return self.fields.keys() + self.obj_extra_fields
# NOTE(danms): This is nova-specific, so don't copy this to o.vo
@contextlib.contextmanager
def obj_alternate_context(self, context):
original_context = self._context
self._context = context
try:
yield
finally:
self._context = original_context
@contextlib.contextmanager
def obj_as_admin(self):
"""Context manager to make an object call as an admin.
This temporarily modifies the context embedded in an object to
be elevated() and restores it after the call completes. Example
usage:
with obj.obj_as_admin():
obj.save()
"""
if self._context is None:
raise exception.OrphanedObjectError(method='obj_as_admin',
objtype=self.obj_name())
original_context = self._context
self._context = self._context.elevated()
try:
yield
finally:
self._context = original_context
class NovaObjectDictCompat(ovoo_base.VersionedObjectDictCompat):
def __iter__(self):
for name in self.obj_fields:
if (self.obj_attr_is_set(name) or
name in self.obj_extra_fields):
yield name
def keys(self):
return list(self)
class NovaTimestampObject(object):
"""Mixin class for db backed objects with timestamp fields.
    SQLAlchemy models that inherit from the oslo_db TimestampMixin will include
these fields and the corresponding objects will benefit from this mixin.
"""
fields = {
'created_at': obj_fields.DateTimeField(nullable=True),
'updated_at': obj_fields.DateTimeField(nullable=True),
}
class NovaPersistentObject(object):
"""Mixin class for Persistent objects.
This adds the fields that we use in common for most persistent objects.
"""
fields = {
'created_at': obj_fields.DateTimeField(nullable=True),
'updated_at': obj_fields.DateTimeField(nullable=True),
'deleted_at': obj_fields.DateTimeField(nullable=True),
'deleted': obj_fields.BooleanField(default=False),
}
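# NOTE(editor): hedged illustration -- a concrete object usually combines the
# mixin with NovaObject; 'MyRecord' is a placeholder, not a real nova object:
#   class MyRecord(NovaPersistentObject, NovaObject):
#       VERSION = '1.0'
#       fields = {'name': obj_fields.StringField()}
# The mixin's created_at/updated_at/deleted_at/deleted fields are merged with
# the subclass's own fields by the base object machinery.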
class ObjectListBase(ovoo_base.ObjectListBase):
# NOTE(danms): These are for transition to using the oslo
# base object and can be removed when we move to it.
@classmethod
def _obj_primitive_key(cls, field):
return 'nova_object.%s' % field
@classmethod
def _obj_primitive_field(cls, primitive, field,
default=obj_fields.UnspecifiedDefault):
key = cls._obj_primitive_key(field)
if default == obj_fields.UnspecifiedDefault:
return primitive[key]
else:
return primitive.get(key, default)
class NovaObjectSerializer(messaging.NoOpSerializer):
"""A NovaObject-aware Serializer.
This implements the Oslo Serializer interface and provides the
ability to serialize and deserialize NovaObject entities. Any service
that needs to accept or return NovaObjects as arguments or result values
should pass this to its RPCClient and RPCServer objects.
"""
@property
def conductor(self):
if not hasattr(self, '_conductor'):
from nova import conductor
self._conductor = conductor.API()
return self._conductor
def _process_object(self, context, objprim):
try:
objinst = NovaObject.obj_from_primitive(objprim, context=context)
except exception.IncompatibleObjectVersion as e:
objver = objprim['nova_object.version']
if objver.count('.') == 2:
# NOTE(danms): For our purposes, the .z part of the version
# should be safe to accept without requiring a backport
objprim['nova_object.version'] = \
'.'.join(objver.split('.')[:2])
return self._process_object(context, objprim)
objinst = self.conductor.object_backport(context, objprim,
e.kwargs['supported'])
return objinst
def _process_iterable(self, context, action_fn, values):
"""Process an iterable, taking an action on each value.
:param:context: Request context
:param:action_fn: Action to take on each item in values
:param:values: Iterable container of things to take action on
:returns: A new container of the same type (except set) with
items from values having had action applied.
"""
iterable = values.__class__
if issubclass(iterable, dict):
return iterable(**{k: action_fn(context, v)
for k, v in six.iteritems(values)})
else:
# NOTE(danms, gibi) A set can't have an unhashable value inside,
# such as a dict. Convert the set to list, which is fine, since we
# can't send them over RPC anyway. We convert it to list as this
# way there will be no semantic change between the fake rpc driver
# used in functional test and a normal rpc driver.
if iterable == set:
iterable = list
return iterable([action_fn(context, value) for value in values])
def serialize_entity(self, context, entity):
if isinstance(entity, (tuple, list, set, dict)):
entity = self._process_iterable(context, self.serialize_entity,
entity)
elif (hasattr(entity, 'obj_to_primitive') and
callable(entity.obj_to_primitive)):
entity = entity.obj_to_primitive()
return entity
def deserialize_entity(self, context, entity):
if isinstance(entity, dict) and 'nova_object.name' in entity:
entity = self._process_object(context, entity)
elif isinstance(entity, (tuple, list, set, dict)):
entity = self._process_iterable(context, self.deserialize_entity,
entity)
return entity
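# NOTE(editor): hedged wiring sketch; exact oslo.messaging signatures may vary
# by release, and 'transport'/'target'/'endpoints' are assumed to exist:
#   serializer = NovaObjectSerializer()
#   client = messaging.RPCClient(transport, target, serializer=serializer)
#   server = messaging.get_rpc_server(transport, target, endpoints,
#                                     serializer=serializer)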
def obj_to_primitive(obj):
"""Recursively turn an object into a python primitive.
A NovaObject becomes a dict, and anything that implements ObjectListBase
becomes a list.
"""
if isinstance(obj, ObjectListBase):
return [obj_to_primitive(x) for x in obj]
elif isinstance(obj, NovaObject):
result = {}
for key in obj.obj_fields:
if obj.obj_attr_is_set(key) or key in obj.obj_extra_fields:
result[key] = obj_to_primitive(getattr(obj, key))
return result
elif isinstance(obj, netaddr.IPAddress):
return str(obj)
elif isinstance(obj, netaddr.IPNetwork):
return str(obj)
else:
return obj
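# NOTE(editor): hedged illustration -- for an ObjectListBase holding two
# objects this returns a plain list of two dicts, and netaddr values nested in
# the fields come back as strings, e.g. IPAddress('10.0.0.1') -> '10.0.0.1'.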
def obj_make_list(context, list_obj, item_cls, db_list, **extra_args):
"""Construct an object list from a list of primitives.
This calls item_cls._from_db_object() on each item of db_list, and
adds the resulting object to list_obj.
:param:context: Request context
:param:list_obj: An ObjectListBase object
:param:item_cls: The NovaObject class of the objects within the list
:param:db_list: The list of primitives to convert to objects
:param:extra_args: Extra arguments to pass to _from_db_object()
:returns: list_obj
"""
list_obj.objects = []
for db_item in db_list:
item = item_cls._from_db_object(context, item_cls(), db_item,
**extra_args)
list_obj.objects.append(item)
list_obj._context = context
list_obj.obj_reset_changes()
return list_obj
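# NOTE(editor): hedged usage sketch; 'MyThingList', 'MyThing' and
# db_api.my_thing_get_all() are placeholders, not real nova names:
#   things = obj_make_list(context, MyThingList(), MyThing,
#                          db_api.my_thing_get_all(context))
# Each raw db row is hydrated through MyThing._from_db_object() and the list
# comes back with its change tracking already reset.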
def serialize_args(fn):
"""Decorator that will do the arguments serialization before remoting."""
def wrapper(obj, *args, **kwargs):
args = [timeutils.strtime(at=arg) if isinstance(arg, datetime.datetime)
else arg for arg in args]
for k, v in six.iteritems(kwargs):
if k == 'exc_val' and v:
kwargs[k] = str(v)
elif k == 'exc_tb' and v and not isinstance(v, six.string_types):
kwargs[k] = ''.join(traceback.format_tb(v))
elif isinstance(v, datetime.datetime):
kwargs[k] = timeutils.strtime(at=v)
if hasattr(fn, '__call__'):
return fn(obj, *args, **kwargs)
# NOTE(danms): We wrap a descriptor, so use that protocol
return fn.__get__(None, obj)(*args, **kwargs)
# NOTE(danms): Make this discoverable
wrapper.remotable = getattr(fn, 'remotable', False)
wrapper.original_fn = fn
return (functools.wraps(fn)(wrapper) if hasattr(fn, '__call__')
else classmethod(wrapper))
def obj_equal_prims(obj_1, obj_2, ignore=None):
"""Compare two primitives for equivalence ignoring some keys.
This operation tests the primitives of two objects for equivalence.
Object primitives may contain a list identifying fields that have been
changed - this is ignored in the comparison. The ignore parameter lists
any other keys to be ignored.
    :param:obj_1: The first object in the comparison
    :param:obj_2: The second object in the comparison
:param:ignore: A list of fields to ignore
:returns: True if the primitives are equal ignoring changes
and specified fields, otherwise False.
"""
def _strip(prim, keys):
if isinstance(prim, dict):
for k in keys:
prim.pop(k, None)
for v in prim.values():
_strip(v, keys)
if isinstance(prim, list):
for v in prim:
_strip(v, keys)
return prim
if ignore is not None:
keys = ['nova_object.changes'] + ignore
else:
keys = ['nova_object.changes']
prim_1 = _strip(obj_1.obj_to_primitive(), keys)
prim_2 = _strip(obj_2.obj_to_primitive(), keys)
return prim_1 == prim_2
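# NOTE(editor): hedged example -- comparing two objects while ignoring their
# bookkeeping timestamps (field names assume the NovaPersistentObject mixin):
#   if obj_equal_prims(obj_a, obj_b, ignore=['created_at', 'updated_at']):
#       pass  # equivalent apart from the ignored keys and the changes list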
| yatinkumbhare/openstack-nova | nova/objects/base.py | Python | apache-2.0 | 32,043 |
from collections import namedtuple
import pytest
from cfme import test_requirements
from cfme.containers.container import ContainerAllView
from cfme.containers.image_registry import ImageRegistryAllView
from cfme.containers.node import NodeAllView
from cfme.containers.overview import ContainersOverviewView
from cfme.containers.pod import PodAllView
from cfme.containers.project import ProjectAllView
from cfme.containers.provider import ContainerProvidersView
from cfme.containers.provider import ContainersProvider
from cfme.containers.replicator import ReplicatorAllView
from cfme.containers.route import RouteAllView
from cfme.containers.service import ServiceAllView
from cfme.containers.template import TemplateAllView
from cfme.containers.volume import VolumeAllView
from cfme.utils.appliance.implementations.ui import navigate_to
pytestmark = [
pytest.mark.usefixtures("setup_provider"),
pytest.mark.provider([ContainersProvider], scope='function'),
test_requirements.containers
]
DataSet = namedtuple('DataSet', ['obj_view', 'page_name'])
data_sets = (
DataSet(ContainersOverviewView, 'Compute / Containers / Overview'),
DataSet(ContainerProvidersView, 'Compute / Containers / Providers'),
DataSet(NodeAllView, 'Compute / Containers / Container Nodes'),
DataSet(PodAllView, 'Compute / Containers / Pods'),
DataSet(ServiceAllView, 'Compute / Containers / Container Services'),
DataSet(ProjectAllView, 'Compute / Containers / Projects'),
DataSet(ImageRegistryAllView, 'Compute / Containers / Image Registries'),
DataSet(TemplateAllView, 'Compute / Containers / Container Templates'),
DataSet(ReplicatorAllView, 'Compute / Containers / Replicators'),
DataSet(RouteAllView, 'Compute / Containers / Routes'),
DataSet(VolumeAllView, 'Compute / Containers / Volumes'),
DataSet(ContainerAllView, 'Compute / Containers / Containers'))
def test_start_page(appliance, soft_assert):
"""
Polarion:
assignee: juwatts
caseimportance: high
casecomponent: Containers
initialEstimate: 1/6h
"""
for data_set in data_sets:
appliance.user.my_settings.visual.login_page = data_set.page_name
login_page = navigate_to(appliance.server, 'LoginScreen')
login_page.login_admin()
view = appliance.browser.create_view(data_set.obj_view)
soft_assert(
view.is_displayed,
'Configured start page is "{page_name}", but the start page now is "{cur_page}".'
.format(page_name=data_set.page_name, cur_page=view.navigation.currently_selected)
)
| nachandr/cfme_tests | cfme/tests/containers/test_start_page.py | Python | gpl-2.0 | 2,618 |
from flask_security.core import RoleMixin
from hangman.models import db
class Role(db.Model, RoleMixin):
__tablename__ = 'roles'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(80), unique=True)
description = db.Column(db.String(255))
| jrichte43/hangman | hangman/models/role.py | Python | gpl-2.0 | 278 |
def Setup(Settings,DefaultModel):
# set6_variable-base-cnn-model/var_cnn_test_proper_dataset_vgg19.py
Settings["experiment_name"] = "var_cnn_test_proper_dataset_xception_kfold"
Settings["graph_histories"] = ['together'] #['all','together',[],[1,0],[0,0,0],[]]
n=0
Settings["models"][n]["dataset_pointer"] = -1 # 0 - reuse the first dataset
Settings["models"][n]["dataset_name"] = "5556x_markable_640x640"
Settings["models"][n]["pixels"] = 640
Settings["models"][n]["cnn_model"] = 'vgg19'
Settings["models"][n]["unique_id"] = 'vgg19_cnn'
Settings["models"][n]["cooking_method"] = 'generators' # 'direct' or 'generators'
Settings["models"][n]["dump_file_override"] = 'SegmentsData_marked_R100_4Tables.dump'
Settings["models"][n]["model_type"] = 'img_osm_mix'
Settings["models"][n]["epochs"] = 500
Settings["models"][n]["k_fold_crossvalidation"] = True
Settings["models"][n]["crossvalidation_k"] = 10
Settings["graph_histories"] = []
return Settings
| previtus/MGR-Project-Code | Settings/set6_variable-base-cnn-model/var_cnn_test_proper_dataset_vgg19.py | Python | mit | 1,026 |
# parse_qs
try:
from urlparse import parse_qs
except ImportError:
from cgi import parse_qs
# json
try:
import json
except ImportError:
try:
import simplejson as json
except ImportError:
from django.utils import simplejson as json
# httplib2
import httplib2
# socks
try:
from httplib2 import socks
from httplib2.socks import (
PROXY_TYPE_HTTP,
PROXY_TYPE_SOCKS4,
PROXY_TYPE_SOCKS5
)
except ImportError:
import socks
from socks import (
PROXY_TYPE_HTTP,
PROXY_TYPE_SOCKS4,
PROXY_TYPE_SOCKS5
)
| jessekl/flixr | venv/lib/python2.7/site-packages/twilio/rest/resources/imports.py | Python | mit | 602 |
from django.conf.urls import url, patterns, include
from django.contrib.admin import site
from django.conf.urls.i18n import i18n_patterns
from cms_seoutils.sitemaps import CMSI18nSitemap
urlpatterns = patterns(
'',
url(r'^sitemap\.xml$', 'django.contrib.sitemaps.views.sitemap',
{'sitemaps': {'cmspages': CMSI18nSitemap}}, name='sitemap'),
url(r'^admin/', include(site.urls)),
)
urlpatterns += i18n_patterns(
'',
url(r'^', include('cms.urls')),
)
| mpaolini/django-cms-seoutils | cms_seoutils/test_utils/project/urls.py | Python | bsd-3-clause | 480 |
#!/usr/bin/env python3
import setuptools
setuptools.setup(
name = 'rheedsim',
version = '0.1.0',
packages = ['rheedsim'],
entry_points = {
'console_scripts':[
'rheedsim = rheedsim.__main__:main'
]
},
)
| chanjr/rheedsim | src/setup.py | Python | mit | 305 |
N, M = map(int, input().split())
S = input()
actors = list()
for _ in range(M):
_, c = map(int, input().split())
dic = set(list(input()))
actors.append((c, dic))
actors.sort()
ans = 0
for s in S:
for c, d in actors:
if s in d:
ans += c
break
else:
print(-1)
exit(0)
print(ans)
| knuu/competitive-programming | yandex/yandex2016_b_a.py | Python | mit | 346 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
"""Write your forwards methods here."""
# Replace all null values with blanks
orm.TweetChunk.objects.filter(tz_country__isnull=True).update(tz_country='')
def backwards(self, orm):
"""Write your backwards methods here."""
# Nothing to do -- blanks are still ok in the previous version
models = {
u'map.maptimeframe': {
'Meta': {'object_name': 'MapTimeFrame'},
'analysis_time': ('django.db.models.fields.FloatField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'calculated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'chunks_added': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'missing_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'node_cache_hits': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'node_cache_size': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'nodes_added': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'start_time': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'tweet_count': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'map.treenode': {
'Meta': {'object_name': 'TreeNode', 'index_together': "[['parent', 'word']]"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': u"orm['map.TreeNode']"}),
'word': ('django.db.models.fields.CharField', [], {'max_length': '150'})
},
u'map.tweetchunk': {
'Meta': {'object_name': 'TweetChunk'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'node': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['map.TreeNode']"}),
'tweet': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['twitter_stream.Tweet']"}),
'tz_country': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'})
},
u'map.tz_country': {
'Meta': {'object_name': 'Tz_Country'},
'country': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user_time_zone': ('django.db.models.fields.CharField', [], {'max_length': '32'})
},
u'twitter_stream.tweet': {
'Meta': {'object_name': 'Tweet'},
'analyzed_by': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'favorite_count': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'filter_level': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '6', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_reply_to_status_id': ('django.db.models.fields.BigIntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'lang': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '9', 'null': 'True', 'blank': 'True'}),
'latitude': ('django.db.models.fields.FloatField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'longitude': ('django.db.models.fields.FloatField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'retweet_count': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'retweeted_status_id': ('django.db.models.fields.BigIntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'truncated': ('django.db.models.fields.BooleanField', [], {}),
'tweet_id': ('django.db.models.fields.BigIntegerField', [], {}),
'user_followers_count': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'user_friends_count': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'user_geo_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user_id': ('django.db.models.fields.BigIntegerField', [], {}),
'user_location': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '150', 'null': 'True', 'blank': 'True'}),
'user_name': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'user_screen_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'user_time_zone': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '150', 'null': 'True', 'blank': 'True'}),
'user_utc_offset': ('django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'user_verified': ('django.db.models.fields.BooleanField', [], {})
}
}
complete_apps = ['map']
symmetrical = True
| michaelbrooks/twitter-feels | twitter_feels/apps/map/migrations/0003_replace_null_tweetchunk_tz_country.py | Python | mit | 5,898 |
"""
Transform asc files into ply or stl.
"""
from numpy import sqrt, arcsin, arctan2, floor, pi
from collections import namedtuple
Patch = namedtuple('Patch', ['points', 'faces'])
Point = namedtuple('Point', ['pid', 'x', 'y', 'z'])
def get_points_raw(fname):
"Return a list of points from an asc file"
points_raw = []
for line in open(fname):
if line.strip():
points_raw.append([float(num) for num in line.split()[:3]])
return points_raw
def get_points(points_raw, row_length=0):
"Return points (list of rows) from a list of raw points"
fast_angle = find_fast_angle(points_raw)
points = []
row = []
pid = 0
delta_threshold = 0.001 * pi / sqrt(len(points_raw))
for x, y, z in points_raw:
r = sqrt(x**2 + y**2 + z**2)
theta = arctan2(y, x)
phi = arcsin(z / r)
# See if we have to add a new row.
if pid == 0:
pass
elif row_length > 0:
if pid % row_length == 0:
points.append(row)
row = []
else:
d_theta = mod(theta - theta_last, 2 * pi)
d_phi = mod(phi - phi_last, pi)
if fast_angle == 'theta':
if abs(d_phi) > delta_threshold:
points.append(row)
row = []
elif fast_angle == 'phi':
if abs(d_theta) > delta_threshold:
points.append(row)
row = []
row.append(Point(pid, x, y, z))
theta_last, phi_last = theta, phi
pid += 1
points.append(row) # don't forget to append the last row!
return points
def find_fast_angle(points_raw):
"Return the angle that changes faster between consecutive points"
d_thetas, d_phis = [], []
pid = 0
for x, y, z in points_raw:
r = sqrt(x**2 + y**2 + z**2)
theta = arctan2(y, x)
phi = arcsin(z / r)
if pid > 0:
d_thetas.append(abs(mod(theta - theta_last, 2 * pi)))
d_phis.append(abs(mod(phi - phi_last, pi)))
theta_last, phi_last = theta, phi
pid += 1
if pid > 10: # enough to get an idea
break
return 'theta' if sum(d_thetas) > sum(d_phis) else 'phi'
def mod(x, y):
"Return the representative of x between -y/2 and y/2 for the group R/yR"
x0 = x - y * floor(x / y)
return x0 if x0 < y / 2 else x0 - y
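# NOTE(editor): worked example (editor's illustration, not part of the
# original module): mod(3.5, 2) reduces 3.5 to 1.5 in [0, 2); since 1.5 >= 2/2
# it returns 1.5 - 2 = -0.5, i.e. the representative in [-1, 1). get_points()
# relies on this to obtain small signed steps d_theta and d_phi even across
# the +/-pi wrap-around.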
| jordibc/mapelia | asc.py | Python | gpl-3.0 | 2,430 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import unittest
from bllipparser import Sentence, tokenize, RerankingParser, Tree
from bllipparser.RerankingParser import (NBestList, ScoredParse,
get_unified_model_parameters)
# throughout: reprs are called to ensure they don't crash, but we don't
# rely on their value
class MiscToolTests(unittest.TestCase):
def test_sentence(self):
s = Sentence('Hi there.')
self.assertEqual(s.tokens(), ['Hi', 'there', '.'])
self.assertEqual(len(s), 3)
repr(s)
s2 = Sentence(s)
self.assertEqual(s2.tokens(), ['Hi', 'there', '.'])
self.assertEqual(len(s2), 3)
s3 = Sentence(s.sentrep)
self.assertEqual(s3.tokens(), ['Hi', 'there', '.'])
self.assertEqual(len(s3), 3)
def test_sentences_from_string(self):
sentences = Sentence.sentences_from_string('<s> Test </s>')
self.assertEqual(len(sentences), 1)
self.assertEqual(sentences[0].tokens(), ['Test'])
self.assertEqual(sentences[0].tokens()[0], 'Test')
sentences2 = Sentence.sentences_from_string('''<s> Sentence 1 </s>
<s> Can't have just one. </s>
<s last> The last sentence </s>
<s> Just kidding. </s>''')
self.assertEqual(len(sentences2), 4)
self.assertEqual(sentences2[0].tokens(), ['Sentence', '1'])
self.assertEqual(sentences2[1].tokens(), ['Can', "n't", 'have',
'just', 'one', '.'])
self.assertEqual(sentences2[2].tokens(), ['The', 'last', 'sentence'])
self.assertEqual(sentences2[3].tokens(), ['Just', 'kidding', '.'])
def test_sentences_from_file(self):
sentences = Sentence.sentences_from_file('sample-text/fails.sgml')
self.assertEqual(len(sentences), 4)
self.assertEqual(sentences[0].tokens(), 'A -RSB- -LSB- B -RSB- -LSB- C -RSB- -LSB- D -RSB- -LSB- A -RSB- -LSB- B -RSB- -LSB- C -RSB- -LSB- D -RSB- -LSB- E -RSB- -LSB- G -RSB- -LSB- F -RSB- -LSB- G -RSB- -LSB- H -RSB- -LSB- I -RSB- -LSB- J -RSB- -LSB- K -RSB- -LSB- L -RSB- -LSB- M -RSB- -LSB- N -RSB- -LSB- N -RSB- .'.split())
self.assertEqual(sentences[1].tokens(), '# ! ? : -'.split())
self.assertEqual(sentences[2].tokens(),
'744 644 413 313 213 231 131 544 444 344 543 443 613 513 921 821 721 621 521 001'.split())
self.assertEqual(sentences[3].tokens(), list(map(str, range(1, 501))))
def test_tokenizer(self):
tokens1 = tokenize("Tokenize this sentence, please.")
self.assertEqual(tokens1, ['Tokenize', 'this', 'sentence', ',',
'please', '.'])
tokens2 = tokenize("Whoa! What's going on here? @($*")
self.assertEqual(tokens2, ['Whoa', '!', 'What', "'s", 'going',
'on', 'here', '?', '@', '-LRB-', '$', '*'])
# arguably, this is a bug as 3 should have been separated from -LSB-
tokens3 = tokenize("You can't do that (or can you?). [3]")
self.assertEqual(tokens3, ['You', 'can', "n't", 'do', 'that',
'-LRB-', 'or', 'can', 'you', '?',
'-RRB-', '.', '-LSB-3', '-RSB-'])
def test_unified_model_params(self):
self.assertRaises(IOError, get_unified_model_parameters,
'/path/to/nowhere/hopefully')
self.assertRaises(IOError, RerankingParser.from_unified_model_dir,
'/path/to/nowhere/hopefully')
# rest is hard to test given that we can only load one model...
class RerankingParserTests(unittest.TestCase):
def test_1_loading_errors(self):
# parser loading errors
rrp = RerankingParser()
repr(rrp)
self.assertRaises(ValueError, rrp.load_parser_model,
'/path/to/nowhere/hopefully')
self.assertRaises(ValueError, rrp.check_models_loaded_or_error, False)
self.assertRaises(ValueError, rrp.load_parser_model, u'\u2602')
self.assertRaises(ValueError, rrp.check_models_loaded_or_error, False)
self.assertRaises(ValueError, rrp.load_reranker_model, u'\u2602',
'second-stage/models/ec50spfinal/cvlm-l1c10P1-'
'weights.gz')
self.assertRaises(ValueError, rrp.load_reranker_model,
'second-stage/models/ec50spfinal/features.gz',
u'\u2602')
self.assertRaises(ValueError, rrp.check_models_loaded_or_error, True)
self.assertRaises(ValueError, rrp.check_models_loaded_or_error, True)
# tree function loading errors
tree = Tree('(S1 (S (NP (DT This)) (VP (AUX is) (NP (DT a) '
'(NN sentence))) (. .)))')
self.assertRaises(ValueError, tree.evaluate, tree)
self.assertRaises(ValueError, tree.log_prob)
self.assertRaises(ValueError, tree.head)
s = Sentence('(Sentence for when the parser is not loaded)')
self.assertRaises(ValueError, s.independent_tags)
def test_2_basics(self):
rrp = RerankingParser()
# make sure we're starting fresh
self.assertRaises(ValueError, rrp.check_models_loaded_or_error, False)
self.assertRaises(ValueError, rrp.check_models_loaded_or_error, True)
self.assertRaises(ValueError, rrp.check_models_loaded_or_error,
'auto')
rrp.load_parser_model('first-stage/DATA/EN')
repr(rrp)
self.assertEqual(rrp.check_models_loaded_or_error(False), False)
self.assertRaises(ValueError, rrp.check_models_loaded_or_error, True)
rrp.load_reranker_model('second-stage/models/ec50spfinal/features.gz',
'second-stage/models/ec50spfinal/cvlm-'
'l1c10P1-weights.gz')
repr(rrp)
self.assertEqual(rrp.check_models_loaded_or_error(False), False)
self.assertEqual(rrp.check_models_loaded_or_error(True), True)
self.assertEqual(rrp.check_models_loaded_or_error('auto'), True)
self.assertEqual(rrp.parser_model_dir, 'first-stage/DATA/EN')
self.assertEqual(rrp.simple_parse('This is simple.'),
'(S1 (S (NP (DT This)) (VP (AUX is) (ADJP '
'(JJ simple))) (. .)))')
nbest_list = rrp.parse('This is a sentence.')
self.failUnless(isinstance(nbest_list, NBestList))
self.assertNBestListStringsAlmostEqual(str(nbest_list).strip(), '''
13 x
-8.88655845608 -30.3981669701
(S1 (S (NP (DT This)) (VP (AUX is) (NP (DT a) (NN sentence))) (. .)))
-13.936145728 -46.4346864304
(S1 (S (NP (NNP This)) (VP (AUX is) (NP (DT a) (NN sentence))) (. .)))
-14.3607122818 -47.4390055933
(S1 (S (NP (NN This)) (VP (AUX is) (NP (DT a) (NN sentence))) (. .)))
-14.7026007585 -41.4723634172
(S1 (S (NP (DT This)) (VP (AUX is) (S (NP (DT a) (NN sentence)))) (. .)))
-15.3583543915 -48.567244735
(S1 (S (DT This) (VP (AUX is) (NP (DT a) (NN sentence))) (. .)))
-19.285724575 -56.2161267587
(S1 (SBARQ (WHNP (DT This)) (SQ (AUX is) (NP (DT a) (NN sentence))) (. .)))
-19.7521880305 -57.5088828776
(S1 (S (NP (NNP This)) (VP (AUX is) (S (NP (DT a) (NN sentence)))) (. .)))
-20.1767545843 -58.5132020405
(S1 (S (NP (NN This)) (VP (AUX is) (S (NP (DT a) (NN sentence)))) (. .)))
-20.2330660538 -55.5759876981
(S1 (SBARQ (WHNP (DT This)) (SQ (VP (AUX is) (NP (DT a) (NN sentence)))) (. .)))
-20.3467824313 -59.0747445934
(S1 (S (ADVP (DT This)) (VP (AUX is) (NP (DT a) (NN sentence))) (. .)))
-21.174396694 -59.6414411821
(S1 (S (DT This) (VP (AUX is) (S (NP (DT a) (NN sentence)))) (. .)))
-26.1628247309 -70.1489410336
(S1 (S (ADVP (DT This)) (VP (AUX is) (S (NP (DT a) (NN sentence)))) (. .)))
-26.7808410125 -68.4818143615
(S1 (SBARQ (WHNP (DT This)) (SQ (VP (AUX is) (S (NP (DT a) (NN sentence))))) (. .)))'''.strip())
self.failUnless(isinstance(nbest_list[0], ScoredParse))
self.assertEqual(str(nbest_list[0]), '-30.398166970085 -8.886558456079 '
'(S1 (S (NP (DT This)) (VP (AUX is) (NP (DT a) (NN '
'sentence))) (. .)))')
repr(nbest_list)
repr(nbest_list[0])
self.failUnless(isinstance(nbest_list[0].ptb_parse, Tree))
self.assertEqual(str(nbest_list[0].ptb_parse),
'(S1 (S (NP (DT This)) (VP (AUX is) (NP (DT a) (NN '
'sentence))) (. .)))')
self.assertAlmostEqual(nbest_list[0].parser_score, -30.3981669701)
self.assertAlmostEqual(nbest_list[0].reranker_score, -8.88655845608)
self.assertEqual(len(nbest_list), 13)
self.assertEqual(len(list(iter(nbest_list))), 13)
self.assertAlmostEqual(nbest_list[0].ptb_parse.log_prob(),
-30.3981669701)
self.assertEqual(nbest_list[0].ptb_parse.log_prob(),
nbest_list[0].parser_score)
self.assertEqual(str(nbest_list.fuse()),
'(S1 (S (NP (DT This)) (VP (AUX is) (NP (DT a) '
'(NN sentence))) (. .)))')
self.assertEqual(str(nbest_list.fuse(use_parser_scores=True)),
'(S1 (S (NP (DT This)) (VP (AUX is) (NP (DT a) '
'(NN sentence))) (. .)))')
nbest_list2 = rrp.parse(['This', 'is', 'a', 'pretokenized',
'sentence', '.'])
self.assertEqual(len(nbest_list2), 50)
nbest_list2_reranker_str = '''
50 x
-13.9140458986 -49.4538516291
(S1 (S (NP (DT This)) (VP (AUX is) (NP (DT a) (JJ pretokenized) (NN sentence))) (. .)))
-16.0212658926 -54.3324639691
(S1 (S (NP (DT This)) (VP (AUX is) (NP (DT a) (VBN pretokenized) (NN sentence))) (. .)))
-17.5114530692 -58.3751587397
(S1 (S (NP (DT This)) (VP (AUX is) (NP (DT a) (NN pretokenized) (NN sentence))) (. .)))
-17.6880864662 -58.7187102935
(S1 (S (NP (DT This)) (VP (AUX is) (NP (DT a) (JJS pretokenized) (NN sentence))) (. .)))
-17.8510752677 -60.8294536479
(S1 (S (NP (DT This)) (VP (AUX is) (NP (DT a) (JJR pretokenized) (NN sentence))) (. .)))
-18.9636331706 -65.4903710895
(S1 (S (NP (NNP This)) (VP (AUX is) (NP (DT a) (JJ pretokenized) (NN sentence))) (. .)))
-19.3881997244 -66.4946902523
(S1 (S (NP (NN This)) (VP (AUX is) (NP (DT a) (JJ pretokenized) (NN sentence))) (. .)))
-19.4209711415 -59.1651025727
(S1 (S (NP (DT This)) (VP (AUX is) (NP (DT a) (ADJP (VBN pretokenized)) (NN sentence))) (. .)))
-19.7727647746 -67.2653539515
(S1 (S (NP (DT This)) (VP (AUX is) (NP (DT a) (VBD pretokenized) (NN sentence))) (. .)))
-20.3192490811 -63.5531840148
(S1 (S (NP (DT This)) (VP (AUX is) (S (NP (DT a) (JJ pretokenized) (NN sentence)))) (. .)))
-20.4170868341 -67.622929394
(S1 (S (DT This) (VP (AUX is) (NP (DT a) (JJ pretokenized) (NN sentence))) (. .)))
-20.4219469891 -59.1944994701
(S1 (S (NP (DT This)) (VP (AUX is) (NP (DT a) (ADJP (JJ pretokenized)) (NN sentence))) (. .)))
-21.0708531645 -70.3689834294
(S1 (S (NP (NNP This)) (VP (AUX is) (NP (DT a) (VBN pretokenized) (NN sentence))) (. .)))
-21.4954197183 -71.3733025923
(S1 (S (NP (NN This)) (VP (AUX is) (NP (DT a) (VBN pretokenized) (NN sentence))) (. .)))
-21.512397545 -67.4255190551
(S1 (S (NP (DT This)) (VP (AUX is) (DT a) (VP (VBN pretokenized) (NP (NN sentence)))) (. .)))
-21.5209365225 -65.5850885241
(S1 (S (NP (DT This)) (VP (AUX is) (NP (DT a) (ADJP (VBD pretokenized)) (NN sentence))) (. .)))
-21.5396901802 -65.9911116663
(S1 (S (NP (DT This)) (VP (AUX is) (S (NP (DT a) (VBN pretokenized) (NN sentence)))) (. .)))
-22.1177810904 -67.5021157814
(S1 (S (NP (DT This)) (VP (AUX is) (NP (DT a) (ADJP (JJR pretokenized)) (NN sentence))) (. .)))
-22.2524735497 -66.9699233838
(S1 (S (NP (DT This)) (VP (AUX is) (NP (DT a) (NX (JJ pretokenized) (NN sentence)))) (. .)))
-22.3283866387 -71.4009340869
(S1 (S (NP (DT This)) (VP (AUX is) (NP (DT a) (ADJP (JJS pretokenized)) (NN sentence))) (. .)))
-22.524306828 -72.501541734
(S1 (S (DT This) (VP (AUX is) (NP (DT a) (VBN pretokenized) (NN sentence))) (. .)))
-22.5610403412 -74.4116782
(S1 (S (NP (NNP This)) (VP (AUX is) (NP (DT a) (NN pretokenized) (NN sentence))) (. .)))
-22.6931925386 -69.1503165846
(S1 (S (NP (DT This)) (VP (AUX is) (NP (DT a) (NX (VBN pretokenized) (NN sentence)))) (. .)))
-22.7376737381 -74.7552297539
(S1 (S (NP (NNP This)) (VP (AUX is) (NP (DT a) (JJS pretokenized) (NN sentence))) (. .)))
-22.8282464284 -67.9116806301
(S1 (S (NP (DT This)) (VP (AUX is) (NP (NP (DT a)) (JJ pretokenized) (NN sentence))) (. .)))
-22.9006625397 -76.8659731082
(S1 (S (NP (NNP This)) (VP (AUX is) (NP (DT a) (JJR pretokenized) (NN sentence))) (. .)))
-22.985606895 -75.4159973629
(S1 (S (NP (NN This)) (VP (AUX is) (NP (DT a) (NN pretokenized) (NN sentence))) (. .)))
-23.1622402947 -75.7595489168
(S1 (S (NP (NN This)) (VP (AUX is) (NP (DT a) (JJS pretokenized) (NN sentence))) (. .)))
-23.243264351 -71.0336954504
(S1 (S (NP (DT This)) (VP (AUX is) (S (NP (DT a) (NN pretokenized) (NN sentence)))) (. .)))
-23.2687569853 -75.4784919433
(S1 (S (NP (DT This)) (VP (AUX is) (DT a) (VP (VBD pretokenized) (NP (NN sentence)))) (. .)))
-23.3252290934 -77.8702922711
(S1 (S (NP (NN This)) (VP (AUX is) (NP (DT a) (JJR pretokenized) (NN sentence))) (. .)))
-23.4064078284 -71.7516211232
(S1 (S (NP (DT This)) (VP (AUX is) (NP (NP (DT a)) (VBN pretokenized) (NN sentence))) (. .)))
-23.4583333097 -71.3666592906
(S1 (S (NP (DT This)) (VP (AUX is) (S (NP (DT a) (JJS pretokenized) (NN sentence)))) (. .)))
-23.6237874662 -73.7851216229
(S1 (S (NP (DT This)) (VP (AUX is) (S (NP (DT a) (JJR pretokenized) (NN sentence)))) (. .)))
-24.0144940047 -76.5442365046
(S1 (S (DT This) (VP (AUX is) (NP (DT a) (NN pretokenized) (NN sentence))) (. .)))
-24.1911274044 -76.8877880584
(S1 (S (DT This) (VP (AUX is) (NP (DT a) (JJS pretokenized) (NN sentence))) (. .)))
-24.4705584135 -75.2016220331
(S1 (S (NP (NNP This)) (VP (AUX is) (NP (DT a) (ADJP (VBN pretokenized)) (NN sentence))) (. .)))
-24.8951249701 -76.205941196
(S1 (S (NP (NN This)) (VP (AUX is) (NP (DT a) (ADJP (VBN pretokenized)) (NN sentence))) (. .)))
-25.0212681665 -71.3769439695
(S1 (S (NP (DT This)) (VP (AUX is) (S (NP (DT a) (ADJP (VBN pretokenized)) (NN sentence)))) (. .)))
-25.1973992739 -78.1304292524
(S1 (S (ADVP (DT This)) (VP (AUX is) (NP (DT a) (JJ pretokenized) (NN sentence))) (. .)))
-25.4715342611 -75.2310189305
(S1 (S (NP (NNP This)) (VP (AUX is) (NP (DT a) (ADJP (JJ pretokenized)) (NN sentence))) (. .)))
-25.4803163686 -72.8673204341
(S1 (S (NP (DT This)) (VP (AUX is) (NP (DT a) (NX (ADJP (JJ pretokenized)) (NN sentence)))) (. .)))
-25.6554871258 -73.3251916009
(S1 (S (NP (DT This)) (VP (AUX is) (NP (DT a) (NX (ADJP (VBN pretokenized)) (NN sentence)))) (. .)))
-25.8961008149 -76.2353380934
(S1 (S (NP (NN This)) (VP (AUX is) (NP (DT a) (ADJP (JJ pretokenized)) (NN sentence))) (. .)))
-25.9240120798 -77.3341803376
(S1 (S (DT This) (VP (AUX is) (NP (DT a) (ADJP (VBN pretokenized)) (NN sentence))) (. .)))
-26.4170898917 -71.9105721867
(S1 (S (NP (DT This)) (VP (AUX is) (S (NP (DT a) (ADJP (JJ pretokenized)) (NN sentence)))) (. .)))
-26.9249879274 -77.3635772351
(S1 (S (DT This) (VP (AUX is) (NP (DT a) (ADJP (JJ pretokenized)) (NN sentence))) (. .)))
-27.1124193372 -77.7040813639
(S1 (S (NP (DT This)) (VP (AUX is) (S (NP (DT a) (ADJP (VBD pretokenized)) (NN sentence)))) (. .)))
-27.2836774072 -77.313649149
(S1 (S (NP (DT This)) (VP (AUX is) (NP (NP (DT a)) (ADJP (VBN pretokenized)) (NN sentence))) (. .)))
-28.0026072817 -76.0416349799
(S1 (S (NP (DT This)) (VP (AUX is) (NP (NP (DT a)) (ADJP (JJ pretokenized)) (NN sentence))) (. .)))'''.strip()
self.assertNBestListStringsAlmostEqual(str(nbest_list2).strip(), nbest_list2_reranker_str)
nbest_list2.sort_by_parser_scores()
self.assertNBestListStringsAlmostEqual(str(nbest_list2).strip(), '''50 x
-13.914045898638 -49.453851629115
(S1 (S (NP (DT This)) (VP (AUX is) (NP (DT a) (JJ pretokenized) (NN sentence))) (. .)))
-16.021265892586 -54.332463969050
(S1 (S (NP (DT This)) (VP (AUX is) (NP (DT a) (VBN pretokenized) (NN sentence))) (. .)))
-17.511453069247 -58.375158739667
(S1 (S (NP (DT This)) (VP (AUX is) (NP (DT a) (NN pretokenized) (NN sentence))) (. .)))
-17.688086466190 -58.718710293517
(S1 (S (NP (DT This)) (VP (AUX is) (NP (DT a) (JJS pretokenized) (NN sentence))) (. .)))
-19.420971141523 -59.165102572744
(S1 (S (NP (DT This)) (VP (AUX is) (NP (DT a) (ADJP (VBN pretokenized)) (NN sentence))) (. .)))
-20.421946989137 -59.194499470145
(S1 (S (NP (DT This)) (VP (AUX is) (NP (DT a) (ADJP (JJ pretokenized)) (NN sentence))) (. .)))
-17.851075267698 -60.829453647853
(S1 (S (NP (DT This)) (VP (AUX is) (NP (DT a) (JJR pretokenized) (NN sentence))) (. .)))
-20.319249081103 -63.553184014836
(S1 (S (NP (DT This)) (VP (AUX is) (S (NP (DT a) (JJ pretokenized) (NN sentence)))) (. .)))
-18.963633170595 -65.490371089474
(S1 (S (NP (NNP This)) (VP (AUX is) (NP (DT a) (JJ pretokenized) (NN sentence))) (. .)))
-21.520936522492 -65.585088524066
(S1 (S (NP (DT This)) (VP (AUX is) (NP (DT a) (ADJP (VBD pretokenized)) (NN sentence))) (. .)))
-21.539690180195 -65.991111666347
(S1 (S (NP (DT This)) (VP (AUX is) (S (NP (DT a) (VBN pretokenized) (NN sentence)))) (. .)))
-19.388199724373 -66.494690252349
(S1 (S (NP (NN This)) (VP (AUX is) (NP (DT a) (JJ pretokenized) (NN sentence))) (. .)))
-22.252473549687 -66.969923383825
(S1 (S (NP (DT This)) (VP (AUX is) (NP (DT a) (NX (JJ pretokenized) (NN sentence)))) (. .)))
-19.772764774577 -67.265353951546
(S1 (S (NP (DT This)) (VP (AUX is) (NP (DT a) (VBD pretokenized) (NN sentence))) (. .)))
-21.512397544985 -67.425519055141
(S1 (S (NP (DT This)) (VP (AUX is) (DT a) (VP (VBN pretokenized) (NP (NN sentence)))) (. .)))
-22.117781090411 -67.502115781364
(S1 (S (NP (DT This)) (VP (AUX is) (NP (DT a) (ADJP (JJR pretokenized)) (NN sentence))) (. .)))
-20.417086834072 -67.622929394020
(S1 (S (DT This) (VP (AUX is) (NP (DT a) (JJ pretokenized) (NN sentence))) (. .)))
-22.828246428437 -67.911680630137
(S1 (S (NP (DT This)) (VP (AUX is) (NP (NP (DT a)) (JJ pretokenized) (NN sentence))) (. .)))
-22.693192538556 -69.150316584631
(S1 (S (NP (DT This)) (VP (AUX is) (NP (DT a) (NX (VBN pretokenized) (NN sentence)))) (. .)))
-21.070853164543 -70.368983429409
(S1 (S (NP (NNP This)) (VP (AUX is) (NP (DT a) (VBN pretokenized) (NN sentence))) (. .)))
-23.243264351014 -71.033695450412
(S1 (S (NP (DT This)) (VP (AUX is) (S (NP (DT a) (NN pretokenized) (NN sentence)))) (. .)))
-23.458333309709 -71.366659290552
(S1 (S (NP (DT This)) (VP (AUX is) (S (NP (DT a) (JJS pretokenized) (NN sentence)))) (. .)))
-21.495419718321 -71.373302592284
(S1 (S (NP (NN This)) (VP (AUX is) (NP (DT a) (VBN pretokenized) (NN sentence))) (. .)))
-25.021268166466 -71.376943969501
(S1 (S (NP (DT This)) (VP (AUX is) (S (NP (DT a) (ADJP (VBN pretokenized)) (NN sentence)))) (. .)))
-22.328386638748 -71.400934086901
(S1 (S (NP (DT This)) (VP (AUX is) (NP (DT a) (ADJP (JJS pretokenized)) (NN sentence))) (. .)))
-23.406407828443 -71.751621123244
(S1 (S (NP (DT This)) (VP (AUX is) (NP (NP (DT a)) (VBN pretokenized) (NN sentence))) (. .)))
-26.417089891719 -71.910572186723
(S1 (S (NP (DT This)) (VP (AUX is) (S (NP (DT a) (ADJP (JJ pretokenized)) (NN sentence)))) (. .)))
-22.524306828021 -72.501541733955
(S1 (S (DT This) (VP (AUX is) (NP (DT a) (VBN pretokenized) (NN sentence))) (. .)))
-25.480316368580 -72.867320434050
(S1 (S (NP (DT This)) (VP (AUX is) (NP (DT a) (NX (ADJP (JJ pretokenized)) (NN sentence)))) (. .)))
-25.655487125780 -73.325191600939
(S1 (S (NP (DT This)) (VP (AUX is) (NP (DT a) (NX (ADJP (VBN pretokenized)) (NN sentence)))) (. .)))
-23.623787466208 -73.785121622894
(S1 (S (NP (DT This)) (VP (AUX is) (S (NP (DT a) (JJR pretokenized) (NN sentence)))) (. .)))
-22.561040341204 -74.411678200026
(S1 (S (NP (NNP This)) (VP (AUX is) (NP (DT a) (NN pretokenized) (NN sentence))) (. .)))
-22.737673738147 -74.755229753876
(S1 (S (NP (NNP This)) (VP (AUX is) (NP (DT a) (JJS pretokenized) (NN sentence))) (. .)))
-24.470558413480 -75.201622033104
(S1 (S (NP (NNP This)) (VP (AUX is) (NP (DT a) (ADJP (VBN pretokenized)) (NN sentence))) (. .)))
-25.471534261094 -75.231018930505
(S1 (S (NP (NNP This)) (VP (AUX is) (NP (DT a) (ADJP (JJ pretokenized)) (NN sentence))) (. .)))
-22.985606894982 -75.415997362900
(S1 (S (NP (NN This)) (VP (AUX is) (NP (DT a) (NN pretokenized) (NN sentence))) (. .)))
-23.268756985256 -75.478491943299
(S1 (S (NP (DT This)) (VP (AUX is) (DT a) (VP (VBD pretokenized) (NP (NN sentence)))) (. .)))
-23.162240294725 -75.759548916751
(S1 (S (NP (NN This)) (VP (AUX is) (NP (DT a) (JJS pretokenized) (NN sentence))) (. .)))
-28.002607281660 -76.041634979928
(S1 (S (NP (DT This)) (VP (AUX is) (NP (NP (DT a)) (ADJP (JJ pretokenized)) (NN sentence))) (. .)))
-24.895124970057 -76.205941195978
(S1 (S (NP (NN This)) (VP (AUX is) (NP (DT a) (ADJP (VBN pretokenized)) (NN sentence))) (. .)))
-25.896100814872 -76.235338093379
(S1 (S (NP (NN This)) (VP (AUX is) (NP (DT a) (ADJP (JJ pretokenized)) (NN sentence))) (. .)))
-24.014494004681 -76.544236504571
(S1 (S (DT This) (VP (AUX is) (NP (DT a) (NN pretokenized) (NN sentence))) (. .)))
-22.900662539655 -76.865973108212
(S1 (S (NP (NNP This)) (VP (AUX is) (NP (DT a) (JJR pretokenized) (NN sentence))) (. .)))
-24.191127404424 -76.887788058422
(S1 (S (DT This) (VP (AUX is) (NP (DT a) (JJS pretokenized) (NN sentence))) (. .)))
-27.283677407209 -77.313649149009
(S1 (S (NP (DT This)) (VP (AUX is) (NP (NP (DT a)) (ADJP (VBN pretokenized)) (NN sentence))) (. .)))
-25.924012079757 -77.334180337649
(S1 (S (DT This) (VP (AUX is) (NP (DT a) (ADJP (VBN pretokenized)) (NN sentence))) (. .)))
-26.924987927370 -77.363577235050
(S1 (S (DT This) (VP (AUX is) (NP (DT a) (ADJP (JJ pretokenized)) (NN sentence))) (. .)))
-27.112419337186 -77.704081363896
(S1 (S (NP (DT This)) (VP (AUX is) (S (NP (DT a) (ADJP (VBD pretokenized)) (NN sentence)))) (. .)))
-23.325229093433 -77.870292271087
(S1 (S (NP (NN This)) (VP (AUX is) (NP (DT a) (JJR pretokenized) (NN sentence))) (. .)))
-25.197399273862 -78.130429252394
(S1 (S (ADVP (DT This)) (VP (AUX is) (NP (DT a) (JJ pretokenized) (NN sentence))) (. .)))'''.strip())
# restore original sorting
nbest_list2.sort_by_reranker_scores()
self.assertNBestListStringsAlmostEqual(str(nbest_list2).strip(), nbest_list2_reranker_str)
nbest_list3 = rrp.parse('Parser only!', rerank=False,
sentence_id='parser_only')
self.assertEqual(len(nbest_list3), 50)
nbest_list3_str = '''50 parser_only
-52.57783414
(S1 (S (VP (VB Parser) (ADVP (RB only))) (. !)))
-53.19573267
(S1 (FRAG (NP (NN Parser) (RB only)) (. !)))
-54.54836523
(S1 (FRAG (ADVP (RBR Parser) (RB only)) (. !)))
-55.09170692
(S1 (FRAG (NP (NN Parser)) (ADVP (RB only)) (. !)))
-55.14038635
(S1 (FRAG (NP (NNP Parser)) (ADVP (RB only)) (. !)))
-57.25584872
(S1 (FRAG (ADJP (JJR Parser)) (ADVP (RB only)) (. !)))
-57.39656583
(S1 (S (VP (VBP Parser) (ADVP (RB only))) (. !)))
-57.60634106
(S1 (S (VP (VB Parser) (ADVP (JJ only))) (. !)))
-57.85039025
(S1 (S (VP (VB Parser) (RB only)) (. !)))
-57.87021346
(S1 (FRAG (ADJP (JJ Parser)) (ADVP (RB only)) (. !)))
-57.89165223
(S1 (FRAG (ADVP (JJR Parser)) (RB only) (. !)))
-58.64850061
(S1 (FRAG (ADJP (RBR Parser) (JJ only)) (. !)))
-58.71571915
(S1 (FRAG (NP (NN Parser)) (ADJP (RB only)) (. !)))
-58.75007348
(S1 (FRAG (ADVP (RB Parser)) (RB only) (. !)))
-58.76439858
(S1 (FRAG (NP (NNP Parser)) (ADJP (RB only)) (. !)))
-58.92639016
(S1 (FRAG (ADVP (RB Parser) (RB only)) (. !)))
-59.10118489
(S1 (FRAG (NP (NNP Parser) (RB only)) (. !)))
-59.42661454
(S1 (FRAG (NP (NNP Parser)) (ADJP (JJ only)) (. !)))
-59.59006341
(S1 (FRAG (RB Parser) (ADVP (RB only)) (. !)))
-59.65817632
(S1 (FRAG (NP (NN Parser)) (ADJP (JJ only)) (. !)))
-59.73616513
(S1 (FRAG (ADJP (JJR Parser)) (NP (RB only)) (. !)))
-59.93976344
(S1 (FRAG (NP (NP (NN Parser)) (ADVP (RB only))) (. !)))
-60.35052988
(S1 (FRAG (ADJP (JJ Parser)) (NP (RB only)) (. !)))
-60.38657945
(S1 (S (VP (VB Parser) (NP (RB only))) (. !)))
-60.57674496
(S1 (SQ (VP (VB Parser) (ADVP (RB only))) (. !)))
-60.62371178
(S1 (FRAG (ADJP (NNP Parser) (JJ only)) (. !)))
-60.63872478
(S1 (FRAG (RB Parser) (RB only) (. !)))
-60.68395245
(S1 (FRAG (ADVP (JJ Parser)) (RB only) (. !)))
-60.69738473
(S1 (NP (NP (NNP Parser)) (NP (RB only)) (. !)))
-60.70443033
(S1 (NP (NN Parser) (RB only) (. !)))
-60.75513913
(S1 (FRAG (ADJP (RBR Parser) (RB only)) (. !)))
-60.81313407
(S1 (FRAG (ADVP (RBR Parser)) (RB only) (. !)))
-60.83595554
(S1 (FRAG (ADJP (JJR Parser) (JJ only)) (. !)))
-60.893467
(S1 (S (VP (VB Parser) (ADJP (RB only))) (. !)))
-61.02350358
(S1 (FRAG (ADVP (NN Parser)) (RB only) (. !)))
-61.22216468
(S1 (FRAG (NP (NP (NNP Parser)) (ADVP (RB only))) (. !)))
-61.34291471
(S1 (NP (NNP Parser) (RB only) (. !)))
-61.38022269
(S1 (FRAG (NP (JJ Parser)) (ADVP (RB only)) (. !)))
-61.43308909
(S1 (FRAG (ADVP (JJR Parser)) (ADVP (RB only)) (. !)))
-61.4726006
(S1 (FRAG (NP (NN Parser)) (NP (RB only)) (. !)))
-61.49864523
(S1 (FRAG (NP (JJR Parser)) (ADVP (RB only)) (. !)))
-61.52128003
(S1 (FRAG (NP (NNP Parser)) (NP (RB only)) (. !)))
-61.59037588
(S1 (S (VP (VB Parser) (ADJP (JJ only))) (. !)))
-61.60397522
(S1 (FRAG (JJ Parser) (ADVP (RB only)) (. !)))
-61.67405796
(S1 (S (VP (NN Parser) (ADVP (RB only))) (. !)))
-61.6908843
(S1 (FRAG (ADVP (NNP Parser)) (RB only) (. !)))
-61.74601035
(S1 (S (NP (NNP Parser)) (ADJP (JJ only)) (. !)))
-61.91324518
(S1 (FRAG (RB Parser) (ADJP (RB only)) (. !)))
-61.94221948
(S1 (S (ADJP (RBR Parser) (JJ only)) (. !)))
-61.97779994
(S1 (FRAG (ADJP (JJ Parser) (JJ only)) (. !)))'''.strip()
self.assertNBestListStringsAlmostEqual(str(nbest_list3).strip(), nbest_list3_str)
nbest_list3.sort_by_parser_scores()
self.assertNBestListStringsAlmostEqual(str(nbest_list3).strip(), nbest_list3_str)
nbest_list3.sort_by_reranker_scores()
self.assertNBestListStringsAlmostEqual(str(nbest_list3).strip(), nbest_list3_str)
self.assertEqual(str(nbest_list3.fuse(use_parser_scores=True)),
'(S1 (S (VP (VB Parser) (ADVP (RB only))) (. !)))')
self.assertEqual(str(nbest_list3.fuse(use_parser_scores=False)),
'(S1 (S (VP (VB Parser) (ADVP (RB only))) (. !)))')
# parsing with tag constraints
trees1 = rrp.parse_tagged(['Time', 'flies'], possible_tags={})
self.assertEqual(str(trees1[0].ptb_parse),
'(S1 (S (NP (NNP Time)) (VP (VBZ flies))))')
trees2 = rrp.parse_tagged(['Time', 'flies'],
possible_tags={0: 'VB', 1: 'NNS'})
self.assertEqual(str(trees2[0].ptb_parse),
'(S1 (NP (VB Time) (NNS flies)))')
trees3 = rrp.parse_tagged(['Time', 'flies'], possible_tags={0: 'VB'})
self.assertEqual(str(trees3[0].ptb_parse),
'(S1 (S (VP (VB Time) (NP (VBZ flies)))))')
trees4 = rrp.parse_tagged(['Time', 'flies'],
possible_tags={0: ['VB', 'JJ', 'NN']})
self.assertEqual(str(trees4[0].ptb_parse),
'(S1 (NP (NN Time) (VBZ flies)))')
self.assertRaises(ValueError, rrp.parse_tagged, ['Time', 'flies'],
possible_tags={0: 'BADTAG'})
self.assertEqual(rrp.set_parser_options(nbest=10),
dict(case_insensitive=False, debug=0,
language='En', nbest=10, overparsing=21,
small_corpus=True, smooth_pos=0))
nbest_list = rrp.parse('The list is smaller now.', rerank=False)
self.assertEqual(len(nbest_list), 10)
# tagging tests
self.assertEqual(rrp.tag("Time flies while you're having fun."),
[('Time', 'NNP'), ('flies', 'VBZ'),
('while', 'IN'), ('you', 'PRP'), ("'re", 'AUX'),
('having', 'AUXG'), ('fun', 'NN'), ('.', '.')])
self.assertEqual(rrp.tag('British left waffles on '
'Falklands .'.split()),
[('British', 'JJ'), ('left', 'NN'),
('waffles', 'VBZ'), ('on', 'IN'),
('Falklands', 'NNP'), ('.', '.')])
sentence = Sentence('British left waffles on Falklands .')
self.assertEqual(sentence.independent_tags(),
('JJ', 'VBN', 'NNS', 'IN', 'NNP', '.'))
# parsing with constraints tests
constraints = {(1, 5): ['VP']}
nbest_list = rrp.parse_constrained('British left waffles on Falklands .'.split(), constraints)
self.assertNBestListStringsAlmostEqual(str(nbest_list).strip(), '''
10 x
-25.836244321 -93.6286744642
(S1 (S (NP (NNPS British)) (VP (VBD left) (NP (NNS waffles)) (PP (IN on) (NP (NNP Falklands)))) (. .)))
-25.9966925705 -95.7474111377
(S1 (S (NP (NNPS British)) (VP (VBD left) (NP (NNS waffles)) (PP (IN on) (NP (NNPS Falklands)))) (. .)))
-26.6154733928 -93.1372330926
(S1 (S (NP (NNPS British)) (VP (VBD left) (NP (NP (NNS waffles)) (PP (IN on) (NP (NNP Falklands))))) (. .)))
-26.9453743621 -94.8459445679
(S1 (S (NP (NNPS British)) (VP (VBD left) (NP (NP (NNS waffles)) (PP (IN on) (NP (NNPS Falklands))))) (. .)))
-27.0537353446 -93.6112342559
(S1 (S (NP (NNP British)) (VP (VBD left) (NP (NNS waffles)) (PP (IN on) (NP (NNP Falklands)))) (. .)))
-27.3221541914 -95.7299709295
(S1 (S (NP (NNP British)) (VP (VBD left) (NP (NNS waffles)) (PP (IN on) (NP (NNPS Falklands)))) (. .)))
-27.9003378837 -93.1197928843
(S1 (S (NP (NNP British)) (VP (VBD left) (NP (NP (NNS waffles)) (PP (IN on) (NP (NNP Falklands))))) (. .)))
-28.2198807661 -95.9050765306
(S1 (S (NP (NNS British)) (VP (VBD left) (NP (NNS waffles)) (PP (IN on) (NP (NNP Falklands)))) (. .)))
-28.338209453 -94.8285043597
(S1 (S (NP (NNP British)) (VP (VBD left) (NP (NP (NNS waffles)) (PP (IN on) (NP (NNPS Falklands))))) (. .)))
-29.122754708 -95.4136351589
(S1 (S (NP (NNS British)) (VP (VBD left) (NP (NP (NNS waffles)) (PP (IN on) (NP (NNP Falklands))))) (. .)))'''.strip())
self.assertEqual(nbest_list.get_parser_best(), nbest_list[6])
nbest_list2 = rrp.parse_constrained('British left waffles on '
'Falklands .'.split(), {})
self.assertNBestListStringsAlmostEqual(str(nbest_list2).strip(), '''
10 x
-25.8126695909 -90.2342444645
(S1 (S (NP (JJ British) (NN left)) (VP (VBZ waffles) (PP (IN on) (NP (NNP Falklands)))) (. .)))
-25.836244321 -93.6286744642
(S1 (S (NP (NNPS British)) (VP (VBD left) (NP (NNS waffles)) (PP (IN on) (NP (NNP Falklands)))) (. .)))
-26.0312053125 -92.352981138
(S1 (S (NP (JJ British) (NN left)) (VP (VBZ waffles) (PP (IN on) (NP (NNPS Falklands)))) (. .)))
-26.6154733928 -93.1372330926
(S1 (S (NP (NNPS British)) (VP (VBD left) (NP (NP (NNS waffles)) (PP (IN on) (NP (NNP Falklands))))) (. .)))
-26.9371121677 -93.9026623336
(S1 (S (NP (JJ British) (NN left)) (VP (VBZ waffles) (PP (IN on) (NP (NNS Falklands)))) (. .)))
-26.9453743621 -94.8459445679
(S1 (S (NP (NNPS British)) (VP (VBD left) (NP (NP (NNS waffles)) (PP (IN on) (NP (NNPS Falklands))))) (. .)))
-27.0537353446 -93.6112342559
(S1 (S (NP (NNP British)) (VP (VBD left) (NP (NNS waffles)) (PP (IN on) (NP (NNP Falklands)))) (. .)))
-27.3630657512 -95.1571335758
(S1 (S (NP (NNP British) (NN left)) (VP (VBZ waffles) (PP (IN on) (NP (NNP Falklands)))) (. .)))
-27.9003378837 -93.1197928843
(S1 (S (NP (NNP British)) (VP (VBD left) (NP (NP (NNS waffles)) (PP (IN on) (NP (NNP Falklands))))) (. .)))
-28.338209453 -94.8285043597
(S1 (S (NP (NNP British)) (VP (VBD left) (NP (NP (NNS waffles)) (PP (IN on) (NP (NNPS Falklands))))) (. .)))'''.strip())
nbest_list3 = rrp.parse_constrained('British left waffles on '
'Falklands .'.split(),
constraints,
possible_tags={1: 'VBD'})
self.assertEqual(str(nbest_list), str(nbest_list3))
nbest_list4 = rrp.parse_constrained('British left waffles on '
'Falklands .'.split(),
constraints,
possible_tags={1: 'VBZ'})
self.assertNBestListStringsAlmostEqual(str(nbest_list4).strip(), '''
10 x
-30.0747237573 -106.808764217
(S1 (S (NP (NNP British)) (VP (VBZ left) (NP (NNS waffles)) (PP (IN on) (NP (NNP Falklands)))) (. .)))
-30.4801072424 -108.927500891
(S1 (S (NP (NNP British)) (VP (VBZ left) (NP (NNS waffles)) (PP (IN on) (NP (NNPS Falklands)))) (. .)))
-30.5333433842 -108.948330724
(S1 (S (NP (NNPS British)) (VP (VBZ left) (NP (NNS waffles)) (PP (IN on) (NP (NNP Falklands)))) (. .)))
-30.8151980896 -104.805165121
(S1 (S (NP (NNP British)) (VP (VBZ left) (NP (NP (NNS waffles)) (PP (IN on) (NP (NNP Falklands))))) (. .)))
-31.2292881945 -106.513876597
(S1 (S (NP (NNP British)) (VP (VBZ left) (NP (NP (NNS waffles)) (PP (IN on) (NP (NNPS Falklands))))) (. .)))
-31.2785161465 -106.944731628
(S1 (S (NP (NNPS British)) (VP (VBZ left) (NP (NP (NNS waffles)) (PP (IN on) (NP (NNP Falklands))))) (. .)))
-31.5846356514 -108.653443103
(S1 (S (NP (NNPS British)) (VP (VBZ left) (NP (NP (NNS waffles)) (PP (IN on) (NP (NNPS Falklands))))) (. .)))
-31.7626299938 -108.627394514
(S1 (S (NP (NNP British)) (VP (VBZ left) (NP (NP (NNS waffles)) (PP (IN on) (NP (NNS Falklands))))) (. .)))
-33.2085090166 -107.19852478
(S1 (S (NP (JJ British)) (VP (VBZ left) (NP (NP (NNS waffles)) (PP (IN on) (NP (NNP Falklands))))) (. .)))
-33.5519491115 -108.907236256
(S1 (S (NP (JJ British)) (VP (VBZ left) (NP (NP (NNS waffles)) (PP (IN on) (NP (NNPS Falklands))))) (. .)))'''.strip())
constraints[(1, 2)] = ['VBZ']
nbest_list5 = rrp.parse_constrained('British left waffles on '
'Falklands .'.split(),
constraints)
self.assertEqual(str(nbest_list4), str(nbest_list5))
self.assertEqual(nbest_list4.get_parser_best(), nbest_list4[3])
self.assertEqual(nbest_list4.get_reranker_best(), nbest_list4[0])
self.assertAlmostEqual(nbest_list4.get_reranker_best().reranker_score,
-30.0747237573)
constraints = {(2, 4): ['NP'], (0, 1): ['VP']}
nbest_list6 = rrp.parse_constrained('British left waffles on '
'Falklands .'.split(),
constraints)
self.assertEqual(len(nbest_list6), 0)
self.assertNBestListStringsAlmostEqual(str(nbest_list6), '0 x')
self.assertEqual(nbest_list6.get_parser_best(), None)
self.assertEqual(nbest_list6.get_reranker_best(), None)
constraints = {(1, 5): 'VP'}
nbest_list7 = rrp.parse_constrained('British left waffles on '
'Falklands .'.split(),
constraints)
self.assertEqual(str(nbest_list), str(nbest_list7))
# sentence length tests
self.assertEqual(len(rrp.parse('a ' * 397)), 0)
self.assertEqual(len(rrp.parse('a ' * 398)), 0)
self.assertRaises(ValueError, rrp.parse, 'a ' * 399)
self.assertRaises(ValueError, rrp.parse, 'a ' * 400)
self.assertRaises(ValueError, rrp.parse, 'a ' * 401)
self.assertRaises(ValueError, rrp.tag, '# ! ? : -',
allow_failures=True)
self.assertEqual(rrp.tag('# ! ? : -', allow_failures=False),
[('#', '#'), ('!', '.'), ('?', '.'),
(':', ':'), ('-', ':')])
self.assertEqual(rrp.set_parser_options(nbest=50),
dict(case_insensitive=False, debug=0,
language='En', nbest=50, overparsing=21,
small_corpus=True, smooth_pos=0))
# test one actually complex sentence with fusion options
complex_sentence = 'Economists are divided as to how much '\
'manufacturing strength they expect to see in ' \
'September reports on industrial production ' \
'and capacity utilization, also due tomorrow.'
self.assertEqual(rrp.simple_parse(complex_sentence), '(S1 (S (NP (NNS Economists)) (VP (AUX are) (VP (VBN divided) (PP (IN as) (PP (TO to) (SBAR (WHNP (WHADJP (WRB how) (JJ much)) (NN manufacturing) (NN strength)) (S (NP (PRP they)) (VP (VBP expect) (S (VP (TO to) (VP (VB see) (PP (IN in) (NP (NNP September))) (NP (NP (NNS reports)) (PP (IN on) (NP (NP (JJ industrial) (NN production)) (CC and) (NP (NP (NN capacity) (NN utilization)) (, ,) (NP (ADJP (ADVP (RB also)) (JJ due)) (NN tomorrow)))))))))))))))) (. .)))')
nbest_list_complex = rrp.parse(complex_sentence)
self.assertNBestListStringsAlmostEqual(str(nbest_list_complex).strip(), '''
50 x
-82.7890527858 -256.862754458
(S1 (S (NP (NNS Economists)) (VP (AUX are) (VP (VBN divided) (PP (IN as) (PP (TO to) (SBAR (WHNP (WHADJP (WRB how) (JJ much)) (NN manufacturing) (NN strength)) (S (NP (PRP they)) (VP (VBP expect) (S (VP (TO to) (VP (VB see) (PP (IN in) (NP (NNP September))) (NP (NP (NNS reports)) (PP (IN on) (NP (NP (JJ industrial) (NN production)) (CC and) (NP (NP (NN capacity) (NN utilization)) (, ,) (NP (ADJP (ADVP (RB also)) (JJ due)) (NN tomorrow)))))))))))))))) (. .)))
-82.9165082389 -255.862921846
(S1 (S (NP (NNS Economists)) (VP (AUX are) (VP (VBN divided) (PP (IN as) (PP (TO to) (SBAR (WHNP (WRB how) (JJ much) (NN manufacturing) (NN strength)) (S (NP (PRP they)) (VP (VBP expect) (S (VP (TO to) (VP (VB see) (PP (IN in) (NP (NNP September))) (NP (NP (NNS reports)) (PP (IN on) (NP (NP (JJ industrial) (NN production)) (CC and) (NP (NP (NN capacity) (NN utilization)) (, ,) (NP (ADJP (ADVP (RB also)) (JJ due)) (NN tomorrow)))))))))))))))) (. .)))
-83.2414322173 -257.424786092
(S1 (S (NP (NNS Economists)) (VP (AUX are) (VP (VBN divided) (PP (IN as) (PP (TO to) (SBAR (WHNP (WHADVP (WRB how) (JJ much) (NN manufacturing)) (NN strength)) (S (NP (PRP they)) (VP (VBP expect) (S (VP (TO to) (VP (VB see) (PP (IN in) (NP (NNP September))) (NP (NP (NNS reports)) (PP (IN on) (NP (NP (NP (JJ industrial) (NN production)) (CC and) (NP (NN capacity) (NN utilization))) (, ,) (NP (ADJP (ADVP (RB also)) (JJ due)) (NN tomorrow))))))))))))))) (. .)))
-83.3396120234 -257.449514707
(S1 (S (NP (NNS Economists)) (VP (AUX are) (VP (VBN divided) (PP (IN as) (PP (TO to) (SBAR (WHNP (WHADJP (WRB how) (JJ much)) (NN manufacturing) (NN strength)) (S (NP (PRP they)) (VP (VBP expect) (S (VP (TO to) (VP (VB see) (PP (IN in) (NP (NNP September))) (NP (NP (NNS reports)) (PP (IN on) (NP (NP (JJ industrial) (NN production)) (CC and) (NP (NN capacity) (NN utilization))))) (, ,) (PP (ADVP (RB also)) (RB due) (NP (NN tomorrow))))))))))))) (. .)))
-83.3590774333 -257.207857799
(S1 (S (NP (NNS Economists)) (VP (AUX are) (VP (VBN divided) (PP (IN as) (PP (TO to) (SBAR (WHNP (WHADVP (WRB how) (JJ much) (NN manufacturing)) (NN strength)) (S (NP (PRP they)) (VP (VBP expect) (S (VP (TO to) (VP (VB see) (PP (IN in) (NP (NNP September))) (NP (NP (NNS reports)) (PP (IN on) (NP (NP (JJ industrial) (NN production)) (CC and) (NP (NN capacity) (NN utilization)))) (, ,) (RRC (ADVP (RB also)) (NP (JJ due) (NN tomorrow)))))))))))))) (. .)))
-83.4670675044 -256.449682095
(S1 (S (NP (NNS Economists)) (VP (AUX are) (VP (VBN divided) (PP (IN as) (PP (TO to) (SBAR (WHNP (WRB how) (JJ much) (NN manufacturing) (NN strength)) (S (NP (PRP they)) (VP (VBP expect) (S (VP (TO to) (VP (VB see) (PP (IN in) (NP (NNP September))) (NP (NP (NNS reports)) (PP (IN on) (NP (NP (JJ industrial) (NN production)) (CC and) (NP (NN capacity) (NN utilization))))) (, ,) (PP (ADVP (RB also)) (RB due) (NP (NN tomorrow))))))))))))) (. .)))
-83.5673600986 -255.564606573
(S1 (S (NP (NNS Economists)) (VP (AUX are) (VP (VBN divided) (PP (IN as) (PP (TO to) (SBAR (WHNP (WHADVP (WRB how) (JJ much) (NN manufacturing)) (NN strength)) (S (NP (PRP they)) (VP (VBP expect) (S (VP (TO to) (VP (VB see) (PP (IN in) (NP (NNP September))) (NP (NP (NNS reports)) (PP (IN on) (NP (NP (JJ industrial) (NN production)) (CC and) (NP (NP (NN capacity) (NN utilization)) (PRN (, ,) (ADVP (RB also)) (NP (JJ due) (NN tomorrow))))))))))))))))) (. .)))
-83.5718465213 -256.514298268
(S1 (S (NP (NNS Economists)) (VP (AUX are) (VP (VBN divided) (PP (IN as) (PP (TO to) (SBAR (WHNP (WHADJP (WRB how) (JJ much)) (NN manufacturing) (NN strength)) (S (NP (PRP they)) (VP (VBP expect) (S (VP (TO to) (VP (VB see) (PP (IN in) (NP (NNP September))) (NP (NP (NNS reports)) (PP (IN on) (NP (NP (NP (JJ industrial) (NN production)) (CC and) (NP (NN capacity) (NN utilization))) (, ,) (NP (ADJP (ADVP (RB also)) (JJ due)) (NN tomorrow))))))))))))))) (. .)))
-83.6156462512 -256.638395881
(S1 (S (NP (NNS Economists)) (VP (AUX are) (VP (VBN divided) (PP (IN as) (PP (TO to) (SBAR (WHNP (WRB how) (JJ much) (NN manufacturing) (NN strength)) (S (NP (PRP they)) (VP (VBP expect) (S (VP (TO to) (VP (VB see) (PP (IN in) (NP (NNP September))) (NP (NP (NNS reports)) (PP (IN on) (NP (NP (JJ industrial) (NN production)) (CC and) (NP (NN capacity) (NN utilization))))) (, ,) (NP (RB also) (JJ due) (NN tomorrow)))))))))))) (. .)))
-83.6894917374 -256.297369974
(S1 (S (NP (NNS Economists)) (VP (AUX are) (VP (VBN divided) (PP (IN as) (PP (TO to) (SBAR (WHNP (WHADJP (WRB how) (JJ much)) (NN manufacturing) (NN strength)) (S (NP (PRP they)) (VP (VBP expect) (S (VP (TO to) (VP (VB see) (PP (IN in) (NP (NNP September))) (NP (NP (NNS reports)) (PP (IN on) (NP (NP (JJ industrial) (NN production)) (CC and) (NP (NN capacity) (NN utilization)))) (, ,) (RRC (ADVP (RB also)) (NP (JJ due) (NN tomorrow)))))))))))))) (. .)))
-83.6959323955 -257.337579615
(S1 (S (NP (NNS Economists)) (VP (AUX are) (VP (VBN divided) (PP (IN as) (PP (TO to) (SBAR (WHNP (WRB how) (JJ much) (NN manufacturing) (NN strength)) (S (NP (PRP they)) (VP (VBP expect) (S (VP (TO to) (VP (VB see) (PP (IN in) (NP (NNP September))) (NP (NP (NNS reports)) (PP (IN on) (NP (NP (NP (JJ industrial) (NN production)) (CC and) (NP (NN capacity) (NN utilization))) (, ,) (NP (ADJP (RB also) (JJ due)) (NN tomorrow))))))))))))))) (. .)))
-83.6993020024 -255.514465655
(S1 (S (NP (NNS Economists)) (VP (AUX are) (VP (VBN divided) (PP (IN as) (PP (TO to) (SBAR (WHNP (WRB how) (JJ much) (NN manufacturing) (NN strength)) (S (NP (PRP they)) (VP (VBP expect) (S (VP (TO to) (VP (VB see) (PP (IN in) (NP (NNP September))) (NP (NP (NNS reports)) (PP (IN on) (NP (NP (NP (JJ industrial) (NN production)) (CC and) (NP (NN capacity) (NN utilization))) (, ,) (NP (ADJP (ADVP (RB also)) (JJ due)) (NN tomorrow))))))))))))))) (. .)))
-83.8169472184 -255.297537362
(S1 (S (NP (NNS Economists)) (VP (AUX are) (VP (VBN divided) (PP (IN as) (PP (TO to) (SBAR (WHNP (WRB how) (JJ much) (NN manufacturing) (NN strength)) (S (NP (PRP they)) (VP (VBP expect) (S (VP (TO to) (VP (VB see) (PP (IN in) (NP (NNP September))) (NP (NP (NNS reports)) (PP (IN on) (NP (NP (JJ industrial) (NN production)) (CC and) (NP (NN capacity) (NN utilization)))) (, ,) (RRC (ADVP (RB also)) (NP (JJ due) (NN tomorrow)))))))))))))) (. .)))
-83.8977743747 -254.654118748
(S1 (S (NP (NNS Economists)) (VP (AUX are) (VP (VBN divided) (PP (IN as) (PP (TO to) (SBAR (WHNP (WHADJP (WRB how) (JJ much)) (NN manufacturing) (NN strength)) (S (NP (PRP they)) (VP (VBP expect) (S (VP (TO to) (VP (VB see) (PP (IN in) (NP (NNP September))) (NP (NP (NNS reports)) (PP (IN on) (NP (NP (JJ industrial) (NN production)) (CC and) (NP (NP (NN capacity) (NN utilization)) (PRN (, ,) (ADVP (RB also)) (NP (JJ due) (NN tomorrow))))))))))))))))) (. .)))
-83.9520871226 -256.615389727
(S1 (S (NP (NNS Economists)) (VP (AUX are) (VP (VBN divided) (PP (IN as) (PP (TO to) (SBAR (WHNP (WRB how) (JJ much) (NN manufacturing) (NN strength)) (S (NP (PRP they)) (VP (VBP expect) (S (VP (TO to) (VP (VB see) (PP (IN in) (NP (NNP September))) (NP (NP (NNS reports)) (PP (IN on) (NP (NP (JJ industrial) (NN production)) (CC and) (NP (NN capacity) (NN utilization) (, ,) (RB also) (JJ due) (NN tomorrow))))))))))))))) (. .)))
-84.0252298558 -253.654286136
(S1 (S (NP (NNS Economists)) (VP (AUX are) (VP (VBN divided) (PP (IN as) (PP (TO to) (SBAR (WHNP (WRB how) (JJ much) (NN manufacturing) (NN strength)) (S (NP (PRP they)) (VP (VBP expect) (S (VP (TO to) (VP (VB see) (PP (IN in) (NP (NNP September))) (NP (NP (NNS reports)) (PP (IN on) (NP (NP (JJ industrial) (NN production)) (CC and) (NP (NP (NN capacity) (NN utilization)) (PRN (, ,) (ADVP (RB also)) (NP (JJ due) (NN tomorrow))))))))))))))))) (. .)))
-84.031467802 -256.699200765
(S1 (S (NP (NNS Economists)) (VP (AUX are) (VP (VBN divided) (PP (IN as) (PP (TO to) (SBAR (WHNP (WHADJP (WRB how) (JJ much)) (NN manufacturing) (NN strength)) (S (NP (PRP they)) (VP (VBP expect) (S (VP (TO to) (VP (VB see) (PP (IN in) (NP (NNP September))) (NP (NP (NNS reports)) (PP (IN on) (NP (NP (JJ industrial) (NN production)) (CC and) (NP (NN capacity) (NN utilization) (, ,) (ADJP (ADVP (RB also)) (JJ due)) (NN tomorrow))))))))))))))) (. .)))
-84.0782566086 -255.763954352
(S1 (S (NP (NNS Economists)) (VP (AUX are) (VP (VBN divided) (PP (IN as) (PP (TO to) (SBAR (WHNP (WHADVP (WRB how) (JJ much) (NN manufacturing)) (NN strength)) (S (NP (PRP they)) (VP (VBP expect) (S (VP (TO to) (VP (VB see) (PP (IN in) (NP (NNP September))) (NP (NP (NP (NNS reports)) (PP (IN on) (NP (NP (JJ industrial) (NN production)) (CC and) (NP (NN capacity) (NN utilization))))) (, ,) (RB also) (NP (JJ due) (NN tomorrow))))))))))))) (. .)))
-84.1589232831 -255.699368153
(S1 (S (NP (NNS Economists)) (VP (AUX are) (VP (VBN divided) (PP (IN as) (PP (TO to) (SBAR (WHNP (WRB how) (JJ much) (NN manufacturing) (NN strength)) (S (NP (PRP they)) (VP (VBP expect) (S (VP (TO to) (VP (VB see) (PP (IN in) (NP (NNP September))) (NP (NP (NNS reports)) (PP (IN on) (NP (NP (JJ industrial) (NN production)) (CC and) (NP (NN capacity) (NN utilization) (, ,) (ADJP (ADVP (RB also)) (JJ due)) (NN tomorrow))))))))))))))) (. .)))
-84.207911268 -257.445803806
(S1 (S (NP (NNS Economists)) (VP (AUX are) (VP (VBN divided) (PP (IN as) (PP (TO to) (SBAR (WHNP (WRB how) (JJ much) (NN manufacturing) (NN strength)) (S (NP (PRP they)) (VP (VBP expect) (S (VP (TO to) (VP (VB see) (PP (IN in) (NP (NNP September))) (NP (NP (NNS reports)) (PP (IN on) (NP (NP (JJ industrial) (NN production)) (CC and) (NP (NN capacity) (NN utilization)))) (, ,) (ADVP (RB also)) (NP (JJ due) (NN tomorrow))))))))))))) (. .)))
-84.2172200016 -256.757941126
(S1 (S (NP (NNS Economists)) (VP (AUX are) (VP (VBN divided) (PP (IN as) (PP (TO to) (SBAR (WHNP (WRB how) (JJ much) (NN manufacturing) (NN strength)) (S (NP (PRP they)) (VP (VBP expect) (S (VP (TO to) (VP (VB see) (PP (IN in) (NP (NNP September))) (NP (NP (NNS reports)) (PP (IN on) (NP (NP (NP (JJ industrial) (NN production)) (CC and) (NP (NN capacity) (NN utilization))) (, ,) (ADVP (RB also)) (NP (JJ due) (NN tomorrow))))))))))))))) (. .)))
-84.4019196191 -257.117933507
(S1 (S (NP (NNS Economists)) (VP (AUX are) (VP (VBN divided) (PP (IN as) (PP (TO to) (SBAR (WHNP (WHADVP (WRB how) (JJ much) (NN manufacturing)) (NN strength)) (S (NP (PRP they)) (VP (VBP expect) (S (VP (TO to) (VP (VB see) (PP (IN in) (NP (NNP September))) (NP (NP (NNS reports)) (PP (IN on) (NP (NP (NP (JJ industrial) (NN production)) (CC and) (NP (NN capacity) (NN utilization))) (, ,) (RB also) (NP (JJ due) (NN tomorrow))))))))))))))) (. .)))
-84.4086708847 -254.853466527
(S1 (S (NP (NNS Economists)) (VP (AUX are) (VP (VBN divided) (PP (IN as) (PP (TO to) (SBAR (WHNP (WHADJP (WRB how) (JJ much)) (NN manufacturing) (NN strength)) (S (NP (PRP they)) (VP (VBP expect) (S (VP (TO to) (VP (VB see) (PP (IN in) (NP (NNP September))) (NP (NP (NP (NNS reports)) (PP (IN on) (NP (NP (JJ industrial) (NN production)) (CC and) (NP (NN capacity) (NN utilization))))) (, ,) (RB also) (NP (JJ due) (NN tomorrow))))))))))))) (. .)))
-84.4202693182 -257.522482005
(S1 (S (NP (NNS Economists)) (VP (AUX are) (VP (VBN divided) (PP (IN as) (PP (TO to) (SBAR (WHNP (WRB how) (JJ much) (NN manufacturing) (NN strength)) (S (NP (PRP they)) (VP (VBP expect) (S (VP (TO to) (VP (VB see) (PP (IN in) (NP (NNP September))) (NP (NP (NNS reports)) (PP (IN on) (NP (NP (JJ industrial) (NN production)) (CC and) (NP (NN capacity) (NN utilization) (, ,) (ADJP (RB also) (JJ due)) (NN tomorrow))))))))))))))) (. .)))
-84.5361263658 -253.853633915
(S1 (S (NP (NNS Economists)) (VP (AUX are) (VP (VBN divided) (PP (IN as) (PP (TO to) (SBAR (WHNP (WRB how) (JJ much) (NN manufacturing) (NN strength)) (S (NP (PRP they)) (VP (VBP expect) (S (VP (TO to) (VP (VB see) (PP (IN in) (NP (NNP September))) (NP (NP (NP (NNS reports)) (PP (IN on) (NP (NP (JJ industrial) (NN production)) (CC and) (NP (NN capacity) (NN utilization))))) (, ,) (RB also) (NP (JJ due) (NN tomorrow))))))))))))) (. .)))
-84.562221394 -257.037905456
(S1 (S (NP (NNS Economists)) (VP (AUX are) (VP (VBN divided) (PP (IN as) (PP (TO to) (SBAR (WHNP (WRB how) (JJ much) (NN manufacturing) (NN strength)) (S (NP (PRP they)) (VP (VBP expect) (S (VP (TO to) (VP (VB see) (PP (IN in) (NP (NNP September))) (NP (NP (NNS reports)) (PP (PP (IN on) (NP (NP (JJ industrial) (NN production)) (CC and) (NP (NN capacity) (NN utilization)))) (, ,) (RB also) (RB due) (NP (NN tomorrow)))))))))))))) (. .)))
-84.7211610162 -257.10106261
(S1 (S (NP (NNS Economists)) (VP (AUX are) (VP (VBN divided) (PP (IN as) (PP (TO to) (SBAR (WHNP (WHADVP (WRB how) (JJ much) (NN manufacturing)) (NN strength)) (S (NP (PRP they)) (VP (VBP expect) (S (VP (TO to) (VP (VB see) (PP (IN in) (NP (NNP September))) (NP (NP (NNS reports)) (PP (IN on) (NP (NP (JJ industrial) (NN production)) (CC and) (NP (NN capacity) (NN utilization)))) (, ,) (ADVP (RB also) (NP (JJ due) (NN tomorrow)))))))))))))) (. .)))
-84.7323339231 -256.207445682
(S1 (S (NP (NNS Economists)) (VP (AUX are) (VP (VBN divided) (PP (IN as) (PP (TO to) (SBAR (WHNP (WHADJP (WRB how) (JJ much)) (NN manufacturing) (NN strength)) (S (NP (PRP they)) (VP (VBP expect) (S (VP (TO to) (VP (VB see) (PP (IN in) (NP (NNP September))) (NP (NP (NNS reports)) (PP (IN on) (NP (NP (NP (JJ industrial) (NN production)) (CC and) (NP (NN capacity) (NN utilization))) (, ,) (RB also) (NP (JJ due) (NN tomorrow))))))))))))))) (. .)))
-84.8597894042 -255.20761307
(S1 (S (NP (NNS Economists)) (VP (AUX are) (VP (VBN divided) (PP (IN as) (PP (TO to) (SBAR (WHNP (WRB how) (JJ much) (NN manufacturing) (NN strength)) (S (NP (PRP they)) (VP (VBP expect) (S (VP (TO to) (VP (VB see) (PP (IN in) (NP (NNP September))) (NP (NP (NNS reports)) (PP (IN on) (NP (NP (NP (JJ industrial) (NN production)) (CC and) (NP (NN capacity) (NN utilization))) (, ,) (RB also) (NP (JJ due) (NN tomorrow))))))))))))))) (. .)))
-84.9655762188 -257.511603996
(S1 (S (NP (NNS Economists)) (VP (AUX are) (VP (VBN divided) (PP (IN as) (PP (TO to) (SBAR (WHNP (WRB how) (JJ much) (NN manufacturing) (NN strength)) (S (NP (PRP they)) (VP (VBP expect) (S (VP (TO to) (VP (VB see) (PP (IN in) (NP (NNP September))) (NP (NP (NNS reports)) (PP (IN on) (NP (NP (NP (JJ industrial) (NN production)) (CC and) (NP (NN capacity) (NN utilization))) (, ,) (ADVP (RB also) (JJ due)) (NP (NN tomorrow))))))))))))))) (. .)))
-85.0357092054 -256.760509325
(S1 (S (NP (NNS Economists)) (VP (AUX are) (VP (VBN divided) (PP (IN as) (PP (TO to) (SBAR (WHNP (WRB how) (JJ much) (NN manufacturing) (NN strength)) (S (NP (PRP they)) (VP (VBP expect) (S (VP (TO to) (VP (VB see) (PP (IN in) (NP (NNP September))) (NP (NP (NNS reports)) (PP (IN on) (NP (NP (JJ industrial) (NX (NX (NN production)) (CC and) (NX (NN capacity) (NN utilization)))) (, ,) (NP (ADJP (ADVP (RB also)) (JJ due)) (NN tomorrow))))))))))))))) (. .)))
-85.0404031204 -257.493696355
(S1 (S (NP (NNS Economists)) (VP (AUX are) (VP (VBN divided) (PP (IN as) (PP (TO to) (SBAR (WHNP (WRB how) (JJ much) (NN manufacturing) (NN strength)) (S (NP (PRP they)) (VP (VBP expect) (S (VP (TO to) (VP (VB see) (PP (IN in) (NP (NP (NNP September) (NNS reports)) (PP (IN on) (NP (JJ industrial) (NN production) (CC and) (NN capacity))))) (NP (NN utilization) (, ,) (RB also) (JJ due) (NN tomorrow)))))))))))) (. .)))
-85.0515753202 -256.190574785
(S1 (S (NP (NNS Economists)) (VP (AUX are) (VP (VBN divided) (PP (IN as) (PP (TO to) (SBAR (WHNP (WHADJP (WRB how) (JJ much)) (NN manufacturing) (NN strength)) (S (NP (PRP they)) (VP (VBP expect) (S (VP (TO to) (VP (VB see) (PP (IN in) (NP (NNP September))) (NP (NP (NNS reports)) (PP (IN on) (NP (NP (JJ industrial) (NN production)) (CC and) (NP (NN capacity) (NN utilization)))) (, ,) (ADVP (RB also) (NP (JJ due) (NN tomorrow)))))))))))))) (. .)))
-85.1790308013 -255.190742173
(S1 (S (NP (NNS Economists)) (VP (AUX are) (VP (VBN divided) (PP (IN as) (PP (TO to) (SBAR (WHNP (WRB how) (JJ much) (NN manufacturing) (NN strength)) (S (NP (PRP they)) (VP (VBP expect) (S (VP (TO to) (VP (VB see) (PP (IN in) (NP (NNP September))) (NP (NP (NNS reports)) (PP (IN on) (NP (NP (JJ industrial) (NN production)) (CC and) (NP (NN capacity) (NN utilization)))) (, ,) (ADVP (RB also) (NP (JJ due) (NN tomorrow)))))))))))))) (. .)))
-85.6747155736 -256.886955055
(S1 (S (NP (NNS Economists)) (VP (AUX are) (VP (VBN divided) (PP (IN as) (PP (TO to) (SBAR (WHNP (WRB how) (JJ much) (NN manufacturing) (NN strength)) (S (NP (PRP they)) (VP (VBP expect) (S (VP (TO to) (VP (VB see) (PP (IN in) (NP (NNP September))) (NP (NP (NNS reports)) (PP (IN on) (NP (NP (JJ industrial) (NN production)) (CC and) (NP (NN capacity) (NN utilization))))) (, ,) (PP (RB also) (NP (JJ due) (NN tomorrow))))))))))))) (. .)))
-85.8137347183 -256.601249244
(S1 (S (NP (NNS Economists)) (VP (AUX are) (ADJP (VBN divided) (PP (IN as) (PP (TO to) (SBAR (WHNP (WRB how) (JJ much) (NN manufacturing) (NN strength)) (S (NP (PRP they)) (VP (VBP expect) (S (VP (TO to) (VP (VB see) (PP (IN in) (NP (NNP September))) (NP (NP (NNS reports)) (PP (IN on) (NP (NP (JJ industrial) (NN production)) (CC and) (NP (NP (NN capacity) (NN utilization)) (PRN (, ,) (ADVP (RB also)) (NP (JJ due) (NN tomorrow))))))))))))))))) (. .)))
-85.8725230701 -257.453489352
(S1 (S (NP (NNS Economists)) (VP (AUX are) (VP (VBN divided) (PP (IN as) (PP (TO to) (SBAR (WHNP (WHADJP (WRB how) (JJ much)) (NN manufacturing) (NN strength)) (S (NP (PRP they)) (VP (VBP expect) (S (VP (TO to) (VP (VB see) (PP (IN in) (NP (NNP September))) (NP (NP (NNS reports)) (PP (IN on) (NP (NP (JJ industrial) (NX (NX (NN production)) (CC and) (NX (NN capacity) (NN utilization)))) (, ,) (RB also) (NP (JJ due) (NN tomorrow))))))))))))))) (. .)))
-85.9999785232 -256.45365674
(S1 (S (NP (NNS Economists)) (VP (AUX are) (VP (VBN divided) (PP (IN as) (PP (TO to) (SBAR (WHNP (WRB how) (JJ much) (NN manufacturing) (NN strength)) (S (NP (PRP they)) (VP (VBP expect) (S (VP (TO to) (VP (VB see) (PP (IN in) (NP (NNP September))) (NP (NP (NNS reports)) (PP (IN on) (NP (NP (JJ industrial) (NX (NX (NN production)) (CC and) (NX (NN capacity) (NN utilization)))) (, ,) (RB also) (NP (JJ due) (NN tomorrow))))))))))))))) (. .)))
-86.3246312283 -256.800597023
(S1 (S (NP (NNS Economists)) (VP (AUX are) (ADJP (VBN divided) (PP (IN as) (PP (TO to) (SBAR (WHNP (WRB how) (JJ much) (NN manufacturing) (NN strength)) (S (NP (PRP they)) (VP (VBP expect) (S (VP (TO to) (VP (VB see) (PP (IN in) (NP (NNP September))) (NP (NP (NP (NNS reports)) (PP (IN on) (NP (NP (JJ industrial) (NN production)) (CC and) (NP (NN capacity) (NN utilization))))) (, ,) (RB also) (NP (JJ due) (NN tomorrow))))))))))))) (. .)))
-86.3311587192 -257.556264765
(S1 (S (NP (NNS Economists)) (VP (AUX are) (VP (VBN divided) (PP (IN as) (PP (TO to) (SBAR (WHNP (WHADVP (WRB how) (JJ much) (NN manufacturing)) (NN strength)) (S (NP (PRP they)) (VP (VBP expect) (S (VP (TO to) (VP (VB see) (PP (IN in) (NP (NNP September))) (NP (NP (NNS reports)) (PP (IN on) (NP (NP (JJ industrial) (NN production)) (CC and) (NP (NP (NN capacity)) (NP (NN utilization) (, ,) (ADJP (ADVP (RB also)) (JJ due)) (NN tomorrow)))))))))))))))) (. .)))
-86.6615729953 -256.64577694
(S1 (S (NP (NNS Economists)) (VP (AUX are) (VP (VBN divided) (PP (IN as) (PP (TO to) (SBAR (WHNP (WHADJP (WRB how) (JJ much)) (NN manufacturing) (NN strength)) (S (NP (PRP they)) (VP (VBP expect) (S (VP (TO to) (VP (VB see) (PP (IN in) (NP (NNP September))) (NP (NP (NNS reports)) (PP (IN on) (NP (NP (JJ industrial) (NN production)) (CC and) (NP (NP (NN capacity)) (NP (NN utilization) (, ,) (ADJP (ADVP (RB also)) (JJ due)) (NN tomorrow)))))))))))))))) (. .)))
-86.6783812339 -257.253941453
(S1 (S (NP (NNS Economists)) (VP (AUX are) (VP (VBN divided) (PP (IN as) (PP (TO to) (SBAR (WHNP (WHADJP (WRB how) (JJ much)) (NN manufacturing) (NN strength)) (S (NP (PRP they)) (VP (VBP expect) (S (VP (TO to) (VP (VB see) (PP (IN in) (NP (NNP September))) (NP (NP (NP (NNS reports)) (PP (IN on) (NP (JJ industrial) (NX (NX (NN production)) (CC and) (NX (NN capacity) (NN utilization)))))) (, ,) (RB also) (NP (JJ due) (NN tomorrow))))))))))))) (. .)))
-86.7890284763 -255.645944328
(S1 (S (NP (NNS Economists)) (VP (AUX are) (VP (VBN divided) (PP (IN as) (PP (TO to) (SBAR (WHNP (WRB how) (JJ much) (NN manufacturing) (NN strength)) (S (NP (PRP they)) (VP (VBP expect) (S (VP (TO to) (VP (VB see) (PP (IN in) (NP (NNP September))) (NP (NP (NNS reports)) (PP (IN on) (NP (NP (JJ industrial) (NN production)) (CC and) (NP (NP (NN capacity)) (NP (NN utilization) (, ,) (ADJP (ADVP (RB also)) (JJ due)) (NN tomorrow)))))))))))))))) (. .)))
-86.805836687 -256.254108841
(S1 (S (NP (NNS Economists)) (VP (AUX are) (VP (VBN divided) (PP (IN as) (PP (TO to) (SBAR (WHNP (WRB how) (JJ much) (NN manufacturing) (NN strength)) (S (NP (PRP they)) (VP (VBP expect) (S (VP (TO to) (VP (VB see) (PP (IN in) (NP (NNP September))) (NP (NP (NP (NNS reports)) (PP (IN on) (NP (JJ industrial) (NX (NX (NN production)) (CC and) (NX (NN capacity) (NN utilization)))))) (, ,) (RB also) (NP (JJ due) (NN tomorrow))))))))))))) (. .)))
-86.8163694052 -257.300308221
(S1 (S (NP (NNS Economists)) (VP (AUX are) (VP (VBN divided) (PP (IN as) (PP (TO to) (SBAR (WHNP (WHADVP (WRB how) (JJ much) (NN manufacturing)) (NN strength)) (S (NP (PRP they)) (VP (VBP expect) (S (VP (TO to) (VP (VB see) (PP (IN in) (NP (NNP September))) (NP (NP (NNS reports)) (PP (IN on) (NP (NP (JJ industrial) (NN production) (CC and) (NN capacity)) (NP (NN utilization) (, ,) (ADJP (ADVP (RB also)) (JJ due)) (NN tomorrow))))))))))))))) (. .)))
-86.8234508895 -257.469058181
(S1 (S (NP (NNS Economists)) (VP (AUX are) (VP (VBN divided) (PP (IN as) (PP (TO to) (SBAR (WHNP (WRB how) (JJ much) (NN manufacturing) (NN strength)) (S (NP (PRP they)) (VP (VBP expect) (S (VP (TO to) (VP (VB see) (PP (IN in) (NP (NNP September))) (NP (NP (NNS reports)) (PP (IN on) (NP (NP (JJ industrial) (NN production)) (CC and) (NP (NP (NN capacity)) (NP (NN utilization) (, ,) (ADJP (RB also) (JJ due)) (NN tomorrow)))))))))))))))) (. .)))
-86.8276269607 -256.807156413
(S1 (S (NP (NNS Economists)) (VP (AUX are) (VP (VBN divided) (PP (IN as) (PP (TO to) (SBAR (WHNP (WRB how) (JJ much) (NN manufacturing) (NN strength)) (S (NP (PRP they)) (VP (VBP expect) (S (VP (TO to) (VP (VB see) (PP (IN in) (NP (NNP September))) (NP (NP (NNS reports)) (PP (IN on) (NP (NP (JJ industrial) (NX (NX (NN production)) (CC and) (NX (NN capacity) (NN utilization)))) (PRN (, ,) (ADVP (RB also)) (NP (JJ due) (NN tomorrow)))))))))))))))) (. .)))
-87.1467837093 -256.389820397
(S1 (S (NP (NNS Economists)) (VP (AUX are) (VP (VBN divided) (PP (IN as) (PP (TO to) (SBAR (WHNP (WHADJP (WRB how) (JJ much)) (NN manufacturing) (NN strength)) (S (NP (PRP they)) (VP (VBP expect) (S (VP (TO to) (VP (VB see) (PP (IN in) (NP (NNP September))) (NP (NP (NNS reports)) (PP (IN on) (NP (NP (JJ industrial) (NN production) (CC and) (NN capacity)) (NP (NN utilization) (, ,) (ADJP (ADVP (RB also)) (JJ due)) (NN tomorrow))))))))))))))) (. .)))
-87.2742391903 -255.389987785
(S1 (S (NP (NNS Economists)) (VP (AUX are) (VP (VBN divided) (PP (IN as) (PP (TO to) (SBAR (WHNP (WRB how) (JJ much) (NN manufacturing) (NN strength)) (S (NP (PRP they)) (VP (VBP expect) (S (VP (TO to) (VP (VB see) (PP (IN in) (NP (NNP September))) (NP (NP (NNS reports)) (PP (IN on) (NP (NP (JJ industrial) (NN production) (CC and) (NN capacity)) (NP (NN utilization) (, ,) (ADJP (ADVP (RB also)) (JJ due)) (NN tomorrow))))))))))))))) (. .)))
-87.3086615755 -257.213101637
(S1 (S (NP (NNS Economists)) (VP (AUX are) (VP (VBN divided) (PP (IN as) (PP (TO to) (SBAR (WHNP (WRB how) (JJ much) (NN manufacturing) (NN strength)) (S (NP (PRP they)) (VP (VBP expect) (S (VP (TO to) (VP (VB see) (PP (IN in) (NP (NNP September))) (NP (NP (NNS reports)) (PP (IN on) (NP (NP (JJ industrial) (NN production) (CC and) (NN capacity)) (NP (NN utilization) (, ,) (ADJP (RB also) (JJ due)) (NN tomorrow))))))))))))))) (. .)))'''.strip())
self.assertEqual(str(nbest_list_complex.fuse()),
'(S1 (S (NP (NNS Economists)) (VP (AUX are) (VP (VBN divided) (PP (IN as) (PP (TO to) (SBAR (WHNP (WRB how) (JJ much) (NN manufacturing) (NN strength)) (S (NP (PRP they)) (VP (VBP expect) (S (VP (TO to) (VP (VB see) (PP (IN in) (NP (NNP September))) (NP (NP (NNS reports)) (PP (IN on) (NP (NP (NP (JJ industrial) (NN production)) (CC and) (NP (NN capacity) (NN utilization))) (, ,) (ADVP (RB also)) (JJ due) (NN tomorrow)))))))))))))) (. .)))')
self.assertEqual(str(nbest_list_complex.fuse(use_parser_scores=True)),
'(S1 (S (NP (NNS Economists)) (VP (AUX are) (VP (VBN divided) (PP (IN as) (PP (TO to) (SBAR (WHNP (WRB how) (JJ much) (NN manufacturing) (NN strength)) (S (NP (PRP they)) (VP (VBP expect) (S (VP (TO to) (VP (VB see) (PP (IN in) (NP (NNP September))) (NP (NP (NNS reports)) (PP (IN on) (NP (NP (NP (JJ industrial) (NN production)) (CC and) (NP (NN capacity) (NN utilization))) (, ,) (ADVP (RB also)) (NP (JJ due) (NN tomorrow))))))))))))))) (. .)))')
self.assertEqual(str(nbest_list_complex.fuse(num_parses=10)),
'(S1 (S (NP (NNS Economists)) (VP (AUX are) (VP (VBN divided) (PP (IN as) (PP (TO to) (SBAR (WHNP (WRB how) (JJ much) (NN manufacturing) (NN strength)) (S (NP (PRP they)) (VP (VBP expect) (S (VP (TO to) (VP (VB see) (PP (IN in) (NP (NNP September))) (NP (NP (NNS reports)) (PP (IN on) (NP (NP (NP (JJ industrial) (NN production)) (CC and) (NP (NN capacity) (NN utilization))) (, ,) (NP (ADJP (ADVP (RB also)) (JJ due)) (NN tomorrow))))))))))))))) (. .)))')
self.assertEqual(str(nbest_list_complex.fuse(num_parses=10,
threshold=0.75)),
'(S1 (S (NP (NNS Economists)) (VP (AUX are) (VP (VBN divided) (PP (IN as) (PP (TO to) (SBAR (WHNP (WRB how) (JJ much) (NN manufacturing) (NN strength)) (S (NP (PRP they)) (VP (VBP expect) (S (VP (TO to) (VP (VB see) (PP (IN in) (NP (NNP September))) (NP (NNS reports)) (IN on) (NP (JJ industrial) (NN production)) (CC and) (NP (NN capacity) (NN utilization)) (, ,) (ADVP (RB also)) (JJ due) (NN tomorrow))))))))))) (. .)))')
self.assertEqual(str(nbest_list_complex.fuse(num_parses=10,
threshold=0.75,
exponent=1.3)),
'(S1 (S (NP (NNS Economists)) (VP (AUX are) (VP (VBN divided) (PP (IN as) (PP (TO to) (SBAR (WHNP (WRB how) (JJ much) (NN manufacturing) (NN strength)) (S (NP (PRP they)) (VP (VBP expect) (S (VP (TO to) (VP (VB see) (PP (IN in) (NP (NNP September))) (NP (NP (NNS reports)) (IN on) (NP (JJ industrial) (NN production)) (CC and) (NP (NN capacity) (NN utilization)) (, ,) (ADVP (RB also)) (JJ due) (NN tomorrow)))))))))))) (. .)))')
nbest_list_fail = rrp.parse('# ! ? : -')
self.assertEqual(len(nbest_list_fail), 0)
nbest_list_fail = rrp.parse('# ! ? : -', rerank=False)
self.assertEqual(len(nbest_list_fail), 0)
self.assertEqual(str(nbest_list_fail), '0 x')
nbest_list_fail = rrp.parse_constrained('# ! ? : -'.split(), {})
self.assertEqual(len(nbest_list_fail), 0)
def test_3_tree_funcs(self):
# these are here and not in test_tree since they require a parsing
# model to have been loaded
tree1 = Tree('(S1 (S (NP (DT This)) (VP (AUX is) (NP (DT a) '
'(NN sentence))) (. .)))')
tree2 = Tree('(S1 (S (NP (NNP This)) (VP (AUX is) (NP (DT a) '
'(NN sentence))) (. .)))')
tree3 = Tree('(S1 (SBARQ (WHNP (DT This)) (SQ (VP (AUX is) (NP '
'(DT a) (NN sentence)))) (. .)))')
self.assertEqual(tree1.evaluate(tree2),
dict(fscore=1.00, gold=4, matched=4,
precision=1.00, recall=1.00, test=4))
self.assertEqual(tree2.evaluate(tree1),
dict(fscore=1.00, gold=4, matched=4,
precision=1.00, recall=1.00, test=4))
self.assertDictAlmostEqual(tree3.evaluate(tree1),
dict(fscore=0.44, gold=4, matched=2,
precision=0.40, recall=0.50, test=5))
# tree log prob
self.assertAlmostEqual(tree1.log_prob(), -30.398166970084645)
self.assertAlmostEqual(tree2.log_prob(), -46.434686430443755)
self.assertAlmostEqual(tree3.log_prob(), -55.57598769806526)
# head finding and dependencies
self.assertEqual(tree1.head().token, 'is')
self.assertEqual(tree1.head().label, 'AUX')
deps = list(tree1.dependencies())
self.assertEqual(len(deps), 4)
gov, dep = deps[0]
self.assertEqual(gov.token, 'sentence')
self.assertEqual(dep.token, 'a')
def assertDictAlmostEqual(self, d1, d2, places=2):
self.assertEqual(sorted(d1.keys()), sorted(d2.keys()))
for k, v1 in d1.items():
v2 = d2[k]
self.assertAlmostEqual(v1, v2, places=places)
def assertNBestListStringsAlmostEqual(self, nbest_list1, nbest_list2,
places=7):
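        # Both arguments are expected to be n-best list strings in the
        # reranking parser's output format: a header line such as "50 x",
        # then, for each parse, one line with its scores (reranker and
        # parser) followed by one line with the parse tree. Scores are
        # compared approximately (to `places` decimal places); the trees
        # themselves must match exactly.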
lines1 = nbest_list1.splitlines()
lines2 = nbest_list2.splitlines()
self.assertEqual(len(lines1), len(lines2))
line_iter1, line_iter2 = iter(lines1), iter(lines2)
# header should match
header1 = next(line_iter1)
self.assertEqual(header1, next(line_iter2))
num_parses = int(header1.split()[0])
self.assertEqual(len(lines1), (num_parses * 2) + 1)
for parse in range(num_parses):
# check scores for each parse
scores1 = [float(piece) for piece in next(line_iter1).split()]
scores2 = [float(piece) for piece in next(line_iter2).split()]
self.assertEqual(len(scores1), len(scores2))
for score1, score2 in zip(scores1, scores2):
self.assertAlmostEqual(score1, score2, places=places)
# check the parses themselves
parse1 = next(line_iter1)
parse2 = next(line_iter2)
self.assertEqual(parse1, parse2)
| dmcc/bllip-parser | python/tests/test_reranking_parser.py | Python | apache-2.0 | 68,004 |
import ast
import io
import operator
import os
import sys
import token
import tokenize
class Visitor(ast.NodeVisitor):
def __init__(self, lines):
self._lines = lines
self.line_numbers_with_nodes = set()
self.line_numbers_with_statements = []
def generic_visit(self, node):
if hasattr(node, 'col_offset') and hasattr(node, 'lineno') and node.col_offset == 0:
self.line_numbers_with_nodes.add(node.lineno)
if isinstance(node, ast.stmt):
self.line_numbers_with_statements.append(node.lineno)
ast.NodeVisitor.generic_visit(self, node)
def _tokenize(source):
"""Tokenize Python source code."""
# Using an undocumented API as the documented one in Python 2.7 does not work as needed
# cross-version.
return tokenize.generate_tokens(io.StringIO(source).readline)
def _indent_size(line):
for index, char in enumerate(line):
if not char.isspace():
return index
def _get_global_statement_blocks(source, lines):
"""Return a list of all global statement blocks.
    The list comprises 3-item tuples containing the starting line number,
    the ending line number, and whether the statement is a single line.
"""
tree = ast.parse(source)
visitor = Visitor(lines)
visitor.visit(tree)
statement_ranges = []
for index, line_number in enumerate(visitor.line_numbers_with_statements):
remaining_line_numbers = visitor.line_numbers_with_statements[index+1:]
end_line_number = len(lines) if len(remaining_line_numbers) == 0 else min(remaining_line_numbers) - 1
current_statement_is_oneline = line_number == end_line_number
if len(statement_ranges) == 0:
statement_ranges.append((line_number, end_line_number, current_statement_is_oneline))
continue
previous_statement = statement_ranges[-1]
previous_statement_is_oneline = previous_statement[2]
if previous_statement_is_oneline and current_statement_is_oneline:
statement_ranges[-1] = previous_statement[0], end_line_number, True
else:
statement_ranges.append((line_number, end_line_number, current_statement_is_oneline))
return statement_ranges
def normalize_lines(source):
"""Normalize blank lines for sending to the terminal.
Blank lines within a statement block are removed to prevent the REPL
from thinking the block is finished. Newlines are added to separate
top-level statements so that the REPL does not think there is a syntax
error.
"""
lines = source.splitlines(False)
# If we have two blank lines, then add two blank lines.
    # Do not trim the spaces: if we have blank lines with spaces, it's
    # possible we have indented code.
if (len(lines) > 1 and len(''.join(lines[-2:])) == 0) \
or source.endswith(('\n\n', '\r\n\r\n')):
trailing_newline = '\n' * 2
# Find out if we have any trailing blank lines
elif len(lines[-1].strip()) == 0 or source.endswith(('\n', '\r\n')):
trailing_newline = '\n'
else:
trailing_newline = ''
# Step 1: Remove empty lines.
tokens = _tokenize(source)
newlines_indexes_to_remove = (spos[0] for (toknum, tokval, spos, epos, line) in tokens
if len(line.strip()) == 0
and token.tok_name[toknum] == 'NL'
and spos[0] == epos[0])
for line_number in reversed(list(newlines_indexes_to_remove)):
del lines[line_number-1]
# Step 2: Add blank lines between each global statement block.
    # Consecutive single-line blocks of code will be treated as a single statement,
# just to ensure we do not unnecessarily add too many blank lines.
source = '\n'.join(lines)
tokens = _tokenize(source)
dedent_indexes = (spos[0] for (toknum, tokval, spos, epos, line) in tokens
if toknum == token.DEDENT and _indent_size(line) == 0)
global_statement_ranges = _get_global_statement_blocks(source, lines)
start_positions = map(operator.itemgetter(0), reversed(global_statement_ranges))
for line_number in filter(lambda x: x > 1, start_positions):
lines.insert(line_number-1, '')
sys.stdout.write('\n'.join(lines) + trailing_newline)
sys.stdout.flush()
if __name__ == '__main__':
contents = sys.argv[1]
try:
default_encoding = sys.getdefaultencoding()
encoded_contents = contents.encode(default_encoding, 'surrogateescape')
contents = encoded_contents.decode(default_encoding, 'replace')
except (UnicodeError, LookupError):
pass
if isinstance(contents, bytes):
contents = contents.decode('utf8')
normalize_lines(contents)
| lgeiger/ide-python | lib/debugger/VendorLib/vs-py-debugger/pythonFiles/normalizeForInterpreter.py | Python | mit | 4,823 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class SubnetAssociation(Model):
"""Network interface and its custom security rules.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Subnet ID.
:vartype id: str
:param security_rules: Collection of custom security rules.
:type security_rules:
list[~azure.mgmt.network.v2016_09_01.models.SecurityRule]
"""
_validation = {
'id': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'security_rules': {'key': 'securityRules', 'type': '[SecurityRule]'},
}
def __init__(self, *, security_rules=None, **kwargs) -> None:
super(SubnetAssociation, self).__init__(**kwargs)
self.id = None
self.security_rules = security_rules
| lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2016_09_01/models/subnet_association_py3.py | Python | mit | 1,314 |
# This file is part of MyPaint.
# Copyright (C) 2011-2018 by the MyPaint Development Team.
# Copyright (C) 2007-2012 by Martin Renold <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""Core layer classes etc."""
## Imports
from __future__ import division, print_function
import logging
import os
import xml.etree.ElementTree as ET
import weakref
from warnings import warn
import abc
from lib.gettext import C_
import lib.mypaintlib
import lib.strokemap
import lib.helpers as helpers
import lib.fileutils
import lib.pixbuf
from lib.modes import PASS_THROUGH_MODE
from lib.modes import STANDARD_MODES
from lib.modes import DEFAULT_MODE
from lib.modes import ORA_MODES_BY_OPNAME
from lib.modes import MODES_EFFECTIVE_AT_ZERO_ALPHA
from lib.modes import MODES_DECREASING_BACKDROP_ALPHA
import lib.xml
import lib.naming
import lib.tiledsurface
from .rendering import Renderable
from lib.pycompat import unicode
logger = logging.getLogger(__name__)
## Base class defs
class LayerBase (Renderable):
"""Base class defining the layer API
Layers support the Renderable interface, and are rendered with the
"render_*()" methods of their root layer stack.
Layers are minimally aware of the tree structure they reside in, in
that they contain a reference to the root of their tree for
signalling purposes. Updates to the tree structure and to layers'
graphical contents are announced via the RootLayerStack object
representing the base of the tree.
"""
## Class constants
#: Forms the default name, may be suffixed per lib.naming consts.
DEFAULT_NAME = C_(
"layer default names",
u"Layer",
)
#: A string for the layer type.
TYPE_DESCRIPTION = None
PERMITTED_MODES = set(STANDARD_MODES)
INITIAL_MODE = DEFAULT_MODE
## Construction, loading, other lifecycle stuff
def __init__(self, name=None, **kwargs):
"""Construct a new layer
:param name: The name for the new layer.
:param **kwargs: Ignored.
All layer subclasses must permit construction without
parameters.
"""
super(LayerBase, self).__init__()
# Defaults for the notifiable properties
self._opacity = 1.0
self._name = name
self._visible = True
self._locked = False
self._mode = self.INITIAL_MODE
self._group_ref = None
self._root_ref = None
self._thumbnail = None
self._bumpself = True
self._bumpself_rough = 1.0
self._bumpself_amp = 0.8
#: True if the layer was marked as selected when loaded.
self.initially_selected = False
@classmethod
def new_from_openraster(cls, orazip, elem, cache_dir, progress,
root, x=0, y=0, **kwargs):
"""Reads and returns a layer from an OpenRaster zipfile
This implementation just creates a new instance of its class and
calls `load_from_openraster()` on it. This should suffice for
all subclasses which support parameterless construction.
"""
layer = cls()
layer.load_from_openraster(
orazip,
elem,
cache_dir,
progress,
x=x, y=y,
**kwargs
)
return layer
@classmethod
def new_from_openraster_dir(cls, oradir, elem, cache_dir, progress,
root, x=0, y=0, **kwargs):
"""Reads and returns a layer from an OpenRaster-like folder
This implementation just creates a new instance of its class and
calls `load_from_openraster_dir()` on it. This should suffice
for all subclasses which support parameterless construction.
"""
layer = cls()
layer.load_from_openraster_dir(
oradir,
elem,
cache_dir,
progress,
x=x, y=y,
**kwargs
)
return layer
def load_from_openraster(self, orazip, elem, cache_dir, progress,
x=0, y=0, **kwargs):
"""Loads layer data from an open OpenRaster zipfile
:param orazip: An OpenRaster zipfile, opened for extracting
:type orazip: zipfile.ZipFile
:param elem: <layer/> or <stack/> element to load (stack.xml)
:type elem: xml.etree.ElementTree.Element
:param cache_dir: Cache root dir for this document
:param progress: Provides feedback to the user.
:type progress: lib.feedback.Progress or None
:param x: X offset of the top-left point for image data
:param y: Y offset of the top-left point for image data
:param **kwargs: Extensibility
The base implementation loads the common layer flags from a `<layer/>`
or `<stack/>` element, but does nothing more than that. Loading layer
data from the zipfile or recursing into stack contents is deferred to
subclasses.
"""
self._load_common_flags_from_ora_elem(elem)
def load_from_openraster_dir(self, oradir, elem, cache_dir, progress,
x=0, y=0, **kwargs):
"""Loads layer data from an OpenRaster-style folder.
Parameters are the same as for load_from_openraster, with the
following exception (replacing ``orazip``):
:param unicode/str oradir: Folder with a .ORA-like tree structure.
"""
self._load_common_flags_from_ora_elem(elem)
def _load_common_flags_from_ora_elem(self, elem):
attrs = elem.attrib
self.name = unicode(attrs.get('name', ''))
compop = str(attrs.get('composite-op', ''))
self.mode = ORA_MODES_BY_OPNAME.get(compop, DEFAULT_MODE)
self.opacity = helpers.clamp(float(attrs.get('opacity', '1.0')),
0.0, 1.0)
bumpself = attrs.get("bumpself", 'false').lower()
self.bumpself = lib.xml.xsd2bool(bumpself)
bumpbg = attrs.get("bumpself", 'false').lower()
self.bumpbg = lib.xml.xsd2bool(bumpbg)
self.bumpself_rough = helpers.clamp(float(attrs.get('bumpself_rough', '1.0')),
0.0, 1.0)
self.bumpself_amp = helpers.clamp(float(attrs.get('bumpself_amp', '0.8')),
0.0, 1.0)
self.bumpbg_rough = helpers.clamp(float(attrs.get('bumpbg_rough', '1.0')),
0.0, 1.0)
self.bumpbg_amp = helpers.clamp(float(attrs.get('bumpbg_amp', '0.8')),
0.0, 1.0)
visible = attrs.get('visibility', 'visible').lower()
self.visible = (visible != "hidden")
locked = attrs.get("edit-locked", 'false').lower()
self.locked = lib.xml.xsd2bool(locked)
selected = attrs.get("selected", 'false').lower()
self.initially_selected = lib.xml.xsd2bool(selected)
def __deepcopy__(self, memo):
"""Returns an independent copy of the layer, for Duplicate Layer
>>> from copy import deepcopy
>>> orig = _StubLayerBase()
>>> dup = deepcopy(orig)
Everything about the returned layer must be a completely
independent copy of the original layer. If the copy can be
worked on, working on it must leave the original unaffected.
This base implementation can be reused/extended by subclasses if
they support zero-argument construction. It will use the derived
class's snapshotting implementation (see `save_snapshot()` and
`load_snapshot()`) to populate the copy.
"""
layer = self.__class__()
layer.load_snapshot(self.save_snapshot())
return layer
def clear(self):
"""Clears the layer"""
pass
## Properties
@property
def group(self):
"""The group of the current layer.
Returns None if the layer is not in a group.
>>> from . import group
>>> outer = group.LayerStack()
>>> inner = group.LayerStack()
>>> scribble = _StubLayerBase()
>>> outer.append(inner)
>>> inner.append(scribble)
>>> outer.group is None
True
>>> inner.group == outer
True
>>> scribble.group == inner
True
"""
if self._group_ref is not None:
return self._group_ref()
return None
@group.setter
def group(self, group):
if group is None:
self._group_ref = None
else:
self._group_ref = weakref.ref(group)
@property
def root(self):
"""The root of the layer tree structure
Only RootLayerStack instances or None are permitted.
You won't normally need to adjust this unless you're doing
something fancy: it's automatically maintained by intermediate
and root `LayerStack` elements in the tree whenever layers are
added or removed from a rooted tree structure.
>>> from . import tree
>>> root = tree.RootLayerStack(doc=None)
>>> layer = _StubLayerBase()
>>> root.append(layer)
>>> layer.root #doctest: +ELLIPSIS
<RootLayerStack...>
>>> layer.root is root
True
"""
if self._root_ref is not None:
return self._root_ref()
return None
@root.setter
def root(self, newroot):
if newroot is None:
self._root_ref = None
else:
self._root_ref = weakref.ref(newroot)
@property
def opacity(self):
"""Opacity multiplier for the layer.
Values must permit conversion to a `float` in [0, 1].
Changing this property issues ``layer_properties_changed`` and
appropriate ``layer_content_changed`` notifications via the root
layer stack if the layer is within a tree structure.
Layers with a `mode` of `PASS_THROUGH_MODE` have immutable
opacities: the value is always 100%. This restriction only
applies to `LayerStack`s - i.e. layer groups - because those are
the only kinds of layer which can be put into pass-through mode.
"""
return self._opacity
@opacity.setter
def opacity(self, opacity):
opacity = helpers.clamp(float(opacity), 0.0, 1.0)
if opacity == self._opacity:
return
if self.mode == PASS_THROUGH_MODE:
warn("Cannot change the change the opacity multiplier "
"of a layer group in PASS_THROUGH_MODE",
RuntimeWarning, stacklevel=2)
return
self._opacity = opacity
self._properties_changed(["opacity"])
# Note: not the full_redraw_bbox here.
# Changing a layer's opacity multiplier alone cannot change the
# calculated alpha of an outlying empty tile in the layer.
# Those are always zero. Even if the layer has a fancy masking
# mode, that won't affect redraws arising from mere opacity
# multiplier updates.
bbox = tuple(self.get_bbox())
self._content_changed(*bbox)
@property
def name(self):
"""The layer's name, for display purposes
Values must permit conversion to a unicode string. If the
layer is part of a tree structure, ``layer_properties_changed``
notifications will be issued via the root layer stack. In
addition, assigned names may be corrected to be unique within
the tree.
"""
return self._name
@name.setter
def name(self, name):
if name is not None:
name = unicode(name)
else:
name = self.DEFAULT_NAME
oldname = self._name
self._name = name
root = self.root
if root is not None:
self._name = root.get_unique_name(self)
if self._name != oldname:
self._properties_changed(["name"])
@property
def visible(self):
"""Whether the layer has a visible effect on its backdrop.
Some layer modes normally have an effect even if the calculated
alpha of a pixel is zero. This switch turns that off too.
Values must permit conversion to a `bool`.
Changing this property issues ``layer_properties_changed`` and
appropriate ``layer_content_changed`` notifications via the root
layer stack if the layer is within a tree structure.
"""
return self._visible
@visible.setter
def visible(self, visible):
visible = bool(visible)
if visible == self._visible:
return
self._visible = visible
self._properties_changed(["visible"])
# Toggling the visibility flag always causes the mode to stop
# or start having its normal effect. Need the full redraw bbox
# so that outlying empty tiles will be updated properly.
bbox = tuple(self.get_full_redraw_bbox())
self._content_changed(*bbox)
@property
def bumpself(self):
"""Whether the layer applies a bumpmap to itself using its own data
"""
return self._bumpself
@bumpself.setter
def bumpself(self, bumpself):
bumpself = bool(bumpself)
if bumpself == self._bumpself:
return
self._bumpself = bumpself
self._properties_changed(["bumpself"])
# Toggling the bumpself flag always causes the mode to stop
# or start having bumpmap effect. Need the full redraw bbox
# so that outlying empty tiles will be updated properly.
bbox = tuple(self.get_full_redraw_bbox())
self._content_changed(*bbox)
@property
def bumpself_amp(self):
"""bumpself_amp
amplifies bump mapping
"""
return self._bumpself_amp
@bumpself_amp.setter
def bumpself_amp(self, bumpself_amp):
bumpself_amp = helpers.clamp(float(bumpself_amp), 0.0, 1.0)
if bumpself_amp == self._bumpself_amp:
return
self._bumpself_amp = bumpself_amp
self._properties_changed(["bumpself_amp"])
bbox = tuple(self.get_bbox())
self._content_changed(*bbox)
@property
def bumpself_rough(self):
"""bumpself_rough
amplifies bump mapping
"""
return self._bumpself_rough
@bumpself_rough.setter
def bumpself_rough(self, bumpself_rough):
bumpself_rough = helpers.clamp(float(bumpself_rough), 0.0, 1.0)
if bumpself_rough == self._bumpself_rough:
return
self._bumpself_rough = bumpself_rough
self._properties_changed(["bumpself_rough"])
bbox = tuple(self.get_bbox())
self._content_changed(*bbox)
@property
def branch_visible(self):
"""Check whether the layer's branch is visible.
Returns True if the layer's group and all of its parents are visible,
False otherwise.
Returns True if the layer is not in a group.
>>> from . import group
>>> outer = group.LayerStack()
>>> inner = group.LayerStack()
>>> scribble = _StubLayerBase()
>>> outer.append(inner)
>>> inner.append(scribble)
>>> outer.branch_visible
True
>>> inner.branch_visible
True
>>> scribble.branch_visible
True
>>> outer.visible = False
>>> outer.branch_visible
True
>>> inner.branch_visible
False
>>> scribble.branch_visible
False
"""
group = self.group
if group is None:
return True
return group.visible and group.branch_visible
@property
def locked(self):
"""Whether the layer is locked (immutable).
Values must permit conversion to a `bool`.
Changing this property issues `layer_properties_changed` via the
root layer stack if the layer is within a tree structure.
"""
return self._locked
@locked.setter
def locked(self, locked):
locked = bool(locked)
if locked != self._locked:
self._locked = locked
self._properties_changed(["locked"])
@property
def branch_locked(self):
"""Check whether the layer's branch is locked.
Returns True if the layer's group or at least one of its parents
is locked, False otherwise.
Returns False if the layer is not in a group.
>>> from . import group
>>> outer = group.LayerStack()
>>> inner = group.LayerStack()
>>> scribble = _StubLayerBase()
>>> outer.append(inner)
>>> inner.append(scribble)
>>> outer.branch_locked
False
>>> inner.branch_locked
False
>>> scribble.branch_locked
False
>>> outer.locked = True
>>> outer.branch_locked
False
>>> inner.branch_locked
True
>>> scribble.branch_locked
True
"""
group = self.group
if group is None:
return False
return group.locked or group.branch_locked
@property
def mode(self):
"""How this layer combines with its backdrop.
Values must permit conversion to an int, and must be permitted
for the mode's class.
Changing this property issues ``layer_properties_changed`` and
appropriate ``layer_content_changed`` notifications via the root
layer stack if the layer is within a tree structure.
In addition to the modes supported by the base implementation,
layer groups permit `lib.modes.PASS_THROUGH_MODE`, an
additional mode where group contents are rendered as if their
group were not present. Setting the mode to this value also
sets the opacity to 100%.
For layer groups, "Normal" mode implies group isolation
internally. These semantics differ from those of OpenRaster and
the W3C, but saving and loading applies the appropriate
transformation.
See also: PERMITTED_MODES.
"""
return self._mode
@mode.setter
def mode(self, mode):
mode = int(mode)
if mode not in self.PERMITTED_MODES:
mode = DEFAULT_MODE
if mode == self._mode:
return
# Forcing the opacity for layer groups here allows a redraw to
# be subsumed. Only layer groups permit PASS_THROUGH_MODE.
propchanges = []
if mode == PASS_THROUGH_MODE:
self._opacity = 1.0
propchanges.append("opacity")
# When changing the mode, the before and after states may have
# different treatments of outlying empty tiles. Need the full
# redraw bboxes of both states to ensure correct redraws.
redraws = [self.get_full_redraw_bbox()]
self._mode = mode
redraws.append(self.get_full_redraw_bbox())
self._content_changed(*tuple(combine_redraws(redraws)))
propchanges.append("mode")
self._properties_changed(propchanges)
## Notifications
def _content_changed(self, *args):
"""Notifies the root's content observers
If this layer's root stack is defined, i.e. if it is part of a
tree structure, the root's `layer_content_changed()` event
method will be invoked with this layer and the supplied
arguments. This reflects a region of pixels in the document
changing.
"""
root = self.root
if root is not None:
root.layer_content_changed(self, *args)
def _properties_changed(self, properties):
"""Notifies the root's layer properties observers
If this layer's root stack is defined, i.e. if it is part of a
tree structure, the root's `layer_properties_changed()` event
method will be invoked with the layer and the supplied
arguments. This reflects details about the layer like its name
or its locked status changing.
"""
root = self.root
if root is not None:
root._notify_layer_properties_changed(self, set(properties))
## Info methods
def get_icon_name(self):
"""The name of the icon to display for the layer
Ideally symbolic. A value of `None` means that no icon should be
displayed.
"""
return None
@property
def effective_opacity(self):
"""The opacity used when rendering a layer: zero if invisible
This must match the appearance produced by the layer's
Renderable.get_render_ops() implementation when it is called
with no explicit "layers" specification. The base class's
effective opacity is zero because the base get_render_ops() is
unimplemented.
"""
return 0.0
def get_alpha(self, x, y, radius):
"""Gets the average alpha within a certain radius at a point
:param x: model X coordinate
:param y: model Y coordinate
:param radius: radius over which to average
:rtype: float
The return value is not affected by the layer opacity, effective or
otherwise. This is used by `Document.pick_layer()` and friends to test
whether there's anything significant present at a particular point.
The default alpha at a point is zero.
"""
return 0.0
def get_bbox(self):
"""Returns the inherent (data) bounding box of the layer
:rtype: lib.helpers.Rect
The returned rectangle is generally tile-aligned, but isn't
required to be. In this base implementation, the returned bbox
is a zero-size default Rect, which is also how a full redraw is
signalled. Subclasses should override this with a better
implementation.
The data bounding box is used for certain classes of redraws.
See also get_full_redraw_bbox().
"""
return helpers.Rect()
def get_full_redraw_bbox(self):
"""Gets the full update notification bounding box of the layer
:rtype: lib.helpers.Rect
This is the appropriate bounding box for redraws if a layer-wide
property like visibility or combining mode changes.
Normally this is the layer's inherent data bounding box, which
allows the GUI to skip outlying empty tiles when redrawing the
layer stack. If instead the layer's compositing mode dictates
that a calculated pixel alpha of zero would affect the backdrop
regardless - something that's true of certain masking modes -
then the returned bbox is a zero-size rectangle, which is the
signal for a full redraw.
See also get_bbox().
"""
if self.mode in MODES_EFFECTIVE_AT_ZERO_ALPHA:
return helpers.Rect()
else:
return self.get_bbox()
def is_empty(self):
"""Tests whether the surface is empty
Always true in the base implementation.
"""
return True
def get_paintable(self):
"""True if this layer currently accepts painting brushstrokes
Always false in the base implementation.
"""
return False
def get_fillable(self):
"""True if this layer currently accepts flood fill
Always false in the base implementation.
"""
return False
def get_stroke_info_at(self, x, y):
"""Return the brushstroke at a given point
:param x: X coordinate to pick from, in model space.
:param y: Y coordinate to pick from, in model space.
:rtype: lib.strokemap.StrokeShape or None
Returns None for the base class.
"""
return None
def get_last_stroke_info(self):
"""Return the most recently painted stroke
:rtype lib.strokemap.StrokeShape or None
Returns None for the base class.
"""
return None
def get_mode_normalizable(self):
"""True if this layer can be normalized"""
unsupported = set(MODES_EFFECTIVE_AT_ZERO_ALPHA)
# Normalizing would have to make an infinite number of tiles
unsupported.update(MODES_DECREASING_BACKDROP_ALPHA)
# Normal mode cannot decrease the bg's alpha
return self.mode not in unsupported
def get_trimmable(self):
"""True if this layer currently accepts trim()"""
return False
def has_interesting_name(self):
"""True if the layer looks as if it has a user-assigned name
Interesting means non-blank, and not the default name or a
numbered version of it. This is used when merging layers: Merge
Down is used on temporary layers a lot, and those probably have
boring names.
"""
name = self._name
if name is None or name.strip() == '':
return False
if name == self.DEFAULT_NAME:
return False
match = lib.naming.UNIQUE_NAME_REGEX.match(name)
if match is not None:
base = unicode(match.group("name"))
if base == self.DEFAULT_NAME:
return False
return True
## Flood fill
def flood_fill(self, fill_args, dst_layer=None):
"""Fills a point on the surface with a color
See PaintingLayer.flood_fill() for parameters and semantics.
The base implementation does nothing.
"""
pass
## Rendering
def get_tile_coords(self):
"""Returns all data tiles in this layer
:returns: All tiles with data
:rtype: sequence
This method should return a sequence listing the coordinates for
all tiles with data in this layer.
It is used when computing layer merges. Tile coordinates must
be returned as ``(tx, ty)`` pairs.
The base implementation returns an empty sequence.
"""
return []
## Translation
def get_move(self, x, y):
"""Get a translation/move object for this layer
:param x: Model X position of the start of the move
        :param y: Model Y position of the start of the move
:returns: A move object
"""
raise NotImplementedError
def translate(self, dx, dy):
"""Translate a layer non-interactively
:param dx: Horizontal offset in model coordinates
:param dy: Vertical offset in model coordinates
:returns: full redraw bboxes for the move: ``[before, after]``
:rtype: list
The base implementation uses `get_move()` and the object it returns.
"""
update_bboxes = [self.get_full_redraw_bbox()]
move = self.get_move(0, 0)
move.update(dx, dy)
move.process(n=-1)
move.cleanup()
update_bboxes.append(self.get_full_redraw_bbox())
return update_bboxes
## Standard stuff
def __repr__(self):
"""Simplified repr() of a layer"""
if self.name:
return "<%s %r>" % (self.__class__.__name__, self.name)
else:
return "<%s>" % (self.__class__.__name__)
def __nonzero__(self):
"""Layers are never false in Py2."""
return self.__bool__()
def __bool__(self):
"""Layers are never false in Py3.
>>> sample = _StubLayerBase()
>>> bool(sample)
True
"""
return True
def __eq__(self, layer):
"""Two layers are only equal if they are the same object
This is meaningful during layer repositions in the GUI, where
shallow copies are used.
"""
return self is layer
def __hash__(self):
"""Return a hash for the layer (identity only)"""
return id(self)
## Saving
def save_as_png(self, filename, *rect, **kwargs):
"""Save to a named PNG file
:param filename: filename to save to
:param *rect: rectangle to save, as a 4-tuple
:param **kwargs: passthrough opts for underlying implementations
:rtype: Gdk.Pixbuf
The base implementation does nothing.
"""
pass
def save_to_openraster(self, orazip, tmpdir, path,
canvas_bbox, frame_bbox, **kwargs):
"""Saves the layer's data into an open OpenRaster ZipFile
:param orazip: a `zipfile.ZipFile` open for write
:param tmpdir: path to a temp dir, removed after the save
:param path: Unique path of the layer, for encoding in filenames
:type path: tuple of ints
:param canvas_bbox: Bounding box of all layers, absolute coords
:type canvas_bbox: tuple
:param frame_bbox: Bounding box of the image being saved
:type frame_bbox: tuple
:param **kwargs: Keyword args used by the save implementation
:returns: element describing data written
:rtype: xml.etree.ElementTree.Element
        There are three bounding boxes which need to be considered. The
inherent bbox of the layer as returned by `get_bbox()` is always
tile aligned and refers to absolute model coordinates, as is
`canvas_bbox`.
All of the above bbox's coordinates are defined relative to the
canvas origin. However, when saving, the data written must be
translated so that `frame_bbox`'s top left corner defines the
origin (0, 0), of the saved OpenRaster file. The width and
height of `frame_bbox` determine the saved image's dimensions.
More than one file may be written to the zipfile. The etree
element returned should describe everything that was written.
Paths must be unique sequences of ints, but are not necessarily
valid RootLayerStack paths. It's faked for the normally
unaddressable background layer right now, for example.
"""
raise NotImplementedError
def _get_stackxml_element(self, tag, x=None, y=None):
"""Internal: get a basic etree Element for .ora saving"""
elem = ET.Element(tag)
attrs = elem.attrib
if self.name:
attrs["name"] = str(self.name)
if x is not None:
attrs["x"] = str(x)
if y is not None:
attrs["y"] = str(y)
attrs["opacity"] = str(self.opacity)
if self.initially_selected:
attrs["selected"] = "true"
if self.locked:
attrs["edit-locked"] = "true"
if self.visible:
attrs["visibility"] = "visible"
else:
attrs["visibility"] = "hidden"
if self.bumpself:
attrs["bumpself"] = "true"
attrs["bumpself_rough"] = str(self.bumpself_rough)
attrs["bumpself_amp"] = str(self.bumpself_amp)
# NOTE: This *will* be wrong for the PASS_THROUGH_MODE case.
# NOTE: LayerStack will need to override this attr.
mode_info = lib.mypaintlib.combine_mode_get_info(self.mode)
if mode_info is not None:
compop = mode_info.get("name")
if compop is not None:
attrs["composite-op"] = str(compop)
return elem
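    # Illustrative sketch (not taken from the original sources): for a
    # typical visible, unlocked layer this helper produces an element
    # roughly like
    #   <layer name="Layer" x="0" y="0" opacity="1.0"
    #          visibility="visible" composite-op="svg:src-over"/>
    # with the exact attribute set depending on the layer's state.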
## Painting symmetry axis
def set_symmetry_state(self, active, center_x, center_y,
symmetry_type, rot_symmetry_lines):
"""Set the surface's painting symmetry axis and active flag.
:param bool active: Whether painting should be symmetrical.
:param int center_x: X coord of the axis of symmetry.
:param int center_y: Y coord of the axis of symmetry.
:param int symmetry_type: symmetry type that will be applied if active
:param int rot_symmetry_lines: number of rotational
symmetry lines for angle dependent symmetry modes.
The symmetry axis is only meaningful to paintable layers.
Received strokes are reflected along the line ``x=center_x``
when symmetrical painting is active.
This method is used by RootLayerStack only,
propagating a central shared flag and value to all layers.
The base implementation does nothing.
"""
pass
## Snapshot
def save_snapshot(self):
"""Snapshots the state of the layer, for undo purposes
The returned data should be considered opaque, useful only as a
memento to be restored with load_snapshot().
"""
return LayerBaseSnapshot(self)
def load_snapshot(self, sshot):
"""Restores the layer from snapshot data"""
sshot.restore_to_layer(self)
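    # Illustrative sketch (assumption, not in the original file): commands
    # use this pair as an undo memento, roughly
    #   memento = layer.save_snapshot()   # before mutating the layer
    #   ...                               # edit the layer
    #   layer.load_snapshot(memento)      # roll the edit back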
## Thumbnails
@property
def thumbnail(self):
"""The layer's cached preview thumbnail.
:rtype: GdkPixbuf.Pixbuf or None
Thumbnail pixbufs are always 256x256 pixels, and correspond to
the data bounding box of the layer only.
See also: render_thumbnail().
"""
return self._thumbnail
def update_thumbnail(self):
"""Safely updates the cached preview thumbnail.
This method updates self.thumbnail using render_thumbnail() and
the data bounding box, and eats any NotImplementedErrors.
This is used by the layer stack to keep the preview thumbnail up
to date. It is called automatically after layer data is changed
and stable for a bit, so there is normally no need to call it in
client code.
"""
try:
self._thumbnail = self.render_thumbnail(
self.get_bbox(),
alpha=True,
)
except NotImplementedError:
self._thumbnail = None
def render_thumbnail(self, bbox, **options):
"""Renders a 256x256 thumb of the layer in an arbitrary bbox.
:param tuple bbox: Bounding box to make a thumbnail of.
:param **options: Passed to RootLayerStack.render_layer_preview().
        :rtype: GdkPixbuf.Pixbuf or None
Use the thumbnail property if you just want a reasonably
up-to-date preview thumbnail for a single layer.
See also: RootLayerStack.render_layer_preview().
"""
root = self.root
if root is None:
return None
return root.render_layer_preview(self, bbox=bbox, **options)
## Trimming
def trim(self, rect):
"""Trim the layer to a rectangle, discarding data outside it
:param rect: A trimming rectangle in model coordinates
:type rect: tuple (x, y, w, h)
The base implementation does nothing.
"""
pass
class _StubLayerBase (LayerBase):
"""An instantiable (but broken) LayerBase, for testing."""
def get_render_ops(self, *argv, **kwargs):
pass
class LayerBaseSnapshot (object):
"""Base snapshot implementation
Snapshots are stored in commands, and used to implement undo and redo.
They must be independent copies of the data, although copy-on-write
semantics are fine. Snapshot objects must be complete enough clones of the
layer's data for duplication to work.
"""
def __init__(self, layer):
super(LayerBaseSnapshot, self).__init__()
self.name = layer.name
self.mode = layer.mode
self.opacity = layer.opacity
self.visible = layer.visible
self.locked = layer.locked
self.bumpself = layer.bumpself
self.bumpself_rough = layer.bumpself_rough
self.bumpself_amp = layer.bumpself_amp
def restore_to_layer(self, layer):
layer.name = self.name
layer.mode = self.mode
layer.opacity = self.opacity
layer.visible = self.visible
layer.locked = self.locked
layer.bumpself = self.bumpself
layer.bumpself_rough = self.bumpself_rough
layer.bumpself_amp = self.bumpself_amp
class ExternallyEditable:
"""Interface for layers which can be edited in an external app"""
__metaclass__ = abc.ABCMeta
_EDITS_SUBDIR = u"edits"
@abc.abstractmethod
def new_external_edit_tempfile(self):
"""Get a tempfile for editing in an external app
:rtype: unicode/str
:returns: Absolute path to a newly-created tempfile for editing
The returned tempfiles are only expected to persist on disk
until a subsequent call to this method is made.
"""
@abc.abstractmethod
def load_from_external_edit_tempfile(self, tempfile_path):
"""Load content from an external-edit tempfile
:param unicode/str tempfile_path: Tempfile to load.
"""
@property
def external_edits_dir(self):
"""Directory to use for external edit files"""
cache_dir = self.root.doc.cache_dir
edits_dir = os.path.join(cache_dir, self._EDITS_SUBDIR)
if not os.path.isdir(edits_dir):
os.makedirs(edits_dir)
return edits_dir
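# Illustrative sketch (assumption, not part of the original module): a
# concrete subclass would combine LayerBase with this interface roughly as
#
#   class _ExternallyEditableLayer (LayerBase, ExternallyEditable):
#       def new_external_edit_tempfile(self):
#           # write the layer's content to a new file under
#           # self.external_edits_dir and return its absolute path
#           ...
#       def load_from_external_edit_tempfile(self, tempfile_path):
#           # re-import the edited file's content into the layer
#           ...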
## Helper functions
def combine_redraws(bboxes):
"""Combine multiple rectangles representing redraw areas into one
:param iterable bboxes: Sequence of redraw bboxes (lib.helpers.Rect)
:returns: A single redraw bbox.
:rtype: lib.helpers.Rect
This is best used for small, related redraws, since the GUI may have
better ways of combining rectangles into update regions. Pairs of
before and after states are good candidates for using this.
If any of the input bboxes have zero size, the first such bbox is
returned. Zero-size update bboxes are the conventional way of
requesting a full-screen update.
"""
redraw_bbox = helpers.Rect()
for bbox in bboxes:
if bbox.w == 0 and bbox.h == 0:
return bbox
redraw_bbox.expandToIncludeRect(bbox)
return redraw_bbox
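# Illustrative sketch (assumption): the before/after bbox pair returned by
# LayerBase.translate() is a natural input for this helper, e.g.
#   before_and_after = layer.translate(dx, dy)
#   redraw_bbox = combine_redraws(before_and_after)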
## Module testing
def _test():
"""Run doctest strings"""
import doctest
doctest.testmod(optionflags=doctest.ELLIPSIS)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
_test()
| briend/mypaint | lib/layer/core.py | Python | gpl-2.0 | 38,035 |
# This file is part of Shuup.
#
# Copyright (c) 2012-2021, Shuup Commerce Inc. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
import pytest
from django.utils.translation import activate
from shuup.front.views.checkout import BaseCheckoutView
from shuup.testing.utils import apply_request_middleware
class CheckoutMethodsOnlyCheckoutView(BaseCheckoutView):
phase_specs = ["shuup.front.checkout.checkout_method:CheckoutMethodPhase"]
@pytest.mark.django_db
def test_checkout_method_phase_basic(rf):
activate("en")
view = CheckoutMethodsOnlyCheckoutView.as_view()
request = apply_request_middleware(rf.get("/"))
response = view(request=request, phase="checkout_method")
if hasattr(response, "render"):
response.render()
assert response.status_code == 200
| shoopio/shoop | shuup_tests/front/test_checkout_methods_phase.py | Python | agpl-3.0 | 898 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2010 kazacube (http://kazacube.com).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name' : 'Maroc - Accounting',
'version' : '1.0',
'author' : 'kazacube',
'category' : 'Localization/Account Charts',
'description': """
This is the base module to manage the accounting chart for Maroc.
=================================================================
This module loads the standard Moroccan chart of accounts template and makes it
possible to generate accounting reports complying with Moroccan standards
(balance sheet, CPC (statement of income and expenses), 6-column general trial
balance, cumulative general ledger...). The accounting integration was validated
with the help of the Seddik chartered accountancy firm during the third quarter
of 2010.""",
'website': 'http://www.kazacube.com',
'depends' : ['base', 'account'],
'data' : [
'security/ir.model.access.csv',
'account_type.xml',
'account_pcg_morocco.xml',
'l10n_ma_wizard.xml',
'l10n_ma_tax.xml',
'l10n_ma_journal.xml',
],
'demo' : [],
'auto_install': False,
'installable': True,
'images': ['images/config_chart_l10n_ma.jpeg','images/l10n_ma_chart.jpeg'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| diogocs1/comps | web/addons/l10n_ma/__openerp__.py | Python | apache-2.0 | 2,154 |
# Copyright 2015-2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from tempfile import NamedTemporaryFile
from urllib.parse import urlparse
import yaml
from behave import given
from behave import when
from itest_utils import get_service_connection_string
from paasta_tools import marathon_tools
from paasta_tools import utils
from paasta_tools.api.client import get_paasta_oapi_client_by_url
from paasta_tools.frameworks import native_scheduler
from paasta_tools.utils import decompose_job_id
def _get_marathon_connection_string(service="marathon"):
return "http://%s" % get_service_connection_string(service)
def _get_zookeeper_connection_string(chroot):
return "zk://{}/{}".format(get_service_connection_string("zookeeper"), chroot)
def setup_system_paasta_config():
zk_connection_string = _get_zookeeper_connection_string("mesos-testcluster")
system_paasta_config = utils.SystemPaastaConfig(
{
"cluster": "testcluster",
"deployd_log_level": "DEBUG",
"docker_volumes": [],
"docker_registry": "docker-dev.yelpcorp.com",
"zookeeper": zk_connection_string,
"synapse_port": 3212,
"marathon_servers": [
# if you're updating this list, you should update
# paasta_tools/yelp_package/dockerfiles/itest/api/marathon.json as well
{
"url": _get_marathon_connection_string("marathon"),
"user": None,
"password": None,
},
{
"url": _get_marathon_connection_string("marathon1"),
"user": None,
"password": None,
},
{
"url": _get_marathon_connection_string("marathon2"),
"user": None,
"password": None,
},
],
"dashboard_links": {
"testcluster": {
"Marathon RO": [
"http://accessible-marathon",
"http://accessible-marathon1",
"http://accessible-marathon2",
]
}
},
},
"/some_fake_path_to_config_dir/",
)
return system_paasta_config
def setup_marathon_clients():
system_paasta_config = setup_system_paasta_config()
marathon_servers = marathon_tools.get_marathon_servers(system_paasta_config)
clients = marathon_tools.get_marathon_clients(marathon_servers)
return (clients, marathon_servers, system_paasta_config)
def get_paasta_api_url():
return "http://{}/{}".format(get_service_connection_string("api"), "swagger.json")
def setup_paasta_api_client():
return get_paasta_oapi_client_by_url(urlparse(get_paasta_api_url()))
def _generate_mesos_cli_config(zk_host_and_port):
config = {
"profile": "default",
"default": {
"master": zk_host_and_port,
"log_level": "warning",
"log_file": "None",
"response_timeout": 5,
},
}
return config
def write_mesos_cli_config(config):
with NamedTemporaryFile(mode="w", delete=False) as mesos_cli_config_file:
mesos_cli_config_file.write(json.dumps(config))
return mesos_cli_config_file.name
def write_etc_paasta(context, config, filename):
context.etc_paasta = "/etc/paasta"
if not os.path.exists(context.etc_paasta):
os.makedirs(context.etc_paasta)
with open(os.path.join(context.etc_paasta, filename), "w") as f:
f.write(json.dumps(config))
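# Illustrative sketch (not one of the original steps): a call such as
#   write_etc_paasta(context, {"cluster": "testcluster"}, "cluster.json")
# leaves /etc/paasta/cluster.json containing the JSON-serialized dict,
# which the @given steps below rely on.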
@given("we add a new docker volume to the public config")
def add_volume_public_config(context):
write_etc_paasta(
context,
{
"volumes": [
{
"hostPath": "/nail/etc/beep",
"containerPath": "/nail/etc/beep",
"mode": "RO",
},
{
"hostPath": "/nail/etc/bop",
"containerPath": "/nail/etc/bop",
"mode": "RO",
},
{
"hostPath": "/nail/etc/boop",
"containerPath": "/nail/etc/boop",
"mode": "RO",
},
{
"hostPath": "/nail/tmp/noob",
"containerPath": "/nail/tmp/noob",
"mode": "RO",
},
]
},
"volumes.json",
)
@given("a working paasta cluster")
def working_paasta_cluster(context):
return working_paasta_cluster_with_registry(context, "docker.io")
@given("a working paasta cluster, with docker registry {docker_registry}")
def working_paasta_cluster_with_registry(context, docker_registry):
"""Adds a working marathon_clients for the purposes of
interacting with them in the test."""
if not hasattr(context, "marathon_clients"):
(
context.marathon_clients,
context.marathon_servers,
context.system_paasta_config,
) = setup_marathon_clients()
else:
print("Marathon connections already established")
if not hasattr(context, "paasta_api_client"):
context.paasta_api_client = setup_paasta_api_client()
mesos_cli_config = _generate_mesos_cli_config(
_get_zookeeper_connection_string("mesos-testcluster")
)
mesos_cli_config_filename = write_mesos_cli_config(mesos_cli_config)
context.tag_version = 0
write_etc_paasta(
context,
{"marathon_servers": context.system_paasta_config.get_marathon_servers()},
"marathon.json",
)
write_etc_paasta(
context,
{
"cluster": "testcluster",
"zookeeper": "zk://zookeeper/mesos-testcluster",
"vault_environment": "devc",
"docker_registry": docker_registry,
},
"cluster.json",
)
write_etc_paasta(context, {"log_writer": {"driver": "null"}}, "logs.json")
write_etc_paasta(context, {"sensu_host": None}, "sensu.json")
write_etc_paasta(
context,
{
"volumes": [
{
"hostPath": "/nail/etc/beep",
"containerPath": "/nail/etc/beep",
"mode": "RO",
},
{
"hostPath": "/nail/etc/bop",
"containerPath": "/nail/etc/bop",
"mode": "RO",
},
{
"hostPath": "/nail/etc/boop",
"containerPath": "/nail/etc/boop",
"mode": "RO",
},
]
},
"volumes.json",
)
write_etc_paasta(
context,
{"paasta_native": {"principal": "paasta_native", "secret": "secret4"}},
"paasta_native.json",
)
write_etc_paasta(
context, {"mesos_config": {"path": mesos_cli_config_filename}}, "mesos.json"
)
write_etc_paasta(
context,
{"api_endpoints": {"testcluster": get_paasta_api_url()}},
"api_endpoints.json",
)
write_etc_paasta(
context,
{
"dashboard_links": {
"testcluster": {
"Marathon RO": [
"http://accessible-marathon",
"http://accessible-marathon1",
"http://accessible-marathon2",
]
}
}
},
"dashboard_links.json",
)
write_etc_paasta(context, {"deployd_use_zk_queue": True}, "deployd.json")
@given(
'I have yelpsoa-configs for the marathon job "{job_id}" on shard {shard:d}, previous shard {previous_shard:d}'
)
@given('I have yelpsoa-configs for the marathon job "{job_id}"')
def write_soa_dir_marathon_job(context, job_id, shard=None, previous_shard=None):
(service, instance, _, __) = decompose_job_id(job_id)
try:
soa_dir = context.soa_dir
except AttributeError:
soa_dir = "/nail/etc/services/"
if not os.path.exists(os.path.join(soa_dir, service)):
os.makedirs(os.path.join(soa_dir, service))
soa = {
str(instance): {
"cpus": 0.1,
"mem": 100,
"marathon_shard": shard,
"previous_marathon_shards": [previous_shard] if previous_shard else None,
}
}
if hasattr(context, "cmd"):
soa[instance]["cmd"] = context.cmd
with open(
os.path.join(soa_dir, service, "marathon-%s.yaml" % context.cluster), "w"
) as f:
f.write(yaml.safe_dump(soa))
context.soa_dir = soa_dir
@given('we have yelpsoa-configs for native service "{job_id}"')
def write_soa_dir_native_service(context, job_id):
(service, instance, _, __) = decompose_job_id(job_id)
try:
soa_dir = context.soa_dir
except AttributeError:
soa_dir = "/nail/etc/services/"
if not os.path.exists(os.path.join(soa_dir, service)):
os.makedirs(os.path.join(soa_dir, service))
with open(
os.path.join(soa_dir, service, "paasta_native-%s.yaml" % context.cluster), "w"
) as f:
f.write(
yaml.safe_dump(
{"%s" % instance: {"cpus": 0.1, "mem": 100, "cmd": "/bin/sleep 300"}}
)
)
context.soa_dir = soa_dir
context.service = service
context.instance = instance
@given("we load_paasta_native_job_config")
def call_load_paasta_native_job_config(context):
context.new_config = native_scheduler.load_paasta_native_job_config(
service=context.service,
instance=context.instance,
cluster=context.cluster,
soa_dir=context.soa_dir,
)
@given(
'we have a deployments.json for the service "{service}" with {disabled} instance '
'"{csv_instances}" image "{image}"'
)
def write_soa_dir_deployments(context, service, disabled, csv_instances, image):
if disabled == "disabled":
desired_state = "stop"
else:
desired_state = "start"
if not os.path.exists(os.path.join(context.soa_dir, service)):
os.makedirs(os.path.join(context.soa_dir, service))
with open(os.path.join(context.soa_dir, service, "deployments.json"), "w") as dp:
dp.write(
json.dumps(
{
"v1": {
"{}:paasta-{}".format(
service, utils.get_paasta_branch(context.cluster, instance)
): {"docker_image": image, "desired_state": desired_state}
for instance in csv_instances.split(",")
},
"v2": {
"deployments": {
f"{context.cluster}.{instance}": {
"docker_image": image,
"git_sha": "deadbeef",
}
for instance in csv_instances.split(",")
},
"controls": {
f"{service}:{context.cluster}.{instance}": {
"desired_state": desired_state,
"force_bounce": None,
}
for instance in csv_instances.split(",")
},
},
}
)
)
@given(
'we have a deployments.json for the service "{service}" with {disabled} instance "{csv_instance}"'
)
def write_soa_dir_deployments_default_image(context, service, disabled, csv_instance):
write_soa_dir_deployments(
context,
service,
disabled,
csv_instance,
"test-image-foobar%d" % context.tag_version,
)
@when(
(
'we set the "{field}" field of the {framework} config for service "{service}"'
' and instance "{instance}" to "{value}"'
)
)
def modify_configs(context, field, framework, service, instance, value):
soa_dir = context.soa_dir
with open(
os.path.join(soa_dir, service, f"{framework}-{context.cluster}.yaml"), "r+"
) as f:
data = yaml.safe_load(f.read())
data[instance][field] = value
f.seek(0)
f.write(yaml.safe_dump(data))
f.truncate()
@when(
(
'we set the "{field}" field of the {framework} config for service "{service}"'
' and instance "{instance}" to the integer {value:d}'
)
)
def modify_configs_for_int(context, field, framework, service, instance, value):
modify_configs(context, field, framework, service, instance, value)
| Yelp/paasta | paasta_itests/steps/setup_steps.py | Python | apache-2.0 | 13,298 |
#!/usr/bin/env python
"""
Created on Sun Feb 9 16:47:35 2014
Author: Oren Freifeld
Email: [email protected]
"""
import pickle, pdb
import pylab
from pylab import plt
import numpy as np
#pylab.ion()
pylab.close('all')
#from js.utils.timing import StopWatch
from transform_cuda import CpavfCalcsGPU
from of.utils import *
dim = 2
mytype = np.float64
if dim == 1:
#my_dict = Pkl.load('./Debug1D.pkl')
my_dict = Pkl.load('./Debug1Dwarparound.pkl')
elif dim == 2:
my_dict = Pkl.load('./Debug2D.pkl')
nCells = my_dict['Trels'].shape[0]
print my_dict.keys()
print my_dict['pts_at_0'].shape
N = my_dict['pts_at_0'].shape[0]
#print my_dict['CPU_results'].shape
# call to the transform function
#sw = StopWatch()
#calc_flowline_arr1d(**my_dict)
#sw.toc("flowline CPU")
#pos0 = np.zeros((my_dict['x_old'].shape[0],2))
#pos0[:,0] = my_dict['x_old'];
#pos0[:,1] = my_dict['y_old'];
#X,Y = np.meshgrid(np.arange(512),np.arange(512))
#pos0 = np.zeros((512*512,2))
#pos0[:,0] = X.ravel()
#pos0[:,1] = Y.ravel()
#
#sw.tic()
cpavf_calcs_gpu = CpavfCalcsGPU(nCells,mytype,dim)
#sw.toctic("GPU init")
#print my_dict['pts_at_0']
pts0 = my_dict['pts_at_0']
# NOTE: this block was left unfinished in the original script (a dangling
# "np." and an unclosed zeros() call); it is completed minimally here so
# that the file at least parses.
nPts = 1000
pts = np.zeros((nPts, 3))
nC = 4**3
# Assumption: calc_transformation expects a preallocated output array as
# its last argument, so allocate one the same shape as the input points.
posT = np.zeros_like(pts0)
posT = cpavf_calcs_gpu.calc_transformation(
        my_dict['xmins'],
        my_dict['xmaxs'],
        my_dict['Trels'], my_dict['As'],
        my_dict['pts_at_0'], my_dict['dt'],
        my_dict['nTimeSteps'], my_dict['nStepsODEsolver'],
        posT)
1/0  # intentional early stop kept from the original debugging script
#sw.toctic("GPU compute")
v = cpavf_calcs_gpu.calc_velocities(
my_dict['xmins'],
my_dict['xmaxs'],
my_dict['As'],
my_dict['pts_at_0'])
#sw.toctic("GPU velocities")
posH = cpavf_calcs_gpu.calc_trajectory(
my_dict['xmins'],
my_dict['xmaxs'],
my_dict['Trels'],my_dict['As'],
my_dict['pts_at_0'],my_dict['dt'],
my_dict['nTimeSteps'],my_dict['nStepsODEsolver'])
# to have the starting points in the history is as well
posH = np.r_[my_dict['pts_at_0'],posH]
# make sure the ending points are the same for both methods
T = my_dict['nTimeSteps']
if np.any((posH[(T)*N : (T+1)*N,:]-posT) > 1e-6):
print (posH[(T)*N : (T+1)*N,:]-posT)
raise ValueError
#pdb.set_trace()
#print posT.shape
#print posT.T
if dim == 1:
fig=plt.figure()
plt.subplot(5,1,1)
plt.plot(np.arange(posT.size),my_dict['pts_at_0'][:,0],'.r')
plt.subplot(5,1,2)
plt.plot(np.arange(posT.size),posT[:,0],'.b',label='GPU')
plt.legend()
plt.subplot(5,1,3)
#plt.plot(np.arange(posT.size),my_dict['CPU_results'][0,:],'.r',label='CPU')
plt.plot(np.arange(posT.size),posT[:,0],'.b')
plt.legend()
plt.subplot(5,1,4)
plt.plot(np.arange(v.size),v[:,0],'.b',label='GPU velocities')
plt.legend()
plt.subplot(5,1,5)
for i in range(0,N,32):
# print posH[i::N].shape
plt.plot(np.ones(T+1)*i,posH[i::N],'r-x')
plt.plot(np.ones(1)*i,posH[i,0],'bo')
plt.legend()
plt.show()
elif dim == 2:
fig=plt.figure()
plt.subplot(4,1,1)
plt.plot(my_dict['pts_at_0'][:,0],my_dict['pts_at_0'][:,1],'.r')
plt.gca().invert_yaxis()
plt.subplot(4,1,2)
plt.plot(posT[:,0],posT[:,1],'.b',label='GPU')
plt.gca().invert_yaxis()
plt.legend()
plt.subplot(4,1,3)
plt.plot(my_dict['CPU_results'][0,:],my_dict['CPU_results'][1,:],'.r',label='CPU')
plt.plot(posT[:,0],posT[:,1],'.b',label='GPU')
plt.gca().invert_yaxis()
plt.legend()
plt.subplot(4,1,4)
for i in range(0,N,32):
plt.plot(posH[i::N,0],posH[i::N,1],'r')
# plt.plot(posH[i::N,0],posH[i::N,1],'r')
# plt.plot(posH[i,0],posH[i,1],'bo')
plt.gca().invert_yaxis()
plt.legend()
plt.figure()
# plt.plot(v[:,0],v[:,1],'.b',label='GPU velocities')
plt.quiver(my_dict['pts_at_0'][:,0][::10],
my_dict['pts_at_0'][:,1][::10],
v[:,0][::10],v[:,1][::10])
plt.gca().invert_yaxis()
plt.axis('scaled')
plt.legend()
pylab.show()
#raw_input()
| freifeld/cpabDiffeo | cpab/gpu/demo_3d.py | Python | mit | 3,810 |
from pudzu.charts import *
df = pd.read_csv("datasets/flagsyk.csv")
groups = list(remove_duplicates(df.group))
array = [[dict(r) for _,r in df.iterrows() if r.group == g] for g in groups]
data = pd.DataFrame(array, index=list(remove_duplicates(df.group)))
FONT = calibri or sans
fg, bg="black", "#EEEEEE"
default_img = "https://s-media-cache-ak0.pinimg.com/736x/0d/36/e7/0d36e7a476b06333d9fe9960572b66b9.jpg"
def process(d):
if not d: return None
description = get_non(d, 'description')
description = "({})".format(description) if description else " "
flag = Image.from_url_with_cache(get_non(d, 'image', default_img)).to_rgba()
flag = flag.resize_fixed_aspect(height=198) if flag.width / flag.height < 1.3 else flag.resize((318,198))
flag = flag.pad((1,1,0,1) if "Devín" in d['name'] else 1, "grey")
return Image.from_column([
Image.from_text_bounded(d['name'].replace(r"\n","\n"), (320 if "Switzerland" not in description else 200,200), 32, partial(FONT, bold=True), beard_line=True, align="center", fg=fg),
Image.from_text(description, FONT(24, italics=True), fg=fg),
flag
], padding=2, bg=bg, equal_widths=True)
title = Image.from_text(f"Black & Gold flags".upper(), FONT(80, bold=True), fg=fg, bg=bg).pad(40, bg)
grid = grid_chart(data, process, padding=(10,20), fg=fg, bg=bg, yalign=(0.5,1,0.5), row_label=lambda r: None if data.index[r].startswith("_") else Image.from_text("{}\nflags".format(data.index[r]).upper(), FONT(32, bold=True), align="center", line_spacing=3))
img = Image.from_column([title, grid, Rectangle((0,40))], bg=bg)
img.place(Image.from_text("/u/Udzu", FONT(24), fg=fg, bg=bg, padding=5).pad((1,1,0,0), fg), align=1, padding=5, copy=False)
img.save("output/flagsyk.png")
| Udzu/pudzu | dataviz/flagsyk.py | Python | mit | 1,756 |
from numpy import matrix, array, diag, sqrt, abs, ravel, ones, arange
from scipy import rand, real, isscalar, hstack
from scipy.sparse import csr_matrix, isspmatrix, bsr_matrix, isspmatrix_bsr,\
spdiags
import pyamg
from pyamg.util.utils import diag_sparse, profile_solver, to_type,\
type_prep, get_diagonal,\
get_block_diag, symmetric_rescaling, symmetric_rescaling_sa,\
relaxation_as_linear_operator, filter_operator, scale_T, get_Cpt_params,\
compute_BtBinv, eliminate_diag_dom_nodes
from numpy.testing import TestCase, assert_equal, assert_almost_equal,\
assert_array_almost_equal, assert_array_equal
class TestUtils(TestCase):
def test_diag_sparse(self):
# check sparse -> array
A = matrix([[-4]])
assert_equal(diag_sparse(csr_matrix(A)), [-4])
A = matrix([[1, 0, -5], [-2, 5, 0]])
assert_equal(diag_sparse(csr_matrix(A)), [1, 5])
A = matrix([[0, 1], [0, -5]])
assert_equal(diag_sparse(csr_matrix(A)), [0, -5])
A = matrix([[1.3, -4.7, 0], [-2.23, 5.5, 0], [9, 0, -2]])
assert_equal(diag_sparse(csr_matrix(A)), [1.3, 5.5, -2])
# check array -> sparse
A = matrix([[-4]])
assert_equal(diag_sparse(array([-4])).todense(),
csr_matrix(A).todense())
A = matrix([[1, 0], [0, 5]])
assert_equal(diag_sparse(array([1, 5])).todense(),
csr_matrix(A).todense())
A = matrix([[0, 0], [0, -5]])
assert_equal(diag_sparse(array([0, -5])).todense(),
csr_matrix(A).todense())
A = matrix([[1.3, 0, 0], [0, 5.5, 0], [0, 0, -2]])
assert_equal(diag_sparse(array([1.3, 5.5, -2])).todense(),
csr_matrix(A).todense())
def test_symmetric_rescaling(self):
cases = []
cases.append(diag_sparse(array([1, 2, 3, 4])))
cases.append(diag_sparse(array([1, 0, 3, 4])))
A = array([[5.5, 3.5, 4.8],
[2., 9.9, 0.5],
[6.5, 2.6, 5.7]])
A = csr_matrix(A)
cases.append(A)
P = diag_sparse([1, 0, 1])
cases.append(P*A*P)
P = diag_sparse([0, 1, 0])
cases.append(P*A*P)
P = diag_sparse([1, -1, 1])
cases.append(P*A*P)
for A in cases:
D_sqrt, D_sqrt_inv, DAD = symmetric_rescaling(A)
assert_almost_equal(diag_sparse(A) > 0, diag_sparse(DAD))
assert_almost_equal(diag_sparse(DAD), D_sqrt*D_sqrt_inv)
D_sqrt, D_sqrt_inv = diag_sparse(D_sqrt), diag_sparse(D_sqrt_inv)
assert_almost_equal((D_sqrt_inv*A*D_sqrt_inv).todense(),
DAD.todense())
def test_symmetric_rescaling_sa(self):
cases = []
# case 1
e = ones((5, 1)).ravel()
data = [-1*e, 2*e, -1*e]
A = spdiags(data, [-1, 0, 1], 5, 5).tocsr()
B = e.copy().reshape(-1, 1)
DAD_answer = array([[1., -0.5, 0., 0., 0.],
[-0.5, 1., -0.5, 0., 0.],
[0., -0.5, 1., -0.5, 0.],
[0., 0., -0.5, 1., -0.5],
[0., 0., 0., -0.5, 1.]])
DB_answer = sqrt(2*e.reshape(-1, 1))
# matrix B BH expected matrix expected B expected BH
cases.append((A, B, None, DAD_answer, DB_answer, None))
# case 2
A2 = A.copy()
A2.symmetry = 'nonsymmetric'
cases.append((A2, B.copy(), B.copy(), DAD_answer, DB_answer,
DB_answer))
# case 3
A3 = A.copy()
A3.symmetry = 'hermitian'
cases.append((A3, B.copy(), None, DAD_answer, DB_answer, None))
# case 4
B4 = hstack((B.copy(), 2*B.copy()))
DB4_answer = sqrt(2)*B4
A4 = A.copy()
A4.symmetry = 'nonsymmetric'
cases.append((A4, B4, B4.copy(), DAD_answer, DB4_answer, DB4_answer))
for case in cases:
[A, B, BH, DAD_answer, DB_answer, DBH_answer] = case
[DAD, DB, DBH] = symmetric_rescaling_sa(A, B, BH=BH)
assert_array_almost_equal(DAD.todense(), DAD_answer)
assert_array_almost_equal(DB, DB_answer)
if DBH_answer is not None:
assert_array_almost_equal(DBH, DBH_answer)
def test_profile_solver(self):
from scipy.sparse.linalg import cg
from pyamg.gallery import poisson
from pyamg.aggregation import smoothed_aggregation_solver
A = poisson((100, 100), format='csr')
ml = smoothed_aggregation_solver(A)
opts = []
opts.append({})
opts.append({'accel': cg})
opts.append({'accel': cg, 'tol': 1e-10})
for kwargs in opts:
residuals = profile_solver(ml, **kwargs)
del residuals
def test_get_block_diag(self):
from scipy import arange, ravel, array
from scipy.sparse import csr_matrix
A = csr_matrix(arange(1, 37, dtype=float).reshape(6, 6))
block_diag = get_block_diag(A, blocksize=1, inv_flag=False)
assert_array_almost_equal(ravel(block_diag), A.diagonal())
block_diag = get_block_diag(A, blocksize=2, inv_flag=False)
answer = array([[[1., 2.],
[7., 8.]],
[[15., 16.],
[21., 22.]],
[[29., 30.],
[35., 36.]]])
assert_array_almost_equal(ravel(block_diag), ravel(answer))
block_diag_inv = get_block_diag(A, blocksize=2, inv_flag=True)
answer = array([[[-1.33333333, 0.33333333],
[1.16666667, -0.16666667]],
[[-3.66666667, 2.66666667],
[3.5, -2.5]],
[[-6., 5.],
[5.83333333, -4.83333333]]])
assert_array_almost_equal(ravel(block_diag_inv), ravel(answer),
decimal=3)
# try singular (1,1) block, a zero (2,2) block and a zero (0,2) block
A = bsr_matrix(array([1., 2., 3., 4., 0., 0.,
5., 6., 7., 8., 0., 0.,
9., 10., 11., 11., 12., 13.,
14., 15., 16., 16., 18., 19.,
20., 21., 22., 23., 0., 0.,
26., 27., 28., 29., 0., 0., ]).reshape(6, 6),
blocksize=(3, 3))
block_diag_inv = get_block_diag(A, blocksize=2, inv_flag=True)
answer = array([[[-1.5, 0.5],
[1.25, -0.25]],
[[0.01458886, 0.02122016],
[0.01458886, 0.02122016]],
[[0., 0.],
[0., 0.]]])
assert_array_almost_equal(ravel(block_diag_inv), ravel(answer),
decimal=3)
# try with different types of zero blocks
A = bsr_matrix(array([0., 0., 3., 4., 0., 0.,
0., 0., 7., 8., 0., 0.,
0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 22., 23.,
0., 0., 0., 0., 28., 29., ]).reshape(6, 6),
blocksize=(2, 2))
block_diag_inv = get_block_diag(A, blocksize=2, inv_flag=False)
answer = array([[[0., 0.],
[0., 0.]],
[[0., 0.],
[0., 0.]],
[[22., 23.],
[28., 29.]]])
assert_array_almost_equal(ravel(block_diag_inv), ravel(answer),
decimal=3)
def test_relaxation_as_linear_operator(self):
As = []
bs = []
xs = []
methods = ['gauss_seidel', 'jacobi', 'block_gauss_seidel',
'block_jacobi']
params = [{}, {'iterations': 2}]
As.append(pyamg.gallery.poisson((10, 10), format='csr'))
As.append(1.0j*pyamg.gallery.poisson((10, 10), format='csr'))
As.append(1.0j*pyamg.gallery.elasticity.linear_elasticity((20, 20))[0])
As.append(pyamg.gallery.elasticity.linear_elasticity((20, 20))[0])
for A in As:
if A.dtype == 'complex':
xs.append(rand(A.shape[0], 1)+1.0j*rand(A.shape[0], 1))
bs.append(rand(A.shape[0], 1)+1.0j*rand(A.shape[0], 1))
else:
bs.append(rand(A.shape[0], 1))
xs.append(rand(A.shape[0], 1))
for method in methods:
for kwargs in params:
for (A, x, b) in zip(As, xs, bs):
kwargs_linop = dict(kwargs)
# run relaxation as a linear operator
if kwargs_linop == dict({}):
relax = relaxation_as_linear_operator(method, A, b)
else:
fmethod = (method, kwargs_linop)
relax = relaxation_as_linear_operator(fmethod, A, b)
x_linop = relax*x
# manually run the relaxation routine
relax2 = getattr(pyamg.relaxation, method)
x_gold = x.copy()
blockflag = False
kwargs_gold = dict(kwargs)
# deal with block matrices
if method.startswith('block') and isspmatrix_bsr(A):
blockflag = True
kwargs_gold['blocksize'] = A.blocksize[0]
# deal with omega and jacobi
# --> note that we assume the default setup for jacobi uses
# omega = 1/rho
if method.endswith('jacobi'):
if blockflag:
kwargs_gold['omega'] = 1.0/A.rho_block_D_inv
else:
kwargs_gold['omega'] = 1.0/A.rho_D_inv
relax2(A, x_gold, b, **kwargs_gold)
assert_array_almost_equal(x_linop, x_gold)
def test_filter_operator(self):
# Basic tests of dimension 1 and 2 problems
# 1x1
A = csr_matrix(array([[1.2]]))
C = csr_matrix(array([[1.]]))
B = array([[0.5]])
Bf = array([[1.5]])
A_filter = filter_operator(A, C, B, Bf).todense()
A_known = matrix([[3.0]])
assert_array_almost_equal(A_known, A_filter)
# 1x1, but with no entries in C
C = csr_matrix(array([[0.]]))
A_filter = filter_operator(A, C, B, Bf).todense()
A_known = array([[0.0]])
assert_array_almost_equal(A_known, A_filter)
# 1x1, but with no entries in A
A = csr_matrix(array([[0.]]))
C = csr_matrix(array([[1.]]))
A_filter = filter_operator(A, C, B, Bf).todense()
A_known = array([[3.0]])
assert_array_almost_equal(A_known, A_filter)
# 1x2
A = csr_matrix(array([[1.2, 1.]]))
C = csr_matrix(array([[1., 1.]]))
B = array([[0.5], [0.5]])
Bf = array([[1.5]])
A_filter = filter_operator(A, C, B, Bf).todense()
A_known = matrix([[1.6, 1.4]])
assert_array_almost_equal(A_known, A_filter)
# 1x2, but sparser
C = csr_matrix(array([[0., 1.]]))
A_filter = filter_operator(A, C, B, Bf).todense()
A_known = array([[0., 3.]])
assert_array_almost_equal(A_known, A_filter)
# 1x2, but with no entries
C = csr_matrix(array([[0., 0.]]))
A_filter = filter_operator(A, C, B, Bf).todense()
A_known = array([[0., 0.]])
assert_array_almost_equal(A_known, A_filter)
# 2x1
A = csr_matrix(array([[1.2], [1.]]))
C = csr_matrix(array([[1.], [1.]]))
B = array([[0.5]])
Bf = array([[1.5], [0.4]])
A_filter = filter_operator(A, C, B, Bf).todense()
A_known = matrix([[3.], [0.8]])
assert_array_almost_equal(A_known, A_filter)
# 2x1, but sparser
C = csr_matrix(array([[0.], [1.]]))
A_filter = filter_operator(A, C, B, Bf).todense()
A_known = array([[0.], [.8]])
assert_array_almost_equal(A_known, A_filter)
# 2x1, but with no entries
C = csr_matrix(array([[0.], [0.]]))
A_filter = filter_operator(A, C, B, Bf).todense()
A_known = array([[0.], [0.]])
assert_array_almost_equal(A_known, A_filter)
# 2x2
A = csr_matrix(array([[1.2, 1.1], [1., 0.5]]))
C = csr_matrix(array([[1.2, 1.1], [1., 0.]]))
B = array([[0.5, 1.0], [0.5, 1.1]])
Bf = array([[0.5, 1.0], [0.5, 1.1]])
A_filter = filter_operator(A, C, B, Bf).todense()
A_known = array([[1., 0.], [1.08, 0.]])
assert_array_almost_equal(A_known, A_filter)
        # 2x2, but sparser
C = csr_matrix(array([[0., 0.], [1., 0.]]))
A_filter = filter_operator(A, C, B, Bf).todense()
A_known = array([[0., 0.], [1.08, 0.]])
assert_array_almost_equal(A_known, A_filter)
# Try block structure
A = A.tobsr((2, 2))
C = C.tobsr((2, 2))
A_filter = filter_operator(A, C, B, Bf).todense()
A_known = array([[1., 0.], [0., 1.]])
assert_array_almost_equal(A_known, A_filter)
# Basic tests, with easy to compute answers
# test one, the constant
A = array([[1., 1, 1],
[1, 1, 1],
[0, 1, 0],
[0, 1, 0],
[0, 0, 1],
[0, 0, 1]])
C = array([[1., 1, 0],
[1, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 0, 1],
[0, 0, 1]])
B = ones((3, 1))
Bf = ones((6, 1))
A_filter = filter_operator(csr_matrix(A), csr_matrix(C), B, Bf)
A_filter = A_filter.todense()
A_known = matrix([[0.5, 0.5, 0.],
[0.5, 0.5, 0.],
[0., 1., 0.],
[0., 1., 0.],
[0., 0., 1.],
[0., 0., 1.]])
assert_array_almost_equal(A_known, A_filter)
# test two, the constant and linears
B = hstack((B, arange(B.shape[0]).reshape(-1, 1)))
Bf = hstack((Bf, arange(Bf.shape[0]).reshape(-1, 1)))
A_filter = filter_operator(csr_matrix(A), csr_matrix(C), B, Bf)
A_filter = A_filter.todense()
A_known = matrix([[1., 0., 0.],
[0., 1., 0.],
[0., 1.5, 0.],
[0., 2., 0.],
[0., 0., 1.8],
[0., 0., 2.2]])
assert_array_almost_equal(A_known, A_filter)
# Run two tests based on the Laplacian
# first test, constants
from pyamg.gallery import poisson
A = poisson((10, 10), format='csr')
C = A.copy()
C.data[arange(0, C.nnz, 5)] = 0.0
C.eliminate_zeros()
B = ones((A.shape[0], 1))
Bf = ones((A.shape[0], 1))
A_filter = filter_operator(A, C, B, Bf)
assert_array_almost_equal(A_filter*B, Bf)
# second test, constants and linears
B = hstack((B, arange(B.shape[0]).reshape(-1, 1)))
Bf = hstack((Bf, arange(Bf.shape[0]).reshape(-1, 1)))
A_filter = filter_operator(A, C, B, Bf)
assert_array_almost_equal(A_filter*B, Bf)
def test_scale_T(self):
from scipy.sparse import bsr_matrix
from scipy import matrix
# Trivially sized tests
# 1x1
T = matrix([[1.1]])
P_I = matrix([[1.0]])
I_F = matrix([[0.0]])
T_scaled = scale_T(bsr_matrix(T), bsr_matrix(P_I), bsr_matrix(I_F))
T_scaled = T_scaled.todense()
T_answer = matrix([[1.0]])
assert_array_almost_equal(T_answer, T_scaled)
T = matrix([[1.1]])
P_I = matrix([[0.0]])
I_F = matrix([[1.0]])
T_scaled = scale_T(bsr_matrix(T), bsr_matrix(P_I), bsr_matrix(I_F))
T_scaled = T_scaled.todense()
T_answer = matrix([[1.1]])
assert_array_almost_equal(T_answer, T_scaled)
T = matrix([[0.0]])
P_I = matrix([[0.0]])
I_F = matrix([[1.0]])
T_scaled = scale_T(bsr_matrix(T), bsr_matrix(P_I), bsr_matrix(I_F))
T_scaled = T_scaled.todense()
T_answer = matrix([[0.]])
assert_array_almost_equal(T_answer, T_scaled)
# 2x1
T = matrix([[1.5], [1.2]])
P_I = matrix([[1.], [0.]])
I_F = matrix([[0., 0.], [0., 1.]])
T_scaled = scale_T(bsr_matrix(T), bsr_matrix(P_I), bsr_matrix(I_F))
T_scaled = T_scaled.todense()
T_answer = matrix([[1.], [0.8]])
assert_array_almost_equal(T_answer, T_scaled)
T = matrix([[0.], [1.2]])
P_I = matrix([[1.], [0.]])
I_F = matrix([[0., 0.], [0., 1.]])
T_scaled = scale_T(bsr_matrix(T), bsr_matrix(P_I), bsr_matrix(I_F))
T_scaled = T_scaled.todense()
T_answer = matrix([[1.], [0.]])
assert_array_almost_equal(T_answer, T_scaled)
T = matrix([[0.], [0.]])
P_I = matrix([[1.], [0.]])
I_F = matrix([[0., 0.], [0., 1.]])
T_scaled = scale_T(bsr_matrix(T), bsr_matrix(P_I), bsr_matrix(I_F))
T_scaled = T_scaled.todense()
T_answer = matrix([[1.], [0.]])
assert_array_almost_equal(T_answer, T_scaled)
T = matrix([[0.], [0.]])
P_I = matrix([[0.], [0.]])
I_F = matrix([[1., 0.], [0., 1.]])
T_scaled = scale_T(bsr_matrix(T), bsr_matrix(P_I), bsr_matrix(I_F))
T_scaled = T_scaled.todense()
T_answer = matrix([[0.], [0.]])
assert_array_almost_equal(T_answer, T_scaled)
# 2x2
T = matrix([[2., 0.], [1., 1.]])
P_I = matrix([[1., 0.], [0., 1.]])
I_F = matrix([[0., 0.], [0., 0.]])
T_scaled = scale_T(bsr_matrix(T, blocksize=(1, 1)),
bsr_matrix(P_I, blocksize=(1, 1)),
bsr_matrix(I_F, blocksize=(1, 1))).todense()
T_answer = matrix([[1., 0.], [0., 1.]])
assert_array_almost_equal(T_answer, T_scaled)
T = matrix([[2., 0.], [1., 1.]])
P_I = matrix([[1., 0.], [0., 1.]])
I_F = matrix([[0., 0.], [0., 0.]])
T_scaled = scale_T(bsr_matrix(T, blocksize=(2, 2)),
bsr_matrix(P_I, blocksize=(2, 2)),
bsr_matrix(I_F, blocksize=(2, 2))).todense()
T_answer = matrix([[1., 0.], [0., 1.]])
assert_array_almost_equal(T_answer, T_scaled)
T = matrix([[2., 0.], [1., 1.]])
P_I = matrix([[0., 0.], [0., 0.]])
I_F = matrix([[1., 0.], [0., 1.]])
T_scaled = scale_T(bsr_matrix(T, blocksize=(2, 2)),
bsr_matrix(P_I, blocksize=(2, 2)),
bsr_matrix(I_F, blocksize=(2, 2))).todense()
T_answer = matrix([[2., 0.], [1., 1.]])
assert_array_almost_equal(T_answer, T_scaled)
# Test for one CSR and one BSR example
T = matrix([[1.0, 0., 0.],
[0.5, 0., 0.],
[0., 1., 0.],
[0., 0.5, 0.],
[0., 0., 1.],
[0., 0., 0.25]])
P_I = matrix([[0., 0., 0.],
[1., 0., 0.],
[0., 1., 0.],
[0., 0., 0.],
[0., 0., 0.],
[0., 0., 1.]])
I_F = matrix([[1., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0.],
[0., 0., 0., 1., 0., 0.],
[0., 0., 0., 0., 1., 0.],
[0., 0., 0., 0., 0., 0.]])
T_answer = matrix([[2., 0., 0.],
[1., 0., 0.],
[0., 1., 0.],
[0., 0.5, 0.],
[0., 0., 4.],
[0., 0., 1.]])
T_scaled = scale_T(bsr_matrix(T), bsr_matrix(P_I), bsr_matrix(I_F))
T_scaled = T_scaled.todense()
assert_array_almost_equal(T_answer, T_scaled)
# BSR test
T = matrix([[1.0, 1., 0., 0.],
[0.5, 1., 0., 0.],
[1., 0., 0., 0.],
[0., 1., 0., 0.],
[0., 0., 2., 1.],
[0., 0., 3., 1.],
[0., 0., 4., 1.],
[0., 0., 2., 0.]])
P_I = matrix([[0., 0., 0., 0.],
[0., 0., 0., 0.],
[1., 0., 0., 0.],
[0., 1., 0., 0.],
[0., 0., 1., 0.],
[0., 0., 0., 1.],
[0., 0., 0., 0.],
[0., 0., 0., 0.]])
I_F = matrix([[1., 0., 0., 0., 0., 0., 0., 0.],
[0., 1., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 1., 0.],
[0., 0., 0., 0., 0., 0., 0., 1.]])
T_answer = matrix([[1., 1., 0., 0.],
[0.5, 1., 0., 0.],
[1., 0., 0., 0.],
[0., 1., 0., 0.],
[0., 0., 1., 0.],
[0., 0., 0., 1.],
[0., 0., -1., 2.],
[0., 0., -2., 2.]])
T = bsr_matrix(T, blocksize=(2, 2))
P_I = bsr_matrix(P_I, blocksize=(2, 2))
I_F = bsr_matrix(I_F, blocksize=(2, 2))
T_scaled = scale_T(T, P_I, I_F).todense()
assert_array_almost_equal(T_answer, T_scaled)
# BSR test
T = matrix([[1.0, 1., 0., 0.],
[0.5, 1., 0., 0.],
[1., 1., 0., 0.],
[1., 1., 0., 0.],
[0., 0., 2., 1.],
[0., 0., 3., 1.],
[0., 0., 4., 1.],
[0., 0., 2., 0.]])
P_I = matrix([[0., 0., 0., 0.],
[0., 0., 0., 0.],
[1., 0., 0., 0.],
[0., 1., 0., 0.],
[0., 0., 1., 0.],
[0., 0., 0., 1.],
[0., 0., 0., 0.],
[0., 0., 0., 0.]])
I_F = matrix([[1., 0., 0., 0., 0., 0., 0., 0.],
[0., 1., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 1., 0.],
[0., 0., 0., 0., 0., 0., 0., 1.]])
T_answer = matrix([[0.5, 0.5, 0., 0.],
[0.375, 0.375, 0., 0.],
[1., 0., 0., 0.],
[0., 1., 0., 0.],
[0., 0., 1., 0.],
[0., 0., 0., 1.],
[0., 0., -1., 2.],
[0., 0., -2., 2.]])
# Cpts = array([1, 2])
T = bsr_matrix(T, blocksize=(2, 2))
P_I = bsr_matrix(P_I, blocksize=(2, 2))
I_F = bsr_matrix(I_F, blocksize=(2, 2))
T_scaled = scale_T(T, P_I, I_F).todense()
assert_array_almost_equal(T_answer, T_scaled)
def test_get_Cpt_params(self):
from pyamg.gallery import poisson
from scipy.sparse import csr_matrix, bsr_matrix
# Begin with trivially sized tests
# 1x1
A = csr_matrix(array([[1.2]]))
Cpts = array([0])
AggOp = csr_matrix(array([[1.]]))
T = AggOp.copy().tobsr()
params = get_Cpt_params(A, Cpts, AggOp, T)
I_C = bsr_matrix(array([[1.]]), blocksize=(1, 1))
I_F = bsr_matrix(array([[0.]]), blocksize=(1, 1))
P_I = bsr_matrix(array([[1.]]), blocksize=(1, 1))
assert_equal(array([0]), params['Cpts'])
assert_equal(array([]), params['Fpts'])
assert_equal(I_C.indptr, params['I_C'].indptr)
assert_equal(I_C.indices, params['I_C'].indices)
assert_equal(I_C.data, params['I_C'].data)
assert_equal(I_F.indptr, params['I_F'].indptr)
assert_equal(I_F.indices, params['I_F'].indices)
assert_equal(I_F.data, params['I_F'].data)
assert_equal(P_I.indptr, params['P_I'].indptr)
assert_equal(P_I.indices, params['P_I'].indices)
assert_equal(P_I.data, params['P_I'].data)
A = csr_matrix(array([[1.2]]))
Cpts = array([])
AggOp = csr_matrix(array([[1.]]))
T = AggOp.copy().tobsr()
params = get_Cpt_params(A, Cpts, AggOp, T)
I_C = bsr_matrix(array([[0.]]), blocksize=(1, 1))
I_F = bsr_matrix(array([[1.]]), blocksize=(1, 1))
P_I = bsr_matrix(array([[0.]]), blocksize=(1, 1))
assert_equal(array([]), params['Cpts'])
assert_equal(array([0]), params['Fpts'])
assert_equal(I_C.indptr, params['I_C'].indptr)
assert_equal(I_C.indices, params['I_C'].indices)
assert_equal(I_C.data, params['I_C'].data)
assert_equal(I_F.indptr, params['I_F'].indptr)
assert_equal(I_F.indices, params['I_F'].indices)
assert_equal(I_F.data, params['I_F'].data)
assert_equal(P_I.indptr, params['P_I'].indptr)
assert_equal(P_I.indices, params['P_I'].indices)
assert_equal(P_I.data, params['P_I'].data)
# 2x2
A = csr_matrix(array([[1., 1.], [1., 1.]]))
Cpts = array([0])
AggOp = csr_matrix(array([[1.], [1.]]))
T = AggOp.copy().tobsr()
params = get_Cpt_params(A, Cpts, AggOp, T)
I_C = bsr_matrix(array([[1., 0.], [0., 0.]]), blocksize=(1, 1))
I_F = bsr_matrix(array([[0., 0.], [0., 1.]]), blocksize=(1, 1))
P_I = bsr_matrix(array([[1.], [0.]]), blocksize=(1, 1))
assert_equal(array([0]), params['Cpts'])
assert_equal(array([1]), params['Fpts'])
assert_equal(I_C.indptr, params['I_C'].indptr)
assert_equal(I_C.indices, params['I_C'].indices)
assert_equal(I_C.data, params['I_C'].data)
assert_equal(I_F.indptr, params['I_F'].indptr)
assert_equal(I_F.indices, params['I_F'].indices)
assert_equal(I_F.data, params['I_F'].data)
assert_equal(P_I.indptr, params['P_I'].indptr)
assert_equal(P_I.indices, params['P_I'].indices)
assert_equal(P_I.data, params['P_I'].data)
Cpts = array([0, 1])
AggOp = csr_matrix(array([[1., 0], [0., 1.]]))
T = AggOp.copy().tobsr()
params = get_Cpt_params(A, Cpts, AggOp, T)
I_C = bsr_matrix(array([[1., 0.], [0., 1.]]), blocksize=(1, 1))
I_F = bsr_matrix(array([[0., 0.], [0., 0.]]), blocksize=(1, 1))
P_I = bsr_matrix(array([[1., 0.], [0., 1.]]), blocksize=(1, 1))
assert_equal(array([0, 1]), params['Cpts'])
assert_equal(array([]), params['Fpts'])
assert_equal(I_C.indptr, params['I_C'].indptr)
assert_equal(I_C.indices, params['I_C'].indices)
assert_equal(I_C.data, params['I_C'].data)
assert_equal(I_F.indptr, params['I_F'].indptr)
assert_equal(I_F.indices, params['I_F'].indices)
assert_equal(I_F.data, params['I_F'].data)
assert_equal(P_I.indptr, params['P_I'].indptr)
assert_equal(P_I.indices, params['P_I'].indices)
assert_equal(P_I.data, params['P_I'].data)
Cpts = array([])
AggOp = csr_matrix(array([[0.], [0.]]))
T = AggOp.copy().tobsr()
params = get_Cpt_params(A, Cpts, AggOp, T)
I_C = bsr_matrix(array([[0., 0.], [0., 0.]]), blocksize=(1, 1))
I_F = bsr_matrix(array([[1., 0.], [0., 1.]]), blocksize=(1, 1))
P_I = bsr_matrix(array([[0.], [0.]]), blocksize=(1, 1))
assert_equal(array([]), params['Cpts'])
assert_equal(array([0, 1]), params['Fpts'])
assert_equal(I_C.indptr, params['I_C'].indptr)
assert_equal(I_C.indices, params['I_C'].indices)
assert_equal(I_C.data, params['I_C'].data)
assert_equal(I_F.indptr, params['I_F'].indptr)
assert_equal(I_F.indices, params['I_F'].indices)
assert_equal(I_F.data, params['I_F'].data)
assert_equal(P_I.indptr, params['P_I'].indptr)
assert_equal(P_I.indices, params['P_I'].indices)
assert_equal(P_I.data, params['P_I'].data)
A = A.tobsr(blocksize=(2, 2))
Cpts = array([0])
AggOp = csr_matrix(array([[1.]]))
T = bsr_matrix(array([[1., 1.], [1., 2.]]), blocksize=(2, 2))
params = get_Cpt_params(A, Cpts, AggOp, T)
I_C = bsr_matrix(array([[1., 0.], [0., 1.]]), blocksize=(2, 2))
I_F = bsr_matrix(array([[0., 0.], [0., 0.]]), blocksize=(2, 2))
P_I = bsr_matrix(array([[1., 0.], [0., 1.]]), blocksize=(2, 2))
assert_equal(array([0, 1]), params['Cpts'])
assert_equal(array([]), params['Fpts'])
assert_equal(I_C.indptr, params['I_C'].indptr)
assert_equal(I_C.indices, params['I_C'].indices)
assert_equal(I_C.data, params['I_C'].data)
assert_equal(I_F.indptr, params['I_F'].indptr)
assert_equal(I_F.indices, params['I_F'].indices)
assert_equal(I_F.data, params['I_F'].data)
assert_equal(P_I.indptr, params['P_I'].indptr)
assert_equal(P_I.indices, params['P_I'].indices)
assert_equal(P_I.data, params['P_I'].data)
Cpts = array([])
AggOp = csr_matrix(array([[1.]]))
T = bsr_matrix(array([[1., 1.], [1., 2.]]), blocksize=(2, 2))
params = get_Cpt_params(A, Cpts, AggOp, T)
I_C = bsr_matrix(array([[0., 0.], [0., 0.]]), blocksize=(2, 2))
I_F = bsr_matrix(array([[1., 0.], [0., 1.]]), blocksize=(2, 2))
P_I = bsr_matrix(array([[0., 0.], [0., 0.]]), blocksize=(2, 2))
assert_equal(array([]), params['Cpts'])
assert_equal(array([0, 1]), params['Fpts'])
assert_equal(I_C.indptr, params['I_C'].indptr)
assert_equal(I_C.indices, params['I_C'].indices)
assert_equal(I_C.data, params['I_C'].data)
assert_equal(I_F.indptr, params['I_F'].indptr)
assert_equal(I_F.indices, params['I_F'].indices)
assert_equal(I_F.data, params['I_F'].data)
assert_equal(P_I.indptr, params['P_I'].indptr)
assert_equal(P_I.indices, params['P_I'].indices)
assert_equal(P_I.data, params['P_I'].data)
# Begin more "realistic" tests
A = poisson((10,), format='csr')
Cpts = array([3, 7])
AggOp = ([[1., 0.],
[1., 0.],
[1., 0.],
[1., 0.],
[1., 0.],
[0., 1.],
[0., 1.],
[0., 1.],
[0., 1.],
[0., 1.]])
AggOp = csr_matrix(AggOp)
T = AggOp.copy().tobsr()
# CSR Test
params = get_Cpt_params(A, Cpts, AggOp, T)
I_C = bsr_matrix((array([[[1.]], [[1.]]]),
array([3, 7]),
array([0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2])),
shape=(10, 10))
I_F = bsr_matrix((array([[[1.]], [[1.]], [[1.]], [[1.]],
[[1.]], [[1.]], [[1.]], [[1.]]]),
array([0, 1, 2, 4, 5, 6, 8, 9]),
array([0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8])),
shape=(10, 10))
P_I = matrix([[0., 0.],
[0., 0.],
[0., 0.],
[1., 0.],
[0., 0.],
[0., 0.],
[0., 0.],
[0., 1.],
[0., 0.],
[0., 0.]])
P_I = bsr_matrix(P_I, blocksize=(1, 1))
Fpts = array([0, 1, 2, 4, 5, 6, 8, 9])
assert_equal(Cpts, params['Cpts'])
assert_equal(Fpts, params['Fpts'])
assert_equal(I_C.indptr, params['I_C'].indptr)
assert_equal(I_C.indices, params['I_C'].indices)
assert_equal(I_C.data, params['I_C'].data)
assert_equal(I_F.indptr, params['I_F'].indptr)
assert_equal(I_F.indices, params['I_F'].indices)
assert_equal(I_F.data, params['I_F'].data)
assert_equal(P_I.indptr, params['P_I'].indptr)
assert_equal(P_I.indices, params['P_I'].indices)
assert_equal(P_I.data, params['P_I'].data)
# BSR Test
A = A.tobsr(blocksize=(2, 2))
Cpts = array([1, 3])
AggOp = ([[1., 0.],
[1., 0.],
[1., 0.],
[0., 1.],
[0., 1.]])
AggOp = csr_matrix(AggOp)
T = hstack((T.todense(), T.todense()))[:, [0, 2, 1, 3]]
T = bsr_matrix(T, blocksize=(2, 2))
params = get_Cpt_params(A, Cpts, AggOp, T)
I_C = bsr_matrix((array([[[1., 0.], [0., 1.]],
[[1., 0.], [0., 1.]]]),
array([1, 3]),
array([0, 0, 1, 1, 2, 2])),
shape=(10, 10))
I_F = bsr_matrix((array([[[1., 0.], [0., 1.]],
[[1., 0.], [0., 1.]],
[[1., 0.], [0., 1.]]]),
array([0, 2, 4]),
array([0, 1, 1, 2, 2, 3])),
shape=(10, 10))
P_I = matrix([[0., 0., 0., 0.],
[0., 0., 0., 0.],
[1., 0., 0., 0.],
[0., 1., 0., 0.],
[0., 0., 0., 0.],
[0., 0., 0., 0.],
[0., 0., 1., 0.],
[0., 0., 0., 1.],
[0., 0., 0., 0.],
[0., 0., 0., 0.]])
P_I = bsr_matrix(P_I, blocksize=(2, 2))
Fpts = array([0, 1, 4, 5, 8, 9])
Cpts = array([2, 3, 6, 7])
assert_equal(Cpts, params['Cpts'])
assert_equal(Fpts, params['Fpts'])
assert_equal(I_C.indptr, params['I_C'].indptr)
assert_equal(I_C.indices, params['I_C'].indices)
assert_equal(I_C.data, params['I_C'].data)
assert_equal(I_F.indptr, params['I_F'].indptr)
assert_equal(I_F.indices, params['I_F'].indices)
assert_equal(I_F.data, params['I_F'].data)
assert_equal(P_I.indptr, params['P_I'].indptr)
assert_equal(P_I.indices, params['P_I'].indices)
assert_equal(P_I.data, params['P_I'].data)
def test_compute_BtBinv(self):
# Trivially sized tests
# 1x1x1
T = matrix([[1.]])
T = bsr_matrix(T, blocksize=(1, 1))
B = array([[1.]])
BtBinv = compute_BtBinv(B, T)
answer = array([[[1.]]])
assert_array_almost_equal(BtBinv, answer)
T = matrix([[1.]])
T = bsr_matrix(T, blocksize=(1, 1))
B = array([[0.]])
BtBinv = compute_BtBinv(B, T)
answer = array([[[0.]]])
assert_array_almost_equal(BtBinv, answer)
T = matrix([[1.]])
T = bsr_matrix(T, blocksize=(1, 1))
B = array([[0.5]])
BtBinv = compute_BtBinv(B, T)
answer = array([[[4.]]])
assert_array_almost_equal(BtBinv, answer)
# 2x1x1
T = matrix([[1., 0.], [1., 1.]])
T = bsr_matrix(T, blocksize=(1, 1))
B = array([[1.], [1.]])
BtBinv = compute_BtBinv(B, T)
answer = array([[[1.]], [[0.5]]])
assert_array_almost_equal(BtBinv, answer)
T = matrix([[1., 0.], [1., 1.]])
T = bsr_matrix(T, blocksize=(1, 1))
B = array([[0.], [1.]])
BtBinv = compute_BtBinv(B, T)
answer = array([[[0.]], [[1.]]])
assert_array_almost_equal(BtBinv, answer)
T = matrix([[1., 0.], [1., 1.]])
T = bsr_matrix(T, blocksize=(2, 2))
B = array([[0.], [2.]])
BtBinv = compute_BtBinv(B, T)
answer = array([[[0.25]]])
assert_array_almost_equal(BtBinv, answer)
T = matrix([[1., 0.], [1., 0.],
[0., .5], [0., .25]])
T = bsr_matrix(T, blocksize=(1, 1))
B = array([[1.], [2.]])
BtBinv = compute_BtBinv(B, T)
answer = array([[[1.]], [[1.]],
[[0.25]], [[0.25]]])
assert_array_almost_equal(BtBinv, answer)
T = matrix([[1., 0.], [0., .25]])
T = bsr_matrix(T, blocksize=(1, 1))
B = array([[1., 1.], [2., 1.]])
BtBinv = compute_BtBinv(B, T)
answer = array([[[0.25, 0.25], [0.25, 0.25]],
[[0.16, 0.08], [0.08, 0.04]]])
assert_array_almost_equal(BtBinv, answer)
T = matrix([[1., 0.], [0., .25]])
T = bsr_matrix(T, blocksize=(2, 2))
B = array([[1., 1.], [1., 1.]])
BtBinv = compute_BtBinv(B, T)
answer = array([[[0.125, 0.125],
[0.125, 0.125]]])
assert_array_almost_equal(BtBinv, answer)
# Simple BSR test
T = matrix([[1., 1., 0., 0.],
[1., 1., 0., 0.],
[0., 0., 0.5, 0.5],
[0., 0., 0.25, 0.25]])
T = bsr_matrix(T, blocksize=(2, 2))
B = array([[1., 1.], [1., 2.], [1., 1.], [1., 3.]])
BtBinv = compute_BtBinv(B, T)
answer = array([[[5., -3.], [-3., 2.]],
[[2.5, -1.], [-1., 0.5]]])
assert_array_almost_equal(BtBinv, answer)
def test_eliminate_diag_dom_nodes(self):
# Simple CSR test
from pyamg.gallery import poisson
A = poisson((4,), format='csr')
C = eliminate_diag_dom_nodes(A, A.copy(), 1.1)
answer = array([[1., 0., 0., 0.],
[0., 2., -1., 0.],
[0., -1., 2., 0.],
[0., 0., 0., 1.]])
assert_array_almost_equal(C.todense(), answer)
# Simple BSR test
from pyamg.gallery import poisson
A = poisson((6,), format='csr')
A.data[3] = 5.0
A = A.tobsr((2, 2))
C = poisson((3,), format='csr')
C = eliminate_diag_dom_nodes(A, C, 1.1)
answer = array([[1., 0., 0.],
[0., 2., -1.],
[0., -1., 2.]])
assert_array_almost_equal(C.todense(), answer)
def test_remove_diagonal(self):
from pyamg.gallery import poisson
from pyamg.util.utils import remove_diagonal
A = poisson((4,), format='csr')
C = remove_diagonal(A)
exact = array([[0., -1., 0., 0.],
[-1., 0., -1., 0.],
[0., -1., 0., -1.],
[0., 0., -1., 0.]])
assert_array_almost_equal(C.todense(), exact)
def test_scale_rows_by_largest_entry(self):
from pyamg.gallery import poisson
from pyamg.util.utils import scale_rows_by_largest_entry
A = poisson((4,), format='csr')
A.data = array(A.data, dtype=complex)
A.data[1] = 8.
A = scale_rows_by_largest_entry(A)
exact = array([[0.25, 1., 0., 0.],
[-0.5, 1., -0.5, 0.],
[0., -0.5, 1., -0.5],
[0., 0., -0.5, 1.]])
assert_array_almost_equal(A.todense(), exact)
class TestComplexUtils(TestCase):
def test_diag_sparse(self):
# check sparse -> array
A = matrix([[-4-4.0j]])
assert_equal(diag_sparse(csr_matrix(A)), [-4-4.0j])
A = matrix([[1, 0, -5], [-2, 5-2.0j, 0]])
assert_equal(diag_sparse(csr_matrix(A)), [1, 5-2.0j])
# check array -> sparse
A = matrix([[-4+1.0j]])
assert_equal(diag_sparse(array([-4+1.0j])).todense(),
csr_matrix(A).todense())
A = matrix([[1, 0], [0, 5-2.0j]])
assert_equal(diag_sparse(array([1, 5-2.0j])).todense(),
csr_matrix(A).todense())
def test_symmetric_rescaling(self):
cases = []
A = array([[5.5+1.0j, 3.5, 4.8],
[2., 9.9, 0.5-2.0j],
[6.5, 2.6, 5.7+1.0j]])
A = csr_matrix(A)
cases.append(A)
P = diag_sparse([1, 0, 1.0j])
cases.append(P*A*P)
P = diag_sparse([0, 1+1.0j, 0])
cases.append(P*A*P)
for A in cases:
D_sqrt, D_sqrt_inv, DAD = symmetric_rescaling(A)
assert_almost_equal(diag_sparse(A) != 0, real(diag_sparse(DAD)))
assert_almost_equal(diag_sparse(DAD), D_sqrt*D_sqrt_inv)
D_sqrt, D_sqrt_inv = diag_sparse(D_sqrt), diag_sparse(D_sqrt_inv)
assert_almost_equal((D_sqrt_inv*A*D_sqrt_inv).todense(),
DAD.todense())
def test_symmetric_rescaling_sa(self):
cases = []
# case 1
e = 1.0j*ones((5, 1)).ravel()
data = [-1*e, 2*e, -1*e]
A = 1.0j*spdiags(data, [-1, 0, 1], 5, 5).tocsr()
B = e.copy().reshape(-1, 1)
DAD_answer = array([[1., -0.5, 0., 0., 0.],
[-0.5, 1., -0.5, 0., 0.],
[0., -0.5, 1., -0.5, 0.],
[0., 0., -0.5, 1., -0.5],
[0., 0., 0., -0.5, 1.]])
DB_answer = sqrt(2)*1.0j*e.reshape(-1, 1)
# matrix B BH expected matrix expected B expected BH
cases.append((A, B, None, DAD_answer, DB_answer, None))
for case in cases:
[A, B, BH, DAD_answer, DB_answer, DBH_answer] = case
[DAD, DB, DBH] = symmetric_rescaling_sa(A, B, BH=BH)
assert_array_almost_equal(DAD.todense(), DAD_answer)
assert_array_almost_equal(DB, DB_answer)
if DBH_answer is not None:
assert_array_almost_equal(DBH, DBH_answer)
def test_get_diagonal(self):
cases = []
for i in range(1, 6):
A = rand(i, i)
Ai = A + 1.0j*rand(i, i)
cases.append(csr_matrix(A))
cases.append(csr_matrix(Ai))
for A in cases:
D_A = get_diagonal(A, norm_eq=False, inv=False)
D_A_inv = get_diagonal(A, norm_eq=False, inv=True)
D_AA = get_diagonal(A, norm_eq=1, inv=False)
D_AA_inv = get_diagonal(A, norm_eq=1, inv=True)
D_AA2 = get_diagonal(A, norm_eq=2, inv=False)
D_AA_inv2 = get_diagonal(A, norm_eq=2, inv=True)
D = diag(A.todense())
assert_almost_equal(D, D_A)
D = 1.0/D
assert_almost_equal(D, D_A_inv)
D = diag((A.H*A).todense())
assert_almost_equal(D, D_AA)
D = 1.0/D
assert_almost_equal(D, D_AA_inv)
D = diag((A*A.H).todense())
assert_almost_equal(D, D_AA2)
D = 1.0/D
assert_almost_equal(D, D_AA_inv2)
def test_profile_solver(self):
from scipy.sparse.linalg import cg
from pyamg.gallery import poisson
from pyamg.aggregation import smoothed_aggregation_solver
A = poisson((100, 100), format='csr')
A.data = A.data + 1e-5*rand(A.nnz)
ml = smoothed_aggregation_solver(A)
opts = []
opts.append({})
opts.append({'accel': cg})
opts.append({'accel': cg, 'tol': 1e-10})
for kwargs in opts:
residuals = profile_solver(ml, **kwargs)
del residuals
def test_to_type(self):
w = 1.2
x = ones((5, 1))
y = rand(3, 2)
z = csr_matrix(rand(2, 2))
inlist = [w, x, y, z]
out = to_type(complex, inlist)
for i in range(len(out)):
assert(out[i].dtype == complex)
if isspmatrix(out[i]):
diff = ravel(out[i].data - inlist[i].data)
else:
diff = out[i] - inlist[i]
assert_equal(max(abs(ravel(diff))), 0.0)
def test_type_prep(self):
w = 1.2
x = ones((5, 1))
y = rand(3, 2)
z = csr_matrix(rand(2, 2))
inlist = [w, x, y, z]
out = type_prep(complex, inlist)
for i in range(len(out)):
assert(out[i].dtype == complex)
assert(not isscalar(out[i]))
if isspmatrix(out[i]):
diff = ravel(out[i].data - inlist[i].data)
else:
diff = out[i] - inlist[i]
assert_equal(max(abs(ravel(diff))), 0.0)
def test_filter_operator(self):
# Basic tests, with easy to compute answers
# test one, the constant
A = array([[1.+0.j, 1, 1], [1, 1, 1], [0, 1, 0],
[0, 1, 0], [0, 0, 1], [0, 0, 1]])
C = array([[1.+0.j, 1, 0], [1, 1, 0], [0, 1, 0],
[0, 1, 0], [0, 0, 1], [0, 0, 1]])
B = ones((3, 1)) + 0.j
Bf = ones((6, 1)) + 1.0j * ones((6, 1))
A_filter = filter_operator(csr_matrix(A), csr_matrix(C), B, Bf)
A_filter = A_filter.todense()
A_known = matrix([[0.5+0.5j, 0.5+0.5j, 0.0+0.j],
[0.5+0.5j, 0.5+0.5j, 0.0+0.j],
[0.0+0.j, 1.0+1.j, 0.0+0.j],
[0.0+0.j, 1.0+1.j, 0.0+0.j],
[0.0+0.j, 0.0+0.j, 1.0+1.j],
[0.0+0.j, 0.0+0.j, 1.0+1.j]])
assert_array_almost_equal(A_known, A_filter)
# test two, the constant and linears
# Note that for the rows with only one nonzero, Bf can't be
# approximated exactly
B = hstack((B, arange(B.shape[0]).reshape(-1, 1)))
ww = arange(Bf.shape[0]).reshape(-1, 1) +\
1.0j*arange(Bf.shape[0]).reshape(-1, 1)
Bf = hstack((Bf, ww))
A_filter = filter_operator(csr_matrix(A), csr_matrix(C), B, Bf)
A_filter = A_filter.todense()
A_known = matrix([[1.0+1.j, 0.0+0.j, 0.0+0.j],
[0.0+0.j, 1.0+1.j, 0.0+0.j],
[0.0+0.j, 1.5+1.5j, 0.0+0.j],
[0.0+0.j, 2.0+2.j, 0.0+0.j],
[0.0+0.j, 0.0+0.j, 1.8+1.8j],
[0.0+0.j, 0.0+0.j, 2.2+2.2j]])
assert_array_almost_equal(A_known, A_filter)
def test_scale_T(self):
from scipy.sparse import bsr_matrix
from scipy import matrix
# Test for one CSR and one BSR example
T = matrix([[1.0, 0., 0.],
[0.5j, 0., 0.],
[0., 1., 0.],
[0., .5j, 0.],
[0., 0., 1.j],
[0., 0., 0.25]])
T_answer = matrix([[0.-2.j, 0.+0.j, 0.+0.j],
[1.+0.j, 0.+0.j, 0.+0.j],
[0.+0.j, 1.+0.j, 0.+0.j],
[0.+0.j, 0.+0.5j, 0.+0.j],
[0.+0.j, 0.+0.j, 0.+4.j],
[0.+0.j, 0.+0.j, 1.+0.j]])
P_I = matrix([[0., 0., 0.],
[1., 0., 0.],
[0., 1., 0.],
[0., 0., 0.],
[0., 0., 0.],
[0., 0., 1.]])
I_F = matrix([[1., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0.],
[0., 0., 0., 1., 0., 0.],
[0., 0., 0., 0., 1., 0.],
[0., 0., 0., 0., 0., 0.]])
T_scaled = scale_T(bsr_matrix(T), bsr_matrix(P_I), bsr_matrix(I_F))
T_scaled = T_scaled.todense()
assert_array_almost_equal(T_answer, T_scaled)
# BSR test
T = matrix([[1.j, 1., 0., 0.],
[0.5, 1., 0., 0.],
[1., 0., 0., 0.],
[0., 1., 0., 0.],
[0., 0., 2.j, 0.],
[0., 0., 0., 1.],
[0., 0., 1., 1.],
[0., 0., 1., 1.]])
P_I = matrix([[0., 0., 0., 0.],
[0., 0., 0., 0.],
[1., 0., 0., 0.],
[0., 1., 0., 0.],
[0., 0., 1., 0.],
[0., 0., 0., 1.],
[0., 0., 0., 0.],
[0., 0., 0., 0.]])
I_F = matrix([[1., 0., 0., 0., 0., 0., 0., 0.],
[0., 1., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 1., 0.],
[0., 0., 0., 0., 0., 0., 0., 1.]])
T_answer = matrix([[0.0+1.j, 1.0+0.j, 0.0+0.j, 0.0+0.j],
[0.5+0.j, 1.0+0.j, 0.0+0.j, 0.0+0.j],
[1.0+0.j, 0.0+0.j, 0.0+0.j, 0.0+0.j],
[0.0+0.j, 1.0+0.j, 0.0+0.j, 0.0+0.j],
[0.0+0.j, 0.0+0.j, 1.0+0.j, 0.0+0.j],
[0.0+0.j, 0.0+0.j, 0.0+0.j, 1.0+0.j],
[0.0+0.j, 0.0+0.j, 0.0-0.5j, 1.0+0.j],
[0.0+0.j, 0.0+0.j, 0.0-0.5j, 1.0+0.j]])
T = bsr_matrix(T, blocksize=(2, 2))
P_I = bsr_matrix(P_I, blocksize=(2, 2))
I_F = bsr_matrix(I_F, blocksize=(2, 2))
T_scaled = scale_T(T, P_I, I_F).todense()
assert_array_almost_equal(T_answer, T_scaled)
def test_compute_BtBinv(self):
# Simple CSR test
T = matrix([[1.j, 0.], [1., 0.],
[0., .5], [0., .25]])
T = bsr_matrix(T, blocksize=(1, 1))
B = array([[1.+1.j], [2.j]])
BtBinv = compute_BtBinv(B, T)
answer = array([[[0.50+0.j]], [[0.50+0.j]],
[[0.25+0.j]], [[0.25+0.j]]])
assert_array_almost_equal(BtBinv, answer)
# Simple BSR test
T = matrix([[1., 0., 0., 1.],
[1., 0., 0., 1.],
[0., 0., 0.5, 0.],
[0., 0., 0.25, 0.]])
T = bsr_matrix(T, blocksize=(2, 2))
B = array([[1.j, 1.], [1.j, 3.], [1.j, 4.], [1.j, 2.]])
BtBinv = compute_BtBinv(B, T)
answer = array([[[1.5+0.j, 0.0+0.5j], [0.0-0.5j, 0.2+0.j]],
[[5.0+0.j, 0.0+1.5j], [0.0-1.5j, 0.5+0.j]]])
assert_array_almost_equal(BtBinv, answer)
def test_eliminate_diag_dom_nodes(self):
# Simple CSR test
from pyamg.gallery import poisson
A = poisson((4,), format='csr')
A.data = array(A.data, dtype=complex)
A.data[0] = 1+5.0j
C = eliminate_diag_dom_nodes(A, A.copy(), 1.1)
answer = array([[1., 0., 0., 0.],
[0., 2., -1., 0.],
[0., -1., 2., 0.],
[0., 0., 0., 1.]])
assert_array_almost_equal(C.todense(), answer)
# Simple BSR test
from pyamg.gallery import poisson
A = poisson((6,), format='csr')
A.data = array(A.data, dtype=complex)
A.data[3] = 1.0 + 5.0j
A = A.tobsr((2, 2))
C = poisson((3,), format='csr')
C = eliminate_diag_dom_nodes(A, C, 1.1)
answer = array([[1., 0., 0.],
[0., 2., -1.],
[0., -1., 2.]])
assert_array_almost_equal(C.todense(), answer)
def test_remove_diagonal(self):
from pyamg.gallery import poisson
from pyamg.util.utils import remove_diagonal
A = poisson((4,), format='csr')
C = remove_diagonal(1.0j*A)
exact = array([[0., -1., 0., 0.],
[-1., 0., -1., 0.],
[0., -1., 0., -1.],
[0., 0., -1., 0.]])
assert_array_almost_equal(C.todense(), 1.0j*exact)
def test_scale_rows_by_largest_entry(self):
from pyamg.gallery import poisson
from pyamg.util.utils import scale_rows_by_largest_entry
A = poisson((4,), format='csr')
A.data = array(A.data, dtype=complex)
A.data[1] = 3. + 2.j
A = scale_rows_by_largest_entry(A)
exact = array([[0.55470020+0.j, 0.83205029+0.5547002j, 0.0, 0.0],
[-0.50000000+0.j, 1.00000000+0.j, -0.5+0.j, 0.0+0.j],
[0.00000000+0.j, -0.50000000+0.j, 1.0+0.j, -0.5+0.j],
[0.00000000+0.j, 0.00000000+0.j, -0.5+0.j, 1.0+0.j]])
assert_array_almost_equal(A.todense(), exact)
# note: no explicitly complex tests necessary for get_Cpt_params
# def test_get_Cpt_params(self):
class TestLevelize(TestCase):
def test_levelize_smooth_or_improve_candidates(self):
from pyamg.util.utils import levelize_smooth_or_improve_candidates
# test 1
result = levelize_smooth_or_improve_candidates([('jacobi', {})], 5)
assert_equal(result, [('jacobi', {}) for i in range(5)])
# test 2
result = levelize_smooth_or_improve_candidates('jacobi', 5)
assert_equal(result, ['jacobi' for i in range(5)])
# test 3
result = levelize_smooth_or_improve_candidates(('jacobi', {}), 5)
assert_equal(result, [('jacobi', {}) for i in range(5)])
# test 4
result = levelize_smooth_or_improve_candidates([('jacobi', {}), None],
5)
assert_equal(result, [('jacobi', {}), None, None, None, None])
def test_levelize_strength_or_aggregation(self):
from pyamg.util.utils import levelize_strength_or_aggregation
from pyamg.gallery import poisson
A = poisson((100,), format='csr')
# test 1
max_levels, max_coarse, result = \
levelize_strength_or_aggregation([('symmetric', {})], 5, 5)
assert_equal(result, [('symmetric', {}) for i in range(4)])
assert_equal(max_levels, 5)
assert_equal(max_coarse, 5)
# test 2
max_levels, max_coarse, result =\
levelize_strength_or_aggregation('symmetric', 5, 5)
assert_equal(result, ['symmetric' for i in range(4)])
assert_equal(max_levels, 5)
assert_equal(max_coarse, 5)
# test 3
max_levels, max_coarse, result =\
levelize_strength_or_aggregation(('symmetric', {}), 5, 5)
assert_equal(result, [('symmetric', {}) for i in range(4)])
assert_equal(max_levels, 5)
assert_equal(max_coarse, 5)
# test 4
max_levels, max_coarse, result =\
levelize_strength_or_aggregation([('symmetric', {}), None], 5, 5)
assert_equal(result, [('symmetric', {}), None, None, None])
assert_equal(max_levels, 5)
assert_equal(max_coarse, 5)
# test 5
max_levels, max_coarse, result =\
levelize_strength_or_aggregation(('predefined', {'C': A}), 5, 5)
assert_array_equal(result, [('predefined', {'C': A})])
assert_equal(max_levels, 2)
assert_equal(max_coarse, 0)
# test 6
max_levels, max_coarse, result =\
levelize_strength_or_aggregation([('predefined', {'C': A}),
('predefined', {'C': A})], 5, 5)
assert_array_equal(result, [('predefined', {'C': A}),
('predefined', {'C': A})])
assert_equal(max_levels, 3)
assert_equal(max_coarse, 0)
# test 7
max_levels, max_coarse, result = \
levelize_strength_or_aggregation(None, 5, 5)
assert_equal(result, [(None, {}) for i in range(4)])
assert_equal(max_levels, 5)
assert_equal(max_coarse, 5)
| huahbo/pyamg | pyamg/util/tests/test_utils.py | Python | mit | 56,959 |
"""
问题描述: 你和你的朋友,两个人一起玩 Nim游戏:桌子上有一堆石头,每次你们轮流拿掉 1 - 3 块石头。
拿掉最后一块石头的人就是获胜者。你作为先手。
你们是聪明人,每一步都是最优解。 编写一个函数,来判断你是否可以在给定石头数量的情况下赢得游戏。
示例:
输入: 4
输出: false
解释: 如果堆中有 4 块石头,那么你永远不会赢得比赛;
因为无论你拿走 1 块、2 块 还是 3 块石头,最后一块石头总是会被你的朋友拿走。
方法:
f(4) = False
f(5) = True
f(6) = True
f(7) = True
f(8) = False
可以先多推几个,然后进行猜想和验证。这里可以将大数的情况往f(4),f(8),f(12)...转
"""
class Solution:
def canWinNim(self, n):
"""
:type n: int
:rtype: bool
"""
return not (n % 4 == 0) | ResolveWang/algrithm_qa | 分类代表题目/数学知识/Nim游戏.py | Python | mit | 904 |
# -*- coding: utf-8 -*-
import pytest
from .typeconv import str2bool
from .typeconv import bool201
class Test_str2bool(object):
@pytest.mark.parametrize('s, d, exp', [
(True, None, True),
('1', None, True),
('t', None, True),
('yEs', None, True),
('true', None, True),
('TRUE', None, True),
('oN', None, True),
(False, None, False),
('0', None, False),
('f', None, False),
('n', None, False),
('FaLse', None, False),
('nO', None, False),
('Off', None, False),
])
def test_str2bool_ignore_default(self, s, d, exp):
act = str2bool(s)
assert act == exp
act = str2bool(s, d)
assert act == exp
@pytest.mark.parametrize("s, d, exp", [
(None, 3, 3), # noqa: E241
('01', -1, -1), # noqa: E241
('fka', -2, -2), # noqa: E241
('-1', -3, -3), # noqa: E241
])
def test_str2bool_meaningful_defaults(self, s, d, exp):
act = str2bool(s, default=d)
assert act == exp
class Test_bool201(object):
@pytest.mark.parametrize('b, exp', [
(True, '1'),
(False, '0'),
])
def test_normal(self, b, exp):
act = bool201(b)
assert act == exp
@pytest.mark.parametrize('b', [
(None),
])
def test_raises_TypeError(self, b):
with pytest.raises(TypeError):
bool201(b)
class TestFunctionCompositions(object):
@pytest.mark.parametrize('b', [
(u'0'),
(u'1'),
])
def test_bool201_str2bool(self, b):
act = bool201(str2bool(b))
assert act == b
@pytest.mark.parametrize('b', [
(False),
(True),
])
def test_str2bool_bool201(self, b):
act = str2bool(bool201(b))
assert act == b
| maxkoryukov/route4me-python-sdk | route4me/sdk/_internals/typeconv_test.py | Python | isc | 1,560 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from eventlet import event
from eventlet import greenthread
from heat.openstack.common.gettextutils import _ # noqa
from heat.openstack.common import log as logging
from heat.openstack.common import timeutils
LOG = logging.getLogger(__name__)
class LoopingCallDone(Exception):
"""Exception to break out and stop a LoopingCall.
The poll-function passed to LoopingCall can raise this exception to
break out of the loop normally. This is somewhat analogous to
StopIteration.
An optional return-value can be included as the argument to the exception;
this return-value will be returned by LoopingCall.wait()
"""
def __init__(self, retvalue=True):
""":param retvalue: Value that LoopingCall.wait() should return."""
self.retvalue = retvalue
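# Illustrative sketch (not part of the original module): a poll function signals
# completion by raising LoopingCallDone, and its retvalue is what wait() returns.
#
#   def _poll():
#       if task_finished():                       # hypothetical helper
#           raise LoopingCallDone(retvalue='finished')
#
#   call = FixedIntervalLoopingCall(_poll)
#   call.start(interval=2.0)
#   result = call.wait()                          # -> 'finished'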
class LoopingCallBase(object):
def __init__(self, f=None, *args, **kw):
self.args = args
self.kw = kw
self.f = f
self._running = False
self.done = None
def stop(self):
self._running = False
def wait(self):
return self.done.wait()
class FixedIntervalLoopingCall(LoopingCallBase):
"""A fixed interval looping call."""
def start(self, interval, initial_delay=None):
self._running = True
done = event.Event()
def _inner():
if initial_delay:
greenthread.sleep(initial_delay)
try:
while self._running:
start = timeutils.utcnow()
self.f(*self.args, **self.kw)
end = timeutils.utcnow()
if not self._running:
break
delay = interval - timeutils.delta_seconds(start, end)
if delay <= 0:
LOG.warn(_('task run outlasted interval by %s sec') %
-delay)
greenthread.sleep(delay if delay > 0 else 0)
except LoopingCallDone as e:
self.stop()
done.send(e.retvalue)
except Exception:
LOG.exception(_('in fixed duration looping call'))
done.send_exception(*sys.exc_info())
return
else:
done.send(True)
self.done = done
greenthread.spawn_n(_inner)
return self.done
# TODO(mikal): this class name is deprecated in Havana and should be removed
# in the I release
LoopingCall = FixedIntervalLoopingCall
class DynamicLoopingCall(LoopingCallBase):
"""A looping call which sleeps until the next known event.
The function called should return how long to sleep for before being
called again.
"""
def start(self, initial_delay=None, periodic_interval_max=None):
self._running = True
done = event.Event()
def _inner():
if initial_delay:
greenthread.sleep(initial_delay)
try:
while self._running:
idle = self.f(*self.args, **self.kw)
if not self._running:
break
if periodic_interval_max is not None:
idle = min(idle, periodic_interval_max)
LOG.debug(_('Dynamic looping call sleeping for %.02f '
'seconds'), idle)
greenthread.sleep(idle)
except LoopingCallDone as e:
self.stop()
done.send(e.retvalue)
except Exception:
LOG.exception(_('in dynamic looping call'))
done.send_exception(*sys.exc_info())
return
else:
done.send(True)
self.done = done
greenthread.spawn(_inner)
return self.done
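# Illustrative sketch (not part of the original module): the wrapped function
# returns how many seconds to sleep before it is called again.
#
#   def _refresh():
#       seconds_left = time_until_next_event()    # hypothetical helper
#       return min(seconds_left, 60)
#
#   DynamicLoopingCall(_refresh).start(initial_delay=5)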
| lakshmi-kannan/matra | openstack/common/loopingcall.py | Python | apache-2.0 | 4,667 |
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM14.IEC61970.Core.IdentifiedObject import IdentifiedObject
class OperationalLimit(IdentifiedObject):
"""A value associated with a specific kind of limit.
"""
def __init__(self, type='', OperationalLimitType=None, OperationalLimitSet=None, *args, **kw_args):
"""Initialises a new 'OperationalLimit' instance.
@param type: Used to specify high/low and limit levels.
@param OperationalLimitType: The limit type associated with this limit.
@param OperationalLimitSet: The limit set to which the limit values belong.
"""
#: Used to specify high/low and limit levels.
self.type = type
self._OperationalLimitType = None
self.OperationalLimitType = OperationalLimitType
self._OperationalLimitSet = None
self.OperationalLimitSet = OperationalLimitSet
super(OperationalLimit, self).__init__(*args, **kw_args)
_attrs = ["type"]
_attr_types = {"type": str}
_defaults = {"type": ''}
_enums = {}
_refs = ["OperationalLimitType", "OperationalLimitSet"]
_many_refs = []
def getOperationalLimitType(self):
"""The limit type associated with this limit.
"""
return self._OperationalLimitType
def setOperationalLimitType(self, value):
if self._OperationalLimitType is not None:
filtered = [x for x in self.OperationalLimitType.OperationalLimit if x != self]
self._OperationalLimitType._OperationalLimit = filtered
self._OperationalLimitType = value
if self._OperationalLimitType is not None:
if self not in self._OperationalLimitType._OperationalLimit:
self._OperationalLimitType._OperationalLimit.append(self)
OperationalLimitType = property(getOperationalLimitType, setOperationalLimitType)
def getOperationalLimitSet(self):
"""The limit set to which the limit values belong.
"""
return self._OperationalLimitSet
def setOperationalLimitSet(self, value):
if self._OperationalLimitSet is not None:
filtered = [x for x in self.OperationalLimitSet.OperationalLimitValue if x != self]
self._OperationalLimitSet._OperationalLimitValue = filtered
self._OperationalLimitSet = value
if self._OperationalLimitSet is not None:
if self not in self._OperationalLimitSet._OperationalLimitValue:
self._OperationalLimitSet._OperationalLimitValue.append(self)
OperationalLimitSet = property(getOperationalLimitSet, setOperationalLimitSet)
| rwl/PyCIM | CIM14/IEC61970/OperationalLimits/OperationalLimit.py | Python | mit | 3,684 |
class Move(object):
'''
    This class represents a move in terms of directions
'''
# CONSTANTS
NORTH = ( -1, 0)
SOUTH = ( 1, 0)
EAST = ( 0, 1)
WEST = ( 0, -1) | Shathra/puzzlib | puzz/move.py | Python | bsd-2-clause | 165 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Linux Foundation nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Invoke gcc, looking for warnings, and causing a failure if there are
# non-whitelisted warnings.
import errno
import re
import os
import sys
import subprocess
# Note that gcc uses unicode, which may depend on the locale. TODO:
# force LANG to be set to en_US.UTF-8 to get consistent warnings.
allowed_warnings = set([
"return_address.c:62",
"hci_conn.c:407",
"workqueue.c:480"
])
# Capture the name of the object file, can find it.
ofile = None
warning_re = re.compile(r'''(.*/|)([^/]+\.[a-z]+:\d+):(\d+:)? warning:''')
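# Example of a line the regex is meant to match (illustrative):
#   "kernel/workqueue.c:480:12: warning: unused variable 'flags'"
# Here group(2) is "workqueue.c:480", which is compared against allowed_warnings.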
def interpret_warning(line):
"""Decode the message from gcc. The messages we care about have a filename, and a warning"""
line = line.rstrip('\n')
m = warning_re.match(line)
if m and m.group(2) not in allowed_warnings:
print "error, forbidden warning:", m.group(2)
# If there is a warning, remove any object if it exists.
if ofile:
try:
os.remove(ofile)
except OSError:
pass
sys.exit(1)
def run_gcc():
args = sys.argv[1:]
# Look for -o
try:
i = args.index('-o')
global ofile
ofile = args[i+1]
except (ValueError, IndexError):
pass
compiler = sys.argv[0]
try:
proc = subprocess.Popen(args, stderr=subprocess.PIPE)
for line in proc.stderr:
print line,
interpret_warning(line)
result = proc.wait()
except OSError as e:
result = e.errno
if result == errno.ENOENT:
print args[0] + ':',e.strerror
print 'Is your PATH set correctly?'
else:
print ' '.join(args), str(e)
return result
if __name__ == '__main__':
status = run_gcc()
sys.exit(status)
| sub77/kernel_msm | scripts/gcc-wrapper.py | Python | gpl-2.0 | 3,426 |
import gluon.contrib.simplejson as sj
results_per_page = 10
def search():
'''returns the results in json to an audio search'''
page = 0
json_response = {}
search_criteria = None
if len(request.args) > 0:
search_criteria = request.args[0]
if len(request.args) == 2:
page = int(request.args[1])
#get approperate records
if search_criteria:
#this is a gae hack because there isn't any full text string search
if request.env.web2py_runtime_gae:
audio_by_artist = db.Audio.artist == search_criteria
#audio = db.executesql("")
#audio = db.executesql("SELECT artist FROM Audio WHERE prop >= '" + search_criteria + "' AND prop < '" + (unicode(search_criteria) + u"\ufffd") + "'")
else:
audio_by_artist = db.Audio.artist.like('%'+search_criteria+'%')
audio = db(audio_by_artist).select(limitby=(page*results_per_page, (page+1) * (results_per_page+1)))
json_audio = []
audio_result_len = len(audio)
#create a json representation of the audio results
for i in range(0, audio_result_len):
a = audio[i]
json_audio.append( {'id':a.id, 'artist':a.artist, 'track':a.track})
#create flags to determine if the forward and back links are applicable
if audio_result_len <= results_per_page:
            forward = None
else:
forward = page + 1
if page == 0:
back = None
else:
back = page - 1
json_response['audio'] = json_audio
json_response['pagnation'] = {'searchCriteria':search_criteria, 'forward':forward, 'back':back}
return sj.dumps(json_response)
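# Example response shape (illustrative values; 'pagnation' is the key name used above):
#   {"audio": [{"id": 1, "artist": "Miles Davis", "track": "So What"}],
#    "pagnation": {"searchCriteria": "miles", "forward": 1, "back": null}}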
| davidhampgonsalves/mix-tree | init/controllers/audio.py | Python | mit | 1,753 |
#!/usr/bin/env python
""""
File : tests_post_to_producer.py
Author : ian
Created : 04-26-2016
Last Modified By : ian
Last Modified On : 04-26-2016
***********************************************************************
The MIT License (MIT)
Copyright © 2015 Ian Cooper <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the “Software”), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
**********************************************************************i*
"""
import unittest
from brightside.registry import MessageMapperRegistry
from tests.messaging_testdoubles import FakeMessageStore, FakeProducer
from brightside.command_processor import CommandProcessor
from tests.handlers_testdoubles import MyCommand, MyOtherCommand, map_mycommand_to_message
from brightside.exceptions import ConfigurationException
class PostTests(unittest.TestCase):
def setUp(self):
self._messageMapperRegistry = MessageMapperRegistry()
self._messageMapperRegistry.register(MyCommand, map_mycommand_to_message)
self._message_store = FakeMessageStore()
self._producer = FakeProducer()
self._commandProcessor = CommandProcessor(
message_mapper_registry=self._messageMapperRegistry,
message_store=self._message_store,
producer=self._producer)
def test_handle_command(self):
""" given that we have a message mapper and producer registered for a commnd,
when we post a command,
it should send via the producer
"""
request = MyCommand()
self._commandProcessor.post(request)
self.assertTrue(self._message_store.message_was_added, "Expected a message to be added")
self.assertTrue(self._message_store.get_message(request.id), "Expected the command to be converted into a message")
self.assertTrue(self._producer.was_sent_message, "Expected a message to be sent via the producer")
self.assertTrue(str(self._message_store.get_message(request.id).body), "")
def test_missing_message_mapper(self):
"""given that we have no message mapper registered for a command
when we post a command
it should raise an error
"""
request = MyOtherCommand()
was_exception_thrown = False
try:
self._commandProcessor.post(request)
except ConfigurationException:
was_exception_thrown = True
        # it looks as though we should use self.assertRaises...
self.assertTrue(was_exception_thrown, "")
def test_missing_message_producer(self):
"""given that we have no me message producer configured for the commandprocessor
when we post a command
it should raise a confiugration error
"""
self._commandProcessor = CommandProcessor(
message_mapper_registry=self._messageMapperRegistry,
message_store=self._message_store,
producer=None)
was_exception_thrown = False
try:
request = MyCommand()
self._commandProcessor.post(request)
except ConfigurationException:
was_exception_thrown = True
self.assertTrue(was_exception_thrown)
def test_missing_message_mapper_registry(self):
""" given that we have no message mapper registry for the commandprocessor
when we post a command
it should raise a configuration error
"""
self._commandProcessor = CommandProcessor(
message_mapper_registry=None,
message_store=self._message_store,
producer=self._producer
)
was_exception_thrown = False
try:
request = MyCommand()
self._commandProcessor.post(request)
except ConfigurationException:
was_exception_thrown = True
self.assertTrue(was_exception_thrown)
if __name__ == '__main__':
unittest.main()
| BrighterCommand/Brightside | tests/tests_post_to_producer.py | Python | mit | 4,927 |
from heapq import *
from heapq_showtree import show_tree
def merge_sorted_file(ls_of_filedata):
min_heap = []
    # heap entries: (value, source file index, next index to read from that file)
result = []
for origin, cur_file in enumerate(ls_of_filedata):
if cur_file:
heappush(min_heap, (cur_file[0], origin, 1))
while min_heap:
nxt_min, origin, nxt_ndx = heappop(min_heap)
result.append(nxt_min)
nxt_file = ls_of_filedata[origin]
if nxt_ndx < len(nxt_file):
heappush(min_heap, (nxt_file[nxt_ndx], origin, nxt_ndx + 1))
    return result
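# The heap holds at most one entry per input file, so merging n total elements
# from k sorted files takes O(n log k) time and O(k) extra space.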
if __name__ == '__main__':
test_cases = [
([[1, 2], [2, 3, 4, 5]], [1, 2, 2, 3, 4, 5]),
([[1, 2], [2, 3, 4, 5], [3, 4, 5]], [1, 2, 2, 3, 3, 4, 4, 5, 5]),
]
for test_case, exp in test_cases:
show_tree(merge_sorted_file(test_case))
        print(merge_sorted_file(test_case) == exp)
| misscindy/Interview | Heap/11_01_Merge_Sorted_Files.py | Python | cc0-1.0 | 916 |
from config import cloudplatform
storage_adapter = None
if cloudplatform == "google":
import googlestorage
storage_adapter = googlestorage
elif cloudplatform == "aws":
import awsstorage
storage_adapter = awsstorage
elif cloudplatform == "azure":
from FlaskWebProject import azurestorage
storage_adapter = azurestorage
def create_container(bucketID):
""" Creates Container with given bucketID
:param string bucketID: container name
:return boolean: true if succeed
"""
return storage_adapter.create_container(bucketID)
def container_exists(bucketID):
""" Check if container with ID exists
:param string bucketID: container name
:return boolean: true if exists
"""
return storage_adapter.container_exists(bucketID)
def file_exists(bucketID, filename):
""" Checks if file in container exists
:param string bucketID: container name
:param string filename: file to search
:return boolean: true if exists
"""
return storage_adapter.file_exists(bucketID, filename)
def list_files(bucketID):
""" Lists files in specified bucket
:param string bucketID: container name
:return list: list of FileIDs
"""
return storage_adapter.list_files(bucketID)
def delete_file(bucketID, filename):
""" delete file from container
:param string bucketID: container name
:param string filename: file to delete
:return boolean: true if succeed
"""
return storage_adapter.delete_file(bucketID, filename)
def delete_container(bucketID):
""" delete container
:param string bucketID: container name
:return boolean: true if succeed
"""
return storage_adapter.delete_container(bucketID)
def upload_from_path(bucketID, path):
""" Uploads a local file from client to the cloud
:param string bucketID: container name
:param string path: local filepath
:return boolean: true if succeed
"""
return storage_adapter.upload_from_path(bucketID, path)
def upload_from_text(bucketID, filename, text):
""" Uploads text to container in specified file
:param string bucketID: container name
:param string filename: destination file
:param string text: text to upload
:return boolean: true if succeed
"""
return storage_adapter.upload_from_text(bucketID, filename, text)
def download_file_to_path(bucketID, filename, path):
""" Downloads file from container to local path
:param string bucketID: container name
:param string filename: file to download
:param string path: destination local filepath
:return boolean: true if succeed
"""
return storage_adapter.download_file_to_path(bucketID, filename, path)
def download_file_to_text(bucketID, filename):
""" Downloads file from container to text
:param string bucketID: container name
:param string filename: file to download
:return string: text that got downloaded
"""
return storage_adapter.download_file_to_text(bucketID, filename)
def get_download_url(bucketID, filename):
""" Returns a download for specified file in container
:param string bucketID: container name
:param string filename: file to download
:return string: the url to download the file from
"""
return storage_adapter.get_download_url(bucketID, filename)
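# Illustrative usage of this facade (bucket and file names are made up):
#
#   if not container_exists('user-uploads'):
#       create_container('user-uploads')
#   upload_from_text('user-uploads', 'notes.txt', 'hello world')
#   text = download_file_to_text('user-uploads', 'notes.txt')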
| ohaz/amos-ss15-proj1 | FlaskWebProject/storageinterface.py | Python | agpl-3.0 | 3,348 |
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import re_path, include
from django.views import defaults as default_views
from main.views import HomePageView
urlpatterns = [
re_path(r'^$', HomePageView.as_view(), name='home'),
# url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name='about'),
# Django Admin, use {% url 'admin:index' %}
re_path(settings.ADMIN_URL, admin.site.urls),
# User management
re_path(r'^users/', include('dotmanca.users.urls', namespace='users')),
# Your stuff: custom urls includes go here
re_path(r'^news/', include('news.urls')),
re_path(r'^about/', include('main.urls')),
re_path(r'^galleries/', include('gallery.urls')),
re_path(r'^comics/', include('comics.urls')),
re_path(r'^characters/', include('characters.urls')),
re_path(r'^places/', include('places.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
# This allows the error pages to be debugged during development, just visit
# these url in browser to see how these error pages look like.
urlpatterns += [
re_path(r'^400/$', default_views.bad_request, kwargs={'exception': Exception('Bad Request!')}),
re_path(r'^403/$', default_views.permission_denied, kwargs={'exception': Exception('Permission Denied')}),
re_path(r'^404/$', default_views.page_not_found, kwargs={'exception': Exception('Page not Found')}),
re_path(r'^500/$', default_views.server_error),
]
if 'debug_toolbar' in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns = [
re_path(r'^__debug__/', include(debug_toolbar.urls)),
] + urlpatterns
| evanepio/dotmanca | config/urls.py | Python | mit | 1,796 |
"""Tests for the Abode cover device."""
from unittest.mock import patch
from homeassistant.components.abode import ATTR_DEVICE_ID
from homeassistant.components.cover import DOMAIN as COVER_DOMAIN
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_FRIENDLY_NAME,
SERVICE_CLOSE_COVER,
SERVICE_OPEN_COVER,
STATE_CLOSED,
)
from .common import setup_platform
DEVICE_ID = "cover.garage_door"
async def test_entity_registry(hass):
"""Tests that the devices are registered in the entity registry."""
await setup_platform(hass, COVER_DOMAIN)
entity_registry = await hass.helpers.entity_registry.async_get_registry()
entry = entity_registry.async_get(DEVICE_ID)
assert entry.unique_id == "61cbz3b542d2o33ed2fz02721bda3324"
async def test_attributes(hass):
"""Test the cover attributes are correct."""
await setup_platform(hass, COVER_DOMAIN)
state = hass.states.get(DEVICE_ID)
assert state.state == STATE_CLOSED
assert state.attributes.get(ATTR_DEVICE_ID) == "ZW:00000007"
assert not state.attributes.get("battery_low")
assert not state.attributes.get("no_response")
assert state.attributes.get("device_type") == "Secure Barrier"
assert state.attributes.get(ATTR_FRIENDLY_NAME) == "Garage Door"
async def test_open(hass):
"""Test the cover can be opened."""
await setup_platform(hass, COVER_DOMAIN)
with patch("abodepy.AbodeCover.open_cover") as mock_open:
await hass.services.async_call(
COVER_DOMAIN, SERVICE_OPEN_COVER, {ATTR_ENTITY_ID: DEVICE_ID}, blocking=True
)
await hass.async_block_till_done()
mock_open.assert_called_once()
async def test_close(hass):
"""Test the cover can be closed."""
await setup_platform(hass, COVER_DOMAIN)
with patch("abodepy.AbodeCover.close_cover") as mock_close:
await hass.services.async_call(
COVER_DOMAIN,
SERVICE_CLOSE_COVER,
{ATTR_ENTITY_ID: DEVICE_ID},
blocking=True,
)
await hass.async_block_till_done()
mock_close.assert_called_once()
| partofthething/home-assistant | tests/components/abode/test_cover.py | Python | apache-2.0 | 2,112 |
# -*- coding: utf-8 -*-
import os
import pygame
import random
import classes.board
import classes.game_driver as gd
import classes.level_controller as lc
class Board(gd.BoardGame):
def __init__(self, mainloop, speaker, config, screen_w, screen_h):
self.level = lc.Level(self, mainloop, 1, 10)
gd.BoardGame.__init__(self, mainloop, speaker, config, screen_w, screen_h, 11, 9)
def create_game_objects(self, level=1):
self.allow_unit_animations = False
self.ai_enabled = True
self.board.draw_grid = False
white = [255, 255, 255]
scheme = "white"
if self.mainloop.scheme is not None:
if self.mainloop.scheme.dark:
scheme = "black"
if self.level.lvl == 1:
data = [7, 5, 17, -2]
elif self.level.lvl == 2:
data = [7, 5, 17, -1]
elif self.level.lvl == 3:
data = [7, 5, 14, -2]
elif self.level.lvl == 4:
data = [7, 5, 14, -1]
elif self.level.lvl == 5:
data = [7, 5, 12, -2]
elif self.level.lvl == 6:
data = [7, 5, 12, -1]
elif self.level.lvl == 7:
data = [7, 5, 10, -2]
elif self.level.lvl == 8:
data = [7, 5, 10, -1]
elif self.level.lvl == 9:
data = [7, 5, 8, -2]
elif self.level.lvl == 10:
data = [7, 5, 8, -1]
self.ai_speed = data[2]
# stretch width to fit the screen size
max_x_count = self.get_x_count(data[1], even=False)
if max_x_count > 7:
data[0] = max_x_count
self.data = data
self.level.game_step = 0
self.level.games_per_lvl = 1
self.moveable = False
self.moves = []
self.move_buttons = []
self.possible_move_buttons = []
self.sequence_counter = 0
self.current_step = 0
self.start_sequence = True
self.completed_mode = False
self.center = [data[0] // 2, data[1] // 2]
self.vis_buttons = [0, 1, 1, 1, 1, 0, 1, 0, 0]
self.mainloop.info.hide_buttonsa(self.vis_buttons)
self.layout.update_layout(data[0], data[1])
scale = self.layout.scale
self.board.level_start(data[0], data[1], scale)
self.board.add_unit(self.center[0], self.center[1], 1, 1, classes.board.MultiImgSprite, "", white,
os.path.join("schemes", scheme, "owl_5.png"), 0, frame_flow=[0, 1, 2, 3, 4, 3, 2, 1, 0],
frame_count=9, row_data=[5, 1])
self.owl = self.board.ships[0]
self.owl.outline = False
self.owl.draggable = False
self.owl.audible = True
self.board.active_ship = 0
self.ship_id = 0
self.images = [os.path.join("schemes", scheme, "a_yellow_150.png"),
os.path.join("schemes", scheme, "a_green_150.png"),
os.path.join("schemes", scheme, "a_blue_150.png"),
os.path.join("schemes", scheme, "a_red_150.png")]
for i in range(4):
self.board.add_door(self.center[0], self.center[1], 1, 1, classes.board.SlidingDoor, "", white,
self.images[i], frame_flow=[0, 1], frame_count=2, row_data=[2, 1])
self.update_arrows()
self.board.all_sprites_list.move_to_front(self.board.ships[0])
self.add_next_move()
def handle(self, event):
gd.BoardGame.handle(self, event)
if self.ship_id < 0 and event.type == pygame.MOUSEBUTTONDOWN:
# make it impossible to deselect the main character
self.board.active_ship = 0
self.ship_id = 0
if self.moveable:
pos = event.pos
column = pos[0] // (self.layout.width)
row = (pos[1] - self.layout.top_margin) // (self.layout.height)
self.direction = [0, 0]
arrow_clicked = False
if column == self.owl_pos[0] - 1 and row == self.owl_pos[1]:
self.direction[0] = -1
arrow_clicked = True
elif column == self.owl_pos[0] + 1 and row == self.owl_pos[1]:
self.direction[0] = 1
arrow_clicked = True
elif column == self.owl_pos[0] and row == self.owl_pos[1] - 1:
self.direction[1] = -1
arrow_clicked = True
elif column == self.owl_pos[0] and row == self.owl_pos[1] + 1:
self.direction[1] = 1
arrow_clicked = True
if arrow_clicked:
self.check_direction_kdown()
if (event.type == pygame.KEYDOWN or event.type == pygame.MOUSEBUTTONDOWN) and not self.moveable:
self.move = False
elif event.type == pygame.KEYUP or event.type == pygame.MOUSEBUTTONUP:
self.highlight_color(-1)
self.mainloop.redraw_needed[0] = True
self.move = False
def update_arrows(self):
directions = [[-1, 0], [1, 0], [0, -1], [0, 1]]
self.owl_pos = list(self.board.active_ship_pos)
self.possible_moves = []
self.possible_move_buttons = []
for i in range(4):
if 0 <= self.owl_pos[0] + directions[i][0] < self.data[0] and 0 <= self.owl_pos[1] + directions[i][1] < \
self.data[1]:
pos = [self.owl_pos[0] + directions[i][0], self.owl_pos[1] + directions[i][1]]
self.possible_moves.append(pos)
self.possible_move_buttons.append(i)
else:
pos = self.owl_pos
self.board.units[i].set_pos(pos)
self.mainloop.redraw_needed[0] = True
def update(self, game):
game.fill((255, 255, 255))
gd.BoardGame.update(self, game)
def after_keydown_move(self):
self.update_arrows()
if self.owl_pos == self.moves[self.current_step]:
self.highlight_color(self.move_buttons[self.current_step])
if self.current_step < len(self.moves) - 1:
self.current_step += 1
self.level.game_step = self.current_step
else:
self.level.game_step = self.current_step + 1
self.ai_speed = 5
self.completed_mode = True
self.ai_enabled = True
self.mainloop.redraw_needed[1] = True
self.mainloop.redraw_needed[0] = True
else:
self.game_over()
self.move = False
def next_level(self):
self.current_step = 0
self.board._place_unit(0, self.center)
self.update_arrows()
def game_over(self):
self.level.games_per_lvl = 1
self.level.game_step = 0
self.mainloop.redraw_needed[1] = True
self.level.game_over()
def highlight_color(self, btn_id):
for i in range(4):
if i == btn_id:
self.board.units[i].set_frame(1)
self.board.units[i].update_me = True
else:
self.board.units[i].set_frame(0)
self.board.units[i].update_me = True
def add_next_move(self):
next, btn = self.pick_index()
if len(self.moves) > -1 - self.data[3]:
while btn == self.move_buttons[-1] and btn == self.move_buttons[self.data[3]]:
next, btn = self.pick_index()
self.moves.append(next) # possible_moves = self.possible_moves()
self.move_buttons.append(btn)
def pick_index(self):
index = random.choice(range(len(self.possible_moves)))
next = self.possible_moves[index]
btn = self.possible_move_buttons[index]
return [next, btn]
def ai_walk(self):
if self.start_sequence:
if self.sequence_counter < len(self.moves) * 2:
if self.sequence_counter % 2 == 0:
self.highlight_color(self.move_buttons[self.sequence_counter // 2])
else:
self.highlight_color(-1)
self.sequence_counter += 1
else:
self.start_sequence = False
self.ai_enabled = False
self.sequence_counter = 0
self.moveable = True
elif self.completed_mode:
if self.owl.frame < self.owl.frame_count - 1:
self.owl.next_frame()
self.owl.update_me = True
else:
self.check_result()
def check_result(self):
if self.current_step == len(self.moves) - 1:
# self.update_score(len(self.moves)*2)
self.add_next_move()
self.next_level()
self.level.games_per_lvl = len(self.moves) # gpl #number of games to play in order to level up
self.level.game_step = 0
self.owl.set_frame(0)
self.owl.update_me = True
self.mainloop.redraw_needed[1] = True
self.completed_mode = False
self.start_sequence = True
self.ai_enabled = True
self.ai_speed = self.data[2]
self.moveable = False
| imiolek-ireneusz/eduActiv8 | game_boards/game012.py | Python | gpl-3.0 | 9,202 |
import json
from typing import Any
from urllib.error import URLError
from urllib.request import Request, urlopen
from libqtile.log_utils import logger
from libqtile.widget import base
try:
import xmltodict
def xmlparse(body):
return xmltodict.parse(body)
except ImportError:
# TODO: we could implement a similar parser by hand, but i'm lazy, so let's
# punt for now
def xmlparse(body):
raise Exception("no xmltodict library")
class GenPollText(base.ThreadPoolText):
"""A generic text widget that polls using poll function to get the text"""
defaults = [
("func", None, "Poll Function"),
]
def __init__(self, **config):
base.ThreadPoolText.__init__(self, "", **config)
self.add_defaults(GenPollText.defaults)
def poll(self):
if not self.func:
return "You need a poll function"
return self.func()
class GenPollUrl(base.ThreadPoolText):
"""A generic text widget that polls an url and parses it using parse function"""
defaults = [
("url", None, "Url"),
("data", None, "Post Data"),
("parse", None, "Parse Function"),
("json", True, "Is Json?"),
("user_agent", "Qtile", "Set the user agent"),
("headers", {}, "Extra Headers"),
("xml", False, "Is XML?"),
] # type: list[tuple[str, Any, str]]
def __init__(self, **config):
base.ThreadPoolText.__init__(self, "", **config)
self.add_defaults(GenPollUrl.defaults)
self.headers["User-agent"] = self.user_agent
if self.json:
self.headers["Content-Type"] = "application/json"
if self.data and not isinstance(self.data, str):
self.data = json.dumps(self.data).encode()
def fetch(self):
req = Request(self.url, self.data, self.headers)
res = urlopen(req)
charset = res.headers.get_content_charset()
body = res.read()
if charset:
body = body.decode(charset)
if self.json:
body = json.loads(body)
if self.xml:
body = xmlparse(body)
return body
def poll(self):
if not self.parse or not self.url:
return "Invalid config"
try:
body = self.fetch()
except URLError:
return "No network"
try:
text = self.parse(body)
except Exception:
logger.exception("got exception polling widget")
text = "Can't parse"
return text
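# Minimal configuration sketch (illustrative; the URL and the parse callable are
# placeholders, not part of the library):
#
#   GenPollUrl(
#       url='https://example.com/status.json',
#       json=True,
#       parse=lambda body: body.get('status', 'unknown'),
#   )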
| qtile/qtile | libqtile/widget/generic_poll_text.py | Python | mit | 2,540 |
#!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
from Plugins.Extensions.OpenWebif.local import tstrings
##################################################
## MODULE CONSTANTS
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1447321436.374395
__CHEETAH_genTimestamp__ = 'Thu Nov 12 18:43:56 2015'
__CHEETAH_src__ = '/home/knuth/openpli-oe-core/build/tmp/work/fusionhd-oe-linux/enigma2-plugin-extensions-openwebif/1+gitAUTOINC+5837c87afc-r0/git/plugin/controllers/views/mobile/about.tmpl'
__CHEETAH_srcLastModified__ = 'Thu Nov 12 18:43:41 2015'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
raise AssertionError(
'This template was compiled with Cheetah version'
' %s. Templates compiled before version %s must be recompiled.'%(
__CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class about(Template):
##################################################
## CHEETAH GENERATED METHODS
def __init__(self, *args, **KWs):
super(about, self).__init__(*args, **KWs)
if not self._CHEETAH__instanceInitialized:
cheetahKWArgs = {}
allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
for k,v in KWs.items():
if k in allowedKWs: cheetahKWArgs[k] = v
self._initCheetahInstance(**cheetahKWArgs)
def respond(self, trans=None):
## CHEETAH: main method generated for this template
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
write(u'''<html>\r
<head>\r
\t<title>OpenWebif</title>\r
\t<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />\r
\t<meta name="viewport" content="user-scalable=no, width=device-width"/>\r
\t<meta name="apple-mobile-web-app-capable" content="yes" />\r
\t<link rel="stylesheet" href="http://code.jquery.com/mobile/1.0/jquery.mobile-1.0.min.css" />\r
\t<link rel="stylesheet" type="text/css" href="/css/iphone.css" media="screen"/>\r
\t<script src="/js/jquery-1.6.2.min.js"></script>\r
\t<script src="/js/jquery.mobile-1.0.min.js"></script>\r
\t<script src="/js/openwebif.mobile.js"></script>\r
</head>\r
<body> \r
\t<div data-role="page">\r
\r
\t\t<div id="header">\r
\t\t\t<div class="button" onClick="history.back()">''')
_v = VFFSL(SL,"tstrings",True)['back'] # u"$tstrings['back']" on line 18, col 49
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['back']")) # from line 18, col 49.
write(u'''</div>\r
\t\t\t<h1><a style="color:#FFF;text-decoration:none;" href=\'/mobile\'>OpenWebif</a></h1>\r
\t\t</div>\r
\r
\t\t<div id="contentContainer">\r
\t\t\t<div id="mainContent" style="text-align: center;">\r
\t\t\t\t<h3>''')
_v = VFFSL(SL,"tstrings",True)['openwebif_header'] # u"$tstrings['openwebif_header']" on line 24, col 9
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['openwebif_header']")) # from line 24, col 9.
write(u'''</h3>\r
\t\t\t\t<h3>''')
_v = VFFSL(SL,"tstrings",True)['site_source'] # u"$tstrings['site_source']" on line 25, col 9
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['site_source']")) # from line 25, col 9.
write(u''': <a href="https://github.com/E2OpenPlugins/e2openplugin-OpenWebif">Github</a></h3>\r
\t\t\t\t<hr>\t\t\t\r
\t\t\t\t<br>\r
\t\t\t\t<h1>''')
_v = VFFSL(SL,"tstrings",True)['authors'] # u"$tstrings['authors']" on line 28, col 9
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['authors']")) # from line 28, col 9.
write(u'''</h1>\r
\t\t\t\t<div class="info">\r
\t\t\t\t\tmeo aka bacicciosat<br>\t\r
\t\t\t\t\tskaman<br>\r
\t\t\t\t\tHomey-GER<br>\r
\t\t\t\t</div>\r
\t\t\t\t<hr>\r
\t\t\t\t<br>\t\r
\t\t\t\t<h1>''')
_v = VFFSL(SL,"tstrings",True)['javalib'] # u"$tstrings['javalib']" on line 36, col 9
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['javalib']")) # from line 36, col 9.
write(u'''</h1>\r
\t\t\t\t<div class="info">\r
\t\t\t\t\t<a href="http://jqueryui.com/">jQuery UI</a><br>\r
\t\t\t\t\t<a href="http://jquerymobile.com/">jQuery Mobile</a>\r
\t\t\t\t</div>\r
\t\t\t\t<hr>\r
\t\t\t\t<br>\t\r
\t\t\t\t<h1>''')
_v = VFFSL(SL,"tstrings",True)['template_engine'] # u"$tstrings['template_engine']" on line 43, col 9
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['template_engine']")) # from line 43, col 9.
write(u'''</h1>\r
\t\t\t\t<div class="info">\r
\t\t\t\t\t<a href="http://www.cheetahtemplate.org/">Cheetah</a>\r
\t\t\t\t</div>\r
\t\t\t\t<hr>\r
\t\t\t\t<br>\t\t\t\r
\t\t\t\t<h1>''')
_v = VFFSL(SL,"tstrings",True)['license'] # u"$tstrings['license']" on line 49, col 9
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['license']")) # from line 49, col 9.
write(u'''</h1>\r
\t\t\t\t''')
_v = VFFSL(SL,"tstrings",True)['license_text_m'] # u"$tstrings['license_text_m']" on line 50, col 5
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['license_text_m']")) # from line 50, col 5.
write(u'''\r
\t\t\t</div>\r
\t\t</div>\r
\r
\t\t<div id="footer">\r
\t\t\t<p>OpenWebif Mobile</p>\r
\t\t\t<a onclick="document.location.href=\'/index?mode=fullpage\';return false;" href="#">''')
_v = VFFSL(SL,"tstrings",True)['show_full_openwebif'] # u"$tstrings['show_full_openwebif']" on line 56, col 86
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['show_full_openwebif']")) # from line 56, col 86.
write(u'''</a>\r
\t\t</div>\r
\t\t\r
\t</div>\r
</body>\r
</html>\r
''')
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
##################################################
## CHEETAH GENERATED ATTRIBUTES
_CHEETAH__instanceInitialized = False
_CHEETAH_version = __CHEETAH_version__
_CHEETAH_versionTuple = __CHEETAH_versionTuple__
_CHEETAH_genTime = __CHEETAH_genTime__
_CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
_CHEETAH_src = __CHEETAH_src__
_CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
_mainCheetahMethod_for_about= 'respond'
## END CLASS DEFINITION
if not hasattr(about, '_initCheetahAttributes'):
templateAPIClass = getattr(about, '_CHEETAH_templateClass', Template)
templateAPIClass._addCheetahPlumbingCodeToClass(about)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
if __name__ == '__main__':
from Cheetah.TemplateCmdLineIface import CmdLineIface
CmdLineIface(templateObj=about()).run()
| pli3/e2-openwbif | plugin/controllers/views/mobile/about.py | Python | gpl-2.0 | 8,266 |
from django.contrib import admin
from .models import dynamic_models
for model in dynamic_models.values():
admin.site.register(model)
# Register your models here.
| NikitaKoshelev/exam4sovzond | apps/dynamic_models/admin.py | Python | mit | 170 |
#!/usr/bin/env python
# [SublimeLinter pep8-max-line-length:150]
# -*- coding: utf-8 -*-
"""
black_rhino is a multi-agent simulator for financial network analysis
Copyright (C) 2016 Co-Pierre Georg ([email protected])
Pawel Fiedor ([email protected])
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, version 3 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from src.agent import Agent
from src.measurement import Measurement
from src.updater import Updater
from abm_template.src.baserunner import BaseRunner
# from abm_template.src.baserunner import BaseRunner
# -------------------------------------------------------------------------
#
# class Runner
#
# -------------------------------------------------------------------------
class Runner(BaseRunner):
#
#
# VARIABLES
#
#
identifier = ""
num_sweeps = 0
#
#
# METHODS
#
#
# -------------------------------------------------------------------------
# __init__
# -------------------------------------------------------------------------
def __init__(self, environment):
self.initialize(environment)
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# initialize()
# -------------------------------------------------------------------------
def initialize(self, environment):
self.identifier = environment.identifier
self.num_sweeps = int(environment.static_parameters['num_sweeps'])
self.updater = Updater(environment)
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# get_identifier
# -------------------------------------------------------------------------
def get_identifier(self):
return self.identifier
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# set_identifier
# -------------------------------------------------------------------------
def set_identifier(self, value):
return super(Runner, self).set_identifier(value)
# -------------------------------------------------------------------------
# get_num_sweeps
# -------------------------------------------------------------------------
def get_num_sweeps(self):
return self.num_sweeps
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# set_num_simulations
# -------------------------------------------------------------------------
def set_num_sweeps(self, value):
super(Runner, self).set_num_sweeps(value)
# -------------------------------------------------------------------------
# do_run
# -------------------------------------------------------------------------
def do_run(self, environment):
# loop over all time steps and do the updating
# For each update step
measurement = Measurement(environment, self)
measurement.open_file()
for i in range(self.num_sweeps):
self.current_step = i
self.updater.do_update(environment)
measurement.write_to_file()
print("***\nThis run had {}s sweeps and {}s simulations".format(self.num_sweeps, environment.static_parameters['num_simulations']))
print("Check the output file that was written as csv in the measurements folder\n***")
# environment.print_parameters()
# agent = Agent()
# print(self.get_identifier())
# print(self.get_num_sweeps())
# print(environment.agents[0])
# print(environment.agents[1])
# parameters={'deposit_rate':-0.02}
# agent.append_parameters(parameters)
# print(agent.get_parameters())
measurement.close_file()
# ------------------------------------------------------------------------
| cogeorg/black_rhino | examples/degroot/src/runner.py | Python | gpl-3.0 | 4,685 |
VERSION = '0.8.7'
| ophiry/dvc | dvc/__init__.py | Python | apache-2.0 | 18 |
import argparse
import re
import colorama
from django.apps import apps
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.core.management.base import BaseCommand, CommandError
from django.core.urlresolvers import reverse
from termcolor import colored
def show_values_style(arg):
special_choices = ['a', 'l']
if arg in special_choices:
return arg
try:
return int(arg)
except ValueError:
raise argparse.ArgumentTypeError("Show values style must be one of '{values}' or an integer".format(
values=', '.join(special_choices)))
class Command(BaseCommand):
help = 'Provides a grep-like command line interface for searching objects in the database'
def add_arguments(self, parser):
parser.add_argument('pattern', type=str, help='Pattern to search for')
parser.add_argument('identifiers', nargs='*', type=str, help='Identifier of a model or field')
parser.add_argument('--show-values', '-s', nargs='?', type=show_values_style, default='l',
help='Turn off showing matching values (default is any line containing a match), ' \
'or provide the mode "a" to show the entire field ' \
'or an integer to show that many characters either side of a match.')
parser.add_argument('--ignore-case', '-i', action='store_true', help='Match case-insensitively')
parser.add_argument('--find-text-fields', '-t', dest='field_type', action='append_const', const='TextField',
help='Search all TextField fields (and subclasses) on a model if no field is specified')
parser.add_argument('--find-char-fields', '-c', dest='field_type', action='append_const', const='CharField',
help='Search all CharField fields (and subclasses) on a model if no field is specified')
parser.add_argument('--find-fields', '-f', dest='field_type', action='append', type=str,
help='Search all fields of this type (and subclasses) on a model if no field is specified')
parser.add_argument('--preset', '-p', help='The name of a preset configuration in DJANGO_GREPDB_PRESETS. ' \
'DJANGO_GREPDB_PRESETS should be a dict of dicts, with each config dict providing ' \
'default values for any number of parser args.')
if apps.is_installed('django.contrib.admin'):
parser.add_argument('--admin-links', '-l', dest='admin_hostname', nargs='*', default=['default'],
help='Generate admin links. Defaults to true, using http://localhost:8000/ as hostname. ' \
'Can be passed one or more hostnames to use instead. If DJANGO_GREPDB_SITES is a ' \
'dict defined in settings, the value of the "default" key will be used as default, ' \
'and keys from it can also be passed to use their values as hostnames. ' \
'Links can be disabled by using this argument without any values.')
self.parser = parser
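    # Example settings (hypothetical values, for illustration only):
    #   DJANGO_GREPDB_PRESETS = {
    #       'emails': {'ignore_case': True, 'field_type': ['EmailField']},
    #   }
    #   DJANGO_GREPDB_SITES = {
    #       'default': 'https://example.com',
    #       'staging': 'https://staging.example.com',
    #   }
    # A preset supplies default values for any of the parser arguments above;
    # a sites entry maps a short reference name to a hostname for admin links.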
def handle(self, **options):
colorama.init()
preset = self.get_preset(options['preset'])
if preset:
self.parser.set_defaults(**preset)
# re-parse the command line arguments with new defaults in place
try:
options = vars(self.parser.parse_args(self.raw_args))
except AttributeError:
if not self._called_from_command_line:
# regular call_command doesn't store raw_args
msg = '--preset mode is not compatible with django.core.management.call_command: you need to ' \
'use django_grepdb.management.call_command instead'
raise CommandError(msg)
else:
# if it was called from the command line, the problem is something unknown
raise
self.pattern = options['pattern']
self.ignore_case = options['ignore_case']
self.show_values = options.get('show_values', False)
self.field_type = options['field_type'] or ['TextField']
self.admin_hostnames = self.get_admin_hostnames(options)
identifiers = options['identifiers']
queries = self.get_queries(identifiers)
for query in queries:
results = self.search(query)
if results.exists():
self.stdout.write(colored(u'\n{model} {field}'.format(model=query['manager'].model, field=query['field_name']),
'cyan', attrs=['bold']))
for result in results:
self.stdout.write(colored(u'{result} (pk={result.pk})'.format(result=result), 'green', attrs=['bold']))
if self.admin_hostnames:
self.stdout.write(self.get_admin_links(result))
if self.show_values is not None: # can't be a truthiness check, as zero is different from no show
self.stdout.write(self.get_value(result, query))
def run_from_argv(self, argv):
# store raw args so that we can re-parse them with new defaults if preset mode is used
self.raw_args = argv[2:]
super(Command, self).run_from_argv(argv)
def get_admin_hostnames(self, options):
from_options = options.get('admin_hostname', False)
if not from_options:
return
from django.contrib.admin import site as admin_site
self.admin_site = admin_site
hostnames = []
for reference in from_options:
hostnames.append(self.get_admin_hostname(reference))
return hostnames
def get_admin_hostname(self, reference):
"""Treats the reference as a hostname if it contains either 'http' or 'localhost'.
If it contains neither, looks up the reference in settings.DJANGO_GREPDB_SITES
"""
if 'http' in reference or 'localhost' in reference:
return reference
try:
hostname = self.get_admin_hostname_from_settings(reference)
except CommandError:
if reference == 'default':
hostname = 'localhost:8000'
else:
raise
return hostname
def get_admin_hostname_from_settings(self, reference):
try:
sites = getattr(settings, 'DJANGO_GREPDB_SITES')
except AttributeError:
msg = u'Reference {} is not recognised as a hostname and DJANGO_GREPDB_SITES is not configured in settings'
raise CommandError(msg.format(reference))
try:
hostname = sites[reference]
except KeyError:
msg = u'Reference {} is not recognised as a hostname and was not found in DJANGO_GREPDB_SITES'
raise CommandError(msg.format(reference))
return hostname
def get_preset(self, preset_name):
if not preset_name:
return None
try:
presets = getattr(settings, 'DJANGO_GREPDB_PRESETS')
except AttributeError:
raise CommandError(u'Preset specified but DJANGO_GREPDB_PRESETS is not configured in settings')
try:
preset = presets[preset_name]
except TypeError:
msg = u'DJANGO_GREPDB_PRESETS is not a dict-like object'
raise CommandError(msg)
except KeyError:
msg = u'Preset "{preset_name}" not found in DJANGO_GREPDB_PRESETS. Available values are: {values}'
raise CommandError(msg.format(preset_name=preset_name, values=', '.join(presets.keys())))
try:
preset.keys()
except AttributeError:
msg = u'Preset "{preset_name}" is not a dict-like object'
raise CommandError(msg.format(preset_name=preset_name))
return preset
def get_queries(self, identifiers):
queries = []
for identifier in identifiers:
queries.extend(self.get_queries_for_identifier(identifier))
return queries
def get_queries_for_identifier(self, identifier):
model, field_names = self.parse_identifier(identifier)
queries = []
for field_name in field_names:
params = self.get_queryset_params(field_name)
queries.append(dict(manager=model._default_manager, params=params, field_name=field_name))
return queries
def search(self, query):
return query['manager'].filter(**query['params'])
def parse_identifier(self, identifier):
parts = identifier.split('.')
app_label, model_name = parts[:2]
field_names = parts[2:]
model = apps.get_model(app_label, model_name)
if not field_names:
field_names = self.get_field_names_for_model(model)
return (model, field_names)
def get_field_names_for_model(self, model):
return [field.name for field in model._meta.fields if field.get_internal_type() in self.field_type]
def get_queryset_params(self, field_name):
lookup_type = 'regex'
if self.ignore_case:
lookup_type = 'i' + lookup_type
return {'{field_name}__{lookup_type}'.format(field_name=field_name, lookup_type=lookup_type): self.pattern}
def get_value(self, result, query):
text = getattr(result, query['field_name'])
show_values = self.show_values
if show_values == 'a':
return self.get_value_all(text)
elif show_values == 'l':
return self.get_value_line(text)
else:
return self.get_value_surrounded(text)
def get_value_all(self, text):
regex_args = [self.pattern, text, re.DOTALL]
if self.ignore_case:
regex_args[2] += re.IGNORECASE
matches = [m.span() for m in re.finditer(*regex_args)]
value = u''
end_of_previous = 0
for start, end in matches:
value = value + text[end_of_previous:start] + colored(text[start:end], 'grey', 'on_yellow')
end_of_previous = end
value = value + text[end_of_previous:] + '\n\n'
return value
def get_value_line(self, text):
value = u''
for line in text.splitlines():
regex_args = [self.pattern, line]
if self.ignore_case:
regex_args.append(re.IGNORECASE)
matches = [m.span() for m in re.finditer(*regex_args)]
if matches:
end_of_previous = 0
for start, end in matches:
value = value + line[end_of_previous:start] + colored(line[start:end], 'grey', 'on_yellow')
end_of_previous = end
value = value + line[end_of_previous:] + '\n\n'
return value
def get_value_surrounded(self, text):
regex_args = [self.pattern, text]
if self.ignore_case:
regex_args.append(re.IGNORECASE)
        chars = self.show_values
        matches = [m.span() for m in re.finditer(*regex_args)]
value = u''
end_of_previous = 0
for start, end in matches:
if end_of_previous and end_of_previous > start:
value = value[:start - end_of_previous]
elif end_of_previous and end_of_previous > start - chars:
value += text[end_of_previous:start]
else:
value += '\n' + text[start - chars:start]
value += colored(text[start:end], 'grey', 'on_yellow') + text[end:end + chars]
end_of_previous = end + chars
value = value.strip() + '\n\n'
return value
def get_admin_links(self, result):
content_type = ContentType.objects.get_for_model(result)
admin_url_pattern = 'admin:{app}_{model}_change'.format(app=content_type.app_label, model=content_type.model)
relative_url = reverse(admin_url_pattern, args=[result.pk])
return '\n'.join([colored(hostname + relative_url, 'green') for hostname in self.admin_hostnames])
def get_version(self):
from ...version import VERSION
return VERSION
| exonian/django-grep-db | build/lib.linux-x86_64-2.7/django_grepdb/management/commands/grepdb.py | Python | mit | 12,308 |
def add_native_methods(clazz):
def copyFromShortArray__java_lang_Object__long__long__long__(a0, a1, a2, a3):
raise NotImplementedError()
def copyToShortArray__long__java_lang_Object__long__long__(a0, a1, a2, a3):
raise NotImplementedError()
def copyFromIntArray__java_lang_Object__long__long__long__(a0, a1, a2, a3):
raise NotImplementedError()
def copyToIntArray__long__java_lang_Object__long__long__(a0, a1, a2, a3):
raise NotImplementedError()
def copyFromLongArray__java_lang_Object__long__long__long__(a0, a1, a2, a3):
raise NotImplementedError()
def copyToLongArray__long__java_lang_Object__long__long__(a0, a1, a2, a3):
raise NotImplementedError()
clazz.copyFromShortArray__java_lang_Object__long__long__long__ = staticmethod(copyFromShortArray__java_lang_Object__long__long__long__)
clazz.copyToShortArray__long__java_lang_Object__long__long__ = staticmethod(copyToShortArray__long__java_lang_Object__long__long__)
clazz.copyFromIntArray__java_lang_Object__long__long__long__ = staticmethod(copyFromIntArray__java_lang_Object__long__long__long__)
clazz.copyToIntArray__long__java_lang_Object__long__long__ = staticmethod(copyToIntArray__long__java_lang_Object__long__long__)
clazz.copyFromLongArray__java_lang_Object__long__long__long__ = staticmethod(copyFromLongArray__java_lang_Object__long__long__long__)
clazz.copyToLongArray__long__java_lang_Object__long__long__ = staticmethod(copyToLongArray__long__java_lang_Object__long__long__)
| laffra/pava | pava/implementation/natives/java/nio/Bits.py | Python | mit | 1,550 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ragged.row_lengths."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import googletest
@test_util.run_all_in_graph_and_eager_modes
class RaggedRowLengthsOp(test_util.TensorFlowTestCase,
parameterized.TestCase):
@parameterized.parameters([
# Docstring Example
dict(
rt_input=[[[3, 1, 4], [1]], [], [[5, 9], [2]], [[6]], []],
expected=[2, 0, 2, 1, 0]),
dict(
rt_input=[[[3, 1, 4], [1]], [], [[5, 9], [2]], [[6]], []],
axis=2,
expected=[[3, 1], [], [2, 1], [1], []]),
# 2D Tensor (1 ragged dimension)
dict(
rt_input=[['a'], ['b', 'c', 'd'], ['e'], [], ['f']],
expected=[1, 3, 1, 0, 1]),
dict(
rt_input=[['a'], ['b', 'c', 'd'], ['e'], [], ['f']],
axis=0,
expected=5),
dict(
rt_input=[['a', 'b', 'c', 'd', 'e', 'f', 'g']],
expected=[7]),
dict(
rt_input=[[], ['a', 'b', 'c', 'd', 'e', 'f', 'g'], []],
expected=[0, 7, 0]),
dict(
rt_input=[],
ragged_rank=1,
expected=[]),
dict(
rt_input=[],
ragged_rank=1,
axis=0,
expected=0),
# 3D Tensor (1 ragged dimension)
dict(
rt_input=[[[1, 2], [3, 4], [5, 6]], [[7, 8], [9, 10]]],
ragged_rank=1,
axis=0,
expected=2),
dict(
rt_input=[[[1, 2], [3, 4], [5, 6]], [[7, 8], [9, 10]]],
ragged_rank=1,
axis=1,
expected=[3, 2]),
dict(
rt_input=[[[1, 2], [3, 4], [5, 6]], [[7, 8], [9, 10]]],
ragged_rank=1,
axis=2,
expected=[[2, 2, 2], [2, 2]],
expected_ragged_rank=1),
# 3D Tensor (2 ragged dimensions)
dict(
rt_input=[[[1, 2], [3, 4, 5], []], [[6, 7, 8, 9], [10]]],
axis=0,
expected=2),
dict(
rt_input=[[[1, 2], [3, 4, 5], []], [[6, 7, 8, 9], [10]]],
axis=-3,
expected=2),
dict(
rt_input=[[[1, 2], [3, 4, 5], []], [[6, 7, 8, 9], [10]]],
axis=1,
expected=[3, 2]),
dict(
rt_input=[[[1, 2], [3, 4, 5], []], [[6, 7, 8, 9], [10]]],
axis=-2,
expected=[3, 2]),
dict(
rt_input=[[[1, 2], [3, 4, 5], []], [[6, 7, 8, 9], [10]]],
axis=2,
expected=[[2, 3, 0], [4, 1]],
expected_ragged_rank=1),
dict(
rt_input=[[[1, 2], [3, 4, 5], []], [[6, 7, 8, 9], [10]]],
axis=-1,
expected=[[2, 3, 0], [4, 1]],
expected_ragged_rank=1),
]) # pyformat: disable
def testRowLengths(self,
rt_input,
expected,
axis=1,
ragged_rank=None,
expected_ragged_rank=None):
rt = ragged_factory_ops.constant(rt_input, ragged_rank=ragged_rank)
lengths = rt.row_lengths(axis)
self.assertAllEqual(lengths, expected)
if expected_ragged_rank is not None:
if isinstance(lengths, ragged_tensor.RaggedTensor):
self.assertEqual(lengths.ragged_rank, expected_ragged_rank)
else:
self.assertEqual(0, expected_ragged_rank)
@parameterized.parameters([
dict( # axis=2 out of bounds: expected -2<=axis<2.
rt_input=[[10, 20], [30]],
axis=2,
exception=(ValueError, errors.InvalidArgumentError)),
dict( # axis=-3 out of bounds: expected -2<=axis<2.
rt_input=[[2, 3, 0], [4, 1, 2]],
axis=-3,
exception=(ValueError, errors.InvalidArgumentError)),
])
def testErrors(self, rt_input, exception, message=None, axis=1):
rt = ragged_factory_ops.constant(rt_input)
with self.assertRaisesRegexp(exception, message):
rt.row_lengths(axis)
if __name__ == '__main__':
googletest.main()
| gunan/tensorflow | tensorflow/python/ops/ragged/ragged_row_lengths_op_test.py | Python | apache-2.0 | 4,937 |
# -*- coding: utf-8 -*-
import os
import json
import logging
from website import settings
logger = logging.getLogger(__name__)
def load_asset_paths():
try:
return json.load(open(settings.ASSET_HASH_PATH))
except IOError:
logger.error('No "webpack-assets.json" file found. You may need to run webpack.')
raise
asset_paths = load_asset_paths()
base_static_path = '/static/public/js/'
def webpack_asset(path, asset_paths=asset_paths):
"""Mako filter that resolves a human-readable asset path to its name on disk
(which may include the hash of the file).
"""
key = path.replace(base_static_path, '').replace('.js', '')
hash_path = asset_paths[key]
return os.path.join(base_static_path, hash_path)
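# Example (hypothetical hash), assuming webpack-assets.json maps
# 'pages/dashboard-page' to 'pages/dashboard-page.abc123.js':
#   webpack_asset('/static/public/js/pages/dashboard-page.js')
#   => '/static/public/js/pages/dashboard-page.abc123.js'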
def resolve_addon_path(config, file_name):
"""Check for addon asset in source directory (e.g. website/addons/dropbox/static');
if file is found, return path to webpack-built asset.
:param AddonConfig config: Addon config object
:param str file_name: Asset file name (e.g. "files.js")
"""
source_path = os.path.join(
settings.ADDON_PATH,
config.short_name,
'static',
file_name,
)
if os.path.exists(source_path):
return os.path.join(
'/',
'static',
'public',
'js',
config.short_name,
file_name,
)
return None
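# Example (hypothetical addon): for an addon config with short_name 'dropbox'
# and file_name 'files.js', the built asset resolves to
# '/static/public/js/dropbox/files.js', provided the source file exists at
# <ADDON_PATH>/dropbox/static/files.js.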
| kushG/osf.io | website/util/paths.py | Python | apache-2.0 | 1,423 |
#!/usr/bin/env python -u
# coding: utf-8
# -------------------------------------------------------------------------- #
# Copyright (c) 2011 MadeiraCloud, All Rights Reserved.
#
# License
# -------------------------------------------------------------------------- #
import os
import sys
import platform
from setuptools import setup, find_packages
from madeiracloud import __copyright__
from madeiracloud import __license__
from madeiracloud import __version__
from madeiracloud import __maintainer__
from madeiracloud import __author__
from madeiracloud import __email__
from madeiracloud import __status__
from madeiracloud import __url__
from madeiracloud import __classifier__
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
def rread(*rnames):
return open(os.path.join(os.path.dirname(__file__), *rnames)).read()
# check user
if os.getuid() != 0:
print >> sys.stderr, 'Please use root to install this package\n'
sys.exit(1)
# check Python's version
if sys.version_info < (2, 6):
print >> sys.stderr, 'Please use Python 2.6 or later\n'
sys.exit(1)
# check OS
if platform.system() != 'Linux':
print >> sys.stderr, 'This package is for Linux only'
sys.exit(1)
# check Distro
distro=platform.linux_distribution()
# check Arch
#arch = platform.machine()
#
#long_description = ("""
#%s
#
#%s
#-----------------------------------------------------------------------
#%s
#""" % (readme, chaneglog, license))
setup(
name = "MadeiraCloud",
version = __version__,
url = __url__,
author = __author__,
author_email = __email__,
license = __license__,
keywords = "MadeiraCloud AWS",
description = ("MadeiraCloud Agent"),
#long_description= read('README'),
classifiers = __classifier__,
packages = ['madeiracloud', 'madeiracloud.script'],
scripts = ['bin/madeira.py'],
install_requires = ['pyinotify'],
	include_package_data = True,
)
if sys.argv[-1] == 'install':
os.system('sh %s/script/%s.sh' % (os.path.dirname(os.path.abspath(__file__)), distro[0].lower()))
print >> sys.stdout, """
Finished! MadeiraCloud has been installed on this machine.
The package is located at /usr/share/madeiracloud
To start, stop and restart the program, use /etc/init.d/madeiracloud
Please visit www.madeiracloud.com for more
---------- Enjoy! ----------
The MadeiraCloud Team
"""
| BillTheBest/MadeiraAgent | setup.py | Python | bsd-3-clause | 2,380 |
# Advection test evolution: convergence test
from models import advection
from bcs import periodic
from simulation import simulation
from methods import minmod_lf
from rk import euler
from grid import grid
import numpy
from matplotlib import pyplot
Ngz = 2
Npoints_all = 40 * 2**numpy.arange(6)
dx_all = 1 / Npoints_all
errors = numpy.zeros((3,len(dx_all)))
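# Rows of `errors` hold the 1-norm, 2-norm and infinity-norm of the error,
# one column per resolution in Npoints_all.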
for i, Npoints in enumerate(Npoints_all):
print(Npoints)
interval = grid([-0.5, 0.5], Npoints, Ngz)
model = advection.advection(v=1,
initial_data = advection.initial_sine(period=1))
sim = simulation(model, interval, minmod_lf, euler, periodic)
sim.evolve(1.0)
errors[0, i] = sim.error_norm(1)
errors[1, i] = sim.error_norm(2)
errors[2, i] = sim.error_norm('inf')
norm_string = ("1", "2", "\infty")
fig = pyplot.figure(figsize=(12,6))
ax = fig.add_subplot(111)
for norm in range(3):
p = numpy.polyfit(numpy.log(dx_all), numpy.log(errors[norm,:]), 1)
ax.loglog(dx_all, errors[norm,:], 'x',
label=r"$\| Error \|_{}$".format(norm_string[norm]))
ax.loglog(dx_all, numpy.exp(p[1])*dx_all**p[0],
label=r"$\propto \Delta x^{{{:.3f}}}$".format(p[0]))
ax.set_xlabel(r"$\Delta x$")
ax.set_ylabel("Error")
ax.legend(loc= "upper left")
pyplot.title("Advection, sine, L-F, Minmod, Euler")
pyplot.show()
| IanHawke/toy-evolve | toy-evolve/advection_sine_minmod_lf_euler_convergence.py | Python | mit | 1,359 |
# Copyright 2013 Georgia Tech Research Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# http://healthcare-robotics.com/
#
# Any robot that wants to use equilibrium point control should implement the functions
# sketched out in the HRLArm and HRLArmKinematics
#
## @package hrl_haptic_mpc
# @author Advait Jain
import numpy as np, math
import copy
from threading import RLock
import roslib; roslib.load_manifest('hrl_haptic_mpc')
try:
import hrl_lib.geometry as hg
except ImportError, e:
print '<hrl_arm.py> WARNING:', e
class HRLArm():
def __init__(self, kinematics):
# object of class derived from HRLArmKinematics
self.kinematics = kinematics
self.ep = None # equilibrium point
self.kp = None # joint stiffness
self.kd = None # joint damping
self.q = None # angles
self.qdot = None # angular velocity
self.joint_names_list = None # joint names
self.lock = RLock()
def get_joint_velocities(self):
with self.lock:
return copy.copy(self.qdot)
def get_joint_angles(self):
with self.lock:
return copy.copy(self.q)
def set_ep(self, *args):
raise RuntimeError('Unimplemented Function')
# publish different viz markers.
def publish_rviz_markers(self):
raise RuntimeError('Unimplemented Function')
def get_ep(self):
with self.lock:
return copy.copy(self.ep)
# returns kp, kd
# np arrays of stiffness and damping of the virtual springs.
def get_joint_impedance(self):
with self.lock:
return copy.copy(self.kp), copy.copy(self.kd)
def get_joint_names(self):
with self.lock:
return copy.copy(self.joint_names_list)
# do we really need this function?
def freeze(self):
self.set_ep(self.ep)
def get_end_effector_pose(self):
return self.kinematics.FK(self.get_joint_angles())
class HRLArmKinematics():
def __init__(self, n_jts):
self.tooltip_pos = np.matrix([0.,0.,0.]).T
self.tooltip_rot = np.matrix(np.eye(3))
self.n_jts = n_jts
# FK without the tooltip
def FK_vanilla(self, q, link_number=None):
raise RuntimeError('Unimplemented Function')
# @param q - array-like (RADIANs)
# @param link_number - perform FK up to this link. (0-n_jts)
# @return pos (3X1) np matrix, rot (3X3) np matrix
def FK(self, q, link_number=None):
if link_number == None:
link_number = self.n_jts
if link_number > self.n_jts:
raise RuntimeError('Link Number is greater than n_jts: %d'%link_number)
pos, rot = self.FK_vanilla(q, link_number)
if link_number == self.n_jts:
tooltip_baseframe = rot * self.tooltip_pos
pos += tooltip_baseframe
rot = rot * self.tooltip_rot
return pos, rot
##
# Computes IK for the tooltip. The desired location is first transformed
# back into the last link's frame and IK is performed on that location.
# @param pos Desired link position (3x1 np matrix)
# @param rot Desired link rotation (3x3 np matrix)
# @param q_guess Estimate of the desired joint angles which seeds the IK solver
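    # Derivation: FK gives p_tip = p_wrist + R_wrist * tooltip_pos and
    # R_tip = R_wrist * tooltip_rot, so inverting for the wrist (last link)
    # frame yields R_wrist = R_tip * tooltip_rot.T and
    # p_wrist = p_tip - R_tip * tooltip_rot.T * tooltip_pos, which is what
    # the two lines below compute before calling IK_vanilla.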
def IK(self, pos, rot, q_guess=None):
last_link_pos = pos - rot * self.tooltip_rot.T * self.tooltip_pos
last_link_rot = rot * self.tooltip_rot.T
return self.IK_vanilla(last_link_pos, last_link_rot, q_guess)
# IK without the tooltip.
def IK_vanilla(self, p, rot, q_guess=None):
raise RuntimeError('Unimplemented Function')
# @param p - 3x1 np matrix
# @param rot - orientation of end effector frame wrt base of the arm.
def IK(self, p, rot, q_guess=None):
# this code should be common to everyone.
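        # NOTE: this second IK definition shadows the tooltip-aware IK defined
        # above in this class; subclasses are expected to supply a working
        # IK/IK_vanilla pair.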
pass
## compute Jacobian at point pos.
def jacobian(self, q, pos=None):
raise RuntimeError('Unimplemented Function')
## return min_array, max_array
def get_joint_limits(self):
raise RuntimeError('Unimplemented Function')
## define tooltip as a 3x1 np matrix in the wrist coord frame.
def set_tooltip(self, p, rot=np.matrix(np.eye(3))):
self.tooltip_pos = copy.copy(p)
self.tooltip_rot = copy.copy(rot)
#----- 2D functions ----------
# return list of 2D points corresponding to the locations of the
# joint axes for a planar arm. Something funky for a spatial arm
# that Advait does not want to put into words.
def arm_config_to_points_list(self, q):
return [self.FK(q, i)[0].A1[0:2] for i in range(len(q)+1)]
# project point onto the arm skeleton in 2D and compute distance
# along it to the end effector.
def distance_from_ee_along_arm(self, q, pt):
p_l = self.arm_config_to_points_list(q)
ee = self.FK(q)[0]
d_ee = hg.distance_along_curve(ee, p_l)
d_pt = hg.distance_along_curve(pt, p_l)
assert(d_ee >= d_pt)
return d_ee - d_pt
# distance of a point from the arm
def distance_from_arm(self, q, pt):
p_l = self.arm_config_to_points_list(q)
return hg.distance_from_curve(pt, p_l)
# is pt at the joint?
# pt - 2x1 or 3x1 np matrix
# return True if distance between a joint and the point projected
# onto the skeleton is <= dist_threshold.
#
# tested only for planar arms. (see test_contact_at_joints.py in
# sandbox_advait_darpa_m3/src/sandbox_advait_darpa_m3/software_simulation)
def is_contact_at_joint(self, pt, q, dist_threshold):
pts_list = [self.FK(q, i)[0].A1 for i in range(len(q)+1)]
proj_pt = hg.project_point_on_curve(pt, pts_list)
# ignore end effector (it is not a joint)
for jt in pts_list[:-1]:
dist = np.linalg.norm(np.matrix(jt).T-proj_pt)
if dist <= dist_threshold:
return True
return False
| gt-ros-pkg/hrl-haptic-manip | hrl_haptic_mpc/src/hrl_haptic_mpc/hrl_arm.py | Python | apache-2.0 | 6,448 |
from . import logger
from chatnet import prep
import pandas as pd
from collections import Counter
from sklearn.externals import joblib
import os
class Pipeline(object):
"""
Transformer helper functions and state checkpoints
to go from text data/labels to model-ready numeric data
"""
def __init__(self, vocab_size=15000,
data_col=None, id_col=None, label_col=None, skip_top=10,
positive_class=None, df=None, message_key=None, **kwargs
):
# message processing
self.data_col = data_col or 'tokens'
self.id_col = id_col or 'id'
self.label_col = label_col or 'labels'
self.message_key = message_key or 'msgs'
self.positive_class = positive_class
if positive_class is None:
self.label_mode = 'multiclass'
self.n_classes = []
else:
self.label_mode = 'binary'
# vocab processing
self.tp = prep.TextPrepper()
self.vocab_size = vocab_size
self.skip_top = skip_top
self.to_matrices_kwargs = kwargs
if df is not None:
self.setup(df)
def _tokenize(self, df, message_key=''):
"""
Iterate over each row's messages (as specified by message_key),
tokenizing by ' ' and cleaning with self.tp.cleaner
"""
def mapper(message_col):
sequence = []
for message in message_col:
sequence += map(self.tp.cleaner, message.split())
return sequence
df[self.data_col] = df[message_key].map(mapper)
def _set_token_data(self, input_df):
df = input_df.copy()
if self.data_col not in df.columns:
self._tokenize(df, message_key=self.message_key)
self.data = pd.DataFrame(df[[self.data_col, self.id_col, self.label_col]])
logger.info("Counting words...")
self.set_word_counts()
def _set_vocabulary(self):
# This is extended by subclasses with special concerns about word_index (eg word embeddings)
self.set_word_index(skip_top=self.skip_top)
def _set_learning_data(self, **to_matrices_kwargs):
to_matrices_kwargs.setdefault('seed', 212)
to_matrices_kwargs.setdefault('test_split', .18)
to_matrices_kwargs.setdefault('chunk_size', 100)
to_matrices_kwargs.setdefault('data_col', self.data_col)
to_matrices_kwargs.setdefault('id_col', self.id_col)
to_matrices_kwargs.setdefault('label_col', self.label_col)
to_matrices_kwargs.setdefault('positive_class', self.positive_class)
logger.info("Making numeric sequences...")
self.learning_data = (X_train, y_train, train_ids), (X_test, y_test, test_ids) = \
self.tp.to_matrices(self.data, self.word_index, **to_matrices_kwargs)
def setup(self, df):
self._set_token_data(df)
self._set_vocabulary()
self._set_learning_data(**self.to_matrices_kwargs)
def set_word_counts(self):
"""
Map :tp.cleaner over token lists in :data
and return a counter of cleaned :word_counts
"""
word_counts = Counter()
def increment(word):
word_counts[word] += 1
self.data[self.data_col].map(lambda r: list(map(increment, r)))
self.word_counts = word_counts
def set_word_index(self, skip_top=None, nonembeddable=None):
"""
Accepts a dictionary of word counts
        Selects the top :vocab_size words, after skipping the :skip_top most common
        Optionally provide a set of words you don't have word vectors for and want to omit entirely
Always includes special words (returned by self.cleaner) prepended with $
Returns dict like {word: ranking by count}
"""
skip_top = 10 if skip_top is None else skip_top
vocab = []
for (ix, (w, _)) in enumerate(self.word_counts.most_common(self.vocab_size)):
if w.startswith('$'):
if ix < skip_top:
skip_top += 1
vocab.append(w)
elif (not nonembeddable or w not in nonembeddable) and ix > skip_top:
vocab.append(w)
self.word_index = {v: ix for ix, v in enumerate(vocab)}
def persist(self, name, path):
for attr in self.persisted_attrs:
joblib.dump(getattr(self, attr), os.path.join(path, '_'.join([attr, name])))
@classmethod
def restore(cls, name, path):
pipe = cls()
for attr in cls.persisted_attrs:
setattr(pipe, attr, joblib.load(os.path.join(path, '_'.join([attr, name]))))
return pipe
def get_message_generator(message_key, kind='wide'):
if kind == 'wide':
# iterate over columns in message_key yielding from row
def message_generator(row):
for key in message_key:
yield row[key]
elif kind == 'dense':
# iterate over array of messages in row[message_key]
def message_generator(row):
for cell in row[message_key]:
yield cell
return message_generator
| bhtucker/chatnet | chatnet/pipes.py | Python | mit | 5,136 |
"""
planex-pin: Generate a new override spec file for a given package
"""
import argparse
import argcomplete
import os
import sys
import re
import glob
import logging
import tempfile
import hashlib
import shutil
import json
import rpm
from planex.util import run
from planex.util import setup_sigint_handler
from planex.util import add_common_parser_options
from planex.util import setup_logging
def describe(repo, treeish="HEAD"):
"""
Return an RPM compatible version string for a git repo at a given commit
"""
dotgitdir = os.path.join(repo, ".git")
if not os.path.exists(dotgitdir):
raise Exception("Pin target is not a git repository: '%s'" % repo)
# First, get the hash of the commit
cmd = ["git", "--git-dir=%s" % dotgitdir, "rev-parse", treeish]
sha = run(cmd)['stdout'].strip()
# Now lets describe that hash
cmd = ["git", "--git-dir=%s" % dotgitdir, "describe", "--tags", sha]
description = run(cmd, check=False)['stdout'].strip()
# if there are no tags, use the number of commits
if description == "":
cmd = ["git", "--git-dir=%s" % dotgitdir, "log", "--oneline", sha]
commits = run(cmd)['stdout'].strip()
description = str(len(commits.splitlines()))
# replace '-' with '+' in description to not confuse rpm
match = re.search("[^0-9]*", description)
matchlen = len(match.group())
return description[matchlen:].replace('-', '+')
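# Example (hypothetical tag): if `git describe --tags` prints
# "v1.2.3-4-gdeadbee", the leading non-digit prefix is stripped and the
# dashes are replaced, giving "1.2.3+4+gdeadbee".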
def archive(repo, commit_hash, prefix, target_dir):
"""
Archive a git repo at a given commit with a specified version prefix.
Returns the path to a tar.gz to be used as a source for building an RPM.
"""
dotgitdir = os.path.join(repo, ".git")
prefix = "%s-%s" % (os.path.basename(repo), prefix)
path = os.path.join(target_dir, "%s.tar" % prefix)
run(["git", "--git-dir=%s" % dotgitdir, "archive", commit_hash,
"--prefix=%s/" % prefix, "-o", path])
run(["gzip", "--no-name", "-f", path])
return path + ".gz"
def pinned_spec_of_spec(spec_path, src_map):
"""
Given a path to a spec file, and a map of source number to (version, path),
return the contents of a new spec file for the pinned package. The new spec
    file will have the source paths overridden and the Release tag set to
    a sensible combination of the versions of the source pin targets. This
    conforms to the Fedora Project packaging guidelines for what to put
into the Release Tag (see commit message for relevant link).
"""
logging.debug("Generating pinned spec for %s from source_map %s",
spec_path, src_map)
spec_in = open(spec_path)
spec_contents = spec_in.readlines()
spec_in.close()
pinned_spec = []
for line in spec_contents:
# replace the source url(s)
for src_num in src_map.iterkeys():
match = re.match(r'^([Ss]ource%s*:\s+)(.+)\n' % src_num, line)
if match:
source_url = "file://" + os.path.abspath(src_map[src_num][1])
logging.info("Replacing Source%s of %s with %s",
src_num, spec_path, src_map[src_num][1])
line = match.group(1) + source_url + "\n"
# replace the release
match = re.match(r'^([Rr]elease:\s+)([^%]+)(.*)\n', line)
if match:
# combine the source override versions to get the package release
release_stamps = ["s{0}+{1}".format(n, v)
for (n, (v, _)) in src_map.items()]
# Note that %% expands to just %...
pin_release = "%s+%s" % (match.group(2),
"_".join(sorted(release_stamps)))
logging.info("Replacing Release %s of %s with %s",
match.group(2), spec_path, pin_release)
line = match.group(1) + pin_release + match.group(3) + "\n"
pinned_spec.append(line)
return "".join(pinned_spec)
def version_of_spec_file(path):
"""
Return the version defined in the spec file at path.
"""
spec = rpm.ts().parseSpec(path)
return spec.sourceHeader['version']
def hash_of_file(path):
"""
Return the md5sum of the contents of a file at a given path.
"""
md5sum = hashlib.md5()
with open(path, 'r') as in_f:
md5sum.update(in_f.read())
return md5sum.digest()
def maybe_copy(src, dst, force=False):
"""
Copy a file from src to dst only if their contents differ.
"""
if force or not (os.path.exists(dst) and
hash_of_file(src) == hash_of_file(dst)):
shutil.copy(src, dst)
def update(args):
"""
Entry point for the 'update' sub-command.
Refreshes all the pins from the pin definition file, updating the source
tarball and spec file only if required.
"""
if os.path.exists(args.pins_dir):
if not os.path.isdir(args.pins_dir):
raise Exception(
"Output directory exists and is not a directory: '%s'" %
args.pins_dir)
else:
os.makedirs(args.pins_dir)
pins = parse_pins_file(args)
for (spec, pinned_sources) in pins.iteritems():
source_map = {}
orig_version = version_of_spec_file(spec)
for (src_num, pin_target) in pinned_sources.iteritems():
# we're assuming for now that the target is a git repository
repo, _, treeish = pin_target.partition('#')
src_version = describe(repo, treeish if treeish else None)
logging.debug("Source%s pin target is at version %s",
src_num, src_version)
tmpdir = tempfile.mkdtemp(prefix='planex-pin')
tmp_archive = archive(repo, treeish, orig_version, tmpdir)
tar_name = os.path.basename(tmp_archive).replace(orig_version,
src_version)
tar_path = os.path.join(args.pins_dir, tar_name)
maybe_copy(tmp_archive, tar_path, args.force)
shutil.rmtree(tmpdir)
source_map[src_num] = (src_version, tar_path)
out_spec_path = os.path.join(args.pins_dir, os.path.basename(spec))
with open(out_spec_path, 'w+') as out_spec_file:
out_spec_file.write(pinned_spec_of_spec(spec, source_map))
def parse_pins_file(args):
"""
Return a dictionary of spec files to pin targets from the pin definition
file. The file can have comments (lines that begin with a '#') and then
the pin definitions are a json dump of the dictionary
"""
lines = []
if os.access(args.pins_file, os.R_OK):
with open(args.pins_file, 'r') as pins_file:
for line in pins_file.readlines():
if not re.match(r'^\s*#', line):
lines.append(line)
return json.loads(''.join(lines)) if lines else {}
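# Example pins file contents (hypothetical paths):
#   # This file is auto-generated by planex-pin
#   {"SPECS/foo.spec": {"0": "/repos/foo#master"}}
# Comment lines are skipped; the rest is parsed as JSON mapping each spec
# file to a {source number: pin target} dictionary.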
def serialise_pins(pins, path):
"""
Dump the pin definitions to a file at a given path.
"""
preamble = [
"# This file is auto-generated by planex-pin\n",
"# Do not edit directly, instead use planex-pin {add,list,remove}\n"
]
with open(path, 'w+') as pins_file:
pins_file.writelines(preamble)
json.dump(pins, pins_file,
sort_keys=True, indent=2, separators=(',', ': '))
def list_pins(args):
"""
Entry point for the 'list' sub-command.
Prints to stdout the pins in the pin definition file.
"""
pins = parse_pins_file(args)
for (spec, pinned_sources) in pins.iteritems():
for (source_number, target) in pinned_sources.iteritems():
print "* %s : Source%s -> %s" % (spec, source_number, target)
def add_pin(args):
"""
Entry point for the 'add' sub-command.
Checks if the spec file exists and add a definition to the pins file.
"""
if not os.access(args.spec_file, os.R_OK):
sys.stderr.write("error: File does not exist: '%s'\n" % args.spec_file)
sys.exit(1)
pins = parse_pins_file(args)
normalised_path = os.path.relpath(args.spec_file)
if normalised_path in pins:
if args.source in pins[normalised_path] and not args.force:
sys.exit("error: Package already has source pinned:\n"
"* %s : Source%s -> %s\n" %
(normalised_path, args.source,
pins[normalised_path][args.source]))
pins[normalised_path].update({args.source: args.target})
else:
pins[normalised_path] = {args.source: args.target}
serialise_pins(pins, args.pins_file)
def remove_pin(args):
"""
Entry point for the 'remove' sub-command.
Removes the pin definition from the pins file and touches the original spec
file to ensure dependencies are regenerated. The next 'rules' command will
ensure that the override spec file is removed.
"""
pins = parse_pins_file(args)
normalised_path = os.path.relpath(args.spec_file)
if normalised_path in pins:
if args.source in pins[normalised_path]:
del pins[normalised_path][args.source]
if not pins[normalised_path]:
del pins[normalised_path]
serialise_pins(pins, args.pins_file)
os.utime(args.spec_file, None)
def print_rules(args):
"""
Entry point for the 'rules' sub-command.
Prints to stdout the Makefile snippet required for pinning updates and
removes any override spec files for removed pins.
"""
pins = parse_pins_file(args)
for (spec, pinned_sources) in pins.iteritems():
pinned_spec_path = os.path.join(args.pins_dir, os.path.basename(spec))
repos = [pin.partition('#')[0] for pin in pinned_sources.values()]
repo_paths = [os.path.abspath(repo) for repo in repos]
gitdir_paths = [os.path.join(p, ".git/**/*") for p in repo_paths]
dependencies = "%s %s $(wildcard %s)" % (args.pins_file, spec,
" ".join(gitdir_paths))
print "%s: %s" % (args.deps_path, pinned_spec_path)
print "%s: %s" % (pinned_spec_path, dependencies)
print "\tplanex-pin --pins-file {0} --pins-dir {1} update".format(
args.pins_file, args.pins_dir)
expected_pin_specs = [os.path.join(args.pins_dir, path) for path in pins]
for pin_spec_path in glob.glob(os.path.join(args.pins_dir, '*.spec')):
if pin_spec_path not in expected_pin_specs:
os.remove(pin_spec_path)
def parse_args_or_exit(argv=None):
"""
Parse command line options
"""
# top-level parser
parser = argparse.ArgumentParser(
description='Pin a package to a specific version')
add_common_parser_options(parser)
parser.add_argument('--pins-file', default='pins',
help='Pins file (default: pins)')
parser.add_argument('--pins-dir', default='PINS',
help='Directory of pin artifcats (default: PINS)')
subparsers = parser.add_subparsers(title='COMMANDS')
# parser for the 'update' command
parser_update = subparsers.add_parser('update', help='Refresh a given pin')
parser_update.add_argument('--force', '-f', action='store_true',
help="Don't copy archive if unchanged")
parser_update.set_defaults(func=update)
# parser for the 'list' command
parser_list = subparsers.add_parser('list', help='List active pins')
parser_list.set_defaults(func=list_pins)
# parser for the 'add' command
parser_add = subparsers.add_parser('add', help='Add a new pin definition')
parser_add.add_argument('--force', '-f', action='store_true',
help='Override any existing pin definition')
parser_add.add_argument('--source', default="0",
help='Which source number to pin. (default: 0)')
parser_add.add_argument('spec_file', help='Spec file to pin')
parser_add.add_argument('target',
help='Pin target: <path-to-git-repo>#<tree-ish>')
parser_add.set_defaults(func=add_pin)
# parser for the 'remove' command
parser_remove = subparsers.add_parser('remove', help='Remove a pin')
parser_remove.add_argument('--source', default="0",
help='Which source to unpin. (default: 0)')
parser_remove.add_argument('spec_file', help='Spec file to un-pin')
parser_remove.set_defaults(func=remove_pin)
# parser for the 'rules' command
parser_rules = subparsers.add_parser('rules', help='Print pin make rules')
parser_rules.add_argument('deps_path', help='Path to deps file')
parser_rules.set_defaults(func=print_rules)
argcomplete.autocomplete(parser)
return parser.parse_args(argv)
def main(argv):
"""
Main function
"""
setup_sigint_handler()
args = parse_args_or_exit(argv)
setup_logging(args)
args.func(args)
def _main():
"""
Entry point for setuptools CLI wrapper
"""
main(sys.argv[1:])
# Entry point when run directly
if __name__ == "__main__":
_main()
| djs55/planex | planex/pin.py | Python | lgpl-2.1 | 13,103 |
"""Custom widgets for Facet."""
from django import forms
class ArrayFieldSelectMultiple(forms.SelectMultiple):
"""Allow selecting multiple items."""
def __init__(self, *args, **kwargs):
self.delimiter = kwargs.pop('delimiter', ',')
super(ArrayFieldSelectMultiple, self).__init__(*args, **kwargs)
def render_options(self, choices, value):
if isinstance(value, basestring):
value = value.split(self.delimiter)
return super(ArrayFieldSelectMultiple, self).render_options(choices, value)
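    # Example usage (hypothetical form field): pair the widget with a
    # comma-delimited ArrayField so stored values round-trip through the
    # multi-select, e.g.
    #   forms.CharField(widget=ArrayFieldSelectMultiple(
    #       choices=TAG_CHOICES, delimiter=','))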
| ProjectFacet/facet | project/editorial/widgets.py | Python | mit | 561 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2019-03-13 11:54
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('questions', '0041_data_migration'),
]
operations = [
migrations.AlterField(
model_name='catalog',
name='comment',
field=models.TextField(blank=True, help_text='Additional internal information about this catalog.', verbose_name='Comment'),
),
migrations.AlterField(
model_name='catalog',
name='key',
field=models.SlugField(blank=True, help_text='The internal identifier of this catalog.', max_length=128, verbose_name='Key'),
),
migrations.AlterField(
model_name='catalog',
name='title_lang1',
field=models.CharField(blank=True, help_text='The title for this catalog in the primary language.', max_length=256, verbose_name='Title (primary)'),
),
migrations.AlterField(
model_name='catalog',
name='title_lang2',
field=models.CharField(blank=True, help_text='The title for this catalog in the secondary language.', max_length=256, verbose_name='Title (secondary)'),
),
migrations.AlterField(
model_name='catalog',
name='title_lang3',
field=models.CharField(blank=True, help_text='The title for this catalog in the tertiary language.', max_length=256, verbose_name='Title (tertiary)'),
),
migrations.AlterField(
model_name='catalog',
name='title_lang4',
field=models.CharField(blank=True, help_text='The title for this catalog in the quaternary language.', max_length=256, verbose_name='Title (quaternary)'),
),
migrations.AlterField(
model_name='catalog',
name='title_lang5',
field=models.CharField(blank=True, help_text='The title for this catalog in the quinary language.', max_length=256, verbose_name='Title (quinary)'),
),
migrations.AlterField(
model_name='catalog',
name='uri',
field=models.URLField(blank=True, help_text='The Uniform Resource Identifier of this catalog (auto-generated).', max_length=640, verbose_name='URI'),
),
migrations.AlterField(
model_name='catalog',
name='uri_prefix',
field=models.URLField(blank=True, help_text='The prefix for the URI of this catalog.', max_length=256, verbose_name='URI Prefix'),
),
migrations.AlterField(
model_name='questionset',
name='comment',
field=models.TextField(blank=True, help_text='Additional internal information about this questionset.', verbose_name='Comment'),
),
migrations.AlterField(
model_name='questionset',
name='help_lang1',
field=models.TextField(blank=True, help_text='The help text for this questionset in the primary language.', verbose_name='Help (primary)'),
),
migrations.AlterField(
model_name='questionset',
name='help_lang2',
field=models.TextField(blank=True, help_text='The help text for this questionset in the secondary language.', verbose_name='Help (secondary)'),
),
migrations.AlterField(
model_name='questionset',
name='help_lang3',
field=models.TextField(blank=True, help_text='The help text for this questionset in the tertiary language.', verbose_name='Help (tertiary)'),
),
migrations.AlterField(
model_name='questionset',
name='help_lang4',
field=models.TextField(blank=True, help_text='The help text for this questionset in the quaternary language.', verbose_name='Help (quaternary)'),
),
migrations.AlterField(
model_name='questionset',
name='help_lang5',
field=models.TextField(blank=True, help_text='The help text for this questionset in the quinary language.', verbose_name='Help (quinary)'),
),
migrations.AlterField(
model_name='questionset',
name='key',
field=models.SlugField(blank=True, help_text='The internal identifier of this questionset.', max_length=128, verbose_name='Key'),
),
migrations.AlterField(
model_name='questionset',
name='path',
field=models.CharField(blank=True, help_text='The path part of the URI of this questionset (auto-generated).', max_length=512, verbose_name='Path'),
),
migrations.AlterField(
model_name='questionset',
name='title_lang1',
field=models.CharField(blank=True, help_text='The title for this questionset in the primary language.', max_length=256, verbose_name='Title (primary)'),
),
migrations.AlterField(
model_name='questionset',
name='title_lang2',
field=models.CharField(blank=True, help_text='The title for this questionset in the secondary language.', max_length=256, verbose_name='Title (secondary)'),
),
migrations.AlterField(
model_name='questionset',
name='title_lang3',
field=models.CharField(blank=True, help_text='The title for this questionset in the tertiary language.', max_length=256, verbose_name='Title (tertiary)'),
),
migrations.AlterField(
model_name='questionset',
name='title_lang4',
field=models.CharField(blank=True, help_text='The title for this questionset in the quaternary language.', max_length=256, verbose_name='Title (quaternary)'),
),
migrations.AlterField(
model_name='questionset',
name='title_lang5',
field=models.CharField(blank=True, help_text='The title for this questionset in the quinary language.', max_length=256, verbose_name='Title (quinary)'),
),
migrations.AlterField(
model_name='questionset',
name='uri',
field=models.URLField(blank=True, help_text='The Uniform Resource Identifier of this questionset (auto-generated).', max_length=640, verbose_name='URI'),
),
migrations.AlterField(
model_name='questionset',
name='uri_prefix',
field=models.URLField(blank=True, help_text='The prefix for the URI of this questionset.', max_length=256, verbose_name='URI Prefix'),
),
migrations.AlterField(
model_name='section',
name='comment',
field=models.TextField(blank=True, help_text='Additional internal information about this section.', verbose_name='Comment'),
),
migrations.AlterField(
model_name='section',
name='key',
field=models.SlugField(blank=True, help_text='The internal identifier of this section.', max_length=128, verbose_name='Key'),
),
migrations.AlterField(
model_name='section',
name='path',
field=models.CharField(blank=True, help_text='The path part of the URI of this section (auto-generated).', max_length=512, verbose_name='Label'),
),
migrations.AlterField(
model_name='section',
name='title_lang1',
field=models.CharField(blank=True, help_text='The title for this section in the primary language.', max_length=256, verbose_name='Title (primary)'),
),
migrations.AlterField(
model_name='section',
name='title_lang2',
field=models.CharField(blank=True, help_text='The title for this section in the secondary language.', max_length=256, verbose_name='Title (secondary)'),
),
migrations.AlterField(
model_name='section',
name='title_lang3',
field=models.CharField(blank=True, help_text='The title for this section in the tertiary language.', max_length=256, verbose_name='Title (tertiary)'),
),
migrations.AlterField(
model_name='section',
name='title_lang4',
field=models.CharField(blank=True, help_text='The title for this section in the quaternary language.', max_length=256, verbose_name='Title (quaternary)'),
),
migrations.AlterField(
model_name='section',
name='title_lang5',
field=models.CharField(blank=True, help_text='The title for this section in the quinary language.', max_length=256, verbose_name='Title (quinary)'),
),
migrations.AlterField(
model_name='section',
name='uri',
field=models.URLField(blank=True, help_text='The Uniform Resource Identifier of this section (auto-generated).', max_length=640, verbose_name='URI'),
),
migrations.AlterField(
model_name='section',
name='uri_prefix',
field=models.URLField(blank=True, help_text='The prefix for the URI of this section.', max_length=256, verbose_name='URI Prefix'),
),
]
| rdmorganiser/rdmo | rdmo/questions/migrations/0042_remove_null_true.py | Python | apache-2.0 | 9,265 |
import binascii
import copy
import ctypes
import itertools
import logging
import os
import sys
import threading
import time
import archinfo
import cffi # lmao
import claripy
import pyvex
from angr.engines.vex.claripy import ccall
from angr.sim_state import SimState
from .. import sim_options as options
from ..errors import (SimMemoryError, SimSegfaultError, SimUnicornError,
SimUnicornUnsupport, SimValueError)
from ..misc.testing import is_testing
from .plugin import SimStatePlugin
l = logging.getLogger(name=__name__)
ffi = cffi.FFI()
try:
import unicorn
except ImportError:
l.warning("Unicorn is not installed. Support disabled.")
unicorn = None
class MEM_PATCH(ctypes.Structure):
"""
struct mem_update_t
"""
MEM_PATCH._fields_ = [
('address', ctypes.c_uint64),
('length', ctypes.c_uint64),
('next', ctypes.POINTER(MEM_PATCH))
]
class TRANSMIT_RECORD(ctypes.Structure):
"""
struct transmit_record_t
"""
_fields_ = [
('data', ctypes.c_void_p),
('count', ctypes.c_uint32)
]
class TaintEntityEnum:
"""
taint_entity_enum_t
"""
TAINT_ENTITY_REG = 0
TAINT_ENTITY_TMP = 1
TAINT_ENTITY_MEM = 2
TAINT_ENTITY_NONE = 3
class MemoryValue(ctypes.Structure):
"""
struct memory_value_t
"""
_MAX_MEM_ACCESS_SIZE = 8
_fields_ = [
('address', ctypes.c_uint64),
('value', ctypes.c_uint8 * _MAX_MEM_ACCESS_SIZE),
('size', ctypes.c_uint64),
('is_value_symbolic', ctypes.c_bool)
]
class RegisterValue(ctypes.Structure):
"""
struct register_value_t
"""
_MAX_REGISTER_BYTE_SIZE = 32
_fields_ = [
('offset', ctypes.c_uint64),
('value', ctypes.c_uint8 * _MAX_REGISTER_BYTE_SIZE),
('size', ctypes.c_int64)
]
class InstrDetails(ctypes.Structure):
"""
struct sym_instr_details_t
"""
_fields_ = [
('instr_addr', ctypes.c_uint64),
('has_memory_dep', ctypes.c_bool),
('memory_values', ctypes.POINTER(MemoryValue)),
('memory_values_count', ctypes.c_uint64),
]
class BlockDetails(ctypes.Structure):
"""
struct sym_block_details_ret_t
"""
_fields_ = [
('block_addr', ctypes.c_uint64),
('block_size', ctypes.c_uint64),
('symbolic_instrs', ctypes.POINTER(InstrDetails)),
('symbolic_instrs_count', ctypes.c_uint64),
('register_values', ctypes.POINTER(RegisterValue)),
('register_values_count', ctypes.c_uint64),
]
class STOP:
"""
enum stop_t
"""
STOP_NORMAL = 0
STOP_STOPPOINT = 1
STOP_ERROR = 2
STOP_SYSCALL = 3
STOP_EXECNONE = 4
STOP_ZEROPAGE = 5
STOP_NOSTART = 6
STOP_SEGFAULT = 7
STOP_ZERO_DIV = 8
STOP_NODECODE = 9
STOP_HLT = 10
STOP_VEX_LIFT_FAILED = 11
STOP_SYMBOLIC_CONDITION = 12
STOP_SYMBOLIC_PC = 13
STOP_SYMBOLIC_READ_ADDR = 14
STOP_SYMBOLIC_READ_SYMBOLIC_TRACKING_DISABLED = 15
STOP_SYMBOLIC_WRITE_ADDR = 16
STOP_SYMBOLIC_BLOCK_EXIT_CONDITION = 17
STOP_SYMBOLIC_BLOCK_EXIT_TARGET = 18
STOP_UNSUPPORTED_STMT_PUTI = 19
STOP_UNSUPPORTED_STMT_STOREG = 20
STOP_UNSUPPORTED_STMT_LOADG = 21
STOP_UNSUPPORTED_STMT_CAS = 22
STOP_UNSUPPORTED_STMT_LLSC = 23
STOP_UNSUPPORTED_STMT_DIRTY = 24
STOP_UNSUPPORTED_EXPR_GETI = 25
STOP_UNSUPPORTED_STMT_UNKNOWN = 26
STOP_UNSUPPORTED_EXPR_UNKNOWN = 27
STOP_UNKNOWN_MEMORY_WRITE_SIZE = 28
STOP_SYMBOLIC_MEM_DEP_NOT_LIVE = 29
STOP_SYSCALL_ARM = 30
STOP_SYMBOLIC_MEM_DEP_NOT_LIVE_CURR_BLOCK = 31
STOP_X86_CPUID = 32
stop_message = {}
stop_message[STOP_NORMAL] = "Reached maximum steps"
stop_message[STOP_STOPPOINT] = "Hit a stop point"
stop_message[STOP_ERROR] = "Something wrong"
stop_message[STOP_SYSCALL] = "Unable to handle syscall"
stop_message[STOP_EXECNONE] = "Fetching empty page"
stop_message[STOP_ZEROPAGE] = "Accessing zero page"
stop_message[STOP_NOSTART] = "Failed to start"
stop_message[STOP_SEGFAULT] = "Permissions or mapping error"
stop_message[STOP_ZERO_DIV] = "Divide by zero"
stop_message[STOP_NODECODE] = "Instruction decoding error"
stop_message[STOP_HLT] = "hlt instruction encountered"
stop_message[STOP_VEX_LIFT_FAILED] = "Failed to lift block to VEX"
stop_message[STOP_SYMBOLIC_CONDITION] = "Symbolic condition for ITE"
stop_message[STOP_SYMBOLIC_PC] = "Instruction pointer became symbolic"
stop_message[STOP_SYMBOLIC_READ_ADDR] = "Attempted to read from symbolic address"
stop_message[STOP_SYMBOLIC_READ_SYMBOLIC_TRACKING_DISABLED]= ("Attempted to read symbolic data from memory but "
"symbolic tracking is disabled")
stop_message[STOP_SYMBOLIC_WRITE_ADDR] = "Attempted to write to symbolic address"
stop_message[STOP_SYMBOLIC_BLOCK_EXIT_CONDITION] = "Guard condition of block's exit statement is symbolic"
stop_message[STOP_SYMBOLIC_BLOCK_EXIT_TARGET] = "Target of default exit of block is symbolic"
stop_message[STOP_UNSUPPORTED_STMT_PUTI] = "Symbolic taint propagation for PutI statement not yet supported"
stop_message[STOP_UNSUPPORTED_STMT_STOREG] = "Symbolic taint propagation for StoreG statement not yet supported"
stop_message[STOP_UNSUPPORTED_STMT_LOADG] = "Symbolic taint propagation for LoadG statement not yet supported"
stop_message[STOP_UNSUPPORTED_STMT_CAS] = "Symbolic taint propagation for CAS statement not yet supported"
stop_message[STOP_UNSUPPORTED_STMT_LLSC] = "Symbolic taint propagation for LLSC statement not yet supported"
stop_message[STOP_UNSUPPORTED_STMT_DIRTY] = "Symbolic taint propagation for Dirty statement not yet supported"
stop_message[STOP_UNSUPPORTED_EXPR_GETI] = "Symbolic taint propagation for GetI expression not yet supported"
stop_message[STOP_UNSUPPORTED_STMT_UNKNOWN]= "Canoo propagate symbolic taint for unsupported VEX statement type"
stop_message[STOP_UNSUPPORTED_EXPR_UNKNOWN]= "Cannot propagate symbolic taint for unsupported VEX expression"
stop_message[STOP_UNKNOWN_MEMORY_WRITE_SIZE] = "Cannot determine size of memory write; likely because unicorn didn't"
stop_message[STOP_SYMBOLIC_MEM_DEP_NOT_LIVE] = "A symbolic memory dependency on stack is no longer in scope"
stop_message[STOP_SYSCALL_ARM] = "ARM syscalls are currently not supported by SimEngineUnicorn"
stop_message[STOP_SYMBOLIC_MEM_DEP_NOT_LIVE_CURR_BLOCK] = ("An instruction in current block overwrites a symbolic "
"value needed for re-executing some instruction in same "
"block")
stop_message[STOP_X86_CPUID] = "Block executes cpuid which should be handled in VEX engine"
symbolic_stop_reasons = [STOP_SYMBOLIC_CONDITION, STOP_SYMBOLIC_PC, STOP_SYMBOLIC_READ_ADDR,
STOP_SYMBOLIC_READ_SYMBOLIC_TRACKING_DISABLED, STOP_SYMBOLIC_WRITE_ADDR,
STOP_SYMBOLIC_BLOCK_EXIT_CONDITION, STOP_SYMBOLIC_BLOCK_EXIT_TARGET, STOP_SYSCALL_ARM,
STOP_SYMBOLIC_MEM_DEP_NOT_LIVE_CURR_BLOCK, STOP_X86_CPUID]
unsupported_reasons = [STOP_UNSUPPORTED_STMT_PUTI, STOP_UNSUPPORTED_STMT_STOREG, STOP_UNSUPPORTED_STMT_LOADG,
STOP_UNSUPPORTED_STMT_CAS, STOP_UNSUPPORTED_STMT_LLSC, STOP_UNSUPPORTED_STMT_DIRTY,
STOP_UNSUPPORTED_STMT_UNKNOWN, STOP_UNSUPPORTED_EXPR_UNKNOWN, STOP_VEX_LIFT_FAILED]
@staticmethod
def name_stop(num):
for item in dir(STOP):
if item.startswith('STOP_') and getattr(STOP, item) == num:
return item
raise ValueError(num)
@staticmethod
def get_stop_msg(stop_reason):
if stop_reason in STOP.stop_message:
return STOP.stop_message[stop_reason]
return "Unknown stop reason"
class StopDetails(ctypes.Structure):
"""
struct stop_details_t
"""
_fields_ = [
('stop_reason', ctypes.c_int),
('block_addr', ctypes.c_uint64),
('block_size', ctypes.c_uint64),
]
class SimOSEnum:
"""
enum simos_t
"""
SIMOS_CGC = 0
SIMOS_LINUX = 1
SIMOS_OTHER = 2
#
# Memory mapping errors - only used internally
#
class MemoryMappingError(Exception): # pylint: disable=missing-class-docstring
pass
class AccessingZeroPageError(MemoryMappingError): # pylint: disable=missing-class-docstring
pass
class FetchingZeroPageError(MemoryMappingError): # pylint: disable=missing-class-docstring
pass
class SegfaultError(MemoryMappingError): # pylint: disable=missing-class-docstring
pass
class MixedPermissonsError(MemoryMappingError): # pylint: disable=missing-class-docstring
pass
#
# This annotation is added to constraints that Unicorn generates in aggressive concretization mode
#
class AggressiveConcretizationAnnotation(claripy.SimplificationAvoidanceAnnotation):
# pylint: disable=missing-class-docstring
def __init__(self, addr):
claripy.SimplificationAvoidanceAnnotation.__init__(self)
self.unicorn_start_addr = addr
#
# Because Unicorn leaks like crazy, we use one Uc object per thread...
#
_unicounter = itertools.count()
class Uniwrapper(unicorn.Uc if unicorn is not None else object):
# pylint: disable=non-parent-init-called,missing-class-docstring
def __init__(self, arch, cache_key, thumb=False):
l.debug("Creating unicorn state!")
self.arch = arch
self.cache_key = cache_key
self.wrapped_mapped = set()
self.wrapped_hooks = set()
self.id = None
if thumb:
uc_mode = arch.uc_mode_thumb
else:
uc_mode = arch.uc_mode
unicorn.Uc.__init__(self, arch.uc_arch, uc_mode)
def hook_add(self, htype, callback, user_data=None, begin=1, end=0, arg1=0):
h = unicorn.Uc.hook_add(self, htype, callback, user_data=user_data, begin=begin, end=end, arg1=arg1)
#l.debug("Hook: %s,%s -> %s", htype, callback.__name__, h)
self.wrapped_hooks.add(h)
return h
def hook_del(self, h):
#l.debug("Clearing hook %s", h)
h = unicorn.Uc.hook_del(self, h)
self.wrapped_hooks.discard(h)
return h
def mem_map(self, addr, size, perms=7):
#l.debug("Mapping %d bytes at %#x", size, addr)
m = unicorn.Uc.mem_map(self, addr, size, perms=perms)
self.wrapped_mapped.add((addr, size))
return m
def mem_map_ptr(self, addr, size, perms, ptr):
m = unicorn.Uc.mem_map_ptr(self, addr, size, perms, ptr)
self.wrapped_mapped.add((addr, size))
return m
def mem_unmap(self, addr, size):
#l.debug("Unmapping %d bytes at %#x", size, addr)
m = unicorn.Uc.mem_unmap(self, addr, size)
self.wrapped_mapped.discard((addr, size))
return m
def mem_reset(self):
#l.debug("Resetting memory.")
for addr,size in self.wrapped_mapped:
#l.debug("Unmapping %d bytes at %#x", size, addr)
unicorn.Uc.mem_unmap(self, addr, size)
self.wrapped_mapped.clear()
def hook_reset(self):
#l.debug("Resetting hooks.")
for h in self.wrapped_hooks:
#l.debug("Clearing hook %s", h)
unicorn.Uc.hook_del(self, h)
self.wrapped_hooks.clear()
def reset(self):
self.mem_reset()
#self.hook_reset()
#l.debug("Reset complete.")
_unicorn_tls = threading.local()
_unicorn_tls.uc = None
class _VexCacheInfo(ctypes.Structure):
"""
VexCacheInfo struct from vex
"""
_fields_ = [
("num_levels", ctypes.c_uint),
("num_caches", ctypes.c_uint),
("caches", ctypes.c_void_p),
("icaches_maintain_coherence", ctypes.c_bool),
]
class _VexArchInfo(ctypes.Structure):
"""
VexArchInfo struct from vex
"""
_fields_ = [
("hwcaps", ctypes.c_uint),
("endness", ctypes.c_int),
("hwcache_info", _VexCacheInfo),
("ppc_icache_line_szB", ctypes.c_int),
("ppc_dcbz_szB", ctypes.c_uint),
("ppc_dcbzl_szB", ctypes.c_uint),
("arm64_dMinLine_lg2_szB", ctypes.c_uint),
("arm64_iMinLine_lg2_szB", ctypes.c_uint),
("x86_cr0", ctypes.c_uint),
]
def _locate_lib(module: str, library: str) -> str:
"""
Attempt to find a native library without using pkg_resources, and only fall back to pkg_resources upon failures.
This is because "import pkg_resources" is slow.
:return: The full path of the native library.
"""
base_dir = os.path.join(os.path.dirname(__file__), "..")
attempt = os.path.join(base_dir, library)
if os.path.isfile(attempt):
return attempt
import pkg_resources # pylint:disable=import-outside-toplevel
return pkg_resources.resource_filename(module, os.path.join('lib', library))
def _load_native():
if sys.platform == 'darwin':
libfile = 'angr_native.dylib'
elif sys.platform in ('win32', 'cygwin'):
libfile = 'angr_native.dll'
else:
libfile = 'angr_native.so'
try:
angr_path = _locate_lib("angr", os.path.join("lib", libfile))
h = ctypes.CDLL(angr_path)
VexArch = ctypes.c_int
uc_err = ctypes.c_int
state_t = ctypes.c_void_p
stop_t = ctypes.c_int
uc_engine_t = ctypes.c_void_p
def _setup_prototype(handle, func, restype, *argtypes):
realname = 'simunicorn_' + func
_setup_prototype_explicit(handle, realname, restype, *argtypes)
setattr(handle, func, getattr(handle, realname))
def _setup_prototype_explicit(handle, func, restype, *argtypes):
getattr(handle, func).restype = restype
getattr(handle, func).argtypes = argtypes
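        # e.g. _setup_prototype(h, 'start', uc_err, state_t, ctypes.c_uint64, ctypes.c_uint64)
        # declares the exported simunicorn_start() with those ctypes signatures and aliases it as h.start.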
#_setup_prototype_explicit(h, 'logSetLogLevel', None, ctypes.c_uint64)
_setup_prototype(h, 'alloc', state_t, uc_engine_t, ctypes.c_uint64, ctypes.c_uint64)
_setup_prototype(h, 'dealloc', None, state_t)
_setup_prototype(h, 'hook', None, state_t)
_setup_prototype(h, 'unhook', None, state_t)
_setup_prototype(h, 'start', uc_err, state_t, ctypes.c_uint64, ctypes.c_uint64)
_setup_prototype(h, 'stop', None, state_t, stop_t)
_setup_prototype(h, 'sync', ctypes.POINTER(MEM_PATCH), state_t)
_setup_prototype(h, 'bbl_addrs', ctypes.POINTER(ctypes.c_uint64), state_t)
_setup_prototype(h, 'stack_pointers', ctypes.POINTER(ctypes.c_uint64), state_t)
_setup_prototype(h, 'bbl_addr_count', ctypes.c_uint64, state_t)
_setup_prototype(h, 'syscall_count', ctypes.c_uint64, state_t)
_setup_prototype(h, 'step', ctypes.c_uint64, state_t)
_setup_prototype(h, 'activate_page', None, state_t, ctypes.c_uint64, ctypes.c_void_p, ctypes.c_void_p)
_setup_prototype(h, 'set_last_block_details', None, state_t, ctypes.c_uint64, ctypes.c_int64, ctypes.c_int64)
_setup_prototype(h, 'set_stops', None, state_t, ctypes.c_uint64, ctypes.POINTER(ctypes.c_uint64))
_setup_prototype(h, 'cache_page', ctypes.c_bool, state_t, ctypes.c_uint64, ctypes.c_uint64, ctypes.c_char_p, ctypes.c_uint64)
_setup_prototype(h, 'uncache_pages_touching_region', None, state_t, ctypes.c_uint64, ctypes.c_uint64)
_setup_prototype(h, 'clear_page_cache', None, state_t)
_setup_prototype(h, 'enable_symbolic_reg_tracking', None, state_t, VexArch, _VexArchInfo)
_setup_prototype(h, 'disable_symbolic_reg_tracking', None, state_t)
_setup_prototype(h, 'symbolic_register_data', None, state_t, ctypes.c_uint64, ctypes.POINTER(ctypes.c_uint64))
_setup_prototype(h, 'get_symbolic_registers', ctypes.c_uint64, state_t, ctypes.POINTER(ctypes.c_uint64))
_setup_prototype(h, 'is_interrupt_handled', ctypes.c_bool, state_t)
_setup_prototype(h, 'set_cgc_syscall_details', None, state_t, ctypes.c_uint32, ctypes.c_uint64, ctypes.c_uint32, ctypes.c_uint64)
_setup_prototype(h, 'process_transmit', ctypes.POINTER(TRANSMIT_RECORD), state_t, ctypes.c_uint32)
_setup_prototype(h, 'set_tracking', None, state_t, ctypes.c_bool, ctypes.c_bool)
_setup_prototype(h, 'executed_pages', ctypes.c_uint64, state_t)
_setup_prototype(h, 'in_cache', ctypes.c_bool, state_t, ctypes.c_uint64)
_setup_prototype(h, 'set_map_callback', None, state_t, unicorn.unicorn.UC_HOOK_MEM_INVALID_CB)
_setup_prototype(h, 'set_vex_to_unicorn_reg_mappings', None, state_t, ctypes.POINTER(ctypes.c_uint64),
ctypes.POINTER(ctypes.c_uint64), ctypes.POINTER(ctypes.c_uint64), ctypes.c_uint64)
_setup_prototype(h, 'set_artificial_registers', None, state_t, ctypes.POINTER(ctypes.c_uint64), ctypes.c_uint64)
_setup_prototype(h, 'get_count_of_blocks_with_symbolic_instrs', ctypes.c_uint64, state_t)
_setup_prototype(h, 'get_details_of_blocks_with_symbolic_instrs', None, state_t, ctypes.POINTER(BlockDetails))
_setup_prototype(h, 'get_stop_details', StopDetails, state_t)
_setup_prototype(h, 'set_register_blacklist', None, state_t, ctypes.POINTER(ctypes.c_uint64), ctypes.c_uint64)
_setup_prototype(h, 'set_cpu_flags_details', None, state_t, ctypes.POINTER(ctypes.c_uint64),
ctypes.POINTER(ctypes.c_uint64), ctypes.POINTER(ctypes.c_uint64), ctypes.c_uint64)
_setup_prototype(h, 'set_fd_bytes', state_t, ctypes.c_uint64, ctypes.c_void_p, ctypes.c_uint64, ctypes.c_uint64)
l.info('native plugin is enabled')
return h
except (OSError, AttributeError) as e:
l.warning('failed loading "%s", unicorn support disabled (%s)', libfile, e)
raise ImportError("Unable to import native SimUnicorn support") from e
try:
_UC_NATIVE = _load_native()
#_UC_NATIVE.logSetLogLevel(2)
except ImportError:
_UC_NATIVE = None
class Unicorn(SimStatePlugin):
'''
setup the unicorn engine for a state
'''
UC_CONFIG = {} # config cache for each arch
def __init__(
self,
syscall_hooks=None,
cache_key=None,
unicount=None,
symbolic_var_counts=None,
symbolic_inst_counts=None,
concretized_asts=None,
always_concretize=None,
never_concretize=None,
concretize_at=None,
concretization_threshold_memory=None,
concretization_threshold_registers=None,
concretization_threshold_instruction=None,
cooldown_symbolic_stop=2,
cooldown_unsupported_stop=2,
cooldown_nonunicorn_blocks=100,
cooldown_stop_point=1,
max_steps=1000000,
):
"""
Initializes the Unicorn plugin for angr. This plugin handles communication with
UnicornEngine.
"""
SimStatePlugin.__init__(self)
self._syscall_pc = None
self.jumpkind = 'Ijk_Boring'
self.error = None
self.errno = 0
self.trap_ip = None
self.cache_key = hash(self) if cache_key is None else cache_key
# cooldowns to avoid thrashing in and out of unicorn
# the countdown vars are the CURRENT counter that is counting down
# when they hit zero execution will start
# the cooldown vars are the settings for what the countdown should start at
# the val is copied from cooldown to countdown on check fail
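        # e.g. with cooldown_symbolic_stop=2, a symbolic stop sets countdown_symbolic_stop to 2;
        # unicorn stays disabled until the engine's eligibility check has counted that back down
        # to zero (the decrement itself happens outside this plugin).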
self.cooldown_nonunicorn_blocks = cooldown_nonunicorn_blocks
self.cooldown_symbolic_stop = cooldown_symbolic_stop
self.cooldown_unsupported_stop = cooldown_unsupported_stop
self.cooldown_stop_point = cooldown_stop_point
self.countdown_nonunicorn_blocks = 0
self.countdown_symbolic_stop = 0
self.countdown_unsupported_stop = 0
self.countdown_stop_point = 0
# the default step limit
self.max_steps = max_steps
self.steps = 0
self._mapped = 0
self._uncache_regions = []
self._symbolic_offsets = None
self.gdt = None
# following variables are used in python level hook
# we cannot see native hooks from python
self.syscall_hooks = { } if syscall_hooks is None else syscall_hooks
# native state in libsimunicorn
self._uc_state = None
self.stop_reason = None
self.stop_details = None
self.stop_message = None
# this is the counter for the unicorn count
self._unicount = next(_unicounter) if unicount is None else unicount
#
# Selective concretization stuff
#
# this is the number of times specific symbolic variables have kicked us out of unicorn
self.symbolic_var_counts = { } if symbolic_var_counts is None else symbolic_var_counts
# this is the number of times we've been kept out of unicorn at given instructions
self.symbolic_inst_counts = { } if symbolic_inst_counts is None else symbolic_inst_counts
        # these are thresholds for the number of times that we tolerate being kept out of unicorn
# before we start concretizing
self.concretization_threshold_memory = concretization_threshold_memory
self.concretization_threshold_registers = concretization_threshold_registers
self.concretization_threshold_instruction = concretization_threshold_instruction
# these are sets of names of variables that should either always or never
# be concretized
self.always_concretize = set() if always_concretize is None else always_concretize
self.never_concretize = set() if never_concretize is None else never_concretize
self.concretize_at = set() if concretize_at is None else concretize_at
# this is a record of the ASTs for which we've added concretization constraints
self._concretized_asts = set() if concretized_asts is None else concretized_asts
# the address to use for concrete transmits
self.cgc_transmit_addr = None
# the address for CGC receive
self.cgc_receive_addr = None
self.time = None
# Concrete bytes of open fds
self.fd_bytes = {}
self._bullshit_cb = ctypes.cast(unicorn.unicorn.UC_HOOK_MEM_INVALID_CB(self._hook_mem_unmapped), unicorn.unicorn.UC_HOOK_MEM_INVALID_CB)
self._skip_next_callback = False
@SimStatePlugin.memo
def copy(self, _memo):
u = Unicorn(
syscall_hooks=dict(self.syscall_hooks),
cache_key=self.cache_key,
#unicount=self._unicount,
symbolic_var_counts = dict(self.symbolic_var_counts),
symbolic_inst_counts = dict(self.symbolic_inst_counts),
concretized_asts = set(self._concretized_asts),
always_concretize = set(self.always_concretize),
never_concretize = set(self.never_concretize),
concretize_at = set(self.concretize_at),
concretization_threshold_memory = self.concretization_threshold_memory,
concretization_threshold_registers = self.concretization_threshold_registers,
concretization_threshold_instruction = self.concretization_threshold_instruction,
cooldown_nonunicorn_blocks=self.cooldown_nonunicorn_blocks,
cooldown_symbolic_stop=self.cooldown_symbolic_stop,
cooldown_unsupported_stop=self.cooldown_unsupported_stop,
max_steps=self.max_steps,
)
u.countdown_nonunicorn_blocks = self.countdown_nonunicorn_blocks
u.countdown_symbolic_stop = self.countdown_symbolic_stop
u.countdown_unsupported_stop = self.countdown_unsupported_stop
u.countdown_stop_point = self.countdown_stop_point
u.cgc_receive_addr = self.cgc_receive_addr
u.cgc_transmit_addr = self.cgc_transmit_addr
u._uncache_regions = list(self._uncache_regions)
u.gdt = self.gdt
return u
def merge(self, others, merge_conditions, common_ancestor=None): # pylint: disable=unused-argument
self.cooldown_nonunicorn_blocks = max(
self.cooldown_nonunicorn_blocks,
max(o.cooldown_nonunicorn_blocks for o in others)
)
self.cooldown_symbolic_stop = max(
self.cooldown_symbolic_stop,
max(o.cooldown_symbolic_stop for o in others)
)
self.cooldown_unsupported_stop = max(
self.cooldown_unsupported_stop,
max(o.cooldown_unsupported_stop for o in others)
)
self.countdown_nonunicorn_blocks = max(
self.countdown_nonunicorn_blocks,
max(o.countdown_nonunicorn_blocks for o in others)
)
self.countdown_symbolic_stop = max(
self.countdown_symbolic_stop,
max(o.countdown_symbolic_stop for o in others)
)
self.countdown_unsupported_stop = max(
self.countdown_unsupported_stop,
max(o.countdown_unsupported_stop for o in others)
)
self.countdown_stop_point = max(
self.countdown_stop_point,
max(o.countdown_stop_point for o in others)
)
# get a fresh unicount, just in case
self._unicount = next(_unicounter)
# keep these guys, since merging them sounds like a pain
#self.symbolic_var_counts
#self.symbolic_inst_counts
        # these are thresholds for the number of times that we tolerate being kept out of unicorn
# before we start concretizing
def merge_nullable_min(*args):
nonnull = [a for a in args if a is not None]
if not nonnull:
return None
return min(nonnull)
self.concretization_threshold_memory = merge_nullable_min(self.concretization_threshold_memory, *(o.concretization_threshold_memory for o in others))
self.concretization_threshold_registers = merge_nullable_min(self.concretization_threshold_registers, *(o.concretization_threshold_registers for o in others))
self.concretization_threshold_instruction = merge_nullable_min(self.concretization_threshold_instruction, *(o.concretization_threshold_instruction for o in others))
# these are sets of names of variables that should either always or never
# be concretized
self.always_concretize.union(*[o.always_concretize for o in others])
self.never_concretize.union(*[o.never_concretize for o in others])
self.concretize_at.union(*[o.concretize_at for o in others])
# intersect these so that we know to add future constraints properly
self._concretized_asts.intersection(*[o._concretized_asts for o in others])
# I guess always lie to the static analysis?
return False
def widen(self, others): # pylint: disable=unused-argument
l.warning("Can't widen the unicorn plugin!")
def __getstate__(self):
d = dict(self.__dict__)
del d['_bullshit_cb']
del d['_uc_state']
del d['cache_key']
del d['_unicount']
return d
def __setstate__(self, s):
self.__dict__.update(s)
self._bullshit_cb = ctypes.cast(unicorn.unicorn.UC_HOOK_MEM_INVALID_CB(self._hook_mem_unmapped), unicorn.unicorn.UC_HOOK_MEM_INVALID_CB)
self._unicount = next(_unicounter)
self._uc_state = None
self.cache_key = hash(self)
_unicorn_tls.uc = None
def set_state(self, state):
SimStatePlugin.set_state(self, state)
if self._is_mips32:
self._unicount = next(_unicounter)
@property
def _reuse_unicorn(self):
return not self._is_mips32
@property
def uc(self):
new_id = next(_unicounter)
is_thumb = self.state.arch.qemu_name == 'arm' and self.state.arch.is_thumb(self.state.addr)
if (
not hasattr(_unicorn_tls, "uc") or
_unicorn_tls.uc is None or
_unicorn_tls.uc.arch != self.state.arch or
_unicorn_tls.uc.cache_key != self.cache_key
):
_unicorn_tls.uc = Uniwrapper(self.state.arch, self.cache_key, thumb=is_thumb)
elif _unicorn_tls.uc.id != self._unicount:
if not self._reuse_unicorn:
_unicorn_tls.uc = Uniwrapper(self.state.arch, self.cache_key, thumb=is_thumb)
else:
#l.debug("Reusing unicorn state!")
_unicorn_tls.uc.reset()
else:
#l.debug("Reusing unicorn state!")
pass
_unicorn_tls.uc.id = new_id
self._unicount = new_id
return _unicorn_tls.uc
@staticmethod
def delete_uc():
_unicorn_tls.uc = None
@property
def _uc_regs(self):
return self.state.arch.uc_regs
@property
def _uc_prefix(self):
return self.state.arch.uc_prefix
@property
def _uc_const(self):
return self.state.arch.uc_const
def _setup_unicorn(self):
if self.state.arch.uc_mode is None:
raise SimUnicornUnsupport("unsupported architecture %r" % self.state.arch)
def set_last_block_details(self, details):
_UC_NATIVE.set_last_block_details(self._uc_state, details["addr"], details["curr_count"], details["tot_count"])
def set_stops(self, stop_points):
_UC_NATIVE.set_stops(self._uc_state,
ctypes.c_uint64(len(stop_points)),
(ctypes.c_uint64 * len(stop_points))(*map(ctypes.c_uint64, stop_points))
)
def set_tracking(self, track_bbls, track_stack):
_UC_NATIVE.set_tracking(self._uc_state, track_bbls, track_stack)
def hook(self):
#l.debug('adding native hooks')
_UC_NATIVE.hook(self._uc_state) # prefer to use native hooks
self.uc.hook_add(unicorn.UC_HOOK_MEM_UNMAPPED, self._hook_mem_unmapped, None, 1)
arch = self.state.arch.qemu_name
if arch == 'x86_64':
self.uc.hook_add(unicorn.UC_HOOK_INTR, self._hook_intr_x86, None, 1, 0)
self.uc.hook_add(unicorn.UC_HOOK_INSN, self._hook_syscall_x86_64, None, arg1=self._uc_const.UC_X86_INS_SYSCALL)
elif arch == 'i386':
self.uc.hook_add(unicorn.UC_HOOK_INTR, self._hook_intr_x86, None, 1, 0)
elif arch == 'mips':
self.uc.hook_add(unicorn.UC_HOOK_INTR, self._hook_intr_mips, None, 1, 0)
elif arch == 'mipsel':
self.uc.hook_add(unicorn.UC_HOOK_INTR, self._hook_intr_mips, None, 1, 0)
elif arch == 'arm':
# EDG says: Unicorn's ARM support has no concept of interrupts.
# This is because interrupts are not a part of the ARM ISA per se, and interrupt controllers
# are left to the vendor to provide.
# TODO: This is not true for CortexM. Revisit when Tobi's NVIC implementation gets upstreamed.
pass
else:
raise SimUnicornUnsupport
def _hook_intr_mips(self, uc, intno, user_data):
self.trap_ip = self.uc.reg_read(unicorn.mips_const.UC_MIPS_REG_PC)
if intno == 17: # EXCP_SYSCALL
sysno = uc.reg_read(self._uc_regs['v0'])
pc = uc.reg_read(self._uc_regs['pc'])
l.debug('hit sys_%d at %#x', sysno, pc)
self._syscall_pc = pc
self._handle_syscall(uc, user_data)
else:
l.warning('unhandled interrupt %d', intno)
_UC_NATIVE.stop(self._uc_state, STOP.STOP_ERROR)
def _hook_intr_x86(self, uc, intno, user_data):
if _UC_NATIVE.is_interrupt_handled(self._uc_state):
return
if self.state.arch.bits == 32:
self.trap_ip = self.uc.reg_read(unicorn.x86_const.UC_X86_REG_EIP)
else:
self.trap_ip = self.uc.reg_read(unicorn.x86_const.UC_X86_REG_RIP)
# https://wiki.osdev.org/Exceptions
if intno == 0:
# divide by zero
_UC_NATIVE.stop(self._uc_state, STOP.STOP_ZERO_DIV)
elif intno == 0x80:
if self.state.arch.bits == 32:
self._hook_syscall_i386(uc, user_data)
else:
self._hook_syscall_x86_64(uc, user_data)
else:
l.warning('unhandled interrupt %d', intno)
_UC_NATIVE.stop(self._uc_state, STOP.STOP_ERROR)
def _hook_syscall_x86_64(self, uc, user_data):
sysno = uc.reg_read(self._uc_regs['rax'])
pc = uc.reg_read(self._uc_regs['rip'])
l.debug('hit sys_%d at %#x', sysno, pc)
self._syscall_pc = pc + 2 # skip syscall instruction
self._handle_syscall(uc, user_data)
def _hook_syscall_i386(self, uc, user_data):
sysno = uc.reg_read(self._uc_regs['eax'])
pc = uc.reg_read(self._uc_regs['eip'])
l.debug('hit sys_%d at %#x', sysno, pc)
self._syscall_pc = pc
if not self._quick_syscall(sysno):
self._handle_syscall(uc, user_data)
def _quick_syscall(self, sysno):
if sysno in self.syscall_hooks:
self.syscall_hooks[sysno](self.state)
return True
else:
return False
def _handle_syscall(self, uc, user_data): #pylint:disable=unused-argument
        # unicorn does not support syscall, we should give up emulation
# and send back to SimProcedure. (ignore is always False)
l.info('stop emulation')
self.jumpkind = 'Ijk_Sys_syscall'
_UC_NATIVE.stop(self._uc_state, STOP.STOP_SYSCALL)
def _concretize(self, d):
cd = self.state.solver.eval_to_ast(d, 1)[0]
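        # constrain d to its concretization exactly once per AST; the annotation records the IP at
        # which this happened and is meant to keep claripy from simplifying the constraint away.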
if hash(d) not in self._concretized_asts:
constraint = (d == cd).annotate(AggressiveConcretizationAnnotation(self.state.regs.ip))
self.state.add_constraints(constraint)
self._concretized_asts.add(hash(d))
return cd
def _symbolic_passthrough(self, d):
if not d.symbolic:
return d
elif options.UNICORN_AGGRESSIVE_CONCRETIZATION in self.state.options:
return self._concretize(d)
elif len(d.variables & self.never_concretize) > 0:
return d
elif d.variables.issubset(self.always_concretize):
return self._concretize(d)
elif self.state.solver.eval(self.state.ip) in self.concretize_at:
return self._concretize(d)
else:
return d
def _report_symbolic_blocker(self, d, from_where):
if options.UNICORN_THRESHOLD_CONCRETIZATION in self.state.options:
if self.concretization_threshold_instruction is not None:
addr = self.state.solver.eval(self.state.ip)
count = self.symbolic_inst_counts.get(addr, 0)
l.debug("... inst count for %s: %d", addr, count)
self.symbolic_inst_counts[addr] = count + 1
if count >= self.concretization_threshold_instruction:
self.concretize_at.add(addr)
threshold = (
self.concretization_threshold_memory if from_where == 'mem' else
self.concretization_threshold_registers
)
if threshold is None:
return
for v in d.variables:
old_count = self.symbolic_var_counts.get(v, 0)
l.debug("... %s: %d", v, old_count)
self.symbolic_var_counts[v] = old_count + 1
if old_count >= threshold:
self.always_concretize.add(v)
def _process_value(self, d, from_where):
"""
Pre-process an AST for insertion into unicorn.
:param d: the AST
:param from_where: the ID of the memory region it comes from ('mem' or 'reg')
:returns: the value to be inserted into Unicorn, or None
"""
if len(d.annotations):
l.debug("Blocking annotated AST.")
return None
elif not d.symbolic:
return d
else:
l.debug("Processing AST with variables %s.", d.variables)
dd = self._symbolic_passthrough(d)
if not dd.symbolic:
if d.symbolic:
l.debug("... concretized")
return dd
elif from_where == 'reg' and options.UNICORN_SYM_REGS_SUPPORT in self.state.options:
l.debug("... allowing symbolic register")
return dd
else:
l.debug("... denied")
return None
def _hook_mem_unmapped(self, uc, access, address, size, value, user_data): #pylint:disable=unused-argument
"""
This callback is called when unicorn needs to access data that's not yet present in memory.
"""
if user_data == 1:
self._skip_next_callback = True
elif self._skip_next_callback:
self._skip_next_callback = False
return True
start = address & ~0xfff
needed_pages = 2 if address - start + size > 0x1000 else 1
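        # e.g. a 4-byte access at 0x1fff straddles a page boundary: start = 0x1000 and
        # 0x1fff - 0x1000 + 4 = 0x1003 > 0x1000, so two pages are needed.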
attempt_pages = 10
for pageno in range(attempt_pages):
page_addr = (start + pageno * 0x1000) & ((1 << self.state.arch.bits) - 1)
if page_addr == 0:
if pageno >= needed_pages:
break
if options.UNICORN_ZEROPAGE_GUARD in self.state.options:
self.error = 'accessing zero page (%#x)' % access
l.warning(self.error)
_UC_NATIVE.stop(self._uc_state, STOP.STOP_ZEROPAGE)
return False
l.info('mmap [%#x, %#x] because %d', page_addr, page_addr + 0xfff, access)
try:
self._map_one_page(uc, page_addr)
except SegfaultError:
# this is the unicorn segfault error. idk why this would show up
_UC_NATIVE.stop(self._uc_state, STOP.STOP_SEGFAULT)
return False
except SimSegfaultError:
_UC_NATIVE.stop(self._uc_state, STOP.STOP_SEGFAULT)
return False
except unicorn.UcError as e:
if e.errno != 11:
self.error = str(e)
_UC_NATIVE.stop(self._uc_state, STOP.STOP_ERROR)
return False
l.info("...already mapped :)")
break
except SimMemoryError as e:
if pageno >= needed_pages:
l.info("...never mind")
break
else:
self.error = str(e)
_UC_NATIVE.stop(self._uc_state, STOP.STOP_ERROR)
return False
return True
def _map_one_page(self, _uc, addr):
# allow any SimMemory errors to propagate upward. they will be caught immediately above
perm = self.state.memory.permissions(addr)
if perm.op != 'BVV':
perm = 7
elif options.ENABLE_NX not in self.state.options:
perm = perm.args[0] | 4
else:
perm = perm.args[0]
# this should return two memoryviews
# if they are writable they are direct references to the state backing store and can be mapped directly
data, bitmap = self.state.memory.concrete_load(addr, 0x1000, with_bitmap=True, writing=(perm & 2) != 0)
if not bitmap:
raise SimMemoryError('No bytes available in memory? when would this happen...')
if bitmap.readonly:
# old-style mapping, do it via copy
self.uc.mem_map(addr, 0x1000, perm)
# huge hack. why doesn't ctypes let you pass memoryview as void*?
unicorn.unicorn._uc.uc_mem_write(self.uc._uch, addr, ctypes.cast(int(ffi.cast('uint64_t', ffi.from_buffer(data))), ctypes.c_void_p), len(data))
#self.uc.mem_write(addr, data)
self._mapped += 1
_UC_NATIVE.activate_page(self._uc_state, addr, int(ffi.cast('uint64_t', ffi.from_buffer(bitmap))), None)
else:
# new-style mapping, do it directly
self.uc.mem_map_ptr(addr, 0x1000, perm, int(ffi.cast('uint64_t', ffi.from_buffer(data))))
self._mapped += 1
_UC_NATIVE.activate_page(self._uc_state, addr, int(ffi.cast('uint64_t', ffi.from_buffer(bitmap))), int(ffi.cast('unsigned long', ffi.from_buffer(data))))
def _get_details_of_blocks_with_symbolic_instrs(self):
def _get_register_values(register_values):
for register_value in register_values:
# Convert the register value in bytes to number of appropriate size and endianness
reg_name = self.state.arch.register_size_names[(register_value.offset, register_value.size)]
if self.state.arch.register_endness == archinfo.Endness.LE:
reg_value = int.from_bytes(register_value.value, "little")
else:
reg_value = int.from_bytes(register_value.value, "big")
reg_value = reg_value & (pow(2, register_value.size * 8) - 1)
yield (reg_name, reg_value)
def _get_memory_values(memory_values):
for memory_value in memory_values:
yield {"address": memory_value.address, "value": bytes(memory_value.value[:memory_value.size]),
"size": memory_value.size, "symbolic": memory_value.is_value_symbolic}
def _get_instr_details(symbolic_instrs):
for instr in symbolic_instrs:
instr_entry = {"instr_addr": instr.instr_addr, "mem_dep": []}
if instr.has_memory_dep:
instr_entry["mem_dep"] = _get_memory_values(instr.memory_values[:instr.memory_values_count])
yield instr_entry
block_count = _UC_NATIVE.get_count_of_blocks_with_symbolic_instrs(self._uc_state)
if block_count == 0:
return
block_details_list = (BlockDetails * block_count)()
_UC_NATIVE.get_details_of_blocks_with_symbolic_instrs(self._uc_state, block_details_list)
for block_details in block_details_list:
entry = {"block_addr": block_details.block_addr, "block_size": block_details.block_size, "registers": {}}
entry["registers"] = _get_register_values(block_details.register_values[:block_details.register_values_count])
entry["instrs"] = _get_instr_details(block_details.symbolic_instrs[:block_details.symbolic_instrs_count])
yield entry
def uncache_region(self, addr, length):
self._uncache_regions.append((addr, length))
def clear_page_cache(self):
self._uncache_regions = [] # this is no longer needed, everything has been uncached
_UC_NATIVE.clear_page_cache()
@property
def _is_mips32(self):
"""
        There seem to be weird issues with unicorn-engine support on MIPS32 code (see commit 01126bf7). As a result,
we test if the current architecture is MIPS32 in several places, and if so, we perform some extra steps, like
re-creating the thread-local UC object.
:return: True if the current architecture is MIPS32, False otherwise.
:rtype: bool
"""
return self.state.arch.name == "MIPS32"
def setup(self):
if self._is_mips32 and options.COPY_STATES not in self.state.options:
# we always re-create the thread-local UC object for MIPS32 even if COPY_STATES is disabled in state
# options. this is to avoid some weird bugs in unicorn (e.g., it reports stepping 1 step while in reality it
# did not step at all).
self.delete_uc()
self._setup_unicorn()
try:
self.set_regs()
except SimValueError:
# reset the state and re-raise
self.uc.reset()
raise
if self.state.os_name == "CGC":
simos_val = SimOSEnum.SIMOS_CGC
elif self.state.os_name == "Linux":
simos_val = SimOSEnum.SIMOS_LINUX
else:
simos_val = SimOSEnum.SIMOS_OTHER
# tricky: using unicorn handle from unicorn.Uc object
self._uc_state = _UC_NATIVE.alloc(self.uc._uch, self.cache_key, simos_val)
if options.UNICORN_SYM_REGS_SUPPORT in self.state.options and \
options.UNICORN_AGGRESSIVE_CONCRETIZATION not in self.state.options:
vex_archinfo = copy.deepcopy(self.state.arch.vex_archinfo)
vex_archinfo['hwcache_info']['caches'] = 0
vex_archinfo['hwcache_info'] = _VexCacheInfo(**vex_archinfo['hwcache_info'])
_UC_NATIVE.enable_symbolic_reg_tracking(
self._uc_state,
getattr(pyvex.pvc, self.state.arch.vex_arch),
_VexArchInfo(**vex_archinfo),
)
if self._symbolic_offsets:
l.debug("Sybmolic offsets: %s", self._symbolic_offsets)
sym_regs_array = (ctypes.c_uint64 * len(self._symbolic_offsets))(*map(ctypes.c_uint64, self._symbolic_offsets))
_UC_NATIVE.symbolic_register_data(self._uc_state, len(self._symbolic_offsets), sym_regs_array)
else:
_UC_NATIVE.symbolic_register_data(self._uc_state, 0, None)
# set (cgc, for now) transmit and receive syscall handler
if self.state.has_plugin('cgc'):
if options.UNICORN_HANDLE_CGC_TRANSMIT_SYSCALL in self.state.options:
if self.cgc_transmit_addr is None:
l.error("You haven't set the address for concrete transmits!!!!!!!!!!!")
self.cgc_transmit_addr = 0
if options.UNICORN_HANDLE_CGC_RECEIVE_SYSCALL in self.state.options:
if self.cgc_receive_addr is None:
l.error("You haven't set the address for receive syscall!!!!!!!!!!!!!!")
self.cgc_receive_addr = 0
else:
# Set stdin bytes in native interface
self.fd_bytes[0] = bytearray(self.state.posix.fd.get(0).concretize()[0])
_UC_NATIVE.set_cgc_syscall_details(self._uc_state, 2, self.cgc_transmit_addr, 3, self.cgc_receive_addr)
# set memory map callback so we can call it explicitly
_UC_NATIVE.set_map_callback(self._uc_state, self._bullshit_cb)
# activate gdt page, which was written/mapped during set_regs
if self.gdt is not None:
_UC_NATIVE.activate_page(self._uc_state, self.gdt.addr, bytes(0x1000), None)
# Pass all concrete fd bytes to native interface so that it can handle relevant syscalls
for fd_num, fd_data in self.fd_bytes.items():
fd_bytes_p = int(ffi.cast('uint64_t', ffi.from_buffer(memoryview(fd_data))))
read_pos = self.state.solver.eval(self.state.posix.fd.get(fd_num).read_pos)
_UC_NATIVE.set_fd_bytes(self._uc_state, fd_num, fd_bytes_p, len(fd_data), read_pos)
# Initialize list of artificial VEX registers
artificial_regs_list = self.state.arch.artificial_registers_offsets
artificial_regs_array = (ctypes.c_uint64 * len(artificial_regs_list))(*map(ctypes.c_uint64, artificial_regs_list))
_UC_NATIVE.set_artificial_registers(self._uc_state, artificial_regs_array, len(artificial_regs_list))
# Initialize VEX register offset to unicorn register ID mappings and VEX register offset to name map
vex_reg_offsets = []
unicorn_reg_ids = []
reg_sizes = []
for vex_reg_offset, (unicorn_reg_id, reg_size) in self.state.arch.vex_to_unicorn_map.items():
vex_reg_offsets.append(vex_reg_offset)
unicorn_reg_ids.append(unicorn_reg_id)
reg_sizes.append(reg_size)
vex_reg_offsets_array = (ctypes.c_uint64 * len(vex_reg_offsets))(*map(ctypes.c_uint64, vex_reg_offsets))
unicorn_reg_ids_array = (ctypes.c_uint64 * len(unicorn_reg_ids))(*map(ctypes.c_uint64, unicorn_reg_ids))
reg_sizes_array = (ctypes.c_uint64 * len(reg_sizes))(*map(ctypes.c_uint64, reg_sizes))
_UC_NATIVE.set_vex_to_unicorn_reg_mappings(self._uc_state, vex_reg_offsets_array, unicorn_reg_ids_array,
reg_sizes_array, len(vex_reg_offsets))
# VEX to unicorn mappings for VEX flag registers
if self.state.arch.cpu_flag_register_offsets_and_bitmasks_map:
flag_vex_offsets = []
flag_bitmasks = []
flag_uc_regs = []
for flag_vex_offset, (uc_reg, flag_bitmask) in self.state.arch.cpu_flag_register_offsets_and_bitmasks_map.items():
flag_vex_offsets.append(flag_vex_offset)
flag_bitmasks.append(flag_bitmask)
flag_uc_regs.append(uc_reg)
flag_vex_offsets_array = (ctypes.c_uint64 * len(flag_vex_offsets))(*map(ctypes.c_uint64, flag_vex_offsets))
flag_bitmasks_array = (ctypes.c_uint64 * len(flag_bitmasks))(*map(ctypes.c_uint64, flag_bitmasks))
flag_uc_regs_array = (ctypes.c_uint64 * len(flag_uc_regs))(*map(ctypes.c_uint64, flag_uc_regs))
_UC_NATIVE.set_cpu_flags_details(self._uc_state, flag_vex_offsets_array, flag_uc_regs_array,
flag_bitmasks_array, len(flag_vex_offsets))
elif self.state.arch.name.startswith("ARM"):
l.warning("Flag registers for %s not set in native unicorn interface.", self.state.arch.name)
# Initialize list of blacklisted registers
blacklist_regs_offsets = self.state.arch.reg_blacklist_offsets
if len(blacklist_regs_offsets) > 0:
blacklist_regs_array = (ctypes.c_uint64 * len(blacklist_regs_offsets))(*map(ctypes.c_uint64, blacklist_regs_offsets))
_UC_NATIVE.set_register_blacklist(self._uc_state, blacklist_regs_array, len(blacklist_regs_offsets))
def start(self, step=None):
self.jumpkind = 'Ijk_Boring'
self.countdown_nonunicorn_blocks = self.cooldown_nonunicorn_blocks
for addr, length in self._uncache_regions:
l.debug("Un-caching writable page region @ %#x of length %x", addr, length)
_UC_NATIVE.uncache_pages_touching_region(self._uc_state, addr, length)
self._uncache_regions = []
addr = self.state.solver.eval(self.state.ip)
l.info('started emulation at %#x (%d steps)', addr, self.max_steps if step is None else step)
self.time = time.time()
self.errno = _UC_NATIVE.start(self._uc_state, addr, self.max_steps if step is None else step)
self.time = time.time() - self.time
def finish(self):
# do the superficial synchronization
self.get_regs()
self.steps = _UC_NATIVE.step(self._uc_state)
self.stop_details = _UC_NATIVE.get_stop_details(self._uc_state)
self.stop_reason = self.stop_details.stop_reason
self.stop_message = STOP.get_stop_msg(self.stop_reason)
if self.stop_reason in (STOP.symbolic_stop_reasons + STOP.unsupported_reasons) or \
self.stop_reason in (STOP.STOP_UNKNOWN_MEMORY_WRITE_SIZE, STOP.STOP_VEX_LIFT_FAILED):
self.stop_message += f". Block 0x{self.stop_details.block_addr:02x}(size: {self.stop_details.block_size})."
# figure out why we stopped
if self.stop_reason == STOP.STOP_NOSTART and self.steps > 0:
            # unicorn just quits without warning if it sees hlt. detect that.
if (self.state.memory.load(self.state.ip, 1) == 0xf4).is_true():
self.stop_reason = STOP.STOP_HLT
else:
raise SimUnicornError("Got STOP_NOSTART but a positive number of steps. This indicates a serious unicorn bug.")
addr = self.state.solver.eval(self.state.ip)
l.info('finished emulation at %#x after %d steps: %s', addr, self.steps, STOP.name_stop(self.stop_reason))
# should this be in destroy?
_UC_NATIVE.disable_symbolic_reg_tracking(self._uc_state)
# synchronize memory contents - head is a linked list of memory updates
head = _UC_NATIVE.sync(self._uc_state)
p_update = head
while bool(p_update):
update = p_update.contents
address, length = update.address, update.length
if self.gdt is not None and self.gdt.addr <= address < self.gdt.addr + self.gdt.limit:
l.warning("Emulation touched fake GDT at %#x, discarding changes", self.gdt.addr)
else:
s = bytes(self.uc.mem_read(address, int(length)))
l.debug('...changed memory: [%#x, %#x] = %s', address, address + length, binascii.hexlify(s))
self.state.memory.store(address, s)
p_update = update.next
# process the concrete transmits
i = 0
stdout = self.state.posix.get_fd(1)
while True:
record = _UC_NATIVE.process_transmit(self._uc_state, i)
if not bool(record):
break
string = ctypes.string_at(record.contents.data, record.contents.count)
stdout.write_data(string)
i += 1
if self.stop_reason in (STOP.STOP_NORMAL, STOP.STOP_SYSCALL, STOP.STOP_SYMBOLIC_MEM_DEP_NOT_LIVE):
self.countdown_nonunicorn_blocks = 0
elif self.stop_reason == STOP.STOP_STOPPOINT:
self.countdown_nonunicorn_blocks = 0
self.countdown_stop_point = self.cooldown_stop_point
elif self.stop_reason in STOP.symbolic_stop_reasons:
self.countdown_nonunicorn_blocks = 0
self.countdown_symbolic_stop = self.cooldown_symbolic_stop
elif self.stop_reason in STOP.unsupported_reasons:
self.countdown_nonunicorn_blocks = 0
self.countdown_unsupported_stop = self.cooldown_unsupported_stop
elif self.stop_reason == STOP.STOP_UNKNOWN_MEMORY_WRITE_SIZE:
# Skip one block in case of unknown memory write size
self.countdown_nonunicorn_blocks = 0
self.countdown_unsupported_stop = 2
else:
self.countdown_nonunicorn_blocks = self.cooldown_nonunicorn_blocks
if not is_testing and self.time != 0 and self.steps / self.time < 10: # TODO: make this tunable
l.info(
"Unicorn stepped %d block%s in %fsec (%f blocks/sec), enabling cooldown",
self.steps,
'' if self.steps == 1 else 's',
self.time,
self.steps/self.time
)
self.countdown_nonunicorn_blocks = self.cooldown_nonunicorn_blocks
else:
l.info(
"Unicorn stepped %d block%s in %f sec (%f blocks/sec)",
self.steps,
'' if self.steps == 1 else 's',
self.time,
self.steps/self.time if self.time != 0 else float('nan')
)
# get the address list out of the state
if options.UNICORN_TRACK_BBL_ADDRS in self.state.options:
bbl_addrs = _UC_NATIVE.bbl_addrs(self._uc_state)
#bbl_addr_count = _UC_NATIVE.bbl_addr_count(self._uc_state)
# why is bbl_addr_count unused?
if self.steps:
self.state.history.recent_bbl_addrs = bbl_addrs[:self.steps]
# get the stack pointers
if options.UNICORN_TRACK_STACK_POINTERS in self.state.options:
stack_pointers = _UC_NATIVE.stack_pointers(self._uc_state)
self.state.scratch.stack_pointer_list = stack_pointers[:self.steps]
# syscall counts
self.state.history.recent_syscall_count = _UC_NATIVE.syscall_count(self._uc_state)
# executed page set
self.state.scratch.executed_pages_set = set()
while True:
page = _UC_NATIVE.executed_pages(self._uc_state)
if page == 2**64 - 1:
break
self.state.scratch.executed_pages_set.add(page)
def destroy(self):
#l.debug("Unhooking.")
_UC_NATIVE.unhook(self._uc_state)
self.uc.hook_reset()
#l.debug('deallocting native state %#x', self._uc_state)
_UC_NATIVE.dealloc(self._uc_state)
self._uc_state = None
# there's something we're not properly resetting for syscalls, so
# we'll clear the state when they happen
if self.stop_reason not in (STOP.STOP_NORMAL, STOP.STOP_STOPPOINT):
self.delete_uc()
#l.debug("Resetting the unicorn state.")
self.uc.reset()
def set_regs(self):
''' setting unicorn registers '''
uc = self.uc
self._symbolic_offsets = set()
if self.state.arch.qemu_name == 'x86_64':
fs = self.state.solver.eval(self.state.regs.fs)
gs = self.state.solver.eval(self.state.regs.gs)
self.write_msr(fs, 0xC0000100)
self.write_msr(gs, 0xC0000101)
elif self.state.arch.qemu_name == 'i386':
fs = self.state.solver.eval(self.state.regs.fs) << 16
gs = self.state.solver.eval(self.state.regs.gs) << 16
self.setup_gdt(fs, gs)
elif self.state.arch.qemu_name == 'mips':
# ulr
ulr = self.state.regs._ulr
uc.reg_write(self._uc_const.UC_MIPS_REG_CP0_USERLOCAL, self.state.solver.eval(ulr))
self.setup_flags()
for r, c in self._uc_regs.items():
if r in self.state.arch.reg_blacklist:
continue
v = self._process_value(getattr(self.state.regs, r), 'reg')
if v is None:
raise SimValueError('setting a symbolic register')
# l.debug('setting $%s = %#x', r, self.state.solver.eval(v))
uc.reg_write(c, self.state.solver.eval(v))
start, size = self.state.arch.registers[r]
if v.symbolic:
symbolic_reg_offsets = set(range(start, start + size))
# Process subregisters in decreasing order of their size so that smaller subregisters' taint status
# isn't clobbered by larger subregisters
subregs = sorted(self.state.arch.get_register_by_name(r).subregisters, key=lambda x: x[-1], reverse=True)
for subreg in subregs:
if not getattr(self.state.regs, subreg[0]).symbolic:
for subreg_offset in range(start + subreg[1], start + subreg[1] + subreg[2]):
symbolic_reg_offsets.discard(subreg_offset)
self._symbolic_offsets.update(symbolic_reg_offsets)
# TODO: Support ARM hardfloat synchronization
if self.state.arch.name in ('X86', 'AMD64'):
# sync the fp clerical data
c3210 = self.state.solver.eval(self.state.regs.fc3210)
top = self.state.solver.eval(self.state.regs.ftop[2:0])
rm = self.state.solver.eval(self.state.regs.fpround[1:0])
control = 0x037F | (rm << 10)
status = (top << 11) | c3210
uc.reg_write(unicorn.x86_const.UC_X86_REG_FPCW, control)
uc.reg_write(unicorn.x86_const.UC_X86_REG_FPSW, status)
for rn in ('fc3210', 'ftop', 'fpround'):
start, size = self.state.arch.registers[rn]
self._symbolic_offsets.difference_update(range(start, start + size))
            # we gotta convert the 64-bit double values to 80-bit extended precision!
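            # e.g. 1.0 is 0x3FF0000000000000 as a double (sign 0, exponent 1023, mantissa 0);
            # rebiasing gives the 15-bit exponent 1023 - 1023 + 16383 = 0x3FFF and the 64-bit
            # mantissa 0x8000000000000000 once the implicit integer bit is made explicit.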
uc_offset = unicorn.x86_const.UC_X86_REG_FP0
vex_offset = self.state.arch.registers['fpu_regs'][0]
vex_tag_offset = self.state.arch.registers['fpu_tags'][0]
tag_word = 0
for _ in range(8):
tag = self.state.solver.eval(self.state.registers.load(vex_tag_offset, size=1))
tag_word <<= 2
if tag == 0:
tag_word |= 3 # unicorn doesn't care about any value other than 3 for setting
else:
val = self._process_value(self.state.registers.load(vex_offset, size=8), 'reg')
if val is None:
raise SimValueError('setting a symbolic fp register')
if val.symbolic:
self._symbolic_offsets.difference_update(b for b,vb in enumerate(val.chop(8), start) if vb.symbolic)
val = self.state.solver.eval(val)
sign = bool(val & 0x8000000000000000)
exponent = (val & 0x7FF0000000000000) >> 52
mantissa = val & 0x000FFFFFFFFFFFFF
if exponent not in (0, 0x7FF): # normal value
exponent = exponent - 1023 + 16383
mantissa <<= 11
mantissa |= 0x8000000000000000 # set integer part bit, implicit to double
elif exponent == 0: # zero or subnormal value
mantissa = 0
elif exponent == 0x7FF: # nan or infinity
exponent = 0x7FFF
if mantissa != 0:
mantissa = 0x8000000000000000
else:
mantissa = 0xFFFFFFFFFFFFFFFF
if sign:
exponent |= 0x8000
uc.reg_write(uc_offset, (exponent, mantissa))
uc_offset += 1
vex_offset += 8
vex_tag_offset += 1
uc.reg_write(unicorn.x86_const.UC_X86_REG_FPTAG, tag_word)
def setup_flags(self):
uc = self.uc
# Save any symbolic VEX CC registers
saved_cc_regs = {}
for reg in self.state.arch.vex_cc_regs:
reg_val = getattr(self.state.regs, reg.name)
if reg_val.symbolic:
saved_cc_regs[reg.name] = reg_val
setattr(self.state.regs, reg.name, self.state.solver.eval(reg_val))
if saved_cc_regs:
vex_offset = self.state.arch.registers['cc_op'][0]
self._symbolic_offsets.update(range(vex_offset, vex_offset + self.state.arch.bytes*4))
if self.state.arch.qemu_name in ["i386", "x86_64"]:
flags = self._process_value(self.state.regs.eflags, 'reg')
if flags is None:
raise SimValueError('symbolic eflags')
uc.reg_write(self._uc_const.UC_X86_REG_EFLAGS, self.state.solver.eval(flags))
elif self.state.arch.qemu_name == "arm":
flags = self._process_value(self.state.regs.flags, 'reg')
if flags is None:
raise SimValueError('symbolic cpsr')
uc.reg_write(self._uc_const.UC_ARM_REG_CPSR, self.state.solver.eval(flags))
# Restore saved symbolic VEX CC registers
for reg_name, saved_reg_val in saved_cc_regs.items():
setattr(self.state.regs, reg_name, saved_reg_val)
def setup_gdt(self, fs, gs):
gdt = self.state.project.simos.generate_gdt(fs, gs)
uc = self.uc
uc.mem_map(gdt.addr, gdt.limit)
uc.mem_write(gdt.addr + 8, gdt.table)
uc.reg_write(self._uc_const.UC_X86_REG_GDTR, (0, gdt.addr, gdt.limit, 0x0))
uc.reg_write(self._uc_const.UC_X86_REG_CS, gdt.cs)
uc.reg_write(self._uc_const.UC_X86_REG_DS, gdt.ds)
uc.reg_write(self._uc_const.UC_X86_REG_ES, gdt.es)
uc.reg_write(self._uc_const.UC_X86_REG_SS, gdt.ss)
uc.reg_write(self._uc_const.UC_X86_REG_FS, gdt.fs)
uc.reg_write(self._uc_const.UC_X86_REG_GS, gdt.gs)
# if programs want to access this memory....... let them
# uc.mem_unmap(GDT_ADDR, GDT_LIMIT)
self.gdt = gdt
# do NOT call either of these functions in a callback, lmao
def read_msr(self, msr=0xC0000100):
setup_code = b'\x0f\x32'
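        # 0x0f 0x32 is the rdmsr instruction: ECX selects the MSR, the result is returned in EDX:EAX.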
BASE = 0x100B000000
uc = self.uc
uc.mem_map(BASE, 0x1000)
uc.mem_write(BASE, setup_code)
uc.reg_write(self._uc_const.UC_X86_REG_RCX, msr)
uc.emu_start(BASE, BASE + len(setup_code))
uc.mem_unmap(BASE, 0x1000)
a = uc.reg_read(self._uc_const.UC_X86_REG_RAX)
d = uc.reg_read(self._uc_const.UC_X86_REG_RDX)
return (d << 32) + a
def write_msr(self, val, msr=0xC0000100):
setup_code = b'\x0f\x30'
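        # 0x0f 0x30 is the wrmsr instruction: writes EDX:EAX into the MSR selected by ECX.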
BASE = 0x100B000000
uc = self.uc
uc.mem_map(BASE, 0x1000)
uc.mem_write(BASE, setup_code)
uc.reg_write(self._uc_const.UC_X86_REG_RCX, msr)
uc.reg_write(self._uc_const.UC_X86_REG_RAX, val & 0xFFFFFFFF)
uc.reg_write(self._uc_const.UC_X86_REG_RDX, val >> 32)
uc.emu_start(BASE, BASE + len(setup_code))
uc.mem_unmap(BASE, 0x1000)
def get_regs(self):
''' loading registers from unicorn '''
# first, get the ignore list (in case of symbolic registers)
saved_registers = []
if options.UNICORN_SYM_REGS_SUPPORT in self.state.options:
highest_reg_offset, reg_size = max(self.state.arch.registers.values())
symbolic_list = (ctypes.c_uint64*(highest_reg_offset + reg_size))()
num_regs = _UC_NATIVE.get_symbolic_registers(self._uc_state, symbolic_list)
# If any VEX cc_dep registers are symbolic, mark VEX cc_op register as symbolic so that it would be saved
# and restored for future use if needed
symbolic_list = symbolic_list[:num_regs]
for reg in self.state.arch.vex_cc_regs[1:]:
if reg.vex_offset in symbolic_list:
cc_op_reg = self.state.arch.vex_cc_regs[0]
if cc_op_reg.vex_offset not in symbolic_list:
symbolic_list.extend(range(cc_op_reg.vex_offset, cc_op_reg.vex_offset + cc_op_reg.size))
break
# we take the approach of saving off the symbolic regs and then writing them back
cur_group = None
last = None
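            # group contiguous symbolic byte offsets, splitting groups at word boundaries:
            # e.g. offsets 16,17,18,19 on a 32-bit arch are saved as a single 4-byte load at 16.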
for i in sorted(symbolic_list):
if cur_group is None:
cur_group = i
elif i != last + 1 or cur_group//self.state.arch.bytes != i//self.state.arch.bytes:
l.debug("Restoring symbolic register %d", cur_group)
saved_registers.append((
cur_group, self.state.registers.load(cur_group, last-cur_group+1)
))
cur_group = i
last = i
if cur_group is not None:
l.debug("Restoring symbolic register %d", cur_group)
saved_registers.append((
cur_group, self.state.registers.load(cur_group, last-cur_group+1)
))
# now we sync registers out of unicorn
for r, c in self._uc_regs.items():
if r in self.state.arch.reg_blacklist:
continue
v = self.uc.reg_read(c)
# l.debug('getting $%s = %#x', r, v)
setattr(self.state.regs, r, v)
# some architecture-specific register fixups
if self.state.arch.name in ('X86', 'AMD64'):
# update the eflags
self.state.regs.eflags = self.state.solver.BVV(self.uc.reg_read(self._uc_const.UC_X86_REG_EFLAGS), self.state.arch.bits)
# sync the fp clerical data
status = self.uc.reg_read(unicorn.x86_const.UC_X86_REG_FPSW)
c3210 = status & 0x4700
top = (status & 0x3800) >> 11
control = self.uc.reg_read(unicorn.x86_const.UC_X86_REG_FPCW)
rm = (control & 0x0C00) >> 10
self.state.regs.fpround = rm
self.state.regs.fc3210 = c3210
self.state.regs.ftop = top
# sync the stx registers
# we gotta round the 80-bit extended precision values to 64-bit doubles!
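            # e.g. the 80-bit pair exponent=0x3FFF, mantissa=0x8000000000000000 (i.e. 1.0) becomes
            # exponent 16383 - 16383 + 1023 = 1023 and (mantissa >> 11) & 0xFFFFFFFFFFFFF = 0,
            # which reassembles to the double 0x3FF0000000000000.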
uc_offset = unicorn.x86_const.UC_X86_REG_FP0
vex_offset = self.state.arch.registers['fpu_regs'][0]
vex_tag_offset = self.state.arch.registers['fpu_tags'][0] + 7
tag_word = self.uc.reg_read(unicorn.x86_const.UC_X86_REG_FPTAG)
for _ in range(8):
if tag_word & 3 == 3:
self.state.registers.store(vex_tag_offset, 0, size=1)
else:
self.state.registers.store(vex_tag_offset, 1, size=1)
mantissa, exponent = self.uc.reg_read(uc_offset)
sign = bool(exponent & 0x8000)
exponent = (exponent & 0x7FFF)
if exponent not in (0, 0x7FFF): # normal value
exponent = exponent - 16383 + 1023
if exponent <= 0: # underflow to zero
exponent = 0
mantissa = 0
elif exponent >= 0x7FF: # overflow to infinity
exponent = 0x7FF
mantissa = 0
elif exponent == 0: # zero or subnormal value
mantissa = 0
elif exponent == 0x7FFF: # nan or infinity
exponent = 0x7FF
if mantissa != 0:
mantissa = 0xFFFF
val = 0x8000000000000000 if sign else 0
val |= exponent << 52
val |= (mantissa >> 11) & 0xFFFFFFFFFFFFF
# the mantissa calculation is to convert from the 64-bit mantissa to 52-bit
                # additionally, extended precision keeps around a high bit that we don't care about
# so 11-shift, not 12
self.state.registers.store(vex_offset, val, size=8)
uc_offset += 1
vex_offset += 8
tag_word >>= 2
vex_tag_offset -= 1
# TODO: ARM hardfloat
# now, we restore the symbolic registers
if options.UNICORN_SYM_REGS_SUPPORT in self.state.options:
for o, r in saved_registers:
self.state.registers.store(o, r)
def _check_registers(self, report=True):
        ''' check if this state might be used in unicorn (has no symbolic registers)'''
for r in self.state.arch.uc_regs.keys():
v = getattr(self.state.regs, r)
processed_v = self._process_value(v, 'reg')
if processed_v is None or processed_v.symbolic:
#l.info('detected symbolic register %s', r)
if report:
self._report_symbolic_blocker(v, 'reg')
return False
if self.state.arch.vex_conditional_helpers:
flags = ccall._get_flags(self.state)
processed_flags = self._process_value(flags, 'reg')
if processed_flags is None or processed_flags.symbolic:
#l.info("detected symbolic rflags/eflags")
if report:
self._report_symbolic_blocker(flags, 'reg')
return False
#l.debug('passed quick check')
return True
SimState.register_default('unicorn', Unicorn)
| angr/angr | angr/state_plugins/unicorn_engine.py | Python | bsd-2-clause | 71,217 |
from django.contrib.auth import views
from django.contrib.auth.decorators import login_required, user_passes_test
from django.contrib.auth import get_user_model
from django.http import HttpResponse
from django.shortcuts import get_object_or_404, render
import json
from account.models import DepartmentGroup
from task.models import Execution
from core.models import Department
def login(request, *args, **kwargs):
if request.method == 'POST':
if not request.POST.get('remember', None):
request.session.set_expiry(0)
return views.login(request, *args, **kwargs)
@login_required
def profile_page(request, user_id):
data = {}
user = get_object_or_404(get_user_model(), pk=user_id)
data['user_profile'] = user
data['user_executions'] = Execution.get_inline_by_user(user.id)
return render(request, 'page/profile.html', data)
def on_before_save_user(instance):
if len(instance.password):
instance.set_password(instance.password)
else:
instance.password = get_user_model().objects.get(pk=instance.id).password
def modal_permissions(request, group_id):
group = get_object_or_404(DepartmentGroup, pk=group_id)
department = Department.objects.get(pk=request.current_department_id)
if group.department_id != department.id:
return
if not request.user.has_perm('core.change_department', department):
return
data = {}
data['group'] = group
data['form_template'] = 'partial/permissions_form.html'
data['model_name'] = '%s group permissions' % group.local_name
data['is_new'] = False
data['no_delete'] = True
data['request_path'] = request.path
data['applications'] = department.applications
from core.models import Application, Environment
from task.models import Task
models = {
'department': Department,
'application': Application,
'environment': Environment,
'task': Task,
}
if request.method == 'POST':
from guardian.models import GroupObjectPermission
from guardian.shortcuts import assign_perm
GroupObjectPermission.objects.filter(group_id=group.id).delete()
assign_perm('core.view_department', group, group.department)
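        # POST keys are '<action>_<model>_<pk>' checkboxes, e.g. 'change_application_3'
        # grants the change_application permission on the Application with pk=3.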
for name, value in request.POST.items():
key = name.split('_')
if len(key) == 3 and value == 'on':
action, model, pk = key
assign_perm('%s_%s' % (action, model), group, models[model].objects.get(pk=pk))
return HttpResponse(json.dumps({'status': True}), content_type="application/json")
else:
return render(request, 'partial/modal_form.html', data)
| senkal/gunnery | gunnery/account/views.py | Python | apache-2.0 | 2,675 |
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''messaging based notification driver, with message envelopes'''
from oslo.config import cfg
from neutron.openstack.common import context as req_context
from neutron.openstack.common.gettextutils import _
from neutron.openstack.common import log as logging
from neutron.openstack.common import rpc
LOG = logging.getLogger(__name__)
notification_topic_opt = cfg.ListOpt(
'topics', default=['notifications', ],
help='AMQP topic(s) used for openstack notifications')
opt_group = cfg.OptGroup(name='rpc_notifier2',
title='Options for rpc_notifier2')
CONF = cfg.CONF
CONF.register_group(opt_group)
CONF.register_opt(notification_topic_opt, opt_group)
def notify(context, message):
"""Sends a notification via RPC."""
if not context:
context = req_context.get_admin_context()
priority = message.get('priority',
CONF.default_notification_level)
priority = priority.lower()
for topic in CONF.rpc_notifier2.topics:
topic = '%s.%s' % (topic, priority)
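        # e.g. with the default topics and a message priority of 'error', this
        # publishes the notification to the 'notifications.error' topic.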
try:
rpc.notify(context, topic, message, envelope=True)
except Exception:
LOG.exception(_("Could not send notification to %(topic)s. "
"Payload=%(message)s"), locals())
| sajuptpm/neutron-ipam | neutron/openstack/common/notifier/rpc_notifier2.py | Python | apache-2.0 | 1,921 |
from django.conf.urls import patterns, include, url
from django.views.generic import TemplateView
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^$', TemplateView.as_view(template_name='index.html'), name = 'index'),
url(r'^accounts/login/$', 'django.contrib.auth.views.login', {'template_name': 'login.html'}),
url(r'^accounts/logout/$', 'django.contrib.auth.views.logout', {'next_page': '/'}),
url(r'^application/', include('application.urls')),
url(r'^admin/', include(admin.site.urls)),
)
| xnnyygn/vmm | vmm/urls.py | Python | apache-2.0 | 554 |
"""
Mac OS X .app build command for distutils
Originally (loosely) based on code from py2exe's build_exe.py by Thomas Heller.
"""
from __future__ import print_function
import imp
import sys
import os
import zipfile
import plistlib
import shlex
import shutil
import textwrap
import pkg_resources
import collections
from modulegraph import modulegraph
from py2app.apptemplate.setup import main as script_executable
from py2app.util import mergecopy, make_exec
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
from itertools import chain
from setuptools import Command
from distutils.util import convert_path
from distutils import log
from distutils.errors import *
from modulegraph.find_modules import find_modules, parse_mf_results, find_needed_modules
from modulegraph.modulegraph import SourceModule, Package, Script
from modulegraph import zipio
import macholib.dyld
import macholib.MachOStandalone
import macholib.MachO
from macholib.util import flipwritable
from py2app.create_appbundle import create_appbundle
from py2app.create_pluginbundle import create_pluginbundle
from py2app.util import \
fancy_split, byte_compile, make_loader, imp_find_module, \
copy_tree, fsencoding, strip_files, in_system_path, makedirs, \
iter_platform_files, find_version, skipscm, momc, copy_file, \
    copy_resource, mapc
from py2app.filters import \
not_stdlib_filter, not_system_filter, has_filename_filter
from py2app import recipes
from distutils.sysconfig import get_config_var, get_config_h_filename
PYTHONFRAMEWORK=get_config_var('PYTHONFRAMEWORK')
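# Maps a plugin bundle's filename extension to the Contents/Library
# subdirectory it is copied into by iter_extra_plugins() (for example,
# Foo.qlgenerator ends up in Contents/Library/QuickLook/Foo.qlgenerator).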
PLUGIN_SUFFIXES = {
'.qlgenerator': 'QuickLook',
'.mdimporter': 'Spotlight',
'.xpc': 'XPCServices',
'.service': 'Services',
'.prefPane': 'PreferencePanes',
'.iaplugin': 'InternetAccounts',
'.action': 'Automator',
}
try:
basestring
except NameError:
basestring = str
def rewrite_tkinter_load_commands(tkinter_path):
print("rewrite_tk", tkinter_path)
m = macholib.MachO.MachO(tkinter_path)
tcl_path = None
tk_path = None
rewrite_map = {}
for header in m.headers:
for idx, name, other in header.walkRelocatables():
if other.endswith('/Tk'):
if tk_path is not None and other != tk_path:
raise DistutilsPlatformError('_tkinter is linked to different Tk paths')
tk_path = other
elif other.endswith('/Tcl'):
if tcl_path is not None and other != tcl_path:
raise DistutilsPlatformError('_tkinter is linked to different Tcl paths')
tcl_path = other
if tcl_path is None or 'Tcl.framework' not in tcl_path:
        raise DistutilsPlatformError('_tkinter is not linked against a Tcl.framework')
if tk_path is None or 'Tk.framework' not in tk_path:
        raise DistutilsPlatformError('_tkinter is not linked against a Tk.framework')
system_tcl_versions = [nm for nm in os.listdir('/System/Library/Frameworks/Tcl.framework/Versions') if nm != 'Current']
system_tk_versions = [nm for nm in os.listdir('/System/Library/Frameworks/Tk.framework/Versions') if nm != 'Current']
if not tcl_path.startswith('/System/Library/Frameworks'):
# ../Versions/8.5/Tcl
ver = os.path.basename(os.path.dirname(tcl_path))
if ver not in system_tcl_versions:
raise DistutilsPlatformError('_tkinter is linked to a version of Tcl not in /System')
rewrite_map[tcl_path] = '/System/Library/Frameworks/Tcl.framework/Versions/%s/Tcl'%(ver,)
if not tk_path.startswith('/System/Library/Frameworks'):
# ../Versions/8.5/Tk
ver = os.path.basename(os.path.dirname(tk_path))
if ver not in system_tk_versions:
raise DistutilsPlatformError('_tkinter is linked to a version of Tk not in /System')
rewrite_map[tk_path] = '/System/Library/Frameworks/Tk.framework/Versions/%s/Tk'%(ver,)
if rewrite_map:
print("Relinking _tkinter.so to system Tcl/Tk")
rewroteAny = False
for header in m.headers:
for idx, name, other in header.walkRelocatables():
data = rewrite_map.get(other)
if data:
if header.rewriteDataForCommand(idx, data.encode(sys.getfilesystemencoding())):
rewroteAny = True
if rewroteAny:
old_mode = flipwritable(m.filename)
try:
with open(m.filename, 'rb+') as f:
for header in m.headers:
f.seek(0)
header.write(f)
f.seek(0, 2)
f.flush()
finally:
flipwritable(m.filename, old_mode)
else:
print("_tkinter already linked against system Tcl/Tk")
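# Relative name of the zip archive that holds the bundled pure-Python
# modules; non-semi-standalone Python 3 builds reuse the pythonXY.zip name
# that the interpreter already has on its default sys.path.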
def get_zipfile(dist, semi_standalone=False):
if sys.version_info[0] == 3:
if semi_standalone:
return "python%d.%d/site-packages.zip"%(sys.version_info[:2])
else:
return "python%d%d.zip"%(sys.version_info[:2])
return getattr(dist, "zipfile", None) or "site-packages.zip"
def framework_copy_condition(src):
# Skip Headers, .svn, and CVS dirs
return skipscm(src) and os.path.basename(src) != 'Headers'
class PythonStandalone(macholib.MachOStandalone.MachOStandalone):
def __init__(self, appbuilder, *args, **kwargs):
super(PythonStandalone, self).__init__(*args, **kwargs)
self.appbuilder = appbuilder
def copy_dylib(self, src):
dest = os.path.join(self.dest, os.path.basename(src))
if os.path.islink(src):
dest = os.path.join(self.dest, os.path.basename(os.path.realpath(src)))
            # Ensure that the original name also exists; this avoids problems when
# the filename is used from Python (see issue #65)
#
# NOTE: The if statement checks that the target link won't
# point to itself, needed for systems like homebrew that
# store symlinks in "public" locations that point to
# files of the same name in a per-package install location.
link_dest = os.path.join(self.dest, os.path.basename(src))
if os.path.basename(link_dest) != os.path.basename(dest):
os.symlink(os.path.basename(dest), link_dest)
else:
dest = os.path.join(self.dest, os.path.basename(src))
return self.appbuilder.copy_dylib(src, dest)
def copy_framework(self, info):
destfn = self.appbuilder.copy_framework(info, self.dest)
dest = os.path.join(self.dest, info['shortname'] + '.framework')
self.pending.append((destfn, iter_platform_files(dest)))
return destfn
def iterRecipes(module=recipes):
for name in dir(module):
if name.startswith('_'):
continue
check = getattr(getattr(module, name), 'check', None)
if check is not None:
yield (name, check)
# A very loosely defined "target". We assume either a "script" or "modules"
# attribute. Some attributes will be target specific.
class Target(object):
def __init__(self, **kw):
self.__dict__.update(kw)
# If modules is a simple string, assume they meant list
m = self.__dict__.get("modules")
if m and isinstance(m, basestring):
self.modules = [m]
def get_dest_base(self):
dest_base = getattr(self, "dest_base", None)
if dest_base: return dest_base
script = getattr(self, "script", None)
if script:
return os.path.basename(os.path.splitext(script)[0])
modules = getattr(self, "modules", None)
assert modules, "no script, modules or dest_base specified"
return modules[0].split(".")[-1]
def validate(self):
resources = getattr(self, "resources", [])
for r_filename in resources:
if not os.path.isfile(r_filename):
raise DistutilsOptionError(
"Resource filename '%s' does not exist" % (r_filename,))
def validate_target(dist, attr, value):
res = FixupTargets(value, "script")
other = {"app": "plugin", "plugin": "app"}
if res and getattr(dist, other[attr]):
# XXX - support apps and plugins?
raise DistutilsOptionError(
"You must specify either app or plugin, not both")
def FixupTargets(targets, default_attribute):
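    # Normalize the 'app'/'plugin' arguments from setup() into a list of
    # Target objects; bare strings are wrapped as
    # Target(<default_attribute>=value).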
if not targets:
return targets
try:
targets = eval(targets)
except:
pass
ret = []
for target_def in targets:
if isinstance(target_def, basestring):
# Create a default target object, with the string as the attribute
target = Target(**{default_attribute: target_def})
else:
d = getattr(target_def, "__dict__", target_def)
if default_attribute not in d:
raise DistutilsOptionError(
"This target class requires an attribute '%s'"
% (default_attribute,))
target = Target(**d)
target.validate()
ret.append(target)
return ret
def normalize_data_file(fn):
if isinstance(fn, basestring):
fn = convert_path(fn)
return ('', [fn])
return fn
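# True when the interpreter running py2app is (or, inside a virtualenv,
# wraps) the vendor Python shipped with the system; such builds are forced
# into semi-standalone mode (see initialize_options below).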
def is_system():
prefix = sys.prefix
if os.path.exists(os.path.join(prefix, ".Python")):
fn = os.path.join(prefix, "lib", "python%d.%d"%(sys.version_info[:2]), "orig-prefix.txt")
if os.path.exists(fn):
with open(fn, 'rU') as fp:
prefix = fp.read().strip()
return in_system_path(prefix)
def installation_info(version=None):
if version is None:
version = sys.version
if is_system():
return version[:3] + " (FORCED: Using vendor Python)"
else:
return version[:3]
class py2app(Command):
description = "create a Mac OS X application or plugin from Python scripts"
# List of option tuples: long name, short name (None if no short
# name), and help string.
user_options = [
("app=", None,
"application bundle to be built"),
("plugin=", None,
"plugin bundle to be built"),
('optimize=', 'O',
"optimization level: -O1 for \"python -O\", "
"-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
("includes=", 'i',
"comma-separated list of modules to include"),
("packages=", 'p',
"comma-separated list of packages to include"),
("iconfile=", None,
"Icon file to use"),
("excludes=", 'e',
"comma-separated list of modules to exclude"),
("dylib-excludes=", 'E',
"comma-separated list of frameworks or dylibs to exclude"),
("datamodels=", None,
"xcdatamodels to be compiled and copied into Resources"),
("mappingmodels=", None,
"xcmappingmodels to be compiled and copied into Resources"),
("resources=", 'r',
"comma-separated list of additional data files and folders to include (not for code!)"),
("frameworks=", 'f',
"comma-separated list of additional frameworks and dylibs to include"),
("plist=", 'P',
"Info.plist template file, dict, or plistlib.Plist"),
("extension=", None,
"Bundle extension [default:.app for app, .plugin for plugin]"),
("graph", 'g',
"output module dependency graph"),
("xref", 'x',
"output module cross-reference as html"),
("no-strip", None,
"do not strip debug and local symbols from output"),
#("compressed", 'c',
# "create a compressed zipfile"),
("no-chdir", 'C',
"do not change to the data directory (Contents/Resources) [forced for plugins]"),
#("no-zip", 'Z',
# "do not use a zip file (XXX)"),
("semi-standalone", 's',
"depend on an existing installation of Python " + installation_info()),
("alias", 'A',
"Use an alias to current source file (for development only!)"),
("argv-emulation", 'a',
"Use argv emulation [disabled for plugins]."),
("argv-inject=", None,
"Inject some commands into the argv"),
("emulate-shell-environment", None,
"Emulate the shell environment you get in a Terminal window"),
("use-pythonpath", None,
"Allow PYTHONPATH to effect the interpreter's environment"),
("use-faulthandler", None,
"Enable the faulthandler in the generated bundle (Python 3.3 or later)"),
("verbose-interpreter", None,
"Start python in verbose mode"),
('bdist-base=', 'b',
'base directory for build library (default is build)'),
('dist-dir=', 'd',
"directory to put final built distributions in (default is dist)"),
('site-packages', None,
"include the system and user site-packages into sys.path"),
("strip", 'S',
"strip debug and local symbols from output (on by default, for compatibility)"),
("prefer-ppc", None,
"Force application to run translated on i386 (LSPrefersPPC=True)"),
('debug-modulegraph', None,
'Drop to pdb console after the module finding phase is complete'),
("debug-skip-macholib", None,
"skip macholib phase (app will not be standalone!)"),
("arch=", None, "set of architectures to use (fat, fat3, universal, intel, i386, ppc, x86_64; default is the set for the current python binary)"),
("qt-plugins=", None, "set of Qt plugins to include in the application bundle (default None)"),
("matplotlib-backends=", None, "set of matplotlib backends to include (default: include entire package)"),
("extra-scripts=", None, "set of scripts to include in the application bundle, next to the main application script"),
("include-plugins=", None, "List of plugins to include"),
("force-system-tk", None, "Ensure that Tkinter is linked against Apple's build of Tcl/Tk"),
("report-missing-from-imports", None, "Report the list of missing names for 'from module import name'"),
("no-report-missing-conditional-import", None, "Don't report missing modules when they appear to be conditional imports"),
]
boolean_options = [
#"compressed",
"xref",
"strip",
"no-strip",
"site-packages",
"semi-standalone",
"alias",
"argv-emulation",
#"no-zip",
"use-pythonpath",
"use-faulthandler",
"verbose-interpreter",
"no-chdir",
"debug-modulegraph",
"debug-skip-macholib",
"graph",
"prefer-ppc",
"emulate-shell-environment",
"force-system-tk",
"report-missing-from-imports",
"no-report-missing-conditional-import",
]
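    # Note: "strip" is on by default; the separate no-strip flag is kept for
    # backwards compatibility and the pair is reconciled in finalize_options().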
def initialize_options (self):
self.app = None
self.plugin = None
self.bdist_base = None
self.xref = False
self.graph = False
self.no_zip = 0
self.optimize = 0
if hasattr(sys, 'flags'):
self.optimize = sys.flags.optimize
self.arch = None
self.strip = True
self.no_strip = False
self.iconfile = None
self.extension = None
self.alias = 0
self.argv_emulation = 0
self.emulate_shell_environment = 0
self.argv_inject = None
self.no_chdir = 0
self.site_packages = False
self.use_pythonpath = False
self.use_faulthandler = False
self.verbose_interpreter = False
self.includes = None
self.packages = None
self.excludes = None
self.dylib_excludes = None
self.frameworks = None
self.resources = None
self.datamodels = None
self.mappingmodels = None
self.plist = None
self.compressed = True
self.semi_standalone = is_system()
self.dist_dir = None
self.debug_skip_macholib = False
self.debug_modulegraph = False
self.prefer_ppc = False
self.filters = []
self.eggs = []
self.qt_plugins = None
self.matplotlib_backends = None
self.extra_scripts = None
self.include_plugins = None
self.force_system_tk = False
self.report_missing_from_imports = False
self.no_report_missing_conditional_import = False
def finalize_options (self):
if not self.strip:
self.no_strip = True
elif self.no_strip:
self.strip = False
self.optimize = int(self.optimize)
if self.argv_inject and isinstance(self.argv_inject, basestring):
self.argv_inject = shlex.split(self.argv_inject)
self.includes = set(fancy_split(self.includes))
self.includes.add('encodings.*')
if self.use_faulthandler:
self.includes.add('faulthandler')
#if sys.version_info[:2] >= (3, 2):
# self.includes.add('pkgutil')
# self.includes.add('imp')
self.packages = set(fancy_split(self.packages))
self.excludes = set(fancy_split(self.excludes))
self.excludes.add('readline')
# included by apptemplate
self.excludes.add('site')
if getattr(self.distribution, 'install_requires', None):
self.includes.add('pkg_resources')
self.eggs = pkg_resources.require(self.distribution.install_requires)
# Setuptools/distribute style namespace packages uses
# __import__('pkg_resources'), and that import isn't detected at the
# moment. Forcefully include pkg_resources.
self.includes.add('pkg_resources')
dylib_excludes = fancy_split(self.dylib_excludes)
self.dylib_excludes = []
for fn in dylib_excludes:
try:
res = macholib.dyld.framework_find(fn)
except ValueError:
try:
res = macholib.dyld.dyld_find(fn)
except ValueError:
res = fn
self.dylib_excludes.append(res)
self.resources = fancy_split(self.resources)
frameworks = fancy_split(self.frameworks)
self.frameworks = []
for fn in frameworks:
try:
res = macholib.dyld.framework_find(fn)
except ValueError:
res = macholib.dyld.dyld_find(fn)
while res in self.dylib_excludes:
self.dylib_excludes.remove(res)
self.frameworks.append(res)
if not self.plist:
self.plist = {}
if isinstance(self.plist, basestring):
self.plist = plistlib.Plist.fromFile(self.plist)
if isinstance(self.plist, plistlib.Dict):
self.plist = dict(self.plist.__dict__)
else:
self.plist = dict(self.plist)
self.set_undefined_options('bdist',
('dist_dir', 'dist_dir'),
('bdist_base', 'bdist_base'))
if self.semi_standalone:
self.filters.append(not_stdlib_filter)
if self.iconfile is None and 'CFBundleIconFile' not in self.plist:
# Default is the generic applet icon in the framework
iconfile = os.path.join(sys.prefix, 'Resources', 'Python.app',
'Contents', 'Resources', 'PythonApplet.icns')
if os.path.exists(iconfile):
self.iconfile = iconfile
self.runtime_preferences = list(self.get_runtime_preferences())
self.qt_plugins = fancy_split(self.qt_plugins)
self.matplotlib_backends = fancy_split(self.matplotlib_backends)
self.extra_scripts = fancy_split(self.extra_scripts)
self.include_plugins = fancy_split(self.include_plugins)
if self.datamodels:
print("WARNING: the datamodels option is deprecated, add model files to the list of resources")
if self.mappingmodels:
print("WARNING: the mappingmodels option is deprecated, add model files to the list of resources")
def get_default_plist(self):
# XXX - this is all single target stuff
plist = {}
target = self.targets[0]
version = self.distribution.get_version()
if version == '0.0.0':
try:
version = find_version(target.script)
except ValueError:
pass
if not isinstance(version, basestring):
raise DistutilsOptionError("Version must be a string")
if sys.version_info[0] > 2 and isinstance(version, type('a'.encode('ascii'))):
raise DistutilsOptionError("Version must be a string")
plist['CFBundleVersion'] = version
name = self.distribution.get_name()
if name == 'UNKNOWN':
base = target.get_dest_base()
name = os.path.basename(base)
plist['CFBundleName'] = name
return plist
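    # Locate the Python shared library / framework binary that the bundle
    # will either embed (standalone builds) or reference in place
    # (semi-standalone and alias builds).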
def get_runtime(self, prefix=None, version=None):
# XXX - this is a bit of a hack!
# ideally we'd use dylib functions to figure this out
if prefix is None:
prefix = sys.prefix
if version is None:
version = sys.version
version = version[:3]
info = None
if os.path.exists(os.path.join(prefix, ".Python")):
# We're in a virtualenv environment, locate the real prefix
fn = os.path.join(prefix, "lib", "python%d.%d"%(sys.version_info[:2]), "orig-prefix.txt")
if os.path.exists(fn):
with open(fn, 'rU') as fp:
prefix = fp.read().strip()
try:
fmwk = macholib.dyld.framework_find(prefix)
except ValueError:
info = None
else:
info = macholib.dyld.framework_info(fmwk)
if info is not None:
dylib = info['name']
runtime = os.path.join(info['location'], info['name'])
else:
dylib = 'libpython%s.dylib' % (sys.version[:3],)
runtime = os.path.join(prefix, 'lib', dylib)
return dylib, runtime
def symlink(self, src, dst):
try:
os.remove(dst)
except OSError:
pass
os.symlink(src, dst)
def get_runtime_preferences(self, prefix=None, version=None):
dylib, runtime = self.get_runtime(prefix=prefix, version=version)
yield os.path.join('@executable_path', '..', 'Frameworks', dylib)
if self.semi_standalone or self.alias:
yield runtime
def run(self):
if get_config_var('PYTHONFRAMEWORK') is None:
if not get_config_var('Py_ENABLE_SHARED'):
raise DistutilsPlatformError("This python does not have a shared library or framework")
else:
# Issue .. in py2app's tracker, and issue .. in python's tracker: a unix-style shared
# library build did not read the application environment correctly. The collection of
# if statements below gives a clean error message when py2app is started, instead of
# building a bundle that will give a confusing error message when started.
msg = "py2app is not supported for a shared library build with this version of python"
if sys.version_info[:2] < (2,7):
raise DistutilsPlatformError(msg)
            elif sys.version_info[:2] == (2,7) and sys.version_info[2] < 4:
                raise DistutilsPlatformError(msg)
            elif sys.version_info[0] == 3 and sys.version_info[1] < 2:
                raise DistutilsPlatformError(msg)
            elif sys.version_info[0] == 3 and sys.version_info[1] == 2 and sys.version_info[2] < 3:
                raise DistutilsPlatformError(msg)
            elif sys.version_info[0] == 3 and sys.version_info[1] == 3 and sys.version_info[2] < 1:
raise DistutilsPlatformError(msg)
if hasattr(self.distribution, "install_requires") \
and self.distribution.install_requires:
self.distribution.fetch_build_eggs(self.distribution.install_requires)
build = self.reinitialize_command('build')
build.build_base = self.bdist_base
build.run()
self.create_directories()
self.fixup_distribution()
self.initialize_plist()
sys_old_path = sys.path[:]
extra_paths = [
os.path.dirname(target.script)
for target in self.targets
]
extra_paths.extend([build.build_platlib, build.build_lib])
self.additional_paths = [
os.path.abspath(p)
for p in extra_paths
if p is not None
]
sys.path[:0] = self.additional_paths
# this needs additional_paths
self.initialize_prescripts()
try:
self._run()
finally:
sys.path = sys_old_path
def iter_datamodels(self, resdir):
for (path, files) in (normalize_data_file(fn) for fn in (self.datamodels or ())):
path = fsencoding(path)
for fn in files:
fn = fsencoding(fn)
basefn, ext = os.path.splitext(fn)
if ext != '.xcdatamodel':
basefn = fn
fn += '.xcdatamodel'
destfn = os.path.basename(basefn) + '.mom'
yield fn, os.path.join(resdir, path, destfn)
def compile_datamodels(self, resdir):
for src, dest in self.iter_datamodels(resdir):
print("compile datamodel", src, "->", dest)
self.mkpath(os.path.dirname(dest))
momc(src, dest)
def iter_mappingmodels(self, resdir):
for (path, files) in (normalize_data_file(fn) for fn in (self.mappingmodels or ())):
path = fsencoding(path)
for fn in files:
fn = fsencoding(fn)
basefn, ext = os.path.splitext(fn)
if ext != '.xcmappingmodel':
basefn = fn
fn += '.xcmappingmodel'
destfn = os.path.basename(basefn) + '.cdm'
yield fn, os.path.join(resdir, path, destfn)
def compile_mappingmodels(self, resdir):
for src, dest in self.iter_mappingmodels(resdir):
self.mkpath(os.path.dirname(dest))
mapc(src, dest)
def iter_extra_plugins(self):
for item in self.include_plugins:
if isinstance(item, (list, tuple)):
subdir, path = item
else:
ext = os.path.splitext(item)[1]
try:
subdir = PLUGIN_SUFFIXES[ext]
path = item
except KeyError:
raise DistutilsOptionError("Cannot determine subdirectory for plugin %s"%(item,))
yield path, os.path.join(subdir, os.path.basename(path))
def iter_data_files(self):
dist = self.distribution
allres = chain(getattr(dist, 'data_files', ()) or (), self.resources)
for (path, files) in (normalize_data_file(fn) for fn in allres):
path = fsencoding(path)
for fn in files:
fn = fsencoding(fn)
yield fn, os.path.join(path, os.path.basename(fn))
def collect_scripts(self):
# these contains file names
scripts = set()
for target in self.targets:
scripts.add(target.script)
scripts.update([
k for k in target.prescripts if isinstance(k, basestring)
])
if hasattr(target, 'extra_scripts'):
scripts.update(target.extra_scripts)
scripts.update(self.extra_scripts)
return scripts
def get_plist_options(self):
result = dict(
PyOptions=dict(
use_pythonpath=bool(self.use_pythonpath),
site_packages=bool(self.site_packages),
alias=bool(self.alias),
argv_emulation=bool(self.argv_emulation),
emulate_shell_environment=bool(self.emulate_shell_environment),
no_chdir=bool(self.no_chdir),
prefer_ppc=self.prefer_ppc,
verbose=self.verbose_interpreter,
use_faulthandler=self.use_faulthandler,
),
)
if self.optimize:
result['PyOptions']['optimize'] = self.optimize
return result
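    # initialize_plist() merges (in increasing priority) the defaults, any
    # per-target plist, the user-supplied plist and the PyOptions dict above;
    # the launcher reads those PyOptions back from Info.plist at start-up.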
def initialize_plist(self):
plist = self.get_default_plist()
for target in self.targets:
plist.update(getattr(target, 'plist', {}))
plist.update(self.plist)
plist.update(self.get_plist_options())
if self.iconfile:
iconfile = self.iconfile
if not os.path.exists(iconfile):
iconfile = iconfile + '.icns'
if not os.path.exists(iconfile):
raise DistutilsOptionError("icon file must exist: %r"
% (self.iconfile,))
self.resources.append(iconfile)
plist['CFBundleIconFile'] = os.path.basename(iconfile)
if self.prefer_ppc:
plist['LSPrefersPPC'] = True
self.plist = plist
return plist
def run_alias(self):
self.app_files = []
for target in self.targets:
extra_scripts = list(self.extra_scripts)
if hasattr(target, 'extra_scripts'):
                extra_scripts.extend(target.extra_scripts)
dst = self.build_alias_executable(target, target.script, extra_scripts)
self.app_files.append(dst)
for fn in extra_scripts:
if fn.endswith('.py'):
fn = fn[:-3]
elif fn.endswith('.pyw'):
fn = fn[:-4]
src_fn = script_executable(arch=self.arch, secondary=True)
tgt_fn = os.path.join(target.appdir, 'Contents', 'MacOS', os.path.basename(fn))
mergecopy(src_fn, tgt_fn)
make_exec(tgt_fn)
def collect_recipedict(self):
return dict(iterRecipes())
def get_modulefinder(self):
if self.debug_modulegraph:
debug = 4
else:
debug = 0
return find_modules(
scripts=self.collect_scripts(),
includes=self.includes,
packages=self.packages,
excludes=self.excludes,
debug=debug,
)
def collect_filters(self):
return [has_filename_filter] + list(self.filters)
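    # Recipe protocol: each recipe's check(cmd, mf) returns None when it does
    # not apply, or a dict whose optional 'packages', 'includes',
    # 'flatpackages', 'filters', 'loader_files', 'prescripts' and 'resources'
    # entries are merged into the build.  Applied recipes are removed from the
    # dict and the scan restarts until a full pass applies nothing.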
def process_recipes(self, mf, filters, flatpackages, loader_files):
rdict = self.collect_recipedict()
while True:
for name, check in rdict.items():
rval = check(self, mf)
if rval is None:
continue
# we can pull this off so long as we stop the iter
del rdict[name]
print('*** using recipe: %s ***' % (name,))
if rval.get('packages'):
self.packages.update(rval['packages'])
find_needed_modules(mf, packages=rval['packages'])
for pkg in rval.get('flatpackages', ()):
if isinstance(pkg, basestring):
pkg = (os.path.basename(pkg), pkg)
flatpackages[pkg[0]] = pkg[1]
filters.extend(rval.get('filters', ()))
loader_files.extend(rval.get('loader_files', ()))
newbootstraps = list(map(self.get_bootstrap,
rval.get('prescripts', ())))
if rval.get('includes'):
find_needed_modules(mf, includes=rval['includes'])
if rval.get('resources'):
self.resources.extend(rval['resources'])
for fn in newbootstraps:
if isinstance(fn, basestring):
mf.run_script(fn)
for target in self.targets:
target.prescripts.extend(newbootstraps)
break
else:
break
def _run(self):
try:
if self.alias:
self.run_alias()
else:
self.run_normal()
except:
raise
# XXX - remove when not debugging
# distutils sucks
import pdb, sys, traceback
traceback.print_exc()
pdb.post_mortem(sys.exc_info()[2])
print("Done!")
def filter_dependencies(self, mf, filters):
print("*** filtering dependencies ***")
nodes_seen, nodes_removed, nodes_orphaned = mf.filterStack(filters)
print('%d total' % (nodes_seen,))
print('%d filtered' % (nodes_removed,))
print('%d orphaned' % (nodes_orphaned,))
print('%d remaining' % (nodes_seen - nodes_removed,))
def get_appname(self):
return self.plist['CFBundleName']
def build_xref(self, mf, flatpackages):
for target in self.targets:
base = target.get_dest_base()
appdir = os.path.join(self.dist_dir, os.path.dirname(base))
appname = self.get_appname()
dgraph = os.path.join(appdir, appname + '.html')
print("*** creating dependency html: %s ***"
% (os.path.basename(dgraph),))
with open(dgraph, 'w') as fp:
mf.create_xref(fp)
def build_graph(self, mf, flatpackages):
for target in self.targets:
base = target.get_dest_base()
appdir = os.path.join(self.dist_dir, os.path.dirname(base))
appname = self.get_appname()
dgraph = os.path.join(appdir, appname + '.dot')
print("*** creating dependency graph: %s ***"
% (os.path.basename(dgraph),))
with open(dgraph, 'w') as fp:
mf.graphreport(fp, flatpackages=flatpackages)
def finalize_modulefinder(self, mf):
for item in mf.flatten():
if isinstance(item, Package) and item.filename == '-':
if sys.version_info[:2] <= (3,3):
fn = os.path.join(self.temp_dir, 'empty_package', '__init__.py')
if not os.path.exists(fn):
dn = os.path.dirname(fn)
if not os.path.exists(dn):
os.makedirs(dn)
with open(fn, 'w') as fp:
pass
item.filename = fn
py_files, extensions = parse_mf_results(mf)
# Remove all top-level scripts from the list of python files,
# those get treated differently.
py_files = [ item for item in py_files if not isinstance(item, Script) ]
extensions = list(extensions)
return py_files, extensions
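    # Directory prefixes of packages that are copied into the bundle as-is;
    # packagefilter() in create_binaries() uses these to keep their modules
    # out of the shared zip archive.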
def collect_packagedirs(self):
return list(filter(os.path.exists, [
os.path.join(os.path.realpath(self.get_bootstrap(pkg)), '')
for pkg in self.packages
]))
def run_normal(self):
mf = self.get_modulefinder()
filters = self.collect_filters()
flatpackages = {}
loader_files = []
self.process_recipes(mf, filters, flatpackages, loader_files)
if self.debug_modulegraph:
import pdb
pdb.Pdb().set_trace()
self.filter_dependencies(mf, filters)
if self.graph:
self.build_graph(mf, flatpackages)
if self.xref:
self.build_xref(mf, flatpackages)
py_files, extensions = self.finalize_modulefinder(mf)
pkgdirs = self.collect_packagedirs()
self.create_binaries(py_files, pkgdirs, extensions, loader_files)
missing = []
syntax_error = []
invalid_bytecode = []
for module in mf.nodes():
if isinstance(module, modulegraph.MissingModule):
if module.identifier != '__main__':
missing.append(module)
elif isinstance(module, modulegraph.InvalidSourceModule):
syntax_error.append(module)
elif hasattr(modulegraph, 'InvalidCompiledModule') and isinstance(module, modulegraph.InvalidCompiledModule):
invalid_bytecode.append(module)
if missing:
missing_unconditional = collections.defaultdict(set)
missing_fromimport = collections.defaultdict(set)
missing_fromimport_conditional = collections.defaultdict(set)
missing_conditional = collections.defaultdict(set)
for module in sorted(missing):
for m in mf.getReferers(module):
if m is None: continue # XXX
try:
ed = mf.edgeData(m, module)
except KeyError:
ed = None
if hasattr(modulegraph, 'DependencyInfo') and isinstance(ed, modulegraph.DependencyInfo):
c = missing_unconditional
if ed.conditional or ed.function:
if ed.fromlist:
c = missing_fromimport_conditional
else:
c = missing_conditional
elif ed.fromlist:
c = missing_fromimport
c[module.identifier].add(m.identifier)
else:
missing_unconditional[module.identifier].add(m.identifier)
if missing_unconditional:
log.warn("Modules not found (unconditional imports):")
for m in sorted(missing_unconditional):
log.warn(" * %s (%s)" % (m, ", ".join(sorted(missing_unconditional[m]))))
log.warn("")
if missing_conditional and not self.no_report_missing_conditional_import:
log.warn("Modules not found (conditional imports):")
for m in sorted(missing_conditional):
log.warn(" * %s (%s)" % (m, ", ".join(sorted(missing_conditional[m]))))
log.warn("")
if self.report_missing_from_imports and (
missing_fromimport or (
not self.no_report_missing_conditional_import and missing_fromimport_conditional)):
log.warn("Modules not found ('from ... import y'):")
for m in sorted(missing_fromimport):
log.warn(" * %s (%s)" % (m, ", ".join(sorted(missing_fromimport[m]))))
if not self.no_report_missing_conditional_import and missing_fromimport_conditional:
log.warn("")
log.warn("Conditional:")
for m in sorted(missing_fromimport_conditional):
log.warn(" * %s (%s)" % (m, ", ".join(sorted(missing_fromimport_conditional[m]))))
log.warn("")
if syntax_error:
log.warn("Modules with syntax errors:")
for module in sorted(syntax_error):
log.warn(" * %s"%(module.identifier))
log.warn("")
if invalid_bytecode:
log.warn("Modules with invalid bytecode:")
for module in sorted(invalid_bytecode):
log.warn(" * %s"%(module.identifier))
log.warn("")
def create_directories(self):
bdist_base = self.bdist_base
if self.semi_standalone:
self.bdist_dir = os.path.join(bdist_base,
'python%s-semi_standalone' % (sys.version[:3],), 'app')
else:
self.bdist_dir = os.path.join(bdist_base,
'python%s-standalone' % (sys.version[:3],), 'app')
if os.path.exists(self.bdist_dir):
shutil.rmtree(self.bdist_dir)
self.collect_dir = os.path.abspath(
os.path.join(self.bdist_dir, "collect"))
self.mkpath(self.collect_dir)
self.temp_dir = os.path.abspath(os.path.join(self.bdist_dir, "temp"))
self.mkpath(self.temp_dir)
self.dist_dir = os.path.abspath(self.dist_dir)
self.mkpath(self.dist_dir)
self.lib_dir = os.path.join(self.bdist_dir,
os.path.dirname(get_zipfile(self.distribution, self.semi_standalone)))
self.mkpath(self.lib_dir)
self.ext_dir = os.path.join(self.lib_dir, 'lib-dynload')
self.mkpath(self.ext_dir)
self.framework_dir = os.path.join(self.bdist_dir, 'Frameworks')
self.mkpath(self.framework_dir)
def create_binaries(self, py_files, pkgdirs, extensions, loader_files):
print("*** create binaries ***")
dist = self.distribution
pkgexts = []
copyexts = []
extmap = {}
def packagefilter(mod, pkgdirs=pkgdirs):
            fn = getattr(mod, 'filename', None)
            if fn is None:
                return None
            fn = os.path.realpath(fn)
for pkgdir in pkgdirs:
if fn.startswith(pkgdir):
return None
return fn
if pkgdirs:
py_files = list(filter(packagefilter, py_files))
for ext in extensions:
fn = packagefilter(ext)
if fn is None:
fn = os.path.realpath(getattr(ext, 'filename', None))
pkgexts.append(ext)
else:
if '.' in ext.identifier:
py_files.append(self.create_loader(ext))
copyexts.append(ext)
extmap[fn] = ext
# byte compile the python modules into the target directory
print("*** byte compile python files ***")
byte_compile(py_files,
target_dir=self.collect_dir,
optimize=self.optimize,
force=self.force,
verbose=self.verbose,
dry_run=self.dry_run)
for item in py_files:
if not isinstance(item, Package): continue
self.copy_package_data(item, self.collect_dir)
self.lib_files = []
self.app_files = []
# create the shared zipfile containing all Python modules
archive_name = os.path.join(self.lib_dir,
get_zipfile(dist, self.semi_standalone))
for path, files in loader_files:
dest = os.path.join(self.collect_dir, path)
self.mkpath(dest)
for fn in files:
destfn = os.path.join(dest, os.path.basename(fn))
if os.path.isdir(fn):
self.copy_tree(fn, destfn, preserve_symlinks=False)
else:
self.copy_file(fn, destfn)
arcname = self.make_lib_archive(archive_name,
base_dir=self.collect_dir, verbose=self.verbose,
dry_run=self.dry_run)
# XXX: this doesn't work with python3
#self.lib_files.append(arcname)
# build the executables
for target in self.targets:
extra_scripts = list(self.extra_scripts)
if hasattr(target, 'extra_scripts'):
extra_scripts.extend(target.extra_scripts)
dst = self.build_executable(
target, arcname, pkgexts, copyexts, target.script, extra_scripts)
exp = os.path.join(dst, 'Contents', 'MacOS')
execdst = os.path.join(exp, 'python')
if self.semi_standalone:
self.symlink(sys.executable, execdst)
else:
if os.path.exists(os.path.join(sys.prefix, ".Python")):
fn = os.path.join(sys.prefix, "lib", "python%d.%d"%(sys.version_info[:2]), "orig-prefix.txt")
if os.path.exists(fn):
with open(fn, 'rU') as fp:
prefix = fp.read().strip()
rest_path = os.path.normpath(sys.executable)[len(os.path.normpath(sys.prefix))+1:]
if rest_path.startswith('.'):
rest_path = rest_path[1:]
if PYTHONFRAMEWORK:
# When we're using a python framework bin/python refers to a stub executable
                        # that we don't want to use; we need the executable in Resources/Python.app
dpath = os.path.join(prefix, 'Resources', 'Python.app', 'Contents', 'MacOS')
self.copy_file(os.path.join(dpath, PYTHONFRAMEWORK), execdst)
else:
self.copy_file(os.path.join(prefix, rest_path), execdst)
else:
if PYTHONFRAMEWORK:
# When we're using a python framework bin/python refers to a stub executable
                        # that we don't want to use; we need the executable in Resources/Python.app
dpath = os.path.join(sys.prefix, 'Resources', 'Python.app', 'Contents', 'MacOS')
self.copy_file(os.path.join(dpath, PYTHONFRAMEWORK), execdst)
else:
self.copy_file(sys.executable, execdst)
if not self.debug_skip_macholib:
if self.force_system_tk:
print("force system tk")
resdir = os.path.join(dst, 'Contents', 'Resources')
pydir = os.path.join(resdir, 'lib', 'python%s.%s'%(sys.version_info[:2]))
ext_dir = os.path.join(pydir, os.path.basename(self.ext_dir))
tkinter_path = os.path.join(ext_dir, '_tkinter.so')
if os.path.exists(tkinter_path):
rewrite_tkinter_load_commands(tkinter_path)
else:
print("tkinter not found at", tkinter_path)
mm = PythonStandalone(self, dst, executable_path=exp)
dylib, runtime = self.get_runtime()
if self.semi_standalone:
mm.excludes.append(runtime)
else:
mm.mm.run_file(runtime)
for exclude in self.dylib_excludes:
info = macholib.dyld.framework_info(exclude)
if info is not None:
exclude = os.path.join(
info['location'], info['shortname'] + '.framework')
mm.excludes.append(exclude)
for fmwk in self.frameworks:
mm.mm.run_file(fmwk)
platfiles = mm.run()
if self.strip:
platfiles = self.strip_dsym(platfiles)
self.strip_files(platfiles)
self.app_files.append(dst)
def copy_package_data(self, package, target_dir):
"""
Copy any package data in a python package into the target_dir.
        This is a bit of a hack; it would be better to identify python eggs
and copy those in whole.
"""
exts = [ i[0] for i in imp.get_suffixes() ]
exts.append('.py')
exts.append('.pyc')
exts.append('.pyo')
def datafilter(item):
for e in exts:
if item.endswith(e):
return False
return True
target_dir = os.path.join(target_dir, *(package.identifier.split('.')))
for dname in package.packagepath:
filenames = list(filter(datafilter, zipio.listdir(dname)))
for fname in filenames:
if fname in ('.svn', 'CVS', '.hg', '.git'):
# Scrub revision manager junk
continue
if fname in ('__pycache__',):
# Ignore PEP 3147 bytecode cache
continue
if fname.startswith('.') and fname.endswith('.swp'):
# Ignore vim(1) temporary files
continue
if fname.endswith('~') or fname.endswith('.orig'):
# Ignore backup files for common tools (hg, emacs, ...)
continue
pth = os.path.join(dname, fname)
# Check if we have found a package, exclude those
if zipio.isdir(pth):
# XXX: the 'and not' part is wrong, need to fix zipio.isdir
for p in zipio.listdir(pth):
if p.startswith('__init__.') and p[8:] in exts:
break
else:
if os.path.isfile(pth):
# Avoid extracting a resource file that happens
                            # to be a zipfile.
# XXX: Need API in zipio for nicer code.
copy_file(pth, os.path.join(target_dir, fname))
else:
copy_tree(pth, os.path.join(target_dir, fname))
continue
elif zipio.isdir(pth) and (
zipio.isfile(os.path.join(pth, '__init__.py'))
or zipio.isfile(os.path.join(pth, '__init__.pyc'))
or zipio.isfile(os.path.join(pth, '__init__.pyo'))):
# Subdirectory is a python package, these will get included later on
# when the subpackage itself is included, ignore for now.
pass
else:
copy_file(pth, os.path.join(target_dir, fname))
def strip_dsym(self, platfiles):
""" Remove .dSYM directories in the bundled application """
#
        # .dSYM directories contain detached debugging information and
# should be completely removed when the "strip" option is specified.
#
if self.dry_run:
return platfiles
for dirpath, dnames, fnames in os.walk(self.appdir):
for nm in list(dnames):
if nm.endswith('.dSYM'):
print("removing debug info: %s/%s"%(dirpath, nm))
shutil.rmtree(os.path.join(dirpath, nm))
dnames.remove(nm)
return [file for file in platfiles if '.dSYM' not in file]
def strip_files(self, files):
unstripped = 0
stripfiles = []
for fn in files:
unstripped += os.stat(fn).st_size
stripfiles.append(fn)
log.info('stripping %s', os.path.basename(fn))
strip_files(stripfiles, dry_run=self.dry_run, verbose=self.verbose)
stripped = 0
for fn in stripfiles:
stripped += os.stat(fn).st_size
log.info('stripping saved %d bytes (%d / %d)',
unstripped - stripped, stripped, unstripped)
def copy_dylib(self, src, dst):
# will be copied from the framework?
if src != sys.executable:
force, self.force = self.force, True
self.copy_file(src, dst)
self.force = force
return dst
def copy_versioned_framework(self, info, dst):
# XXX - Boy is this ugly, but it makes sense because the developer
# could have both Python 2.3 and 2.4, or Tk 8.4 and 8.5, etc.
# Saves a good deal of space, and I'm pretty sure this ugly
# hack is correct in the general case.
version = info['version']
if version is None:
return self.raw_copy_framework(info, dst)
short = info['shortname'] + '.framework'
infile = os.path.join(info['location'], short)
outfile = os.path.join(dst, short)
vsplit = os.path.join(infile, 'Versions').split(os.sep)
def condition(src, vsplit=vsplit, version=version):
srcsplit = src.split(os.sep)
if (
len(srcsplit) > len(vsplit) and
srcsplit[:len(vsplit)] == vsplit and
srcsplit[len(vsplit)] != version and
not os.path.islink(src)
):
return False
# Skip Headers, .svn, and CVS dirs
return framework_copy_condition(src)
return self.copy_tree(infile, outfile,
preserve_symlinks=True, condition=condition)
def copy_framework(self, info, dst):
force, self.force = self.force, True
if info['shortname'] == PYTHONFRAMEWORK:
self.copy_python_framework(info, dst)
else:
self.copy_versioned_framework(info, dst)
self.force = force
return os.path.join(dst, info['name'])
def raw_copy_framework(self, info, dst):
short = info['shortname'] + '.framework'
infile = os.path.join(info['location'], short)
outfile = os.path.join(dst, short)
return self.copy_tree(infile, outfile,
preserve_symlinks=True, condition=framework_copy_condition)
def copy_python_framework(self, info, dst):
# XXX - In this particular case we know exactly what we can
# get away with.. should this be extended to the general
# case? Per-framework recipes?
includedir = get_config_var('CONFINCLUDEPY')
configdir = get_config_var('LIBPL')
if includedir is None:
includedir = 'python%d.%d'%(sys.version_info[:2])
else:
includedir = os.path.basename(includedir)
if configdir is None:
configdir = 'config'
else:
configdir = os.path.basename(configdir)
indir = os.path.dirname(os.path.join(info['location'], info['name']))
outdir = os.path.dirname(os.path.join(dst, info['name']))
self.mkpath(os.path.join(outdir, 'Resources'))
pydir = 'python%s.%s'%(sys.version_info[:2])
        # Create a symlink for "Python.framework/Versions/Current". This
        # is required for the Mac App Store.
os.symlink(
os.path.basename(outdir),
os.path.join(os.path.dirname(outdir), "Current"))
# Likewise for two links in the root of the framework:
os.symlink(
'Versions/Current/Resources',
os.path.join(os.path.dirname(os.path.dirname(outdir)), 'Resources'))
os.symlink(
os.path.join('Versions/Current', PYTHONFRAMEWORK),
os.path.join(os.path.dirname(os.path.dirname(outdir)), PYTHONFRAMEWORK))
# Experiment for issue 57
if not os.path.exists(os.path.join(indir, 'include')):
alt = os.path.join(indir, 'Versions/Current')
if os.path.exists(os.path.join(alt, 'include')):
indir = alt
# distutils looks for some files relative to sys.executable, which
# means they have to be in the framework...
self.mkpath(os.path.join(outdir, 'include'))
self.mkpath(os.path.join(outdir, 'include', includedir))
self.mkpath(os.path.join(outdir, 'lib'))
self.mkpath(os.path.join(outdir, 'lib', pydir))
self.mkpath(os.path.join(outdir, 'lib', pydir, configdir))
fmwkfiles = [
os.path.basename(info['name']),
'Resources/Info.plist',
'include/%s/pyconfig.h'%(includedir),
]
if '_sysconfigdata' not in sys.modules:
fmwkfiles.append(
'lib/%s/%s/Makefile'%(pydir, configdir)
)
for fn in fmwkfiles:
self.copy_file(
os.path.join(indir, fn),
os.path.join(outdir, fn))
def fixup_distribution(self):
dist = self.distribution
# Trying to obtain app and plugin from dist for backward compatibility
# reasons.
app = dist.app
plugin = dist.plugin
# If we can get suitable values from self.app and self.plugin, we prefer
# them.
if self.app is not None or self.plugin is not None:
app = self.app
plugin = self.plugin
# Convert our args into target objects.
dist.app = FixupTargets(app, "script")
dist.plugin = FixupTargets(plugin, "script")
if dist.app and dist.plugin:
# XXX - support apps and plugins?
raise DistutilsOptionError(
"You must specify either app or plugin, not both")
elif dist.app:
self.style = 'app'
self.targets = dist.app
elif dist.plugin:
self.style = 'plugin'
self.targets = dist.plugin
else:
raise DistutilsOptionError(
"You must specify either app or plugin")
if len(self.targets) != 1:
# XXX - support multiple targets?
raise DistutilsOptionError(
"Multiple targets not currently supported")
if not self.extension:
self.extension = '.' + self.style
# make sure all targets use the same directory, this is
# also the directory where the pythonXX.dylib must reside
paths = set()
for target in self.targets:
paths.add(os.path.dirname(target.get_dest_base()))
if len(paths) > 1:
raise DistutilsOptionError(
"all targets must use the same directory: %s" %
([p for p in paths],))
if paths:
app_dir = paths.pop() # the only element
if os.path.isabs(app_dir):
raise DistutilsOptionError(
"app directory must be relative: %s" % (app_dir,))
self.app_dir = os.path.join(self.dist_dir, app_dir)
self.mkpath(self.app_dir)
else:
# Do we allow to specify no targets?
# We can at least build a zipfile...
self.app_dir = self.lib_dir
def initialize_prescripts(self):
prescripts = []
prescripts.append('reset_sys_path')
if self.semi_standalone:
prescripts.append('semi_standalone_path')
if 0 and sys.version_info[:2] >= (3, 2) and not self.alias:
# Python 3.2 or later requires a more complicated
# bootstrap
prescripts.append('import_encodings')
if os.path.exists(os.path.join(sys.prefix, ".Python")):
# We're in a virtualenv, which means sys.path
# will be broken in alias builds unless we fix
# it.
if self.alias or self.semi_standalone:
prescripts.append("virtualenv")
prescripts.append(StringIO('_fixup_virtualenv(%r)' % (sys.real_prefix,)))
if self.site_packages or self.alias:
import site
global_site_packages = not os.path.exists(
os.path.join(os.path.dirname(site.__file__), 'no-global-site-packages.txt'))
prescripts.append('virtualenv_site_packages')
prescripts.append(StringIO('_site_packages(%r, %r, %d)' % (
sys.prefix, sys.real_prefix, global_site_packages)))
elif self.site_packages or self.alias:
prescripts.append('site_packages')
if is_system():
prescripts.append('system_path_extras')
#if self.style == 'app':
# prescripts.append('setup_pkgresource')
included_subpkg = [pkg for pkg in self.packages if '.' in pkg]
if included_subpkg:
prescripts.append('setup_included_subpackages')
prescripts.append(StringIO('_path_hooks = %r'%(
included_subpkg)))
if self.emulate_shell_environment:
prescripts.append('emulate_shell_environment')
if self.argv_emulation and self.style == 'app':
prescripts.append('argv_emulation')
if 'CFBundleDocumentTypes' not in self.plist:
self.plist['CFBundleDocumentTypes'] = [
{
'CFBundleTypeOSTypes' : [
'****',
'fold',
'disk',
],
'CFBundleTypeRole': 'Viewer'
},
]
if self.argv_inject is not None:
prescripts.append('argv_inject')
prescripts.append(
StringIO('_argv_inject(%r)\n' % (self.argv_inject,)))
if self.style == 'app' and not self.no_chdir:
prescripts.append('chdir_resource')
if not self.alias:
prescripts.append('disable_linecache')
prescripts.append('boot_' + self.style)
else:
# Add ctypes prescript because it is needed to
# find libraries in the bundle, but we don't run
# recipes and hence the ctypes recipe is not used
# for alias builds.
prescripts.append('ctypes_setup')
if self.additional_paths:
prescripts.append('path_inject')
prescripts.append(
StringIO('_path_inject(%r)\n' % (self.additional_paths,)))
prescripts.append('boot_alias' + self.style)
newprescripts = []
for s in prescripts:
if isinstance(s, basestring):
newprescripts.append(
self.get_bootstrap('py2app.bootstrap.' + s))
else:
newprescripts.append(s)
for target in self.targets:
prescripts = getattr(target, 'prescripts', [])
target.prescripts = newprescripts + prescripts
def get_bootstrap(self, bootstrap):
if isinstance(bootstrap, basestring):
if not os.path.exists(bootstrap):
bootstrap = imp_find_module(bootstrap)[1]
return bootstrap
def get_bootstrap_data(self, bootstrap):
bootstrap = self.get_bootstrap(bootstrap)
if not isinstance(bootstrap, basestring):
return bootstrap.getvalue()
else:
with open(bootstrap, 'rU') as fp:
return fp.read()
def create_pluginbundle(self, target, script, use_runtime_preference=True):
base = target.get_dest_base()
appdir = os.path.join(self.dist_dir, os.path.dirname(base))
appname = self.get_appname()
print("*** creating plugin bundle: %s ***" % (appname,))
if self.runtime_preferences and use_runtime_preference:
self.plist.setdefault(
'PyRuntimeLocations', self.runtime_preferences)
appdir, plist = create_pluginbundle(
appdir,
appname,
plist=self.plist,
extension=self.extension,
arch=self.arch,
)
appdir = fsencoding(appdir)
resdir = os.path.join(appdir, 'Contents', 'Resources')
return appdir, resdir, plist
def create_appbundle(self, target, script, use_runtime_preference=True):
base = target.get_dest_base()
appdir = os.path.join(self.dist_dir, os.path.dirname(base))
appname = self.get_appname()
print("*** creating application bundle: %s ***" % (appname,))
if self.runtime_preferences and use_runtime_preference:
self.plist.setdefault(
'PyRuntimeLocations', self.runtime_preferences)
pythonInfo = self.plist.setdefault('PythonInfoDict', {})
py2appInfo = pythonInfo.setdefault('py2app', {}).update(dict(
alias=bool(self.alias),
))
appdir, plist = create_appbundle(
appdir,
appname,
plist=self.plist,
extension=self.extension,
arch=self.arch,
)
appdir = fsencoding(appdir)
resdir = os.path.join(appdir, 'Contents', 'Resources')
return appdir, resdir, plist
def create_bundle(self, target, script, use_runtime_preference=True):
fn = getattr(self, 'create_%sbundle' % (self.style,))
return fn(
target,
script,
use_runtime_preference=use_runtime_preference
)
def iter_frameworks(self):
for fn in self.frameworks:
fmwk = macholib.dyld.framework_info(fn)
if fmwk is None:
yield fn
else:
basename = fmwk['shortname'] + '.framework'
yield os.path.join(fmwk['location'], basename)
def build_alias_executable(self, target, script, extra_scripts):
# Build an alias executable for the target
appdir, resdir, plist = self.create_bundle(target, script)
# symlink python executable
execdst = os.path.join(appdir, 'Contents', 'MacOS', 'python')
prefixPathExecutable = os.path.join(sys.prefix, 'bin', 'python')
if os.path.exists(prefixPathExecutable):
pyExecutable = prefixPathExecutable
else:
pyExecutable = sys.executable
self.symlink(pyExecutable, execdst)
# make PYTHONHOME
pyhome = os.path.join(resdir, 'lib', 'python' + sys.version[:3])
realhome = os.path.join(sys.prefix, 'lib', 'python' + sys.version[:3])
makedirs(pyhome)
if self.optimize:
self.symlink('../../site.pyo', os.path.join(pyhome, 'site.pyo'))
else:
self.symlink('../../site.pyc', os.path.join(pyhome, 'site.pyc'))
self.symlink(
os.path.join(realhome, 'config'),
os.path.join(pyhome, 'config'))
# symlink data files
# XXX: fixme: need to integrate automatic data conversion
for src, dest in self.iter_data_files():
dest = os.path.join(resdir, dest)
if src == dest:
continue
makedirs(os.path.dirname(dest))
try:
copy_resource(src, dest, dry_run=self.dry_run, symlink=1)
except:
import traceback
traceback.print_exc()
raise
plugindir = os.path.join(appdir, 'Contents', 'Library')
for src, dest in self.iter_extra_plugins():
dest = os.path.join(plugindir, dest)
if src == dest:
continue
makedirs(os.path.dirname(dest))
try:
copy_resource(src, dest, dry_run=self.dry_run)
except:
import traceback
traceback.print_exc()
raise
# symlink frameworks
for src in self.iter_frameworks():
dest = os.path.join(
appdir, 'Contents', 'Frameworks', os.path.basename(src))
if src == dest:
continue
makedirs(os.path.dirname(dest))
self.symlink(os.path.abspath(src), dest)
self.compile_datamodels(resdir)
self.compile_mappingmodels(resdir)
bootfn = '__boot__'
bootfile = open(os.path.join(resdir, bootfn + '.py'), 'w')
for fn in target.prescripts:
bootfile.write(self.get_bootstrap_data(fn))
bootfile.write('\n\n')
bootfile.write("DEFAULT_SCRIPT=%r\n"%(os.path.realpath(script),))
script_map = {}
for fn in extra_scripts:
tgt = os.path.realpath(fn)
fn = os.path.basename(fn)
if fn.endswith('.py'):
script_map[fn[:-3]] = tgt
            elif fn.endswith('.pyw'):
script_map[fn[:-4]] = tgt
else:
script_map[fn] = tgt
bootfile.write("SCRIPT_MAP=%r\n"%(script_map,))
bootfile.write('try:\n')
bootfile.write(' _run()\n')
bootfile.write('except KeyboardInterrupt:\n')
bootfile.write(' pass\n')
bootfile.close()
target.appdir = appdir
return appdir
def build_executable(self, target, arcname, pkgexts, copyexts, script, extra_scripts):
# Build an executable for the target
appdir, resdir, plist = self.create_bundle(target, script)
self.appdir = appdir
self.resdir = resdir
self.plist = plist
for fn in extra_scripts:
if fn.endswith('.py'):
fn = fn[:-3]
elif fn.endswith('.pyw'):
fn = fn[:-4]
src_fn = script_executable(arch=self.arch, secondary=True)
tgt_fn = os.path.join(self.appdir, 'Contents', 'MacOS', os.path.basename(fn))
mergecopy(src_fn, tgt_fn)
make_exec(tgt_fn)
site_path = os.path.join(resdir, 'site.py')
byte_compile([
SourceModule('site', site_path),
],
target_dir=resdir,
optimize=self.optimize,
force=self.force,
verbose=self.verbose,
dry_run=self.dry_run)
if not self.dry_run:
os.unlink(site_path)
includedir = get_config_var('CONFINCLUDEPY')
configdir = get_config_var('LIBPL')
if includedir is None:
includedir = 'python%d.%d'%(sys.version_info[:2])
else:
includedir = os.path.basename(includedir)
if configdir is None:
configdir = 'config'
else:
configdir = os.path.basename(configdir)
self.compile_datamodels(resdir)
self.compile_mappingmodels(resdir)
bootfn = '__boot__'
bootfile = open(os.path.join(resdir, bootfn + '.py'), 'w')
for fn in target.prescripts:
bootfile.write(self.get_bootstrap_data(fn))
bootfile.write('\n\n')
bootfile.write("DEFAULT_SCRIPT=%r\n"%(os.path.basename(script),))
script_map = {}
for fn in extra_scripts:
fn = os.path.basename(fn)
if fn.endswith('.py'):
script_map[fn[:-3]] = fn
            elif fn.endswith('.pyw'):
script_map[fn[:-4]] = fn
else:
script_map[fn] = fn
bootfile.write("SCRIPT_MAP=%r\n"%(script_map,))
bootfile.write('_run()\n')
bootfile.close()
self.copy_file(script, resdir)
for fn in extra_scripts:
self.copy_file(fn, resdir)
pydir = os.path.join(resdir, 'lib', 'python%s.%s'%(sys.version_info[:2]))
if sys.version_info[0] == 2 or self.semi_standalone:
arcdir = os.path.join(resdir, 'lib', 'python' + sys.version[:3])
else:
arcdir = os.path.join(resdir, 'lib')
realhome = os.path.join(sys.prefix, 'lib', 'python' + sys.version[:3])
self.mkpath(pydir)
        # The site.py file needs to be in two locations:
# 1) in lib/pythonX.Y, to be found during normal startup and
# by the 'python' executable
# 2) in the resources directory next to the script for
# semistandalone builds (the lib/pythonX.Y directory is too
# late on sys.path to be found in that case).
#
if self.optimize:
self.symlink('../../site.pyo', os.path.join(pydir, 'site.pyo'))
else:
self.symlink('../../site.pyc', os.path.join(pydir, 'site.pyc'))
cfgdir = os.path.join(pydir, configdir)
realcfg = os.path.join(realhome, configdir)
real_include = os.path.join(sys.prefix, 'include')
if self.semi_standalone:
self.symlink(realcfg, cfgdir)
self.symlink(real_include, os.path.join(resdir, 'include'))
else:
self.mkpath(cfgdir)
if '_sysconfigdata' not in sys.modules:
# Recent enough versions of Python 2.7 and 3.x have
# an _sysconfigdata module and don't need the Makefile
# to provide the sysconfig data interface. Don't copy
# them.
for fn in 'Makefile', 'Setup', 'Setup.local', 'Setup.config':
rfn = os.path.join(realcfg, fn)
if os.path.exists(rfn):
self.copy_file(rfn, os.path.join(cfgdir, fn))
inc_dir = os.path.join(resdir, 'include', includedir)
self.mkpath(inc_dir)
self.copy_file(get_config_h_filename(),
os.path.join(inc_dir, 'pyconfig.h'))
self.copy_file(arcname, arcdir)
if sys.version_info[0] != 2:
import zlib
self.copy_file(zlib.__file__, os.path.dirname(arcdir))
ext_dir = os.path.join(pydir, os.path.basename(self.ext_dir))
self.copy_tree(self.ext_dir, ext_dir, preserve_symlinks=True)
self.copy_tree(self.framework_dir,
os.path.join(appdir, 'Contents', 'Frameworks'),
preserve_symlinks=True)
for pkg_name in self.packages:
pkg = self.get_bootstrap(pkg_name)
print('XXXX', pkg_name, pkg)
if self.semi_standalone:
# For semi-standalone builds don't copy packages
# from the stdlib into the app bundle, even when
# they are mentioned in self.packages.
p = Package(pkg_name, pkg)
if not not_stdlib_filter(p):
continue
dst = os.path.join(pydir, pkg_name)
self.mkpath(dst)
self.copy_tree(pkg, dst)
# FIXME: The python files should be bytecompiled
# here (see issue 101)
for copyext in copyexts:
fn = os.path.join(ext_dir,
(copyext.identifier.replace('.', os.sep) +
os.path.splitext(copyext.filename)[1])
)
self.mkpath(os.path.dirname(fn))
copy_file(copyext.filename, fn, dry_run=self.dry_run)
for src, dest in self.iter_data_files():
dest = os.path.join(resdir, dest)
if src == dest:
continue
makedirs(os.path.dirname(dest))
copy_resource(src, dest, dry_run=self.dry_run)
plugindir = os.path.join(appdir, 'Contents', 'Library')
for src, dest in self.iter_extra_plugins():
dest = os.path.join(plugindir, dest)
if src == dest:
continue
makedirs(os.path.dirname(dest))
copy_resource(src, dest, dry_run=self.dry_run)
target.appdir = appdir
return appdir
def create_loader(self, item):
# Hm, how to avoid needless recreation of this file?
slashname = item.identifier.replace('.', os.sep)
pathname = os.path.join(self.temp_dir, "%s.py" % slashname)
if os.path.exists(pathname):
if self.verbose:
print("skipping python loader for extension %r"
% (item.identifier,))
else:
self.mkpath(os.path.dirname(pathname))
# and what about dry_run?
if self.verbose:
print("creating python loader for extension %r"
% (item.identifier,))
fname = slashname + os.path.splitext(item.filename)[1]
source = make_loader(fname)
if not self.dry_run:
with open(pathname, "w") as fp:
fp.write(source)
else:
return
return SourceModule(item.identifier, pathname)
def make_lib_archive(self, zip_filename, base_dir, verbose=0,
dry_run=0):
# Like distutils "make_archive", except we can specify the
# compression to use - default is ZIP_STORED to keep the
# runtime performance up.
# Also, we don't append '.zip' to the filename.
from distutils.dir_util import mkpath
mkpath(os.path.dirname(zip_filename), dry_run=dry_run)
if self.compressed:
compression = zipfile.ZIP_DEFLATED
else:
compression = zipfile.ZIP_STORED
if not dry_run:
z = zipfile.ZipFile(zip_filename, "w",
compression=compression)
save_cwd = os.getcwd()
os.chdir(base_dir)
for dirpath, dirnames, filenames in os.walk('.'):
if filenames:
# Ensure that there are directory entries for
# all directories in the zipfile. This is a
# workaround for <http://bugs.python.org/issue14905>:
# zipimport won't consider 'pkg/foo.py' to be in
# namespace package 'pkg' unless there is an
# entry for the directory (or there is a
# pkg/__init__.py file as well)
z.write(dirpath, dirpath)
for fn in filenames:
path = os.path.normpath(os.path.join(dirpath, fn))
if os.path.isfile(path):
z.write(path, path)
os.chdir(save_cwd)
z.close()
return zip_filename
def copy_tree(self, infile, outfile,
preserve_mode=1, preserve_times=1, preserve_symlinks=0,
level=1, condition=None):
"""Copy an entire directory tree respecting verbose, dry-run,
and force flags.
This version doesn't bork on existing symlinks
"""
return copy_tree(
infile, outfile,
            preserve_mode, preserve_times, preserve_symlinks,
not self.force,
dry_run=self.dry_run,
condition=condition)
| lovexiaov/SandwichApp | venv/lib/python2.7/site-packages/py2app/build_app.py | Python | apache-2.0 | 77,527 |
from behave import *
from django.test import Client
from django.conf import settings
from features.factories import VisitorFactory
from hamcrest import *
use_step_matcher("parse")
@when('I go to "{url}"')
def step_impl(context, url):
"""
:param url: "/" homepage
:type context: behave.runner.Context
"""
context.driver.get(context.server_url+url)
assert_that(context.driver.current_url, is_(context.server_url+url))
@then('I should see a page with title "{title}"')
def step_impl(context, title):
"""
:param title:
:type context: behave.runner.Context
"""
assert_that(context.driver.title, contains_string(title),
"Page title contains the word home")
@then('There should be a "{text}" link with name "{name}"')
def step_impl(context, text, name):
"""
:type text: str
:type name: str
:type context: behave.runner.Context
"""
html_element = context.driver.find_element_by_name(name)
assert_that(html_element.text, contains_string(text),
"Element has correct text")
@when('I press the "{name}"')
def step_impl(context, name):
"""
:type name: str
:type context: behave.runner.Context
"""
context.driver.find_element_by_name(name).click()
@then('There should be a "SIGN UP" link')
def step_impl(context):
"""
:type context: behave.runner.Context
"""
context.driver.find_element_by_id('sign-up-link')
@then('There should be a "LOG IN" link')
def step_impl(context):
"""
:type context: behave.runner.Context
"""
context.driver.find_element_by_id('log-in-link')
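# ---------------------------------------------------------------------------
# Hedged usage sketch: a feature file (not part of this module) exercising the
# step definitions above might read as follows. The URL, page title and
# scenario wording are illustrative assumptions, not taken from the project's
# actual feature files.
#
#   Feature: Landing page
#     Scenario: Visitor sees the entry links
#       When I go to "/"
#       Then I should see a page with title "Home"
#       And There should be a "SIGN UP" link
#       And There should be a "LOG IN" link
# ---------------------------------------------------------------------------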
| nectR-Tutoring/nectr | features/steps/connection.py | Python | mit | 1,624 |
#
# Copyright (C) 2009, 2010 Brad Howes.
#
# This file is part of Pyslimp3.
#
# Pyslimp3 is free software; you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation; either version 3, or (at your option) any later version.
#
# Pyslimp3 is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# Pyslimp3; see the file COPYING. If not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
# USA.
#
from Content import Content
from Display import *
from KeyProcessor import *
from RatingDisplay import *
from TrackListBrowser import TrackListBrowser
#
# Display generator showing current playing status of iTunes. Responds to
# various keyCode events to control iTunes playback.
#
class PlaybackDisplay( DisplayGenerator ):
#
# Mapping of iTunes player state to a status string
#
kPlayerState = { 'k.playing': '',
'k.paused': 'PAUSED',
'k.stopped': 'STOPPED',
'k.fast_forwarding': 'FFWD',
'k.rewinding': 'RWD'
}
#
# Constructor.
#
def __init__( self, client, prevLevel ):
if prevLevel is None:
raise RuntimeError, 'prevLevel is None'
DisplayGenerator.__init__( self, client, prevLevel )
self.formatters = ( self.getPlayerTrackIndex,
self.getPlayerPositionElapsed,
self.getPlayerPositionRemaining,
self.getPlayerTrackDuration,
self.getEmptyString )
#
# Obtain the last formatter setting in use.
#
self.setFormatterIndex(
client.getSettings().getPlaybackFormatterIndex() )
self.tens = None
#
# Set the position formatter index to a new value.
#
def setFormatterIndex( self, value ):
#
# Make sure that the value is valid
#
if value < 0:
value = 0
elif value >= len( self.formatters ):
value = len( self.formatters ) - 1
#
# Install the new value and remember it in the client's settings
#
self.getPlayerPosition = self.formatters[ value ]
self.formatterIndex = value
self.client.getSettings().setPlaybackFormatterIndex( value )
#
# Override of Display method. Install our own keymap entries.
#
def fillKeyMap( self ):
DisplayGenerator.fillKeyMap( self )
self.addKeyMapEntry( kArrowUp, None,
self.nextTrackBrowser )
self.addKeyMapEntry( kArrowDown, None,
self.previousTrackBrowser )
#
# Show and edit the rating for the current track
#
self.addKeyMapEntry( kArrowRight, None, self.ratings )
self.addKeyMapEntry( kPIP, None, self.ratings )
#
# Start playing if not already
#
self.addKeyMapEntry( kPlay, None, self.play )
#
# Move to previous track if not held down.
#
self.addKeyMapEntry( kRewind, kModRelease, self.previousTrack )
#
# Rewind playback position while held down
#
self.addKeyMapEntry( kRewind, kModHeld, self.rewind )
#
# Resume normal playback once released
#
self.addKeyMapEntry( kRewind, kModReleaseHeld, self.resume )
#
# Move to next track if not held down
#
self.addKeyMapEntry( kFastForward, kModRelease, self.nextTrack )
#
        # Fast-forward playback position while held down
#
self.addKeyMapEntry( kFastForward, kModHeld, self.fastForward )
#
# Resume normal playback once released
#
self.addKeyMapEntry( kFastForward, kModReleaseHeld, self.resume )
#
        # Use the next available display formatter
#
self.addKeyMapEntry( kDisplay, kModFirst,
self.nextPlayerPositionFormatter )
#
# Install the next available display formatter
#
def nextPlayerPositionFormatter( self ):
index = ( self.formatterIndex + 1 ) % len( self.formatters )
self.setFormatterIndex( index )
def nextTrackBrowser( self ):
playlist = self.source.getActivePlaylist()
index = playlist.getTrackIndex( self.source.getCurrentTrack() ) + 1
if index >= playlist.getTrackCount():
index = 0
return TrackListBrowser( self.client, self, playlist.getTracks(),
index, True )
def previousTrackBrowser( self ):
playlist = self.source.getActivePlaylist()
index = playlist.getTrackIndex( self.source.getCurrentTrack() ) - 1
if index < 0:
index = playlist.getTrackCount() - 1
return TrackListBrowser( self.client, self, playlist.getTracks(),
index, True )
def unrecord( self, trackIndex ):
print( 'unrecord', trackIndex )
playlist = self.source.getActivePlaylist()
if not playlist.getCanManipulate():
return self
playlist.removeTrack( trackIndex )
if playlist.getTrackCount() == 0:
return self.prevLevel
return self
#
# Generate a screen showing what is playing, the current iTunes playback
# state, and the playback position.
#
def generate( self ):
track = self.source.getCurrentTrack()
line1 = track.getName()
line2 = track.getAlbumName() + \
unichr( CustomCharacters.kDottedVerticalBar ) + \
track.getArtistName()
state = self.getPlayerState( track )
return Content( [ line1,
line2 ],
[ self.getPlayerPosition( track ),
state ] )
#
# Obtain a string representing the current iTunes player state.
#
def getPlayerState( self, track ):
state = self.kPlayerState.get( self.source.getPlayerState(), '???' )
if state == '':
if self.source.getMute():
state = 'MUTED'
if state == '':
state = self.getPlayerPositionIndicator( track )
else:
state = unichr( CustomCharacters.kEllipsis ) + state
return state
#
# Obtain an empty string. This is one of the custom position indicators.
#
def getEmptyString( self, track ):
return ''
#
# Obtain the current track index and the total track count. This is one of
# the custom position indicators.
#
def getPlayerTrackIndex( self, track ):
playlist = self.source.getActivePlaylist()
return '%d/%d' % ( playlist.getTrackIndex( track ) + 1,
playlist.getTrackCount() )
#
# Obtain a graphical progress indicator showing how much of the song has
# elapsed.
#
def getPlayerPositionIndicator( self, track ):
position = float( self.source.getPlayerPosition() ) / \
track.getDuration()
return generateProgressIndicator( 5, position )
#
# Obtain a numerical elapsed indicator in MM:SS format.
#
def getPlayerPositionElapsed( self, track ):
return '+' + getHHMMSS( self.source.getPlayerPosition() )
#
# Obtain a numerical remaining indicator in MM:SS format.
#
def getPlayerPositionRemaining( self, track ):
return '-' + getHHMMSS( track.getDuration() -
self.source.getPlayerPosition() )
#
# Obtain the duration for the current track.
#
def getPlayerTrackDuration( self, track ):
return getHHMMSS( track.getDuration() )
#
# Show a track rating editor for the current track.
#
def ratings( self ):
return TrackRatingDisplay( self.client, self,
self.source.getCurrentTrack() )
#
# Move to the previous track in the current playlist
#
def previousTrack( self ):
self.source.previousTrack()
return self
#
# Move to the next track in the current playlist
#
def nextTrack( self ):
self.source.nextTrack()
return self
#
# Stop iTunes playback, and reset the playback position to the start of the
# current track.
#
def stop( self ):
self.source.stop()
return self
#
# Begin iTunes playback.
#
def play( self, trackIndex = -1 ):
if trackIndex == -1:
self.source.play()
else:
self.source.getActivePlaylist().play( trackIndex )
return self
#
# Stop iTunes playback, but leave the playback position where it is.
#
def pause( self ):
self.source.pause()
return self
#
    # Move the current playback position back in time.
#
def rewind( self ):
self.source.rewind()
return self
#
    # Move the current playback position forward in time.
#
def fastForward( self ):
self.source.fastForward()
return self
#
# Stop the rewind() or fastForward() behavior, resuming normal iTunes
# playback
#
def resume( self ):
self.source.resume()
return self
#
# Use the '0' key to restart the current track. Otherwise, treat as an
# index of the track to play (1-9). Override of DisplayGenerator method.
#
def digit( self, digit ):
source = self.source
playlist = source.getActivePlaylist()
maxIndex = playlist.getTrackCount()
index = digit
#
# If there was a previous digit that could make a valid index > 9,
# attempt to use it, and reset the tens offset.
#
if self.tens:
index += self.tens
self.tens = None
#
# See if this digit could be used to make a valid index > 9
#
elif digit > 0:
tens = digit * 10
if tens <= maxIndex:
self.tens = tens
#
# Calculate a valid index
#
if index == 0:
source.beginTrack()
else:
index = min( max( index, 1 ), maxIndex ) - 1
playlist.play( index )
return self
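    # Worked example (illustrative, assuming a 15-track playlist): pressing
    # '1' plays track 1 immediately and also remembers tens=10 (since
    # 10 <= 15); a following '2' then resolves to index 12 and track 12
    # plays. Pressing '3' alone plays track 3 without remembering tens=30,
    # because 30 exceeds the track count. Pressing '0' restarts the current
    # track via beginTrack().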
| bradhowes/pyslimp3 | server/PlaybackDisplay.py | Python | gpl-3.0 | 10,676 |
# pylint: disable=missing-docstring,redefined-builtin
from sys import exit
exit(0)
| ruchee/vimrc | vimfiles/bundle/vim-python/submodules/pylint/tests/functional/c/consider/consider_using_sys_exit_exempted.py | Python | mit | 85 |
"""First attempt at setup.py"""
from setuptools import setup, find_packages
setup (
name='ncexplorer',
    version='0.7.2',
description='Climate data analysis utility.',
long_description='Climate data analysis utility.',
url='https://github.com/godfrey4000/ncexplorer',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Atmospheric Science',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
],
keywords='climate netcdf analysis',
# The packages
packages=find_packages(exclude=['docs', 'etc', 'ncexplorer/test']),
# install_requires=['xarray', 'esgf-pyclient'],
)
| godfrey4000/ncexplorer | setup.py | Python | mit | 709 |
# -*- coding: utf-8 -*-
"""Tools for downloading bibtex files from digital object identifiers."""
import bibpy
from urllib.request import Request, urlopen
def retrieve(doi, source='https://doi.org/{0}', raw=False, **options):
"""Download a bibtex file specified by a digital object identifier.
The source is a URL containing a single positional format specifier which
is where the requested doi should appear.
By default, the data from the doi is parsed by bibpy. If raw is True, the
raw string is returned instead.
The options kwargs correspond to the arguments normally passed to
:py:func:`bibpy.read_string`.
"""
req = Request(source.format(doi))
req.add_header('accept', 'application/x-bibtex')
handle = None
try:
handle = urlopen(req)
contents = handle.read()
if raw:
return contents
else:
return bibpy.read_string(
contents.decode('utf-8'),
**options
).entries[0]
finally:
if handle:
handle.close()
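# Hedged usage sketch: how retrieve() might be called. The DOI below is an
# arbitrary illustrative value, not one referenced elsewhere in this project.
if __name__ == '__main__':
    # Fetch and parse the bibtex entry behind a DOI; pass raw=True to get the
    # undecoded bibtex string instead of a parsed entry.
    entry = retrieve('10.1000/182')
    print(entry)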
| MisanthropicBit/bibpy | bibpy/doi/__init__.py | Python | mit | 1,087 |
import sys
from math import log, sqrt
from itertools import combinations
def cosine_distance(a, b):
cos = 0.0
a_tfidf = a["tfidf"]
for token, tfidf in b["tfidf"].iteritems():
if token in a_tfidf:
cos += tfidf * a_tfidf[token]
return cos
def normalize(features):
norm = 1.0 / sqrt(sum(i**2 for i in features.itervalues()))
for k, v in features.iteritems():
features[k] = v * norm
return features
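# Worked example (illustrative): normalize({'a': 3.0, 'b': 4.0}) scales both
# values by 1 / sqrt(3**2 + 4**2) = 0.2, giving {'a': 0.6, 'b': 0.8}. Every
# tfidf vector therefore has unit length, so cosine_distance() above reduces
# to a plain dot product.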
def add_tfidf_to(documents):
tokens = {}
for id, doc in enumerate(documents):
tf = {}
doc["tfidf"] = {}
doc_tokens = doc.get("tokens", [])
for token in doc_tokens:
tf[token] = tf.get(token, 0) + 1
num_tokens = len(doc_tokens)
if num_tokens > 0:
for token, freq in tf.iteritems():
tokens.setdefault(token, []).append((id, float(freq) / num_tokens))
doc_count = float(len(documents))
for token, docs in tokens.iteritems():
idf = log(doc_count / len(docs))
for id, tf in docs:
tfidf = tf * idf
if tfidf > 0:
documents[id]["tfidf"][token] = tfidf
for doc in documents:
doc["tfidf"] = normalize(doc["tfidf"])
def choose_cluster(node, cluster_lookup, edges):
new = cluster_lookup[node]
if node in edges:
seen, num_seen = {}, {}
for target, weight in edges.get(node, []):
seen[cluster_lookup[target]] = seen.get(
cluster_lookup[target], 0.0) + weight
for k, v in seen.iteritems():
num_seen.setdefault(v, []).append(k)
new = num_seen[max(num_seen)][0]
return new
def majorclust(graph):
cluster_lookup = dict((node, i) for i, node in enumerate(graph.nodes))
count = 0
movements = set()
finished = False
while not finished:
finished = True
for node in graph.nodes:
new = choose_cluster(node, cluster_lookup, graph.edges)
move = (node, cluster_lookup[node], new)
if new != cluster_lookup[node] and move not in movements:
movements.add(move)
cluster_lookup[node] = new
finished = False
clusters = {}
for k, v in cluster_lookup.iteritems():
clusters.setdefault(v, []).append(k)
return clusters.values()
def get_distance_graph(documents):
class Graph(object):
def __init__(self):
self.edges = {}
def add_edge(self, n1, n2, w):
self.edges.setdefault(n1, []).append((n2, w))
self.edges.setdefault(n2, []).append((n1, w))
graph = Graph()
doc_ids = range(len(documents))
graph.nodes = set(doc_ids)
for a, b in combinations(doc_ids, 2):
graph.add_edge(a, b, cosine_distance(documents[a], documents[b]))
return graph
def get_documents():
texts = [
"foo blub baz",
"foo bar baz",
"asdf bsdf csdf",
"foo bab blub",
"csdf hddf kjtz",
"123 456 890",
"321 890 456 foo",
"123 890 uiop",
]
return [{"text": text, "tokens": text.split()}
for i, text in enumerate(texts)]
def main(args):
documents = get_documents()
add_tfidf_to(documents)
dist_graph = get_distance_graph(documents)
for cluster in majorclust(dist_graph):
print "========="
for doc_id in cluster:
print documents[doc_id]["text"]
if __name__ == '__main__':
main(sys.argv)
| abitofalchemy/ScientificImpactPrediction | NetAnalysis/sandbox.py | Python | mit | 3,493 |
#!/usr/bin/env python
"""
This module gets the application's config from project/etc
We can't log in here, as the logging module has to import
config before it can configure itself to start logging
"""
import os
import json
from twisted.python.modules import getModule
class Config(object):
data = {}
@classmethod
def init(cls, appname):
if cls.data.get(appname):
return
cls.refresh(appname)
return None
@classmethod
def refresh(cls, appname):
"""
Go back to the filesystem and re-read the config file
"""
try:
filepath = getModule(__name__).filePath
basedir = filepath.parent().parent().parent().parent().path
except Exception, e:
print("Failed to get project basedir: %s" % (e,))
raise
json_config_file = os.path.join(basedir, "etc/config_data.json")
fh = open(json_config_file, 'r')
try:
cls.data[appname] = json.load(fh)
except Exception, e:
raise
finally:
fh.close()
@classmethod
def get(cls, appname, key):
cls.init(appname)
return cls.data.get(appname, {}).get(key)
if __name__ == '__main__':
c = Config()
print c.get('webtest', 'log')
| donalm/webtest | lib/python/webtest/config.py | Python | mit | 1,306 |
# Copyright 2018 SAS Project Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DPA (Dynamic Protection Area) manager.
This DPA manager manages the move list and interference calculation for a DPA.
DPA differs from other protection entities as the aggregated interference is
computed at specific percentile of the combined interference random variable.
Example usage:
# Create a DPA
# - either with a simple default protection points builder
dpa = BuildDpa('East1', 'default (25,10,10,5)')
# - or from a GeoJson file holding a MultiPoint geometry
dpa = BuildDpa('East1', 'east_dpa_1_points.json')
# Set the grants
dpa.SetGrantsFromFad(sas_uut_fad, sas_th_fads)
# Compute the move list
dpa.ComputeMoveLists()
# Calculate the keep list interference for a given channel
channel = (3650, 3660)
interf_per_point = dpa.CalcKeepListInterference(channel)
# Check the interference according to Winnforum IPR tests
status = dpa.CheckInterference(sas_uut_keep_list, margin_db=2)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
from datetime import datetime
import functools
import logging
import os
import numpy as np
import shapely.geometry as sgeo
import six
from six.moves import zip
from reference_models.common import data
from reference_models.common import mpool
from reference_models.dpa import dpa_builder
from reference_models.dpa import move_list as ml
from reference_models.geo import zones
# The default DPA parameters, corresponding to legacy Coastal DPA.
DPA_DEFAULT_THRESHOLD_PER_10MHZ = -144
DPA_DEFAULT_RADAR_HEIGHT = 50
DPA_DEFAULT_BEAMWIDTH = 3
DPA_DEFAULT_FREQ_RANGE = (3550, 3650)
DPA_DEFAULT_DISTANCES = (150, 200, 0, 25)
# The channel bandwidth
DPA_CHANNEL_BANDWIDTH = 10
# The logging path
def GetDpaLogDir():
dpa_log_dir = os.path.join(os.path.dirname(__file__),
'..', '..', 'testcases', 'output')
if not os.path.exists(dpa_log_dir): os.makedirs(dpa_log_dir)
return dpa_log_dir
# Help routine
class _EmptyFad(object):
"""Helper class for representing an empty FAD."""
def getCbsdRecords(self):
return []
def Db2Lin(x):
return 10**(x / 10.)
def Lin2Db(x):
return 10 * np.log10(np.asarray(x).clip(min=1e-100))
class DpaInterferenceResult(
namedtuple('DpaInterferenceResult',
['max_difference', 'A_DPA', 'A_DPA_ref', 'azimuth_array'])):
"""Holds detailed information on a DPA aggregate interference result.
Attributes:
max_difference: maximum difference between A_DPA and A_DPA_ref.
A_DPA: SAS UUT's interference to the DPA point.
A_DPA_ref: reference value for SAS UUT's interference to the DPA point.
azimuth_array: azimuths (in degrees) used in the interference calculation.
"""
__slots__ = ()
# The main Dpa class representing a Dpa zone
class Dpa(object):
"""Dynamic Protection Area.
A dynamic protection area is an ESC/Portal triggered protection area, ie not always on.
There are 2 main types of DPA: Coastal DPAs and Inland DPAs.
The difference between the two are mainly the set of parameters to use for
its protection.
Most DPA are co-channel only, although a few have also Out-Of-Band protected channels
for which special protection logic is run.
Attributes:
name: The DPA name (informative).
geometry: The DPA geometry, as a shapely shape.
channels: The list of 10MHz channels (freq_min_mhz, freq_max_mhz) to protect for
that DPA.
protected_points: A list of namedtuple (latitude, longitude) defining the actual
points to protect within the DPA.
threshold: The protection threshold (in dBm per 10MHz). The 95% quantile of the
aggregated interference shall not exceed this level.
radar_height: The radar height (meters).
beamwidth: The radar antenna beamwidth (degrees).
azimuth_range: The radar azimuth range (degrees) as a tuple of
(min_azimuth, max_azimuth) relative to true north.
neighbor_distances: The neighborhood distances (km) as a sequence:
(cata_dist, catb_dist, cata_oob_dist, catb_oob_dist)
monitor_type: The DPA monitoring category, either 'esc' or 'portal'.
move_lists: A list of move list (set of |CbsdGrantInfo|) per channel.
nbor_lists: A list of neighbor list (set of |CbsdGrantInfo|) per channel.
Note that keep list is the grants of the nbor_list not in the move list.
Usage:
# Setup the DPA
dpa = Dpa(protected_points)
dpa.SetGrantsFromFad(sas_uut_fad, sas_th_fads)
# Compute the move list
dpa.ComputeMoveLists()
# Calculate the keep list interference for a given channel
interf_per_point = dpa.CalcKeepListInterference(channel)
# Check the interference according to Winnforum IPR tests
status = dpa.CheckInterference(channel, sas_uut_keep_list, margin_db=1)
"""
num_iteration = 2000
@classmethod
def Configure(cls,
num_iteration=2000):
"""Configure operating parameters.
Args:
      num_iteration: The number of iterations to use in the Monte Carlo simulation.
"""
cls.num_iteration = num_iteration
def __init__(self, protected_points,
geometry=None,
name='None',
threshold=DPA_DEFAULT_THRESHOLD_PER_10MHZ,
radar_height=DPA_DEFAULT_RADAR_HEIGHT,
beamwidth=DPA_DEFAULT_BEAMWIDTH,
azimuth_range=(0, 360),
freq_ranges_mhz=[DPA_DEFAULT_FREQ_RANGE],
neighbor_distances=DPA_DEFAULT_DISTANCES,
monitor_type='esc'):
"""Initialize the DPA attributes."""
self.name = name
self.geometry = geometry
self.protected_points = protected_points
self.threshold = threshold
self.radar_height = radar_height
self.azimuth_range = azimuth_range
self.beamwidth = beamwidth
self.neighbor_distances = neighbor_distances
self.monitor_type = monitor_type
self._channels = None
self._grants = []
self._has_th_grants = False
self.ResetFreqRange(freq_ranges_mhz)
self.ResetLists()
def __str__(self):
"""Returns the DPA str."""
return ('Dpa(protected_points=%r, geometry=%r, threshold=%.1f, radar_height=%.1f,'
'beamwidth=%.1f, azimuth_range=%r, channels=%r,'
'neighbor_distances=%r, monitor_type=%r)' % (
self.protected_points, self.geometry,
self.threshold, self.radar_height,
self.beamwidth, self.azimuth_range, self._channels,
self.neighbor_distances, self.monitor_type))
def ResetFreqRange(self, freq_ranges_mhz):
"""Reset the frequency ranges of the DPA.
If the range have changed, the move and neighbor lists are also reset.
Args:
freq_ranges_mhz: The protection frequencies (MHz) as a list of tuple
(freq_min_mhz, freq_max_mhz) of the DPA protected frequency ranges.
"""
channels = GetDpaProtectedChannels(freq_ranges_mhz,
is_portal_dpa=(self.monitor_type=='portal'))
if channels != self._channels:
self._channels = channels
self.ResetLists()
def ResetLists(self):
"""Reset move list and neighbor list."""
self.move_lists = [set() for _ in self._channels]
self.nbor_lists = [set() for _ in self._channels]
def _DetectIfPeerSas(self):
"""Returns True if holding grants from peer TH SAS."""
for grant in self._grants:
if not grant.is_managed_grant:
return True
return False
def SetGrantsFromFad(self, sas_uut_fad, sas_th_fads):
"""Sets the list of grants.
Args:
sas_uut_fad: The FAD object of SAS UUT, or None if none.
sas_th_fads: A list of FAD objects of other SAS test harness, or None if none.
"""
# Manages the possibility of None for the FADs.
if sas_uut_fad is None: sas_uut_fad = _EmptyFad()
if sas_th_fads is None: sas_th_fads = []
# TODO(sbdt): optim = pre-filtering of grants in global DPA neighborhood.
self._grants = data.getGrantObjectsFromFAD(sas_uut_fad, sas_th_fads)
self.ResetLists()
self._has_th_grants = self._DetectIfPeerSas()
def SetGrantsFromList(self, grants):
"""Sets the list of grants from a list of |data.CbsdGrantInfo|."""
# TODO(sbdt): optim = pre-filtering of grants in global DPA neighborhood.
self._grants = grants
self.ResetLists()
self._has_th_grants = self._DetectIfPeerSas()
def ComputeMoveLists(self):
"""Computes move/neighbor lists.
This routine updates the internal grants move list and neighbor list.
One set of list is maintained per protected channel.
To retrieve the list, see the routines GetMoveList(), GetNeighborList() and
GetKeepList().
"""
logging.info('DPA Compute movelist `%s`- channels %s thresh %s bw %s height %s '
'iter %s azi_range %s nbor_dists %s',
self.name, self._channels, self.threshold, self.beamwidth,
self.radar_height, Dpa.num_iteration,
self.azimuth_range, self.neighbor_distances)
logging.debug(' protected points: %s', self.protected_points)
pool = mpool.Pool()
self.ResetLists()
    # Detect the "inside grants", i.e. grants located inside the DPA geometry,
    # which will always be added to the move list later on.
inside_grants = set()
if self.geometry and not isinstance(self.geometry, sgeo.Point):
inside_grants = set(g for g in self._grants
if sgeo.Point(g.longitude, g.latitude).intersects(self.geometry))
for chan_idx, (low_freq, high_freq) in enumerate(self._channels):
moveListConstraint = functools.partial(
ml.moveListConstraint,
low_freq=low_freq * 1.e6,
high_freq=high_freq * 1.e6,
grants=self._grants,
inc_ant_height=self.radar_height,
num_iter=Dpa.num_iteration,
threshold=self.threshold,
beamwidth=self.beamwidth,
min_azimuth=self.azimuth_range[0],
max_azimuth=self.azimuth_range[1],
neighbor_distances=self.neighbor_distances)
move_list, nbor_list = list(
zip(*pool.map(moveListConstraint, self.protected_points)))
# Combine the individual point move lists
move_list = set().union(*move_list)
nbor_list = set().union(*nbor_list)
include_grants = ml.filterGrantsForFreqRange(
inside_grants, low_freq * 1.e6, high_freq * 1.e6)
move_list.update(include_grants)
nbor_list.update(include_grants)
self.move_lists[chan_idx] = move_list
self.nbor_lists[chan_idx] = nbor_list
logging.info('DPA Result movelist `%s`- MOVE_LIST:%s NBOR_LIST: %s',
self.name, self.move_lists, self.nbor_lists)
def _GetChanIdx(self, channel):
"""Gets the channel idx for a given channel."""
try:
chan_idx = self._channels.index(channel)
except ValueError:
raise ValueError('Channel {} not protected by this DPA'.format(channel))
return chan_idx
def GetMoveList(self, channel):
"""Returns the move list for a given channel, as a set of grants.
Args:
channel: A channel as tuple (low_freq_mhz, high_freq_mhz).
"""
return self.move_lists[self._GetChanIdx(channel)]
def GetNeighborList(self, channel):
"""Returns the neighbor list for a given channel, as a set of grants.
Args:
channel: A channel as tuple (low_freq_mhz, high_freq_mhz).
"""
return self.nbor_lists[self._GetChanIdx(channel)]
def GetKeepList(self, channel):
"""Returns the keep list for a given channel, as a set of grants.
Args:
channel: A channel as tuple (low_freq_mhz, high_freq_mhz).
"""
return self.GetNeighborList(channel).difference(self.GetMoveList(channel))
def GetMoveListMask(self, channel):
"""Returns move list mask as a vector of bool.
Args:
channel: A channel as tuple (low_freq_mhz, high_freq_mhz).
"""
# Legacy function for getting a mask, as used in some example code.
move_list = self.GetMoveList(channel)
mask = np.zeros(len(self._grants), np.bool)
for k, grant in enumerate(self._grants):
if grant in move_list:
mask[k] = True
return mask
def CalcKeepListInterference(self, channel, num_iter=None):
"""Calculates max aggregate interference per protected point.
Args:
channel: A channel as tuple (low_freq_mhz, high_freq_mhz).
      num_iter: The number of Monte Carlo iterations when calculating the aggregate
interference.
Returns:
The 95% aggregate interference per protected point, as a list.
Each element is the maximum aggregate interference over all radar directions.
"""
if num_iter is None:
num_iter = Dpa.num_iteration
keep_list = self.GetKeepList(channel)
interfCalculator = functools.partial(
ml.calcAggregatedInterference,
low_freq=channel[0] * 1e6,
high_freq=channel[1] * 1e6,
grants=keep_list,
inc_ant_height=self.radar_height,
num_iter=num_iter,
beamwidth=self.beamwidth,
min_azimuth=self.azimuth_range[0],
max_azimuth=self.azimuth_range[1],
neighbor_distances=self.neighbor_distances,
do_max=True)
pool = mpool.Pool()
max_interf = pool.map(interfCalculator,
self.protected_points)
return max_interf
def CheckInterference(self, sas_uut_active_grants, margin_db,
channel=None, num_iter=None,
do_abs_check_single_uut=False,
extensive_print=True,
output_data=None):
"""Checks interference of keep list of SAS UUT vs test harness.
This compares the aggregated interference (within some margin) generated by:
- the test harness keep list, versus
- the blended SAS UUT / Test harness keep list
for all protection points and azimuth.
Args:
sas_uut_active_grants: An iterable of authorized |data.CbsdGrantInfo| grants.
of the SAS UUT (ie not in the move list).
margin_db: Defines the method and margin (in dB) to use for checking the SAS UUT
aggregate interference:
- a number: the check is done against the reference model aggregate interference
using this value as the margin (in dB). [Legacy method].
- a string of one of the following type:
+ 'target(<number_db>)': the check is done against the DPA protection threshold,
with <number> being an allowed margin (in dB).
+ 'linear(<number_db>): the check is done against the ref model, but by using a
margin in linear domain. The `number_db` is still given in dB, but converted
into an equivalent mW margin for the target threshold. Then this mW margin is
used for the check.
channel: A channel as tuple (low_freq_mhz, high_freq_mhz), or None for all channels
of that Dpa.
      num_iter: The number of Monte Carlo iterations for calculating the aggregate
interference. If None, use the global class level `num_iteration`.
do_abs_check_single_uut: If True, performs check against absolute threshold instead
of relative check against ref model, iff only SAS UUT present (no peer SAS).
In this mode, move list does not need to be precomputed, or grants setup,
(only the passed UUT active grants are used).
extensive_print: If True, extensive logging done onto local files.
      output_data: If an empty list, then it will be populated with detailed
interference results (|DpaInterferenceResult|) for each protected point
and channel. Indexing is [chan_idx, point_idx], unless channel is unique,
in which case indexing is [point_idx].
Returns:
True if all SAS UUT aggregated interference are within margin, False otherwise
(ie if at least one combined protection point / azimuth fails the test).
"""
if channel is None:
test_passed = True
if output_data == []:
        # Use one independent list per channel; '[[]] * n' would alias a single shared list.
        output_data.extend([[] for _ in self._channels])
else:
output_data = [None] * len(self._channels)
for k, chan in enumerate(self._channels):
if not self.CheckInterference(sas_uut_active_grants, margin_db, chan, num_iter,
do_abs_check_single_uut, extensive_print,
output_data[k]):
test_passed = False
return test_passed
if num_iter is None:
num_iter = Dpa.num_iteration
# Manages the various margin_db methods.
margin_method = 'std'
if isinstance(margin_db, six.string_types):
idx1, idx2 = margin_db.find('('), margin_db.find(')')
if idx1 == -1 or idx2 == -1:
raise ValueError('DPA CheckInterference: margin_db: `%s` not allowed.'
'Use a number, the `target(xx)` or `linear(xx)` options' %
margin_db)
margin_method = margin_db[:idx1].strip().lower()
if margin_method not in ['target', 'linear']:
raise ValueError('DPA CheckInterference: margin_db method: `%s` not allowed.'
'Use either `target(xx)` or `linear(xx)` options' % margin_method)
margin_db = float(margin_db[idx1+1:idx2].strip())
# Find the keep list component of TH: SAS UUT and peer SASes.
keep_list = self.GetKeepList(channel)
keep_list_th_other_sas = [
grant for grant in self._grants
if (not grant.is_managed_grant and grant in keep_list)]
keep_list_th_managing_sas = [
grant for grant in self._grants
if (grant.is_managed_grant and grant in keep_list)]
# Makes sure we have a list of SAS UUT active grants
sas_uut_active_grants = list(sas_uut_active_grants)
if extensive_print:
# Derive the estimated SAS UUT keep list, ie the SAS UUT active grants
# within the neighborhood (defined by distance and frequency). This is
# only insured to be a superset of the actual keep list.
# Note: to avoid any test harness regression, this list is currently only
# used for logging purpose (although it could be passed to the interference
# check routine for faster operation).
est_keep_list_uut_managing_sas = ml.getDpaNeighborGrants(
sas_uut_active_grants, self.protected_points, self.geometry,
low_freq=channel[0] * 1e6, high_freq=channel[1] * 1e6,
neighbor_distances=self.neighbor_distances)
try:
self.__PrintKeepLists(keep_list_th_other_sas, keep_list_th_managing_sas,
est_keep_list_uut_managing_sas, self.name, channel)
except Exception as e:
logging.error('Could not print DPA keep lists: %s', e)
# Do absolute threshold in some case
hard_threshold = None
if margin_method == 'target' or (
do_abs_check_single_uut and not self._has_th_grants):
hard_threshold = self.threshold
logging.info('DPA Check interf `%s`- channel %s thresh %s bw %s '
'iter %s azi_range %s nbor_dists %s',
self.name, channel,
hard_threshold if hard_threshold else
('`MoveList`' if margin_method == 'std' else 'MoveList + Linear'),
self.beamwidth, num_iter, self.azimuth_range, self.neighbor_distances)
checkPointInterf = functools.partial(
_CalcTestPointInterfDiff,
channel=channel,
keep_list_th_other_sas=keep_list_th_other_sas,
keep_list_th_managing_sas=keep_list_th_managing_sas,
keep_list_uut_managing_sas=sas_uut_active_grants,
radar_height=self.radar_height,
beamwidth=self.beamwidth,
num_iter=num_iter,
azimuth_range=self.azimuth_range,
neighbor_distances=self.neighbor_distances,
threshold=hard_threshold
)
pool = mpool.Pool()
result = pool.map(checkPointInterf, self.protected_points)
if output_data == []:
output_data.extend(result)
margin_mw = None # for standard or target method
if margin_method == 'linear': # linear method
margin_mw = Db2Lin(self.threshold + margin_db) - Db2Lin(self.threshold)
if extensive_print:
try:
self.__PrintStatistics(result, self.name, channel, self.threshold, margin_mw)
except Exception as e:
logging.error('Could not print DPA statistics: %s', e)
if margin_mw is None: # standard or target method
max_diff_interf = max(r.max_difference for r in result)
if max_diff_interf > margin_db:
logging.warning('DPA Check Fail `%s`- channel %s thresh %s max_diff %s',
self.name, channel,
hard_threshold if hard_threshold else '`MoveList`',
max_diff_interf)
else:
logging.info('DPA Check Succeed - max_diff %s', max_diff_interf)
return max_diff_interf <= margin_db
else: # Linear method
max_diff_interf_mw = max([np.max(Db2Lin(r.A_DPA) - Db2Lin(r.A_DPA_ref))
for r in result])
if max_diff_interf_mw > margin_mw:
logging.warning('DPA Check Fail `%s`- channel %s thresh `MoveList+Linear`'
' margin_mw excess by %.4fdB',
self.name, channel,
Lin2Db(max_diff_interf_mw / margin_mw))
else:
logging.info('DPA Check Succeed - margin_mw headroom by %.4fdB',
Lin2Db(max_diff_interf_mw / margin_mw))
return max_diff_interf_mw <= margin_mw
def __PrintStatistics(self, results, dpa_name, channel, threshold, margin_mw=None):
"""Prints result statistics."""
timestamp = datetime.now().strftime('%Y-%m-%d %H_%M_%S')
filename = os.path.join(GetDpaLogDir(),
'%s DPA=%s channel=%s threshold=%s.csv' % (
timestamp, dpa_name, channel, threshold))
logging.info('Saving stats for DPA %s, channel %s to file: %s', dpa_name,
channel, filename)
with open(filename, 'w') as f:
# CSV header.
f.write(
'Latitude,Longitude,Azimuth (degrees),A_DPA (dBm),A_DPA_ref (dBm),A_DPA - A_DPA_ref,A_DPA - threshold\n'
)
for result, point in zip(results, self.protected_points):
latitude = point.latitude
longitude = point.longitude
for k, azimuth in enumerate(result.azimuth_array):
if result.A_DPA.size == 1:
# Effectively a scalar, resulting from no neighbor grants (see
# function calcAggregatedInterference() which sets this).
A_DPA = result.A_DPA.item(0)
else:
# Normal case: at least one grant in the neighborhood.
A_DPA = result.A_DPA[k]
if np.isscalar(result.A_DPA_ref): # float or int
# Happens when there are no peer SASes.
A_DPA_ref = result.A_DPA_ref
elif result.A_DPA_ref.size == 1:
# Peer SAS case: no grants in the neighborhood.
A_DPA_ref = result.A_DPA_ref.item(0)
else:
# Peer SAS case: at least one grant in the neighborhood.
A_DPA_ref = result.A_DPA_ref[k]
line = ','.join('%3.10f' % val for val in [
latitude, longitude, azimuth, A_DPA, A_DPA_ref, A_DPA -
A_DPA_ref, A_DPA - threshold
])
f.write(line + '\n')
differences = np.zeros([len(self.protected_points), len(results[0].azimuth_array)])
for k, (result, point) in enumerate(zip(results, self.protected_points)):
if margin_mw is not None:
difference = result.A_DPA - Lin2Db(Db2Lin(result.A_DPA_ref) + margin_mw)
else:
difference = result.A_DPA - result.A_DPA_ref
differences[k, :] = difference
logging.info('--- Difference statistics versus %s ---',
'Ref model' if margin_mw is None else 'Ref model+margin_mw')
logging.info('Min difference: %s', np.min(differences))
logging.info('Max difference: %s', np.max(differences))
for percentile in [50, 90, 99, 99.9, 99.99, 99.999, 99.9999]:
logging.info('%f percent of differences are <= %f', percentile,
np.percentile(differences, percentile))
logging.info('--- End statistics ---')
def __PrintKeepLists(self, keep_list_th_other_sas, keep_list_th_managing_sas,
keep_list_uut_managing_sas, dpa_name, channel):
"""Prints keep list and neighbor list."""
def WriteList(filename, keep_list):
logging.info('Writing list to file: %s', filename)
fields = [
'latitude', 'longitude', 'height_agl', 'indoor_deployment',
'cbsd_category', 'antenna_azimuth', 'antenna_gain',
'antenna_beamwidth', 'max_eirp', 'low_frequency', 'high_frequency',
'is_managed_grant'
]
with open(filename, 'w') as f:
f.write(','.join(fields) + '\n')
for cbsd_grant_info in keep_list:
f.write(','.join(str(getattr(cbsd_grant_info, key)) for key in fields) + '\n')
timestamp = datetime.now().strftime('%Y-%m-%d %H_%M_%S')
base_filename = os.path.join(GetDpaLogDir(),
'%s DPA=%s channel=%s' % (timestamp, dpa_name, channel))
# SAS test harnesses (peer SASes) full neighbor list
filename = '%s (neighbor list).csv' % base_filename
WriteList(filename, self.GetNeighborList(channel))
# SAS test harnesses (peer SASes) combined keep list
filename = '%s (combined peer SAS keep list).csv' % base_filename
WriteList(filename, keep_list_th_other_sas)
# SAS UUT keep list (according to test harness)
filename = '%s (SAS UUT keep list, according to test harness).csv' % base_filename
WriteList(filename, keep_list_th_managing_sas)
# SAS UUT keep list (according to SAS UUT)
filename = '%s (SAS UUT keep list, according to SAS UUT).csv' % base_filename
WriteList(filename, keep_list_uut_managing_sas)
def GetDpaProtectedChannels(freq_ranges_mhz, is_portal_dpa=False):
""" Gets protected channels list for DPA.
  Note: For ESC-monitored DPAs, only the highest channel below 3550MHz is
  kept; channels at or above 3550MHz are always retained.
Args:
freq_ranges_mhz: The protection frequencies (MHz) as a list of tuple
(freq_min_mhz, freq_max_mhz) of the DPA protected frequency ranges.
is_portal_dpa: True if a portal DPA, False if Coastal ESC DPA.
Returns:
A list of protected channels as a tuple (low_freq_mhz, high_freq_mhz)
"""
channels = set()
for freq_range in freq_ranges_mhz:
min_freq = int(freq_range[0] / DPA_CHANNEL_BANDWIDTH) * DPA_CHANNEL_BANDWIDTH
max_freq = freq_range[1]
freqs = np.arange(min_freq, max_freq, DPA_CHANNEL_BANDWIDTH)
for freq in freqs:
channels.add((freq, freq + DPA_CHANNEL_BANDWIDTH))
channels = sorted(list(channels))
if not is_portal_dpa:
# For ESC DPA, channels below 3550 are always ON, and only the highest one dominates.
idx_above_3550 = 0
for chan in channels:
if chan[0] >= 3550:
break
idx_above_3550 +=1
highest_channel_idx_below_3550 = max(0, idx_above_3550 - 1)
channels = channels[highest_channel_idx_below_3550:]
return channels
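# Worked example (illustrative, not taken from the SAS test data): for an
# ESC-monitored DPA with freq_ranges_mhz=[(3500, 3570)], the raw 10 MHz
# channels are (3500, 3510) through (3560, 3570); since only the highest
# channel below 3550 MHz matters for ESC DPAs, the function returns
# [(3540, 3550), (3550, 3560), (3560, 3570)].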
def _CalcTestPointInterfDiff(point,
channel,
keep_list_th_other_sas,
keep_list_th_managing_sas,
keep_list_uut_managing_sas,
radar_height,
beamwidth,
num_iter,
azimuth_range,
neighbor_distances,
threshold=None):
"""Calculate difference of aggregate interference between reference and SAS UUT.
This implements the check required by the IPR certification tests, comparing the
reference model keep list aggregated interference versus the blended one.
The blended one uses the SAS UUT authorized grants that it manages, plus the CBSD
of other SAS in the keep list.
Note that the "keep list" can be loosely defined as a superset of the actual
keep lists, including excess grants out of the neighborhood area. This routine
will insure that only the actual grants in the neighborhood are used for the
interference check.
Note that this routine reduce the amount of random variation by reusing the same
random draw for the CBSD that are shared between the two keep lists. This is done
by using the caching engine provided by |reference_models.common.cache|.
Args:
point: A point having attributes 'latitude' and 'longitude'.
channel: A channel as tuple (low_freq_mhz, high_freq_mhz).
keep_list_th_other_sas: A list of |data.CbsdGrantInfo| for non managing SAS, as
computed by the reference model.
keep_list_th_managing_sas: A list of |data.CbsdGrantInfo| for managing SAS, as
computed by the reference model.
keep_list_uut_managing_sas: A list of |data.CbsdGrantInfo| for managing SAS as
computed by the SAS UUT.
radar_height: The radar height (meters).
beamwidth: The radar antenna beamwidth (degrees).
    num_iter: The number of iterations to use in the Monte Carlo simulation.
azimuth_range: The radar azimuth range (degrees) as a tuple of
(min_azimuth, max_azimuth) relative to true north.
neighbor_distances: The neighborhood distance (km) as a sequence:
[cata_dist, catb_dist, cata_oob_dist, catb_oob_dist]
threshold: If set, do an absolute threshold check of SAS UUT interference against
threshold. Otherwise compare against the reference model aggregated interference.
Returns:
The maximum aggregated difference across all the radar pointing directions between
the blended and the reference models.
"""
azimuths = ml.findAzimuthRange(azimuth_range[0], azimuth_range[1], beamwidth)
# Perform caching of the per device interference, as to reduce the Monte-Carlo
# variability on similar CBSD in keep list.
# TODO(sbdt): check if better to context manage once per process, with clearing
# in between.
with ml.InterferenceCacheManager() as cm:
uut_interferences = ml.calcAggregatedInterference(
point,
low_freq=channel[0] * 1e6,
high_freq=channel[1] * 1e6,
grants=keep_list_th_other_sas + keep_list_uut_managing_sas,
inc_ant_height=radar_height,
num_iter=num_iter,
beamwidth=beamwidth,
min_azimuth=azimuth_range[0],
max_azimuth=azimuth_range[1],
neighbor_distances=neighbor_distances)
if threshold is not None:
max_diff = np.max(uut_interferences - threshold)
logging.debug('%s UUT interf @ %s Thresh %sdBm Diff %sdB: %s',
'Exceeded (ignoring delta_DPA)' if max_diff > 0 else 'Ok', point, threshold,
max_diff, uut_interferences)
if max_diff > 0:
logging.info('Exceeded (ignoring delta_DPA) UUT interf @ %s Thresh %sdBm Diff %sdB: %s',
point, threshold, max_diff, uut_interferences)
return DpaInterferenceResult(
max_difference=max_diff,
A_DPA=uut_interferences,
A_DPA_ref=threshold,
azimuth_array=azimuths)
th_interferences = ml.calcAggregatedInterference(
point,
low_freq=channel[0] * 1e6,
high_freq=channel[1] * 1e6,
grants=keep_list_th_other_sas + keep_list_th_managing_sas,
inc_ant_height=radar_height,
num_iter=num_iter,
beamwidth=beamwidth,
min_azimuth=azimuth_range[0],
max_azimuth=azimuth_range[1],
neighbor_distances=neighbor_distances)
max_diff = np.max(uut_interferences - th_interferences)
logging.debug(
'%s UUT interf @ %s Diff %sdB: %s',
'Exceeded (ignoring delta_DPA)' if max_diff > 0 else 'Ok', point,
max_diff,
list(zip(np.atleast_1d(th_interferences),
np.atleast_1d(uut_interferences))))
if max_diff > 0:
logging.info(
'Exceeded (ignoring delta_DPA) UUT interf @ %s Diff %sdB: %s', point,
max_diff,
list(zip(np.atleast_1d(th_interferences),
np.atleast_1d(uut_interferences))))
return DpaInterferenceResult(
max_difference=max_diff,
A_DPA=uut_interferences,
A_DPA_ref=th_interferences, azimuth_array=azimuths)
def BuildDpa(dpa_name, protection_points_method=None, portal_dpa_filename=None):
"""Builds a DPA parameterized correctly.
The DPA special parameters are obtained from the DPA database.
The DPA protection points are generated either from a file, or using a provided default
method.
Args:
dpa_name: The DPA official name.
protection_points_method: Three methods are supported for getting the protection points:
+ a path pointing to the file holding the protected points location defined as a
geojson MultiPoint or Point geometry. The path can be either absolute or relative
to the running script (normally the `harness/` directory).
+ 'default <parameters>': A simple default method. Parameters is a tuple defining
the number of points to use for different part of the DPA:
num_pts_front_border: Number of points in the front border
num_pts_back_border: Number of points in the back border
num_pts_front_zone: Number of points in the front zone
num_pts_back_zone: Number of points in the back zone
front_us_border_buffer_km: Buffering of US border for delimiting front/back.
min_dist_front_border_pts_km: Minimum distance between front border points (km).
min_dist_back_border_pts_km: Minimum distance between back border points (km).
min_dist_front_zone_pts_km: Minimum distance between front zone points (km).
min_dist_back_zone_pts_km: Minimum distance between back zone points (km).
Example of encoding:
'default (200,50,20,5)'
Note the default values are (25, 10, 10, 5, 40, 0.2, 1, 0.5, 3)
Only the passed parameters will be redefined.
      The results are only approximate (actual distances and number of points may differ).
+ other 'my_method (p1, p2, ..pk)': The 'my_method` will be checked against registered
methods in which case it will be used, and passing to it the parameters p1, p2,...
Returns:
A Dpa object.
Raises:
IOError: if the provided file cannot be found.
ValueError: in case of other errors, such as invalid file or parameters.
"""
try:
dpa_zone = zones.GetCoastalDpaZones()[dpa_name]
monitor_type = 'esc'
except KeyError:
try:
dpa_zone = zones.GetPortalDpaZones(kml_path=portal_dpa_filename)[dpa_name]
monitor_type = 'portal'
except KeyError:
raise ValueError('DPA %s not found in DPA database' % dpa_name)
# Get the DPA protection points
protection_points = dpa_builder.DpaProtectionPoints(
dpa_name, dpa_zone.geometry, protection_points_method)
# Set all DPA operational parameters
protection_threshold = dpa_zone.protectionCritDbmPer10MHz
radar_height = dpa_zone.refHeightMeters
radar_beamwidth = dpa_zone.antennaBeamwidthDeg
azimuth_range = (dpa_zone.minAzimuthDeg, dpa_zone.maxAzimuthDeg)
freq_ranges_mhz = dpa_zone.freqRangeMHz
neighbor_distances = (dpa_zone.catANeighborhoodDistanceKm,
dpa_zone.catBNeighborhoodDistanceKm,
dpa_zone.catAOOBNeighborhoodDistanceKm,
dpa_zone.catBOOBNeighborhoodDistanceKm)
return Dpa(protection_points,
geometry=dpa_zone.geometry,
name=dpa_name,
threshold=protection_threshold,
radar_height=radar_height,
beamwidth=radar_beamwidth,
azimuth_range=azimuth_range,
freq_ranges_mhz=freq_ranges_mhz,
neighbor_distances=neighbor_distances,
monitor_type=monitor_type)
| Wireless-Innovation-Forum/Spectrum-Access-System | src/harness/reference_models/dpa/dpa_mgr.py | Python | apache-2.0 | 36,632 |
# -*- coding: utf-8 -*-
from acq4.devices.OptomechDevice import OptomechDevice
from acq4.devices.DAQGeneric import DAQGeneric
class PMT(DAQGeneric, OptomechDevice):
def __init__(self, dm, config, name):
self.omConf = {}
for k in ['parentDevice', 'transform']:
if k in config:
self.omConf[k] = config.pop(k)
DAQGeneric.__init__(self, dm, config, name)
OptomechDevice.__init__(self, dm, config, name)
def getFilterDevice(self):
# return parent filter device or None
if 'Filter' in self.omConf.get('parentDevice', {}):
return self.omConf['parentDevice']
else:
return None
| acq4/acq4 | acq4/devices/PMT/PMT.py | Python | mit | 705 |
from datetime import datetime
from math import floor
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.models import User
from django.contrib.auth.password_validation import password_validators_help_texts
from django.http import JsonResponse
from django.urls import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from django.views.generic.detail import DetailView
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.views.generic.list import ListView
from apps.account.forms import UserForm, AccountForm
"""
Account views created to manage the account CRUD operation.
"""
class AccountIndexView(LoginRequiredMixin, ListView):
"""
View that is used to show all the accounts that exist in the Chronos platform.
	Receives an optional parameter used to select the alert message to show:
	@param result (optional) - Shows the matching alert message
Account added - YWRkZWQ=
Account edited - ZWRpdGVk
Account deleted - ZGVsZXRlZA==
TODO: Develop this view
"""
template_name = 'account/account_index.html'
model = User
def get_alert_information(self):
"""
Function used to generate the alert string based on the return result by URL
:return: String containing the result message
"""
if 'result' in self.kwargs:
if self.kwargs['result'] == 'YWRkZWQ=':
return _("A new user was added with success!")
if self.kwargs['result'] == 'ZWRpdGVk':
return _("The user information was edited with success!")
if self.kwargs['result'] == 'ZGVsZXRlZA==':
return _("The user information was deleted with success!")
def get_context_data(self, **kwargs):
context = super(AccountIndexView, self).get_context_data(**kwargs)
context['page_title'] = _('User list - CHRONOS')
context['account_active'] = 'active open'
context['account_viewall_active'] = 'active'
context['result'] = self.get_alert_information()
return context
class AccountDetailView(LoginRequiredMixin, DetailView):
"""
View that is used to show the account information that exists in the Chronos platform.
TODO: Develop this view
"""
template_name = "account/account_base.html"
model = User
def get_context_data(self, **kwargs):
context = super(AccountDetailView, self).get_context_data(**kwargs)
context['page_title'] = _('User detail - CHRONOS')
context['account_active'] = 'active open'
context['account_viewall_active'] = 'active'
context['progress'] = self.get_profile_completion()
return context
def get_profile_completion(self):
"""
This function is used to calculate the total percentage of the account's profile completion.
:return: the calculated percentage
"""
account = self.get_object()
filled_fields = 0
total_fields = len(account._meta.fields)
for field in account._meta.fields:
if getattr(account, field.name):
filled_fields += 1
progression = floor((filled_fields / total_fields) * 100)
return progression
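	# Worked example (illustrative): with Django's default User model exposing
	# 11 concrete fields, an account with 7 of them filled in yields
	# floor((7 / 11) * 100) = 63, i.e. the profile is reported as 63% complete.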
class AccountAddView(LoginRequiredMixin, CreateView):
"""
View that is used to add a new account in the Chronos platform.
TODO: Develop this view
"""
model = User
form_class = UserForm
template_name = 'account/account_form.html'
def get_context_data(self, **kwargs):
context = super(AccountAddView, self).get_context_data(**kwargs)
context['page_title'] = _('Add new user - CHRONOS')
context['title'] = _('Add a new user')
context['account_active'] = 'active open'
context['account_add_active'] = 'active'
context['account_list'] = self.get_queryset()
context['is_new_account'] = True
context['help_text'] = password_validators_help_texts()
if self.request.POST:
context['accountform'] = AccountForm(self.request.POST)
else:
context['accountform'] = AccountForm()
return context
def form_valid(self, form):
context = self.get_context_data()
accountform = context['accountform']
if accountform.is_valid() and form.is_valid():
accountform.instance.user = form.save()
accountform.save()
return super(AccountAddView, self).form_valid(form)
def get_success_url(self):
return reverse_lazy('account:index', kwargs={'result': 'YWRkZWQ='})
class AccountEditView(LoginRequiredMixin, UpdateView):
"""
View that is used to edit an account in the Chronos platform.
TODO: Develop this view
"""
model = User
form_class = UserForm
template_name = 'account/account_form.html'
def get_context_data(self, **kwargs):
context = super(AccountEditView, self).get_context_data(**kwargs)
context['page_title'] = _('Edit user - CHRONOS')
context['title'] = _('Edit user')
context['account_active'] = 'active open'
context['account_viewall_active'] = 'active'
context['is_new_account'] = False
context['help_text'] = password_validators_help_texts()
if self.request.POST:
context['accountform'] = AccountForm(self.request.POST, instance=self.get_object().account)
else:
context['accountform'] = AccountForm(instance=self.get_object().account)
return context
def form_valid(self, form):
context = self.get_context_data()
accountform = context['accountform']
if accountform.is_valid() and form.is_valid():
accountform.instance.last_updated = datetime.now()
accountform.save()
return super(AccountEditView, self).form_valid(form)
def get_success_url(self):
return reverse_lazy('account:index', kwargs={'result': 'ZWRpdGVk'})
class AccountDeleteView(LoginRequiredMixin, DeleteView):
"""
View that is used to delete an account in the Chronos platform. Accessed via AJAX call
TODO: Develop this view
"""
model = User
template_name = 'account/account_delete_modal.html'
def dispatch(self, *args, **kwargs):
response = super(AccountDeleteView, self).dispatch(*args, **kwargs)
if self.request.is_ajax():
response_data = {"result": "ok"}
return JsonResponse(response_data)
else:
# POST request (not ajax) will do a redirect to success_url
return response
def get_success_url(self):
return reverse_lazy('account:index', kwargs={'result': 'ZGVsZXRlZA=='})
| hgpestana/chronos | apps/account/views.py | Python | mit | 6,032 |
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.select import Select
# Configure the baseURL
baseUrl = "https://www.expedia.es"
# Create a webDriver instance and maximize window
driver = webdriver.Firefox()
driver.maximize_window()
# Navigate to the URL and set a 10-second implicit wait
driver.get(baseUrl)
driver.implicitly_wait(10)
# Find and click on element "Flights"
# Find departure textbox and type "Barcelona"
# Find departure textbox and type "Madrid"
# Find departure time and type "23/11/2017"
# Close Calendar
# Find the "Find" button and click on
# Quit driver
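# ---------------------------------------------------------------------------
# Hedged sketch of one possible solution, kept commented out on purpose. The
# locators (link text and element ids) are assumptions about the Expedia page
# and would need to be verified against the live site; only the Selenium calls
# themselves are standard API.
#
# driver.find_element(By.LINK_TEXT, "Vuelos").click()
# driver.find_element(By.ID, "flight-origin").send_keys("Barcelona")      # assumed id
# driver.find_element(By.ID, "flight-destination").send_keys("Madrid")    # assumed id
# driver.find_element(By.ID, "flight-departing").send_keys("23/11/2017")  # assumed id
# driver.find_element(By.TAG_NAME, "body").click()                        # close the calendar
# driver.find_element(By.ID, "search-button").click()                     # assumed id
# driver.quit()
# ---------------------------------------------------------------------------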
| twiindan/selenium_lessons | 04_Selenium/exercices/expedia.py | Python | apache-2.0 | 654 |
# proof for AVX512 approach
def length(nibble):
if nibble & 0x8 == 0x0:
return 1
if nibble & 0xc == 0x8:
return 0
if nibble & 0xe == 0xc:
return 2
if nibble & 0xf == 0xe:
return 3
if nibble & 0xf == 0xf:
return 4
assert False
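# Worked example (illustrative): length() classifies the high nibble of a
# UTF-8 leading byte. length(0x4) == 1 (ASCII 0xxxxxxx), length(0xA) == 0
# (continuation 10xxxxxx), length(0xC) == 2 (110xxxxx), length(0xE) == 3
# (1110xxxx) and length(0xF) == 4 (11110xxx), mirroring the pshufb lookup
# performed on (leading_byte >> 4) in the AVX512 kernel.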
def generate_words():
continuation = b'\x80'
for byte0 in range(0, 256):
if length(byte0 >> 4) < 2:
continue
for byte1 in range(256):
if byte1 & 0xc0 != 0x80:
continue
b0 = byte0.to_bytes(1, 'little')
b1 = byte1.to_bytes(1, 'little')
char2 = b0 + b1
v, r = is_valid_utf8(b0 + b1)
if v:
yield (r, v, byte0, byte1)
continue
v, r = is_valid_utf8(b0 + b1 + continuation)
if v:
yield (r, v, byte0, byte1)
continue
v, r = is_valid_utf8(b0 + b1 + continuation + continuation)
if v:
yield (r, v, byte0, byte1)
continue
yield (r, False, byte0, byte1)
def is_valid_utf8(bytes):
try:
str(bytes, encoding='utf8')
return True, ''
except UnicodeError as e:
return False, str(e)
def algorithm(leading_byte, continuation1):
assert 0 <= leading_byte <= 255
assert 0 <= continuation1 <= 255
nibble1 = leading_byte >> 4
nibble0 = leading_byte & 0xf
# pshufb of (leading_byte >> 4)
n = length(nibble1)
if n == 0:
return (False, "cont")
if n == 1:
return (True, "ASCII")
if n == 2:
greater = (leading_byte >= 0xc2)
return (greater, "len2")
if False:
# naive version
if n == 3:
M = (continuation1 & 0x3f) > 0x1f
if nibble0 == 0b0000:
return (M, "len3: M")
if nibble0 == 0b1101:
return (not M, "len3: !M")
return (True, "len3")
else:
if n == 3:
tmp = continuation1 & 0x3f
if nibble0 == 0b0000:
tmp = tmp ^ 0b0000_0000
elif nibble0 == 0b1101:
tmp = tmp ^ 0b0010_0000
else:
tmp = tmp ^ 0b1100_0000
return (tmp > 0x1f, "len3")
if False:
if n == 4:
M = (continuation1 & 0x3f) > 0x0f
if nibble0 == 0b0000:
return (M, f"len4: M ({M}, {continuation1:08b})")
if nibble0 in (0b0001, 0b0010, 0b0011):
return True, "len4"
if nibble0 == 0b0100:
return (not M, "len4: !M")
return (False, "len4*")
else:
if n == 4:
tmp = continuation1 & 0x3f
a = (tmp - 0x10) & 0xff
tmp = a ^ 0x40
if nibble0 == 0b0000:
# tmp > 0x0f
mask = 0x40
elif nibble0 in (0b0001, 0b0010, 0b0011):
# true
mask = 0xc0
elif nibble0 == 0b0100:
# tmp <= 0x0f
mask = 0x80
else:
# false
mask = 0x00
t0 = (tmp & mask)
flag = t0 != 0
print(f"{nibble0:02x}: {continuation1 & 0x3f:02x} & {mask:02x} = {t0:02x} ({flag})")
return (flag, "len4*")
return (False, "other")
def main():
failed = 0
for i, (reason, expected, byte0, byte1) in enumerate(generate_words()):
result, which = algorithm(byte0, byte1)
if result != expected:
#print(reason)
print(failed, '\t', f'{byte0:02x} {byte1:02x} -> expected={expected}, result={result} {which}')
print()
failed += 1
if __name__ == '__main__':
main()
| WojciechMula/toys | avx512-utf8-to-utf32/validate/avx512-validate-leading-bytes.py | Python | bsd-2-clause | 3,842 |
#!/usr/bin/python3
"""Script to determine the Pywikibot version (tag, revision and date).
.. versionchanged:: 7.0
version script was moved to the framework scripts folder
"""
#
# (C) Pywikibot team, 2007-2021
#
# Distributed under the terms of the MIT license.
#
import codecs
import os
import sys
import pywikibot
from pywikibot.version import get_toolforge_hostname, getversion
class DummyModule:
"""Fake module instance."""
__version__ = 'n/a'
try:
import setuptools
except ImportError:
setuptools = DummyModule()
try:
import mwparserfromhell
except ImportError:
mwparserfromhell = DummyModule()
try:
import wikitextparser
except ImportError:
wikitextparser = DummyModule()
try:
import requests
except ImportError:
requests = DummyModule()
WMF_CACERT = 'MIIDxTCCAq2gAwIBAgIQAqxcJmoLQJuPC3nyrkYldzANBgkqhkiG9w0BAQUFADBs'
def main(*args: str) -> None:
"""Print pywikibot version and important settings."""
pywikibot.output('Pywikibot: ' + getversion())
pywikibot.output('Release version: ' + pywikibot.__version__)
pywikibot.output('setuptools version: ' + setuptools.__version__)
pywikibot.output('mwparserfromhell version: '
+ mwparserfromhell.__version__)
pywikibot.output('wikitextparser version: ' + wikitextparser.__version__)
pywikibot.output('requests version: ' + requests.__version__)
has_wikimedia_cert = False
if (not hasattr(requests, 'certs')
or not hasattr(requests.certs, 'where')
or not callable(requests.certs.where)):
pywikibot.output(' cacerts: not defined')
elif not os.path.isfile(requests.certs.where()):
pywikibot.output(' cacerts: {} (missing)'.format(
requests.certs.where()))
else:
pywikibot.output(' cacerts: ' + requests.certs.where())
with codecs.open(requests.certs.where(), 'r', 'utf-8') as cert_file:
text = cert_file.read()
if WMF_CACERT in text:
has_wikimedia_cert = True
pywikibot.output(' certificate test: {}'
.format('ok' if has_wikimedia_cert else 'not ok'))
if not has_wikimedia_cert:
pywikibot.output(' Please reinstall requests!')
pywikibot.output('Python: ' + sys.version)
toolforge_env_hostname = get_toolforge_hostname()
if toolforge_env_hostname:
pywikibot.output('Toolforge hostname: ' + toolforge_env_hostname)
# check environment settings
settings = {key for key in os.environ if key.startswith('PYWIKIBOT')}
settings.update(['PYWIKIBOT_DIR', 'PYWIKIBOT_DIR_PWB',
'PYWIKIBOT_NO_USER_CONFIG'])
for environ_name in sorted(settings):
pywikibot.output(
'{}: {}'.format(environ_name,
os.environ.get(environ_name, 'Not set') or "''"))
pywikibot.output('Config base dir: ' + pywikibot.config.base_dir)
for family, usernames in pywikibot.config.usernames.items():
if not usernames:
continue
pywikibot.output('Usernames for family {!r}:'.format(family))
for lang, username in usernames.items():
pywikibot.output('\t{}: {}'.format(lang, username))
if __name__ == '__main__':
main()
| wikimedia/pywikibot-core | pywikibot/scripts/version.py | Python | mit | 3,279 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for directory_watcher."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
from mxconsole.backend.event_processing import directory_watcher
from mxconsole.backend.event_processing import io_wrapper
from mxconsole.framework import test_util
from mxconsole.platform import gfile, googletest
class _ByteLoader(object):
"""A loader that loads individual bytes from a file."""
def __init__(self, path):
self._f = open(path)
self.bytes_read = 0
def Load(self):
while True:
self._f.seek(self.bytes_read)
byte = self._f.read(1)
if byte:
self.bytes_read += 1
yield byte
else:
return
class DirectoryWatcherTest(test_util.TensorFlowTestCase):
def setUp(self):
# Put everything in a directory so it's easier to delete.
self._directory = os.path.join(self.get_temp_dir(), 'monitor_dir')
os.mkdir(self._directory)
self._watcher = directory_watcher.DirectoryWatcher(self._directory,
_ByteLoader)
self.stubs = googletest.StubOutForTesting()
def tearDown(self):
self.stubs.CleanUp()
try:
shutil.rmtree(self._directory)
except OSError:
# Some tests delete the directory.
pass
def _WriteToFile(self, filename, data):
path = os.path.join(self._directory, filename)
with open(path, 'a') as f:
f.write(data)
def _LoadAllEvents(self):
"""Loads all events in the watcher."""
for _ in self._watcher.Load():
pass
def assertWatcherYields(self, values):
self.assertEqual(list(self._watcher.Load()), values)
def testRaisesWithBadArguments(self):
with self.assertRaises(ValueError):
directory_watcher.DirectoryWatcher(None, lambda x: None)
with self.assertRaises(ValueError):
directory_watcher.DirectoryWatcher('dir', None)
def testEmptyDirectory(self):
self.assertWatcherYields([])
def testSingleWrite(self):
self._WriteToFile('a', 'abc')
self.assertWatcherYields(['a', 'b', 'c'])
self.assertFalse(self._watcher.OutOfOrderWritesDetected())
def testMultipleWrites(self):
self._WriteToFile('a', 'abc')
self.assertWatcherYields(['a', 'b', 'c'])
self._WriteToFile('a', 'xyz')
self.assertWatcherYields(['x', 'y', 'z'])
self.assertFalse(self._watcher.OutOfOrderWritesDetected())
def testMultipleLoads(self):
self._WriteToFile('a', 'a')
self._watcher.Load()
self._watcher.Load()
self.assertWatcherYields(['a'])
self.assertFalse(self._watcher.OutOfOrderWritesDetected())
def testMultipleFilesAtOnce(self):
self._WriteToFile('b', 'b')
self._WriteToFile('a', 'a')
self.assertWatcherYields(['a', 'b'])
self.assertFalse(self._watcher.OutOfOrderWritesDetected())
def testFinishesLoadingFileWhenSwitchingToNewFile(self):
self._WriteToFile('a', 'a')
# Empty the iterator.
self.assertEquals(['a'], list(self._watcher.Load()))
self._WriteToFile('a', 'b')
self._WriteToFile('b', 'c')
# The watcher should finish its current file before starting a new one.
self.assertWatcherYields(['b', 'c'])
self.assertFalse(self._watcher.OutOfOrderWritesDetected())
def testIntermediateEmptyFiles(self):
self._WriteToFile('a', 'a')
self._WriteToFile('b', '')
self._WriteToFile('c', 'c')
self.assertWatcherYields(['a', 'c'])
self.assertFalse(self._watcher.OutOfOrderWritesDetected())
def testPathFilter(self):
self._watcher = directory_watcher.DirectoryWatcher(
self._directory, _ByteLoader,
lambda path: 'do_not_watch_me' not in path)
self._WriteToFile('a', 'a')
self._WriteToFile('do_not_watch_me', 'b')
self._WriteToFile('c', 'c')
self.assertWatcherYields(['a', 'c'])
self.assertFalse(self._watcher.OutOfOrderWritesDetected())
def testDetectsNewOldFiles(self):
self._WriteToFile('b', 'a')
self._LoadAllEvents()
self._WriteToFile('a', 'a')
self._LoadAllEvents()
self.assertTrue(self._watcher.OutOfOrderWritesDetected())
def testIgnoresNewerFiles(self):
self._WriteToFile('a', 'a')
self._LoadAllEvents()
self._WriteToFile('q', 'a')
self._LoadAllEvents()
self.assertFalse(self._watcher.OutOfOrderWritesDetected())
def testDetectsChangingOldFiles(self):
self._WriteToFile('a', 'a')
self._WriteToFile('b', 'a')
self._LoadAllEvents()
self._WriteToFile('a', 'c')
self._LoadAllEvents()
self.assertTrue(self._watcher.OutOfOrderWritesDetected())
def testDoesntCrashWhenFileIsDeleted(self):
self._WriteToFile('a', 'a')
self._LoadAllEvents()
os.remove(os.path.join(self._directory, 'a'))
self._WriteToFile('b', 'b')
self.assertWatcherYields(['b'])
def testRaisesRightErrorWhenDirectoryIsDeleted(self):
self._WriteToFile('a', 'a')
self._LoadAllEvents()
shutil.rmtree(self._directory)
with self.assertRaises(directory_watcher.DirectoryDeletedError):
self._LoadAllEvents()
def testDoesntRaiseDirectoryDeletedErrorIfOutageIsTransient(self):
self._WriteToFile('a', 'a')
self._LoadAllEvents()
shutil.rmtree(self._directory)
# Fake a single transient I/O error.
def FakeFactory(original):
def Fake(*args, **kwargs):
if FakeFactory.has_been_called:
original(*args, **kwargs)
else:
raise OSError('lp0 temporarily on fire')
return Fake
FakeFactory.has_been_called = False
for stub_name in ['ListDirectoryAbsolute', 'ListRecursively']:
self.stubs.Set(io_wrapper, stub_name,
FakeFactory(getattr(io_wrapper, stub_name)))
for stub_name in ['IsDirectory', 'Exists', 'Stat']:
self.stubs.Set(gfile, stub_name,
FakeFactory(getattr(gfile, stub_name)))
with self.assertRaises((IOError, OSError)):
self._LoadAllEvents()
if __name__ == '__main__':
googletest.main()
| bravomikekilo/mxconsole | mxconsole/backend/event_processing/directory_watcher_test.py | Python | apache-2.0 | 6,673 |
import requests
from flask import current_app
class GoogleMaps:
@classmethod
def get_latlong(cls, address):
params = {
'address': address,
'key': current_app.config['GOOGLE']['maps_api_key'],
}
url = "https://maps.googleapis.com/maps/api/geocode/json"
info = requests.get(url, params=params).json()
if info['results']:
location = info['results'][0]['geometry']['location']
info = {'lat': location['lat'], 'lng': location['lng']}
else:
info = {}
return info
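# Usage sketch (assumes a Flask app whose config defines GOOGLE['maps_api_key']):
#
#     with app.app_context():
#         coords = GoogleMaps.get_latlong("Av. Paulista, Sao Paulo")
#         # -> {'lat': ..., 'lng': ...}, or {} when geocoding returns no result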
| SalveMais/api | salvemais/util/maps.py | Python | mit | 585 |
# -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant la fonction nb_possede."""
from fractions import Fraction
from primaires.scripting.fonction import Fonction
class ClasseFonction(Fonction):
"""Teste si un personnage possède un objet."""
@classmethod
def init_types(cls):
cls.ajouter_types(cls.nb_possede_proto, "Personnage", "str")
@staticmethod
def nb_possede_proto(personnage, prototype_ou_type):
"""Retourne le nombre d'objets possédés par le personnage.
Paramètres à préciser :
* personnage : le personnage à tester
* prototype_ou_type : un nom de type ou clé de prototype
Cette fonction retourne le nombre d'objets correspondants, 0
si le personnage ne possède aucun objet correspondant
(dans son équipement, directement, ou dans un sac). Vous
pouvez aussi vérifier que le personnage possède un certain
type d'objet en précisant le nom du type précédé d'un '+'. Voir
les exemples ci-dessous.
Exemples d'utilisation :
nb = nb_possede(personnage, "pomme_rouge")
si nb:
# La pomme rouge a pu être trouvée dans le personnage
si nb_possede(personnage, "pomme_rouge"):
# ...
# Si vous voulez savoir si un personnage possède une arme
nb_armes = nb_possede(personnage, "+arme")
si nb_armes:
# Et si vous n'avez pas besoin de connaître l'objet
si nb_possede(personnage, "+arme"):
# ...
"""
total = 0
for objet, qtt in personnage.equipement.inventaire.iter_objets_qtt():
if prototype_ou_type.startswith("+"):
nom_type = prototype_ou_type[1:]
if objet.est_de_type(nom_type):
total += qtt
else:
if objet.cle == prototype_ou_type:
total += qtt
return Fraction(total)
| vlegoff/tsunami | src/primaires/scripting/fonctions/nb_possede.py | Python | bsd-3-clause | 3,519 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Ip(CMakePackage):
"""The NCEP general interpolation library (iplib) contains Fortran 90
subprograms to be used for interpolating between nearly all grids used at
NCEP. This is part of the NCEPLIBS project."""
homepage = "https://noaa-emc.github.io/NCEPLIBS-ip"
url = "https://github.com/NOAA-EMC/NCEPLIBS-ip/archive/refs/tags/v3.3.3.tar.gz"
maintainers = ['t-brown', 'kgerheiser', 'edwardhartnett', 'Hang-Lei-NOAA']
version('4.0.0', sha256='a2ef0cc4e4012f9cb0389fab6097407f4c623eb49772d96eb80c44f804aa86b8')
version('3.3.3', sha256='d5a569ca7c8225a3ade64ef5cd68f3319bcd11f6f86eb3dba901d93842eb3633', preferred=True)
depends_on('sp')
def setup_run_environment(self, env):
for suffix in ('4', '8', 'd'):
            lib = find_libraries('libip_' + suffix, root=self.prefix,
                                 shared=False, recursive=True)
env.set('IP_LIB' + suffix, lib[0])
env.set('IP_INC' + suffix, join_path(self.prefix, 'include_' + suffix))
| LLNL/spack | var/spack/repos/builtin/packages/ip/package.py | Python | lgpl-2.1 | 1,242 |
# -*- coding:utf-8 -*-
from .utils import github_url, connect_url
class FBRankException(Exception):
""" Base Exception FOr FBRank Project
"""
exit_code = 1
def __init__(self, message):
pass
def __repr__(self):
return "This is Base Exception"
__str__ = __repr__
class IllegalArgumentException(FBRankException):
"""Use When argument is illegal
"""
class IllegalNameException(FBRankException):
"""use when provide name is not illegal
"""
def __init__(self, name):
self.name = name
def __repr__(self, name):
message = "Sorry What your input {name} can't be recongnized,you can seed an email to the" \
"{mail},or send one PR to the {github}".format(name=self.name, mail=connect_url, github=github_url)
return message
def __str__(self):
return self.__repr__(self.name)
# __str__ = __repr__
class ParseException(FBRankException):
"""use when can't parse webpage content
"""
class NotSupprotedYetException(FBRankException):
"""still not supprt
"""
| Allianzcortex/FBRank | FBRank/utils/exceptions.py | Python | apache-2.0 | 1,097 |
import sqlalchemy as sa
from oslo_db.sqlalchemy import types as db_types
from nca47.db.sqlalchemy.models import base as model_base
from nca47.objects import attributes as attr
HasTenant = model_base.HasTenant
HasId = model_base.HasId
HasStatus = model_base.HasStatus
HasOperationMode = model_base.HasOperationMode
class DnsServer(model_base.BASE, HasId, HasOperationMode):
"""Represents a dns server."""
name = sa.Column(sa.String(attr.NAME_MAX_LEN))
class Zone(model_base.BASE, HasId, HasOperationMode):
"""Represents a dns zone."""
__tablename__ = 'dns_zone_info'
zone_name = sa.Column(sa.String(attr.NAME_MAX_LEN))
tenant_id = sa.Column(sa.String(attr.NAME_MAX_LEN))
zone_id = sa.Column(sa.String(attr.NAME_MAX_LEN))
vres_id = sa.Column(sa.String(attr.NAME_MAX_LEN))
masters = sa.Column(db_types.JsonEncodedList)
slaves = sa.Column(db_types.JsonEncodedList)
renewal = sa.Column(sa.String(attr.NAME_MAX_LEN))
default_ttl = sa.Column(sa.String(attr.NAME_MAX_LEN))
owners = sa.Column(db_types.JsonEncodedList)
ad_controller = sa.Column(sa.String(attr.NAME_MAX_LEN))
comment = sa.Column(sa.String(attr.NAME_MAX_LEN))
class ZoneRecord(model_base.BASE, HasId, HasOperationMode):
"""Represents a dns zone."""
__tablename__ = 'dns_rrs_info'
zone_id = sa.Column(sa.String(attr.UUID_LEN))
rrs_id = sa.Column(sa.String(attr.NAME_MAX_LEN))
rrs_name = sa.Column(sa.String(attr.NAME_MAX_LEN))
type = sa.Column(sa.String(attr.NAME_MAX_LEN))
klass = sa.Column(sa.String(attr.NAME_MAX_LEN))
ttl = sa.Column(sa.String(attr.NAME_MAX_LEN))
rdata = sa.Column(sa.String(attr.NAME_MAX_LEN))
| willowd878/nca47 | nca47/db/sqlalchemy/models/dns.py | Python | apache-2.0 | 1,680 |
# -*- coding: utf-8 -*-
__author__ = 'shreejoy'
import unittest
from article_text_mining.rep_html_table_struct import rep_html_table_struct
class RepTableStructureTest(unittest.TestCase):
@property
def load_html_table_simple(self):
# creates data table object 16055 with some dummy data
with open('tests/test_html_data_tables/example_html_table_simple.html', mode='rb') as f:
simple_table_text = f.read()
return simple_table_text
@property
def load_html_table_complex(self):
with open('tests/test_html_data_tables/example_html_table_complex.html', mode='rb') as f:
complex_table_text = f.read()
return complex_table_text
def test_rep_html_table_struct_simple(self):
expected_table_output = [['th-1', 'th-2', 'th-3', 'th-4', 'th-5', 'th-6'],
['td-1', 'td-1', 'td-1', 'td-1', 'td-1', 'td-1'],
['td-2', 'td-3', 'td-4', 'td-5', 'td-6', 'td-7'],
['td-8', 'td-9', 'td-10', 'td-11', 'td-12', 'td-13'],
['td-14', 'td-15', 'td-16', 'td-17', 'td-18', 'td-19'],
['td-20', 'td-21', 'td-22', 'td-23', 'td-24', 'td-25'],
['td-26', 'td-27', 'td-28', 'td-29', 'td-30', 'td-31'],
['td-32', 'td-33', 'td-34', 'td-35', 'td-36', 'td-37'],
['td-38', 'td-39', 'td-40', 'td-41', 'td-42', 'td-43'],
['td-44', 'td-45', 'td-46', 'td-47', 'td-48', 'td-49'],
['td-50', 'td-51', 'td-52', 'td-53', 'td-54', 'td-55'],
['td-56', 'td-57', 'td-58', 'td-59', 'td-60', 'td-61'],
['td-62', 'td-63', 'td-64', 'td-65', 'td-66', 'td-67'],
['td-68', 'td-69', 'td-70', 'td-71', 'td-72', 'td-73'],
['td-74', 'td-75', 'td-76', 'td-77', 'td-78', 'td-79'],
['td-80', 'td-81', 'td-82', 'td-83', 'td-84', 'td-85'],
['td-86', 'td-87', 'td-88', 'td-89', 'td-90', 'td-91'],
['td-92', 'td-93', 'td-94', 'td-95', 'td-96', 'td-97'],
['td-98', 'td-99', 'td-100', 'td-101', 'td-102', 'td-103'],
['td-104', 'td-105', 'td-106', 'td-107', 'td-108', 'td-109']]
html_table_text = self.load_html_table_simple
a, b, html_id_table = rep_html_table_struct(html_table_text)
self.assertEqual(html_id_table, expected_table_output)
def test_rep_html_table_struct_complex(self):
expected_table_output = [['td-1', 0, 0, 0, 0, 0, 0],
['td-2', 'td-2', 'td-2', 'td-2', 'td-2', 'td-2', 'td-2'],
['td-3', 'td-4', 'td-4', 'td-5', 'td-5', 'td-6', 'td-6'],
['td-3', 'td-7', 'td-8', 'td-9', 'td-10', 'td-11', 'td-12'],
['td-13', 'td-13', 'td-13', 'td-13', 'td-13', 'td-13', 'td-13'],
['td-14', 'td-15', 'td-16', 'td-17', 'td-18', 'td-19', 'td-20'],
['td-21', 'td-22', 'td-23', 'td-24', 'td-25', 'td-26', 'td-27'],
['td-28', 'td-29', 'td-30', 'td-31', 'td-32', 'td-33', 'td-34'],
['td-35', 'td-36', 'td-37', 'td-38', 'td-39', 'td-40', 'td-41'],
['td-42', 'td-43', 'td-44', 'td-45', 'td-46', 'td-47', 'td-48'],
['td-49', 'td-50', 'td-51', 'td-52', 'td-53', 'td-54', 'td-55'],
['td-56', 'td-57', 'td-58', 'td-59', 'td-60', 'td-61', 'td-62'],
['td-63', 'td-64', 'td-65', 'td-66', 'td-67', 'td-68', 'td-69'],
['td-70', 'td-71', 'td-72', 'td-73', 'td-74', 'td-75', 'td-76'],
['td-77', 'td-78', 'td-79', 'td-80', 'td-81', 'td-82', 'td-83']]
html_table_text = self.load_html_table_complex
a, b, html_id_table = rep_html_table_struct(html_table_text)
self.assertEqual(html_id_table, expected_table_output)
if __name__ == '__main__':
unittest.main() | lessc0de/neuroelectro_org | tests/test_rep_html_table_struct.py | Python | gpl-2.0 | 4,428 |
from hashlib import sha1
def grade(autogen, key):
secretkey = "my_key_here"
n = autogen.instance
flag = sha1((str(n) + secretkey).encode('utf-8')).hexdigest()
if flag.lower() in key.lower().strip():
return True, "Correct!"
else:
return False, "Try Again."
def get_hints():
return [("10", "Think colors"), ("10", "Sometimes things can be the same color")]
| mcpa-stlouis/mcpa-ctf | example_problems/web/hidden-message/grader/grader.py | Python | mit | 397 |
from csp import Channel, put, go, alts, sleep, CLOSED
def produce(chan, value):
yield sleep(0.1)
yield put(chan, value)
def main():
chans = []
for i in range(20):
chan = Channel()
go(produce, chan, i)
chans.append(chan)
def timeout(seconds):
chan = Channel()
def t():
yield sleep(seconds)
chan.close()
go(t)
return chan
chans.append(timeout(0.3))
while True:
value, chan = yield alts(chans)
if value == CLOSED:
print "time out"
break
else:
print value
| ubolonton/twisted-csp | example/select.py | Python | epl-1.0 | 627 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "swiftwind_heroku.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| adamcharnock/swiftwind-heroku | manage.py | Python | mit | 814 |
import gzip
import os
import struct
import tempfile
import unittest
from io import BytesIO, StringIO, TextIOWrapper
from unittest import mock
from django.core.files import File
from django.core.files.base import ContentFile
from django.core.files.move import file_move_safe
from django.core.files.temp import NamedTemporaryFile
from django.core.files.uploadedfile import (
InMemoryUploadedFile, SimpleUploadedFile, UploadedFile,
)
try:
from PIL import Image
except ImportError:
Image = None
else:
from django.core.files import images
class FileTests(unittest.TestCase):
def test_unicode_uploadedfile_name(self):
uf = UploadedFile(name='¿Cómo?', content_type='text')
self.assertIs(type(repr(uf)), str)
def test_unicode_file_name(self):
f = File(None, 'djángö')
self.assertIs(type(repr(f)), str)
def test_context_manager(self):
orig_file = tempfile.TemporaryFile()
base_file = File(orig_file)
with base_file as f:
self.assertIs(base_file, f)
self.assertFalse(f.closed)
self.assertTrue(f.closed)
self.assertTrue(orig_file.closed)
def test_open_resets_opened_file_to_start_and_returns_context_manager(self):
file = File(BytesIO(b'content'))
file.read()
with file.open() as f:
self.assertEqual(f.read(), b'content')
def test_open_reopens_closed_file_and_returns_context_manager(self):
temporary_file = tempfile.NamedTemporaryFile(delete=False)
file = File(temporary_file)
try:
file.close()
with file.open() as f:
self.assertFalse(f.closed)
finally:
# remove temporary file
os.unlink(file.name)
def test_namedtemporaryfile_closes(self):
"""
The symbol django.core.files.NamedTemporaryFile is assigned as
a different class on different operating systems. In
any case, the result should minimally mock some of the API of
tempfile.NamedTemporaryFile from the Python standard library.
"""
tempfile = NamedTemporaryFile()
self.assertTrue(hasattr(tempfile, "closed"))
self.assertFalse(tempfile.closed)
tempfile.close()
self.assertTrue(tempfile.closed)
def test_file_mode(self):
# Should not set mode to None if it is not present.
# See #14681, stdlib gzip module crashes if mode is set to None
file = SimpleUploadedFile("mode_test.txt", b"content")
self.assertFalse(hasattr(file, 'mode'))
gzip.GzipFile(fileobj=file)
def test_file_iteration(self):
"""
File objects should yield lines when iterated over.
Refs #22107.
"""
file = File(BytesIO(b'one\ntwo\nthree'))
self.assertEqual(list(file), [b'one\n', b'two\n', b'three'])
def test_file_iteration_windows_newlines(self):
"""
#8149 - File objects with \r\n line endings should yield lines
when iterated over.
"""
f = File(BytesIO(b'one\r\ntwo\r\nthree'))
self.assertEqual(list(f), [b'one\r\n', b'two\r\n', b'three'])
def test_file_iteration_mac_newlines(self):
"""
#8149 - File objects with \r line endings should yield lines
when iterated over.
"""
f = File(BytesIO(b'one\rtwo\rthree'))
self.assertEqual(list(f), [b'one\r', b'two\r', b'three'])
def test_file_iteration_mixed_newlines(self):
f = File(BytesIO(b'one\rtwo\nthree\r\nfour'))
self.assertEqual(list(f), [b'one\r', b'two\n', b'three\r\n', b'four'])
def test_file_iteration_with_unix_newline_at_chunk_boundary(self):
f = File(BytesIO(b'one\ntwo\nthree'))
# Set chunk size to create a boundary after \n:
# b'one\n...
# ^
f.DEFAULT_CHUNK_SIZE = 4
self.assertEqual(list(f), [b'one\n', b'two\n', b'three'])
def test_file_iteration_with_windows_newline_at_chunk_boundary(self):
f = File(BytesIO(b'one\r\ntwo\r\nthree'))
# Set chunk size to create a boundary between \r and \n:
# b'one\r\n...
# ^
f.DEFAULT_CHUNK_SIZE = 4
self.assertEqual(list(f), [b'one\r\n', b'two\r\n', b'three'])
def test_file_iteration_with_mac_newline_at_chunk_boundary(self):
f = File(BytesIO(b'one\rtwo\rthree'))
# Set chunk size to create a boundary after \r:
# b'one\r...
# ^
f.DEFAULT_CHUNK_SIZE = 4
self.assertEqual(list(f), [b'one\r', b'two\r', b'three'])
def test_file_iteration_with_text(self):
f = File(StringIO('one\ntwo\nthree'))
self.assertEqual(list(f), ['one\n', 'two\n', 'three'])
def test_readable(self):
with tempfile.TemporaryFile() as temp, File(temp, name='something.txt') as test_file:
self.assertTrue(test_file.readable())
self.assertFalse(test_file.readable())
def test_writable(self):
with tempfile.TemporaryFile() as temp, File(temp, name='something.txt') as test_file:
self.assertTrue(test_file.writable())
self.assertFalse(test_file.writable())
with tempfile.TemporaryFile('rb') as temp, File(temp, name='something.txt') as test_file:
self.assertFalse(test_file.writable())
def test_seekable(self):
with tempfile.TemporaryFile() as temp, File(temp, name='something.txt') as test_file:
self.assertTrue(test_file.seekable())
self.assertFalse(test_file.seekable())
def test_io_wrapper(self):
content = "vive l'été\n"
with tempfile.TemporaryFile() as temp, File(temp, name='something.txt') as test_file:
test_file.write(content.encode())
test_file.seek(0)
wrapper = TextIOWrapper(test_file, 'utf-8', newline='\n')
self.assertEqual(wrapper.read(), content)
wrapper.write(content)
wrapper.seek(0)
self.assertEqual(wrapper.read(), content * 2)
test_file = wrapper.detach()
test_file.seek(0)
self.assertEqual(test_file.read(), (content * 2).encode())
class NoNameFileTestCase(unittest.TestCase):
"""
Other examples of unnamed files may be tempfile.SpooledTemporaryFile or
urllib.urlopen()
"""
def test_noname_file_default_name(self):
self.assertIsNone(File(BytesIO(b'A file with no name')).name)
def test_noname_file_get_size(self):
self.assertEqual(File(BytesIO(b'A file with no name')).size, 19)
class ContentFileTestCase(unittest.TestCase):
def test_content_file_default_name(self):
self.assertIsNone(ContentFile(b"content").name)
def test_content_file_custom_name(self):
"""
The constructor of ContentFile accepts 'name' (#16590).
"""
name = "I can have a name too!"
self.assertEqual(ContentFile(b"content", name=name).name, name)
def test_content_file_input_type(self):
"""
ContentFile can accept both bytes and strings and the retrieved content
is of the same type.
"""
self.assertIsInstance(ContentFile(b"content").read(), bytes)
self.assertIsInstance(ContentFile("español").read(), str)
def test_open_resets_file_to_start_and_returns_context_manager(self):
file = ContentFile(b'content')
with file.open() as f:
self.assertEqual(f.read(), b'content')
with file.open() as f:
self.assertEqual(f.read(), b'content')
class InMemoryUploadedFileTests(unittest.TestCase):
def test_open_resets_file_to_start_and_returns_context_manager(self):
uf = InMemoryUploadedFile(StringIO('1'), '', 'test', 'text/plain', 1, 'utf8')
uf.read()
with uf.open() as f:
self.assertEqual(f.read(), '1')
class DimensionClosingBug(unittest.TestCase):
"""
get_image_dimensions() properly closes files (#8817)
"""
@unittest.skipUnless(Image, "Pillow not installed")
def test_not_closing_of_files(self):
"""
Open files passed into get_image_dimensions() should stay opened.
"""
empty_io = BytesIO()
try:
images.get_image_dimensions(empty_io)
finally:
self.assertTrue(not empty_io.closed)
@unittest.skipUnless(Image, "Pillow not installed")
def test_closing_of_filenames(self):
"""
        get_image_dimensions() called with a filename should close the file.
"""
# We need to inject a modified open() builtin into the images module
# that checks if the file was closed properly if the function is
# called with a filename instead of an file object.
# get_image_dimensions will call our catching_open instead of the
# regular builtin one.
class FileWrapper:
_closed = []
def __init__(self, f):
self.f = f
def __getattr__(self, name):
return getattr(self.f, name)
def close(self):
self._closed.append(True)
self.f.close()
def catching_open(*args):
return FileWrapper(open(*args))
images.open = catching_open
try:
images.get_image_dimensions(os.path.join(os.path.dirname(__file__), "test1.png"))
finally:
del images.open
self.assertTrue(FileWrapper._closed)
class InconsistentGetImageDimensionsBug(unittest.TestCase):
"""
get_image_dimensions() works properly after various calls
using a file handler (#11158)
"""
@unittest.skipUnless(Image, "Pillow not installed")
def test_multiple_calls(self):
"""
Multiple calls of get_image_dimensions() should return the same size.
"""
img_path = os.path.join(os.path.dirname(__file__), "test.png")
with open(img_path, 'rb') as fh:
image = images.ImageFile(fh)
image_pil = Image.open(fh)
size_1 = images.get_image_dimensions(image)
size_2 = images.get_image_dimensions(image)
self.assertEqual(image_pil.size, size_1)
self.assertEqual(size_1, size_2)
@unittest.skipUnless(Image, "Pillow not installed")
def test_bug_19457(self):
"""
Regression test for #19457
        get_image_dimensions fails on some pngs, while Image.size works fine on them
"""
img_path = os.path.join(os.path.dirname(__file__), "magic.png")
size = images.get_image_dimensions(img_path)
with open(img_path, 'rb') as fh:
self.assertEqual(size, Image.open(fh).size)
@unittest.skipUnless(Image, "Pillow not installed")
class GetImageDimensionsTests(unittest.TestCase):
def test_invalid_image(self):
"""
get_image_dimensions() should return (None, None) for the dimensions of
invalid images (#24441).
brokenimg.png is not a valid image and it has been generated by:
$ echo "123" > brokenimg.png
"""
img_path = os.path.join(os.path.dirname(__file__), "brokenimg.png")
with open(img_path, 'rb') as fh:
size = images.get_image_dimensions(fh)
self.assertEqual(size, (None, None))
def test_valid_image(self):
"""
get_image_dimensions() should catch struct.error while feeding the PIL
Image parser (#24544).
Emulates the Parser feed error. Since the error is raised on every feed
attempt, the resulting image size should be invalid: (None, None).
"""
img_path = os.path.join(os.path.dirname(__file__), "test.png")
with mock.patch('PIL.ImageFile.Parser.feed', side_effect=struct.error):
with open(img_path, 'rb') as fh:
size = images.get_image_dimensions(fh)
self.assertEqual(size, (None, None))
class FileMoveSafeTests(unittest.TestCase):
def test_file_move_overwrite(self):
handle_a, self.file_a = tempfile.mkstemp()
handle_b, self.file_b = tempfile.mkstemp()
# file_move_safe should raise an IOError exception if destination file exists and allow_overwrite is False
with self.assertRaises(IOError):
file_move_safe(self.file_a, self.file_b, allow_overwrite=False)
# should allow it and continue on if allow_overwrite is True
self.assertIsNone(file_move_safe(self.file_a, self.file_b, allow_overwrite=True))
os.close(handle_a)
os.close(handle_b)
class SpooledTempTests(unittest.TestCase):
def test_in_memory_spooled_temp(self):
with tempfile.SpooledTemporaryFile() as temp:
temp.write(b"foo bar baz quux\n")
django_file = File(temp, name="something.txt")
self.assertEqual(django_file.size, 17)
def test_written_spooled_temp(self):
with tempfile.SpooledTemporaryFile(max_size=4) as temp:
temp.write(b"foo bar baz quux\n")
django_file = File(temp, name="something.txt")
self.assertEqual(django_file.size, 17)
| labcodes/django | tests/files/tests.py | Python | bsd-3-clause | 13,129 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-07 19:23
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [
migrations.CreateModel(
name='Subscription',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True,
serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('cpf', models.CharField(max_length=11)),
('email', models.EmailField(max_length=254)),
('phone', models.CharField(max_length=20)),
('created_at', models.DateTimeField(auto_now_add=True)),
],
),
]
| renzon/wttd | eventex/subscriptions/migrations/0001_initial.py | Python | agpl-3.0 | 844 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from oslo_log import log as logging
from cinder import exception
from cinder.volume.targets import driver
LOG = logging.getLogger(__name__)
class NVMeOF(driver.Target):
"""Target object for block storage devices with RDMA transport."""
protocol = 'nvmeof'
target_protocol_map = {
'nvmet_rdma': 'rdma',
}
def __init__(self, *args, **kwargs):
"""Reads NVMeOF configurations."""
super(NVMeOF, self).__init__(*args, **kwargs)
self.target_ip = self.configuration.target_ip_address
self.target_port = self.configuration.target_port
self.nvmet_port_id = self.configuration.nvmet_port_id
self.nvmet_ns_id = self.configuration.nvmet_ns_id
self.nvmet_subsystem_name = self.configuration.target_prefix
target_protocol = self.configuration.target_protocol
if target_protocol in self.target_protocol_map:
self.nvme_transport_type = self.target_protocol_map[
target_protocol]
else:
raise exception.UnsupportedNVMETProtocol(
protocol=target_protocol
)
def initialize_connection(self, volume, connector):
"""Returns the connection info.
In NVMeOF driver, :driver_volume_type: is set to 'nvmeof',
:data: is the driver data that has the value of
_get_connection_properties.
Example return value:
.. code-block:: json
{
"driver_volume_type": "nvmeof",
"data":
{
"target_portal": "1.1.1.1",
"target_port": 4420,
"nqn": "nqn.volume-0001",
"transport_type": "rdma",
"ns_id": 10
}
}
"""
return {
'driver_volume_type': self.protocol,
'data': self._get_connection_properties(volume)
}
def _get_connection_properties(self, volume):
"""Gets NVMeOF connection configuration.
:return: dictionary of the following keys:
:target_portal: NVMe target IP address
:target_port: NVMe target port
:nqn: NQN of the NVMe target
:transport_type: Network fabric being used for an
NVMe-over-Fabrics network
:ns_id: namespace id associated with the subsystem
"""
location = volume['provider_location']
target_connection, nvme_transport_type, nqn, nvmet_ns_id = (
location.split(' '))
target_portal, target_port = target_connection.split(':')
return {
'target_portal': target_portal,
'target_port': target_port,
'nqn': nqn,
'transport_type': nvme_transport_type,
'ns_id': nvmet_ns_id
}
def get_nvmeof_location(self, nqn, target_ip, target_port,
nvme_transport_type, nvmet_ns_id):
"""Serializes driver data into single line string."""
return "%(ip)s:%(port)s %(transport)s %(nqn)s %(ns_id)s" % (
{'ip': target_ip,
'port': target_port,
'transport': nvme_transport_type,
'nqn': nqn,
'ns_id': nvmet_ns_id})
def terminate_connection(self, volume, connector, **kwargs):
pass
def create_export(self, context, volume, volume_path):
"""Creates export data for a logical volume."""
return self.create_nvmeof_target(
volume['id'],
self.configuration.target_prefix,
self.target_ip,
self.target_port,
self.nvme_transport_type,
self.nvmet_port_id,
self.nvmet_ns_id,
volume_path)
def ensure_export(self, context, volume, volume_path):
pass
def remove_export(self, context, volume):
return self.delete_nvmeof_target(volume)
def validate_connector(self, connector):
if 'initiator' not in connector:
LOG.error('The volume driver requires the NVMe initiator '
'name in the connector.')
raise exception.InvalidConnectorException(
missing='initiator')
return True
@abc.abstractmethod
def create_nvmeof_target(self,
volume_id,
subsystem_name,
target_ip,
target_port,
transport_type,
nvmet_port_id,
ns_id,
volume_path):
pass
@abc.abstractmethod
def delete_nvmeof_target(self, target_name):
pass
| phenoxim/cinder | cinder/volume/targets/nvmeof.py | Python | apache-2.0 | 5,303 |
import os
import boto
from boto.s3.key import Key
from boto.s3.connection import OrdinaryCallingFormat
def upload_s3_book(release, directory):
conn = boto.s3.connect_to_region(
'us-west-1', calling_format=OrdinaryCallingFormat())
bucket = conn.get_bucket('readiab.org')
html = {'Content-type': 'text/html; charset=utf-8'}
css = {'Content-type': 'text/css; charset=utf-8'}
key_prefix = 'book/%s/' % release
root_offset = None
for root, dirs, files in os.walk(directory):
if not root_offset:
root_offset = root
r = root.replace(root_offset, '').replace('/', '')
for file in files:
key = key_prefix
if r:
key += r + '/'
key += file
if file.startswith('index'):
key += '.html'
path = os.path.join(root, file)
upload = Key(bucket)
upload.key = key
if '.zip' in path:
upload.set_contents_from_filename(path)
elif '.css' in path:
upload.set_contents_from_filename(path, headers=css)
else:
upload.set_contents_from_filename(path, headers=html)
| gregcaporaso/build-iab | biab/s3.py | Python | bsd-3-clause | 1,217 |
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase
from unittest.mock import Mock
from opentelemetry._metrics.measurement import Measurement as APIMeasurement
from opentelemetry.sdk._metrics.instrument import (
Counter,
Histogram,
ObservableCounter,
ObservableGauge,
ObservableUpDownCounter,
UpDownCounter,
)
from opentelemetry.sdk._metrics.measurement import Measurement
class TestCounter(TestCase):
def test_add(self):
mc = Mock()
counter = Counter("name", Mock(), mc)
counter.add(1.0)
mc.consume_measurement.assert_called_once()
def test_add_non_monotonic(self):
mc = Mock()
counter = Counter("name", Mock(), mc)
counter.add(-1.0)
mc.consume_measurement.assert_not_called()
class TestUpDownCounter(TestCase):
def test_add(self):
mc = Mock()
counter = UpDownCounter("name", Mock(), mc)
counter.add(1.0)
mc.consume_measurement.assert_called_once()
def test_add_non_monotonic(self):
mc = Mock()
counter = UpDownCounter("name", Mock(), mc)
counter.add(-1.0)
mc.consume_measurement.assert_called_once()
TEST_ATTRIBUTES = {"foo": "bar"}
def callable_callback():
return [
APIMeasurement(1, attributes=TEST_ATTRIBUTES),
APIMeasurement(2, attributes=TEST_ATTRIBUTES),
APIMeasurement(3, attributes=TEST_ATTRIBUTES),
]
def generator_callback():
yield [
APIMeasurement(1, attributes=TEST_ATTRIBUTES),
APIMeasurement(2, attributes=TEST_ATTRIBUTES),
APIMeasurement(3, attributes=TEST_ATTRIBUTES),
]
class TestObservableGauge(TestCase):
def test_callable_callback(self):
observable_gauge = ObservableGauge(
"name", Mock(), Mock(), callable_callback
)
self.assertEqual(
list(observable_gauge.callback()),
[
Measurement(
1, instrument=observable_gauge, attributes=TEST_ATTRIBUTES
),
Measurement(
2, instrument=observable_gauge, attributes=TEST_ATTRIBUTES
),
Measurement(
3, instrument=observable_gauge, attributes=TEST_ATTRIBUTES
),
],
)
def test_generator_callback(self):
observable_gauge = ObservableGauge(
"name", Mock(), Mock(), generator_callback()
)
self.assertEqual(
list(observable_gauge.callback()),
[
Measurement(
1, instrument=observable_gauge, attributes=TEST_ATTRIBUTES
),
Measurement(
2, instrument=observable_gauge, attributes=TEST_ATTRIBUTES
),
Measurement(
3, instrument=observable_gauge, attributes=TEST_ATTRIBUTES
),
],
)
class TestObservableCounter(TestCase):
def test_callable_callback(self):
observable_counter = ObservableCounter(
"name", Mock(), Mock(), callable_callback
)
self.assertEqual(
list(observable_counter.callback()),
[
Measurement(
1,
instrument=observable_counter,
attributes=TEST_ATTRIBUTES,
),
Measurement(
2,
instrument=observable_counter,
attributes=TEST_ATTRIBUTES,
),
Measurement(
3,
instrument=observable_counter,
attributes=TEST_ATTRIBUTES,
),
],
)
def test_generator_callback(self):
observable_counter = ObservableCounter(
"name", Mock(), Mock(), generator_callback()
)
self.assertEqual(
list(observable_counter.callback()),
[
Measurement(
1,
instrument=observable_counter,
attributes=TEST_ATTRIBUTES,
),
Measurement(
2,
instrument=observable_counter,
attributes=TEST_ATTRIBUTES,
),
Measurement(
3,
instrument=observable_counter,
attributes=TEST_ATTRIBUTES,
),
],
)
class TestObservableUpDownCounter(TestCase):
def test_callable_callback(self):
observable_up_down_counter = ObservableUpDownCounter(
"name", Mock(), Mock(), callable_callback
)
self.assertEqual(
list(observable_up_down_counter.callback()),
[
Measurement(
1,
instrument=observable_up_down_counter,
attributes=TEST_ATTRIBUTES,
),
Measurement(
2,
instrument=observable_up_down_counter,
attributes=TEST_ATTRIBUTES,
),
Measurement(
3,
instrument=observable_up_down_counter,
attributes=TEST_ATTRIBUTES,
),
],
)
def test_generator_callback(self):
observable_up_down_counter = ObservableUpDownCounter(
"name", Mock(), Mock(), generator_callback()
)
self.assertEqual(
list(observable_up_down_counter.callback()),
[
Measurement(
1,
instrument=observable_up_down_counter,
attributes=TEST_ATTRIBUTES,
),
Measurement(
2,
instrument=observable_up_down_counter,
attributes=TEST_ATTRIBUTES,
),
Measurement(
3,
instrument=observable_up_down_counter,
attributes=TEST_ATTRIBUTES,
),
],
)
class TestHistogram(TestCase):
def test_record(self):
mc = Mock()
hist = Histogram("name", Mock(), mc)
hist.record(1.0)
mc.consume_measurement.assert_called_once()
def test_record_non_monotonic(self):
mc = Mock()
hist = Histogram("name", Mock(), mc)
hist.record(-1.0)
mc.consume_measurement.assert_not_called()
| open-telemetry/opentelemetry-python | opentelemetry-sdk/tests/metrics/test_instrument.py | Python | apache-2.0 | 7,197 |
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: symbol.py
"""Non-terminal symbols of Python grammar (from "graminit.h")."""
single_input = 256
file_input = 257
eval_input = 258
decorator = 259
decorators = 260
decorated = 261
funcdef = 262
parameters = 263
varargslist = 264
fpdef = 265
fplist = 266
stmt = 267
simple_stmt = 268
small_stmt = 269
expr_stmt = 270
augassign = 271
print_stmt = 272
del_stmt = 273
pass_stmt = 274
flow_stmt = 275
break_stmt = 276
continue_stmt = 277
return_stmt = 278
yield_stmt = 279
raise_stmt = 280
import_stmt = 281
import_name = 282
import_from = 283
import_as_name = 284
dotted_as_name = 285
import_as_names = 286
dotted_as_names = 287
dotted_name = 288
global_stmt = 289
exec_stmt = 290
assert_stmt = 291
compound_stmt = 292
if_stmt = 293
while_stmt = 294
for_stmt = 295
try_stmt = 296
with_stmt = 297
with_item = 298
except_clause = 299
suite = 300
testlist_safe = 301
old_test = 302
old_lambdef = 303
test = 304
or_test = 305
and_test = 306
not_test = 307
comparison = 308
comp_op = 309
expr = 310
xor_expr = 311
and_expr = 312
shift_expr = 313
arith_expr = 314
term = 315
factor = 316
power = 317
atom = 318
listmaker = 319
testlist_comp = 320
lambdef = 321
trailer = 322
subscriptlist = 323
subscript = 324
sliceop = 325
exprlist = 326
testlist = 327
dictorsetmaker = 328
classdef = 329
arglist = 330
argument = 331
list_iter = 332
list_for = 333
list_if = 334
comp_iter = 335
comp_for = 336
comp_if = 337
testlist1 = 338
encoding_decl = 339
yield_expr = 340
sym_name = {}
for _name, _value in globals().items():
if type(_value) is type(0):
sym_name[_value] = _name
def main():
import sys
import token
if len(sys.argv) == 1:
sys.argv = sys.argv + ['Include/graminit.h', 'Lib/symbol.py']
token.main()
if __name__ == '__main__':
main() | DarthMaulware/EquationGroupLeaks | Leak #5 - Lost In Translation/windows/Resources/Python/Core/Lib/symbol.py | Python | unlicense | 1,939 |
# -*- coding: utf-8 -*-
from __future__ import division
# Copyright (C) 2007 Samuel Abels
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import logging
from SpiffWorkflow.util.event import Event
from SpiffWorkflow.Task import Task
from SpiffWorkflow.exceptions import WorkflowException
LOG = logging.getLogger(__name__)
class TaskSpec(object):
"""
This class implements an abstract base type for all tasks.
Tasks provide the following signals:
- **entered**: called when the state changes to READY or WAITING, at a
time where spec data is not yet initialized.
- **reached**: called when the state changes to READY or WAITING, at a
time where spec data is already initialized using data_assign
and pre-assign.
- **ready**: called when the state changes to READY, at a time where
spec data is already initialized using data_assign and
pre-assign.
- **completed**: called when the state changes to COMPLETED, at a time
before the post-assign variables are assigned.
- **cancelled**: called when the state changes to CANCELLED, at a time
before the post-assign variables are assigned.
- **finished**: called when the state changes to COMPLETED or CANCELLED,
at the last possible time after the post-assign variables are
assigned and mutexes are released.
Event sequence is: entered -> reached -> ready -> completed -> finished
(cancelled may happen at any time)
The only events where implementing something other than state tracking
may be useful are the following:
- Reached: You could mess with the pre-assign variables here, for
      example. Other than that, there is probably no need in a real
application.
- Ready: This is where a task could implement custom code, for example
for triggering an external system. This is also the only event where a
return value has a meaning (returning non-True will mean that the
post-assign procedure is skipped.)
"""
def __init__(self, parent, name, **kwargs):
"""
Constructor.
The difference between the assignment of a data value using
the data argument versus pre_assign and post_assign is that
changes made using data are task-local, i.e. they are
not visible to other tasks.
Similarly, "defines" are spec data fields that, once defined, can
no longer be modified.
:type parent: L{SpiffWorkflow.specs.WorkflowSpec}
:param parent: A reference to the parent (usually a workflow).
:type name: string
:param name: A name for the task.
:type lock: list(str)
:param lock: A list of mutex names. The mutex is acquired
on entry of execute() and released on leave of
execute().
:type data: dict((str, object))
:param data: name/value pairs
:type defines: dict((str, object))
:param defines: name/value pairs
:type pre_assign: list((str, object))
:param pre_assign: a list of name/value pairs
:type post_assign: list((str, object))
:param post_assign: a list of name/value pairs
"""
assert parent is not None
assert name is not None
self._parent = parent
self.id = None
self.name = str(name)
self.description = kwargs.get('description', '')
self.inputs = []
self.outputs = []
self.manual = False
self.internal = False # Only for easing debugging.
self.data = kwargs.get('data', {})
self.defines = kwargs.get('defines', {})
self.pre_assign = kwargs.get('pre_assign', [])
self.post_assign = kwargs.get('post_assign', [])
self.locks = kwargs.get('lock', [])
self.lookahead = 2 # Maximum number of MAYBE predictions.
# Events.
self.entered_event = Event()
self.reached_event = Event()
self.ready_event = Event()
self.completed_event = Event()
self.cancelled_event = Event()
self.finished_event = Event()
self._parent._add_notify(self)
self.data.update(self.defines)
assert self.id is not None
def _connect_notify(self, taskspec):
"""
Called by the previous task to let us know that it exists.
:type taskspec: TaskSpec
:param taskspec: The task by which this method is executed.
"""
self.inputs.append(taskspec)
def ancestors(self):
"""Returns list of ancestor task specs based on inputs"""
results = []
def recursive_find_ancestors(task, stack):
for input in task.inputs:
if input not in stack:
stack.append(input)
recursive_find_ancestors(input, stack)
recursive_find_ancestors(self, results)
return results
def _get_activated_tasks(self, my_task, destination):
"""
Returns the list of tasks that were activated in the previous
call of execute(). Only returns tasks that point towards the
destination task, i.e. those which have destination as a
descendant.
:type my_task: Task
:param my_task: The associated task in the task tree.
:type destination: Task
:param destination: The destination task.
"""
return my_task.children
def _get_activated_threads(self, my_task):
"""
Returns the list of threads that were activated in the previous
call of execute().
:type my_task: Task
:param my_task: The associated task in the task tree.
"""
return my_task.children
def set_data(self, **kwargs):
"""
Defines the given data field(s) using the given name/value pairs.
"""
for key in kwargs:
if key in self.defines:
msg = "Spec data %s can not be modified" % key
raise WorkflowException(self, msg)
self.data.update(kwargs)
def get_data(self, name, default=None):
"""
Returns the value of the data field with the given name, or the
given default value if the data was not defined.
:type name: string
:param name: The name of the data field.
:type default: string
:param default: Returned if the data field is not defined.
"""
return self.data.get(name, default)
def connect(self, taskspec):
"""
Connect the *following* task to this one. In other words, the
given task is added as an output task.
:type taskspec: TaskSpec
:param taskspec: The new output task.
"""
self.outputs.append(taskspec)
taskspec._connect_notify(self)
def follow(self, taskspec):
"""
Make this task follow the provided one. In other words, this task is
added to the given task outputs.
This is an alias to connect, just easier to understand when reading
code - ex: my_task.follow(the_other_task)
Adding it after being confused by .connect one times too many!
:type taskspec: TaskSpec
:param taskspec: The task to follow.
"""
taskspec.connect(self)
def test(self):
"""
Checks whether all required attributes are set. Throws an exception
if an error was detected.
"""
if self.id is None:
            raise WorkflowException(self, 'TaskSpec is not yet instantiated.')
if len(self.inputs) < 1:
raise WorkflowException(self, 'No input task connected.')
def _predict(self, my_task, seen=None, looked_ahead=0):
"""
Updates the branch such that all possible future routes are added.
Should NOT be overwritten! Instead, overwrite _predict_hook().
:type my_task: Task
:param my_task: The associated task in the task tree.
:type seen: list[taskspec]
:param seen: A list of already visited tasks.
:type looked_ahead: integer
:param looked_ahead: The depth of the predicted path so far.
"""
if my_task._is_finished():
return
if seen is None:
seen = []
elif self in seen:
return
if not my_task._is_finished():
self._predict_hook(my_task)
if not my_task._is_definite():
if looked_ahead + 1 >= self.lookahead:
return
seen.append(self)
for child in my_task.children:
child.task_spec._predict(child, seen[:], looked_ahead + 1)
def _predict_hook(self, my_task):
# If the task's status is not predicted, we default to FUTURE
# for all it's outputs.
# Otherwise, copy my own state to the children.
if my_task._is_definite():
best_state = Task.FUTURE
else:
best_state = my_task.state
my_task._sync_children(self.outputs, best_state)
for child in my_task.children:
if not child._is_definite():
child._set_state(best_state)
def _update_state(self, my_task):
"""
Called whenever any event happens that may affect the
state of this task in the workflow. For example, if a predecessor
completes it makes sure to call this method so we can react.
"""
my_task._inherit_data()
self._update_state_hook(my_task)
def _update_state_hook(self, my_task):
"""
Typically this method should perform the following actions::
- Update the state of the corresponding task.
- Update the predictions for its successors.
Returning non-False will cause the task to go into READY.
Returning any other value will cause no action.
"""
if my_task._is_predicted():
self._predict(my_task)
LOG.debug("'%s'._update_state_hook says parent (%s, state=%s) "
"is_finished=%s" % (self.name, my_task.parent.get_name(),
my_task.parent.get_state_name(),
my_task.parent._is_finished()))
if not my_task.parent._is_finished():
return
self.entered_event.emit(my_task.workflow, my_task)
my_task._ready()
def _on_ready(self, my_task):
"""
Return True on success, False otherwise.
:type my_task: Task
:param my_task: The associated task in the task tree.
"""
assert my_task is not None
self.test()
# Acquire locks, if any.
for lock in self.locks:
mutex = my_task.workflow._get_mutex(lock)
if not mutex.testandset():
return
# Assign variables, if so requested.
for assignment in self.pre_assign:
assignment.assign(my_task, my_task)
# Run task-specific code.
self._on_ready_before_hook(my_task)
self.reached_event.emit(my_task.workflow, my_task)
self._on_ready_hook(my_task)
# Run user code, if any.
if self.ready_event.emit(my_task.workflow, my_task):
# Assign variables, if so requested.
for assignment in self.post_assign:
assignment.assign(my_task, my_task)
# Release locks, if any.
for lock in self.locks:
mutex = my_task.workflow._get_mutex(lock)
mutex.unlock()
self.finished_event.emit(my_task.workflow, my_task)
def _on_ready_before_hook(self, my_task):
"""
A hook into _on_ready() that does the task specific work.
:type my_task: Task
:param my_task: The associated task in the task tree.
"""
pass
def _on_ready_hook(self, my_task):
"""
A hook into _on_ready() that does the task specific work.
:type my_task: Task
:param my_task: The associated task in the task tree.
"""
pass
def _on_cancel(self, my_task):
"""
May be called by another task to cancel the operation before it was
completed.
:type my_task: Task
:param my_task: The associated task in the task tree.
"""
self.cancelled_event.emit(my_task.workflow, my_task)
def _on_trigger(self, my_task):
"""
May be called by another task to trigger a task-specific
event.
:type my_task: Task
:param my_task: The associated task in the task tree.
:rtype: boolean
:returns: True on success, False otherwise.
"""
raise NotImplementedError("Trigger not supported by this task.")
def _on_complete(self, my_task):
"""
Return True on success, False otherwise. Should not be overwritten,
overwrite _on_complete_hook() instead.
:type my_task: Task
:param my_task: The associated task in the task tree.
:rtype: boolean
:returns: True on success, False otherwise.
"""
assert my_task is not None
if my_task.workflow.debug:
print("Executing task:", my_task.get_name())
self._on_complete_hook(my_task)
# Notify the Workflow.
my_task.workflow._task_completed_notify(my_task)
if my_task.workflow.debug:
if hasattr(my_task.workflow, "outer_workflow"):
my_task.workflow.outer_workflow.task_tree.dump()
self.completed_event.emit(my_task.workflow, my_task)
return True
def _on_complete_hook(self, my_task):
"""
A hook into _on_complete() that does the task specific work.
:type my_task: Task
:param my_task: The associated task in the task tree.
:rtype: bool
:returns: True on success, False otherwise.
"""
# If we have more than one output, implicitly split.
for child in my_task.children:
child.task_spec._update_state(child)
def serialize(self, serializer, **kwargs):
"""
Serializes the instance using the provided serializer.
.. note::
The events of a TaskSpec are not serialized. If you
use them, make sure to re-connect them once the spec is
deserialized.
:type serializer: L{SpiffWorkflow.storage.Serializer}
:param serializer: The serializer to use.
:type kwargs: dict
:param kwargs: Passed to the serializer.
:rtype: object
:returns: The serialized object.
"""
return serializer._serialize_task_spec(self, **kwargs)
@classmethod
def deserialize(cls, serializer, wf_spec, s_state, **kwargs):
"""
Deserializes the instance using the provided serializer.
.. note::
The events of a TaskSpec are not serialized. If you
use them, make sure to re-connect them once the spec is
deserialized.
:type serializer: L{SpiffWorkflow.storage.Serializer}
:param serializer: The serializer to use.
:type wf_spec: L{SpiffWorkflow.spec.WorkflowSpec}
:param wf_spec: An instance of the WorkflowSpec.
:type s_state: object
:param s_state: The serialized task specification object.
:type kwargs: dict
:param kwargs: Passed to the serializer.
:rtype: TaskSpec
:returns: The task specification instance.
"""
instance = cls(wf_spec, s_state['name'])
return serializer._deserialize_task_spec(wf_spec,
s_state,
instance,
**kwargs)
| zetaops/SpiffWorkflow | SpiffWorkflow/specs/TaskSpec.py | Python | lgpl-3.0 | 16,586 |