text | repo_name | path | language | license | size | score
---|---|---|---|---|---|---
stringlengths 6–947k | stringlengths 5–100 | stringlengths 4–231 | stringclasses (1 value) | stringclasses (15 values) | int64 6–947k | float64 0–0.34
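The rows below follow this schema. As a quick orientation, here is a minimal sketch of iterating a dataset with these columns via the `datasets` library; the dataset identifier is a placeholder, not a real hub path.

# Sketch (assumption: the corpus is published on the Hugging Face Hub).
from datasets import load_dataset

ds = load_dataset("username/python-code-corpus", split="train", streaming=True)
for row in ds:
    # Each row carries: text, repo_name, path, language, license, size, score.
    print(row["repo_name"], row["path"], row["license"], row["score"])
    break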
"""Invalid Python comment test."""
// invalid python comment  # noqa -- intentionally invalid syntax; the test expects this action file to fail to parse
| duynguyen/incubator-openwhisk | tests/dat/actions/malformed.py | Python | apache-2.0 | 124 | 0 |
###
# Copyright (c) 2013, Nicolas Coevoet
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
from supybot.test import *
class ChanRegTestCase(PluginTestCase):
plugins = ('ChanReg',)
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
| ncoevoet/ChanReg | test.py | Python | mit | 1,737 | 0.000576 |
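PluginTestCase boots a supybot test harness with the listed plugins loaded and exposes assertion helpers such as assertNotError and assertResponse. A hedged sketch of what a concrete test method could look like; the command name is hypothetical, so substitute a real ChanReg command:

# Sketch only; not part of the ncoevoet/ChanReg repo.
class ChanRegSmokeTestCase(PluginTestCase):
    plugins = ('ChanReg',)

    def testPluginResponds(self):
        # Hypothetical command; assertNotError fails the test if the bot
        # replies with an error message.
        self.assertNotError('somechanregcommand')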
# Copyright (C) 2014 Universidad Politecnica de Madrid
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.shortcuts import redirect
from horizon import exceptions
from horizon import tables
from openstack_dashboard import api
from openstack_dashboard import fiware_api
from openstack_dashboard.dashboards.idm import utils as idm_utils
from openstack_dashboard.dashboards.idm.home import tables as home_tables
LOG = logging.getLogger('idm_logger')
class IndexView(tables.MultiTableView):
table_classes = (home_tables.OrganizationsTable,
home_tables.ApplicationsTable)
template_name = 'idm/home/index.html'
def dispatch(self, request, *args, **kwargs):
if request.organization.id != request.user.default_project_id:
return redirect("/idm/home_orgs/")
return super(IndexView, self).dispatch(request, *args, **kwargs)
def has_more_data(self, table):
return False
def get_organizations_data(self):
organizations = []
# try:
# organizations = fiware_api.keystone.project_list(
# self.request,
# user=self.request.user.id)
# switchable_organizations = [org.id for org
# in self.request.organizations]
# organizations = sorted(organizations, key=lambda x: x.name.lower())
# for org in organizations:
# if org.id in switchable_organizations:
# setattr(org, 'switchable', True)
# except Exception:
# exceptions.handle(self.request,
# ("Unable to retrieve organization list."))
return idm_utils.filter_default(organizations)
def get_applications_data(self):
applications = []
# try:
# # TODO(garcianavalon) extract to fiware_api
# all_apps = fiware_api.keystone.application_list(self.request)
# apps_with_roles = [a.application_id for a
# in fiware_api.keystone.user_role_assignments(
# self.request,
# user=self.request.user.id,
# organization=self.request.organization)]
# applications = [app for app in all_apps
# if app.id in apps_with_roles]
# applications = sorted(applications, key=lambda x: x.name.lower())
# except Exception:
# exceptions.handle(self.request,
# ("Unable to retrieve application list."))
return idm_utils.filter_default(applications)
| ging/horizon | openstack_dashboard/dashboards/idm/home/views.py | Python | apache-2.0 | 3,184 | 0.000942 |
#-------------------------------------------------------------------------------
# Name: Largest Product in a Series
# Purpose: The four adjacent digits in the 1000-digit number (provided in
# textfile 'q008.txt') that have the greatest product are
# 9 x 9 x 8 x 9 = 5832. Find the thirteen adjacent digits in the
# 1000-digit number that have the greatest product. What is the
# value of this product?
# Answer: 23514624000
# Author: Alex Adusei
#-------------------------------------------------------------------------------
# Helper function to read the large number from the textfile
def readNums():
    nums = []
    with open("q008.txt") as f:
        for line in f:
            line = line.strip()
            for ch in line:
                nums += [int(ch)]
    return nums

numbers = readNums()
maxProduct = 0
DIGITS = 13

# Slide a DIGITS-wide window over the digits; the last valid window starts
# at index len(numbers) - DIGITS, so the range must include that index.
for i in range(len(numbers) - DIGITS + 1):
    product = 1
    for k in range(DIGITS):
        product *= numbers[i + k]
    if product > maxProduct:
        maxProduct = product

print maxProduct
| alexadusei/ProjectEuler | q008.py | Python | mit | 1,143 | 0.006999 |
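For comparison, the same sliding-window maximum product can be written compactly in modern Python. This is a sketch, not part of the repo, and assumes `digits` is the list produced by readNums():

# Python 3 sketch of the same computation.
from functools import reduce
from operator import mul

def max_window_product(digits, width=13):
    # Best product over every contiguous window of `width` digits.
    return max(
        reduce(mul, digits[i:i + width], 1)
        for i in range(len(digits) - width + 1)
    )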
from flask_pymongo import PyMongo
from flask_cors import CORS
mongo = PyMongo()
cors = CORS()
| TornikeNatsvlishvili/skivri.ge | backend/backend/extensions.py | Python | mit | 94 | 0.010638 |
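These module-level instances follow Flask's application-factory pattern: create each extension once, bind it to an app later. A minimal usage sketch; the factory and the Mongo URI are assumptions, not part of the repo:

# Sketch: binding the shared extensions inside an assumed app factory.
from flask import Flask
from backend.extensions import mongo, cors

def create_app():
    app = Flask(__name__)
    app.config["MONGO_URI"] = "mongodb://localhost:27017/skivri"  # hypothetical
    mongo.init_app(app)  # PyMongo reads MONGO_URI from app.config
    cors.init_app(app)   # enable CORS with default settings
    return app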
import os
import sys
import logging
from functools import wraps
import celery
from celery.signals import celeryd_after_setup
from kombu import Queue
from django.conf import settings
from munch.core.utils import get_worker_types
log = logging.getLogger('munch')
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'munch.settings')
def catch_exception(f):
@wraps(f)
def wrapper(*args, **kwargs):
try:
f(*args, **kwargs)
except Exception as err:
sys.stderr.write(str(err))
raise
return wrapper
class Celery(celery.Celery):
def on_configure(self):
if hasattr(settings, 'RAVEN_CONFIG'):
from raven import Client
from raven.contrib.celery import register_signal
from raven.contrib.celery import register_logger_signal
client = Client(settings.RAVEN_CONFIG.get('dsn'))
register_logger_signal(client)
register_signal(client)
class CeleryRouteMap(object):
def __init__(self, app):
self.app = app
self.exchange = settings.CELERY_DEFAULT_EXCHANGE
self.exchange_type = settings.CELERY_DEFAULT_EXCHANGE_TYPE
self.queues = {
'default': {
'name': settings.CELERY_DEFAULT_QUEUE,
'routing_key': settings.CELERY_DEFAULT_ROUTING_KEY
}
}
self.routes = {}
def add_queue(self, worker_type, queue):
self.app.amqp.queues.add(
Queue(
queue, routing_key='{}.#'.format(queue),
queue_arguments={'x-max-priority': 100}
)
)
self.queues.update({
worker_type: {'name': queue, 'routing_key': queue}})
log.debug('Added queue {} for {} workers'.format(
queue, worker_type.upper()))
def register_route(self, task, worker_type, munch_app):
if worker_type not in self.queues:
raise ValueError(
'Can not register celery route. '
'No queue defined for worker_type {}'.format(
worker_type.upper()))
self.routes.update({task: {'worker': worker_type, 'key': munch_app}})
log.debug(
'Registered route for {} on {} workers'.format(
task, worker_type.upper()))
def import_tasks_map(self, tasks_map, munch_app):
for worker, tasks in tasks_map.items():
for task in tasks:
self.register_route(task, worker, munch_app)
def lookup_route(self, task):
if task in self.routes:
worker = self.routes.get(task)['worker']
key = self.routes.get(task)['key']
queue = self.queues.get(worker)
return {
'queue': queue['name'],
'exchange': self.exchange,
'exchange_type': self.exchange_type,
'routing_key': '{}.{}'.format(queue['routing_key'], key)
}
return None
def register_to_queue(self, queue):
self.app.amqp.queues.select_add(
queue, routing_key='{}.#'.format(queue),
queue_arguments={'x-max-priority': 100})
def register_as_worker(self, worker_type):
if worker_type not in self.queues:
raise ValueError(
'Can not register as worker {}. '
'No queue defined for this worker_type'.format(
worker_type.upper()))
self.register_to_queue(self.queues[worker_type]['name'])
def get_queue_for(self, worker_type):
return self.queues.get(worker_type, 'default')['name']
def get_workers_map(self):
workers_map = {}
for k, v in self.routes.items():
workers_map.setdefault(v['worker'], []).append(k)
return workers_map
class CeleryRouter(object):
def route_for_task(self, task, args=None, kwargs=None):
return munch_tasks_router.lookup_route(task)
# Celery App initialization
app = Celery('munch', broker=settings.BROKER_URL)
app.config_from_object('django.conf:settings')
munch_tasks_router = CeleryRouteMap(app)
# Queues, Tasks and worker registration methods for munch.core
def add_queues():
munch_tasks_router.add_queue('core', 'munch.core')
munch_tasks_router.add_queue('status', 'munch.status')
munch_tasks_router.add_queue('gc', 'munch.gc')
def register_tasks():
tasks_map = {
'gc': ['munch.core.mail.tasks.purge_raw_mail']
}
munch_tasks_router.import_tasks_map(tasks_map, 'munch')
@celeryd_after_setup.connect
@catch_exception
def configure_worker(instance, **kwargs):
if any([t in get_worker_types() for t in ['gc', 'all']]):
from .mail.tasks import purge_raw_mail # noqa
sys.stdout.write(
'[core-app] Registering worker as GARBAGE COLLECTOR...')
munch_tasks_router.register_as_worker('gc')
| crunchmail/munch-core | src/munch/core/celery.py | Python | agpl-3.0 | 4,888 | 0 |
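Tracing the definitions above: register_tasks() maps purge_raw_mail to the 'gc' worker with key 'munch', and add_queues() names the 'gc' queue 'munch.gc', so a lookup resolves as sketched below (the exchange values come from Django settings):

# Sketch: route resolution after add_queues() and register_tasks() run.
add_queues()
register_tasks()
route = munch_tasks_router.lookup_route('munch.core.mail.tasks.purge_raw_mail')
# route == {'queue': 'munch.gc',
#           'exchange': settings.CELERY_DEFAULT_EXCHANGE,
#           'exchange_type': settings.CELERY_DEFAULT_EXCHANGE_TYPE,
#           'routing_key': 'munch.gc.munch'}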
# Copyright (c) LinkedIn Corporation. All rights reserved. Licensed under the BSD-2 Clause license.
# See LICENSE in the project root for license information.
from iris import db
from iris.role_lookup import IrisRoleLookupException
import logging
logger = logging.getLogger(__name__)
class mailing_list(object):
def __init__(self, config):
self.max_list_names = config.get('ldap_lists', {}).get('max_unrolled_users', 0)
def get(self, role, target):
if role == 'mailing-list':
return self.unroll_mailing_list(target)
else:
return None
def unroll_mailing_list(self, list_name):
connection = db.engine.raw_connection()
cursor = connection.cursor()
cursor.execute('''
SELECT `mailing_list`.`target_id`,
`mailing_list`.`count`
FROM `mailing_list`
JOIN `target` on `target`.`id` = `mailing_list`.`target_id`
WHERE `target`.`name` = %s
''', list_name)
list_info = cursor.fetchone()
if not list_info:
logger.warn('Invalid mailing list %s', list_name)
cursor.close()
connection.close()
return None
list_id, list_count = list_info
if self.max_list_names > 0 and list_count >= self.max_list_names:
logger.warn('Not returning any results for list group %s as it contains too many members (%s > %s)',
list_name, list_count, self.max_list_names)
cursor.close()
connection.close()
raise IrisRoleLookupException('List %s contains too many members to safely expand (%s >= %s)' % (list_name, list_count, self.max_list_names))
cursor.execute('''SELECT `target`.`name`
FROM `mailing_list_membership`
JOIN `target` on `target`.`id` = `mailing_list_membership`.`user_id`
WHERE `mailing_list_membership`.`list_id` = %s''', [list_id])
names = [row[0] for row in cursor]
cursor.close()
connection.close()
logger.info('Unfurled %s people from list %s', len(names), list_name)
return names
| dwang159/iris-api | src/iris/role_lookup/mailing_list.py | Python | bsd-2-clause | 2,203 | 0.002724 |
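Iris drives role lookups through classes like this one: get() returns None for roles it does not handle, a list of usernames on success, and raises IrisRoleLookupException for oversized lists. A hedged sketch of direct use; the config shape mirrors __init__, the list name is hypothetical, and iris must already have initialised its db module:

# Sketch only; normally iris instantiates and calls this itself.
config = {'ldap_lists': {'max_unrolled_users': 200}}
lookup = mailing_list(config)
members = lookup.get('mailing-list', 'eng-all')  # hypothetical list name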
#! /usr/bin/python2.7
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import requests
from requests_oauthlib import OAuth1
from urlparse import parse_qs
REQUEST_TOKEN_URL = "https://api.twitter.com/oauth/request_token"
AUTHORIZE_URL = "https://api.twitter.com/oauth/authorize?oauth_token="
ACCESS_TOKEN_URL = "https://api.twitter.com/oauth/access_token"
def setup_oauth1(keys):
"""Authorize your app via identifier."""
# Request token
oauth = OAuth1(keys['consumer_key'], client_secret=keys['consumer_secret'])
r = requests.post(url=REQUEST_TOKEN_URL, auth=oauth)
credentials = parse_qs(r.content)
resource_owner_key = credentials.get('oauth_token')[0]
resource_owner_secret = credentials.get('oauth_token_secret')[0]
# Authorize
authorize_url = AUTHORIZE_URL + resource_owner_key
print 'Please go here and authorize: ' + authorize_url
verifier = raw_input('Please input the verifier: ')
oauth = OAuth1(keys['consumer_key'],
client_secret=keys['consumer_secret'],
resource_owner_key=resource_owner_key,
resource_owner_secret=resource_owner_secret,
verifier=verifier)
# Finally, obtain the Access Token
r = requests.post(url=ACCESS_TOKEN_URL, auth=oauth)
credentials = parse_qs(r.content)
token = credentials.get('oauth_token')[0]
secret = credentials.get('oauth_token_secret')[0]
return token, secret
def get_oauth1(keys):
if not keys['oauth_token']:
keys['oauth_token'], keys['oauth_token_secret']\
= setup_oauth1(keys)
print '\nInput the keys below to twitter/settings.py'
import pprint; pprint.pprint(keys)
import sys; sys.exit()
oauth = OAuth1(keys['consumer_key'],
client_secret=keys['consumer_secret'],
resource_owner_key=keys['oauth_token'],
resource_owner_secret=keys['oauth_token_secret'])
return oauth
| teampopong/crawlers | twitter/setup.py | Python | agpl-3.0 | 1,997 | 0.002504 |
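The helpers expect a four-field keys dict (the script itself says to paste the printed tokens into twitter/settings.py after the first run). A sketch with placeholder values:

# Sketch: shape of `keys` plus an authenticated call (Python 2, like the script).
keys = {
    'consumer_key': 'PLACEHOLDER',
    'consumer_secret': 'PLACEHOLDER',
    'oauth_token': '',         # empty on first run: get_oauth1() runs the
    'oauth_token_secret': '',  # setup_oauth1() flow, prints both, and exits
}
auth = get_oauth1(keys)
r = requests.get('https://api.twitter.com/1.1/account/verify_credentials.json',
                 auth=auth)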
#!/usr/bin/env python
from setuptools import setup
setup(
name='MCP',
version='0.2',
author='Keegan Carruthers-Smith',
author_email='[email protected]',
url='https://github.com/keegancsmith/MCP',
license='BSD',
py_modules=['mcp'],
description='A program to orchestrate Entellect Challenge bot matches.',
long_description=file('README.rst').read(),
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development',
'Topic :: Utilities',
],
entry_points={'console_scripts': ['mcp = mcp:main']},
)
| keegancsmith/MCP | setup.py | Python | bsd-2-clause | 772 | 0 |
# returns the current PeopleSoft semester code, as of today
# if today is between semesters, returns the next semester code
import cx_Oracle
def getCurrentOrNextSemesterCX (self):
file = open('/opt/Plone-2.5.5/zeocluster/client1/Extensions/Oracle_Database_Connection_NGUYEN_PRD.txt', 'r')
for line in file.readlines():
if line <> "" and not line.startswith('#'):
connString = line
file.close()
connection = cx_Oracle.connect(connString)
cursor = connection.cursor()
# get the current semester code if we are within a semester
cursor.execute("""select strm from ps_term_tbl where institution = 'UWOSH' and acad_career = 'UGRD' and term_begin_dt <= sysdate and term_end_dt >= sysdate""")
for column_1 in cursor:
try:
return column_1[0]
except:
pass
# otherwise get the next semester code
cursor.execute("""select t1.strm, t1.descr from ps_term_tbl t1 where t1.institution = 'UWOSH' and t1.acad_career = 'UGRD' and t1.term_begin_dt = (select min(term_begin_dt) from ps_term_tbl t2 where t2.institution = t1.institution and t2.acad_career = t1.acad_career and term_begin_dt > sysdate)""")
for column_1 in cursor:
try:
return column_1[0]
except:
pass
| uwosh/Campus_Directory_web_service | getCurrentOrNextSemesterCX.py | Python | gpl-2.0 | 1,292 | 0.006192 |
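The iterate-and-return-inside-try pattern above works but hides the "no rows" case. A sketch of the same two-step lookup using fetchone(), with the connection passed in; the queries and semantics are unchanged assumptions from the function above:

# Sketch only; not part of the uwosh repo.
def current_or_next_semester(connection):
    cursor = connection.cursor()
    # Current semester, if today falls inside one.
    cursor.execute("""select strm from ps_term_tbl
                      where institution = 'UWOSH' and acad_career = 'UGRD'
                        and term_begin_dt <= sysdate and term_end_dt >= sysdate""")
    row = cursor.fetchone()
    if row is None:
        # Between semesters: the term with the earliest future start date.
        cursor.execute("""select t1.strm from ps_term_tbl t1
                          where t1.institution = 'UWOSH' and t1.acad_career = 'UGRD'
                            and t1.term_begin_dt = (select min(term_begin_dt)
                                                    from ps_term_tbl t2
                                                    where t2.institution = t1.institution
                                                      and t2.acad_career = t1.acad_career
                                                      and term_begin_dt > sysdate)""")
        row = cursor.fetchone()
    cursor.close()
    return row[0] if row else None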
#!/usr/bin/env python
import warnings
# Dropping a table inexplicably produces a warning despite
# the "IF EXISTS" clause. Squelch these warnings.
warnings.simplefilter('ignore')
import logging
import unittest
import environment
import tablet
import utils
use_mysqlctld = True
tablet_master = tablet.Tablet(use_mysqlctld=use_mysqlctld)
tablet_replica1 = tablet.Tablet(use_mysqlctld=use_mysqlctld)
tablet_replica2 = tablet.Tablet(use_mysqlctld=use_mysqlctld)
setup_procs = []
def setUpModule():
try:
environment.topo_server().setup()
# start mysql instance external to the test
global setup_procs
setup_procs = [
tablet_master.init_mysql(),
tablet_replica1.init_mysql(),
tablet_replica2.init_mysql(),
]
if use_mysqlctld:
tablet_master.wait_for_mysqlctl_socket()
tablet_replica1.wait_for_mysqlctl_socket()
tablet_replica2.wait_for_mysqlctl_socket()
else:
utils.wait_procs(setup_procs)
except:
tearDownModule()
raise
def tearDownModule():
if utils.options.skip_teardown:
return
if use_mysqlctld:
# Try to terminate mysqlctld gracefully, so it kills its mysqld.
for proc in setup_procs:
utils.kill_sub_process(proc, soft=True)
teardown_procs = setup_procs
else:
teardown_procs = [
tablet_master.teardown_mysql(),
tablet_replica1.teardown_mysql(),
tablet_replica2.teardown_mysql(),
]
utils.wait_procs(teardown_procs, raise_on_error=False)
environment.topo_server().teardown()
utils.kill_sub_processes()
utils.remove_tmp_files()
tablet_master.remove_tree()
tablet_replica1.remove_tree()
tablet_replica2.remove_tree()
class TestBackup(unittest.TestCase):
def tearDown(self):
tablet.Tablet.check_vttablet_count()
environment.topo_server().wipe()
for t in [tablet_master, tablet_replica1, tablet_replica2]:
t.reset_replication()
t.clean_dbs()
_create_vt_insert_test = '''create table vt_insert_test (
id bigint auto_increment,
msg varchar(64),
primary key (id)
) Engine=InnoDB'''
def _insert_master(self, index):
tablet_master.mquery(
'vt_test_keyspace',
"insert into vt_insert_test (msg) values ('test %s')" %
index, write=True)
def test_backup(self):
"""Test backup flow.
test_backup will:
- create a shard with master and replica1 only
- run InitShardMaster
- insert some data
- take a backup
- insert more data on the master
- bring up tablet_replica2 after the fact, let it restore the backup
- check all data is right (before+after backup data)
- list the backup, remove it
"""
for t in tablet_master, tablet_replica1:
t.create_db('vt_test_keyspace')
tablet_master.init_tablet('master', 'test_keyspace', '0', start=True,
supports_backups=True)
tablet_replica1.init_tablet('replica', 'test_keyspace', '0', start=True,
supports_backups=True)
utils.run_vtctl(['InitShardMaster', 'test_keyspace/0',
tablet_master.tablet_alias])
# insert data on master, wait for slave to get it
tablet_master.mquery('vt_test_keyspace', self._create_vt_insert_test)
self._insert_master(1)
timeout = 10
while True:
try:
result = tablet_replica1.mquery(
'vt_test_keyspace', 'select count(*) from vt_insert_test')
if result[0][0] == 1:
break
except:
# ignore exceptions, we'll just timeout (the tablet creation
# can take some time to replicate, and we get a 'table vt_insert_test
# does not exist exception in some rare cases)
logging.exception('exception waiting for data to replicate')
timeout = utils.wait_step('slave tablet getting data', timeout)
# backup the slave
utils.run_vtctl(['Backup', tablet_replica1.tablet_alias], auto_log=True)
# insert more data on the master
self._insert_master(2)
# now bring up the other slave, health check on, init_tablet on, restore on
tablet_replica2.start_vttablet(wait_for_state='SERVING',
target_tablet_type='replica',
init_keyspace='test_keyspace',
init_shard='0',
supports_backups=True)
# check the new slave has the data
timeout = 10
while True:
result = tablet_replica2.mquery(
'vt_test_keyspace', 'select count(*) from vt_insert_test')
if result[0][0] == 2:
break
timeout = utils.wait_step('new slave tablet getting data', timeout)
# list the backups
backups, _ = utils.run_vtctl(tablet.get_backup_storage_flags() +
['ListBackups', 'test_keyspace/0'],
mode=utils.VTCTL_VTCTL, trap_output=True)
backups = backups.splitlines()
logging.debug('list of backups: %s', backups)
self.assertEqual(len(backups), 1)
self.assertTrue(backups[0].endswith(tablet_replica1.tablet_alias))
# remove the backup
utils.run_vtctl(
tablet.get_backup_storage_flags() +
['RemoveBackup', 'test_keyspace/0', backups[0]],
auto_log=True, mode=utils.VTCTL_VTCTL)
# make sure the list of backups is empty now
backups, err = utils.run_vtctl(tablet.get_backup_storage_flags() +
['ListBackups', 'test_keyspace/0'],
mode=utils.VTCTL_VTCTL, trap_output=True)
backups = backups.splitlines()
logging.debug('list of backups after remove: %s', backups)
self.assertEqual(len(backups), 0)
for t in tablet_master, tablet_replica1, tablet_replica2:
t.kill_vttablet()
if __name__ == '__main__':
utils.main()
| yaoshengzhe/vitess | test/backup.py | Python | bsd-3-clause | 5,848 | 0.007011 |
from __future__ import unicode_literals
import ctypes
import json
import random
from binascii import a2b_hex, b2a_hex
from io import BytesIO
from unittest import skipUnless
from django.contrib.gis import gdal
from django.contrib.gis.geos import (
HAS_GEOS, GeometryCollection, GEOSException, GEOSGeometry, LinearRing,
LineString, MultiLineString, MultiPoint, MultiPolygon, Point, Polygon,
fromfile, fromstr,
)
from django.contrib.gis.geos.libgeos import geos_version_info
from django.contrib.gis.shortcuts import numpy
from django.template import Context
from django.template.engine import Engine
from django.test import SimpleTestCase, ignore_warnings, mock
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_bytes
from django.utils.six.moves import range
from ..test_data import TestDataMixin
@skipUnless(HAS_GEOS, "Geos is required.")
class GEOSTest(SimpleTestCase, TestDataMixin):
def test_wkt(self):
"Testing WKT output."
for g in self.geometries.wkt_out:
geom = fromstr(g.wkt)
if geom.hasz:
self.assertEqual(g.ewkt, geom.wkt)
def test_hex(self):
"Testing HEX output."
for g in self.geometries.hex_wkt:
geom = fromstr(g.wkt)
self.assertEqual(g.hex, geom.hex.decode())
def test_hexewkb(self):
"Testing (HEX)EWKB output."
# For testing HEX(EWKB).
ogc_hex = b'01010000000000000000000000000000000000F03F'
ogc_hex_3d = b'01010000800000000000000000000000000000F03F0000000000000040'
# `SELECT ST_AsHEXEWKB(ST_GeomFromText('POINT(0 1)', 4326));`
hexewkb_2d = b'0101000020E61000000000000000000000000000000000F03F'
# `SELECT ST_AsHEXEWKB(ST_GeomFromEWKT('SRID=4326;POINT(0 1 2)'));`
hexewkb_3d = b'01010000A0E61000000000000000000000000000000000F03F0000000000000040'
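        # Decoding hexewkb_2d byte by byte (little-endian WKB):
        #   01       -> byte-order flag (1 = little-endian)
        #   01000020 -> uint32 0x20000001: geometry type 1 (Point) | 0x20000000 SRID flag
        #   E6100000 -> uint32 0x000010E6 = SRID 4326
        #   then two float64 values (x, y). The 3D variant's type word is
        #   0xA0000001, which also sets the 0x80000000 Z flag and appends a z float64.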
pnt_2d = Point(0, 1, srid=4326)
pnt_3d = Point(0, 1, 2, srid=4326)
# OGC-compliant HEX will not have SRID value.
self.assertEqual(ogc_hex, pnt_2d.hex)
self.assertEqual(ogc_hex_3d, pnt_3d.hex)
# HEXEWKB should be appropriate for its dimension -- have to use an
# a WKBWriter w/dimension set accordingly, else GEOS will insert
# garbage into 3D coordinate if there is none.
self.assertEqual(hexewkb_2d, pnt_2d.hexewkb)
self.assertEqual(hexewkb_3d, pnt_3d.hexewkb)
self.assertIs(GEOSGeometry(hexewkb_3d).hasz, True)
# Same for EWKB.
self.assertEqual(six.memoryview(a2b_hex(hexewkb_2d)), pnt_2d.ewkb)
self.assertEqual(six.memoryview(a2b_hex(hexewkb_3d)), pnt_3d.ewkb)
# Redundant sanity check.
self.assertEqual(4326, GEOSGeometry(hexewkb_2d).srid)
def test_kml(self):
"Testing KML output."
for tg in self.geometries.wkt_out:
geom = fromstr(tg.wkt)
kml = getattr(tg, 'kml', False)
if kml:
self.assertEqual(kml, geom.kml)
def test_errors(self):
"Testing the Error handlers."
# string-based
for err in self.geometries.errors:
with self.assertRaises((GEOSException, ValueError)):
fromstr(err.wkt)
# Bad WKB
with self.assertRaises(GEOSException):
GEOSGeometry(six.memoryview(b'0'))
class NotAGeometry(object):
pass
# Some other object
with self.assertRaises(TypeError):
GEOSGeometry(NotAGeometry())
# None
with self.assertRaises(TypeError):
GEOSGeometry(None)
def test_wkb(self):
"Testing WKB output."
for g in self.geometries.hex_wkt:
geom = fromstr(g.wkt)
wkb = geom.wkb
self.assertEqual(b2a_hex(wkb).decode().upper(), g.hex)
def test_create_hex(self):
"Testing creation from HEX."
for g in self.geometries.hex_wkt:
geom_h = GEOSGeometry(g.hex)
# we need to do this so decimal places get normalized
geom_t = fromstr(g.wkt)
self.assertEqual(geom_t.wkt, geom_h.wkt)
def test_create_wkb(self):
"Testing creation from WKB."
for g in self.geometries.hex_wkt:
wkb = six.memoryview(a2b_hex(g.hex.encode()))
geom_h = GEOSGeometry(wkb)
# we need to do this so decimal places get normalized
geom_t = fromstr(g.wkt)
self.assertEqual(geom_t.wkt, geom_h.wkt)
def test_ewkt(self):
"Testing EWKT."
srids = (-1, 32140)
for srid in srids:
for p in self.geometries.polygons:
ewkt = 'SRID=%d;%s' % (srid, p.wkt)
poly = fromstr(ewkt)
self.assertEqual(srid, poly.srid)
self.assertEqual(srid, poly.shell.srid)
self.assertEqual(srid, fromstr(poly.ewkt).srid) # Checking export
def test_json(self):
"Testing GeoJSON input/output (via GDAL)."
for g in self.geometries.json_geoms:
geom = GEOSGeometry(g.wkt)
if not hasattr(g, 'not_equal'):
# Loading jsons to prevent decimal differences
self.assertEqual(json.loads(g.json), json.loads(geom.json))
self.assertEqual(json.loads(g.json), json.loads(geom.geojson))
self.assertEqual(GEOSGeometry(g.wkt), GEOSGeometry(geom.json))
def test_fromfile(self):
"Testing the fromfile() factory."
ref_pnt = GEOSGeometry('POINT(5 23)')
wkt_f = BytesIO()
wkt_f.write(force_bytes(ref_pnt.wkt))
wkb_f = BytesIO()
wkb_f.write(bytes(ref_pnt.wkb))
# Other tests use `fromfile()` on string filenames so those
# aren't tested here.
for fh in (wkt_f, wkb_f):
fh.seek(0)
pnt = fromfile(fh)
self.assertEqual(ref_pnt, pnt)
def test_eq(self):
"Testing equivalence."
p = fromstr('POINT(5 23)')
self.assertEqual(p, p.wkt)
self.assertNotEqual(p, 'foo')
ls = fromstr('LINESTRING(0 0, 1 1, 5 5)')
self.assertEqual(ls, ls.wkt)
self.assertNotEqual(p, 'bar')
# Error shouldn't be raise on equivalence testing with
# an invalid type.
for g in (p, ls):
self.assertNotEqual(g, None)
self.assertNotEqual(g, {'foo': 'bar'})
self.assertNotEqual(g, False)
def test_eq_with_srid(self):
"Testing non-equivalence with different srids."
p0 = Point(5, 23)
p1 = Point(5, 23, srid=4326)
p2 = Point(5, 23, srid=32632)
# GEOS
self.assertNotEqual(p0, p1)
self.assertNotEqual(p1, p2)
# EWKT
self.assertNotEqual(p0, p1.ewkt)
self.assertNotEqual(p1, p0.ewkt)
self.assertNotEqual(p1, p2.ewkt)
# Equivalence with matching SRIDs
self.assertEqual(p2, p2)
self.assertEqual(p2, p2.ewkt)
# WKT contains no SRID so will not equal
self.assertNotEqual(p2, p2.wkt)
# SRID of 0
self.assertEqual(p0, 'SRID=0;POINT (5 23)')
self.assertNotEqual(p1, 'SRID=0;POINT (5 23)')
def test_points(self):
"Testing Point objects."
prev = fromstr('POINT(0 0)')
for p in self.geometries.points:
# Creating the point from the WKT
pnt = fromstr(p.wkt)
self.assertEqual(pnt.geom_type, 'Point')
self.assertEqual(pnt.geom_typeid, 0)
self.assertEqual(pnt.dims, 0)
self.assertEqual(p.x, pnt.x)
self.assertEqual(p.y, pnt.y)
self.assertEqual(pnt, fromstr(p.wkt))
self.assertEqual(False, pnt == prev) # Use assertEqual to test __eq__
# Making sure that the point's X, Y components are what we expect
self.assertAlmostEqual(p.x, pnt.tuple[0], 9)
self.assertAlmostEqual(p.y, pnt.tuple[1], 9)
# Testing the third dimension, and getting the tuple arguments
if hasattr(p, 'z'):
self.assertIs(pnt.hasz, True)
self.assertEqual(p.z, pnt.z)
                self.assertAlmostEqual(p.z, pnt.tuple[2], 9)
tup_args = (p.x, p.y, p.z)
set_tup1 = (2.71, 3.14, 5.23)
set_tup2 = (5.23, 2.71, 3.14)
else:
self.assertIs(pnt.hasz, False)
self.assertIsNone(pnt.z)
tup_args = (p.x, p.y)
set_tup1 = (2.71, 3.14)
set_tup2 = (3.14, 2.71)
# Centroid operation on point should be point itself
self.assertEqual(p.centroid, pnt.centroid.tuple)
# Now testing the different constructors
pnt2 = Point(tup_args) # e.g., Point((1, 2))
pnt3 = Point(*tup_args) # e.g., Point(1, 2)
self.assertEqual(pnt, pnt2)
self.assertEqual(pnt, pnt3)
# Now testing setting the x and y
pnt.y = 3.14
pnt.x = 2.71
self.assertEqual(3.14, pnt.y)
self.assertEqual(2.71, pnt.x)
# Setting via the tuple/coords property
pnt.tuple = set_tup1
self.assertEqual(set_tup1, pnt.tuple)
pnt.coords = set_tup2
self.assertEqual(set_tup2, pnt.coords)
prev = pnt # setting the previous geometry
def test_multipoints(self):
"Testing MultiPoint objects."
for mp in self.geometries.multipoints:
mpnt = fromstr(mp.wkt)
self.assertEqual(mpnt.geom_type, 'MultiPoint')
self.assertEqual(mpnt.geom_typeid, 4)
self.assertEqual(mpnt.dims, 0)
self.assertAlmostEqual(mp.centroid[0], mpnt.centroid.tuple[0], 9)
self.assertAlmostEqual(mp.centroid[1], mpnt.centroid.tuple[1], 9)
with self.assertRaises(IndexError):
mpnt.__getitem__(len(mpnt))
self.assertEqual(mp.centroid, mpnt.centroid.tuple)
self.assertEqual(mp.coords, tuple(m.tuple for m in mpnt))
for p in mpnt:
self.assertEqual(p.geom_type, 'Point')
self.assertEqual(p.geom_typeid, 0)
self.assertIs(p.empty, False)
self.assertIs(p.valid, True)
def test_linestring(self):
"Testing LineString objects."
prev = fromstr('POINT(0 0)')
for l in self.geometries.linestrings:
ls = fromstr(l.wkt)
self.assertEqual(ls.geom_type, 'LineString')
self.assertEqual(ls.geom_typeid, 1)
self.assertEqual(ls.dims, 1)
self.assertIs(ls.empty, False)
self.assertIs(ls.ring, False)
if hasattr(l, 'centroid'):
self.assertEqual(l.centroid, ls.centroid.tuple)
if hasattr(l, 'tup'):
self.assertEqual(l.tup, ls.tuple)
self.assertEqual(ls, fromstr(l.wkt))
self.assertEqual(False, ls == prev) # Use assertEqual to test __eq__
with self.assertRaises(IndexError):
ls.__getitem__(len(ls))
prev = ls
# Creating a LineString from a tuple, list, and numpy array
self.assertEqual(ls, LineString(ls.tuple)) # tuple
self.assertEqual(ls, LineString(*ls.tuple)) # as individual arguments
self.assertEqual(ls, LineString([list(tup) for tup in ls.tuple])) # as list
# Point individual arguments
self.assertEqual(ls.wkt, LineString(*tuple(Point(tup) for tup in ls.tuple)).wkt)
if numpy:
self.assertEqual(ls, LineString(numpy.array(ls.tuple))) # as numpy array
with self.assertRaisesMessage(TypeError, 'Each coordinate should be a sequence (list or tuple)'):
LineString((0, 0))
with self.assertRaisesMessage(ValueError, 'LineString requires at least 2 points, got 1.'):
LineString([(0, 0)])
if numpy:
with self.assertRaisesMessage(ValueError, 'LineString requires at least 2 points, got 1.'):
LineString(numpy.array([(0, 0)]))
with mock.patch('django.contrib.gis.geos.linestring.numpy', False):
with self.assertRaisesMessage(TypeError, 'Invalid initialization input for LineStrings.'):
LineString('wrong input')
def test_multilinestring(self):
"Testing MultiLineString objects."
prev = fromstr('POINT(0 0)')
for l in self.geometries.multilinestrings:
ml = fromstr(l.wkt)
self.assertEqual(ml.geom_type, 'MultiLineString')
self.assertEqual(ml.geom_typeid, 5)
self.assertEqual(ml.dims, 1)
self.assertAlmostEqual(l.centroid[0], ml.centroid.x, 9)
self.assertAlmostEqual(l.centroid[1], ml.centroid.y, 9)
self.assertEqual(ml, fromstr(l.wkt))
self.assertEqual(False, ml == prev) # Use assertEqual to test __eq__
prev = ml
for ls in ml:
self.assertEqual(ls.geom_type, 'LineString')
self.assertEqual(ls.geom_typeid, 1)
self.assertIs(ls.empty, False)
with self.assertRaises(IndexError):
ml.__getitem__(len(ml))
self.assertEqual(ml.wkt, MultiLineString(*tuple(s.clone() for s in ml)).wkt)
self.assertEqual(ml, MultiLineString(*tuple(LineString(s.tuple) for s in ml)))
def test_linearring(self):
"Testing LinearRing objects."
for rr in self.geometries.linearrings:
lr = fromstr(rr.wkt)
self.assertEqual(lr.geom_type, 'LinearRing')
self.assertEqual(lr.geom_typeid, 2)
self.assertEqual(lr.dims, 1)
self.assertEqual(rr.n_p, len(lr))
self.assertIs(lr.valid, True)
self.assertIs(lr.empty, False)
# Creating a LinearRing from a tuple, list, and numpy array
self.assertEqual(lr, LinearRing(lr.tuple))
self.assertEqual(lr, LinearRing(*lr.tuple))
self.assertEqual(lr, LinearRing([list(tup) for tup in lr.tuple]))
if numpy:
self.assertEqual(lr, LinearRing(numpy.array(lr.tuple)))
with self.assertRaisesMessage(ValueError, 'LinearRing requires at least 4 points, got 3.'):
LinearRing((0, 0), (1, 1), (0, 0))
with self.assertRaisesMessage(ValueError, 'LinearRing requires at least 4 points, got 1.'):
LinearRing([(0, 0)])
if numpy:
with self.assertRaisesMessage(ValueError, 'LinearRing requires at least 4 points, got 1.'):
LinearRing(numpy.array([(0, 0)]))
def test_polygons_from_bbox(self):
"Testing `from_bbox` class method."
bbox = (-180, -90, 180, 90)
p = Polygon.from_bbox(bbox)
self.assertEqual(bbox, p.extent)
# Testing numerical precision
x = 3.14159265358979323
bbox = (0, 0, 1, x)
p = Polygon.from_bbox(bbox)
y = p.extent[-1]
self.assertEqual(format(x, '.13f'), format(y, '.13f'))
def test_polygons(self):
"Testing Polygon objects."
prev = fromstr('POINT(0 0)')
for p in self.geometries.polygons:
# Creating the Polygon, testing its properties.
poly = fromstr(p.wkt)
self.assertEqual(poly.geom_type, 'Polygon')
self.assertEqual(poly.geom_typeid, 3)
self.assertEqual(poly.dims, 2)
self.assertIs(poly.empty, False)
self.assertIs(poly.ring, False)
self.assertEqual(p.n_i, poly.num_interior_rings)
self.assertEqual(p.n_i + 1, len(poly)) # Testing __len__
self.assertEqual(p.n_p, poly.num_points)
# Area & Centroid
self.assertAlmostEqual(p.area, poly.area, 9)
self.assertAlmostEqual(p.centroid[0], poly.centroid.tuple[0], 9)
self.assertAlmostEqual(p.centroid[1], poly.centroid.tuple[1], 9)
# Testing the geometry equivalence
self.assertEqual(poly, fromstr(p.wkt))
# Should not be equal to previous geometry
self.assertEqual(False, poly == prev) # Use assertEqual to test __eq__
self.assertNotEqual(poly, prev) # Use assertNotEqual to test __ne__
# Testing the exterior ring
ring = poly.exterior_ring
self.assertEqual(ring.geom_type, 'LinearRing')
self.assertEqual(ring.geom_typeid, 2)
if p.ext_ring_cs:
self.assertEqual(p.ext_ring_cs, ring.tuple)
self.assertEqual(p.ext_ring_cs, poly[0].tuple) # Testing __getitem__
# Testing __getitem__ and __setitem__ on invalid indices
with self.assertRaises(IndexError):
poly.__getitem__(len(poly))
with self.assertRaises(IndexError):
poly.__setitem__(len(poly), False)
with self.assertRaises(IndexError):
poly.__getitem__(-1 * len(poly) - 1)
# Testing __iter__
for r in poly:
self.assertEqual(r.geom_type, 'LinearRing')
self.assertEqual(r.geom_typeid, 2)
# Testing polygon construction.
with self.assertRaises(TypeError):
Polygon(0, [1, 2, 3])
with self.assertRaises(TypeError):
Polygon('foo')
# Polygon(shell, (hole1, ... holeN))
rings = tuple(r for r in poly)
self.assertEqual(poly, Polygon(rings[0], rings[1:]))
# Polygon(shell_tuple, hole_tuple1, ... , hole_tupleN)
ring_tuples = tuple(r.tuple for r in poly)
self.assertEqual(poly, Polygon(*ring_tuples))
# Constructing with tuples of LinearRings.
self.assertEqual(poly.wkt, Polygon(*tuple(r for r in poly)).wkt)
self.assertEqual(poly.wkt, Polygon(*tuple(LinearRing(r.tuple) for r in poly)).wkt)
def test_polygons_templates(self):
# Accessing Polygon attributes in templates should work.
engine = Engine()
template = engine.from_string('{{ polygons.0.wkt }}')
polygons = [fromstr(p.wkt) for p in self.geometries.multipolygons[:2]]
content = template.render(Context({'polygons': polygons}))
self.assertIn('MULTIPOLYGON (((100', content)
def test_polygon_comparison(self):
p1 = Polygon(((0, 0), (0, 1), (1, 1), (1, 0), (0, 0)))
p2 = Polygon(((0, 0), (0, 1), (1, 0), (0, 0)))
self.assertGreater(p1, p2)
self.assertLess(p2, p1)
p3 = Polygon(((0, 0), (0, 1), (1, 1), (2, 0), (0, 0)))
p4 = Polygon(((0, 0), (0, 1), (2, 2), (1, 0), (0, 0)))
self.assertGreater(p4, p3)
self.assertLess(p3, p4)
def test_multipolygons(self):
"Testing MultiPolygon objects."
fromstr('POINT (0 0)')
for mp in self.geometries.multipolygons:
mpoly = fromstr(mp.wkt)
self.assertEqual(mpoly.geom_type, 'MultiPolygon')
self.assertEqual(mpoly.geom_typeid, 6)
self.assertEqual(mpoly.dims, 2)
self.assertEqual(mp.valid, mpoly.valid)
if mp.valid:
self.assertEqual(mp.num_geom, mpoly.num_geom)
self.assertEqual(mp.n_p, mpoly.num_coords)
self.assertEqual(mp.num_geom, len(mpoly))
with self.assertRaises(IndexError):
mpoly.__getitem__(len(mpoly))
for p in mpoly:
self.assertEqual(p.geom_type, 'Polygon')
self.assertEqual(p.geom_typeid, 3)
self.assertIs(p.valid, True)
self.assertEqual(mpoly.wkt, MultiPolygon(*tuple(poly.clone() for poly in mpoly)).wkt)
def test_memory_hijinks(self):
"Testing Geometry __del__() on rings and polygons."
# #### Memory issues with rings and poly
# These tests are needed to ensure sanity with writable geometries.
# Getting a polygon with interior rings, and pulling out the interior rings
poly = fromstr(self.geometries.polygons[1].wkt)
ring1 = poly[0]
ring2 = poly[1]
# These deletes should be 'harmless' since they are done on child geometries
del ring1
del ring2
ring1 = poly[0]
ring2 = poly[1]
# Deleting the polygon
del poly
# Access to these rings is OK since they are clones.
str(ring1)
str(ring2)
def test_coord_seq(self):
"Testing Coordinate Sequence objects."
for p in self.geometries.polygons:
if p.ext_ring_cs:
# Constructing the polygon and getting the coordinate sequence
poly = fromstr(p.wkt)
cs = poly.exterior_ring.coord_seq
self.assertEqual(p.ext_ring_cs, cs.tuple) # done in the Polygon test too.
self.assertEqual(len(p.ext_ring_cs), len(cs)) # Making sure __len__ works
# Checks __getitem__ and __setitem__
for i in range(len(p.ext_ring_cs)):
c1 = p.ext_ring_cs[i] # Expected value
c2 = cs[i] # Value from coordseq
self.assertEqual(c1, c2)
# Constructing the test value to set the coordinate sequence with
if len(c1) == 2:
tset = (5, 23)
else:
tset = (5, 23, 8)
cs[i] = tset
# Making sure every set point matches what we expect
for j in range(len(tset)):
cs[i] = tset
self.assertEqual(tset[j], cs[i][j])
def test_relate_pattern(self):
"Testing relate() and relate_pattern()."
g = fromstr('POINT (0 0)')
with self.assertRaises(GEOSException):
g.relate_pattern(0, 'invalid pattern, yo')
for rg in self.geometries.relate_geoms:
a = fromstr(rg.wkt_a)
b = fromstr(rg.wkt_b)
self.assertEqual(rg.result, a.relate_pattern(b, rg.pattern))
self.assertEqual(rg.pattern, a.relate(b))
def test_intersection(self):
"Testing intersects() and intersection()."
for i in range(len(self.geometries.topology_geoms)):
a = fromstr(self.geometries.topology_geoms[i].wkt_a)
b = fromstr(self.geometries.topology_geoms[i].wkt_b)
i1 = fromstr(self.geometries.intersect_geoms[i].wkt)
self.assertIs(a.intersects(b), True)
i2 = a.intersection(b)
self.assertEqual(i1, i2)
self.assertEqual(i1, a & b) # __and__ is intersection operator
a &= b # testing __iand__
self.assertEqual(i1, a)
def test_union(self):
"Testing union()."
for i in range(len(self.geometries.topology_geoms)):
a = fromstr(self.geometries.topology_geoms[i].wkt_a)
b = fromstr(self.geometries.topology_geoms[i].wkt_b)
u1 = fromstr(self.geometries.union_geoms[i].wkt)
u2 = a.union(b)
self.assertEqual(u1, u2)
self.assertEqual(u1, a | b) # __or__ is union operator
a |= b # testing __ior__
self.assertEqual(u1, a)
def test_unary_union(self):
"Testing unary_union."
for i in range(len(self.geometries.topology_geoms)):
a = fromstr(self.geometries.topology_geoms[i].wkt_a)
b = fromstr(self.geometries.topology_geoms[i].wkt_b)
u1 = fromstr(self.geometries.union_geoms[i].wkt)
u2 = GeometryCollection(a, b).unary_union
self.assertTrue(u1.equals(u2))
def test_difference(self):
"Testing difference()."
for i in range(len(self.geometries.topology_geoms)):
a = fromstr(self.geometries.topology_geoms[i].wkt_a)
b = fromstr(self.geometries.topology_geoms[i].wkt_b)
d1 = fromstr(self.geometries.diff_geoms[i].wkt)
d2 = a.difference(b)
self.assertEqual(d1, d2)
self.assertEqual(d1, a - b) # __sub__ is difference operator
a -= b # testing __isub__
self.assertEqual(d1, a)
def test_symdifference(self):
"Testing sym_difference()."
for i in range(len(self.geometries.topology_geoms)):
a = fromstr(self.geometries.topology_geoms[i].wkt_a)
b = fromstr(self.geometries.topology_geoms[i].wkt_b)
d1 = fromstr(self.geometries.sdiff_geoms[i].wkt)
d2 = a.sym_difference(b)
self.assertEqual(d1, d2)
self.assertEqual(d1, a ^ b) # __xor__ is symmetric difference operator
a ^= b # testing __ixor__
self.assertEqual(d1, a)
def test_buffer(self):
"Testing buffer()."
for bg in self.geometries.buffer_geoms:
g = fromstr(bg.wkt)
# The buffer we expect
exp_buf = fromstr(bg.buffer_wkt)
quadsegs = bg.quadsegs
width = bg.width
# Can't use a floating-point for the number of quadsegs.
with self.assertRaises(ctypes.ArgumentError):
g.buffer(width, float(quadsegs))
# Constructing our buffer
buf = g.buffer(width, quadsegs)
self.assertEqual(exp_buf.num_coords, buf.num_coords)
self.assertEqual(len(exp_buf), len(buf))
# Now assuring that each point in the buffer is almost equal
for j in range(len(exp_buf)):
exp_ring = exp_buf[j]
buf_ring = buf[j]
self.assertEqual(len(exp_ring), len(buf_ring))
for k in range(len(exp_ring)):
# Asserting the X, Y of each point are almost equal (due to floating point imprecision)
self.assertAlmostEqual(exp_ring[k][0], buf_ring[k][0], 9)
self.assertAlmostEqual(exp_ring[k][1], buf_ring[k][1], 9)
def test_covers(self):
poly = Polygon(((0, 0), (0, 10), (10, 10), (10, 0), (0, 0)))
self.assertTrue(poly.covers(Point(5, 5)))
self.assertFalse(poly.covers(Point(100, 100)))
def test_closed(self):
ls_closed = LineString((0, 0), (1, 1), (0, 0))
ls_not_closed = LineString((0, 0), (1, 1))
self.assertFalse(ls_not_closed.closed)
self.assertTrue(ls_closed.closed)
if geos_version_info()['version'] >= '3.5':
self.assertFalse(MultiLineString(ls_closed, ls_not_closed).closed)
self.assertTrue(MultiLineString(ls_closed, ls_closed).closed)
with mock.patch('django.contrib.gis.geos.collections.geos_version_info', lambda: {'version': '3.4.9'}):
with self.assertRaisesMessage(GEOSException, "MultiLineString.closed requires GEOS >= 3.5.0."):
MultiLineString().closed
def test_srid(self):
"Testing the SRID property and keyword."
# Testing SRID keyword on Point
pnt = Point(5, 23, srid=4326)
self.assertEqual(4326, pnt.srid)
pnt.srid = 3084
self.assertEqual(3084, pnt.srid)
with self.assertRaises(ctypes.ArgumentError):
pnt.srid = '4326'
# Testing SRID keyword on fromstr(), and on Polygon rings.
poly = fromstr(self.geometries.polygons[1].wkt, srid=4269)
self.assertEqual(4269, poly.srid)
for ring in poly:
self.assertEqual(4269, ring.srid)
poly.srid = 4326
self.assertEqual(4326, poly.shell.srid)
# Testing SRID keyword on GeometryCollection
gc = GeometryCollection(Point(5, 23), LineString((0, 0), (1.5, 1.5), (3, 3)), srid=32021)
self.assertEqual(32021, gc.srid)
for i in range(len(gc)):
self.assertEqual(32021, gc[i].srid)
# GEOS may get the SRID from HEXEWKB
# 'POINT(5 23)' at SRID=4326 in hex form -- obtained from PostGIS
# using `SELECT GeomFromText('POINT (5 23)', 4326);`.
hex = '0101000020E610000000000000000014400000000000003740'
p1 = fromstr(hex)
self.assertEqual(4326, p1.srid)
p2 = fromstr(p1.hex)
self.assertIsNone(p2.srid)
p3 = fromstr(p1.hex, srid=-1) # -1 is intended.
self.assertEqual(-1, p3.srid)
# Testing that geometry SRID could be set to its own value
pnt_wo_srid = Point(1, 1)
pnt_wo_srid.srid = pnt_wo_srid.srid
def test_custom_srid(self):
"""Test with a null srid and a srid unknown to GDAL."""
for srid in [None, 999999]:
pnt = Point(111200, 220900, srid=srid)
self.assertTrue(pnt.ewkt.startswith(("SRID=%s;" % srid if srid else '') + "POINT (111200"))
self.assertIsInstance(pnt.ogr, gdal.OGRGeometry)
self.assertIsNone(pnt.srs)
# Test conversion from custom to a known srid
c2w = gdal.CoordTransform(
gdal.SpatialReference(
'+proj=mill +lat_0=0 +lon_0=0 +x_0=0 +y_0=0 +R_A +ellps=WGS84 '
'+datum=WGS84 +units=m +no_defs'
),
gdal.SpatialReference(4326))
new_pnt = pnt.transform(c2w, clone=True)
self.assertEqual(new_pnt.srid, 4326)
self.assertAlmostEqual(new_pnt.x, 1, 3)
self.assertAlmostEqual(new_pnt.y, 2, 3)
def test_mutable_geometries(self):
"Testing the mutability of Polygons and Geometry Collections."
# ### Testing the mutability of Polygons ###
for p in self.geometries.polygons:
poly = fromstr(p.wkt)
# Should only be able to use __setitem__ with LinearRing geometries.
with self.assertRaises(TypeError):
poly.__setitem__(0, LineString((1, 1), (2, 2)))
# Constructing the new shell by adding 500 to every point in the old shell.
shell_tup = poly.shell.tuple
new_coords = []
for point in shell_tup:
new_coords.append((point[0] + 500., point[1] + 500.))
new_shell = LinearRing(*tuple(new_coords))
# Assigning polygon's exterior ring w/the new shell
poly.exterior_ring = new_shell
str(new_shell) # new shell is still accessible
self.assertEqual(poly.exterior_ring, new_shell)
self.assertEqual(poly[0], new_shell)
# ### Testing the mutability of Geometry Collections
for tg in self.geometries.multipoints:
mp = fromstr(tg.wkt)
for i in range(len(mp)):
# Creating a random point.
pnt = mp[i]
new = Point(random.randint(21, 100), random.randint(21, 100))
# Testing the assignment
mp[i] = new
str(new) # what was used for the assignment is still accessible
self.assertEqual(mp[i], new)
self.assertEqual(mp[i].wkt, new.wkt)
self.assertNotEqual(pnt, mp[i])
# MultiPolygons involve much more memory management because each
# Polygon w/in the collection has its own rings.
for tg in self.geometries.multipolygons:
mpoly = fromstr(tg.wkt)
for i in range(len(mpoly)):
poly = mpoly[i]
old_poly = mpoly[i]
# Offsetting the each ring in the polygon by 500.
for j in range(len(poly)):
r = poly[j]
for k in range(len(r)):
r[k] = (r[k][0] + 500., r[k][1] + 500.)
poly[j] = r
self.assertNotEqual(mpoly[i], poly)
# Testing the assignment
mpoly[i] = poly
str(poly) # Still accessible
self.assertEqual(mpoly[i], poly)
self.assertNotEqual(mpoly[i], old_poly)
# Extreme (!!) __setitem__ -- no longer works, have to detect
# in the first object that __setitem__ is called in the subsequent
# objects -- maybe mpoly[0, 0, 0] = (3.14, 2.71)?
# mpoly[0][0][0] = (3.14, 2.71)
# self.assertEqual((3.14, 2.71), mpoly[0][0][0])
# Doing it more slowly..
# self.assertEqual((3.14, 2.71), mpoly[0].shell[0])
# del mpoly
def test_point_list_assignment(self):
p = Point(0, 0)
p[:] = (1, 2, 3)
self.assertEqual(p, Point(1, 2, 3))
p[:] = ()
self.assertEqual(p.wkt, Point())
p[:] = (1, 2)
self.assertEqual(p.wkt, Point(1, 2))
with self.assertRaises(ValueError):
p[:] = (1,)
with self.assertRaises(ValueError):
p[:] = (1, 2, 3, 4, 5)
def test_linestring_list_assignment(self):
ls = LineString((0, 0), (1, 1))
ls[:] = ()
self.assertEqual(ls, LineString())
ls[:] = ((0, 0), (1, 1), (2, 2))
self.assertEqual(ls, LineString((0, 0), (1, 1), (2, 2)))
with self.assertRaises(ValueError):
ls[:] = (1,)
def test_linearring_list_assignment(self):
ls = LinearRing((0, 0), (0, 1), (1, 1), (0, 0))
ls[:] = ()
self.assertEqual(ls, LinearRing())
ls[:] = ((0, 0), (0, 1), (1, 1), (1, 0), (0, 0))
self.assertEqual(ls, LinearRing((0, 0), (0, 1), (1, 1), (1, 0), (0, 0)))
with self.assertRaises(ValueError):
ls[:] = ((0, 0), (1, 1), (2, 2))
def test_polygon_list_assignment(self):
pol = Polygon()
pol[:] = (((0, 0), (0, 1), (1, 1), (1, 0), (0, 0)),)
self.assertEqual(pol, Polygon(((0, 0), (0, 1), (1, 1), (1, 0), (0, 0)),))
pol[:] = ()
self.assertEqual(pol, Polygon())
def test_geometry_collection_list_assignment(self):
p = Point()
gc = GeometryCollection()
gc[:] = [p]
self.assertEqual(gc, GeometryCollection(p))
gc[:] = ()
self.assertEqual(gc, GeometryCollection())
def test_threed(self):
"Testing three-dimensional geometries."
# Testing a 3D Point
pnt = Point(2, 3, 8)
self.assertEqual((2., 3., 8.), pnt.coords)
with self.assertRaises(TypeError):
pnt.tuple = (1., 2.)
pnt.coords = (1., 2., 3.)
self.assertEqual((1., 2., 3.), pnt.coords)
# Testing a 3D LineString
ls = LineString((2., 3., 8.), (50., 250., -117.))
self.assertEqual(((2., 3., 8.), (50., 250., -117.)), ls.tuple)
with self.assertRaises(TypeError):
ls.__setitem__(0, (1., 2.))
ls[0] = (1., 2., 3.)
self.assertEqual((1., 2., 3.), ls[0])
def test_distance(self):
"Testing the distance() function."
# Distance to self should be 0.
pnt = Point(0, 0)
self.assertEqual(0.0, pnt.distance(Point(0, 0)))
# Distance should be 1
self.assertEqual(1.0, pnt.distance(Point(0, 1)))
# Distance should be ~ sqrt(2)
self.assertAlmostEqual(1.41421356237, pnt.distance(Point(1, 1)), 11)
# Distances are from the closest vertex in each geometry --
# should be 3 (distance from (2, 2) to (5, 2)).
ls1 = LineString((0, 0), (1, 1), (2, 2))
ls2 = LineString((5, 2), (6, 1), (7, 0))
self.assertEqual(3, ls1.distance(ls2))
def test_length(self):
"Testing the length property."
# Points have 0 length.
pnt = Point(0, 0)
self.assertEqual(0.0, pnt.length)
# Should be ~ sqrt(2)
ls = LineString((0, 0), (1, 1))
self.assertAlmostEqual(1.41421356237, ls.length, 11)
# Should be circumference of Polygon
poly = Polygon(LinearRing((0, 0), (0, 1), (1, 1), (1, 0), (0, 0)))
self.assertEqual(4.0, poly.length)
# Should be sum of each element's length in collection.
mpoly = MultiPolygon(poly.clone(), poly)
self.assertEqual(8.0, mpoly.length)
def test_emptyCollections(self):
"Testing empty geometries and collections."
geoms = [
GeometryCollection([]),
fromstr('GEOMETRYCOLLECTION EMPTY'),
GeometryCollection(),
fromstr('POINT EMPTY'),
Point(),
fromstr('LINESTRING EMPTY'),
LineString(),
fromstr('POLYGON EMPTY'),
Polygon(),
fromstr('MULTILINESTRING EMPTY'),
MultiLineString(),
fromstr('MULTIPOLYGON EMPTY'),
MultiPolygon(()),
MultiPolygon(),
]
if numpy:
geoms.append(LineString(numpy.array([])))
for g in geoms:
self.assertIs(g.empty, True)
# Testing len() and num_geom.
if isinstance(g, Polygon):
self.assertEqual(1, len(g)) # Has one empty linear ring
self.assertEqual(1, g.num_geom)
self.assertEqual(0, len(g[0]))
elif isinstance(g, (Point, LineString)):
self.assertEqual(1, g.num_geom)
self.assertEqual(0, len(g))
else:
self.assertEqual(0, g.num_geom)
self.assertEqual(0, len(g))
# Testing __getitem__ (doesn't work on Point or Polygon)
if isinstance(g, Point):
with self.assertRaises(IndexError):
g.x
elif isinstance(g, Polygon):
lr = g.shell
self.assertEqual('LINEARRING EMPTY', lr.wkt)
self.assertEqual(0, len(lr))
self.assertIs(lr.empty, True)
with self.assertRaises(IndexError):
lr.__getitem__(0)
else:
with self.assertRaises(IndexError):
g.__getitem__(0)
def test_collection_dims(self):
gc = GeometryCollection([])
self.assertEqual(gc.dims, -1)
gc = GeometryCollection(Point(0, 0))
self.assertEqual(gc.dims, 0)
gc = GeometryCollection(LineString((0, 0), (1, 1)), Point(0, 0))
self.assertEqual(gc.dims, 1)
gc = GeometryCollection(LineString((0, 0), (1, 1)), Polygon(((0, 0), (0, 1), (1, 1), (0, 0))), Point(0, 0))
self.assertEqual(gc.dims, 2)
def test_collections_of_collections(self):
"Testing GeometryCollection handling of other collections."
# Creating a GeometryCollection WKT string composed of other
# collections and polygons.
coll = [mp.wkt for mp in self.geometries.multipolygons if mp.valid]
coll.extend(mls.wkt for mls in self.geometries.multilinestrings)
coll.extend(p.wkt for p in self.geometries.polygons)
coll.extend(mp.wkt for mp in self.geometries.multipoints)
gc_wkt = 'GEOMETRYCOLLECTION(%s)' % ','.join(coll)
# Should construct ok from WKT
gc1 = GEOSGeometry(gc_wkt)
# Should also construct ok from individual geometry arguments.
gc2 = GeometryCollection(*tuple(g for g in gc1))
# And, they should be equal.
self.assertEqual(gc1, gc2)
def test_gdal(self):
"Testing `ogr` and `srs` properties."
g1 = fromstr('POINT(5 23)')
self.assertIsInstance(g1.ogr, gdal.OGRGeometry)
self.assertIsNone(g1.srs)
g1_3d = fromstr('POINT(5 23 8)')
self.assertIsInstance(g1_3d.ogr, gdal.OGRGeometry)
self.assertEqual(g1_3d.ogr.z, 8)
g2 = fromstr('LINESTRING(0 0, 5 5, 23 23)', srid=4326)
self.assertIsInstance(g2.ogr, gdal.OGRGeometry)
self.assertIsInstance(g2.srs, gdal.SpatialReference)
self.assertEqual(g2.hex, g2.ogr.hex)
self.assertEqual('WGS 84', g2.srs.name)
def test_copy(self):
"Testing use with the Python `copy` module."
import copy
poly = GEOSGeometry('POLYGON((0 0, 0 23, 23 23, 23 0, 0 0), (5 5, 5 10, 10 10, 10 5, 5 5))')
cpy1 = copy.copy(poly)
cpy2 = copy.deepcopy(poly)
self.assertNotEqual(poly._ptr, cpy1._ptr)
self.assertNotEqual(poly._ptr, cpy2._ptr)
def test_transform(self):
"Testing `transform` method."
orig = GEOSGeometry('POINT (-104.609 38.255)', 4326)
trans = GEOSGeometry('POINT (992385.4472045 481455.4944650)', 2774)
# Using a srid, a SpatialReference object, and a CoordTransform object
# for transformations.
t1, t2, t3 = orig.clone(), orig.clone(), orig.clone()
t1.transform(trans.srid)
t2.transform(gdal.SpatialReference('EPSG:2774'))
ct = gdal.CoordTransform(gdal.SpatialReference('WGS84'), gdal.SpatialReference(2774))
t3.transform(ct)
# Testing use of the `clone` keyword.
k1 = orig.clone()
k2 = k1.transform(trans.srid, clone=True)
self.assertEqual(k1, orig)
self.assertNotEqual(k1, k2)
prec = 3
for p in (t1, t2, t3, k2):
self.assertAlmostEqual(trans.x, p.x, prec)
self.assertAlmostEqual(trans.y, p.y, prec)
def test_transform_3d(self):
p3d = GEOSGeometry('POINT (5 23 100)', 4326)
p3d.transform(2774)
self.assertEqual(p3d.z, 100)
def test_transform_noop(self):
""" Testing `transform` method (SRID match) """
# transform() should no-op if source & dest SRIDs match,
# regardless of whether GDAL is available.
g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
gt = g.tuple
g.transform(4326)
self.assertEqual(g.tuple, gt)
self.assertEqual(g.srid, 4326)
g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
g1 = g.transform(4326, clone=True)
self.assertEqual(g1.tuple, g.tuple)
self.assertEqual(g1.srid, 4326)
self.assertIsNot(g1, g, "Clone didn't happen")
def test_transform_nosrid(self):
""" Testing `transform` method (no SRID or negative SRID) """
g = GEOSGeometry('POINT (-104.609 38.255)', srid=None)
with self.assertRaises(GEOSException):
g.transform(2774)
g = GEOSGeometry('POINT (-104.609 38.255)', srid=None)
with self.assertRaises(GEOSException):
g.transform(2774, clone=True)
g = GEOSGeometry('POINT (-104.609 38.255)', srid=-1)
with self.assertRaises(GEOSException):
g.transform(2774)
g = GEOSGeometry('POINT (-104.609 38.255)', srid=-1)
with self.assertRaises(GEOSException):
g.transform(2774, clone=True)
def test_extent(self):
"Testing `extent` method."
# The xmin, ymin, xmax, ymax of the MultiPoint should be returned.
mp = MultiPoint(Point(5, 23), Point(0, 0), Point(10, 50))
self.assertEqual((0.0, 0.0, 10.0, 50.0), mp.extent)
pnt = Point(5.23, 17.8)
# Extent of points is just the point itself repeated.
self.assertEqual((5.23, 17.8, 5.23, 17.8), pnt.extent)
# Testing on the 'real world' Polygon.
poly = fromstr(self.geometries.polygons[3].wkt)
ring = poly.shell
x, y = ring.x, ring.y
xmin, ymin = min(x), min(y)
xmax, ymax = max(x), max(y)
self.assertEqual((xmin, ymin, xmax, ymax), poly.extent)
def test_pickle(self):
"Testing pickling and unpickling support."
# Using both pickle and cPickle -- just 'cause.
from django.utils.six.moves import cPickle
import pickle
# Creating a list of test geometries for pickling,
# and setting the SRID on some of them.
def get_geoms(lst, srid=None):
return [GEOSGeometry(tg.wkt, srid) for tg in lst]
tgeoms = get_geoms(self.geometries.points)
tgeoms.extend(get_geoms(self.geometries.multilinestrings, 4326))
tgeoms.extend(get_geoms(self.geometries.polygons, 3084))
tgeoms.extend(get_geoms(self.geometries.multipolygons, 3857))
for geom in tgeoms:
s1, s2 = cPickle.dumps(geom), pickle.dumps(geom)
g1, g2 = cPickle.loads(s1), pickle.loads(s2)
for tmpg in (g1, g2):
self.assertEqual(geom, tmpg)
self.assertEqual(geom.srid, tmpg.srid)
def test_prepared(self):
"Testing PreparedGeometry support."
# Creating a simple multipolygon and getting a prepared version.
mpoly = GEOSGeometry('MULTIPOLYGON(((0 0,0 5,5 5,5 0,0 0)),((5 5,5 10,10 10,10 5,5 5)))')
prep = mpoly.prepared
# A set of test points.
pnts = [Point(5, 5), Point(7.5, 7.5), Point(2.5, 7.5)]
for pnt in pnts:
# Results should be the same (but faster)
self.assertEqual(mpoly.contains(pnt), prep.contains(pnt))
self.assertEqual(mpoly.intersects(pnt), prep.intersects(pnt))
self.assertEqual(mpoly.covers(pnt), prep.covers(pnt))
self.assertTrue(prep.crosses(fromstr('LINESTRING(1 1, 15 15)')))
self.assertTrue(prep.disjoint(Point(-5, -5)))
poly = Polygon(((-1, -1), (1, 1), (1, 0), (-1, -1)))
self.assertTrue(prep.overlaps(poly))
poly = Polygon(((-5, 0), (-5, 5), (0, 5), (-5, 0)))
self.assertTrue(prep.touches(poly))
poly = Polygon(((-1, -1), (-1, 11), (11, 11), (11, -1), (-1, -1)))
self.assertTrue(prep.within(poly))
# Original geometry deletion should not crash the prepared one (#21662)
del mpoly
self.assertTrue(prep.covers(Point(5, 5)))
def test_line_merge(self):
"Testing line merge support"
ref_geoms = (fromstr('LINESTRING(1 1, 1 1, 3 3)'),
fromstr('MULTILINESTRING((1 1, 3 3), (3 3, 4 2))'),
)
ref_merged = (fromstr('LINESTRING(1 1, 3 3)'),
fromstr('LINESTRING (1 1, 3 3, 4 2)'),
)
for geom, merged in zip(ref_geoms, ref_merged):
self.assertEqual(merged, geom.merged)
def test_valid_reason(self):
"Testing IsValidReason support"
g = GEOSGeometry("POINT(0 0)")
self.assertTrue(g.valid)
self.assertIsInstance(g.valid_reason, six.string_types)
self.assertEqual(g.valid_reason, "Valid Geometry")
g = GEOSGeometry("LINESTRING(0 0, 0 0)")
self.assertFalse(g.valid)
self.assertIsInstance(g.valid_reason, six.string_types)
self.assertTrue(g.valid_reason.startswith("Too few points in geometry component"))
def test_linearref(self):
"Testing linear referencing"
ls = fromstr('LINESTRING(0 0, 0 10, 10 10, 10 0)')
mls = fromstr('MULTILINESTRING((0 0, 0 10), (10 0, 10 10))')
self.assertEqual(ls.project(Point(0, 20)), 10.0)
self.assertEqual(ls.project(Point(7, 6)), 24)
self.assertEqual(ls.project_normalized(Point(0, 20)), 1.0 / 3)
self.assertEqual(ls.interpolate(10), Point(0, 10))
self.assertEqual(ls.interpolate(24), Point(10, 6))
self.assertEqual(ls.interpolate_normalized(1.0 / 3), Point(0, 10))
self.assertEqual(mls.project(Point(0, 20)), 10)
self.assertEqual(mls.project(Point(7, 6)), 16)
self.assertEqual(mls.interpolate(9), Point(0, 9))
self.assertEqual(mls.interpolate(17), Point(10, 7))
def test_deconstructible(self):
"""
Geometry classes should be deconstructible.
"""
point = Point(4.337844, 50.827537, srid=4326)
path, args, kwargs = point.deconstruct()
self.assertEqual(path, 'django.contrib.gis.geos.point.Point')
self.assertEqual(args, (4.337844, 50.827537))
self.assertEqual(kwargs, {'srid': 4326})
ls = LineString(((0, 0), (1, 1)))
path, args, kwargs = ls.deconstruct()
self.assertEqual(path, 'django.contrib.gis.geos.linestring.LineString')
self.assertEqual(args, (((0, 0), (1, 1)),))
self.assertEqual(kwargs, {})
ls2 = LineString([Point(0, 0), Point(1, 1)], srid=4326)
path, args, kwargs = ls2.deconstruct()
self.assertEqual(path, 'django.contrib.gis.geos.linestring.LineString')
self.assertEqual(args, ([Point(0, 0), Point(1, 1)],))
self.assertEqual(kwargs, {'srid': 4326})
ext_coords = ((0, 0), (0, 1), (1, 1), (1, 0), (0, 0))
int_coords = ((0.4, 0.4), (0.4, 0.6), (0.6, 0.6), (0.6, 0.4), (0.4, 0.4))
poly = Polygon(ext_coords, int_coords)
path, args, kwargs = poly.deconstruct()
self.assertEqual(path, 'django.contrib.gis.geos.polygon.Polygon')
self.assertEqual(args, (ext_coords, int_coords))
self.assertEqual(kwargs, {})
lr = LinearRing((0, 0), (0, 1), (1, 1), (0, 0))
path, args, kwargs = lr.deconstruct()
self.assertEqual(path, 'django.contrib.gis.geos.linestring.LinearRing')
self.assertEqual(args, ((0, 0), (0, 1), (1, 1), (0, 0)))
self.assertEqual(kwargs, {})
mp = MultiPoint(Point(0, 0), Point(1, 1))
path, args, kwargs = mp.deconstruct()
self.assertEqual(path, 'django.contrib.gis.geos.collections.MultiPoint')
self.assertEqual(args, (Point(0, 0), Point(1, 1)))
self.assertEqual(kwargs, {})
ls1 = LineString((0, 0), (1, 1))
ls2 = LineString((2, 2), (3, 3))
mls = MultiLineString(ls1, ls2)
path, args, kwargs = mls.deconstruct()
self.assertEqual(path, 'django.contrib.gis.geos.collections.MultiLineString')
self.assertEqual(args, (ls1, ls2))
self.assertEqual(kwargs, {})
p1 = Polygon(((0, 0), (0, 1), (1, 1), (0, 0)))
p2 = Polygon(((1, 1), (1, 2), (2, 2), (1, 1)))
mp = MultiPolygon(p1, p2)
path, args, kwargs = mp.deconstruct()
self.assertEqual(path, 'django.contrib.gis.geos.collections.MultiPolygon')
self.assertEqual(args, (p1, p2, ))
self.assertEqual(kwargs, {})
poly = Polygon(((0, 0), (0, 1), (1, 1), (0, 0)))
gc = GeometryCollection(Point(0, 0), MultiPoint(Point(0, 0), Point(1, 1)), poly)
path, args, kwargs = gc.deconstruct()
self.assertEqual(path, 'django.contrib.gis.geos.collections.GeometryCollection')
self.assertEqual(args, (Point(0, 0), MultiPoint(Point(0, 0), Point(1, 1)), poly))
self.assertEqual(kwargs, {})
def test_subclassing(self):
"""
GEOSGeometry subclass may itself be subclassed without being forced-cast
to the parent class during `__init__`.
"""
class ExtendedPolygon(Polygon):
def __init__(self, *args, **kwargs):
data = kwargs.pop('data', 0)
super(ExtendedPolygon, self).__init__(*args, **kwargs)
self._data = data
def __str__(self):
return "EXT_POLYGON - data: %d - %s" % (self._data, self.wkt)
ext_poly = ExtendedPolygon(((0, 0), (0, 1), (1, 1), (0, 0)), data=3)
self.assertEqual(type(ext_poly), ExtendedPolygon)
# ExtendedPolygon.__str__ should be called (instead of Polygon.__str__).
self.assertEqual(str(ext_poly), "EXT_POLYGON - data: 3 - POLYGON ((0 0, 0 1, 1 1, 0 0))")
def test_geos_version(self):
"""Testing the GEOS version regular expression."""
from django.contrib.gis.geos.libgeos import version_regex
versions = [('3.0.0rc4-CAPI-1.3.3', '3.0.0', '1.3.3'),
('3.0.0-CAPI-1.4.1', '3.0.0', '1.4.1'),
('3.4.0dev-CAPI-1.8.0', '3.4.0', '1.8.0'),
('3.4.0dev-CAPI-1.8.0 r0', '3.4.0', '1.8.0'),
('3.6.2-CAPI-1.10.2 4d2925d6', '3.6.2', '1.10.2')]
for v_init, v_geos, v_capi in versions:
m = version_regex.match(v_init)
self.assertTrue(m, msg="Unable to parse the version string '%s'" % v_init)
self.assertEqual(m.group('version'), v_geos)
self.assertEqual(m.group('capi_version'), v_capi)
def test_from_gml(self):
self.assertEqual(
GEOSGeometry('POINT(0 0)'),
GEOSGeometry.from_gml(
'<gml:Point gml:id="p21" srsName="http://www.opengis.net/def/crs/EPSG/0/4326">'
' <gml:pos srsDimension="2">0 0</gml:pos>'
'</gml:Point>'
),
)
def test_normalize(self):
g = MultiPoint(Point(0, 0), Point(2, 2), Point(1, 1))
self.assertIsNone(g.normalize())
self.assertTrue(g.equals_exact(MultiPoint(Point(2, 2), Point(1, 1), Point(0, 0))))
def test_empty_point(self):
p = Point(srid=4326)
self.assertEqual(p.ogr.ewkt, p.ewkt)
self.assertEqual(p.transform(2774, clone=True), Point(srid=2774))
p.transform(2774)
self.assertEqual(p, Point(srid=2774))
@ignore_warnings(category=RemovedInDjango20Warning)
def test_deprecated_srid_getters_setters(self):
p = Point(1, 2, srid=123)
self.assertEqual(p.get_srid(), p.srid)
p.set_srid(321)
self.assertEqual(p.srid, 321)
@ignore_warnings(category=RemovedInDjango20Warning)
def test_deprecated_point_coordinate_getters_setters(self):
p = Point(1, 2, 3)
self.assertEqual((p.get_x(), p.get_y(), p.get_z()), (p.x, p.y, p.z))
p.set_x(3)
p.set_y(2)
p.set_z(1)
self.assertEqual((p.x, p.y, p.z), (3, 2, 1))
@ignore_warnings(category=RemovedInDjango20Warning)
def test_deprecated_point_tuple_getters_setters(self):
p = Point(1, 2, 3)
self.assertEqual(p.get_coords(), (p.x, p.y, p.z))
p.set_coords((3, 2, 1))
self.assertEqual(p.get_coords(), (3, 2, 1))
@ignore_warnings(category=RemovedInDjango20Warning)
def test_deprecated_cascaded_union(self):
for geom in self.geometries.multipolygons:
mpoly = GEOSGeometry(geom.wkt)
self.assertEqual(mpoly.cascaded_union, mpoly.unary_union)
| kawamon/hue | desktop/core/ext-py/Django-1.11.29/tests/gis_tests/geos_tests/test_geos.py | Python | apache-2.0 | 54,041 | 0.001129 |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.aiplatform_v1.types import annotation
from google.cloud.aiplatform_v1.types import data_item
from google.cloud.aiplatform_v1.types import dataset as gca_dataset
from google.cloud.aiplatform_v1.types import operation
from google.protobuf import field_mask_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.cloud.aiplatform.v1",
manifest={
"CreateDatasetRequest",
"CreateDatasetOperationMetadata",
"GetDatasetRequest",
"UpdateDatasetRequest",
"ListDatasetsRequest",
"ListDatasetsResponse",
"DeleteDatasetRequest",
"ImportDataRequest",
"ImportDataResponse",
"ImportDataOperationMetadata",
"ExportDataRequest",
"ExportDataResponse",
"ExportDataOperationMetadata",
"ListDataItemsRequest",
"ListDataItemsResponse",
"GetAnnotationSpecRequest",
"ListAnnotationsRequest",
"ListAnnotationsResponse",
},
)
class CreateDatasetRequest(proto.Message):
r"""Request message for
[DatasetService.CreateDataset][google.cloud.aiplatform.v1.DatasetService.CreateDataset].
Attributes:
parent (str):
Required. The resource name of the Location to create the
Dataset in. Format:
``projects/{project}/locations/{location}``
dataset (google.cloud.aiplatform_v1.types.Dataset):
Required. The Dataset to create.
"""
parent = proto.Field(proto.STRING, number=1,)
dataset = proto.Field(proto.MESSAGE, number=2, message=gca_dataset.Dataset,)
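# Illustrative sketch (not part of the generated file): proto-plus messages
# accept their fields as keyword arguments, so a request could be built as
#
#     request = CreateDatasetRequest(
#         parent="projects/my-project/locations/us-central1",   # hypothetical
#         dataset=gca_dataset.Dataset(display_name="my-dataset"),
#     )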
class CreateDatasetOperationMetadata(proto.Message):
r"""Runtime operation information for
[DatasetService.CreateDataset][google.cloud.aiplatform.v1.DatasetService.CreateDataset].
Attributes:
generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata):
The operation generic information.
"""
generic_metadata = proto.Field(
proto.MESSAGE, number=1, message=operation.GenericOperationMetadata,
)
class GetDatasetRequest(proto.Message):
r"""Request message for
[DatasetService.GetDataset][google.cloud.aiplatform.v1.DatasetService.GetDataset].
Attributes:
name (str):
Required. The name of the Dataset resource.
read_mask (google.protobuf.field_mask_pb2.FieldMask):
Mask specifying which fields to read.
"""
name = proto.Field(proto.STRING, number=1,)
read_mask = proto.Field(proto.MESSAGE, number=2, message=field_mask_pb2.FieldMask,)
class UpdateDatasetRequest(proto.Message):
r"""Request message for
[DatasetService.UpdateDataset][google.cloud.aiplatform.v1.DatasetService.UpdateDataset].
Attributes:
dataset (google.cloud.aiplatform_v1.types.Dataset):
Required. The Dataset which replaces the
resource on the server.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Required. The update mask applies to the resource. For the
``FieldMask`` definition, see
[google.protobuf.FieldMask][google.protobuf.FieldMask].
Updatable fields:
- ``display_name``
- ``description``
- ``labels``
"""
dataset = proto.Field(proto.MESSAGE, number=1, message=gca_dataset.Dataset,)
update_mask = proto.Field(
proto.MESSAGE, number=2, message=field_mask_pb2.FieldMask,
)
class ListDatasetsRequest(proto.Message):
r"""Request message for
[DatasetService.ListDatasets][google.cloud.aiplatform.v1.DatasetService.ListDatasets].
Attributes:
parent (str):
Required. The name of the Dataset's parent resource. Format:
``projects/{project}/locations/{location}``
filter (str):
An expression for filtering the results of the request. For
field names both snake_case and camelCase are supported.
- ``display_name``: supports = and !=
- ``metadata_schema_uri``: supports = and !=
- ``labels`` supports general map functions that is:
- ``labels.key=value`` - key:value equality
- \`labels.key:\* or labels:key - key existence
- A key including a space must be quoted.
``labels."a key"``.
Some examples:
- ``displayName="myDisplayName"``
- ``labels.myKey="myValue"``
page_size (int):
The standard list page size.
page_token (str):
The standard list page token.
read_mask (google.protobuf.field_mask_pb2.FieldMask):
Mask specifying which fields to read.
order_by (str):
A comma-separated list of fields to order by, sorted in
ascending order. Use "desc" after a field name for
descending. Supported fields:
- ``display_name``
- ``create_time``
- ``update_time``
"""
parent = proto.Field(proto.STRING, number=1,)
filter = proto.Field(proto.STRING, number=2,)
page_size = proto.Field(proto.INT32, number=3,)
page_token = proto.Field(proto.STRING, number=4,)
read_mask = proto.Field(proto.MESSAGE, number=5, message=field_mask_pb2.FieldMask,)
order_by = proto.Field(proto.STRING, number=6,)
class ListDatasetsResponse(proto.Message):
r"""Response message for
[DatasetService.ListDatasets][google.cloud.aiplatform.v1.DatasetService.ListDatasets].
Attributes:
datasets (Sequence[google.cloud.aiplatform_v1.types.Dataset]):
A list of Datasets that matches the specified
filter in the request.
next_page_token (str):
The standard List next-page token.
"""
@property
def raw_page(self):
return self
datasets = proto.RepeatedField(
proto.MESSAGE, number=1, message=gca_dataset.Dataset,
)
next_page_token = proto.Field(proto.STRING, number=2,)
class DeleteDatasetRequest(proto.Message):
r"""Request message for
[DatasetService.DeleteDataset][google.cloud.aiplatform.v1.DatasetService.DeleteDataset].
Attributes:
name (str):
Required. The resource name of the Dataset to delete.
Format:
``projects/{project}/locations/{location}/datasets/{dataset}``
"""
name = proto.Field(proto.STRING, number=1,)
class ImportDataRequest(proto.Message):
r"""Request message for
[DatasetService.ImportData][google.cloud.aiplatform.v1.DatasetService.ImportData].
Attributes:
name (str):
Required. The name of the Dataset resource. Format:
``projects/{project}/locations/{location}/datasets/{dataset}``
import_configs (Sequence[google.cloud.aiplatform_v1.types.ImportDataConfig]):
Required. The desired input locations. The
contents of all input locations will be imported
in one batch.
"""
name = proto.Field(proto.STRING, number=1,)
import_configs = proto.RepeatedField(
proto.MESSAGE, number=2, message=gca_dataset.ImportDataConfig,
)
class ImportDataResponse(proto.Message):
r"""Response message for
[DatasetService.ImportData][google.cloud.aiplatform.v1.DatasetService.ImportData].
"""
class ImportDataOperationMetadata(proto.Message):
r"""Runtime operation information for
[DatasetService.ImportData][google.cloud.aiplatform.v1.DatasetService.ImportData].
Attributes:
generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata):
The common part of the operation metadata.
"""
generic_metadata = proto.Field(
proto.MESSAGE, number=1, message=operation.GenericOperationMetadata,
)
class ExportDataRequest(proto.Message):
r"""Request message for
[DatasetService.ExportData][google.cloud.aiplatform.v1.DatasetService.ExportData].
Attributes:
name (str):
Required. The name of the Dataset resource. Format:
``projects/{project}/locations/{location}/datasets/{dataset}``
export_config (google.cloud.aiplatform_v1.types.ExportDataConfig):
Required. The desired output location.
"""
name = proto.Field(proto.STRING, number=1,)
export_config = proto.Field(
proto.MESSAGE, number=2, message=gca_dataset.ExportDataConfig,
)
class ExportDataResponse(proto.Message):
r"""Response message for
[DatasetService.ExportData][google.cloud.aiplatform.v1.DatasetService.ExportData].
Attributes:
exported_files (Sequence[str]):
All of the files that are exported in this
export operation.
"""
exported_files = proto.RepeatedField(proto.STRING, number=1,)
class ExportDataOperationMetadata(proto.Message):
r"""Runtime operation information for
[DatasetService.ExportData][google.cloud.aiplatform.v1.DatasetService.ExportData].
Attributes:
generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata):
The common part of the operation metadata.
gcs_output_directory (str):
A Google Cloud Storage directory which path
ends with '/'. The exported data is stored in
the directory.
"""
generic_metadata = proto.Field(
proto.MESSAGE, number=1, message=operation.GenericOperationMetadata,
)
gcs_output_directory = proto.Field(proto.STRING, number=2,)
class ListDataItemsRequest(proto.Message):
r"""Request message for
[DatasetService.ListDataItems][google.cloud.aiplatform.v1.DatasetService.ListDataItems].
Attributes:
parent (str):
Required. The resource name of the Dataset to list DataItems
from. Format:
``projects/{project}/locations/{location}/datasets/{dataset}``
filter (str):
The standard list filter.
page_size (int):
The standard list page size.
page_token (str):
The standard list page token.
read_mask (google.protobuf.field_mask_pb2.FieldMask):
Mask specifying which fields to read.
order_by (str):
A comma-separated list of fields to order by,
sorted in ascending order. Use "desc" after a
field name for descending.
"""
parent = proto.Field(proto.STRING, number=1,)
filter = proto.Field(proto.STRING, number=2,)
page_size = proto.Field(proto.INT32, number=3,)
page_token = proto.Field(proto.STRING, number=4,)
read_mask = proto.Field(proto.MESSAGE, number=5, message=field_mask_pb2.FieldMask,)
order_by = proto.Field(proto.STRING, number=6,)
class ListDataItemsResponse(proto.Message):
r"""Response message for
[DatasetService.ListDataItems][google.cloud.aiplatform.v1.DatasetService.ListDataItems].
Attributes:
data_items (Sequence[google.cloud.aiplatform_v1.types.DataItem]):
A list of DataItems that matches the
specified filter in the request.
next_page_token (str):
The standard List next-page token.
"""
@property
def raw_page(self):
return self
data_items = proto.RepeatedField(
proto.MESSAGE, number=1, message=data_item.DataItem,
)
next_page_token = proto.Field(proto.STRING, number=2,)
class GetAnnotationSpecRequest(proto.Message):
r"""Request message for
[DatasetService.GetAnnotationSpec][google.cloud.aiplatform.v1.DatasetService.GetAnnotationSpec].
Attributes:
name (str):
Required. The name of the AnnotationSpec resource. Format:
``projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}``
read_mask (google.protobuf.field_mask_pb2.FieldMask):
Mask specifying which fields to read.
"""
name = proto.Field(proto.STRING, number=1,)
read_mask = proto.Field(proto.MESSAGE, number=2, message=field_mask_pb2.FieldMask,)
class ListAnnotationsRequest(proto.Message):
r"""Request message for
[DatasetService.ListAnnotations][google.cloud.aiplatform.v1.DatasetService.ListAnnotations].
Attributes:
parent (str):
Required. The resource name of the DataItem to list
Annotations from. Format:
``projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}``
filter (str):
The standard list filter.
page_size (int):
The standard list page size.
page_token (str):
The standard list page token.
read_mask (google.protobuf.field_mask_pb2.FieldMask):
Mask specifying which fields to read.
order_by (str):
A comma-separated list of fields to order by,
sorted in ascending order. Use "desc" after a
field name for descending.
"""
parent = proto.Field(proto.STRING, number=1,)
filter = proto.Field(proto.STRING, number=2,)
page_size = proto.Field(proto.INT32, number=3,)
page_token = proto.Field(proto.STRING, number=4,)
read_mask = proto.Field(proto.MESSAGE, number=5, message=field_mask_pb2.FieldMask,)
order_by = proto.Field(proto.STRING, number=6,)
class ListAnnotationsResponse(proto.Message):
r"""Response message for
[DatasetService.ListAnnotations][google.cloud.aiplatform.v1.DatasetService.ListAnnotations].
Attributes:
annotations (Sequence[google.cloud.aiplatform_v1.types.Annotation]):
A list of Annotations that matches the
specified filter in the request.
next_page_token (str):
The standard List next-page token.
"""
@property
def raw_page(self):
return self
annotations = proto.RepeatedField(
proto.MESSAGE, number=1, message=annotation.Annotation,
)
next_page_token = proto.Field(proto.STRING, number=2,)
__all__ = tuple(sorted(__protobuf__.manifest))
| sasha-gitg/python-aiplatform | google/cloud/aiplatform_v1/types/dataset_service.py | Python | apache-2.0 | 14,664 | 0.00075 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from . import event_event
from . import event_registration
from . import event_type
from . import website
from . import website_event_menu
from . import website_menu
from . import website_visitor
| rven/odoo | addons/website_event/models/__init__.py | Python | agpl-3.0 | 296 | 0 |
from django.conf.urls.defaults import patterns
urlpatterns = patterns("jimi.catalog.views",
(r"^/?$", "all_categories"),
(r"^(?P<slug>[-\w]+)/$", "node", {}, "node"),
)
| bhell/jimi | jimi/jimi/catalog/urls.py | Python | bsd-3-clause | 178 | 0.005618 |
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
from . import payment_mode
from . import res_company
from . import res_partner
from . import account_tax_template
from . import account_tax
from . import res_currency
from . import account_invoice_integration
from . import account_invoice_integration_method
from . import account_invoice_integration_log
from . import account_invoice
| factorlibre/l10n-spain | l10n_es_facturae/models/__init__.py | Python | agpl-3.0 | 405 | 0 |
import logging
import sys
import traceback

import six

# The translation helper ``_`` is assumed to come from this project's
# openstack-common gettext module, following the usual oslo layout.
from mvpn.openstack.common.gettextutils import _


class save_and_reraise_exception(object):
    """Save the current exception, run some code, then re-raise it.

    Set ``self.reraise`` to False inside the block to swallow the exception
    after logging/cleanup instead of re-raising it.
    """
    def __init__(self):
        self.reraise = True

    def __enter__(self):
        self.type_, self.value, self.tb = sys.exc_info()
        return self
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is not None:
logging.error(_('Original exception being dropped: %s'),
traceback.format_exception(self.type_,
self.value,
self.tb))
return False
if self.reraise:
six.reraise(self.type_, self.value, self.tb)
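# Illustrative usage sketch (assumed caller code, not part of this module):
# the manager is entered from inside an ``except`` block, so sys.exc_info()
# still refers to the original exception; setting ``reraise`` to False
# swallows it after cleanup instead of re-raising.
#
#     try:
#         do_work()                        # hypothetical helper
#     except Exception:
#         with save_and_reraise_exception() as ctxt:
#             if rollback_succeeded():     # hypothetical helper
#                 ctxt.reraise = False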
| windskyer/mvpn | mvpn/openstack/common/excutils.py | Python | gpl-2.0 | 655 | 0.001527 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-02-21 01:07
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('clients', '0002_contact'),
]
operations = [
migrations.AlterField(
model_name='contact',
name='alternate_email',
field=models.EmailField(blank=True, max_length=255),
),
migrations.AlterField(
model_name='contact',
name='alternate_phone',
field=models.CharField(blank=True, max_length=50),
),
migrations.AlterField(
model_name='contact',
name='phone',
field=models.CharField(blank=True, max_length=50),
),
]
| fernandolobato/balarco | clients/migrations/0003_auto_20170221_0107.py | Python | mit | 804 | 0 |
##############################################################################################
# Copyright 2014-2015 Cloud Media Sdn. Bhd.
#
# This file is part of Xuan Application Development SDK.
#
# Xuan Application Development SDK is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Xuan Application Development SDK is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Xuan Application Development SDK. If not, see <http://www.gnu.org/licenses/>.
##############################################################################################
from com.cloudMedia.theKuroBox.sdk.paramTypes.kbxObject import KBXObjectType
from com.cloudMedia.theKuroBox.sdk.paramTypes.kbxParamType import KBXParamType
from com.cloudMedia.theKuroBox.sdk.paramTypes.kbxParamWrapper import KBXParamWrapper
from com.cloudMedia.theKuroBox.sdk.util.logger import Logger
class KBXHSBColorType(KBXObjectType):
TYPE_NAME = "kbxHSBColor"
PROP_KBX_PARAM_OBJ_KEY_HUE = "h"
PROP_KBX_PARAM_OBJ_KEY_SATURATION = "s"
PROP_KBX_PARAM_OBJ_KEY_BRIGHTNESS = "b"
def __init__(self, kbxParamIsRequired=True):
pass
def cast(self, value):
pass
class DTO(dict):
@staticmethod
def build(h, s, b):
pass
def set_hue(self, value):
pass
def set_saturation(self, value):
pass
def set_brightness(self, value):
pass
def get_hue(self):
pass
def get_saturation(self):
pass
def get_brightness(self):
pass
class KBXHSBColor(KBXHSBColorType, KBXParamWrapper):
def __init__(self, kbxParamName, kbxParamIsRequired=True, **kbxParamProps):
pass
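# Illustrative sketch (the SDK ships these bodies stubbed out): judging from
# the declared dict keys, a colour payload would round-trip roughly as
#
#     dto = KBXHSBColorType.DTO.build(210, 80, 64)    # assumed h/s/b ranges
#     dto.get_hue()                                   # -> 210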
| TheStackBox/xuansdk | SDKLibrary/com/cloudMedia/theKuroBox/sdk/paramTypes/kbxHSBColor.py | Python | gpl-3.0 | 2,217 | 0.00406 |
# $HeadURL: $
''' CacheFeederAgent
This agent feeds the Cache tables with the outputs of the cache commands.
'''
from DIRAC import S_OK  # , S_ERROR, gConfig
from DIRAC.AccountingSystem.Client.ReportsClient import ReportsClient
from DIRAC.Core.Base.AgentModule import AgentModule
from DIRAC.Core.DISET.RPCClient import RPCClient
from DIRAC.Core.LCG.GOCDBClient import GOCDBClient
from DIRAC.ResourceStatusSystem.Client.ResourceStatusClient import ResourceStatusClient
from DIRAC.ResourceStatusSystem.Command import CommandCaller
#from DIRAC.ResourceStatusSystem.Utilities import CSHelpers
from DIRAC.ResourceStatusSystem.Utilities import Utils
ResourceManagementClient = getattr(Utils.voimport( 'DIRAC.ResourceStatusSystem.Client.ResourceManagementClient' ),'ResourceManagementClient')
__RCSID__ = '$Id: $'
AGENT_NAME = 'ResourceStatus/CacheFeederAgent'
class CacheFeederAgent( AgentModule ):
'''
The CacheFeederAgent feeds the cache tables for the client and the accounting.
It runs periodically a set of commands, and stores it's results on the
tables.
'''
# Too many public methods
# pylint: disable-msg=R0904
def __init__( self, *args, **kwargs ):
AgentModule.__init__( self, *args, **kwargs )
self.commands = {}
self.clients = {}
self.cCaller = None
self.rmClient = None
def initialize( self ):
self.am_setOption( 'shifterProxy', 'DataManager' )
self.rmClient = ResourceManagementClient()
self.commands[ 'Downtime' ] = [ { 'Downtime' : {} } ]
self.commands[ 'SpaceTokenOccupancy' ] = [ { 'SpaceTokenOccupancy' : {} } ]
#PilotsCommand
# self.commands[ 'Pilots' ] = [
# { 'PilotsWMS' : { 'element' : 'Site', 'siteName' : None } },
# { 'PilotsWMS' : { 'element' : 'Resource', 'siteName' : None } }
# ]
#FIXME: do not forget about hourly vs Always ...etc
#AccountingCacheCommand
# self.commands[ 'AccountingCache' ] = [
# {'SuccessfullJobsBySiteSplitted' :{'hours' :24, 'plotType' :'Job' }},
# {'FailedJobsBySiteSplitted' :{'hours' :24, 'plotType' :'Job' }},
# {'SuccessfullPilotsBySiteSplitted' :{'hours' :24, 'plotType' :'Pilot' }},
# {'FailedPilotsBySiteSplitted' :{'hours' :24, 'plotType' :'Pilot' }},
# {'SuccessfullPilotsByCESplitted' :{'hours' :24, 'plotType' :'Pilot' }},
# {'FailedPilotsByCESplitted' :{'hours' :24, 'plotType' :'Pilot' }},
# {'RunningJobsBySiteSplitted' :{'hours' :24, 'plotType' :'Job' }},
## {'RunningJobsBySiteSplitted' :{'hours' :168, 'plotType' :'Job' }},
## {'RunningJobsBySiteSplitted' :{'hours' :720, 'plotType' :'Job' }},
## {'RunningJobsBySiteSplitted' :{'hours' :8760, 'plotType' :'Job' }},
# ]
#VOBOXAvailability
# self.commands[ 'VOBOXAvailability' ] = [
# { 'VOBOXAvailability' : {} }
    #   ]
#Reuse clients for the commands
self.clients[ 'GOCDBClient' ] = GOCDBClient()
self.clients[ 'ReportGenerator' ] = RPCClient( 'Accounting/ReportGenerator' )
self.clients[ 'ReportsClient' ] = ReportsClient()
self.clients[ 'ResourceStatusClient' ] = ResourceStatusClient()
self.clients[ 'ResourceManagementClient' ] = ResourceManagementClient()
self.clients[ 'WMSAdministrator' ] = RPCClient( 'WorkloadManagement/WMSAdministrator' )
self.cCaller = CommandCaller
return S_OK()
def loadCommand( self, commandModule, commandDict ):
commandName = commandDict.keys()[ 0 ]
commandArgs = commandDict[ commandName ]
commandTuple = ( '%sCommand' % commandModule, '%sCommand' % commandName )
commandObject = self.cCaller.commandInvocation( commandTuple, pArgs = commandArgs,
clients = self.clients )
if not commandObject[ 'OK' ]:
self.log.error( 'Error initializing %s' % commandName )
return commandObject
commandObject = commandObject[ 'Value' ]
# Set master mode
commandObject.masterMode = True
self.log.info( '%s/%s' % ( commandModule, commandName ) )
return S_OK( commandObject )
def execute( self ):
for commandModule, commandList in self.commands.items():
self.log.info( '%s module initialization' % commandModule )
for commandDict in commandList:
commandObject = self.loadCommand( commandModule, commandDict )
if not commandObject[ 'OK' ]:
self.log.error( commandObject[ 'Message' ] )
continue
commandObject = commandObject[ 'Value' ]
results = commandObject.doCommand()
if not results[ 'OK' ]:
self.log.error( results[ 'Message' ] )
continue
results = results[ 'Value' ]
if not results:
self.log.info( 'Empty results' )
continue
self.log.verbose( 'Command OK Results' )
self.log.verbose( results )
return S_OK()
################################################################################
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
| calancha/DIRAC | ResourceStatusSystem/Agent/CacheFeederAgent.py | Python | gpl-3.0 | 6,141 | 0.033708 |
import logging
from xml.dom.minidom import *
from jinja2 import Environment, Template
from edge.dateutility import DateUtility
from edge.opensearch.response import Response
class TemplateResponse(Response):
def __init__(self):
super(TemplateResponse, self).__init__()
self.env = Environment()
self.env.trim_blocks = True
self.env.autoescape = True
self.variables = {}
self.env.filters['convertISOTime'] = DateUtility.convertISOTime
def setTemplate(self, template):
self.template = self.env.from_string(template)
def generate(self, pretty=False):
logging.debug('TemplateResponse.generate is called.')
if pretty:
try :
xmlStr = self.template.render(self.variables).encode('utf-8').replace('\n', '')
except Exception as e:
logging.debug("Problem generating template " + str(e))
xmlStr = self.template.render({}).encode('utf-8').replace('\n', '')
document = xml.dom.minidom.parseString(xmlStr)
return document.toprettyxml()
else:
return self.template.render(self.variables).replace('\n', '')
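# Illustrative usage sketch (assumed caller code): a response is populated by
# assigning a template string and a variables mapping before generate() runs.
#
#     response = TemplateResponse()
#     response.setTemplate('<doc>{{ title }}</doc>')       # hypothetical template
#     response.variables = {'title': 'Example granule'}    # hypothetical data
#     xml = response.generate(pretty=True)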
| dataplumber/edge | src/main/python/libraries/edge/opensearch/templateresponse.py | Python | apache-2.0 | 1,202 | 0.00416 |
import os
import unittest
from vsg.rules import generate
from vsg import vhdlFile
from vsg.tests import utils
sTestDir = os.path.dirname(__file__)
lFile, eError = vhdlFile.utils.read_vhdlfile(os.path.join(sTestDir, 'rule_015_test_input.vhd'))
lExpected = []
lExpected.append('')
utils.read_file(os.path.join(sTestDir, 'rule_015_test_input.fixed.vhd'), lExpected)
class test_generate_rule(unittest.TestCase):
def setUp(self):
self.oFile = vhdlFile.vhdlFile(lFile)
self.assertIsNone(eError)
def test_rule_015(self):
oRule = generate.rule_015()
self.assertTrue(oRule)
self.assertEqual(oRule.name, 'generate')
self.assertEqual(oRule.identifier, '015')
lExpected = [20, 25, 30]
oRule.analyze(self.oFile)
self.assertEqual(lExpected, utils.extract_violation_lines_from_violation_object(oRule.violations))
def test_fix_rule_015(self):
oRule = generate.rule_015()
oRule.fix(self.oFile)
lActual = self.oFile.get_lines()
self.assertEqual(lExpected, lActual)
oRule.analyze(self.oFile)
self.assertEqual(oRule.violations, [])
| jeremiah-c-leary/vhdl-style-guide | vsg/tests/generate/test_rule_015.py | Python | gpl-3.0 | 1,158 | 0.004318 |
from setuptools import find_packages, setup
setup(
name='blns',
version='0.1.7',
url='https://github.com/danggrianto/big-list-of-naughty-strings',
license='MIT',
author='Daniel Anggrianto',
author_email='[email protected]',
description='Big List of Naughty String. Forked from https://github.com/minimaxir/big-list-of-naughty-strings',
keywords='Big List of Naughty String',
packages=['blns'],
platforms='any',
)
| danggrianto/big-list-of-naughty-strings | setup.py | Python | mit | 458 | 0.002183 |
#!/usr/bin/env python3
# coding: utf-8
#Initial test code for MiSynth Wave Generator
#Opens Wave Files And Cuts And Plays Them As The FPGA will
#Synth plays back 2048 samples at frequency of note
#Effective sample rate is 901,120Hz @ 440Hz
#CURRENTLY A DRAWING LOOP TO BE SOLVED, THANKS WX/PYTHON FOR YOUR
#COMPLETE LACK OF TRANSPARENCY
#ALWAYS USE TKINTER
import wave
import wx
import audiothread
import wavehandle
import sdisp
class MyFrame(wx.Frame):
def __init__(self, parent, title, wavehandle):
wx.Frame.__init__(self, parent, -1, title, size=(1024, 624))
self.wavehandle = wavehandle
self.scale = 8
self.shift = 0
self.drawcnt = 0
self.scope = [0]
# Create the menubar
menuBar = wx.MenuBar()
menu = wx.Menu()
menu.Append(wx.ID_OPEN, "Open\tAlt-O", "Open Wave")
menu.Append(wx.ID_EXIT, "E&xit\tAlt-X", "Exit")
# bind the menu event s
self.Bind(wx.EVT_MENU, self.OnOpenButton, id=wx.ID_OPEN)
self.Bind(wx.EVT_MENU, self.OnQuitButton, id=wx.ID_EXIT)
menuBar.Append(menu, "&Actions")
self.SetMenuBar(menuBar)
self.wavepanel = WavePanel(self, self.getscale, self.setsector)
self.wavepanel.SetBackgroundColour(wx.Colour(32,55,91))
self.scopepanel = ScopePanel(self)
self.scopepanel.SetBackgroundColour(wx.Colour(20,25,20))
self.buttonpanel = wx.Panel(self, -1, pos=(0, 384), size=(1024, 40))
self.textpanel = sdisp.TextPanel(self)
self.timestamp = wx.StaticText(self.wavepanel, -1,
("Time: " + str(0.0)
+ "/" + str(0.0)),
pos=(2, 2),
style=wx.ALIGN_LEFT)
self.timestamp.SetForegroundColour((217, 66, 244))
btnOpen = wx.Button(self.buttonpanel, wx.ID_OPEN, "Open",
pos=(2, 0), size=(80, 40))
btnExport = wx.Button(self.buttonpanel, -1, "Export",
pos=(84, 0), size=(80, 40))
btnQuit = wx.Button(self.buttonpanel, wx.ID_EXIT, "Quit",
pos=(166, 0), size=(80, 40))
self.btnPlay = wx.ToggleButton(self.buttonpanel, -1, "Play",
pos=(943, 0), size=(80, 40))
# bind the button events to handlers
self.Bind(wx.EVT_BUTTON, self.OnOpenButton, btnOpen)
self.Bind(wx.EVT_BUTTON, self.OnExportButton, btnExport)
self.Bind(wx.EVT_BUTTON, self.OnQuitButton, btnQuit)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnPlayButton, self.btnPlay)
self.Bind(wx.EVT_MOUSEWHEEL, self.onMouseWheel)
self.wavepanel.Bind(wx.EVT_PAINT, self.onPaint)
self.contentNotSaved = False
self.fileloaded = False
self.quadrant = -1
self.Centre()
def setsector(self, sector):
self.quadrant = abs(sector)
self.Refresh()
def getscale(self):
return self.scale
def getSample(self, sector):
print("obtaining sample")
if self.quadrant == -1:
self.setsector(1)
sample = self.wavehandle.getaudiodata(self.shift, 0, sector)
return sample
def onPaint(self, event):
self.drawcnt += 1
#print("Drawing" + str(self.drawcnt))
dc = wx.PaintDC(self.wavepanel)
dc.Clear()
totalseconds = self.wavehandle.gettotaltime()
shiftseconds = self.wavehandle.framestoseconds(self.shift)
self.timestamp.SetLabel("Time: " + str(shiftseconds) + "/" + str(
totalseconds))
dc.SetBrush(wx.Brush(wx.Colour(16, 28, 45), wx.SOLID))
dc.DrawRectangle(256, 0, 512, 256)
# Centre Line
pointdata = self.wavehandle.getdrawpoints(self.shift)
for x in range(1, 1024): # Ugly
if (x > 256) and (x < 768):
dc.SetPen(wx.Pen((0, 255, 242), 1, wx.PENSTYLE_SOLID))
else:
dc.SetPen(wx.Pen((183, 204, 163), 1, wx.PENSTYLE_SOLID))
dc.DrawLine(x - 1, pointdata[x - 1], x, pointdata[x])
#dc.DrawPoint(x, pointdata[x])
if (x == 256) or (x == 768):
dc.SetPen(wx.Pen((0, 0, 0), 1, wx.PENSTYLE_DOT))
dc.DrawLine(x, 0, x, 256)
if (x == 496) or (x == 528):
dc.SetPen(wx.Pen((0, 0, 0), 1, wx.PENSTYLE_DOT))
dc.DrawLine(x, 0, x, 256)
dc = wx.PaintDC(self.scopepanel)
dc.Clear()
        dc.SetPen(wx.Pen((255, 0, 0), 1, wx.PENSTYLE_SOLID))  # colour components max out at 255
for x in range(0, 1024):
if len(self.scope) > 1:
p = self.scope[x % len(self.scope)] + 64
else:
p = 64
dc.DrawPoint(x, p)
def OnPlayButton(self, event):
if self.btnPlay.GetValue():
self.audiohandle = audiothread.AudioHandler()
if self.fileloaded:
self.audiohandle.setsample(self.getSample(self.quadrant), 2048)
self.scope = self.audiohandle.getscopesample()
print("sample length: " + str(len(self.scope)))
self.audiohandle.start()
else:
self.audiohandle.stop()
self.audiohandle = None
def onMouseWheel(self, event):
if self.wavepanel.mouseOver:
if self.wavepanel.ctrlDown:
if event.GetWheelRotation() > 0:
if(self.scale > 1):
self.scale = self.scale >> 1
else:
if(self.scale < 2097151):
self.scale = self.scale << 1
self.Refresh()
else:
if event.GetWheelRotation() > 0:
if(self.shift > 0):
self.shift -= 2000
else:
if (self.shift < 10000000):
self.shift += 2000
self.Refresh()
if self.scopepanel.mouseOver:
if event.GetWheelRotation() > 0:
self.audiohandle.setshift(1)
else:
self.audiohandle.setshift(-1)
self.scope = self.audiohandle.getscopesample()
self.Refresh()
def OnOpenButton(self, evt):
#Open file
with wx.FileDialog(self, "Open .wav file.", wildcard="WAV files (*.wav)|*.wav",
style=wx.FD_OPEN | wx.FD_FILE_MUST_EXIST) as fileDialog:
if fileDialog.ShowModal() == wx.ID_CANCEL:
return # the user changed their mind
pathname = fileDialog.GetPath()
try:
with wave.open(pathname, 'r') as file:
self.wavehandle.loadwave(file)
self.Refresh()
self.fileloaded = True
except IOError:
wx.LogError("Cannot open file '%s'." % pathname)
def OnExportButton(self, evt):
print("Export")
def OnQuitButton(self, evt):
self.Close()
class WavePanel(wx.Panel): #just handles mouseover events
def __init__(self, parent, getter, sender):
        wx.Panel.__init__(self, parent, pos=(0, 0), size=(1024, 256))
self.mouseOver = False
self.ctrlDown = False
self.Bind(wx.EVT_ENTER_WINDOW, self.onMouseOver)
self.Bind(wx.EVT_LEAVE_WINDOW, self.onMouseLeave)
self.Bind(wx.EVT_KEY_DOWN, self.onKeyPress)
self.Bind(wx.EVT_KEY_UP, self.onKeyRelease)
self.Bind(wx.EVT_LEFT_DOWN, self.onMouseClick)
self.getter = getter
self.sender = sender
def onMouseClick(self, event):
if self.mouseOver:
x, y = self.ScreenToClient(wx.GetMousePosition())
sector = abs(x // (2048 / self.getter()))
self.sender(sector)
def onMouseOver(self, event):
self.mouseOver = True
def onMouseLeave(self, event):
self.mouseOver = False
def onKeyPress(self, event):
keycode = event.GetKeyCode()
if keycode == wx.WXK_CONTROL:
self.ctrlDown = True
def onKeyRelease(self, event):
keycode = event.GetKeyCode()
if keycode == wx.WXK_CONTROL:
self.ctrlDown = False
class ScopePanel(wx.Panel): #just handles mouseover events
def __init__(self, parent):
wx.Panel.__init__(self, parent, pos=(0, 256), size=(1024, 128))
self.mouseOver = False
self.Bind(wx.EVT_ENTER_WINDOW, self.onMouseOver)
self.Bind(wx.EVT_LEAVE_WINDOW, self.onMouseLeave)
def onMouseOver(self, event):
self.mouseOver = True
def onMouseLeave(self, event):
self.mouseOver = False
class MyApp(wx.App):
def OnInit(self):
waveHandle = wavehandle.WaveHandler()
frame = MyFrame(None, "MiSynth Editor", waveHandle)
self.SetTopWindow(frame)
frame.Show(True)
return True
if __name__ == '__main__':
app = MyApp(redirect=True)
app.MainLoop()
| magicmilo/MiSynth-Wavetable-Generator | main.py | Python | apache-2.0 | 9,015 | 0.003439 |
import setuptools
setuptools.setup(name='txyoga',
version='0',
description='REST toolkit for Twisted',
url='https://github.com/lvh/txyoga',
author='Laurens Van Houtven',
author_email='[email protected]',
packages = setuptools.find_packages(),
requires=['twisted'],
license='ISC',
classifiers=[
"Development Status :: 3 - Alpha",
"Framework :: Twisted",
"License :: OSI Approved :: ISC License (ISCL)",
"Topic :: Internet :: WWW/HTTP",
])
| lvh/txyoga | setup.py | Python | isc | 528 | 0.017045 |
import chainer
import chainer.functions as F
import chainer.links as L
class Cifar10(chainer.Chain):
def __init__(self, n_class, in_ch):
super().__init__(
conv1=L.Convolution2D(in_ch, 32, 5, pad=2),
conv2=L.Convolution2D(32, 32, 5, pad=2),
conv3=L.Convolution2D(32, 64, 5, pad=2),
fc4=F.Linear(1344, 4096),
fc5=F.Linear(4096, n_class),
)
self.train = True
self.n_class = n_class
def __call__(self, x, t):
x.volatile = True
h = F.max_pooling_2d(F.elu(self.conv1(x)), 3, stride=2)
h = F.max_pooling_2d(F.elu(self.conv2(h)), 3, stride=2)
h = F.elu(self.conv3(h))
h.volatile = False
h = F.spatial_pyramid_pooling_2d(h, 3, F.MaxPooling2D)
h = F.dropout(F.elu(self.fc4(h)), ratio=0.5, train=self.train)
h = self.fc5(h)
self.prob = F.softmax(h)
self.loss = F.softmax_cross_entropy(h, t)
self.accuracy = F.accuracy(h, t)
chainer.report({'loss': self.loss, 'accuracy': self.accuracy}, self)
return self.loss
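# Illustrative usage sketch (assumed training code): spatial pyramid pooling
# with height 3 always yields 64 * (1 + 4 + 16) = 1344 features, which is why
# fc4's input size is fixed regardless of the input image resolution.
#
#     import numpy as np
#     from chainer import Variable
#     model = Cifar10(n_class=10, in_ch=3)
#     x = Variable(np.zeros((1, 3, 32, 32), dtype=np.float32))
#     t = Variable(np.zeros((1,), dtype=np.int32))
#     loss = model(x, t)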
| 0shimax/DL-vision | src/net/cifar10.py | Python | mit | 1,111 | 0 |
from fastapi import FastAPI
app = FastAPI(swagger_ui_parameters={"syntaxHighlight": False})
@app.get("/users/{username}")
async def read_user(username: str):
return {"message": f"Hello {username}"}
| tiangolo/fastapi | docs_src/extending_openapi/tutorial003.py | Python | mit | 205 | 0 |
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from collections import defaultdict
# from pprint import pprint
import argparse # Note: python 2.7+
import datetime
import json
import logging
import os
import sys
import requests
from githubinfo import __version__
ORG_REPOS_URL = 'https://api.github.com/orgs/{organization}/repos'
COMMITS_URL = 'https://api.github.com/repos/{owner}/{project}/commits'
BRANCHES_URL = 'https://api.github.com/repos/{owner}/{project}/branches'
# Settings are global and can be modified by some setup/init method.
SETTINGS = {
'auth': None, # Set it to ('username', 'very_secret').
'days': 7,
'organizations': [
'ddsc',
'lizardsystem',
'nens',
],
'extra_projects': [
# ('organization', 'project'),
('reinout', 'buildout'),
('reinout', 'django-rest-framework'),
('reinout', 'serverinfo'),
('reinout', 'z3c.dependencychecker'),
('rvanlaar', 'djangorecipe'),
('zestsoftware', 'zest.releaser'),
],
}
SETTINGS_FILENAME = 'settings.json'
logger = logging.getLogger(__name__)
def since():
"""Return iso-formatted string for github from-that-date query."""
now = datetime.datetime.now()
a_while_ago = now - datetime.timedelta(days=SETTINGS['days'])
return a_while_ago.isoformat()
def grab_json(url, params=None, second_try=False):
"""Return json from URL, including handling pagination."""
auth = SETTINGS['auth']
if isinstance(auth, list):
auth = tuple(auth)
req = requests.get(url, auth=auth, params=params)
if req.status_code == 401 and not second_try:
# Unauthorized. Somehow this happens to me in rare cases.
# Retry it once.
logger.warn("Got a 401 unauthorized on %s, retrying it", url)
return grab_json(url, params=params, second_try=True)
result = req.json()
is_expected_type = (isinstance(result, list) or isinstance(result, dict))
if not is_expected_type and not second_try:
# Wrong type. String error message, probably.
# Retry it once.
logger.warn("Got a wrong type (%r) on %s, retrying it", result, url)
return grab_json(url, params=params, second_try=True)
if req.links.get('next'):
# Paginated content, so we want to grab the rest.
url = req.links['next']['url']
# The assumption is "paginated content means it is a list".
result += grab_json(url, params=params)
return result
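# Illustrative call sketch (assumed organization name): pagination is handled
# transparently, so one call returns every repo of an organization as a list.
#
#     repos = grab_json(ORG_REPOS_URL.format(organization='nens'))
#     names = [repo['name'] for repo in repos]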
def is_testfile(fileinfo):
filepath = fileinfo['filename']
if 'testsettings.py' in filepath:
# This one almost always doesn't have anything to do with
# an added test.
return False
if 'test' in filepath:
return True
if filepath.endswith('.rst') or filepath.endswith('.txt'):
# Possible doctest.
if '>>>' in fileinfo.get('patch', ''):
return True
return False
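# Illustrative sketch: ``fileinfo`` is one entry of a commit's ``files`` list
# as returned by the github API, e.g.
#
#     is_testfile({'filename': 'pkg/tests/test_models.py'})     # -> True
#     is_testfile({'filename': 'docs/usage.rst',
#                  'patch': '+    >>> 1 + 1\n+    2'})          # -> True
#     is_testfile({'filename': 'pkg/testsettings.py'})          # -> False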
def load_custom_settings(settings_file=SETTINGS_FILENAME):
"""Update our default settings with the json found in the settings file.
"""
# Note: settings_file is only a kwarg to make it testable.
if os.path.exists(settings_file):
custom_settings = json.loads(open(settings_file).read())
SETTINGS.update(custom_settings)
class Commit(object):
"""Wrapper around a commit dict from github's API."""
def __init__(self, the_dict):
self.num_testfiles_changed = 0
self.user = the_dict['commit']['committer']['name']
commit_url = the_dict['url']
commit_info = grab_json(commit_url)
for changed_file in commit_info.get('files', []):
if is_testfile(changed_file):
self.num_testfiles_changed += 1
logger.debug("Test file: {}".format(changed_file['filename']))
@property
def is_testcommit(self):
return bool(self.num_testfiles_changed)
class TestCommitCounter(object):
def __init__(self):
self.num_commits = 0
self.num_testcommits = 0
self.testfiles_changed = 0
def __cmp__(self, other):
return cmp((-self.num_testcommits, self.num_commits),
(-other.num_testcommits, other.num_commits))
def add_commit(self, commit):
self.num_commits += 1
if commit.is_testcommit:
self.num_testcommits += 1
self.testfiles_changed += commit.num_testfiles_changed
@property
def percentage(self):
"""Return percentage of test commits to total.
Return it as a string including parentheses.
If there are no test commits, omit the percentage.
"""
if not self.num_testcommits:
return ''
result = str(int(100.0 * self.num_testcommits / self.num_commits))
return '({}%)'.format(result)
def print_info(self):
msg = "{name}: {tested} {percentage}"
print(msg.format(name=self.name,
tested=self.num_testcommits,
percentage=self.percentage))
def as_dict(self):
percentage = self.percentage.replace('(', '').replace(')', '') # Sigh.
return dict(name=self.name,
num_testcommits=self.num_testcommits,
percentage=percentage)
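# Worked example: 3 test commits out of 10 render ``percentage`` as '(30%)';
# with zero test commits it renders as '' so the report line stays short.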
class Project(TestCommitCounter):
def __init__(self, owner, project, users,
restrict_to_known_users=False):
super(Project, self).__init__()
self.owner = owner
self.name = project
self.users = users
self.restrict_to_known_users = restrict_to_known_users
def load(self):
logger.debug("Loading project {}...".format(self.name))
self.branch_SHAs = self.load_branches()
self.commits = self.load_project_commits()
self.load_individual_commits()
def load_branches(self):
"""Return SHAs of commits for branches."""
url = BRANCHES_URL.format(owner=self.owner, project=self.name)
branches = grab_json(url)
if not isinstance(branches, list):
logger.warn("Expected list, got %r, retrying.", branches)
return self.load_branches()
return [branch['commit']['sha'] for branch in branches]
def load_project_commits(self):
result = []
url = COMMITS_URL.format(owner=self.owner, project=self.name)
for branch_SHA in self.branch_SHAs:
result += grab_json(url, params={'since': since(),
'sha': branch_SHA})
return result
def load_individual_commits(self):
for commit in self.commits:
if not isinstance(commit, dict):
logger.warn("dict in commit isn't a dict: %r" % commit)
logger.debug("the full list of commits:")
logger.debug(self.commits)
logger.warn("Continuing anyway...")
continue
the_commit = Commit(commit)
if self.restrict_to_known_users:
if the_commit.user not in self.users:
continue
self.users[the_commit.user].add_commit(the_commit)
self.add_commit(the_commit)
@property
def is_active(self):
return bool(self.num_commits)
class User(TestCommitCounter):
name = None # We set that from within the commits.
def add_commit(self, commit):
if not self.name:
self.name = commit.user
TestCommitCounter.add_commit(self, commit)
def show_config():
"""Print the current configuration
TODO: add some usage instructions.
"""
if not os.path.exists(SETTINGS_FILENAME):
logger.warn("""
%s does not exist. See https://pypi.python.org/pypi/githubinfo for
a configuration explanation.
The defaults are probably not what you want :-)""")
logger.info("The current settings are:")
print(json.dumps(SETTINGS, indent=2))
sys.exit(0)
def parse_commandline():
"""Parse commandline options and set up logging.
"""
parser = argparse.ArgumentParser(
description='Print number of test-related github commits.')
parser.add_argument('-v',
'--verbose',
action='store_true',
help="make logging more verbose",
dest='verbose')
parser.add_argument('--json-output',
help="export results as json to [FILENAME]",
metavar='FILENAME',
dest='json_filename')
parser.add_argument('--show-config',
action='store_true',
help="show the current configuration",
dest='show_config')
parser.add_argument('--version',
action='version',
version='%(prog)s ' + __version__)
args = parser.parse_args()
loglevel = args.verbose and logging.DEBUG or logging.INFO
logging.basicConfig(level=loglevel,
format="%(levelname)s: %(message)s")
# And... shut up the ``requests`` library's logging.
requests_logger = logging.getLogger("requests")
requests_logger.setLevel(logging.WARNING)
if args.show_config:
show_config()
return args
def collect_info():
"""Return collected info on projects and users.
"""
users = defaultdict(User)
projects = []
for organization in SETTINGS['organizations']:
logger.info("Looking for projects in organization %s...",
organization)
url = ORG_REPOS_URL.format(organization=organization)
repos = grab_json(url)
project_names = [repo['name'] for repo in repos]
for project_name in project_names:
project = Project(organization, project_name, users)
project.load()
if project.is_active:
projects.append(project)
for (organization, project_name) in SETTINGS['extra_projects']:
project = Project(organization, project_name, users,
restrict_to_known_users=True)
project.load()
if project.is_active:
projects.append(project)
users = users.values() # Defaultdict isn't handy anymore here.
users.sort()
projects.sort()
return (projects, users)
def main():
load_custom_settings()
args = parse_commandline()
projects, users = collect_info()
print("""
Test statistics
===============
We want more and better testing. For a quick and dirty quantity
indication ('more'), here are the commits that have the string
'test' in of of the commit's touched filenames.
Period: {period} days.
Github organizations that I queried: {orgs}
Projects sorted by amount of commits with tests
-----------------------------------------------
""".format(period=SETTINGS['days'],
orgs=', '.join(SETTINGS['organizations'])))
for project in projects:
project.print_info()
print("""
Committers sorted by amount of commits with tests
-------------------------------------------------
""")
for user in users:
user.print_info()
if args.json_filename:
output = {'projects': [project.as_dict() for project in projects],
'users': [user.as_dict() for user in users]}
open(args.json_filename, 'w').write(json.dumps(output, indent=2))
logger.info("Wrote results to %s", args.json_filename)
if __name__ == '__main__':
main()
| nens/githubinfo | githubinfo/commits.py | Python | gpl-3.0 | 11,594 | 0.000086 |
#!/usr/bin/python3.4
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
import sys
from glob import glob
from setuptools import setup
NAME = "pycopia3-process"
VERSION = "1.0"
if sys.platform not in ("win32", "cli"):
DATA_FILES = [
('/etc/pycopia', glob("etc/*")),
]
else:
DATA_FILES = []
setup(name=NAME, version=VERSION,
namespace_packages=["pycopia"],
packages=["pycopia"],
test_suite="test.ProcessTests",
# install_requires=['pycopia-core>=1.0.dev-r138,==dev'],
data_files=DATA_FILES,
description="Modules for running, interacting with, and managing processes.", # noqa
long_description=open("README.md").read(),
license="LGPL",
author="Keith Dart",
keywords="pycopia framework",
url="http://www.pycopia.net/",
classifiers=["Operating System :: POSIX",
"Topic :: Software Development :: Libraries :: Python Modules", # noqa
"Topic :: System :: Operating System",
"Intended Audience :: Developers"],
)
| kdart/pycopia3 | process/setup.py | Python | apache-2.0 | 1,057 | 0 |
# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License'). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the 'license' file accompanying this file. This file is
# distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from s3transfer.exceptions import InvalidSubscriberMethodError
from s3transfer.subscribers import BaseSubscriber
from tests import unittest
class ExtraMethodsSubscriber(BaseSubscriber):
def extra_method(self):
return 'called extra method'
class NotCallableSubscriber(BaseSubscriber):
on_done = 'foo'
class NoKwargsSubscriber(BaseSubscriber):
def on_done(self):
pass
class OverrideMethodSubscriber(BaseSubscriber):
def on_queued(self, **kwargs):
return kwargs
class OverrideConstructorSubscriber(BaseSubscriber):
def __init__(self, arg1, arg2):
self.arg1 = arg1
self.arg2 = arg2
class TestSubscribers(unittest.TestCase):
def test_can_instantiate_base_subscriber(self):
try:
BaseSubscriber()
except InvalidSubscriberMethodError:
self.fail('BaseSubscriber should be instantiable')
def test_can_call_base_subscriber_method(self):
subscriber = BaseSubscriber()
try:
subscriber.on_done(future=None)
except Exception as e:
self.fail(
'Should be able to call base class subscriber method. '
'instead got: %s' % e
)
def test_subclass_can_have_and_call_additional_methods(self):
subscriber = ExtraMethodsSubscriber()
self.assertEqual(subscriber.extra_method(), 'called extra method')
def test_can_subclass_and_override_method_from_base_subscriber(self):
subscriber = OverrideMethodSubscriber()
# Make sure that the overridden method is called
self.assertEqual(subscriber.on_queued(foo='bar'), {'foo': 'bar'})
def test_can_subclass_and_override_constructor_from_base_class(self):
subscriber = OverrideConstructorSubscriber('foo', arg2='bar')
# Make sure you can create a custom constructor.
self.assertEqual(subscriber.arg1, 'foo')
self.assertEqual(subscriber.arg2, 'bar')
def test_invalid_arguments_in_constructor_of_subclass_subscriber(self):
# The override constructor should still have validation of
# constructor args.
with self.assertRaises(TypeError):
OverrideConstructorSubscriber()
def test_not_callable_in_subclass_subscriber_method(self):
with self.assertRaisesRegex(
InvalidSubscriberMethodError, 'must be callable'
):
NotCallableSubscriber()
def test_no_kwargs_in_subclass_subscriber_method(self):
with self.assertRaisesRegex(
InvalidSubscriberMethodError, 'must accept keyword'
):
NoKwargsSubscriber()
| boto/s3transfer | tests/unit/test_subscribers.py | Python | apache-2.0 | 3,197 | 0 |
from interval import interval, inf, imath, fpu
from complexinterval import ComplexInterval, _one, _zero
from complexpolynomial import ComplexPolynomial
class Newton:
def __init__(self, start, poly):
self.start = start
self.poly = poly
self.iterates = 0
self.deriv = poly.derive()
self.step = start
def iterate(self):
"""
Performs one Newton iteration, returns change between values.
"""
self.iterates += 1
x = self.step.midpoint()
fx = self.poly(x)
## iterate on derivative
## self.deriv = self.deriv.derive()
self.step = x - (fx / self.deriv(x))
## return the change
diff = x - self.step
return diff
def iterate_until(self, res = 10**-6, max_iterates = 20):
"""
Iterates until at resolution or until maximum number
of iterations has been reached. Returns True if convergence
achieved, returns False otherwise.
"""
res_box = ComplexInterval(interval([res, -res]), interval([res, -res]))
while (self.iterates < max_iterates - 1):
if self.iterate() in res_box:
return True
if self.iterate() in res_box:
return True
return False
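    # Illustrative sketch (coefficient order assumed index == power, as in the
    # tests below): iterating on p(z) = 1 + z**2 from a start box near i
    # should converge to the root 0 + 1i.
    #
    #     one = ComplexInterval(interval([1]), interval([0]))
    #     p = ComplexPolynomial([one, _zero(), one])
    #     n = Newton(ComplexInterval(interval([0.5]), interval([1.5])), p)
    #     n.iterate_until(res=1e-9)    # -> True on convergence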
def __str__(self):
"""
Returns string representation
"""
return "Newton's Iterator\n" + "Start: " + str(self.start) + "\nFunction: " + str(self.poly)
def main():
print("Testing Newton")
print("Testing Complex Polynomials")
print("----------------------------")
xa = interval([1, 2])
xb = interval([5, 6])
x = ComplexInterval(xa, xb)
ya = interval([4, 7])
yb = interval([2, 3])
y = ComplexInterval(ya, yb)
wa = interval([2, 2])
wb = interval([3, 3])
w = ComplexInterval(wa, wb)
za = interval([4, 4])
zb = interval([5, 5])
z = ComplexInterval(za, zb)
a_0_a = interval([1, 1])
a_0_b = interval([5, 5])
a_0 = ComplexInterval(a_0_a, a_0_b)
a_1_a = interval([1, 1])
a_1_b = interval([5, 5])
a_1 = ComplexInterval(a_1_a, a_1_b)
a_2_a = interval([3, 3])
a_2_b = interval([2, 2])
a_2 = ComplexInterval(a_2_a, a_2_b)
a_3_a = interval([7, 7])
a_3_b = interval([-4, -4])
a_3 = ComplexInterval(a_3_a, a_3_b)
a_4_a = interval([-6, -6])
a_4_b = interval([1, 1])
a_4 = ComplexInterval(a_4_a, a_4_b)
a_5 = ComplexInterval(interval([2]), interval([0]))
a_6 = ComplexInterval(interval([2]), interval([0]))
coeffs = [a_0, a_1, a_2, a_3, a_4, a_5, a_6]
print("Testing Complex Constructor")
print("----------------------------")
poly_1 = ComplexPolynomial(coeffs)
print(poly_1)
poly_2 = ComplexPolynomial([_zero(), a_4])
print(poly_2)
poly_3 = ComplexPolynomial([a_5, a_6, a_3, a_1, a_0])
print(poly_3)
print("============================")
print("Testing Evaluation")
print("----------------------------")
print(poly_1(w))
print(poly_1(_one()))
print(poly_1(_zero()))
print("")
print(poly_2(w))
print(poly_2(_one()))
print(poly_2(_zero()))
print("")
print(poly_3(w))
print(poly_3(_one()))
print(poly_3(_zero()))
print("============================")
print("Derivation")
print("----------------------------")
print(poly_1.derive())
print(poly_1.derive().derive())
print(poly_1.derive().derive().derive())
print("")
print(poly_2.derive())
print(poly_2.derive().derive())
print("")
print(poly_3.derive())
print(poly_3.derive().derive())
print("============================")
print("Newton's Method Constructor")
print("----------------------------")
start1 = ComplexInterval(interval([0]), interval([0]))
start2 = ComplexInterval(interval([1]), interval([1]))
start3 = ComplexInterval(interval([0]), interval([0]))
n_1 = Newton(start1, poly_1)
n_2 = Newton(start2, poly_2)
n_3 = Newton(start3, poly_3)
print(n_1)
print("")
print(n_2)
print("")
print(n_3)
print("")
print("============================")
print("Testing Iteration")
print("----------------------------")
for i in range(10):
print(n_1.iterate())
print("----------------------------")
for i in range(10):
print(n_2.iterate())
print("----------------------------")
for i in range(10):
print(n_3.iterate())
# print(fpu.isnan(n_3.iterate().a))
print("============================")
print("Testing convergence")
print("----------------------------")
print(n_1.iterate_until())
print("----------------------------")
print(n_2.iterate_until())
print("----------------------------")
print(n_3.iterate_until())
# print(fpu.isnan(n_3.iterate().a))
print("============================")
if __name__ == "__main__":
main() | yuanagain/seniorthesis | src/intervals/newton.py | Python | mit | 4,396 | 0.036624 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2018-03-08 07:20
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('gardens', '0025_auto_20180216_1951'),
]
operations = [
migrations.AlterModelOptions(
name='worktype',
options={'ordering': ('position',), 'verbose_name': 'Work Type', 'verbose_name_plural': 'Work Types'},
),
migrations.AddField(
model_name='worktype',
name='position',
field=models.PositiveIntegerField(default=0),
),
]
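# The corresponding model would look roughly like this (a sketch; the actual
# WorkType model lives in gardens/models.py and may differ in detail):
#
#   class WorkType(models.Model):
#       position = models.PositiveIntegerField(default=0)
#
#       class Meta:
#           ordering = ('position',)
#           verbose_name = 'Work Type'
#           verbose_name_plural = 'Work Types'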
| bengosney/rhgd3 | gardens/migrations/0026_auto_20180308_0720.py | Python | gpl-3.0 | 653 | 0.001531 |
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from phd/calc_serviceRequest.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class calc_serviceRequest(genpy.Message):
_md5sum = "504533770c671b8893346f8f23298fee"
_type = "phd/calc_serviceRequest"
_has_header = False #flag to mark the presence of a Header object
_full_text = """int32[] pre_ids
int32[] post_ids
string datum
string location
"""
__slots__ = ['pre_ids','post_ids','datum','location']
_slot_types = ['int32[]','int32[]','string','string']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
pre_ids,post_ids,datum,location
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(calc_serviceRequest, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.pre_ids is None:
self.pre_ids = []
if self.post_ids is None:
self.post_ids = []
if self.datum is None:
self.datum = ''
if self.location is None:
self.location = ''
else:
self.pre_ids = []
self.post_ids = []
self.datum = ''
self.location = ''
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
length = len(self.pre_ids)
buff.write(_struct_I.pack(length))
pattern = '<%si'%length
buff.write(struct.pack(pattern, *self.pre_ids))
length = len(self.post_ids)
buff.write(_struct_I.pack(length))
pattern = '<%si'%length
buff.write(struct.pack(pattern, *self.post_ids))
_x = self.datum
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self.location
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%si'%length
start = end
end += struct.calcsize(pattern)
self.pre_ids = struct.unpack(pattern, str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%si'%length
start = end
end += struct.calcsize(pattern)
self.post_ids = struct.unpack(pattern, str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.datum = str[start:end].decode('utf-8')
else:
self.datum = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.location = str[start:end].decode('utf-8')
else:
self.location = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
length = len(self.pre_ids)
buff.write(_struct_I.pack(length))
pattern = '<%si'%length
buff.write(self.pre_ids.tostring())
length = len(self.post_ids)
buff.write(_struct_I.pack(length))
pattern = '<%si'%length
buff.write(self.post_ids.tostring())
_x = self.datum
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self.location
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%si'%length
start = end
end += struct.calcsize(pattern)
self.pre_ids = numpy.frombuffer(str[start:end], dtype=numpy.int32, count=length)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%si'%length
start = end
end += struct.calcsize(pattern)
self.post_ids = numpy.frombuffer(str[start:end], dtype=numpy.int32, count=length)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.datum = str[start:end].decode('utf-8')
else:
self.datum = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.location = str[start:end].decode('utf-8')
else:
self.location = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
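# Round-trip sketch for the request type above (hypothetical values; BytesIO
# stands in for the transport buffer normally managed by rospy):
#
#   from io import BytesIO
#   req = calc_serviceRequest(pre_ids=[1, 2], post_ids=[3], datum='wgs84',
#                             location='site_a')
#   buf = BytesIO()
#   req.serialize(buf)
#   decoded = calc_serviceRequest().deserialize(buf.getvalue())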
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from phd/calc_serviceResponse.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import std_msgs.msg
import sensor_msgs.msg
class calc_serviceResponse(genpy.Message):
_md5sum = "d638895a709be2cef85df359cc39f0dc"
_type = "phd/calc_serviceResponse"
_has_header = False #flag to mark the presence of a Header object
_full_text = """sensor_msgs/PointCloud2 cloud_out
================================================================================
MSG: sensor_msgs/PointCloud2
# This message holds a collection of N-dimensional points, which may
# contain additional information such as normals, intensity, etc. The
# point data is stored as a binary blob, its layout described by the
# contents of the "fields" array.
# The point cloud data may be organized 2d (image-like) or 1d
# (unordered). Point clouds organized as 2d images may be produced by
# camera depth sensors such as stereo or time-of-flight.
# Time of sensor data acquisition, and the coordinate frame ID (for 3d
# points).
Header header
# 2D structure of the point cloud. If the cloud is unordered, height is
# 1 and width is the length of the point cloud.
uint32 height
uint32 width
# Describes the channels and their layout in the binary data blob.
PointField[] fields
bool is_bigendian # Is this data bigendian?
uint32 point_step # Length of a point in bytes
uint32 row_step # Length of a row in bytes
uint8[] data # Actual point data, size is (row_step*height)
bool is_dense # True if there are no invalid points
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
# 0: no frame
# 1: global frame
string frame_id
================================================================================
MSG: sensor_msgs/PointField
# This message holds the description of one point entry in the
# PointCloud2 message format.
uint8 INT8 = 1
uint8 UINT8 = 2
uint8 INT16 = 3
uint8 UINT16 = 4
uint8 INT32 = 5
uint8 UINT32 = 6
uint8 FLOAT32 = 7
uint8 FLOAT64 = 8
string name # Name of field
uint32 offset # Offset from start of point struct
uint8 datatype # Datatype enumeration, see above
uint32 count # How many elements in the field
"""
__slots__ = ['cloud_out']
_slot_types = ['sensor_msgs/PointCloud2']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
cloud_out
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(calc_serviceResponse, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.cloud_out is None:
self.cloud_out = sensor_msgs.msg.PointCloud2()
else:
self.cloud_out = sensor_msgs.msg.PointCloud2()
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_struct_3I.pack(_x.cloud_out.header.seq, _x.cloud_out.header.stamp.secs, _x.cloud_out.header.stamp.nsecs))
_x = self.cloud_out.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_struct_2I.pack(_x.cloud_out.height, _x.cloud_out.width))
length = len(self.cloud_out.fields)
buff.write(_struct_I.pack(length))
for val1 in self.cloud_out.fields:
_x = val1.name
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = val1
buff.write(_struct_IBI.pack(_x.offset, _x.datatype, _x.count))
_x = self
buff.write(_struct_B2I.pack(_x.cloud_out.is_bigendian, _x.cloud_out.point_step, _x.cloud_out.row_step))
_x = self.cloud_out.data
length = len(_x)
# - if encoded as a list instead, serialize as bytes instead of string
if type(_x) in [list, tuple]:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
buff.write(_struct_B.pack(self.cloud_out.is_dense))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.cloud_out is None:
self.cloud_out = sensor_msgs.msg.PointCloud2()
end = 0
_x = self
start = end
end += 12
(_x.cloud_out.header.seq, _x.cloud_out.header.stamp.secs, _x.cloud_out.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.cloud_out.header.frame_id = str[start:end].decode('utf-8')
else:
self.cloud_out.header.frame_id = str[start:end]
_x = self
start = end
end += 8
(_x.cloud_out.height, _x.cloud_out.width,) = _struct_2I.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.cloud_out.fields = []
for i in range(0, length):
val1 = sensor_msgs.msg.PointField()
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.name = str[start:end].decode('utf-8')
else:
val1.name = str[start:end]
_x = val1
start = end
end += 9
(_x.offset, _x.datatype, _x.count,) = _struct_IBI.unpack(str[start:end])
self.cloud_out.fields.append(val1)
_x = self
start = end
end += 9
(_x.cloud_out.is_bigendian, _x.cloud_out.point_step, _x.cloud_out.row_step,) = _struct_B2I.unpack(str[start:end])
self.cloud_out.is_bigendian = bool(self.cloud_out.is_bigendian)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
self.cloud_out.data = str[start:end]
start = end
end += 1
(self.cloud_out.is_dense,) = _struct_B.unpack(str[start:end])
self.cloud_out.is_dense = bool(self.cloud_out.is_dense)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_struct_3I.pack(_x.cloud_out.header.seq, _x.cloud_out.header.stamp.secs, _x.cloud_out.header.stamp.nsecs))
_x = self.cloud_out.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_struct_2I.pack(_x.cloud_out.height, _x.cloud_out.width))
length = len(self.cloud_out.fields)
buff.write(_struct_I.pack(length))
for val1 in self.cloud_out.fields:
_x = val1.name
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = val1
buff.write(_struct_IBI.pack(_x.offset, _x.datatype, _x.count))
_x = self
buff.write(_struct_B2I.pack(_x.cloud_out.is_bigendian, _x.cloud_out.point_step, _x.cloud_out.row_step))
_x = self.cloud_out.data
length = len(_x)
# - if encoded as a list instead, serialize as bytes instead of string
if type(_x) in [list, tuple]:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
buff.write(_struct_B.pack(self.cloud_out.is_dense))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
if self.cloud_out is None:
self.cloud_out = sensor_msgs.msg.PointCloud2()
end = 0
_x = self
start = end
end += 12
(_x.cloud_out.header.seq, _x.cloud_out.header.stamp.secs, _x.cloud_out.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.cloud_out.header.frame_id = str[start:end].decode('utf-8')
else:
self.cloud_out.header.frame_id = str[start:end]
_x = self
start = end
end += 8
(_x.cloud_out.height, _x.cloud_out.width,) = _struct_2I.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.cloud_out.fields = []
for i in range(0, length):
val1 = sensor_msgs.msg.PointField()
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.name = str[start:end].decode('utf-8')
else:
val1.name = str[start:end]
_x = val1
start = end
end += 9
(_x.offset, _x.datatype, _x.count,) = _struct_IBI.unpack(str[start:end])
self.cloud_out.fields.append(val1)
_x = self
start = end
end += 9
(_x.cloud_out.is_bigendian, _x.cloud_out.point_step, _x.cloud_out.row_step,) = _struct_B2I.unpack(str[start:end])
self.cloud_out.is_bigendian = bool(self.cloud_out.is_bigendian)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
self.cloud_out.data = str[start:end]
start = end
end += 1
(self.cloud_out.is_dense,) = _struct_B.unpack(str[start:end])
self.cloud_out.is_dense = bool(self.cloud_out.is_dense)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
_struct_IBI = struct.Struct("<IBI")
_struct_3I = struct.Struct("<3I")
_struct_B = struct.Struct("<B")
_struct_2I = struct.Struct("<2I")
_struct_B2I = struct.Struct("<B2I")
class calc_service(object):
_type = 'phd/calc_service'
_md5sum = '3e940f6cd215fa9d81d97d506548ddde'
_request_class = calc_serviceRequest
_response_class = calc_serviceResponse
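# Client-side usage sketch (assumes a running ROS master and a node that
# advertises this service as 'calc_service'):
#
#   import rospy
#   rospy.wait_for_service('calc_service')
#   calc = rospy.ServiceProxy('calc_service', calc_service)
#   resp = calc(pre_ids=[1], post_ids=[2], datum='wgs84', location='site_a')
#   cloud = resp.cloud_out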
| mikewrock/phd_backup_full | devel/lib/python2.7/dist-packages/phd/srv/_calc_service.py | Python | apache-2.0 | 19,928 | 0.018567 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2018-12-21 16:16
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('essauth', '0015_proxypermission'),
]
operations = [
migrations.AddField(
model_name='userprofile',
name='language',
field=models.CharField(default='en', max_length=10),
),
]
| ESSolutions/ESSArch_Core | ESSArch_Core/auth/migrations/0016_auto_20181221_1716.py | Python | gpl-3.0 | 505 | 0 |
import google.appengine.ext.ndb as ndb
import json
import logging
import datetime
from flask import Blueprint
from flask import Response
from natsort import natsorted
from sparkprs import cache, app
from sparkprs.models import Issue, JIRAIssue
prs = Blueprint('prs', __name__)
@prs.route('/search-open-prs')
@cache.cached(timeout=60)
def search_open_prs():
prs = Issue.query(Issue.state == "open").order(-Issue.updated_at).fetch()
return search_prs(prs)
@prs.route('/search-stale-prs')
@cache.cached(timeout=60)
def search_stale_prs():
    issue_query = ndb.AND(Issue.state == "open",
                          Issue.updated_at < datetime.datetime.today() - datetime.timedelta(days=30))
    stale_prs = Issue.query(issue_query).order(-Issue.updated_at).fetch()
    return search_prs(stale_prs)
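# Further filtered views can reuse search_prs in the same way; e.g. a
# hypothetical endpoint for PRs untouched for a week:
#
# @prs.route('/search-week-old-prs')
# @cache.cached(timeout=60)
# def search_week_old_prs():
#     cutoff = datetime.datetime.today() - datetime.timedelta(days=7)
#     query = ndb.AND(Issue.state == "open", Issue.updated_at < cutoff)
#     return search_prs(Issue.query(query).order(-Issue.updated_at).fetch())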
def search_prs(prs):
json_dicts = []
for pr in prs:
try:
last_jenkins_comment_dict = None
if pr.last_jenkins_comment:
last_jenkins_comment_dict = {
'body': pr.last_jenkins_comment['body'],
'user': {'login': pr.last_jenkins_comment['user']['login']},
'html_url': pr.last_jenkins_comment['html_url'],
'date': [pr.last_jenkins_comment['created_at']],
}
d = {
'parsed_title': pr.parsed_title,
'number': pr.number,
'updated_at': str(pr.updated_at),
'user': pr.user,
'state': pr.state,
'components': pr.components,
'lines_added': pr.lines_added,
'lines_deleted': pr.lines_deleted,
'lines_changed': pr.lines_changed,
'is_mergeable': pr.is_mergeable,
'commenters': [
{
'username': u,
'data': d,
'is_committer': u in app.config.get('COMMITTER_GITHUB_USERNAMES', []),
} for (u, d) in pr.commenters],
'last_jenkins_outcome': pr.last_jenkins_outcome,
'last_jenkins_comment': last_jenkins_comment_dict,
}
# Use the first JIRA's information to populate the "Priority" and "Issue Type" columns:
jiras = pr.parsed_title["jiras"]
if jiras:
d['closed_jiras'] = []
first_jira = JIRAIssue.get_by_id("%s-%i" % (app.config['JIRA_PROJECT'], jiras[0]))
if first_jira:
d['jira_priority_name'] = first_jira.priority_name
d['jira_priority_icon_url'] = first_jira.priority_icon_url
d['jira_issuetype_name'] = first_jira.issuetype_name
d['jira_issuetype_icon_url'] = first_jira.issuetype_icon_url
d['jira_shepherd_display_name'] = first_jira.shepherd_display_name
# If a pull request is linked against multiple JIRA issues, then the target
# versions should be union of the individual issues' target versions:
target_versions = set()
for jira_number in jiras:
jira = JIRAIssue.get_by_id("%s-%i" % (app.config['JIRA_PROJECT'], jira_number))
if jira:
target_versions.update(jira.target_versions)
if jira.is_closed:
d['closed_jiras'].append(jira_number)
if target_versions:
d['jira_target_versions'] = natsorted(target_versions)
json_dicts.append(d)
        except Exception:
logging.error("Exception while processing PR #%i", pr.number)
raise
response = Response(json.dumps(json_dicts), mimetype='application/json')
return response
| databricks/spark-pr-dashboard | sparkprs/controllers/prs.py | Python | apache-2.0 | 3,838 | 0.002866 |
from . import keyboards
| chancegrissom/qmk_firmware | lib/python/qmk/cli/list/__init__.py | Python | gpl-2.0 | 24 | 0 |
"""This component provides HA sensor support for Ring Door Bell/Chimes."""
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.components.binary_sensor import (
PLATFORM_SCHEMA, BinarySensorDevice)
from homeassistant.const import (
ATTR_ATTRIBUTION, CONF_ENTITY_NAMESPACE, CONF_MONITORED_CONDITIONS)
import homeassistant.helpers.config_validation as cv
from . import ATTRIBUTION, DATA_RING, DEFAULT_ENTITY_NAMESPACE
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(seconds=10)
# Sensor types: Name, category, device_class
SENSOR_TYPES = {
'ding': ['Ding', ['doorbell'], 'occupancy'],
'motion': ['Motion', ['doorbell', 'stickup_cams'], 'motion'],
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_ENTITY_NAMESPACE, default=DEFAULT_ENTITY_NAMESPACE):
cv.string,
vol.Required(CONF_MONITORED_CONDITIONS, default=list(SENSOR_TYPES)):
vol.All(cv.ensure_list, [vol.In(SENSOR_TYPES)]),
})
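# Example configuration.yaml entry accepted by the schema above (the ring
# component itself must already be configured):
#
#   binary_sensor:
#     - platform: ring
#       monitored_conditions:
#         - ding
#         - motion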
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up a sensor for a Ring device."""
ring = hass.data[DATA_RING]
sensors = []
for device in ring.doorbells: # ring.doorbells is doing I/O
for sensor_type in config[CONF_MONITORED_CONDITIONS]:
if 'doorbell' in SENSOR_TYPES[sensor_type][1]:
sensors.append(RingBinarySensor(hass, device, sensor_type))
for device in ring.stickup_cams: # ring.stickup_cams is doing I/O
for sensor_type in config[CONF_MONITORED_CONDITIONS]:
if 'stickup_cams' in SENSOR_TYPES[sensor_type][1]:
sensors.append(RingBinarySensor(hass, device, sensor_type))
add_entities(sensors, True)
class RingBinarySensor(BinarySensorDevice):
"""A binary sensor implementation for Ring device."""
def __init__(self, hass, data, sensor_type):
"""Initialize a sensor for Ring device."""
super(RingBinarySensor, self).__init__()
self._sensor_type = sensor_type
self._data = data
self._name = "{0} {1}".format(
self._data.name, SENSOR_TYPES.get(self._sensor_type)[0])
self._device_class = SENSOR_TYPES.get(self._sensor_type)[2]
self._state = None
self._unique_id = '{}-{}'.format(self._data.id, self._sensor_type)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def is_on(self):
"""Return True if the binary sensor is on."""
return self._state
@property
def device_class(self):
"""Return the class of the binary sensor."""
return self._device_class
@property
def unique_id(self):
"""Return a unique ID."""
return self._unique_id
@property
def device_state_attributes(self):
"""Return the state attributes."""
attrs = {}
attrs[ATTR_ATTRIBUTION] = ATTRIBUTION
attrs['device_id'] = self._data.id
attrs['firmware'] = self._data.firmware
attrs['timezone'] = self._data.timezone
if self._data.alert and self._data.alert_expires_at:
attrs['expires_at'] = self._data.alert_expires_at
attrs['state'] = self._data.alert.get('state')
return attrs
def update(self):
"""Get the latest data and updates the state."""
self._data.check_alerts()
if self._data.alert:
if self._sensor_type == self._data.alert.get('kind') and \
self._data.account_id == self._data.alert.get('doorbot_id'):
self._state = True
else:
self._state = False
| jnewland/home-assistant | homeassistant/components/ring/binary_sensor.py | Python | apache-2.0 | 3,663 | 0 |
#!/usr/bin/env python
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple Google App Engine wiki application.
The main distinguishing feature is that editing is in a WYSIWYG editor
rather than a text editor with special syntax. This application uses
google.appengine.api.datastore to access the datastore. This is a
lower-level API on which google.appengine.ext.db depends.
"""
#__author__ = 'Bret Taylor'
__author__ = 'Elliot Foster'
import cgi
import datetime
import os
import re
import sys
import urllib
import urlparse
import logging
import wikimarkup
from google.appengine.api import datastore
from google.appengine.api import datastore_types
from google.appengine.api import users
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp.util import run_wsgi_app
# for Data::Dumper-like stuff
#import pprint
#pp = pprint.PrettyPrinter(indent=4)
#lib_path = os.path.join(os.path.dirname(__file__), 'lib')
#sys.path.append(lib_path)
_DEBUG = True
class BaseRequestHandler(webapp.RequestHandler):
def generate(self, template_name, template_values={}):
values = {
'request': self.request,
'user': users.get_current_user(),
'login_url': users.create_login_url(self.request.uri),
'logout_url': users.create_logout_url(self.request.uri),
'application_name': 'lilwiki',
}
values.update(template_values)
directory = os.path.dirname(__file__)
path = os.path.join(directory, os.path.join('templates', template_name))
self.response.out.write(template.render(path, values, debug=_DEBUG))
def head(self, *args):
pass
def get(self, *args):
pass
def post(self, *args):
pass
class MainPageHandler(BaseRequestHandler):
def get(self):
user = users.get_current_user();
if not user:
self.redirect(users.create_login_url(self.request.uri))
return
query = datastore.Query('Page')
query['owner'] = user
query.Order(('modified', datastore.Query.DESCENDING))
page_list = []
for entity in query.Get(100):
page_list.append(Page(entity['name'], entity))
self.generate('index.html', {
'pages': page_list,
})
class PageRequestHandler(BaseRequestHandler):
def get(self, page_name):
# if we don't have a user, we won't know which namespace to use (for now)
user = users.get_current_user()
if not user:
            self.redirect(users.create_login_url(self.request.uri))
            return
page_name = urllib.unquote(page_name)
page = Page.load(page_name, user)
modes = ['view', 'edit']
mode = self.request.get('mode')
if not page.entity:
logging.debug('page "' + page_name + '" not found, creating new instance.')
mode = 'edit'
        if mode not in modes:
logging.debug('defaulting mode to view')
mode = 'view'
self.generate(mode + '.html', {
'page': page,
})
def post(self, page_name):
user = users.get_current_user()
if not user:
self.redirect(users.create_login_url(self.request.uri))
return
page_name = urllib.unquote(page_name)
page = Page.load(page_name, user)
page.content = self.request.get('content')
page.save()
self.redirect(page.view_url())
class Page(object):
""" A wiki page, has attributes:
name
content
owner
is_public -- implement later
"""
def __init__(self, name, entity=None):
self.name = name
self.entity = entity
if entity:
self.content = entity['content']
self.owner = entity['owner']
self.modified = entity['modified']
else:
self.content = '= ' + self.name + " =\n\nStarting writing about " + self.name + ' here.'
def edit_url(self):
return '/%s?mode=edit' % (urllib.quote(self.name))
def view_url(self):
name = self.name
name = urllib.quote(name)
return '/' + name
def save(self):
if self.entity:
entity = self.entity
logging.debug('saving existing page ' + self.name)
else:
logging.debug('saving new page ' + self.name)
entity = datastore.Entity('Page')
entity['owner'] = users.get_current_user()
entity['name'] = self.name
entity['content'] = datastore_types.Text(self.content)
entity['modified'] = datetime.datetime.now()
datastore.Put(entity)
def wikified_content(self):
# TODO: check memcache for rendered page?
# replacements here
transforms = [
AutoLink(),
WikiWords(),
HideReferers(),
]
content = self.content
content = wikimarkup.parse(content)
for transform in transforms:
content = transform.run(content, self)
return content
@staticmethod
def load(name, owner):
if not owner:
owner = users.get_current_user()
query = datastore.Query('Page')
query['name'] = name
query['owner'] = owner
entities = query.Get(1)
if len(entities) < 1:
return Page(name)
else:
return Page(name, entities[0])
@staticmethod
def exists(name, owner):
logging.debug('looking up ' + name)
if not owner:
logging.debug('Were not given a user when looking up ' + name)
owner = users.get_current_user()
return Page.load(name, owner).entity
class Transform(object):
"""Abstraction for a regular expression transform.
Transform subclasses have two properties:
regexp: the regular expression defining what will be replaced
replace(MatchObject): returns a string replacement for a regexp match
We iterate over all matches for that regular expression, calling replace()
on the match to determine what text should replace the matched text.
The Transform class is more expressive than regular expression replacement
because the replace() method can execute arbitrary code to, e.g., look
up a WikiWord to see if the page exists before determining if the WikiWord
should be a link.
"""
def run(self, content, page):
"""Runs this transform over the given content.
Args:
content: The string data to apply a transformation to.
Returns:
A new string that is the result of this transform.
"""
self.page = page
parts = []
offset = 0
for match in self.regexp.finditer(content):
parts.append(content[offset:match.start(0)])
parts.append(self.replace(match))
offset = match.end(0)
parts.append(content[offset:])
return ''.join(parts)
class WikiWords(Transform):
"""Translates WikiWords to links.
"""
def __init__(self):
self.regexp = re.compile(r'(?<![A-Za-z])[A-Z][a-z]*([A-Z][a-z]+/?)+(?P<link_close>[^<]*</[Aa]>)?')
def replace(self, match):
wikiword = match.group(0)
if (match.group('link_close')):
# we're inside a link element, so don't rewrite
return wikiword
if wikiword == self.page.name:
# don't link to the current page
return wikiword
if Page.exists(wikiword, self.page.owner):
# link to that page
return '<a class="wikiword" href="/%s">%s</a>' % (wikiword, wikiword)
else:
# link to that page, making it clear it does not exist.
return '<a class="wikiword missing" href="/%s">%s?</a>' % (wikiword, wikiword)
class AutoLink(Transform):
"""A transform that auto-links URLs."""
def __init__(self):
self.regexp = re.compile(r'([^"])\b((http|https)://[^ \t\n\r<>\(\)&"]+' \
r'[^ \t\n\r<>\(\)&"\.])')
def replace(self, match):
url = match.group(2)
return match.group(1) + '<a class="autourl" href="%s">%s</a>' % (url, url)
class HideReferers(Transform):
"""A transform that hides referers for external hyperlinks."""
def __init__(self):
self.regexp = re.compile(r'href="(http[^"]+)"')
def replace(self, match):
url = match.group(1)
scheme, host, path, parameters, query, fragment = urlparse.urlparse(url)
url = 'http://www.google.com/url?sa=D&q=' + urllib.quote(url)
return 'href="%s"' % (url,)
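# A further transform following the same pattern (hypothetical; not wired
# into Page.wikified_content above):
#
#   class ObfuscateEmail(Transform):
#       """Rewrites email addresses into a spam-resistant form."""
#       def __init__(self):
#           self.regexp = re.compile(r'\b(\w[\w.+-]*)@([\w-]+\.[\w.-]+)\b')
#       def replace(self, match):
#           return '%s [at] %s' % (match.group(1), match.group(2))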
def main():
logging.getLogger().setLevel(logging.DEBUG)
application = webapp.WSGIApplication(
#[('/', MainPageHandler)],
[
('/', MainPageHandler),
('/(.*)', PageRequestHandler),
],
debug=_DEBUG,
)
run_wsgi_app(application)
if __name__ == '__main__':
main()
"""
# Models
class Owner(db.Model):
user = db.UserProperty(required=True)
namespace = db.TextProperty()
class Page(db.Model):
owner = db.UserProperty(required=True)
name = db.StringProperty(required=True)
content = db.StringProperty()
is_public = db.BooleanProperty(default=False)
def load(name):
query = Page.gql("WHERE owner = :owner AND name = :name", owner=users.get_current_user(), name=name)
return query.fetch
"""
| elliotf/appenginewiki | wiki.py | Python | lgpl-2.1 | 9,987 | 0.003304 |
"""
WSGI config for the conference project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "conference.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "conference.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
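# A minimal middleware sketch (hypothetical; wraps the Django application and
# stamps every response with an extra header):
#
#   class HeaderMiddleware(object):
#       def __init__(self, app):
#           self.app = app
#       def __call__(self, environ, start_response):
#           def custom_start_response(status, headers, exc_info=None):
#               headers.append(('X-Served-By', 'conference-wsgi'))
#               return start_response(status, headers, exc_info)
#           return self.app(environ, custom_start_response)
#
#   application = HeaderMiddleware(application)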
| pablo-the-programmer/Registration | conference/conference/wsgi.py | Python | mit | 1,415 | 0.000707 |
"""
Tests related to deprecation warnings. Also a convenient place
to document how deprecations should eventually be turned into errors.
"""
from __future__ import division, absolute_import, print_function
import datetime
import sys
import operator
import warnings
import pytest
import shutil
import tempfile
import numpy as np
from numpy.testing import (
assert_raises, assert_warns, assert_, assert_array_equal
)
from numpy.core._multiarray_tests import fromstring_null_term_c_api
try:
import pytz
_has_pytz = True
except ImportError:
_has_pytz = False
class _DeprecationTestCase(object):
# Just as warning: warnings uses re.match, so the start of this message
# must match.
message = ''
warning_cls = DeprecationWarning
def setup(self):
self.warn_ctx = warnings.catch_warnings(record=True)
self.log = self.warn_ctx.__enter__()
        # Do *not* ignore other DeprecationWarnings. Ignoring warnings
        # can give very confusing results because of
        # https://bugs.python.org/issue4180, and it is probably simplest to
        # keep the tests cleanly giving only the right warning type.
        # (While the filter is set to "error", other warnings are ignored
        # anyway.) We still let them show up here, so they are recorded
        # rather than raised.
warnings.filterwarnings("always", category=self.warning_cls)
warnings.filterwarnings("always", message=self.message,
category=self.warning_cls)
def teardown(self):
self.warn_ctx.__exit__()
def assert_deprecated(self, function, num=1, ignore_others=False,
function_fails=False,
exceptions=np._NoValue,
args=(), kwargs={}):
"""Test if DeprecationWarnings are given and raised.
        This first checks that calling the function gives `num`
        DeprecationWarnings; after that it turns the warnings into errors
        and checks that the raised exceptions match `exceptions`.
The exceptions can be different for cases where this code path
is simply not anticipated and the exception is replaced.
Parameters
----------
function : callable
The function to test
num : int
Number of DeprecationWarnings to expect. This should normally be 1.
ignore_others : bool
Whether warnings of the wrong type should be ignored (note that
the message is not checked)
function_fails : bool
If the function would normally fail, setting this will check for
warnings inside a try/except block.
exceptions : Exception or tuple of Exceptions
Exception to expect when turning the warnings into an error.
The default checks for DeprecationWarnings. If exceptions is
empty the function is expected to run successfully.
args : tuple
Arguments for `function`
kwargs : dict
Keyword arguments for `function`
"""
# reset the log
self.log[:] = []
if exceptions is np._NoValue:
exceptions = (self.warning_cls,)
try:
function(*args, **kwargs)
except (Exception if function_fails else tuple()):
pass
# just in case, clear the registry
num_found = 0
for warning in self.log:
if warning.category is self.warning_cls:
num_found += 1
elif not ignore_others:
raise AssertionError(
"expected %s but got: %s" %
(self.warning_cls.__name__, warning.category))
if num is not None and num_found != num:
msg = "%i warnings found but %i expected." % (len(self.log), num)
lst = [str(w) for w in self.log]
raise AssertionError("\n".join([msg] + lst))
with warnings.catch_warnings():
warnings.filterwarnings("error", message=self.message,
category=self.warning_cls)
try:
function(*args, **kwargs)
if exceptions != tuple():
raise AssertionError(
"No error raised during function call")
except exceptions:
if exceptions == tuple():
raise AssertionError(
"Error raised during function call")
def assert_not_deprecated(self, function, args=(), kwargs={}):
"""Test that warnings are not raised.
This is just a shorthand for:
self.assert_deprecated(function, num=0, ignore_others=True,
exceptions=tuple(), args=args, kwargs=kwargs)
"""
self.assert_deprecated(function, num=0, ignore_others=True,
exceptions=tuple(), args=args, kwargs=kwargs)
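# Typical usage of the harness above (a sketch mirroring the real subclasses
# below; the call and message are placeholders):
#
#   class TestSomethingDeprecated(_DeprecationTestCase):
#       message = "some deprecation message regex"
#
#       def test_it(self):
#           self.assert_deprecated(some_deprecated_call, args=(1,))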
class _VisibleDeprecationTestCase(_DeprecationTestCase):
warning_cls = np.VisibleDeprecationWarning
class TestNonTupleNDIndexDeprecation(object):
def test_basic(self):
a = np.zeros((5, 5))
with warnings.catch_warnings():
warnings.filterwarnings('always')
assert_warns(FutureWarning, a.__getitem__, [[0, 1], [0, 1]])
assert_warns(FutureWarning, a.__getitem__, [slice(None)])
warnings.filterwarnings('error')
assert_raises(FutureWarning, a.__getitem__, [[0, 1], [0, 1]])
assert_raises(FutureWarning, a.__getitem__, [slice(None)])
            # a[[0, 1]] was always advanced indexing, so no error/warning
a[[0, 1]]
class TestComparisonDeprecations(_DeprecationTestCase):
"""This tests the deprecation, for non-element-wise comparison logic.
This used to mean that when an error occurred during element-wise comparison
(i.e. broadcasting) NotImplemented was returned, but also in the comparison
itself, False was given instead of the error.
Also test FutureWarning for the None comparison.
"""
message = "elementwise.* comparison failed; .*"
def test_normal_types(self):
for op in (operator.eq, operator.ne):
# Broadcasting errors:
self.assert_deprecated(op, args=(np.zeros(3), []))
a = np.zeros(3, dtype='i,i')
# (warning is issued a couple of times here)
self.assert_deprecated(op, args=(a, a[:-1]), num=None)
# Element comparison error (numpy array can't be compared).
a = np.array([1, np.array([1,2,3])], dtype=object)
b = np.array([1, np.array([1,2,3])], dtype=object)
self.assert_deprecated(op, args=(a, b), num=None)
def test_string(self):
# For two string arrays, strings always raised the broadcasting error:
a = np.array(['a', 'b'])
b = np.array(['a', 'b', 'c'])
assert_raises(ValueError, lambda x, y: x == y, a, b)
# The empty list is not cast to string, and this used to pass due
# to dtype mismatch; now (2018-06-21) it correctly leads to a
# FutureWarning.
assert_warns(FutureWarning, lambda: a == [])
def test_void_dtype_equality_failures(self):
class NotArray(object):
def __array__(self):
raise TypeError
# Needed so Python 3 does not raise DeprecationWarning twice.
def __ne__(self, other):
return NotImplemented
self.assert_deprecated(lambda: np.arange(2) == NotArray())
self.assert_deprecated(lambda: np.arange(2) != NotArray())
struct1 = np.zeros(2, dtype="i4,i4")
struct2 = np.zeros(2, dtype="i4,i4,i4")
assert_warns(FutureWarning, lambda: struct1 == 1)
assert_warns(FutureWarning, lambda: struct1 == struct2)
assert_warns(FutureWarning, lambda: struct1 != 1)
assert_warns(FutureWarning, lambda: struct1 != struct2)
def test_array_richcompare_legacy_weirdness(self):
# It doesn't really work to use assert_deprecated here, b/c part of
# the point of assert_deprecated is to check that when warnings are
# set to "error" mode then the error is propagated -- which is good!
# But here we are testing a bunch of code that is deprecated *because*
# it has the habit of swallowing up errors and converting them into
# different warnings. So assert_warns will have to be sufficient.
assert_warns(FutureWarning, lambda: np.arange(2) == "a")
assert_warns(FutureWarning, lambda: np.arange(2) != "a")
# No warning for scalar comparisons
with warnings.catch_warnings():
warnings.filterwarnings("error")
assert_(not (np.array(0) == "a"))
assert_(np.array(0) != "a")
assert_(not (np.int16(0) == "a"))
assert_(np.int16(0) != "a")
for arg1 in [np.asarray(0), np.int16(0)]:
struct = np.zeros(2, dtype="i4,i4")
for arg2 in [struct, "a"]:
for f in [operator.lt, operator.le, operator.gt, operator.ge]:
if sys.version_info[0] >= 3:
# py3
                        with warnings.catch_warnings(record=True) as w_log:
                            warnings.filterwarnings("always")
                            assert_raises(TypeError, f, arg1, arg2)
                            assert_(not w_log)
else:
# py2
assert_warns(DeprecationWarning, f, arg1, arg2)
class TestDatetime64Timezone(_DeprecationTestCase):
"""Parsing of datetime64 with timezones deprecated in 1.11.0, because
datetime64 is now timezone naive rather than UTC only.
It will be quite a while before we can remove this, because, at the very
least, a lot of existing code uses the 'Z' modifier to avoid conversion
from local time to UTC, even if otherwise it handles time in a timezone
naive fashion.
"""
def test_string(self):
self.assert_deprecated(np.datetime64, args=('2000-01-01T00+01',))
self.assert_deprecated(np.datetime64, args=('2000-01-01T00Z',))
@pytest.mark.skipif(not _has_pytz,
reason="The pytz module is not available.")
def test_datetime(self):
tz = pytz.timezone('US/Eastern')
dt = datetime.datetime(2000, 1, 1, 0, 0, tzinfo=tz)
self.assert_deprecated(np.datetime64, args=(dt,))
class TestNonCContiguousViewDeprecation(_DeprecationTestCase):
"""View of non-C-contiguous arrays deprecated in 1.11.0.
The deprecation will not be raised for arrays that are both C and F
contiguous, as C contiguous is dominant. There are more such arrays
with relaxed stride checking than without so the deprecation is not
as visible with relaxed stride checking in force.
"""
def test_fortran_contiguous(self):
self.assert_deprecated(np.ones((2,2)).T.view, args=(complex,))
self.assert_deprecated(np.ones((2,2)).T.view, args=(np.int8,))
class TestInvalidOrderParameterInputForFlattenArrayDeprecation(_DeprecationTestCase):
"""Invalid arguments to the ORDER parameter in array.flatten() should not be
allowed and should raise an error. However, in the interests of not breaking
code that may inadvertently pass invalid arguments to this parameter, a
DeprecationWarning will be issued instead for the time being to give developers
time to refactor relevant code.
"""
def test_flatten_array_non_string_arg(self):
x = np.zeros((3, 5))
self.message = ("Non-string object detected for "
"the array ordering. Please pass "
"in 'C', 'F', 'A', or 'K' instead")
self.assert_deprecated(x.flatten, args=(np.pi,))
def test_flatten_array_invalid_string_arg(self):
# Tests that a DeprecationWarning is raised
# when a string of length greater than one
# starting with "C", "F", "A", or "K" (case-
# and unicode-insensitive) is passed in for
# the ORDER parameter. Otherwise, a TypeError
# will be raised!
x = np.zeros((3, 5))
self.message = ("Non length-one string passed "
"in for the array ordering. Please "
"pass in 'C', 'F', 'A', or 'K' instead")
self.assert_deprecated(x.flatten, args=("FACK",))
class TestArrayDataAttributeAssignmentDeprecation(_DeprecationTestCase):
"""Assigning the 'data' attribute of an ndarray is unsafe as pointed
out in gh-7093. Eventually, such assignment should NOT be allowed, but
in the interests of maintaining backwards compatibility, only a Deprecation-
Warning will be raised instead for the time being to give developers time to
refactor relevant code.
"""
def test_data_attr_assignment(self):
a = np.arange(10)
b = np.linspace(0, 1, 10)
self.message = ("Assigning the 'data' attribute is an "
"inherently unsafe operation and will "
"be removed in the future.")
self.assert_deprecated(a.__setattr__, args=('data', b.data))
class TestLinspaceInvalidNumParameter(_DeprecationTestCase):
"""Argument to the num parameter in linspace that cannot be
safely interpreted as an integer is deprecated in 1.12.0.
Argument to the num parameter in linspace that cannot be
safely interpreted as an integer should not be allowed.
In the interest of not breaking code that passes
an argument that could still be interpreted as an integer, a
DeprecationWarning will be issued for the time being to give
developers time to refactor relevant code.
"""
def test_float_arg(self):
# 2016-02-25, PR#7328
self.assert_deprecated(np.linspace, args=(0, 10, 2.5))
class TestBinaryReprInsufficientWidthParameterForRepresentation(_DeprecationTestCase):
"""
If a 'width' parameter is passed into ``binary_repr`` that is insufficient to
represent the number in base 2 (positive) or 2's complement (negative) form,
the function used to silently ignore the parameter and return a representation
using the minimal number of bits needed for the form in question. Such behavior
is now considered unsafe from a user perspective and will raise an error in the future.
"""
def test_insufficient_width_positive(self):
args = (10,)
kwargs = {'width': 2}
self.message = ("Insufficient bit width provided. This behavior "
"will raise an error in the future.")
self.assert_deprecated(np.binary_repr, args=args, kwargs=kwargs)
def test_insufficient_width_negative(self):
args = (-5,)
kwargs = {'width': 2}
self.message = ("Insufficient bit width provided. This behavior "
"will raise an error in the future.")
self.assert_deprecated(np.binary_repr, args=args, kwargs=kwargs)
class TestNumericStyleTypecodes(_DeprecationTestCase):
"""
Deprecate the old numeric-style dtypes, which are especially
confusing for complex types, e.g. Complex32 -> complex64. When the
deprecation cycle is complete, the check for the strings should be
removed from PyArray_DescrConverter in descriptor.c, and the
deprecated keys should not be added as capitalized aliases in
_add_aliases in numerictypes.py.
"""
def test_all_dtypes(self):
deprecated_types = [
'Bool', 'Complex32', 'Complex64', 'Float16', 'Float32', 'Float64',
'Int8', 'Int16', 'Int32', 'Int64', 'Object0', 'Timedelta64',
'UInt8', 'UInt16', 'UInt32', 'UInt64', 'Void0'
]
if sys.version_info[0] < 3:
deprecated_types.extend(['Unicode0', 'String0'])
for dt in deprecated_types:
self.assert_deprecated(np.dtype, exceptions=(TypeError,),
args=(dt,))
class TestTestDeprecated(object):
def test_assert_deprecated(self):
test_case_instance = _DeprecationTestCase()
test_case_instance.setup()
assert_raises(AssertionError,
test_case_instance.assert_deprecated,
lambda: None)
def foo():
warnings.warn("foo", category=DeprecationWarning, stacklevel=2)
test_case_instance.assert_deprecated(foo)
test_case_instance.teardown()
class TestClassicIntDivision(_DeprecationTestCase):
"""
See #7949. Deprecate the numeric-style dtypes with -3 flag in python 2
if used for division
List of data types: https://docs.scipy.org/doc/numpy/user/basics.types.html
"""
def test_int_dtypes(self):
#scramble types and do some mix and match testing
deprecated_types = [
'bool_', 'int_', 'intc', 'uint8', 'int8', 'uint64', 'int32', 'uint16',
'intp', 'int64', 'uint32', 'int16'
]
if sys.version_info[0] < 3 and sys.py3kwarning:
import operator as op
dt2 = 'bool_'
for dt1 in deprecated_types:
a = np.array([1,2,3], dtype=dt1)
b = np.array([1,2,3], dtype=dt2)
self.assert_deprecated(op.div, args=(a,b))
dt2 = dt1
class TestNonNumericConjugate(_DeprecationTestCase):
"""
Deprecate no-op behavior of ndarray.conjugate on non-numeric dtypes,
which conflicts with the error behavior of np.conjugate.
"""
def test_conjugate(self):
for a in np.array(5), np.array(5j):
self.assert_not_deprecated(a.conjugate)
for a in (np.array('s'), np.array('2016', 'M'),
np.array((1, 2), [('a', int), ('b', int)])):
self.assert_deprecated(a.conjugate)
class TestNPY_CHAR(_DeprecationTestCase):
# 2017-05-03, 1.13.0
def test_npy_char_deprecation(self):
from numpy.core._multiarray_tests import npy_char_deprecation
self.assert_deprecated(npy_char_deprecation)
assert_(npy_char_deprecation() == 'S1')
class TestPyArray_AS1D(_DeprecationTestCase):
def test_npy_pyarrayas1d_deprecation(self):
from numpy.core._multiarray_tests import npy_pyarrayas1d_deprecation
assert_raises(NotImplementedError, npy_pyarrayas1d_deprecation)
class TestPyArray_AS2D(_DeprecationTestCase):
def test_npy_pyarrayas2d_deprecation(self):
from numpy.core._multiarray_tests import npy_pyarrayas2d_deprecation
assert_raises(NotImplementedError, npy_pyarrayas2d_deprecation)
class Test_UPDATEIFCOPY(_DeprecationTestCase):
"""
v1.14 deprecates creating an array with the UPDATEIFCOPY flag, use
WRITEBACKIFCOPY instead
"""
def test_npy_updateifcopy_deprecation(self):
from numpy.core._multiarray_tests import npy_updateifcopy_deprecation
arr = np.arange(9).reshape(3, 3)
v = arr.T
self.assert_deprecated(npy_updateifcopy_deprecation, args=(v,))
class TestDatetimeEvent(_DeprecationTestCase):
# 2017-08-11, 1.14.0
def test_3_tuple(self):
for cls in (np.datetime64, np.timedelta64):
# two valid uses - (unit, num) and (unit, num, den, None)
self.assert_not_deprecated(cls, args=(1, ('ms', 2)))
self.assert_not_deprecated(cls, args=(1, ('ms', 2, 1, None)))
# trying to use the event argument, removed in 1.7.0, is deprecated
# it used to be a uint8
self.assert_deprecated(cls, args=(1, ('ms', 2, 'event')))
self.assert_deprecated(cls, args=(1, ('ms', 2, 63)))
self.assert_deprecated(cls, args=(1, ('ms', 2, 1, 'event')))
self.assert_deprecated(cls, args=(1, ('ms', 2, 1, 63)))
class TestTruthTestingEmptyArrays(_DeprecationTestCase):
# 2017-09-25, 1.14.0
message = '.*truth value of an empty array is ambiguous.*'
def test_1d(self):
self.assert_deprecated(bool, args=(np.array([]),))
def test_2d(self):
self.assert_deprecated(bool, args=(np.zeros((1, 0)),))
self.assert_deprecated(bool, args=(np.zeros((0, 1)),))
self.assert_deprecated(bool, args=(np.zeros((0, 0)),))
class TestBincount(_DeprecationTestCase):
# 2017-06-01, 1.14.0
def test_bincount_minlength(self):
self.assert_deprecated(lambda: np.bincount([1, 2, 3], minlength=None))
class TestAlen(_DeprecationTestCase):
# 2019-08-02, 1.18.0
def test_alen(self):
self.assert_deprecated(lambda: np.alen(np.array([1, 2, 3])))
class TestGeneratorSum(_DeprecationTestCase):
# 2018-02-25, 1.15.0
def test_generator_sum(self):
self.assert_deprecated(np.sum, args=((i for i in range(5)),))
class TestSctypeNA(_VisibleDeprecationTestCase):
# 2018-06-24, 1.16
def test_sctypeNA(self):
self.assert_deprecated(lambda: np.sctypeNA['?'])
self.assert_deprecated(lambda: np.typeNA['?'])
self.assert_deprecated(lambda: np.typeNA.get('?'))
class TestPositiveOnNonNumerical(_DeprecationTestCase):
# 2018-06-28, 1.16.0
def test_positive_on_non_number(self):
self.assert_deprecated(operator.pos, args=(np.array('foo'),))
class TestFromstring(_DeprecationTestCase):
# 2017-10-19, 1.14
def test_fromstring(self):
self.assert_deprecated(np.fromstring, args=('\x00'*80,))
class TestFromStringAndFileInvalidData(_DeprecationTestCase):
# 2019-06-08, 1.17.0
# Tests should be moved to real tests when deprecation is done.
message = "string or file could not be read to its end"
@pytest.mark.parametrize("invalid_str", [",invalid_data", "invalid_sep"])
def test_deprecate_unparsable_data_file(self, invalid_str):
x = np.array([1.51, 2, 3.51, 4], dtype=float)
with tempfile.TemporaryFile(mode="w") as f:
x.tofile(f, sep=',', format='%.2f')
f.write(invalid_str)
f.seek(0)
self.assert_deprecated(lambda: np.fromfile(f, sep=","))
f.seek(0)
self.assert_deprecated(lambda: np.fromfile(f, sep=",", count=5))
# Should not raise:
with warnings.catch_warnings():
warnings.simplefilter("error", DeprecationWarning)
f.seek(0)
res = np.fromfile(f, sep=",", count=4)
assert_array_equal(res, x)
@pytest.mark.parametrize("invalid_str", [",invalid_data", "invalid_sep"])
def test_deprecate_unparsable_string(self, invalid_str):
x = np.array([1.51, 2, 3.51, 4], dtype=float)
x_str = "1.51,2,3.51,4{}".format(invalid_str)
self.assert_deprecated(lambda: np.fromstring(x_str, sep=","))
self.assert_deprecated(lambda: np.fromstring(x_str, sep=",", count=5))
# The C-level API can use not fixed size, but 0 terminated strings,
# so test that as well:
bytestr = x_str.encode("ascii")
self.assert_deprecated(lambda: fromstring_null_term_c_api(bytestr))
with assert_warns(DeprecationWarning):
# this is slightly strange, in that fromstring leaves data
# potentially uninitialized (would be good to error when all is
            # read, but count is larger than the actual data, maybe).
res = np.fromstring(x_str, sep=",", count=5)
assert_array_equal(res[:-1], x)
with warnings.catch_warnings():
warnings.simplefilter("error", DeprecationWarning)
# Should not raise:
res = np.fromstring(x_str, sep=",", count=4)
assert_array_equal(res, x)
class Test_GetSet_NumericOps(_DeprecationTestCase):
# 2018-09-20, 1.16.0
def test_get_numeric_ops(self):
from numpy.core._multiarray_tests import getset_numericops
self.assert_deprecated(getset_numericops, num=2)
# empty kwargs prevents any state actually changing which would break
# other tests.
self.assert_deprecated(np.set_numeric_ops, kwargs={})
assert_raises(ValueError, np.set_numeric_ops, add='abc')
class TestShape1Fields(_DeprecationTestCase):
warning_cls = FutureWarning
# 2019-05-20, 1.17.0
def test_shape_1_fields(self):
self.assert_deprecated(np.dtype, args=([('a', int, 1)],))
class TestNonZero(_DeprecationTestCase):
# 2019-05-26, 1.17.0
def test_zerod(self):
self.assert_deprecated(lambda: np.nonzero(np.array(0)))
self.assert_deprecated(lambda: np.nonzero(np.array(1)))
| MSeifert04/numpy | numpy/core/tests/test_deprecations.py | Python | bsd-3-clause | 24,541 | 0.001222 |
import unittest
from mahjong.models import Table, ALL_WINDS, WIND_EAST, WIND_NORTH
import mahjong.services.stone
class MahjongTabelModelTestCase(unittest.TestCase):
def setUp(self):
self.table = Table(stones=mahjong.services.stone.get_all_shuffled())
self.table.wall_wind = WIND_EAST # Open the table
def test_walls_are_created(self):
"""
Case: A table is initialized
Expected: The walls are created
"""
self.assertEqual(len(self.table.walls), 4)
for wind, wall in self.table.walls.items():
self.assertEqual(len(wall), 36)
self.assertIn(wind, ALL_WINDS)
def test_get_current_wall(self):
"""
Case: current wall get requested
Expected: The wall of the wall wind is returned
"""
self.assertEqual(
self.table.walls[self.table.wall_wind],
self.table.current_wall
)
def test_stone_iteration(self):
"""
        Case: we iterate through the stones of the table
        Expected: we get the same stones as the list we gave
"""
stones = mahjong.services.stone.get_all_shuffled()
table = Table(stones=stones)
table.wall_wind = WIND_NORTH # Last wind
table.wall_index = 35 # Last stone
for stone in table:
self.assertEqual(stone, stones.pop())
def test_number_stones_returned(self):
self.table.wall_wind = WIND_NORTH
self.table.wall_index = 35
stones = self.table.get_stones(count=3)
self.assertEqual(len(stones), 3)
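# Run with the standard unittest runner, e.g.:
#
#   python -m unittest discover tests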
| Peter-Slump/mahjong | tests/mahjong/models/test_tabel.py | Python | mit | 1,613 | 0 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django import shortcuts
from django.contrib import messages
from django.utils.translation import ugettext_lazy as _
from horizon import api
from horizon import exceptions
from horizon import forms
LOG = logging.getLogger(__name__)
class AddUser(forms.SelfHandlingForm):
tenant_id = forms.CharField(widget=forms.widgets.HiddenInput())
user_id = forms.CharField(widget=forms.widgets.HiddenInput())
role_id = forms.ChoiceField(label=_("Role"))
def __init__(self, *args, **kwargs):
roles = kwargs.pop('roles')
super(AddUser, self).__init__(*args, **kwargs)
role_choices = [(role.id, role.name) for role in roles]
self.fields['role_id'].choices = role_choices
def handle(self, request, data):
try:
api.add_tenant_user_role(request,
data['tenant_id'],
data['user_id'],
data['role_id'])
messages.success(request, _('Successfully added user to project.'))
        except Exception:
exceptions.handle(request, _('Unable to add user to project.'))
return shortcuts.redirect('horizon:syspanel:projects:users',
tenant_id=data['tenant_id'])
class CreateTenant(forms.SelfHandlingForm):
name = forms.CharField(label=_("Name"))
description = forms.CharField(
widget=forms.widgets.Textarea(),
label=_("Description"))
enabled = forms.BooleanField(label=_("Enabled"), required=False,
initial=True)
def handle(self, request, data):
try:
LOG.info('Creating project with name "%s"' % data['name'])
api.tenant_create(request,
data['name'],
data['description'],
data['enabled'])
messages.success(request,
_('%s was successfully created.')
% data['name'])
        except Exception:
exceptions.handle(request, _('Unable to create project.'))
return shortcuts.redirect('horizon:syspanel:projects:index')
class UpdateTenant(forms.SelfHandlingForm):
id = forms.CharField(label=_("ID"),
widget=forms.TextInput(attrs={'readonly': 'readonly'}))
name = forms.CharField(label=_("Name"))
description = forms.CharField(
widget=forms.widgets.Textarea(),
label=_("Description"))
enabled = forms.BooleanField(required=False, label=_("Enabled"))
def handle(self, request, data):
try:
LOG.info('Updating project with id "%s"' % data['id'])
api.tenant_update(request,
data['id'],
data['name'],
data['description'],
data['enabled'])
messages.success(request,
_('%s was successfully updated.')
% data['name'])
        except Exception:
exceptions.handle(request, _('Unable to update project.'))
return shortcuts.redirect('horizon:syspanel:projects:index')
class UpdateQuotas(forms.SelfHandlingForm):
tenant_id = forms.CharField(label=_("ID (name)"),
widget=forms.TextInput(attrs={'readonly': 'readonly'}))
metadata_items = forms.IntegerField(label=_("Metadata Items"))
injected_files = forms.IntegerField(label=_("Injected Files"))
injected_file_content_bytes = forms.IntegerField(label=_("Injected File "
"Content Bytes"))
cores = forms.IntegerField(label=_("VCPUs"))
instances = forms.IntegerField(label=_("Instances"))
volumes = forms.IntegerField(label=_("Volumes"))
gigabytes = forms.IntegerField(label=_("Gigabytes"))
ram = forms.IntegerField(label=_("RAM (in MB)"))
floating_ips = forms.IntegerField(label=_("Floating IPs"))
def handle(self, request, data):
ifcb = data['injected_file_content_bytes']
try:
api.nova.tenant_quota_update(request,
data['tenant_id'],
metadata_items=data['metadata_items'],
injected_file_content_bytes=ifcb,
volumes=data['volumes'],
gigabytes=data['gigabytes'],
ram=data['ram'],
floating_ips=data['floating_ips'],
instances=data['instances'],
injected_files=data['injected_files'],
cores=data['cores'])
messages.success(request,
_('Quotas for %s were successfully updated.')
% data['tenant_id'])
        except Exception:
exceptions.handle(request, _('Unable to update quotas.'))
return shortcuts.redirect('horizon:syspanel:projects:index')
| savi-dev/horizon | horizon/dashboards/syspanel/projects/forms.py | Python | apache-2.0 | 5,991 | 0.001335 |
#!/usr/bin/env python
import sys
import gobject
import dbus.mainloop.glib
dbus.mainloop.glib.DBusGMainLoop(set_as_default = True)
import telepathy
DBUS_PROPERTIES = 'org.freedesktop.DBus.Properties'
def get_registry():
reg = telepathy.client.ManagerRegistry()
reg.LoadManagers()
return reg
def get_connection_manager(reg):
cm = reg.GetManager('bluewire')
return cm
class Action(object):
def __init__(self):
self._action = None
def queue_action(self):
pass
def append_action(self, action):
assert self._action is None
self._action = action
def get_next_action(self):
assert self._action is not None
return self._action
def _on_done(self):
if self._action is None:
return
self._action.queue_action()
def _on_error(self, error):
print error
def _on_generic_message(self, *args):
pass
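# The Action subclasses below form a simple asynchronous chain: each action's
# _on_done queues whatever action was appended after it on the GLib main loop.
# A minimal sketch of the pattern (illustrative only, not part of the original
# script; `loop` is a gobject.MainLoop as in __main__ below):
#
#   first = DummyAction()
#   first.append_action(QuitLoop(loop))
#   first.queue_action()  # runs first, whose _on_done then queues QuitLoop
#   loop.run()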
class DummyAction(Action):
def queue_action(self):
gobject.idle_add(self._on_done)
class QuitLoop(Action):
def __init__(self, loop):
super(QuitLoop, self).__init__()
self._loop = loop
def queue_action(self):
self._loop.quit()
class DisplayParams(Action):
def __init__(self, cm):
super(DisplayParams, self).__init__()
self._cm = cm
def queue_action(self):
self._cm[telepathy.interfaces.CONN_MGR_INTERFACE].GetParameters(
            'bluetooth',
reply_handler = self._on_done,
error_handler = self._on_error,
)
def _on_done(self, params):
print "Connection Parameters:"
for name, flags, signature, default in params:
print "\t%s (%s)" % (name, signature),
if flags & telepathy.constants.CONN_MGR_PARAM_FLAG_REQUIRED:
print "required",
if flags & telepathy.constants.CONN_MGR_PARAM_FLAG_REGISTER:
print "register",
if flags & telepathy.constants.CONN_MGR_PARAM_FLAG_SECRET:
print "secret",
if flags & telepathy.constants.CONN_MGR_PARAM_FLAG_DBUS_PROPERTY:
print "dbus-property",
if flags & telepathy.constants.CONN_MGR_PARAM_FLAG_HAS_DEFAULT:
print "has-default(%s)" % default,
print ""
super(DisplayParams, self)._on_done()
class RequestConnection(Action):
def __init__(self, cm, username, password, forward):
super(RequestConnection, self).__init__()
self._cm = cm
self._conn = None
self._serviceName = None
self._username = username
self._password = password
self._forward = forward
@property
def conn(self):
return self._conn
@property
def serviceName(self):
return self._serviceName
def queue_action(self):
self._cm[telepathy.server.CONNECTION_MANAGER].RequestConnection(
            'bluetooth',
{
'account': self._username,
'password': self._password,
'forward': self._forward,
},
reply_handler = self._on_done,
error_handler = self._on_error,
)
def _on_done(self, busName, objectPath):
self._serviceName = busName
self._conn = telepathy.client.Connection(busName, objectPath)
super(RequestConnection, self)._on_done()
class Connect(Action):
def __init__(self, connAction):
super(Connect, self).__init__()
self._connAction = connAction
def queue_action(self):
self._connAction.conn[telepathy.server.CONNECTION].connect_to_signal(
'StatusChanged',
self._on_change,
)
self._connAction.conn[telepathy.server.CONNECTION].Connect(
reply_handler = self._on_generic_message,
error_handler = self._on_error,
)
def _on_done(self):
super(Connect, self)._on_done()
def _on_change(self, status, reason):
if status == telepathy.constants.CONNECTION_STATUS_DISCONNECTED:
print "Disconnected!"
self._conn = None
elif status == telepathy.constants.CONNECTION_STATUS_CONNECTED:
print "Connected"
self._on_done()
elif status == telepathy.constants.CONNECTION_STATUS_CONNECTING:
print "Connecting"
else:
print "Status: %r" % status
class SimplePresenceOptions(Action):
def __init__(self, connAction):
super(SimplePresenceOptions, self).__init__()
self._connAction = connAction
def queue_action(self):
self._connAction.conn[DBUS_PROPERTIES].Get(
telepathy.server.CONNECTION_INTERFACE_SIMPLE_PRESENCE,
'Statuses',
reply_handler = self._on_done,
error_handler = self._on_error,
)
def _on_done(self, statuses):
print "\tAvailable Statuses"
for (key, value) in statuses.iteritems():
print "\t\t - %s" % key
super(SimplePresenceOptions, self)._on_done()
class NullHandle(object):
@property
def handle(self):
return 0
@property
def handles(self):
return []
class UserHandle(Action):
def __init__(self, connAction):
super(UserHandle, self).__init__()
self._connAction = connAction
self._handle = None
@property
def handle(self):
return self._handle
@property
def handles(self):
return [self._handle]
def queue_action(self):
self._connAction.conn[telepathy.server.CONNECTION].GetSelfHandle(
reply_handler = self._on_done,
error_handler = self._on_error,
)
def _on_done(self, handle):
self._handle = handle
super(UserHandle, self)._on_done()
class RequestHandle(Action):
def __init__(self, connAction, handleType, handleNames):
super(RequestHandle, self).__init__()
self._connAction = connAction
self._handle = None
self._handleType = handleType
self._handleNames = handleNames
@property
def handle(self):
return self._handle
@property
def handles(self):
return [self._handle]
def queue_action(self):
self._connAction.conn[telepathy.server.CONNECTION].RequestHandles(
self._handleType,
self._handleNames,
reply_handler = self._on_done,
error_handler = self._on_error,
)
def _on_done(self, handles):
self._handle = handles[0]
super(RequestHandle, self)._on_done()
class RequestChannel(Action):
def __init__(self, connAction, handleAction, channelType, handleType):
super(RequestChannel, self).__init__()
self._connAction = connAction
self._handleAction = handleAction
self._channel = None
self._channelType = channelType
self._handleType = handleType
@property
def channel(self):
return self._channel
def queue_action(self):
self._connAction.conn[telepathy.server.CONNECTION].RequestChannel(
self._channelType,
self._handleType,
self._handleAction.handle,
True,
reply_handler = self._on_done,
error_handler = self._on_error,
)
def _on_done(self, channelObjectPath):
self._channel = telepathy.client.Channel(self._connAction.serviceName, channelObjectPath)
super(RequestChannel, self)._on_done()
class EnsureChannel(Action):
def __init__(self, connAction, channelType, handleType, handleId):
super(EnsureChannel, self).__init__()
self._connAction = connAction
self._channel = None
self._channelType = channelType
self._handleType = handleType
self._handleId = handleId
self._handle = None
@property
def channel(self):
return self._channel
@property
def handle(self):
return self._handle
@property
def handles(self):
return [self._handle]
def queue_action(self):
properties = {
telepathy.server.CHANNEL_INTERFACE+".ChannelType": self._channelType,
telepathy.server.CHANNEL_INTERFACE+".TargetHandleType": self._handleType,
telepathy.server.CHANNEL_INTERFACE+".TargetID": self._handleId,
}
self._connAction.conn[telepathy.server.CONNECTION_INTERFACE_REQUESTS].EnsureChannel(
properties,
reply_handler = self._on_done,
error_handler = self._on_error,
)
def _on_done(self, yours, channelObjectPath, properties):
print "Create?", not not yours
print "Path:", channelObjectPath
print "Properties:", properties
self._channel = telepathy.client.Channel(self._connAction.serviceName, channelObjectPath)
self._handle = properties[telepathy.server.CHANNEL_INTERFACE+".TargetHandle"]
super(EnsureChannel, self)._on_done()
class CloseChannel(Action):
def __init__(self, connAction, chanAction):
super(CloseChannel, self).__init__()
self._connAction = connAction
self._chanAction = chanAction
self._handles = []
def queue_action(self):
self._chanAction.channel[telepathy.server.CHANNEL].Close(
reply_handler = self._on_done,
error_handler = self._on_error,
)
def _on_done(self):
super(CloseChannel, self)._on_done()
class ContactHandles(Action):
def __init__(self, connAction, chanAction):
super(ContactHandles, self).__init__()
self._connAction = connAction
self._chanAction = chanAction
self._handles = []
@property
def handles(self):
return self._handles
def queue_action(self):
self._chanAction.channel[DBUS_PROPERTIES].Get(
telepathy.server.CHANNEL_INTERFACE_GROUP,
'Members',
reply_handler = self._on_done,
error_handler = self._on_error,
)
def _on_done(self, handles):
self._handles = list(handles)
super(ContactHandles, self)._on_done()
class SimplePresenceStatus(Action):
def __init__(self, connAction, handleAction):
super(SimplePresenceStatus, self).__init__()
self._connAction = connAction
self._handleAction = handleAction
def queue_action(self):
self._connAction.conn[telepathy.server.CONNECTION_INTERFACE_SIMPLE_PRESENCE].GetPresences(
self._handleAction.handles,
reply_handler = self._on_done,
error_handler = self._on_error,
)
def _on_done(self, aliases):
print "\tPresences:"
for hid, (presenceType, presence, presenceMessage) in aliases.iteritems():
print "\t\t%s:" % hid, presenceType, presence, presenceMessage
super(SimplePresenceStatus, self)._on_done()
class SetSimplePresence(Action):
def __init__(self, connAction, status, message):
super(SetSimplePresence, self).__init__()
self._connAction = connAction
self._status = status
self._message = message
def queue_action(self):
self._connAction.conn[telepathy.server.CONNECTION_INTERFACE_SIMPLE_PRESENCE].SetPresence(
self._status,
self._message,
reply_handler = self._on_done,
error_handler = self._on_error,
)
def _on_done(self):
super(SetSimplePresence, self)._on_done()
class Aliases(Action):
def __init__(self, connAction, handleAction):
super(Aliases, self).__init__()
self._connAction = connAction
self._handleAction = handleAction
def queue_action(self):
self._connAction.conn[telepathy.server.CONNECTION_INTERFACE_ALIASING].RequestAliases(
self._handleAction.handles,
reply_handler = self._on_done,
error_handler = self._on_error,
)
def _on_done(self, aliases):
print "\tAliases:"
for h, alias in zip(self._handleAction.handles, aliases):
print "\t\t", h, alias
super(Aliases, self)._on_done()
class Call(Action):
def __init__(self, connAction, chanAction, handleAction):
super(Call, self).__init__()
self._connAction = connAction
self._chanAction = chanAction
self._handleAction = handleAction
def queue_action(self):
self._chanAction.channel[telepathy.server.CHANNEL_TYPE_STREAMED_MEDIA].RequestStreams(
self._handleAction.handle,
[telepathy.constants.MEDIA_STREAM_TYPE_AUDIO],
reply_handler = self._on_done,
error_handler = self._on_error,
)
def _on_done(self, handle):
print "Call started"
super(Call, self)._on_done()
class SendText(Action):
def __init__(self, connAction, chanAction, handleAction, messageType, message):
super(SendText, self).__init__()
self._connAction = connAction
self._chanAction = chanAction
self._handleAction = handleAction
self._messageType = messageType
self._message = message
def queue_action(self):
self._chanAction.channel[telepathy.server.CHANNEL_TYPE_TEXT].Send(
self._messageType,
self._message,
reply_handler = self._on_done,
error_handler = self._on_error,
)
    def _on_done(self):
print "Message sent"
super(SendText, self)._on_done()
class Sleep(Action):
def __init__(self, length):
super(Sleep, self).__init__()
self._length = length
def queue_action(self):
gobject.timeout_add(self._length, self._on_done)
class Block(Action):
def __init__(self):
super(Block, self).__init__()
def queue_action(self):
print "Blocking"
def _on_done(self):
        # Intentionally a no-op: Block never advances the action chain
pass
class Disconnect(Action):
def __init__(self, connAction):
super(Disconnect, self).__init__()
self._connAction = connAction
def queue_action(self):
self._connAction.conn[telepathy.server.CONNECTION].Disconnect(
reply_handler = self._on_done,
error_handler = self._on_error,
)
if __name__ == '__main__':
loop = gobject.MainLoop()
reg = get_registry()
cm = get_connection_manager(reg)
nullHandle = NullHandle()
dummy = DummyAction()
firstAction = dummy
lastAction = dummy
if True:
dp = DisplayParams(cm)
lastAction.append_action(dp)
lastAction = lastAction.get_next_action()
if True:
username = sys.argv[1]
password = sys.argv[2]
forward = sys.argv[3]
reqcon = RequestConnection(cm, username, password, forward)
lastAction.append_action(reqcon)
lastAction = lastAction.get_next_action()
if False:
reqcon = RequestConnection(cm, username, password, forward)
lastAction.append_action(reqcon)
lastAction = lastAction.get_next_action()
con = Connect(reqcon)
lastAction.append_action(con)
lastAction = lastAction.get_next_action()
if True:
spo = SimplePresenceOptions(reqcon)
lastAction.append_action(spo)
lastAction = lastAction.get_next_action()
if True:
uh = UserHandle(reqcon)
lastAction.append_action(uh)
lastAction = lastAction.get_next_action()
ua = Aliases(reqcon, uh)
lastAction.append_action(ua)
lastAction = lastAction.get_next_action()
sps = SimplePresenceStatus(reqcon, uh)
lastAction.append_action(sps)
lastAction = lastAction.get_next_action()
if False:
setdnd = SetSimplePresence(reqcon, "dnd", "")
lastAction.append_action(setdnd)
lastAction = lastAction.get_next_action()
sps = SimplePresenceStatus(reqcon, uh)
lastAction.append_action(sps)
lastAction = lastAction.get_next_action()
setdnd = SetSimplePresence(reqcon, "available", "")
lastAction.append_action(setdnd)
lastAction = lastAction.get_next_action()
sps = SimplePresenceStatus(reqcon, uh)
lastAction.append_action(sps)
lastAction = lastAction.get_next_action()
if False:
sl = Sleep(10 * 1000)
lastAction.append_action(sl)
lastAction = lastAction.get_next_action()
if False:
rclh = RequestHandle(reqcon, telepathy.HANDLE_TYPE_LIST, ["subscribe"])
lastAction.append_action(rclh)
lastAction = lastAction.get_next_action()
rclc = RequestChannel(
reqcon,
rclh,
telepathy.CHANNEL_TYPE_CONTACT_LIST,
telepathy.HANDLE_TYPE_LIST,
)
lastAction.append_action(rclc)
lastAction = lastAction.get_next_action()
ch = ContactHandles(reqcon, rclc)
lastAction.append_action(ch)
lastAction = lastAction.get_next_action()
ca = Aliases(reqcon, ch)
lastAction.append_action(ca)
lastAction = lastAction.get_next_action()
if True:
accountNumber = sys.argv[4]
enChan = EnsureChannel(reqcon, telepathy.CHANNEL_TYPE_TEXT, telepathy.HANDLE_TYPE_CONTACT, accountNumber)
lastAction.append_action(enChan)
lastAction = lastAction.get_next_action()
sendDebugtext = SendText(reqcon, enChan, enChan, telepathy.CHANNEL_TEXT_MESSAGE_TYPE_NORMAL, "Boo!")
lastAction.append_action(sendDebugtext)
lastAction = lastAction.get_next_action()
if False:
rch = RequestHandle(reqcon, telepathy.HANDLE_TYPE_CONTACT, ["18005558355"]) #(1-800-555-TELL)
lastAction.append_action(rch)
lastAction = lastAction.get_next_action()
# making a phone call
if True:
smHandle = rch
smHandleType = telepathy.HANDLE_TYPE_CONTACT
else:
smHandle = nullHandle
smHandleType = telepathy.HANDLE_TYPE_NONE
rsmc = RequestChannel(
reqcon,
smHandle,
telepathy.CHANNEL_TYPE_STREAMED_MEDIA,
smHandleType,
)
lastAction.append_action(rsmc)
lastAction = lastAction.get_next_action()
if False:
call = Call(reqcon, rsmc, rch)
lastAction.append_action(call)
lastAction = lastAction.get_next_action()
# sending a text
rtc = RequestChannel(
reqcon,
rch,
telepathy.CHANNEL_TYPE_TEXT,
smHandleType,
)
lastAction.append_action(rtc)
lastAction = lastAction.get_next_action()
if True:
closechan = CloseChannel(reqcon, rtc)
lastAction.append_action(closechan)
lastAction = lastAction.get_next_action()
rtc = RequestChannel(
reqcon,
rch,
telepathy.CHANNEL_TYPE_TEXT,
smHandleType,
)
lastAction.append_action(rtc)
lastAction = lastAction.get_next_action()
if False:
sendtext = SendText(reqcon, rtc, rch, telepathy.CHANNEL_TEXT_MESSAGE_TYPE_NORMAL, "Boo!")
lastAction.append_action(sendtext)
lastAction = lastAction.get_next_action()
if False:
bl = Block()
lastAction.append_action(bl)
lastAction = lastAction.get_next_action()
if False:
sl = Sleep(30 * 1000)
lastAction.append_action(sl)
lastAction = lastAction.get_next_action()
dis = Disconnect(reqcon)
lastAction.append_action(dis)
lastAction = lastAction.get_next_action()
quitter = QuitLoop(loop)
lastAction.append_action(quitter)
lastAction = lastAction.get_next_action()
firstAction.queue_action()
loop.run()
| epage/telepathy-bluewire | hand_tests/generic.py | Python | lgpl-2.1 | 17,072 | 0.034149 |
# -*- encoding: utf-8 -*-
##############################################################################
# Copyright (c) 2015 - Present All Rights Reserved
# Author: Cesar Lage <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at:
# <http://www.gnu.org/licenses/gpl.html>.
##############################################################################
import main
| ITPS/odoo-saas-tools | saas_portal_demo/controllers/__init__.py | Python | gpl-3.0 | 924 | 0 |
# authenticates with twitter, searches for microsoft, evaluates overall
# sentiment for microsoft
import numpy as np
import twitter
from textblob import TextBlob
f = open('me.auth')
keys = f.readlines()
# Read in keys
keys = [x.strip('\n') for x in keys]
# Connect
api = twitter.Api(consumer_key = keys[0],
consumer_secret = keys[1],
access_token_key = keys[2],
access_token_secret = keys[3])
print 'logged in as', api.VerifyCredentials().name
search = api.GetSearch(term='microsoft')
# Make text blobs out of status content
blobs = [ TextBlob(status.text) for status in search ]
sentiments = [ blob.sentiment.polarity for blob in blobs ]
filtered_sentiments = filter(lambda a: a != 0.0, sentiments)
# Guard against an empty result set, which would raise ZeroDivisionError
if filtered_sentiments:
    overall_sentiment = sum(filtered_sentiments) / len(filtered_sentiments)
    print 'Overall sentiment for microsoft: {0}'.format(overall_sentiment)
else:
    print 'No sentiment-bearing tweets found for microsoft'
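# For reference, TextBlob polarity is a float in [-1.0, 1.0]; an illustrative
# sketch (not part of the original script):
#   TextBlob("I love this").sentiment.polarity   # positive, > 0
#   TextBlob("I hate this").sentiment.polarity   # negative, < 0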
| dankolbman/MarketCents | twitter_feed.py | Python | mit | 920 | 0.018478 |
# -*- coding: utf-8 -*-
from django.http import HttpResponse, Http404
from django.template import Context
from django.contrib.sites.models import Site
from listings.syndication.models import Feed
from listings.models import POSTING_ACTIVE
def display_feed(request, feed_url):
site = Site.objects.get_current()
try:
feed = site.feed_set.get(feed_url=feed_url)
except Feed.DoesNotExist:
raise Http404
template = feed.get_template()
context = Context({'ads': feed.ads.filter(status=POSTING_ACTIVE)})
return HttpResponse(template.render(context), content_type=feed.content_type)
| wtrevino/django-listings | listings/syndication/views.py | Python | mit | 619 | 0.001616 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from commands import StartCommand
from commands import StopCommand
from commands import TwitterCommand
from handlers.ExceptionHandler import ExceptionHandler
COMMANDS = {
'/start': StartCommand.process_message,
'/stop': StopCommand.process_message,
'/tweet': TwitterCommand.process_message
}
def process_message(twitter_api, telegram_message):
try:
msg_command = telegram_message.message.text.split()[0].lower()
return COMMANDS[msg_command](twitter_api, telegram_message)
except Exception as e:
ExceptionHandler.handle_exception(e, False)
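# Illustrative dispatch sketch (the message shape mirrors the attribute access
# above; the concrete objects are assumptions, not a documented API):
#   msg.message.text == '/tweet hello world'
#   process_message(twitter_api, msg)  # routes to TwitterCommand.process_message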
| CorunaDevelopers/teleTweetBot | teleTweetBot/handlers/TelegramMessageHandler.py | Python | gpl-3.0 | 632 | 0 |
#!/usr/bin/python2.4
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OrganizationUnitProvisioningClient simplifies OrgUnit Provisioning API calls.
OrganizationUnitProvisioningClient extends gdata.client.GDClient to ease
interaction with the Google Organization Unit Provisioning API.
These interactions include the ability to create, retrieve, update and delete
organization units, move users within organization units, retrieve customerId
and update and retrieve users in organization units.
"""
__author__ = 'Gunjan Sharma <[email protected]>'
import urllib
import gdata.apps.organization.data
import gdata.client
CUSTOMER_ID_URI_TEMPLATE = '/a/feeds/customer/%s/customerId'
# OrganizationUnit URI templates
# The strings in this template are eventually replaced with the feed type
# (orgunit/orguser), API version and Google Apps domain name, respectively.
ORGANIZATION_UNIT_URI_TEMPLATE = '/a/feeds/%s/%s/%s'
# The value for orgunit requests
ORGANIZATION_UNIT_FEED = 'orgunit'
# The value for orguser requests
ORGANIZATION_USER_FEED = 'orguser'
class OrganizationUnitProvisioningClient(gdata.client.GDClient):
"""Client extension for the Google Org Unit Provisioning API service.
Attributes:
host: string The hostname for the MultiDomain Provisioning API service.
api_version: string The version of the MultiDomain Provisioning API.
"""
host = 'apps-apis.google.com'
api_version = '2.0'
auth_service = 'apps'
auth_scopes = gdata.gauth.AUTH_SCOPES['apps']
ssl = True
def __init__(self, domain, auth_token=None, **kwargs):
"""Constructs a new client for the Organization Unit Provisioning API.
Args:
domain: string The Google Apps domain with Organization Unit
Provisioning.
auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or
OAuthToken which authorizes this client to edit the Organization
Units.
"""
gdata.client.GDClient.__init__(self, auth_token=auth_token, **kwargs)
self.domain = domain
def make_organization_unit_provisioning_uri(
self, feed_type, customer_id, org_unit_path_or_user_email=None,
params=None):
"""Creates a resource feed URI for the Organization Unit Provisioning API.
Using this client's Google Apps domain, create a feed URI for organization
unit provisioning in that domain. If an org unit path or org user email
address is provided, return a URI for that specific resource.
If params are provided, append them as GET params.
Args:
feed_type: string The type of feed (orgunit/orguser)
customer_id: string The customerId of the user.
org_unit_path_or_user_email: string (optional) The org unit path or
org user email address for which to make a feed URI.
params: dict (optional) key -> value params to append as GET vars to the
URI. Example: params={'start': 'my-resource-id'}
Returns:
A string giving the URI for organization unit provisioning for this
client's Google Apps domain.
"""
uri = ORGANIZATION_UNIT_URI_TEMPLATE % (feed_type, self.api_version,
customer_id)
if org_unit_path_or_user_email:
uri += '/' + org_unit_path_or_user_email
if params:
uri += '?' + urllib.urlencode(params)
return uri
MakeOrganizationUnitProvisioningUri = make_organization_unit_provisioning_uri
def make_organization_unit_orgunit_provisioning_uri(self, customer_id,
org_unit_path=None,
params=None):
"""Creates a resource feed URI for the orgunit's Provisioning API calls.
Using this client's Google Apps domain, create a feed URI for organization
unit orgunit's provisioning in that domain. If an org_unit_path is
provided, return a URI for that specific resource.
If params are provided, append them as GET params.
Args:
customer_id: string The customerId of the user.
org_unit_path: string (optional) The organization unit's path for which
to make a feed URI.
params: dict (optional) key -> value params to append as GET vars to the
URI. Example: params={'start': 'my-resource-id'}
Returns:
A string giving the URI for organization unit provisioning for
      the given org_unit_path.
"""
return self.make_organization_unit_provisioning_uri(
ORGANIZATION_UNIT_FEED, customer_id, org_unit_path, params)
MakeOrganizationUnitOrgunitProvisioningUri = make_organization_unit_orgunit_provisioning_uri
def make_organization_unit_orguser_provisioning_uri(self, customer_id,
org_user_email=None,
params=None):
"""Creates a resource feed URI for the orguser's Provisioning API calls.
Using this client's Google Apps domain, create a feed URI for organization
unit orguser's provisioning in that domain. If an org_user_email is
provided, return a URI for that specific resource.
If params are provided, append them as GET params.
Args:
customer_id: string The customerId of the user.
      org_user_email: string (optional) The organization user's email address
        for which to make a feed URI.
params: dict (optional) key -> value params to append as GET vars to the
URI. Example: params={'start': 'my-resource-id'}
Returns:
A string giving the URI for organization user provisioning for
      the given org_user_email.
"""
return self.make_organization_unit_provisioning_uri(
ORGANIZATION_USER_FEED, customer_id, org_user_email, params)
MakeOrganizationUnitOrguserProvisioningUri = make_organization_unit_orguser_provisioning_uri
def make_customer_id_feed_uri(self):
"""Creates a feed uri for retrieving customerId of the user.
Returns:
A string giving the URI for retrieving customerId of the user.
"""
uri = CUSTOMER_ID_URI_TEMPLATE % (self.api_version)
return uri
MakeCustomerIdFeedUri = make_customer_id_feed_uri
def retrieve_customer_id(self, **kwargs):
"""Retrieve the Customer ID for the customer domain.
Returns:
A gdata.apps.organization.data.CustomerIdEntry.
"""
uri = self.MakeCustomerIdFeedUri()
return self.GetEntry(
uri,
desired_class=gdata.apps.organization.data.CustomerIdEntry,
**kwargs)
RetrieveCustomerId = retrieve_customer_id
def create_org_unit(self, customer_id, name, parent_org_unit_path='/',
description='', block_inheritance=False, **kwargs):
"""Create a Organization Unit.
Args:
customer_id: string The ID of the Google Apps customer.
name: string The simple organization unit text name, not the full path
name.
parent_org_unit_path: string The full path of the parental tree to this
organization unit (default: '/').
[Note: Each element of the path MUST be URL encoded
(example: finance%2Forganization/suborganization)]
description: string The human readable text description of the
organization unit (optional).
block_inheritance: boolean This parameter blocks policy setting
inheritance from organization units higher in
the organization tree (default: False).
Returns:
A gdata.apps.organization.data.OrgUnitEntry representing an organization
unit.
"""
new_org_unit = gdata.apps.organization.data.OrgUnitEntry(
org_unit_name=name, parent_org_unit_path=parent_org_unit_path,
org_unit_description=description,
org_unit_block_inheritance=block_inheritance)
return self.post(
new_org_unit,
self.MakeOrganizationUnitOrgunitProvisioningUri(customer_id), **kwargs)
CreateOrgUnit = create_org_unit
def update_org_unit(self, customer_id, org_unit_path, org_unit_entry,
**kwargs):
"""Update a Organization Unit.
Args:
customer_id: string The ID of the Google Apps customer.
org_unit_path: string The organization's full path name.
[Note: Each element of the path MUST be URL encoded
(example: finance%2Forganization/suborganization)]
org_unit_entry: gdata.apps.organization.data.OrgUnitEntry
The updated organization unit entry.
Returns:
A gdata.apps.organization.data.OrgUnitEntry representing an organization
unit.
"""
if not org_unit_entry.GetParentOrgUnitPath():
org_unit_entry.SetParentOrgUnitPath('/')
return self.update(org_unit_entry,
uri=self.MakeOrganizationUnitOrgunitProvisioningUri(
customer_id, org_unit_path=org_unit_path), **kwargs)
UpdateOrgUnit = update_org_unit
def move_users_to_org_unit(self, customer_id, org_unit_path, users_to_move,
**kwargs):
"""Move a user to an Organization Unit.
Args:
customer_id: string The ID of the Google Apps customer.
org_unit_path: string The organization's full path name.
[Note: Each element of the path MUST be URL encoded
(example: finance%2Forganization/suborganization)]
users_to_move: list Email addresses of users to move in list format.
[Note: You can move a maximum of 25 users at one time.]
Returns:
A gdata.apps.organization.data.OrgUnitEntry representing
an organization unit.
"""
org_unit_entry = self.retrieve_org_unit(customer_id, org_unit_path)
org_unit_entry.SetUsersToMove(', '.join(users_to_move))
if not org_unit_entry.GetParentOrgUnitPath():
org_unit_entry.SetParentOrgUnitPath('/')
return self.update(org_unit_entry,
uri=self.MakeOrganizationUnitOrgunitProvisioningUri(
customer_id, org_unit_path=org_unit_path), **kwargs)
MoveUserToOrgUnit = move_users_to_org_unit
def retrieve_org_unit(self, customer_id, org_unit_path, **kwargs):
"""Retrieve a Orgunit based on its path.
Args:
customer_id: string The ID of the Google Apps customer.
org_unit_path: string The organization's full path name.
[Note: Each element of the path MUST be URL encoded
(example: finance%2Forganization/suborganization)]
Returns:
A gdata.apps.organization.data.OrgUnitEntry representing
an organization unit.
"""
uri = self.MakeOrganizationUnitOrgunitProvisioningUri(
customer_id, org_unit_path=org_unit_path)
return self.GetEntry(
uri, desired_class=gdata.apps.organization.data.OrgUnitEntry, **kwargs)
RetrieveOrgUnit = retrieve_org_unit
def retrieve_feed_from_uri(self, uri, desired_class, **kwargs):
"""Retrieve feed from given uri.
Args:
uri: string The uri from where to get the feed.
desired_class: Feed The type of feed that if to be retrieved.
Returns:
Feed of type desired class.
"""
return self.GetFeed(uri, desired_class=desired_class, **kwargs)
RetrieveFeedFromUri = retrieve_feed_from_uri
def retrieve_all_org_units_from_uri(self, uri, **kwargs):
"""Retrieve all OrgUnits from given uri.
Args:
uri: string The uri from where to get the orgunits.
Returns:
      gdata.apps.organization.data.OrgUnitFeed object
"""
orgunit_feed = gdata.apps.organization.data.OrgUnitFeed()
temp_feed = self.RetrieveFeedFromUri(
uri, gdata.apps.organization.data.OrgUnitFeed)
orgunit_feed.entry = temp_feed.entry
next_link = temp_feed.GetNextLink()
while next_link is not None:
uri = next_link.GetAttributes()[0].value
temp_feed = self.GetFeed(
uri, desired_class=gdata.apps.organization.data.OrgUnitFeed, **kwargs)
orgunit_feed.entry[0:0] = temp_feed.entry
next_link = temp_feed.GetNextLink()
return orgunit_feed
RetrieveAllOrgUnitsFromUri = retrieve_all_org_units_from_uri
def retrieve_all_org_units(self, customer_id, **kwargs):
"""Retrieve all OrgUnits in the customer's domain.
Args:
customer_id: string The ID of the Google Apps customer.
Returns:
      gdata.apps.organization.data.OrgUnitFeed object
"""
    uri = self.MakeOrganizationUnitOrgunitProvisioningUri(
        customer_id, params={'get': 'all'})
    return self.RetrieveAllOrgUnitsFromUri(uri, **kwargs)
RetrieveAllOrgUnits = retrieve_all_org_units
def retrieve_page_of_org_units(self, customer_id, startKey=None, **kwargs):
"""Retrieve one page of OrgUnits in the customer's domain.
Args:
customer_id: string The ID of the Google Apps customer.
startKey: string The key to continue for pagination through all OrgUnits.
Returns:
      gdata.apps.organization.data.OrgUnitFeed object
"""
uri = ''
    if startKey is not None:
      uri = self.MakeOrganizationUnitOrgunitProvisioningUri(
          customer_id, params={'get': 'all', 'startKey': startKey})
    else:
      uri = self.MakeOrganizationUnitOrgunitProvisioningUri(
          customer_id, params={'get': 'all'})
return self.GetFeed(
uri, desired_class=gdata.apps.organization.data.OrgUnitFeed, **kwargs)
RetrievePageOfOrgUnits = retrieve_page_of_org_units
def retrieve_sub_org_units(self, customer_id, org_unit_path, **kwargs):
"""Retrieve all Sub-OrgUnits of the provided OrgUnit.
Args:
customer_id: string The ID of the Google Apps customer.
org_unit_path: string The organization's full path name.
[Note: Each element of the path MUST be URL encoded
(example: finance%2Forganization/suborganization)]
Returns:
      gdata.apps.organization.data.OrgUnitFeed object
"""
    uri = self.MakeOrganizationUnitOrgunitProvisioningUri(
        customer_id,
        params={'get': 'children', 'orgUnitPath': org_unit_path})
    return self.RetrieveAllOrgUnitsFromUri(uri, **kwargs)
RetrieveSubOrgUnits = retrieve_sub_org_units
def delete_org_unit(self, customer_id, org_unit_path, **kwargs):
"""Delete a Orgunit based on its path.
Args:
customer_id: string The ID of the Google Apps customer.
org_unit_path: string The organization's full path name.
[Note: Each element of the path MUST be URL encoded
(example: finance%2Forganization/suborganization)]
Returns:
An HTTP response object. See gdata.client.request().
"""
return self.delete(self.MakeOrganizationUnitOrgunitProvisioningUri(
customer_id, org_unit_path=org_unit_path), **kwargs)
DeleteOrgUnit = delete_org_unit
def update_org_user(self, customer_id, user_email, org_unit_path, **kwargs):
"""Update the OrgUnit of a OrgUser.
Args:
customer_id: string The ID of the Google Apps customer.
user_email: string The email address of the user.
org_unit_path: string The new organization's full path name.
[Note: Each element of the path MUST be URL encoded
(example: finance%2Forganization/suborganization)]
Returns:
A gdata.apps.organization.data.OrgUserEntry representing
an organization user.
"""
old_user_entry = self.RetrieveOrgUser(customer_id, user_email)
old_org_unit_path = old_user_entry.GetOrgUnitPath()
if not old_org_unit_path:
old_org_unit_path = '/'
old_user_entry.SetOldOrgUnitPath(old_org_unit_path)
old_user_entry.SetOrgUnitPath(org_unit_path)
return self.update(old_user_entry,
uri=self.MakeOrganizationUnitOrguserProvisioningUri(
customer_id, user_email), **kwargs)
UpdateOrgUser = update_org_user
def retrieve_org_user(self, customer_id, user_email, **kwargs):
"""Retrieve an organization user.
Args:
customer_id: string The ID of the Google Apps customer.
user_email: string The email address of the user.
Returns:
A gdata.apps.organization.data.OrgUserEntry representing
an organization user.
"""
uri = self.MakeOrganizationUnitOrguserProvisioningUri(customer_id,
user_email)
return self.GetEntry(
uri, desired_class=gdata.apps.organization.data.OrgUserEntry, **kwargs)
RetrieveOrgUser = retrieve_org_user
def retrieve_all_org_users_from_uri(self, uri, **kwargs):
"""Retrieve all OrgUsers from given uri.
Args:
uri: string The uri from where to get the orgusers.
Returns:
      gdata.apps.organization.data.OrgUserFeed object
"""
orguser_feed = gdata.apps.organization.data.OrgUserFeed()
temp_feed = self.RetrieveFeedFromUri(
uri, gdata.apps.organization.data.OrgUserFeed)
orguser_feed.entry = temp_feed.entry
next_link = temp_feed.GetNextLink()
while next_link is not None:
uri = next_link.GetAttributes()[0].value
temp_feed = self.GetFeed(
uri, desired_class=gdata.apps.organization.data.OrgUserFeed, **kwargs)
orguser_feed.entry[0:0] = temp_feed.entry
next_link = temp_feed.GetNextLink()
return orguser_feed
RetrieveAllOrgUsersFromUri = retrieve_all_org_users_from_uri
def retrieve_all_org_users(self, customer_id, **kwargs):
"""Retrieve all OrgUsers in the customer's domain.
Args:
customer_id: string The ID of the Google Apps customer.
Returns:
      gdata.apps.organization.data.OrgUserFeed object
"""
    uri = self.MakeOrganizationUnitOrguserProvisioningUri(
        customer_id, params={'get': 'all'})
    return self.RetrieveAllOrgUsersFromUri(uri, **kwargs)
RetrieveAllOrgUsers = retrieve_all_org_users
def retrieve_page_of_org_users(self, customer_id, startKey=None, **kwargs):
"""Retrieve one page of OrgUsers in the customer's domain.
Args:
customer_id: string The ID of the Google Apps customer.
      startKey: string The key to continue for pagination through all OrgUsers.
Returns:
      gdata.apps.organization.data.OrgUserFeed object
"""
uri = ''
if startKey is not None:
      uri = self.MakeOrganizationUnitOrguserProvisioningUri(
          customer_id, params={'get': 'all', 'startKey': startKey})
else:
uri = self.MakeOrganizationUnitOrguserProvisioningUri(
customer_id, params={'get': 'all'})
return self.GetFeed(
uri, desired_class=gdata.apps.organization.data.OrgUserFeed, **kwargs)
RetrievePageOfOrgUsers = retrieve_page_of_org_users
def retrieve_org_unit_users(self, customer_id, org_unit_path, **kwargs):
"""Retrieve all OrgUsers of the provided OrgUnit.
Args:
customer_id: string The ID of the Google Apps customer.
org_unit_path: string The organization's full path name.
[Note: Each element of the path MUST be URL encoded
(example: finance%2Forganization/suborganization)]
Returns:
gdata.apps.organisation.data.OrgUserFeed object
"""
uri = self.MakeOrganizationUnitOrguserProvisioningUri(
customer_id,
params={'get': 'children', 'orgUnitPath': org_unit_path})
return self.RetrieveAllOrgUsersFromUri(uri, **kwargs)
RetrieveOrgUnitUsers = retrieve_org_unit_users
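# Illustrative usage sketch (not part of the original module; the domain,
# credentials, org unit name and the CustomerIdEntry attribute access are
# assumptions made for demonstration):
#
#   client = OrganizationUnitProvisioningClient(domain='example.com')
#   client.ClientLogin('[email protected]', 'password', source='orgunit-demo')
#   customer_id = client.RetrieveCustomerId().customer_id
#   client.CreateOrgUnit(customer_id, 'engineering',
#                        parent_org_unit_path='/', description='Eng team')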
| boxed/CMi | web_frontend/gdata/apps/organization/client.py | Python | mit | 20,094 | 0.003583 |
from matplotlib import pyplot as plt
from matplotlib import cm
from os import path
import numpy as np
import cv2
import pandas as pd
from math import exp, pi, sqrt
import mahotas as mh
from numbapro import vectorize
def show_images(images,titles=None, scale=1.3):
"""Display a list of images"""
n_ims = len(images)
if titles is None: titles = ['(%d)' % i for i in range(1,n_ims + 1)]
fig = plt.figure()
n = 1
for image,title in zip(images,titles):
a = fig.add_subplot(1,n_ims,n) # Make subplot
if image.ndim == 2: # Is image grayscale?
plt.imshow(image, cmap = cm.Greys_r)
else:
plt.imshow(cv2.cvtColor(image, cv2.COLOR_RGB2BGR))
a.set_title(title)
plt.axis("off")
n += 1
fig.set_size_inches(np.array(fig.get_size_inches(), dtype=np.float) * n_ims / scale)
plt.show()
plt.close()
# Pyramid Down & blurr
# Easy-peesy
def pyr_blurr(image):
return cv2.GaussianBlur(cv2.pyrDown(image), (7, 7), 30.)
def median_blurr(image, size = 7):
return cv2.medianBlur(image, size)
def display_contours(image, contours, color = (255, 0, 0), thickness = -1, title = None):
imShow = image.copy()
for i in range(0, len(contours)):
cv2.drawContours(imShow, contours, i, color, thickness)
show_images([imShow], scale=0.7, titles=title)
def salt_and_peper(im, fraction = 0.01):
assert (0 < fraction <= 1.), "Fraction must be in (0, 1]"
sp = np.zeros(im.shape)
percent = round(fraction * 100 / 2.)
cv2.randu(sp, 0, 100)
# quarter salt quarter pepper
im_sp = im.copy()
im_sp [sp < percent] = 0
im_sp [sp > 100 - percent] = 255
return im_sp
def remove_light_reflex(im, ksize = 5):
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (ksize, ksize))
return cv2.morphologyEx(im, cv2.MORPH_OPEN, kernel)
def _filter_kernel_mf_fdog(L, sigma, t = 3, mf = True):
dim_y = int(L)
dim_x = 2 * int(t * sigma)
arr = np.zeros((dim_y, dim_x), 'f')
ctr_x = dim_x / 2
ctr_y = int(dim_y / 2.)
# an un-natural way to set elements of the array
# to their x coordinate
it = np.nditer(arr, flags=['multi_index'])
while not it.finished:
arr[it.multi_index] = it.multi_index[1] - ctr_x
it.iternext()
two_sigma_sq = 2 * sigma * sigma
sqrt_w_pi_sigma = 1. / (sqrt(2 * pi) * sigma)
if not mf:
sqrt_w_pi_sigma = sqrt_w_pi_sigma / sigma ** 2
@vectorize(['float32(float32)'], target='cpu')
def k_fun(x):
return sqrt_w_pi_sigma * exp(-x * x / two_sigma_sq)
@vectorize(['float32(float32)'], target='cpu')
def k_fun_derivative(x):
return -x * sqrt_w_pi_sigma * exp(-x * x / two_sigma_sq)
if mf:
kernel = k_fun(arr)
kernel = kernel - kernel.mean()
else:
kernel = k_fun_derivative(arr)
# return the correlation kernel for filter2D
return cv2.flip(kernel, -1)
def fdog_filter_kernel(L, sigma, t = 3):
'''
    K = -(x / (sqrt(2 * pi) * sigma^3)) * exp(-x^2 / (2 * sigma^2)), |y| <= L/2, |x| < t * sigma
'''
return _filter_kernel_mf_fdog(L, sigma, t, False)
def gaussian_matched_filter_kernel(L, sigma, t = 3):
'''
    K = 1 / (sqrt(2 * pi) * sigma) * exp(-x^2 / (2 * sigma^2)), |y| <= L/2, |x| < t * sigma
'''
return _filter_kernel_mf_fdog(L, sigma, t, True)
def createMatchedFilterBank(K, n = 12):
'''
Given a kernel, create matched filter bank
'''
rotate = 180 / n
center = (K.shape[1] / 2, K.shape[0] / 2)
cur_rot = 0
kernels = [K]
for i in range(1, n):
cur_rot += rotate
r_mat = cv2.getRotationMatrix2D(center, cur_rot, 1)
k = cv2.warpAffine(K, r_mat, (K.shape[1], K.shape[0]))
kernels.append(k)
return kernels
def applyFilters(im, kernels):
'''
Given a filter bank, apply them and record maximum response
'''
images = np.array([cv2.filter2D(im, -1, k) for k in kernels])
return np.max(images, 0)
def gabor_filters(ksize, sigma = 4.0, lmbda = 10.0, n = 16):
'''
Create a bank of Gabor filters spanning 180 degrees
'''
filters = []
for theta in np.arange(0, np.pi, np.pi / n):
kern = cv2.getGaborKernel((ksize, ksize), sigma, theta, lmbda, 0.5, 0, ktype=cv2.CV_64F)
kern /= 1.5*kern.sum()
filters.append(kern)
return filters
def saturate (v):
return np.array(map(lambda a: min(max(round(a), 0), 255), v))
def calc_hist(images, masks):
channels = map(lambda i: cv2.split(i), images)
imMask = zip(channels, masks)
nonZeros = map(lambda m: cv2.countNonZero(m), masks)
# grab three histograms - one for each channel
histPerChannel = map(lambda (c, mask): \
[cv2.calcHist([cimage], [0], mask, [256], np.array([0, 255])) for cimage in c], imMask)
# compute the cdf's.
# they are normalized & saturated: values over 255 are cut off.
cdfPerChannel = map(lambda (hChan, nz): \
[saturate(np.cumsum(h) * 255.0 / nz) for h in hChan], \
zip(histPerChannel, nonZeros))
return np.array(cdfPerChannel)
# compute color map based on minimal distances beteen cdf values of ref and input images
def getMin (ref, img):
l = [np.argmin(np.abs(ref - i)) for i in img]
return np.array(l)
# compute and apply color map on all channels of the image
def map_image(image, refHist, imageHist):
# each of the arguments contains histograms over 3 channels
mp = np.array([getMin(r, i) for (r, i) in zip(refHist, imageHist)])
channels = np.array(cv2.split(image))
mappedChannels = np.array([mp[i,channels[i]] for i in range(0, 3)])
return cv2.merge(mappedChannels).astype(np.uint8)
# compute the histograms on all three channels for all images
def histogram_specification(ref, images, masks):
'''
ref - reference image
images - a set of images to have color transferred via histogram specification
masks - masks to apply
'''
cdfs = calc_hist(images, masks)
mapped = [map_image(images[i], ref[0], cdfs[i, :, :]) for i in range(len(images))]
return mapped
def max_labelled_region(labels, Bc = None):
'''
Labelled region of maximal area
'''
return np.argmax(mh.labeled.labeled_size(labels)[1:]) + 1
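# Note: this second definition shadows the np.array-based saturate above,
# so calls made after module load use the list-returning version.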
def saturate (v):
return map(lambda a: min(max(round(a), 0), 255), v)
def plot_hist(hst, color):
fig = plt.figure()
plt.bar(np.arange(256), hst, width=2, color=color, edgecolor='none')
fig.set_size_inches(np.array(fig.get_size_inches(), dtype=np.float) * 2)
plt.show()
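# Illustrative pipeline sketch tying the helpers above together (the image
# path and parameter values are placeholder assumptions):
#
#   im = cv2.imread('retina.png')
#   green = cv2.split(im)[1]                      # vessels contrast best in green
#   clean = remove_light_reflex(green, ksize=5)
#   K = gaussian_matched_filter_kernel(L=9, sigma=1.5)
#   bank = createMatchedFilterBank(K, n=12)
#   response = applyFilters(clean, bank)          # max response over rotations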
| fierval/retina | DiabeticRetinopathy/Refactoring/kobra/imaging.py | Python | mit | 6,612 | 0.013313 |
"""
General Character commands usually availabe to all characters
"""
from django.conf import settings
from evennia.utils import utils, prettytable
from evennia.commands.default.muxcommand import MuxCommand
# limit symbol import for API
__all__ = ("CmdHome", "CmdLook", "CmdNick",
"CmdInventory", "CmdGet", "CmdDrop", "CmdGive",
"CmdSay", "CmdPose", "CmdAccess")
class CmdHome(MuxCommand):
"""
move to your character's home location
Usage:
home
Teleports you to your home location.
"""
key = "home"
locks = "cmd:perm(home) or perm(Builders)"
arg_regex = r"$"
def func(self):
"Implement the command"
caller = self.caller
home = caller.home
if not home:
caller.msg("You have no home!")
elif home == caller.location:
caller.msg("You are already home!")
else:
caller.move_to(home)
caller.msg("There's no place like home ...")
class CmdLook(MuxCommand):
"""
look at location or object
Usage:
look
look <obj>
look *<player>
Observes your location or objects in your vicinity.
"""
key = "look"
aliases = ["l", "ls"]
locks = "cmd:all()"
arg_regex = r"\s|$"
def func(self):
"""
Handle the looking.
"""
caller = self.caller
args = self.args
if args:
# Use search to handle duplicate/nonexistant results.
looking_at_obj = caller.search(args, use_nicks=True)
if not looking_at_obj:
return
else:
looking_at_obj = caller.location
if not looking_at_obj:
caller.msg("You have no location to look at!")
return
if not hasattr(looking_at_obj, 'return_appearance'):
# this is likely due to us having a player instead
looking_at_obj = looking_at_obj.character
if not looking_at_obj.access(caller, "view"):
caller.msg("Could not find '%s'." % args)
return
# get object's appearance
caller.msg(looking_at_obj.return_appearance(caller))
# the object's at_desc() method.
looking_at_obj.at_desc(looker=caller)
class CmdNick(MuxCommand):
"""
define a personal alias/nick
Usage:
nick[/switches] <nickname> = [<string>]
alias ''
Switches:
object - alias an object
player - alias a player
clearall - clear all your aliases
list - show all defined aliases (also "nicks" works)
Examples:
nick hi = say Hello, I'm Sarah!
nick/object tom = the tall man
A 'nick' is a personal shortcut you create for your own use. When
you enter the nick, the alternative string will be sent instead.
The switches control in which situations the substitution will
happen. The default is that it will happen when you enter a
command. The 'object' and 'player' nick-types kick in only when
    you use commands that require an object or player as a target -
you can then use the nick to refer to them.
Note that no objects are actually renamed or changed by this
command - the nick is only available to you. If you want to
permanently add keywords to an object for everyone to use, you
need build privileges and to use the @alias command.
"""
key = "nick"
aliases = ["nickname", "nicks", "@nick", "alias"]
locks = "cmd:all()"
def func(self):
"Create the nickname"
caller = self.caller
switches = self.switches
nicks = caller.nicks.get(return_obj=True)
if 'list' in switches:
table = prettytable.PrettyTable(["{wNickType",
"{wNickname",
"{wTranslates-to"])
for nick in utils.make_iter(nicks):
table.add_row([nick.db_category, nick.db_key, nick.db_strvalue])
string = "{wDefined Nicks:{n\n%s" % table
caller.msg(string)
return
if 'clearall' in switches:
caller.nicks.clear()
caller.msg("Cleared all aliases.")
return
if not self.args or not self.lhs:
caller.msg("Usage: nick[/switches] nickname = [realname]")
return
nick = self.lhs
real = self.rhs
if real == nick:
caller.msg("No point in setting nick same as the string to replace...")
return
# check so we have a suitable nick type
if not any(True for switch in switches if switch in ("object", "player", "inputline")):
switches = ["inputline"]
string = ""
for switch in switches:
oldnick = caller.nicks.get(key=nick, category=switch)
if not real:
# removal of nick
if oldnick:
# clear the alias
string += "\nNick '%s' (= '%s') was cleared." % (nick, oldnick)
caller.nicks.delete(nick, category=switch)
else:
string += "\nNo nick '%s' found, so it could not be removed." % nick
else:
# creating new nick
if oldnick:
string += "\nNick %s changed from '%s' to '%s'." % (nick, oldnick, real)
else:
string += "\nNick set: '%s' = '%s'." % (nick, real)
caller.nicks.add(nick, real, category=switch)
caller.msg(string)
class CmdInventory(MuxCommand):
"""
view inventory
Usage:
inventory
inv
Shows your inventory.
"""
key = "inventory"
aliases = ["inv", "i"]
locks = "cmd:all()"
arg_regex = r"$"
def func(self):
"check inventory"
items = self.caller.contents
if not items:
string = "You are not carrying anything."
else:
table = prettytable.PrettyTable(["name", "desc"])
table.header = False
table.border = False
for item in items:
table.add_row(["{C%s{n" % item.name, item.db.desc and item.db.desc or ""])
string = "{wYou are carrying:\n%s" % table
self.caller.msg(string)
class CmdGet(MuxCommand):
"""
pick up something
Usage:
get <obj>
Picks up an object from your location and puts it in
your inventory.
"""
key = "get"
aliases = "grab"
locks = "cmd:all()"
arg_regex = r"\s|$"
def func(self):
"implements the command."
caller = self.caller
if not self.args:
caller.msg("Get what?")
return
#print "general/get:", caller, caller.location, self.args, caller.location.contents
obj = caller.search(self.args, location=caller.location)
if not obj:
return
if caller == obj:
caller.msg("You can't get yourself.")
return
if not obj.access(caller, 'get'):
if obj.db.get_err_msg:
caller.msg(obj.db.get_err_msg)
else:
caller.msg("You can't get that.")
return
obj.move_to(caller, quiet=True)
caller.msg("You pick up %s." % obj.name)
caller.location.msg_contents("%s picks up %s." %
(caller.name,
obj.name),
exclude=caller)
# calling hook method
obj.at_get(caller)
class CmdDrop(MuxCommand):
"""
drop something
Usage:
drop <obj>
Lets you drop an object from your inventory into the
location you are currently in.
"""
key = "drop"
locks = "cmd:all()"
arg_regex = r"\s|$"
def func(self):
"Implement command"
caller = self.caller
if not self.args:
caller.msg("Drop what?")
return
# Because the DROP command by definition looks for items
# in inventory, call the search function using location = caller
obj = caller.search(self.args, location=caller,
nofound_string="You aren't carrying %s." % self.args,
multimatch_string="You carry more than one %s:" % self.args)
if not obj:
return
obj.move_to(caller.location, quiet=True)
caller.msg("You drop %s." % (obj.name,))
caller.location.msg_contents("%s drops %s." %
(caller.name, obj.name),
exclude=caller)
# Call the object script's at_drop() method.
obj.at_drop(caller)
class CmdGive(MuxCommand):
"""
give away something to someone
Usage:
give <inventory obj> = <target>
    Gives an item from your inventory to another character,
placing it in their inventory.
"""
key = "give"
locks = "cmd:all()"
arg_regex = r"\s|$"
def func(self):
"Implement give"
caller = self.caller
if not self.args or not self.rhs:
caller.msg("Usage: give <inventory object> = <target>")
return
to_give = caller.search(self.lhs, location=caller,
nofound_string="You aren't carrying %s." % self.lhs,
multimatch_string="You carry more than one %s:" % self.lhs)
target = caller.search(self.rhs)
if not (to_give and target):
return
if target == caller:
caller.msg("You keep %s to yourself." % to_give.key)
return
if not to_give.location == caller:
caller.msg("You are not holding %s." % to_give.key)
return
# give object
caller.msg("You give %s to %s." % (to_give.key, target.key))
to_give.move_to(target, quiet=True)
target.msg("%s gives you %s." % (caller.key, to_give.key))
class CmdDesc(MuxCommand):
"""
describe yourself
Usage:
desc <description>
Add a description to yourself. This
will be visible to people when they
look at you.
"""
key = "desc"
locks = "cmd:all()"
arg_regex = r"\s|$"
def func(self):
"add the description"
if not self.args:
self.caller.msg("You must add a description.")
return
self.caller.db.desc = self.args.strip()
self.caller.msg("You set your description.")
class CmdSay(MuxCommand):
"""
speak as your character
Usage:
say <message>
Talk to those in your current location.
"""
key = "say"
aliases = ['"', "'"]
locks = "cmd:all()"
def func(self):
"Run the say command"
caller = self.caller
if not self.args:
caller.msg("Say what?")
return
speech = self.args
# calling the speech hook on the location
speech = caller.location.at_say(caller, speech)
# Feedback for the object doing the talking.
caller.msg('You say, "%s{n"' % speech)
# Build the string to emit to neighbors.
emit_string = '%s says, "%s{n"' % (caller.name,
speech)
caller.location.msg_contents(emit_string,
exclude=caller)
class CmdPose(MuxCommand):
"""
strike a pose
Usage:
pose <pose text>
pose's <pose text>
Example:
pose is standing by the wall, smiling.
-> others will see:
Tom is standing by the wall, smiling.
Describe an action being taken. The pose text will
automatically begin with your name.
"""
key = "pose"
aliases = [":", "emote"]
locks = "cmd:all()"
def parse(self):
"""
Custom parse the cases where the emote
starts with some special letter, such
as 's, at which we don't want to separate
the caller's name and the emote with a
space.
"""
args = self.args
if args and not args[0] in ["'", ",", ":"]:
args = " %s" % args.strip()
self.args = args
def func(self):
"Hook function"
if not self.args:
msg = "What do you want to do?"
self.caller.msg(msg)
else:
msg = "%s%s" % (self.caller.name, self.args)
self.caller.location.msg_contents(msg)
class CmdAccess(MuxCommand):
"""
show your current game access
Usage:
access
This command shows you the permission hierarchy and
which permission groups you are a member of.
"""
key = "access"
aliases = ["groups", "hierarchy"]
locks = "cmd:all()"
arg_regex = r"$"
def func(self):
"Load the permission groups"
caller = self.caller
hierarchy_full = settings.PERMISSION_HIERARCHY
string = "\n{wPermission Hierarchy{n (climbing):\n %s" % ", ".join(hierarchy_full)
#hierarchy = [p.lower() for p in hierarchy_full]
if self.caller.player.is_superuser:
cperms = "<Superuser>"
pperms = "<Superuser>"
else:
cperms = ", ".join(caller.permissions.all())
pperms = ", ".join(caller.player.permissions.all())
string += "\n{wYour access{n:"
string += "\nCharacter {c%s{n: %s" % (caller.key, cperms)
if hasattr(caller, 'player'):
string += "\nPlayer {c%s{n: %s" % (caller.player.key, pperms)
caller.msg(string)
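# A minimal wiring sketch (assumption: standard Evennia cmdset conventions;
# the CharacterCmdSet subclass below is illustrative and not part of this
# module):
#
#     from evennia import default_cmds
#
#     class CharacterCmdSet(default_cmds.CharacterCmdSet):
#         def at_cmdset_creation(self):
#             super(CharacterCmdSet, self).at_cmdset_creation()
#             self.add(CmdGive())
#             self.add(CmdSay())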
| mrkulk/text-world | evennia/commands/default/general.py | Python | bsd-3-clause | 13,667 | 0.001537 |
from battlePy.utils import docprop
(UP, DOWN, LEFT, RIGHT) = SHIP_ORIENTATIONS = range(4)
VECTOR_DICT = {UP: (0, 1), DOWN: (0, -1), LEFT: (-1, 0), RIGHT: (1, 0)}
class Ship(object):
name = docprop('name', 'Name of the ship')
size = docprop('size', 'Size of the ship')
hits = docprop('hits', 'Set of current hit locations')
locations = docprop('locations', 'Set of ship coordinates')
game = docprop('game', 'The game this Ship belongs to')
def __init__(self, name, size, game):
self.name = name
self.size = size
self.hits = set()
self.locations = set()
self.game = game
self.symbol = self.name[0]
def __repr__(self):
return "<%s %s %s>" % (self.__class__.__name__, id(self), self.name)
def placeShip(self, location, orientation):
self.locations = set()
newLocation = location
self.locations.add(newLocation)
for i in range(self.size - 1):
newLocation = (
newLocation[0] + VECTOR_DICT[orientation][0],
newLocation[1] + VECTOR_DICT[orientation][1],
)
self.locations.add(newLocation)
def isPlacementValid(self):
return self.game.isValidShipPlacement(self)
def addHit(self, location):
if location not in self.locations:
return
self.hits.add(location)
def isSunk(self):
return self.hits == self.locations
    def getProtoShip(self):
        # Ship.__init__ requires a game argument; pass this ship's game
        # along so constructing the prototype doesn't raise a TypeError.
        return Ship(self.name, self.size, self.game)
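# Minimal usage sketch (assumption: `game` may be None when placement
# validation via isPlacementValid() is not exercised):
#
#     ship = Ship('Destroyer', 2, None)
#     ship.placeShip((0, 0), RIGHT)      # occupies {(0, 0), (1, 0)}
#     ship.addHit((0, 0)); ship.addHit((1, 0))
#     assert ship.isSunk()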
| kyokley/BattlePyEngine | src/battlePy/ship.py | Python | mit | 1,526 | 0 |
# Copyright (c) 2014 Yubico AB
# All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Additional permission under GNU GPL version 3 section 7
#
# If you modify this program, or any covered work, by linking or
# combining it with the OpenSSL project's OpenSSL library (or a
# modified version of that library), containing parts covered by the
# terms of the OpenSSL or SSLeay licenses, We grant you additional
# permission to convey the resulting work. Corresponding Source for a
# non-source form of such a combination shall include the source code
# for the parts of OpenSSL used as well as that of the covered work.
"""
Strings for Yubico Authenticator.
Note: String names must not start with underscore (_).
"""
organization = "Yubico"
domain = "yubico.com"
app_name = "Yubico Authenticator"
win_title_1 = "Yubico Authenticator (%s)"
about_1 = "About: %s"
copyright = "Copyright © Yubico"
version_1 = "Version: %s"
wait = "Please wait..."
error = "Error"
menu_file = "&File"
menu_help = "&Help"
action_about = "&About"
action_add = "&Add..."
action_import = "&Import..."
action_reset = "&Reset"
action_password = "Set/Change &password"
action_settings = "&Settings"
action_delete = "&Delete"
action_show = "&Show credentials"
action_close = "&Close Window"
action_quit = "&Quit"
password = "Password"
settings = "Settings"
advanced = "Advanced"
search = "Search"
pass_required = "Password required"
remember = "Remember password"
no_key = "Insert a YubiKey..."
key_busy = "YubiKey already in use!"
key_present = "YubiKey found. Reading..."
key_removed = "YubiKey removed"
key_removed_desc = "There was an error communicating with the device!"
n_digits = "Number of digits"
enable_systray = "Show in system tray"
kill_scdaemon = "Kill scdaemon on show"
reader_name = "Card reader name"
no_creds = "No credentials available"
add_cred = "New credential"
cred_name = "Credential name"
cred_key = "Secret key (base32)"
cred_type = "Credential type"
cred_totp = "Time based (TOTP)"
cred_hotp = "Counter based (HOTP)"
algorithm = "Algorithm"
invalid_name = "Invalid name"
invalid_name_desc = "Name must be at least 3 characters"
invalid_key = "Invalid key"
invalid_key_desc = "Key must be base32 encoded"
set_pass = "Set password"
new_pass = "New password (blank for none)"
ver_pass = "Verify new password"
pass_mismatch = "Passwords do not match"
pass_mismatch_desc = "Please enter the same password twice"
touch_title = "Touch required"
touch_desc = "Touch your YubiKey now"
reset_title = "Confirm reset"
reset_warning_desc = """<span>Are you sure you want to delete all OATH credentials on the device?</span>
<br><br>
<b>This action cannot be undone.</b>
<br><br>
"""
imported = "Import complete"
imported_desc = "Found %d tokens, successfully imported %d tokens.%s"
delete_title = "Confirm credential deletion"
delete_desc_1 = """<span>Are you sure you want to delete the credential?</span>
<br>
This action cannot be undone.
<br><br>
<b>Delete credential: %s</b>
"""
free = "free"
in_use = "in use"
require_touch = "Require touch"
require_manual_refresh = "Require manual refresh"
overwrite_entry = "Overwrite entry?"
overwrite_entry_desc = "An entry with this username already exists.\n\nDo " \
"you wish to overwrite it? This action cannot be undone."
qr_scan = "Scan a QR code"
qr_scanning = "Scanning for QR code..."
qr_not_found = "QR code not found"
qr_not_found_desc = "No usable QR code detected. Make sure the QR code is " \
"fully visible on your primary screen and try again."
qr_invalid_type = "Invalid OTP type"
qr_invalid_type_desc = "Only TOTP and HOTP types are supported."
qr_invalid_digits = "Invalid number of digits"
qr_invalid_digits_desc = "An OTP may only contain 6 or 8 digits."
qr_invalid_algo = "Unsupported algorithm"
qr_invalid_algo_desc = "HMAC algorithm '%s' is not supported."
qr_missing_key = "Invalid QR code"
qr_missing_key_desc = "The QR code found on screen is missing the '%s' attribute."
tt_num_digits = "The number of digits to show for the credential."
tt_systray = "When checked, display an icon in the systray, and leave the " \
"application running there when closed."
tt_kill_scdaemon = "Kills any running scdaemon process when the window is " \
"shown. This is useful when using this application together with GnuPG " \
"to avoid GnuPG locking the device."
tt_reader_name = "Changes the default smartcard reader name to look for. " \
"This can be used to target a specific YubiKey when multiple are used, " \
"or to target an NFC reader."
ccid_disabled = '<b>CCID (smart card capabilities) is disabled on the ' \
'inserted YubiKey.</b><br><br>Without CCID enabled, you will only be ' \
'able to store 2 credentials.<br><br>' \
'<a href="%s">Learn how to enable CCID</a><br>'
no_space = "No space available"
no_space_desc = "There is not enough space to add another " \
"credential on your device.\n\nTo create free space to add a " \
"new credential, delete those you no longer need."
oath_backend = "OATH Storage Backend"
oath_backend_ccid = "Smart Card"
oath_backend_sqlite = "SQLite"
| tycho/yubioath-desktop | yubioath/gui/messages.py | Python | gpl-3.0 | 5,696 | 0.000351 |
import re
from models.contact import Contact
def test_all_contacts_on_homepage(app, db):
if app.contact.count() == 0:
app.contact.add(Contact(first_name="Mister", last_name="Muster", mobile_phone="123", email_1="[email protected]"))
contacts_from_homepage = sorted(app.contact.get_contact_list(), key = Contact.contact_id_or_max)
contacts_from_db = sorted(db.get_contact_list(), key = Contact.contact_id_or_max)
for i in range(len(contacts_from_homepage)):
hp_contact=contacts_from_homepage[i]
db_contact=contacts_from_db[i]
assert hp_contact.first_name == db_contact.first_name
assert hp_contact.last_name == db_contact.last_name
assert clear_address(hp_contact.address) == clear_address(db_contact.address)
assert clear_phone(hp_contact.all_phones_homepage) == clear_phone(merge_phones_homepage(db_contact))
assert hp_contact.all_emails_homepage == merge_emails_homepage(db_contact)
print("Successfully verified %s contacts vs Database" % str(len(contacts_from_homepage)))
"""def test_contact_on_homepage(app):
if app.contact.count() == 0:
app.contact.add(Contact(first_name="Mister", last_name="Muster", mobile_phone="123", email_1="[email protected]"))
index = randrange(len(app.contact.get_contact_list()))
contact_from_homepage = app.contact.get_contact_list()[index]
contact_from_editpage = app.contact.get_contact_data_editpage(index)
assert contact_from_homepage.first_name == contact_from_editpage.first_name
assert contact_from_homepage.last_name == contact_from_editpage.last_name
assert contact_from_homepage.address == contact_from_editpage.address
assert contact_from_homepage.all_phones_homepage == merge_phones_homepage(contact_from_editpage)
assert contact_from_homepage.all_emails_homepage == merge_emails_homepage(contact_from_editpage)"""
"""def test_phones_on_viewpage(app):
contact_from_viewpage = app.contact.get_contact_data_viewpage(0)
contact_from_editpage = app.contact.get_contact_data_editpage(0)
assert contact_from_viewpage.home_phone == contact_from_editpage.home_phone
assert contact_from_viewpage.work_phone == contact_from_editpage.work_phone
assert contact_from_viewpage.mobile_phone == contact_from_editpage.mobile_phone
assert contact_from_viewpage.fax == contact_from_editpage.fax"""
def clear(s):
#return "".join(symbol for symbol in s if symbol not in "[]()- 0")
return re.sub("[- ()]", "", s)
def clear_phone(number):
return re.sub("0", "", number)
def clear_address(address):
    # the '+' in the original character class was most likely meant as a
    # quantifier; as a literal it would also strip '+' signs from addresses
    return re.sub(r"[\n\r\s]+", "", address)
def merge_phones_homepage(contact):
return "\n".join(filter(lambda x: x != "",
map(lambda x: clear(x),
filter(lambda x: x is not None,
[contact.home_phone, contact.mobile_phone, contact.work_phone]))))
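# e.g. a contact with home_phone="22-33", mobile_phone=None, work_phone="(495) 111"
# merges to "2233\n495111" (None values dropped, separators stripped, newline-joined)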
def merge_emails_homepage(contact):
return "\n".join(filter(lambda x: x != "", filter(lambda x: x is not None,
[contact.email_1, contact.email_2, contact.email_3])))
| rgurevych/python_for_testers | tests/test_contacts_data_compliance.py | Python | apache-2.0 | 3,163 | 0.007272 |
# Generated by Django 2.2 on 2019-04-22 16:27
from django.db import migrations
import model_utils.fields
class Migration(migrations.Migration):
dependencies = [
('schedules', '0008_auto_20180208_1946'),
]
operations = [
migrations.AlterField(
model_name='shiftslotdayrule',
name='rule',
field=model_utils.fields.StatusField(choices=[(0, 'dummy')], default='mo', max_length=2, no_check_for_status=True),
),
]
| KSG-IT/ksg-nett | schedules/migrations/0009_auto_20190422_1627.py | Python | gpl-3.0 | 489 | 0.002045 |
"""Tests for base extension."""
import unittest
from grow.extensions import base_extension
class BaseExtensionTestCase(unittest.TestCase):
"""Test the base extension."""
def test_config_disabled(self):
"""Uses the disabled config."""
ext = base_extension.BaseExtension(None, {
'disabled': [
'a',
],
'enabled': [
'a',
],
})
self.assertFalse(ext.hooks.is_enabled('a'))
self.assertFalse(ext.hooks.is_enabled('b'))
def test_config_enabled(self):
"""Uses the enabled config."""
ext = base_extension.BaseExtension(None, {
'enabled': [
'a',
],
})
self.assertTrue(ext.hooks.is_enabled('a'))
self.assertFalse(ext.hooks.is_enabled('b'))
| grow/pygrow | grow/extensions/base_extension_test.py | Python | mit | 844 | 0 |
import sys
import logging
import urllib
import urllib2
import json
from lxml import etree
class GGeocode():
""" Wrapper for Google Geocode API v3.
https://developers.google.com/maps/documentation/geocoding/
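    Example (hypothetical address; performs a live network request):
        g = GGeocode(address='1600 Amphitheatre Parkway, Mountain View, CA')
        if g.status == 'OK':
            print g.lat, g.lon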
"""
def __init__(self, method='http',
output='json',
sensor='false',
address='',
components='',
latlng='',
client='',
signature='',
bounds='',
language='',
region=''):
# Construct base url
        method = method.lower()
        self.method = method
if method not in ['http','https']:
raise ValueError("""'method' is '%s' -
needs to be either 'http' or 'https'""" % (method,))
        output = output.lower()
        self.output = output
if output not in ['json','xml']:
raise ValueError("""'output' is '%s' -
needs to be either 'xml' or 'json'""" % (output,))
self.base_url = '%s://maps.googleapis.com/maps/api/geocode/%s?' % \
(method, output)
# Collect parameters
self.params = {}
# required parameters:
# sensor
# address or latlng or components
        sensor = sensor.lower()
        self.params['sensor'] = sensor
        if sensor not in ['true','false']:
raise ValueError("""'sensor' is '%s' -
needs to be either 'true' or 'false'""" % (sensor,))
if (address and (latlng or components)) or (latlng and components):
raise ValueError("""Only supply one of these (not more):
address, latlng, or components""")
if not address and not latlng and not components:
raise ValueError("""Must supply one of the following:
address, latlng, or components""")
if address: self.params['address'] = address
if latlng: self.params['latlng'] = latlng
if components:
for component in components.split('|'):
if ':' not in component:
raise ValueError("""Component is %s - must be in the form
of 'component:value'""" % (component,))
if component.split(':')[0] not in ['route',
'locality',
'administrative_area',
'postal_code',
'country']:
raise ValueError("""Component is %s - must be:
route, locality, administrative_area,
postal_code or country""" % (component.split(':')[0],))
self.params['components'] = components
# optional parameters:
# client and signature
# bounds
# language
# region
if (client and not signature) or (signature and not client):
raise ValueError("""Must supply both client and signature.""")
if client and signature:
self.params['client'] = client
self.params['signature'] = signature
if bounds: self.params['bounds'] = bounds
        if language: self.params['language'] = language
        if region: self.params['region'] = region
# Access Google Geocoder API
try:
self.url = '%s%s' % (self.base_url,
urllib.urlencode(self.params))
self.response = urllib2.urlopen(self.url).read()
        except Exception:
e = sys.exc_info()[1]
logging.error(e)
# Get status and results
if output == 'json':
self.output = json.loads(self.response)
self.status = self.output['status']
self.results = self.output['results']
self.results_count = len(self.results)
if address or components:
self.lat = self.results[0]['geometry']['location']['lat']
self.lon = self.results[0]['geometry']['location']['lng']
elif latlng:
self.address = self.results[0]['formatted_address']
elif output == 'xml':
self.output = etree.fromstring(self.response)
self.status = self.output.xpath('/GeocodeResponse/status/text()')[0]
self.results = self.output.xpath('/GeocodeResponse/result')
self.results_count = len(self.results)
if address or components:
self.lat = self.results[0].xpath('geometry/location/lat/text()')[0]
self.lon = self.results[0].xpath('geometry/location/lng/text()')[0]
elif latlng:
self.address = self.results[0].xpath('formatted_address')[0]
if self.status != 'OK':
logging.error("Call to %s unsuccessful (Error code '%s')" % \
                (self.url,self.status))
| leeclemmer/ggeocode | ggeocode/ggeocode.py | Python | mit | 3,893 | 0.041613 |
from __future__ import unicode_literals
from django.db import models
from django_evolution.mutations import AddField
MUTATIONS = [
AddField('ReviewRequest', 'changedescs', models.ManyToManyField,
related_model='changedescs.ChangeDescription'),
AddField('ReviewRequestDraft', 'changedesc', models.ForeignKey,
initial=None, null=True,
related_model='changedescs.ChangeDescription')
]
| chipx86/reviewboard | reviewboard/reviews/evolutions/change_descriptions.py | Python | mit | 432 | 0 |
import operator
from django.db.backends.base.features import BaseDatabaseFeatures
from django.utils.functional import cached_property
class DatabaseFeatures(BaseDatabaseFeatures):
empty_fetchmany_value = ()
allows_group_by_pk = True
related_fields_match_type = True
# MySQL doesn't support sliced subqueries with IN/ALL/ANY/SOME.
allow_sliced_subqueries_with_in = False
has_select_for_update = True
supports_forward_references = False
supports_regex_backreferencing = False
supports_date_lookup_using_string = False
can_introspect_autofield = True
can_introspect_binary_field = False
can_introspect_duration_field = False
can_introspect_small_integer_field = True
can_introspect_positive_integer_field = True
introspected_boolean_field_type = 'IntegerField'
supports_index_column_ordering = False
supports_timezones = False
requires_explicit_null_ordering_when_grouping = True
allows_auto_pk_0 = False
can_release_savepoints = True
atomic_transactions = False
can_clone_databases = True
supports_temporal_subtraction = True
supports_select_intersection = False
supports_select_difference = False
supports_slicing_ordering_in_compound = True
supports_index_on_text_field = False
has_case_insensitive_like = False
create_test_procedure_without_params_sql = """
CREATE PROCEDURE test_procedure ()
BEGIN
DECLARE V_I INTEGER;
SET V_I = 1;
END;
"""
create_test_procedure_with_int_param_sql = """
CREATE PROCEDURE test_procedure (P_I INTEGER)
BEGIN
DECLARE V_I INTEGER;
SET V_I = P_I;
END;
"""
db_functions_convert_bytes_to_str = True
# Neither MySQL nor MariaDB support partial indexes.
supports_partial_indexes = False
supports_order_by_nulls_modifier = False
order_by_nulls_first = True
@cached_property
def _mysql_storage_engine(self):
"Internal method used in Django tests. Don't rely on this from your code"
with self.connection.cursor() as cursor:
cursor.execute("SELECT ENGINE FROM INFORMATION_SCHEMA.ENGINES WHERE SUPPORT = 'DEFAULT'")
result = cursor.fetchone()
return result[0]
@cached_property
def update_can_self_select(self):
return self.connection.mysql_is_mariadb and self.connection.mysql_version >= (10, 3, 2)
@cached_property
def can_introspect_foreign_keys(self):
"Confirm support for introspected foreign keys"
return self._mysql_storage_engine != 'MyISAM'
@cached_property
def can_return_columns_from_insert(self):
return self.connection.mysql_is_mariadb and self.connection.mysql_version >= (10, 5, 0)
can_return_rows_from_bulk_insert = property(operator.attrgetter('can_return_columns_from_insert'))
@cached_property
def has_zoneinfo_database(self):
# Test if the time zone definitions are installed. CONVERT_TZ returns
# NULL if 'UTC' timezone isn't loaded into the mysql.time_zone.
with self.connection.cursor() as cursor:
cursor.execute("SELECT CONVERT_TZ('2001-01-01 01:00:00', 'UTC', 'UTC')")
return cursor.fetchone()[0] is not None
@cached_property
def is_sql_auto_is_null_enabled(self):
with self.connection.cursor() as cursor:
cursor.execute('SELECT @@SQL_AUTO_IS_NULL')
result = cursor.fetchone()
return result and result[0] == 1
@cached_property
def supports_over_clause(self):
if self.connection.mysql_is_mariadb:
return True
return self.connection.mysql_version >= (8, 0, 2)
supports_frame_range_fixed_distance = property(operator.attrgetter('supports_over_clause'))
@cached_property
def supports_column_check_constraints(self):
if self.connection.mysql_is_mariadb:
return self.connection.mysql_version >= (10, 2, 1)
return self.connection.mysql_version >= (8, 0, 16)
supports_table_check_constraints = property(operator.attrgetter('supports_column_check_constraints'))
@cached_property
def can_introspect_check_constraints(self):
if self.connection.mysql_is_mariadb:
version = self.connection.mysql_version
return (version >= (10, 2, 22) and version < (10, 3)) or version >= (10, 3, 10)
return self.connection.mysql_version >= (8, 0, 16)
@cached_property
def has_select_for_update_skip_locked(self):
return not self.connection.mysql_is_mariadb and self.connection.mysql_version >= (8, 0, 1)
@cached_property
def has_select_for_update_nowait(self):
if self.connection.mysql_is_mariadb:
return self.connection.mysql_version >= (10, 3, 0)
return self.connection.mysql_version >= (8, 0, 1)
@cached_property
def supports_explain_analyze(self):
return self.connection.mysql_is_mariadb or self.connection.mysql_version >= (8, 0, 18)
@cached_property
def supported_explain_formats(self):
# Alias MySQL's TRADITIONAL to TEXT for consistency with other
# backends.
formats = {'JSON', 'TEXT', 'TRADITIONAL'}
if not self.connection.mysql_is_mariadb and self.connection.mysql_version >= (8, 0, 16):
formats.add('TREE')
return formats
@cached_property
def supports_transactions(self):
"""
All storage engines except MyISAM support transactions.
"""
return self._mysql_storage_engine != 'MyISAM'
@cached_property
def ignores_table_name_case(self):
with self.connection.cursor() as cursor:
cursor.execute('SELECT @@LOWER_CASE_TABLE_NAMES')
result = cursor.fetchone()
return result and result[0] != 0
@cached_property
def supports_default_in_lead_lag(self):
# To be added in https://jira.mariadb.org/browse/MDEV-12981.
return not self.connection.mysql_is_mariadb
@cached_property
def supports_json_field(self):
if self.connection.mysql_is_mariadb:
return self.connection.mysql_version >= (10, 2, 7)
return self.connection.mysql_version >= (5, 7, 8)
@cached_property
def can_introspect_json_field(self):
if self.connection.mysql_is_mariadb:
return self.supports_json_field and self.can_introspect_check_constraints
return self.supports_json_field
| theo-l/django | django/db/backends/mysql/features.py | Python | bsd-3-clause | 6,495 | 0.002002 |
##############################################################################
#
# Copyright (C) 2016 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emanuel Cino <[email protected]>
#
# The licence is in the file __manifest__.py
#
##############################################################################
import base64
import logging
from odoo import models, api, fields
_logger = logging.getLogger(__name__)
try:
from wand.image import Image
except ImportError:
_logger.warning("Please install wand to use PDF Previews")
class PdfPreviewWizard(models.TransientModel):
"""
Generate pdf of communication.
"""
_name = "partner.communication.pdf.wizard"
_description = "Partner Communication - PDF Wizard"
communication_id = fields.Many2one(
"partner.communication.job", required=True, ondelete="cascade", readonly=False
)
preview = fields.Binary(compute="_compute_pdf")
state = fields.Selection(related="communication_id.send_mode")
send_state = fields.Selection(related="communication_id.state")
body_html = fields.Html(compute="_compute_html")
@api.multi
def _compute_pdf(self):
if self.state != "physical":
return
comm = self.communication_id
report = comm.report_id.with_context(
lang=comm.partner_id.lang, must_skip_send_to_printer=True, bin_size=False
)
data = report.render_qweb_pdf(comm.ids)
with Image(blob=data[0], resolution=150) as pdf_image:
preview = base64.b64encode(pdf_image.make_blob(format="jpeg"))
self.preview = preview
@api.multi
def _compute_html(self):
comm = self.communication_id
template = getattr(comm.email_template_id, "sendgrid_localized_template", False)
if template:
body_html = template.html_content.replace("<%body%>", comm.body_html)
self.body_html = body_html
        else:
            self.body_html = comm.body_html
@api.multi
def send(self):
return self.communication_id.send()
| CompassionCH/compassion-modules | partner_communication/wizards/pdf_wizard.py | Python | agpl-3.0 | 2,115 | 0.001891 |
from flask_example.models import user
from social.apps.flask_app import models
| nvbn/python-social-auth | examples/flask_example/models/__init__.py | Python | bsd-3-clause | 79 | 0 |
import os
import shutil
import unittest
from parameterized.parameterized import parameterized
import six
from conans.client import defs_to_string
from conans.client.build.meson import Meson
from conans.client.conf import default_settings_yml
from conans.client.tools import args_to_string
from conans.errors import ConanException
from conans.model.settings import Settings
from conans.test.utils.conanfile import ConanFileMock, MockDepsCppInfo
from conans.test.utils.test_files import temp_folder
class MesonTest(unittest.TestCase):
def setUp(self):
self.tempdir = temp_folder(path_with_spaces=False)
def tearDown(self):
shutil.rmtree(self.tempdir)
def _check_commands(self, cmd_ref, cmd_test):
cmd_ref_splitted = cmd_ref.split(' ')
cmd_test_splitted = cmd_test.split(' ')
self.assertEqual(cmd_ref_splitted[:3], cmd_test_splitted[:3])
self.assertEqual(set(cmd_ref_splitted[3:]), set(cmd_test_splitted[3:]))
def partial_build_test(self):
conan_file = ConanFileMock()
conan_file.settings = Settings()
conan_file.should_configure = False
conan_file.should_build = False
conan_file.package_folder = os.path.join(self.tempdir, "my_cache_package_folder")
meson = Meson(conan_file)
meson.configure()
self.assertIsNone(conan_file.command)
meson.build()
self.assertIsNone(conan_file.command)
meson.test()
self.assertIsNone(conan_file.command)
meson.install()
self.assertIsNone(conan_file.command)
def folders_test(self):
settings = Settings.loads(default_settings_yml)
settings.os = "Linux"
settings.compiler = "gcc"
settings.compiler.version = "6.3"
settings.arch = "x86"
settings.build_type = "Release"
package_folder = os.path.join(self.tempdir, "my_cache_package_folder")
conan_file = ConanFileMock()
conan_file.deps_cpp_info = MockDepsCppInfo()
conan_file.settings = settings
conan_file.source_folder = os.path.join(self.tempdir, "my_cache_source_folder")
conan_file.build_folder = os.path.join(self.tempdir, "my_cache_build_folder")
conan_file.package_folder = package_folder
meson = Meson(conan_file)
defs = {
'default_library': 'shared',
'prefix': package_folder,
'libdir': 'lib',
'bindir': 'bin',
'sbindir': 'bin',
'libexecdir': 'bin',
'includedir': 'include',
'cpp_std': 'none'
}
meson.configure(source_dir=os.path.join(self.tempdir, "../subdir"),
build_dir=os.path.join(self.tempdir, "build"))
source_expected = os.path.join(self.tempdir, "../subdir")
build_expected = os.path.join(self.tempdir, "build")
cmd_expected = 'meson "%s" "%s" --backend=ninja %s --buildtype=release' \
% (source_expected, build_expected, defs_to_string(defs))
self._check_commands(cmd_expected, conan_file.command)
meson.configure(build_dir=os.path.join(self.tempdir, "build"))
source_expected = os.path.join(self.tempdir, "my_cache_source_folder")
build_expected = os.path.join(self.tempdir, "build")
cmd_expected = 'meson "%s" "%s" --backend=ninja %s --buildtype=release' \
% (source_expected, build_expected, defs_to_string(defs))
self._check_commands(cmd_expected, conan_file.command)
meson.configure()
source_expected = os.path.join(self.tempdir, "my_cache_source_folder")
build_expected = os.path.join(self.tempdir, "my_cache_build_folder")
cmd_expected = 'meson "%s" "%s" --backend=ninja %s --buildtype=release' \
% (source_expected, build_expected, defs_to_string(defs))
self._check_commands(cmd_expected, conan_file.command)
meson.configure(source_folder="source", build_folder="build")
build_expected = os.path.join(self.tempdir, "my_cache_build_folder", "build")
source_expected = os.path.join(self.tempdir, "my_cache_source_folder", "source")
cmd_expected = 'meson "%s" "%s" --backend=ninja %s --buildtype=release' \
% (source_expected, build_expected, defs_to_string(defs))
self._check_commands(cmd_expected, conan_file.command)
conan_file.in_local_cache = True
meson.configure(source_folder="source", build_folder="build",
cache_build_folder="rel_only_cache")
build_expected = os.path.join(self.tempdir, "my_cache_build_folder", "rel_only_cache")
source_expected = os.path.join(self.tempdir, "my_cache_source_folder", "source")
cmd_expected = 'meson "%s" "%s" --backend=ninja %s --buildtype=release' \
% (source_expected, build_expected, defs_to_string(defs))
self._check_commands(cmd_expected, conan_file.command)
conan_file.in_local_cache = False
meson.configure(source_folder="source", build_folder="build",
cache_build_folder="rel_only_cache")
build_expected = os.path.join(self.tempdir, "my_cache_build_folder", "build")
source_expected = os.path.join(self.tempdir, "my_cache_source_folder", "source")
cmd_expected = 'meson "%s" "%s" --backend=ninja %s --buildtype=release' \
% (source_expected, build_expected, defs_to_string(defs))
self._check_commands(cmd_expected, conan_file.command)
conan_file.in_local_cache = True
meson.configure(build_dir="build", cache_build_folder="rel_only_cache")
build_expected = os.path.join(self.tempdir, "my_cache_build_folder", "rel_only_cache")
source_expected = os.path.join(self.tempdir, "my_cache_source_folder")
cmd_expected = 'meson "%s" "%s" --backend=ninja %s --buildtype=release' \
% (source_expected, build_expected, defs_to_string(defs))
self._check_commands(cmd_expected, conan_file.command)
args = ['--werror', '--warnlevel 3']
defs['default_library'] = 'static'
meson.configure(source_folder="source", build_folder="build", args=args,
defs={'default_library': 'static'})
build_expected = os.path.join(self.tempdir, "my_cache_build_folder", "build")
source_expected = os.path.join(self.tempdir, "my_cache_source_folder", "source")
cmd_expected = 'meson "%s" "%s" --backend=ninja %s %s --buildtype=release' \
% (source_expected, build_expected, args_to_string(args), defs_to_string(defs))
self._check_commands(cmd_expected, conan_file.command)
# Raise mixing
with six.assertRaisesRegex(self, ConanException, "Use 'build_folder'/'source_folder'"):
meson.configure(source_folder="source", build_dir="build")
meson.test()
self.assertEqual("ninja -C \"%s\" %s" % (build_expected, args_to_string(["test"])), conan_file.command)
meson.install()
self.assertEqual("ninja -C \"%s\" %s" % (build_expected, args_to_string(["install"])), conan_file.command)
def prefix_test(self):
conan_file = ConanFileMock()
conan_file.deps_cpp_info = MockDepsCppInfo()
conan_file.settings = Settings()
conan_file.package_folder = os.getcwd()
expected_prefix = '-Dprefix="%s"' % os.getcwd()
meson = Meson(conan_file)
meson.configure()
self.assertIn(expected_prefix, conan_file.command)
meson.build()
self.assertIn("ninja -C", conan_file.command)
meson.install()
self.assertIn("ninja -C", conan_file.command)
def no_prefix_test(self):
conan_file = ConanFileMock()
conan_file.deps_cpp_info = MockDepsCppInfo()
conan_file.settings = Settings()
conan_file.package_folder = None
meson = Meson(conan_file)
meson.configure()
self.assertNotIn('-Dprefix', conan_file.command)
meson.build()
self.assertIn("ninja -C", conan_file.command)
with self.assertRaises(TypeError):
meson.install()
@parameterized.expand([('Linux', 'gcc', '6.3', 'x86', None, '-m32'),
('Linux', 'gcc', '6.3', 'x86_64', None, '-m64'),
('Windows', 'Visual Studio', '15', 'x86', 'MD', '-MD')])
def flags_applied_test(self, the_os, compiler, version, arch, runtime, flag):
settings = Settings.loads(default_settings_yml)
settings.os = the_os
settings.compiler = compiler
settings.compiler.version = version
settings.arch = arch
if runtime:
settings.compiler.runtime = runtime
settings.build_type = "Release"
conan_file = ConanFileMock()
conan_file.deps_cpp_info = MockDepsCppInfo()
conan_file.settings = settings
conan_file.package_folder = None
meson = Meson(conan_file)
meson.configure()
meson.build()
self.assertIn(flag, conan_file.captured_env["CFLAGS"])
self.assertIn(flag, conan_file.captured_env["CXXFLAGS"])
| memsharded/conan | conans/test/unittests/client/build/meson_test.py | Python | mit | 9,218 | 0.003688 |
from django.db import models
from django.utils.translation import ugettext_lazy as _
from cms.models import CMSPlugin, Page
from django.conf import settings
class InheritPagePlaceholder(CMSPlugin):
"""
Provides the ability to inherit plugins for a certain placeholder from an associated "parent" page instance
"""
from_page = models.ForeignKey(Page, null=True, blank=True, help_text=_("Choose a page to include its plugins into this placeholder, empty will choose current page"))
from_language = models.CharField(_("language"), max_length=5, choices=settings.CMS_LANGUAGES, blank=True, null=True, help_text=_("Optional: the language of the plugins you want"))
| hzlf/openbroadcast | website/cms/plugins/inherit/models.py | Python | gpl-3.0 | 681 | 0.005874 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "gtdweb.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| lanbing510/GTDWeb | manage.py | Python | gpl-2.0 | 249 | 0 |
'''
RP_extract: Rhythm Patterns Audio Feature Extractor
@author: 2014-2015 Alexander Schindler, Thomas Lidy
Re-implementation by Alexander Schindler of RP_extract for Matlab
Matlab version originally by Thomas Lidy, based on Musik Analysis Toolbox by Elias Pampalk
( see http://ifs.tuwien.ac.at/mir/downloads.html )
Main function is rp_extract. See function definition and description for more information,
or example usage in main function.
Note: All required functions are provided by the two main scientific libraries numpy and scipy.
Note: In case you alter the code to use transform2mel, librosa needs to be installed: pip install librosa
'''
import numpy as np
from scipy import stats
from scipy.fftpack import fft
#from scipy.fftpack import rfft # Discrete Fourier transform of a real sequence.
from scipy import interpolate
# suppress numpy warnings (divide by 0 etc.)
np.set_printoptions(suppress=True)
# required for debugging
np.set_printoptions(precision=8,
threshold=10,
suppress=True,
linewidth=200,
edgeitems=10)
# INITIALIZATION: Constants & Mappings
# Bark Scale
bark = [100, 200, 300, 400, 510, 630, 770, 920, 1080, 1270, 1480, 1720, 2000, 2320, 2700, 3150, 3700, 4400, 5300, 6400, 7700, 9500, 12000, 15500]
n_bark_bands = len(bark)
# copy the bark vector (using [:]) and add a 0 in front (to make calculations below easier)
barks = bark[:]
barks.insert(0,0)
# Phone Scale
phon = [3, 20, 40, 60, 80, 100, 101]
# copy the bark vector (using [:]) and add a 0 in front (to make calculations below easier)
phons = phon[:]
phons.insert(0,0)
phons = np.asarray(phons)
# Loudness Curves
eq_loudness = np.array([[55, 40, 32, 24, 19, 14, 10, 6, 4, 3, 2, 2, 0,-2,-5,-4, 0, 5, 10, 14, 25, 35],
[66, 52, 43, 37, 32, 27, 23, 21, 20, 20, 20, 20,19,16,13,13,18, 22, 25, 30, 40, 50],
[76, 64, 57, 51, 47, 43, 41, 41, 40, 40, 40,39.5,38,35,33,33,35, 41, 46, 50, 60, 70],
[89, 79, 74, 70, 66, 63, 61, 60, 60, 60, 60, 59,56,53,52,53,56, 61, 65, 70, 80, 90],
[103, 96, 92, 88, 85, 83, 81, 80, 80, 80, 80, 79,76,72,70,70,75, 79, 83, 87, 95,105],
[118,110,107,105,103,102,101,100,100,100,100, 99,97,94,90,90,95,100,103,105,108,115]])
loudn_freq = np.array([31.62, 50, 70.7, 100, 141.4, 200, 316.2, 500, 707.1, 1000, 1414, 1682, 2000, 2515, 3162, 3976, 5000, 7071, 10000, 11890, 14140, 15500])
# We have the loudness values for the frequencies in loudn_freq
# now we calculate in loudn_bark a matrix of loudness sensation values for the bark bands margins
i = 0
j = 0
loudn_bark = np.zeros((eq_loudness.shape[0], len(bark)))
for bsi in bark:
while j < len(loudn_freq) and bsi > loudn_freq[j]:
j += 1
j -= 1
if np.where(loudn_freq == bsi)[0].size != 0: # loudness value for this frequency already exists
loudn_bark[:,i] = eq_loudness[:,np.where(loudn_freq == bsi)][:,0,0]
else:
w1 = 1 / np.abs(loudn_freq[j] - bsi)
w2 = 1 / np.abs(loudn_freq[j + 1] - bsi)
loudn_bark[:,i] = (eq_loudness[:,j]*w1 + eq_loudness[:,j+1]*w2) / (w1 + w2)
i += 1
# SPECTRAL MASKING Spreading Function
# CONST_spread contains matrix of spectral frequency masking factors
CONST_spread = np.zeros((n_bark_bands,n_bark_bands))
for i in range(n_bark_bands):
CONST_spread[i,:] = 10**((15.81+7.5*((i-np.arange(n_bark_bands))+0.474)-17.5*(1+((i-np.arange(n_bark_bands))+0.474)**2)**0.5)/10)
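# (the formula above is the classic Schroeder spreading function as used in
#  Pampalk's Music Analysis toolbox; row i spreads the energy of Bark band i
#  across its neighboring bands)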
# UTILITY FUNCTIONS
def nextpow2(num):
'''NextPow2
    find the smallest power of 2 that is >= a given number
    and return its exponent (to base 2)
(analogously to Matlab's nextpow2() function)
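    Example: nextpow2(1000) returns 10, since 2**10 = 1024 >= 1000.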
'''
n = 2
i = 1
while n < num:
n *= 2
i += 1
return i
# FFT FUNCTIONS
def periodogram(x,win,Fs=None,nfft=1024):
''' Periodogram
Periodogram power spectral density estimate
Note: this function was written with 1:1 Matlab compatibility in mind.
The number of points, nfft, in the discrete Fourier transform (DFT) is the maximum of 256 or the next power of two greater than the signal length.
:param x: time series data (e.g. audio signal), ideally length matches nfft
:param win: window function to be applied (e.g. Hanning window). in this case win expects already data points of the window to be provided.
:param Fs: sampling frequency (unused)
:param nfft: number of bins for FFT (ideally matches length of x)
:return: Periodogram power spectrum (np.array)
'''
#if Fs == None:
# Fs = 2 * np.pi # commented out because unused
U = np.dot(win.conj().transpose(), win) # compensates for the power of the window.
Xx = fft((x * win),nfft) # verified
P = Xx*np.conjugate(Xx)/U
# Compute the 1-sided or 2-sided PSD [Power/freq] or mean-square [Power].
# Also, compute the corresponding freq vector & freq units.
# Generate the one-sided spectrum [Power] if so wanted
if nfft % 2 != 0:
        select = np.arange((nfft+1)//2) # ODD
        P = P[select]                   # Take only [0,pi] or [0,pi) (P is 1-D here, so no column index)
P[1:-1] = P[1:-1] * 2 # Only DC is a unique point and doesn't get doubled
else:
#select = np.arange(nfft/2+1); # EVEN
#P = P[select,:] # Take only [0,pi] or [0,pi) # TODO: why commented out?
P[1:-2] = P[1:-2] * 2
P = P / (2 * np.pi)
return P
def calc_spectrogram(wavsegment,fft_window_size,fft_overlap = 0.5,real_values=True):
''' Calc_Spectrogram
calculate spectrogram using periodogram function (which performs FFT) to convert wave signal data
from time to frequency domain (applying a Hanning window and (by default) 50 % window overlap)
    :param wavsegment: audio wave file data for a segment to be analyzed (mono, i.e. 1-dimensional vector, only)
:param fft_window_size: windows size to apply FFT to
:param fft_overlap: overlap to apply during FFT analysis in % fraction (e.g. default = 0.5, means 50% overlap)
:param real_values: if True, return real values by taking abs(spectrogram), if False return complex values
:return: spectrogram matrix as numpy array (fft_window_size, n_frames)
'''
# hop_size (increment step in samples, determined by fft_window_size and fft_overlap)
hop_size = int(fft_window_size*(1-fft_overlap))
# this would compute the segment length, but it's pre-defined above ...
# segment_size = fft_window_size + (frames-1) * hop_size
# ... therefore we convert the formula to give the number of frames needed to iterate over the segment:
    n_frames = (wavsegment.shape[0] - fft_window_size) // hop_size + 1  # integer division keeps this an int under Python 3 as well
# n_frames_old = wavsegment.shape[0] / fft_window_size * 2 - 1 # number of iterations with 50% overlap
# TODO: provide this as parameter for better caching?
han_window = np.hanning(fft_window_size) # verified
# initialize result matrix for spectrogram
spectrogram = np.zeros((fft_window_size, n_frames), dtype=np.complex128)
# start index for frame-wise iteration
ix = 0
for i in range(n_frames): # stepping through the wave segment, building spectrum for each window
spectrogram[:,i] = periodogram(wavsegment[ix:ix+fft_window_size], win=han_window,nfft=fft_window_size)
ix = ix + hop_size
# NOTE: tested scipy periodogram BUT it delivers totally different values AND takes 2x the time of our periodogram function (0.13 sec vs. 0.06 sec)
# from scipy.signal import periodogram # move on top
#f, spec = periodogram(x=wavsegment[idx],fs=samplerate,window='hann',nfft=fft_window_size,scaling='spectrum',return_onesided=True)
if real_values: spectrogram = np.abs(spectrogram)
return (spectrogram)
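# Worked example: for a 44.1 kHz segment of 2**18 samples with
# fft_window_size=1024 and 50% overlap, hop_size is 512 and the resulting
# spectrogram has (2**18 - 1024)/512 + 1 = 511 frames.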
# FEATURE FUNCTIONS
def calc_statistical_features(matrix):
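    '''compute 7 statistics (mean, variance, skewness, kurtosis, median, min,
    max) over the time axis of a (n_bands, n_frames) matrix and return an
    (n_bands, 7) array; NaN entries are replaced by 0.'''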
result = np.zeros((matrix.shape[0],7))
result[:,0] = np.mean(matrix, axis=1)
result[:,1] = np.var(matrix, axis=1, dtype=np.float64) # the values for variance differ between MATLAB and Numpy!
result[:,2] = stats.skew(matrix, axis=1)
result[:,3] = stats.kurtosis(matrix, axis=1, fisher=False) # Matlab calculates Pearson's Kurtosis
result[:,4] = np.median(matrix, axis=1)
result[:,5] = np.min(matrix, axis=1)
result[:,6] = np.max(matrix, axis=1)
result[np.where(np.isnan(result))] = 0
return result
# PSYCHO-ACOUSTIC TRANSFORMS as individual functions
# Transform 2 Mel Scale: NOT USED by rp_extract, but included for testing purposes or for import into other programs
def transform2mel(spectrogram,samplerate,fft_window_size,n_mel_bands = 80,freq_min = 0,freq_max = None):
'''Transform to Mel
convert a spectrogram to a Mel scale spectrogram by grouping original frequency bins
to Mel frequency bands (using Mel filter from Librosa)
Parameters
spectrogram: input spectrogram
samplerate: samplerate of audio signal
fft_window_size: number of time window / frequency bins in the FFT analysis
n_mel_bands: number of desired Mel bands, typically 20, 40, 80 (max. 128 which is default when 'None' is provided)
    freq_min: minimum frequency (Mel filters will be applied >= this frequency, but still return n_mel_bands number of bands)
    freq_max: cut-off frequency (Mel filters will be applied <= this frequency, but still return n_mel_bands number of bands)
Returns:
mel_spectrogram: Mel spectrogram: np.array of shape(n_mel_bands,frames) maintaining the number of frames in the original spectrogram
'''
import librosa.filters
# Syntax: librosa.filters.mel(sr, n_fft, n_mels=128, fmin=0.0, fmax=None, htk=False)
mel_basis = librosa.filters.mel(samplerate,fft_window_size, n_mels=n_mel_bands,fmin=freq_min,fmax=freq_max)
freq_bin_max = mel_basis.shape[1] # will be fft_window_size / 2 + 1
# IMPLEMENTATION WITH FOR LOOP
# initialize Mel Spectrogram matrix
#n_mel_bands = mel_basis.shape[0] # get the number of bands from result in case 'None' was specified as parameter
#mel_spectrogram = np.empty((n_mel_bands, frames))
#for i in range(frames): # stepping through the wave segment, building spectrum for each window
# mel_spectrogram[:,i] = np.dot(mel_basis,spectrogram[0:freq_bin_max,i])
# IMPLEMENTATION WITH DOT PRODUCT (15% faster)
# multiply the mel filter of each band with the spectogram frame (dot product executes it on all frames)
mel_spectrogram = np.dot(mel_basis,spectrogram[0:freq_bin_max,:])
return (mel_spectrogram)
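# Usage sketch (requires librosa): transform2mel(spec, 44100, 1024, n_mel_bands=40)
# returns a (40, n_frames) Mel spectrogram; only the first
# fft_window_size/2 + 1 = 513 frequency rows of the input are used.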
# Bark Transform: Convert Spectrogram to Bark Scale
# matrix: Spectrogram values as returned from periodogram function
# freq_axis: array of frequency values along the frequency axis
# max_bands: limit number of Bark bands (1...24) (counting from lowest band)
def transform2bark(matrix, freq_axis, max_bands=None):
# barks and n_bark_bands have been initialized globally above
if max_bands == None:
max_band = n_bark_bands
else:
max_band = min(n_bark_bands,max_bands)
matrix_out = np.zeros((max_band,matrix.shape[1]),dtype=matrix.dtype)
for b in range(max_band-1):
matrix_out[b] = np.sum(matrix[((freq_axis >= barks[b]) & (freq_axis < barks[b+1]))], axis=0)
return(matrix_out)
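# Worked example: at 44.1 kHz with fft_window_size=1024 the frequency axis has
# a resolution of 22050/512 ~= 43.07 Hz, so the first Bark band (0-100 Hz)
# sums FFT bins 0, 1 and 2 (0, 43.07 and 86.13 Hz).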
# Spectral Masking (assumes values are arranged in <=24 Bark bands)
def do_spectral_masking(matrix):
n_bands = matrix.shape[0]
# CONST_spread has been initialized globally above
spread = CONST_spread[0:n_bands,0:n_bands] # not sure if column limitation is right here; was originally written for n_bark_bands = 24 only
matrix = np.dot(spread, matrix)
return(matrix)
# Map to Decibel Scale
def transform2db(matrix):
'''Map to Decibel Scale'''
matrix[np.where(matrix < 1)] = 1
matrix = 10 * np.log10(matrix)
return(matrix)
# Transform to Phon (assumes matrix is in dB scale)
def transform2phon(matrix):
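    '''map a dB-scaled Bark spectrogram to loudness levels in Phon by
    interpolating, per Bark band, between the equal-loudness contours
    stored in loudn_bark'''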
old_npsetting = np.seterr(invalid='ignore') # avoid 'RuntimeWarning: invalid value encountered in divide' at ifac division below
# number of bark bands, matrix length in time dim
n_bands = matrix.shape[0]
t = matrix.shape[1]
# DB-TO-PHON BARK-SCALE-LIMIT TABLE
# introducing 1 level more with level(1) being infinite
# to avoid (levels - 1) producing errors like division by 0
#%%table_dim = size(CONST_loudn_bark,2);
table_dim = n_bands; # OK
cbv = np.concatenate((np.tile(np.inf,(table_dim,1)), loudn_bark[:,0:n_bands].transpose()),1) # OK
# init lowest level = 2
levels = np.tile(2,(n_bands,t)) # OK
for lev in range(1,6): # OK
db_thislev = np.tile(np.asarray([cbv[:,lev]]).transpose(),(1,t))
levels[np.where(matrix > db_thislev)] = lev + 2
# the matrix 'levels' stores the correct Phon level for each data point
cbv_ind_hi = np.ravel_multi_index(dims=(table_dim,7), multi_index=np.array([np.tile(np.array([range(0,table_dim)]).transpose(),(1,t)), levels-1]), order='F')
cbv_ind_lo = np.ravel_multi_index(dims=(table_dim,7), multi_index=np.array([np.tile(np.array([range(0,table_dim)]).transpose(),(1,t)), levels-2]), order='F')
# interpolation factor % OPT: pre-calc diff
ifac = (matrix[:,0:t] - cbv.transpose().ravel()[cbv_ind_lo]) / (cbv.transpose().ravel()[cbv_ind_hi] - cbv.transpose().ravel()[cbv_ind_lo])
ifac[np.where(levels==2)] = 1 # keeps the upper phon value;
ifac[np.where(levels==8)] = 1 # keeps the upper phon value;
# phons has been initialized globally above
matrix[:,0:t] = phons.transpose().ravel()[levels - 2] + (ifac * (phons.transpose().ravel()[levels - 1] - phons.transpose().ravel()[levels - 2])) # OPT: pre-calc diff
np.seterr(invalid=old_npsetting['invalid']) # restore RuntimeWarning setting for np division error
return(matrix)
# Transform to Sone scale (assumes matrix is in Phon scale)
def transform2sone(matrix):
idx = np.where(matrix >= 40)
not_idx = np.where(matrix < 40)
matrix[idx] = 2**((matrix[idx]-40)/10) #
matrix[not_idx] = (matrix[not_idx]/40)**2.642 # max => 438.53
return(matrix)
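# e.g. 40 phon maps to 2**0 = 1 sone and 50 phon to 2**1 = 2 sone; below
# 40 phon the power-law branch applies (e.g. 20 phon -> (20/40)**2.642 ~= 0.16 sone)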
# MAIN Rhythm Pattern Extraction Function
def rp_extract( wavedata, # pcm (wav) signal data normalized to (-1,1)
samplerate, # signal sampling rate
# which features to extract
extract_rp = False, # extract Rhythm Patterns features
extract_ssd = False, # extract Statistical Spectrum Descriptor
extract_tssd = False, # extract temporal Statistical Spectrum Descriptor
extract_rh = False, # extract Rhythm Histogram features
extract_rh2 = False, # extract Rhythm Histogram features including Fluctuation Strength Weighting
extract_trh = False, # extract temporal Rhythm Histogram features
extract_mvd = False, # extract modulation variance descriptor
# processing options
skip_leadin_fadeout = 1, # >=0 how many sample windows to skip at the beginning and the end
step_width = 1, # >=1 each step_width'th sample window is analyzed
n_bark_bands = 24, # 2..24 number of desired Bark bands (from low frequencies to high) (e.g. 15 or 20 or 24 for 11, 22 and 44 kHz audio respectively) (1 delivers undefined output)
mod_ampl_limit = 60, # 2..257 number of modulation frequencies on x-axis
# enable/disable parts of feature extraction
transform_bark = True, # [S2] transform to Bark scale
spectral_masking = True, # [S3] compute Spectral Masking
transform_db = True, # [S4] transfrom to dB: advisable only to turn off when [S5] and [S6] are turned off too
transform_phon = True, # [S5] transform to Phon: if disabled, Sone_transform will be disabled too
transform_sone = True, # [S6] transform to Sone scale (only applies if transform_phon = True)
fluctuation_strength_weighting = True, # [R2] apply Fluctuation Strength weighting curve
#blurring = True # [R3] Gradient+Gauss filter # TODO: not yet implemented
return_segment_features = False, # this will return features per each analyzed segment instead of aggregated ones
verbose = False # print messages whats going on
):
'''Rhythm Pattern Feature Extraction
performs segment-wise audio feature extraction from provided audio wave (PCM) data
and extracts the following features:
Rhythm Pattern
Statistical Spectrum Descriptor
Statistical Histogram
temporal Statistical Spectrum Descriptor
Rhythm Histogram
temporal Rhythm Histogram features
Modulation Variance Descriptor
Examples:
>>> from audiofile_read import *
>>> samplerate, samplewidth, wavedata = audiofile_read("music/BoxCat_Games_-_10_-_Epic_Song.mp3") #doctest: +ELLIPSIS
Decoded .mp3 with: mpg123 -q -w /....wav music/BoxCat_Games_-_10_-_Epic_Song.mp3
>>> feat = rp_extract(wavedata, samplerate, extract_rp=True, extract_ssd=True, extract_rh=True)
Analyzing 7 segments
>>> for k in feat.keys():
... print k.upper() + ":", feat[k].shape[0], "dimensions"
SSD: 168 dimensions
RH: 60 dimensions
RP: 1440 dimensions
>>> print feat["rp"]
[ 0.01599218 0.01979605 0.01564305 0.01674175 0.00959912 0.00931604 0.00937831 0.00709122 0.00929631 0.00754473 ..., 0.02998088 0.03602739 0.03633861 0.03664331 0.02589753 0.02110256
0.01457744 0.01221825 0.0073788 0.00164668]
>>> print feat["rh"]
[ 7.11614842 12.58303013 6.96717295 5.24244146 6.49677561 4.21249659 12.43844045 4.19672357 5.30714983 6.1674115 ..., 1.55870044 2.69988854 2.75075831 3.67269877 13.0351257
11.7871738 3.76106713 2.45225195 2.20457928 2.06494926]
>>> print feat["ssd"]
[ 3.7783279 5.84444695 5.58439197 4.87849697 4.14983056 4.09638223 4.04971225 3.96152261 3.65551062 3.2857232 ..., 14.45953191 14.6088727 14.03351539 12.84783095 10.81735946
9.04121124 7.13804008 5.6633501 3.09678286 0.52076428]
'''
# PARAMETER INITIALIZATION
# non-exhibited parameters
include_DC = False
FLATTEN_ORDER = 'F' # order how matrices are flattened to vector: 'F' for Matlab/Fortran, 'C' for C order (IMPORTANT TO USE THE SAME WHEN reading+reshaping the features)
# segment_size should always be ~6 sec, fft_window_size should always be ~ 23ms
if (samplerate == 11025):
segment_size = 2**16
fft_window_size = 256
elif (samplerate == 22050):
segment_size = 2**17
fft_window_size = 512
elif (samplerate == 44100):
segment_size = 2**18
fft_window_size = 1024
else:
# throw error not supported
raise ValueError('A sample rate of ' + str(samplerate) + " is not supported (only 11, 22 and 44 kHz).")
# calculate frequency values on y-axis (for Bark scale calculation):
# freq_axis = float(samplerate)/fft_window_size * np.arange(0,(fft_window_size/2) + 1)
# linear space from 0 to samplerate/2 in (fft_window_size/2+1) steps
freq_axis = np.linspace(0, float(samplerate)/2, int(fft_window_size//2) + 1, endpoint=True)
# CONVERT STEREO TO MONO: Average the channels
if wavedata.ndim > 1: # if we have more than 1 dimension
if wavedata.shape[1] == 1: # check if 2nd dimension is just 1
wavedata = wavedata[:,0] # then we take first and only channel
else:
wavedata = np.mean(wavedata, 1) # otherwise we average the signals over the channels
# SEGMENT INITIALIZATION
# find positions of wave segments
skip_seg = skip_leadin_fadeout
seg_pos = np.array([1, segment_size]) # array with 2 entries: start and end position of selected segment
seg_pos_list = [] # list to store all the individual segment positions (only when return_segment_features == True)
# if file is too small, don't skip leadin/fadeout and set step_width to 1
"""
if ((skip_leadin_fadeout > 0) or (step_width > 1)):
duration = wavedata.shape[0]/samplerate
if (duration < 45):
step_width = 1
skip_seg = 0
# TODO: do this as a warning?
if verbose: print "Duration < 45 seconds: setting step_width to 1 and skip_leadin_fadeout to 0."
else:
# advance by number of skip_seg segments (i.e. skip lead_in)
seg_pos = seg_pos + segment_size * skip_seg
"""
# calculate number of segments
n_segments = 1 #int(np.floor( (np.floor( (wavedata.shape[0] - (skip_seg*2*segment_size)) / segment_size ) - 1 ) / step_width ) + 1)
if verbose: print "Analyzing", n_segments, "segments"
#if n_segments == 0:
# raise ValueError("Not enough data to analyze! Minimum sample length needs to be " +
# str(segment_size) + " (5.94 seconds) but it is " + str(wavedata.shape[0]) +
# " (" + str(round(wavedata.shape[0] * 1.0 / samplerate,2)) + " seconds)")
# initialize output
features = {}
ssd_list = []
sh_list = []
rh_list = []
rh2_list = []
rp_list = []
mvd_list = []
hearing_threshold_factor = 0.0875 * (2**15)
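    # (scales the (-1,1)-normalized signal toward the 16-bit amplitude range;
    #  the 0.0875 factor presumably stems from the original Matlab implementation)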
# SEGMENT ITERATION
for seg_id in range(n_segments):
# keep track of segment position
if return_segment_features:
seg_pos_list.append(seg_pos)
# EXTRACT WAVE SEGMENT that will be processed
# data is assumed to be mono waveform
wavsegment = wavedata #[seg_pos[0]-1:seg_pos[1]] # verified
# v210715
# Python : [-0.0269165 -0.02128601 -0.01864624 -0.01893616 -0.02166748 -0.02694702 -0.03457642 -0.04333496 -0.05166626 -0.05891418]
# Matlab : [-0,0269165 -0,02125549 -0,01861572 -0,01893616 -0,02165222 -0,02694702 -0,03456115 -0,04331970 -0,05166626 -0,05891418]
# adjust hearing threshold # TODO: move after stereo-mono conversion above?
wavsegment = wavsegment * hearing_threshold_factor
# v210715
# Python : [ -77.175 -61.03125 -53.4625 -54.29375 -62.125 -77.2625 -99.1375 -124.25 -148.1375 -168.91875]
# Matlab : [ -77,175 -60,94375 -53,3750 -54,29375 -62,081 -77,2625 -99,0938 -124,21 -148,1375 -168,91875]
matrix = calc_spectrogram(wavsegment,fft_window_size)
# v210715
#Python: 0.01372537 0.51454915 72.96077581 84.86663379 2.09940049 3.29631279 97373.2756834 23228.2065494 2678.44451741 30467.235416
# : 84.50635406 58.32826049 1263.82538188 234.11858349 85.48176796 97.26094525 214067.91208223 3570917.53366476 2303291.96676741 1681002.94519665
# : 171.47168402 1498.04129116 3746.45491915 153.01444364 37.20801758 177.74229702 238810.1975412 3064388.50572536 5501187.79635479 4172009.81345923
#Matlab: 0,01528259 0,49653179 73,32978523 85,38774541 2,00416767 3,36618763 97416,24267209 23239,84650814 2677,01521862 30460,9231041364
# : 84,73805309 57,84524803 1263,40594029 235,62185973 85,13826606 97,61122652 214078,02415144 3571346,74831746 2303286,74666381 1680967,41922679
# : 170,15377915 1500,98052242 3744,98456435 154,14108817 36,69362260 177,48982263 238812,02171250 3064642,99278220 5501230,26588318 4172058,72803277
#
# PSYCHO-ACOUSTIC TRANSFORMS
# Map to Bark Scale
if transform_bark:
matrix = transform2bark(matrix,freq_axis,n_bark_bands)
# v210715
# Python: 255.991763 1556.884100 5083.2410768 471.9996609 124.789186 278.299555 550251.385306 6658534.245939 7807158.207639 5883479.99407189
# : 77128.354925 10446.109041 22613.8525735 13266.2502432 2593.395039 1367.697057 675114.554043 23401741.536499 6300109.471193 8039710.71759598
# : 127165.795400 91270.354107 15240.3501050 16291.2234730 1413.851495 2166.723800 868138.817452 20682384.237884 8971171.605009 5919089.97818692
# Matlab: 254,907114 1559,322302 5081,720289 475,1506933 123,836056 278,46723 550306,288536 6659229,587607 7807194,027765 5883487,07036370
# : 77118,196343 10447,961479 22605,559124 13266,4432995 2591,064037 1368,48462 675116,996782 23400723,570438 6300124,132022 8039688,83884099
# : 127172,560642 91251,040768 15246,639683 16286,4542687 1414,053166 2166,42874 868063,055613 20681863,052695 8971108,607811 5919136,16752791
# Spectral Masking
if spectral_masking:
matrix = do_spectral_masking(matrix)
# v210715
# Python: 12978.051641 3416.109125 8769.913963 2648.888265 547.12360 503.50224 660888.17361 10480839.33617 8840234.405272 7193404.23970964
# : 100713.471006 27602.656332 27169.741240 16288.350176 2887.60281 1842.05959 1021358.42618 29229962.41626 10653981.441005 11182818.62910279
# : 426733.607945 262537.326945 43522.106075 41091.381283 4254.39289 4617.45877 1315036.85377 31353824.35688 12417010.121754 9673923.23590653
# Matlab: 12975,335615 3418,81282 8767,062187 2652,061105 545,79379 503,79683 660943,32199 10481368,76411 8840272,477464 7193407,85259461
# : 100704,175421 27602,34142 27161,901160 16288,924458 2884,94883 1842,86020 1021368,99046 29229118,99738 10653999,341989 11182806,7524195
# : 426751,992198 262523,89306 43524,970883 41085,415594 4253,42029 4617,35691 1314966,73269 31353021,99155 12416968,806879 9673951,88376021
# Map to Decibel Scale
if transform_db:
matrix = transform2db(matrix)
# v210715
# Python: 41.13209498 35.33531736 39.42995333 34.23063639 27.38085455 27.02001413 58.2012798 70.20396064 69.46463781 68.56934467
# : 50.03087564 44.40950878 44.34085502 42.11877097 34.60537456 32.65303677 60.09178176 74.65828257 70.27511936 70.48551281
# : 56.30156848 54.19191059 46.38709903 46.1375074 36.28837595 36.64403027 61.18937924 74.96290521 70.94017035 69.85602637
# Matlab: 41,13118599 35,33875324 39,42854087 34,23583526 27,37028596 27,02255437 58,20164218 70,20418000 69,46465651 68,56934684
# : 50,03047477 44,40945923 44,33960164 42,11892409 34,60138115 32,65492392 60,09182668 74,65815725 70,27512665 70,48550820
# : 56,30175557 54,19168835 46,38738489 46,13687684 36,28738298 36,64393446 61,18914765 74,96279407 70,94015590 69,85603922
# Transform Phon
if transform_phon:
matrix = transform2phon(matrix)
# v210715
# Python: 25.90299283 17.82310731 23.4713619 16.37852452 7.42111749 6.94924924 47.58029453 60.22662293 59.43646085 58.49404702
# : 47.03087564 41.40950878 41.34085502 38.89846372 29.5067182 27.06629597 57.09178176 71.65828257 67.27511936 67.48551281
# : 55.02273887 52.91308099 45.10826943 44.8586778 34.3678058 34.769195 59.91054964 73.68407561 69.66134075 68.57719676
# Matlab: 25,90169428 17,82760039 23,46934410 16,38532303 7,40729702 6,95257110 47,58067598 60,22686667 59,43648053 58,49404931
# : 47,03047477 41,40945923 41,33960164 38,89865511 29,50172644 27,06865491 57,09182668 71,65815725 67,27512665 67,48550820
# : 55,02292596 52,91285875 45,10855528 44,85804723 34,36668514 34,76908687 59,91031805 73,68396446 69,66132629 68,57720962
# Transform Sone
if transform_sone:
matrix = transform2sone(matrix)
# v210715
# Python: 0.31726931 0.11815598 0.24452297 0.09450863 0.01167179 0.009812 1.6911791 4.06332931 3.84676603 3.60351463
# : 1.62798518 1.10263162 1.09739697 0.92887876 0.44759842 0.35631529 3.26974511 8.97447943 6.62312431 6.72041945
# : 2.83288863 2.44749871 1.42486669 1.40042797 0.669685 0.69054778 3.97527582 10.327417 7.81439442 7.24868691
# Matlab: 0,31722728 0,11823469 0,24446743 0,09461230 0,01161444 0,00982439 1,69122381 4,06339796 3,84677128 3,60351520
# : 1,62793994 1,10262783 1,09730163 0,92889083 0,44739839 0,35639734 3,26975529 8,97440147 6,62312765 6,72041730
# : 2,83292537 2,44746100 1,42489491 1,40036676 0,66962731 0,69054210 3,97521200 10,32733744 7,81438659 7,24869337
# FEATURES: now we got a Sonogram and extract statistical features
# SSD: Statistical Spectrum Descriptors
if (extract_ssd or extract_tssd):
ssd = calc_statistical_features(matrix)
ssd_list.append(ssd.flatten(FLATTEN_ORDER))
# v210715
# Python: 2.97307486 5.10356599 0.65305978 2.35489911 2.439558 0.009812 8.1447095
# : 4.72262845 7.30899976 0.17862996 2.10446264 4.58595337 0.25538117 12.83339251
# : 4.77858109 5.52646859 0.23911764 2.9056742 4.96338019 0.589568 13.6683906
# : 4.43503421 3.69422906 0.41473155 3.06743402 4.33220988 0.88354694 10.89393754
# : 3.77216546 2.3993334 0.84001713 4.35548197 3.65140589 1.01199696 11.07806891
# : 3.60563073 2.09907968 1.49906811 7.07183968 3.35596471 1.00619842 11.2872743
# : 3.56816128 2.20237398 1.69790808 7.57870223 3.33806767 1.10826324 10.84965392
# : 3.43734647 2.38648202 1.59655791 6.86704341 3.23361995 1.10198021 11.89470587
# : 3.18466303 2.39479532 1.99223131 8.83987184 2.8819031 0.93982524 11.28737448
# : 2.90996406 1.85412568 1.97247446 8.36738395 2.68063918 0.81760102 9.64247378
# Matlab: 2,97309758 5,11366933 0,65306558 2,35489605 2,43956735 0,00982439 8,14473582
# : 4,72264163 7,32338449 0,17863061 2,10444843 4,58593777 0,25568703 12,83335168
# : 4,77859306 5,53731457 0,23911126 2,90567055 4,96338616 0,58959588 13,66839858
# : 4,43505068 3,70148292 0,41473410 3,06742263 4,33222037 0,88357883 10,89397920
# : 3,77217541 2,40405654 0,84000183 4,35540491 3,65136495 1,01191651 11,07802201
# : 3,60563459 2,10319516 1,49905911 7,07181623 3,35609824 1,00628652 11,28728291
# : 3,56820841 2,20675908 1,69792784 7,57880557 3,33819690 1,10830805 10,84975850
# : 3,43736757 2,39117736 1,59656951 6,86710630 3,23366165 1,10199096 11,89486723
# : 3,18467212 2,39951286 1,99223621 8,83991021 2,88200015 0,93978494 11,28733449
# : 2,90997546 1,85776617 1,97246361 8,36742039 2,68074853 0,81790606 9,64262886
# values verified
# RP: RHYTHM PATTERNS
feature_part_xaxis1 = range(0,mod_ampl_limit) # take first (opts.mod_ampl_limit) values of fft result including DC component
feature_part_xaxis2 = range(1,mod_ampl_limit+1) # leave DC component and take next (opts.mod_ampl_limit) values of fft result
if (include_DC):
feature_part_xaxis_rp = feature_part_xaxis1
else:
feature_part_xaxis_rp = feature_part_xaxis2
# 2nd FFT
fft_size = 2**(nextpow2(matrix.shape[1]))
if (mod_ampl_limit >= fft_size):
return {"rh":[]}
#raise(ValueError("mod_ampl_limit option must be smaller than FFT window size (" + str(fft_size) + ")."))
# NOTE: in fact only half of it (256) makes sense due to the symmetry of the FFT result
rhythm_patterns = np.zeros((matrix.shape[0], fft_size), dtype=np.complex128)
#rhythm_patterns = np.zeros((matrix.shape[0], fft_size), dtype=np.float64)
# real_matrix = abs(matrix)
for b in range(0,matrix.shape[0]):
rhythm_patterns[b,:] = fft(matrix[b,:], fft_size)
# tried this instead, but ...
#rhythm_patterns[b,:] = fft(real_matrix[b,:], fft_size) # ... no performance improvement
#rhythm_patterns[b,:] = rfft(real_matrix[b,:], fft_size) # ... different output values
rhythm_patterns = rhythm_patterns / 256 # why 256?
# convert from complex128 to float64 (real)
rp = np.abs(rhythm_patterns[:,feature_part_xaxis_rp]) # verified
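    # Added reading aid (not in the original source): rp is a 2-D matrix of
    # shape (number of bark bands, mod_ampl_limit) -- one row per critical
    # band, one column per modulation-frequency bin kept from the 2nd FFT.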
# MVD: Modulation Variance Descriptors
if extract_mvd:
mvd = calc_statistical_features(rp.transpose()) # verified
mvd_list.append(mvd.flatten(FLATTEN_ORDER))
# RH: Rhythm Histograms - OPTION 1: before fluctuation_strength_weighting (as in Matlab)
if extract_rh:
rh = np.sum(np.abs(rhythm_patterns[:,feature_part_xaxis2]),axis=0) #without DC component # verified
rh_list.append(rh.flatten(FLATTEN_ORDER))
# final steps for RP:
# Fluctuation Strength weighting curve
if fluctuation_strength_weighting:
# modulation frequency x-axis (after 2nd FFT)
# mod_freq_res = resolution of modulation frequency axis (0.17 Hz)
mod_freq_res = 1 / (float(segment_size) / samplerate)
        # modulation frequencies along x-axis (from index 0 to 256)
mod_freq_axis = mod_freq_res * np.array(feature_part_xaxis_rp)
# fluctuation strength curve
fluct_curve = 1 / (mod_freq_axis/4 + 4/mod_freq_axis)
for b in range(rp.shape[0]):
rp[b,:] = rp[b,:] * fluct_curve #[feature_part_xaxis_rp]
#values verified
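            # Added worked check (not in the original source): f/4 + 4/f is
            # minimised at f = 4 Hz, where it equals 2, so fluct_curve peaks
            # at 0.5 for ~4 Hz modulation -- perceived as the strongest
            # fluctuation -- and falls off symmetrically, e.g. 1 Hz and
            # 16 Hz both give 1 / 4.25 ~= 0.235.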
# RH: Rhythm Histograms - OPTION 2 (after Fluctuation weighting)
if extract_rh2:
rh2 = np.sum(rp,axis=0) #TODO: adapt to do always without DC component
rh2_list.append(rh2.flatten(FLATTEN_ORDER))
# Gradient+Gauss filter
#if extract_rp:
# TODO Gradient+Gauss filter
#for i in range(1,rp.shape[1]):
# rp[:,i-1] = np.abs(rp[:,i] - rp[:,i-1]);
#
#rp = blur1 * rp * blur2;
rp_list.append(rp.flatten(FLATTEN_ORDER))
seg_pos = seg_pos + segment_size * step_width
if extract_rp:
if return_segment_features:
features["rp"] = np.array(rp_list)
else:
features["rp"] = np.median(np.asarray(rp_list), axis=0)
if extract_ssd:
if return_segment_features:
features["ssd"] = np.array(ssd_list)
else:
features["ssd"] = np.mean(np.asarray(ssd_list), axis=0)
if extract_rh:
if return_segment_features:
features["rh"] = np.array(rh_list)
else:
features["rh"] = np.median(np.asarray(rh_list), axis=0)
if extract_mvd:
if return_segment_features:
features["mvd"] = np.array(mvd_list)
else:
features["mvd"] = np.mean(np.asarray(mvd_list), axis=0)
# NOTE: no return_segment_features for temporal features as they measure variation of features over time
if extract_tssd:
features["tssd"] = calc_statistical_features(np.asarray(ssd_list).transpose()).flatten(FLATTEN_ORDER)
if extract_trh:
features["trh"] = calc_statistical_features(np.asarray(rh_list).transpose()).flatten(FLATTEN_ORDER)
if return_segment_features:
# also include the segment positions in the result
features["segpos"] = np.array(seg_pos_list)
features["timepos"] = features["segpos"] / (samplerate * 1.0)
return features
# self-test: verify that rp_extract works properly by running its docstring examples
def self_test():
import doctest
#doctest.testmod()
doctest.run_docstring_examples(rp_extract, globals(), verbose=True)
if __name__ == '__main__':
import sys
from audiofile_read import * # import our library for reading wav and mp3 files
# process file given on command line or default song (included)
if len(sys.argv) > 1:
if sys.argv[1] == '-test': # RUN DOCSTRING SELF TEST
print "Doing self test. If nothing is printed, it is ok."
import doctest
doctest.run_docstring_examples(rp_extract, globals()) #, verbose=True)
exit() # Note: no output means that everything went fine
else:
audiofile = sys.argv[1]
else:
audiofile = "music/BoxCat_Games_-_10_-_Epic_Song.mp3"
# Read audio file and extract features
try:
samplerate, samplewidth, wavedata = audiofile_read(audiofile)
np.set_printoptions(suppress=True)
bark_bands = 24 # choose the number of Bark bands (2..24)
mod_ampl_limit = 60 # number modulation frequencies on x-axis
feat = rp_extract(wavedata,
samplerate,
extract_rp=True,
extract_ssd=True,
extract_tssd=False,
extract_rh=True,
n_bark_bands=bark_bands,
spectral_masking=True,
transform_db=True,
transform_phon=True,
transform_sone=True,
fluctuation_strength_weighting=True,
skip_leadin_fadeout=1,
step_width=1,
mod_ampl_limit=mod_ampl_limit)
# feat is a dict containing arrays for different feature sets
print "Successfully extracted features:" , feat.keys()
except ValueError, e:
print e
exit()
print "Rhythm Histogram feature vector:"
print feat["rh"]
# EXAMPLE on how to plot the features
do_plots = False
if do_plots:
from rp_plot import *
plotrp(feat["rp"],rows=bark_bands,cols=mod_ampl_limit)
plotrh(feat["rh"])
plotssd(feat["ssd"],rows=bark_bands)
# EXAMPLE on how to store RP features in CSV file
# import pandas as pd
# filename = "features.rp.csv"
# rp = pd.DataFrame(feat["rp"].reshape([1,feat["rp"].shape[0]]))
# rp.to_csv(filename) | bastustrump/genimpro | rp_extract.py | Python | mit | 39,383 | 0.013737 |
#!/usr/bin/env python
# -*- coding: ISO-8859-1 -*-
# generated by wxGlade 0.3.5.1 on Sat Jun 04 00:11:24 2005
import wx
class FilenameViewModuleMixinFrame(wx.Frame):
def __init__(self, *args, **kwds):
# begin wxGlade: FilenameViewModuleMixinFrame.__init__
kwds["style"] = wx.CAPTION|wx.MINIMIZE_BOX|wx.MAXIMIZE_BOX|wx.SYSTEM_MENU|wx.RESIZE_BORDER
wx.Frame.__init__(self, *args, **kwds)
self.viewFramePanel = wx.Panel(self, -1)
self.label_8_copy_1 = wx.StaticText(self.viewFramePanel, -1, "Filename")
self.filenameText = wx.TextCtrl(self.viewFramePanel, -1, "")
self.browseButtonId = wx.NewId()
self.browseButton = wx.Button(self.viewFramePanel, self.browseButtonId, "Browse")
self.__set_properties()
self.__do_layout()
# end wxGlade
def __set_properties(self):
# begin wxGlade: FilenameViewModuleMixinFrame.__set_properties
self.SetTitle("SomeModule")
# end wxGlade
def __do_layout(self):
# begin wxGlade: FilenameViewModuleMixinFrame.__do_layout
sizer_1 = wx.BoxSizer(wx.VERTICAL)
sizer_5 = wx.BoxSizer(wx.VERTICAL)
sizer_3 = wx.BoxSizer(wx.HORIZONTAL)
sizer_3.Add(self.label_8_copy_1, 0, wx.LEFT|wx.RIGHT|wx.ALIGN_CENTER_VERTICAL, 2)
sizer_3.Add(self.filenameText, 1, wx.ALIGN_CENTER_VERTICAL, 0)
sizer_3.Add(self.browseButton, 0, wx.ALIGN_CENTER_VERTICAL, 0)
sizer_5.Add(sizer_3, 1, wx.ALL|wx.EXPAND, 7)
self.viewFramePanel.SetAutoLayout(True)
self.viewFramePanel.SetSizer(sizer_5)
sizer_5.Fit(self.viewFramePanel)
sizer_5.SetSizeHints(self.viewFramePanel)
sizer_1.Add(self.viewFramePanel, 1, wx.EXPAND, 0)
self.SetAutoLayout(True)
self.SetSizer(sizer_1)
sizer_1.Fit(self)
sizer_1.SetSizeHints(self)
self.Layout()
# end wxGlade
# end of class FilenameViewModuleMixinFrame
if __name__ == "__main__":
app = wx.PySimpleApp(0)
wx.InitAllImageHandlers()
frame_1 = FilenameViewModuleMixinFrame(None, -1, "")
app.SetTopWindow(frame_1)
frame_1.Show()
app.MainLoop()
| chrisidefix/devide | resources/python/filename_view_module_mixin_frame.py | Python | bsd-3-clause | 2,169 | 0.005533 |
#! /usr/bin/env python
# encoding: utf-8
"""
Force the execution output to be synchronized
May deadlock with a lot of output (subprocess limitation)
"""
import sys
from waflib.Build import BuildContext
from waflib import Utils, Logs
def exec_command(self, cmd, **kw):
subprocess = Utils.subprocess
kw['shell'] = isinstance(cmd, str)
Logs.debug('runner: %r' % cmd)
Logs.debug('runner_env: kw=%s' % kw)
try:
kw['stdout'] = kw['stderr'] = subprocess.PIPE
p = subprocess.Popen(cmd, **kw)
(out, err) = p.communicate()
if out:
sys.stdout.write(out.decode(sys.stdout.encoding or 'iso8859-1'))
if err:
sys.stdout.write(err.decode(sys.stdout.encoding or 'iso8859-1'))
return p.returncode
except OSError:
return -1
BuildContext.exec_command = exec_command
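# Usage note (illustrative, not from the original file): loading this tool is
# enough to activate it, since it monkey-patches BuildContext.exec_command at
# import time, e.g. from a wscript:
#   conf.load('sync_exec')   # during configure()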
| Gnomescroll/Gnomescroll | server/waflib/extras/sync_exec.py | Python | gpl-3.0 | 777 | 0.023166 |
# SPDX-License-Identifier: MIT
# Copyright (C) 2019-2020 Tobias Gruetzmacher
# Copyright (C) 2019-2020 Daniel Ring
from .common import _ParserScraper
class NamirDeiter(_ParserScraper):
imageSearch = '//img[contains(@src, "comics/")]'
prevSearch = ('//a[@rel="prev"]',
'//a[./img[contains(@src, "previous")]]',
'//a[contains(text(), "Previous")]')
def __init__(self, name, baseUrl, first=None, last=None):
if name == 'NamirDeiter':
super(NamirDeiter, self).__init__(name)
else:
super(NamirDeiter, self).__init__('NamirDeiter/' + name)
self.url = 'https://' + baseUrl + '/'
self.stripUrl = self.url + 'comics/index.php?date=%s'
if first:
self.firstStripUrl = self.stripUrl % first
else:
self.firstStripUrl = self.url + 'comics/'
if last:
self.url = self.stripUrl % last
self.endOfLife = True
def link_modifier(self, fromurl, tourl):
# Links are often absolute and keep jumping between http and https
return tourl.replace('http:', 'https:').replace('/www.', '/')
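        # Illustrative trace: 'http://www.namirdeiter.com/comics/'
        # becomes 'https://namirdeiter.com/comics/'.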
@classmethod
def getmodules(cls):
return (
cls('ApartmentForTwo', 'apartmentfor2.com'),
cls('NamirDeiter', 'namirdeiter.com', last='20150410'),
cls('NicoleAndDerek', 'nicoleandderek.com'),
cls('OneHundredPercentCat', 'ndunlimited.com/100cat', last='20121001'),
cls('SpareParts', 'sparepartscomics.com', first='20031022', last='20080331'),
cls('TheNDU', 'thendu.com'),
cls('WonderKittens', 'wonderkittens.com'),
cls('YouSayItFirst', 'yousayitfirst.com', first='20040220', last='20130125'),
)
class UnlikeMinerva(_ParserScraper):
name = 'NamirDeiter/UnlikeMinerva'
baseUrl = 'https://unlikeminerva.com/archive/index.php'
stripUrl = baseUrl + '?week=%s'
url = stripUrl % '127'
firstStripUrl = stripUrl % '26'
imageSearch = '//img[contains(@src, "archive/")]'
prevSearch = '//a[./img[contains(@src, "previous")]]'
multipleImagesPerStrip = True
endOfLife = True
| webcomics/dosage | dosagelib/plugins/namirdeiter.py | Python | mit | 2,179 | 0.001377 |
from utils.header import MagicField, Field
from load_command import LoadCommandHeader, LoadCommandCommand
class PrebindCksumCommand(LoadCommandHeader):
ENDIAN = None
FIELDS = (
        # Likely copy/paste slip in the original: a prebind checksum command is
        # LC_PREBIND_CKSUM, not LC_DYSYMTAB (assumes COMMANDS defines that key).
        MagicField('cmd', 'I', {LoadCommandCommand.COMMANDS['LC_PREBIND_CKSUM']: 'LC_PREBIND_CKSUM'}),
Field('cmdsize', 'I'),
Field('cksum', 'I'),
)
def __init__(self, bytes_=None, **kwargs):
self.cksum = None
super(PrebindCksumCommand, self).__init__(bytes_, **kwargs)
| hkkwok/MachOTool | mach_o/headers/prebind_cksum_command.py | Python | apache-2.0 | 488 | 0.002049 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Subscribe function."""
import contextlib
import re
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
def _recursive_apply(tensors, apply_fn):
"""Helper method to recursively apply a function to structure of tensors.
The structure of the tensors should take the form similar to fetches in
`tf.compat.v1.Session` and includes single `Tensor`, `list`, nested `list`,
`tuple`,
`namedtuple`, or `dict`.
Args:
tensors: Single `Tensor`, `list`, nested `list, `tuple`, `namedtuple`, or
`dict`.
apply_fn: Function to apply to each `Tensor` and should return a `Tensor`.
Returns:
Returns the modified tensors with the same structure.
Raises:
`TypeError` if undefined type in the tensors structure.
"""
tensors_type = type(tensors)
if tensors_type is ops.Tensor:
return apply_fn(tensors)
elif isinstance(tensors, variables.Variable):
return apply_fn(tensors.value())
elif isinstance(tensors, (list, tuple)):
tensors = [_recursive_apply(t, apply_fn) for t in tensors]
if tensors_type is list:
return list(tensors)
elif tensors_type is tuple:
return tuple(tensors)
return tensors_type(*tensors) # collections.namedtuple
elif tensors_type is dict:
return dict((k, _recursive_apply(v, apply_fn)) for k, v in tensors.items())
else:
raise TypeError(f'_recursive_apply argument {tensors!r} has invalid type '
f'{tensors_type!r}')
class _ControlOutputCache(object):
"""Helper class to manage calculating and caching control_outputs in graph."""
__slots__ = ['cache']
def __init__(self):
self.cache = {}
def calc_control_outputs(self, graph):
"""Returns the map of control_outputs for a given graph.
Args:
graph: The graph to parse.
Returns:
A map of the control outputs.
"""
control_outputs = {}
for op in graph.get_operations():
for control_input in op.control_inputs:
if control_input not in control_outputs:
control_outputs[control_input] = set()
control_outputs[control_input].add(op)
return control_outputs
def get_control_outputs(self, op):
"""Return the control outputs for a given op.
Args:
op: The op to fetch control outputs for.
Returns:
Iterable of control output ops.
"""
if op.graph not in self.cache:
control_outputs = self.calc_control_outputs(op.graph)
self.cache[op.graph] = control_outputs
else:
control_outputs = self.cache[op.graph]
return control_outputs.get(op, [])
def _subscribe_new(tensor, side_effects, control_cache):
"""Helper method that subscribes a single tensor to a list of side_effects.
Args:
tensor: `tf.Tensor`
side_effects: List of side_effect functions see subscribe for details.
control_cache: `_ControlOutputCache` helper to get control_outputs faster.
Returns:
The modified replacement to the passed in tensor which triggers the side
effects.
"""
update_input = []
for consumer_op in list(tensor.consumers()): # explicit copy
update_input.append((consumer_op, list(consumer_op.inputs).index(tensor)))
update_control_input = control_cache.get_control_outputs(tensor.op)
# Trailing slash on name scope to replace the scope.
name_scope = tensor.op.name + '/subscription/'
with ops.name_scope(name_scope):
outs = []
for s in side_effects:
outs += s(tensor)
with ops.control_dependencies(outs):
out = array_ops.identity(tensor)
for consumer_op, index in update_input:
consumer_op._update_input(index, out) # pylint: disable=protected-access
for consumer_op in update_control_input:
# If an op has more than one output and two or more of its output tensors
# are subscribed at the same time, we remove the control dependency from
# the original op only once and we add the dependencies to all the
# new identities.
new_control_inputs = consumer_op.control_inputs
if tensor.op in new_control_inputs:
new_control_inputs.remove(tensor.op)
new_control_inputs.append(out.op)
# pylint: disable=protected-access
consumer_op._remove_all_control_inputs()
consumer_op._add_control_inputs(new_control_inputs)
# pylint: enable=protected-access
return out
def _subscribe_extend(tensor, side_effects):
"""Helper method to extend the list of side_effects for a subscribed tensor.
Args:
tensor: A `tf.Tensor` as returned by subscribe().
side_effects: List of side_effect functions, see subscribe for details.
Returns:
The given subscribed tensor (for API consistency).
"""
assert len(tensor.op.inputs) == 1, 'Op {} must only have one input'.format(
tensor.op.name)
source_tensor = tensor.op.inputs[0]
# Build the side effect graphs and add their outputs to the list of control
# dependencies for the subscribed tensor.
outs = []
name_scope = source_tensor.op.name + '/subscription/'
with ops.name_scope(name_scope):
for s in side_effects:
outs += s(source_tensor)
out_ops = [out.op if isinstance(out, ops.Tensor) else out for out in outs]
tensor.op._add_control_inputs(out_ops) # pylint: disable=protected-access
return tensor
def _is_subscribed_identity(tensor):
"""Checks if the given tensor is an identity op returned by `subscribe()`.
Args:
tensor: A `tf.Tensor` to check.
Returns:
True if the given tensor matches the criteria for subscription identities:
its op type is `Identity`, its name matches the name of its input and
conforms to the convention for subscribed nodes.
False otherwise.
"""
# Subscribed tensor are assumed to be identity ops.
if tensor.op.type != 'Identity':
return False
# Check that the tensor name matches the convention in place for identity ops
# created by subscribe().
match = re.match(r'(?P<prefix_name>^.*?)/subscription/Identity[^/]+',
tensor.name)
if match is None or len(match.groups()) != 1:
return False
prefix_name = match.group('prefix_name')
# Get a reference to the source tensor and check that it has a matching name.
assert len(tensor.op.inputs) == 1, 'Op {} must only have one input'.format(
tensor.op.name)
source_tensor = tensor.op.inputs[0]
if prefix_name != source_tensor.op.name:
return False
return True
def _subscribe(tensor, side_effects, control_cache):
"""Helper method that subscribes a single tensor to a list of side_effects.
This method will check if the given tensor has already been subscribed or if
it's a tensor returned by a previous call to `subscribe()` and, if so, will
reuse the existing identity op, appending the given side effects to the list
of existing ones.
Args:
tensor: The `tf.Tensor` to be subscribed.
side_effects: List of side_effect functions, see subscribe for details.
control_cache: `_ControlOutputCache` helper to get control_outputs faster.
Returns:
The modified replacement to the passed in tensor which triggers the side
effects or the given tensor, if it was already been subscribed.
"""
# Check if the given tensor has a numpy compatible type (see dtypes.py).
# If not, we cannot subscribe it, so we just return the original tensor.
if not tensor.dtype.is_numpy_compatible:
logging.debug(('Tensor {} has an un-supported {} type and cannot be '
'subscribed.').format(tensor.name, tensor.dtype))
return tensor
if _is_subscribed_identity(tensor):
return _subscribe_extend(tensor, side_effects)
# Check if the given tensor has already been subscribed by inspecting its
# outputs.
name_scope = tensor.op.name + '/subscription/Identity'
consumers = tensor.consumers()
matching_ops = [op for op in consumers if op.name.startswith(name_scope)]
assert len(matching_ops) <= 1, ('Op {} must only have one subscription '
'op connected to it').format(tensor.op.name)
if len(matching_ops) == 1:
candidate_tensor = matching_ops[0].outputs[0]
if _is_subscribed_identity(candidate_tensor):
return _subscribe_extend(candidate_tensor, side_effects)
return _subscribe_new(tensor, side_effects, control_cache)
@contextlib.contextmanager
def _preserve_control_flow_context(tensor):
"""Preserve the control flow context for the given tensor.
Sets the graph context to the tensor's context so that side effect ops are
added under the same context.
This is needed when subscribing to tensors defined within a conditional
block or a while loop. In these cases we need that the side-effect ops
are created within the same control flow context as that of the tensor
they are attached to.
Args:
tensor: tensor whose context should be preserved.
Yields:
None
"""
# pylint: disable=protected-access
context = tensor.op._get_control_flow_context()
# pylint: enable=protected-access
if context:
context.Enter()
try:
yield
finally:
if context:
context.Exit()
def _scoped_subscribe(tensor, side_effects, control_cache):
"""Helper method that subscribes a single tensor to a list of side_effects.
This is a thin wrapper around `_subscribe` and ensures that the side effect
ops are added within the same device and control flow context of the
subscribed tensor.
Args:
tensor: The `tf.Tensor` to be subscribed.
side_effects: List of side_effect functions, see subscribe for details.
control_cache: `_ControlOutputCache` helper to get control_outputs faster.
Returns:
The modified replacement to the passed in tensor which triggers the side
effects or the given tensor, if it was already been subscribed.
"""
with ops.device(tensor.device):
with _preserve_control_flow_context(tensor):
return _subscribe(tensor, side_effects, control_cache)
def subscribe(tensors, side_effects):
"""Subscribe to a tensor.
This method will attach side effect graphs to a given set
of tensors. Set of tensors follows from session.run and supports
single `Tensor`, `list`, nested `list`, `tuple`, `namedtuple`, or `dict`. It
returns the tensors in the same passed in structure, but as clones with
side effects applied. The supplied side effect graphs are specified
as a constructor function which takes the target tensor and
constructs a side effect graph and returns a list of ops that should
be control dependencies on fetching the tensor. It will append
'subscription' to the name scope of the tensor for every node in
the side effect graph. These control dependencies are what trigger
the side effects. Subscribe will construct the additions to your
graph and return the created identity tensor downstream of the control
dependencies. Use these tensors as you would normally in the rest of
your tensorflow code. If a given tensor has already been subscribed or a
tensor returned by a call to subscribe is passed, the previously created
identity tensor will be reused and the side effect graphs will be added to
the existing ones.
Args:
tensors: `Tensor` or set of tensors to subscribe to. Set of tensors format
follows from `Session.run` and supports single `Tensor`, `list`, nested
`list`, `tuple`, `namedtuple`, or `dict`.
side_effects: Function(s) that takes a `Tensor`, construct a subgraph, and
return a nonempty list of control dependencies. This can be a single
function or list of functions.
Returns:
Subscribed tensors, which are identity copies of the passed in tensors
in the same passed in structure, but the graph has been modified
such that these are downstream of the control dependencies for
the side effect graphs. Use these functionally equivalent tensors
instead of the passed in tensors for further construction or running.
"""
if not hasattr(side_effects, '__iter__'):
side_effects = [side_effects]
control_outputs = _ControlOutputCache()
result = _recursive_apply(
tensors, lambda t: _scoped_subscribe(t, side_effects, control_outputs))
return result
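def _example_usage():
  """Minimal usage sketch (an added illustration, not part of this module).

  Assumes TF1-style graph mode. A side effect is any callable that takes the
  subscribed tensor and returns a nonempty list of ops/tensors to run as
  control dependencies::

    def count_fetches(t):
      # Any subgraph works; an identity is the simplest placeholder.
      return [array_ops.identity(t)]

    subscribed = subscribe(my_tensor, [count_fetches])
    # Fetch `subscribed` instead of `my_tensor`; the side-effect subgraph
    # now runs first via the inserted control dependencies.
  """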
| tensorflow/tensorflow | tensorflow/python/framework/subscribe.py | Python | apache-2.0 | 12,914 | 0.006814 |
"""
Django settings for dnd_tracker project.
Generated by 'django-admin startproject' using Django 1.11.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'juchgjo=*=80&i=5xw18eg0-43h&wjms1wvi4j2u#8_uq0&1kc'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'frag_tracker.apps.FragTrackerConfig'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'dnd_tracker.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['./templates',],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'dnd_tracker.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
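# Illustrative alternative (not part of this project): switching the default
# database to PostgreSQL only requires changing ENGINE and the connection
# fields, e.g.
# DATABASES = {
#     'default': {
#         'ENGINE': 'django.db.backends.postgresql',
#         'NAME': 'dnd_tracker',
#         'USER': 'dnd',
#         'PASSWORD': 'change-me',
#         'HOST': 'localhost',
#         'PORT': '5432',
#     }
# }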
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Copenhagen'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
# Redirect to home URL after login (Default redirects to /accounts/profile/)
LOGIN_REDIRECT_URL = '/characterlistview' | MariusLauge/dnd_tracker | dnd_tracker/settings.py | Python | gpl-3.0 | 3,300 | 0.001818 |
"""
https://github.com/renskiy/fabricio/blob/master/examples/hello_world
"""
from fabricio import tasks, docker
from fabricio.misc import AvailableVagrantHosts
app = tasks.DockerTasks(
service=docker.Container(
name='app',
image='nginx:stable-alpine',
options={
# `docker run` options
'env': 'FOO=42',
},
),
hosts=AvailableVagrantHosts(),
# rollback_command=True, # show `rollback` command in the list
# migrate_commands=True, # show `migrate` and `migrate-back` commands in the list
# backup_commands=True, # show `backup` and `restore` commands in the list
# pull_command=True, # show `pull` command in the list
# update_command=True, # show `update` command in the list
# revert_command=True, # show `revert` command in the list
# destroy_command=True, # show `destroy` command in the list
)
| renskiy/fabricio | examples/hello_world/fabfile.py | Python | mit | 901 | 0.00111 |
"""Adds a simulated sensor."""
from datetime import datetime
import math
from random import Random
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_NAME
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
import homeassistant.util.dt as dt_util
CONF_AMP = "amplitude"
CONF_FWHM = "spread"
CONF_MEAN = "mean"
CONF_PERIOD = "period"
CONF_PHASE = "phase"
CONF_SEED = "seed"
CONF_UNIT = "unit"
CONF_RELATIVE_TO_EPOCH = "relative_to_epoch"
DEFAULT_AMP = 1
DEFAULT_FWHM = 0
DEFAULT_MEAN = 0
DEFAULT_NAME = "simulated"
DEFAULT_PERIOD = 60
DEFAULT_PHASE = 0
DEFAULT_SEED = 999
DEFAULT_UNIT = "value"
DEFAULT_RELATIVE_TO_EPOCH = True
ICON = "mdi:chart-line"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_AMP, default=DEFAULT_AMP): vol.Coerce(float),
vol.Optional(CONF_FWHM, default=DEFAULT_FWHM): vol.Coerce(float),
vol.Optional(CONF_MEAN, default=DEFAULT_MEAN): vol.Coerce(float),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PERIOD, default=DEFAULT_PERIOD): cv.positive_int,
vol.Optional(CONF_PHASE, default=DEFAULT_PHASE): vol.Coerce(float),
vol.Optional(CONF_SEED, default=DEFAULT_SEED): cv.positive_int,
vol.Optional(CONF_UNIT, default=DEFAULT_UNIT): cv.string,
vol.Optional(
CONF_RELATIVE_TO_EPOCH, default=DEFAULT_RELATIVE_TO_EPOCH
): cv.boolean,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the simulated sensor."""
name = config.get(CONF_NAME)
unit = config.get(CONF_UNIT)
amp = config.get(CONF_AMP)
mean = config.get(CONF_MEAN)
period = config.get(CONF_PERIOD)
phase = config.get(CONF_PHASE)
fwhm = config.get(CONF_FWHM)
seed = config.get(CONF_SEED)
relative_to_epoch = config.get(CONF_RELATIVE_TO_EPOCH)
sensor = SimulatedSensor(
name, unit, amp, mean, period, phase, fwhm, seed, relative_to_epoch
)
add_entities([sensor], True)
class SimulatedSensor(Entity):
"""Class for simulated sensor."""
def __init__(
self, name, unit, amp, mean, period, phase, fwhm, seed, relative_to_epoch
):
"""Init the class."""
self._name = name
self._unit = unit
self._amp = amp
self._mean = mean
self._period = period
self._phase = phase # phase in degrees
self._fwhm = fwhm
self._seed = seed
self._random = Random(seed) # A local seeded Random
self._start_time = (
datetime(1970, 1, 1, tzinfo=dt_util.UTC)
if relative_to_epoch
else dt_util.utcnow()
)
self._relative_to_epoch = relative_to_epoch
self._state = None
def time_delta(self):
"""Return the time delta."""
dt0 = self._start_time
dt1 = dt_util.utcnow()
return dt1 - dt0
def signal_calc(self):
"""Calculate the signal."""
mean = self._mean
amp = self._amp
time_delta = self.time_delta().total_seconds() * 1e6 # to milliseconds
period = self._period * 1e6 # to milliseconds
fwhm = self._fwhm / 2
phase = math.radians(self._phase)
if period == 0:
periodic = 0
else:
periodic = amp * (math.sin((2 * math.pi * time_delta / period) + phase))
noise = self._random.gauss(mu=0, sigma=fwhm)
return round(mean + periodic + noise, 3)
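    # Added reading aid (not in the original source): the computed state is
    #   state(t) = mean + amp * sin(2*pi * t / period + phase) + N(0, spread/2)
    # e.g. the defaults (amp=1, mean=0, period=60, phase=0, spread=0) yield a
    # clean 60-second sine between -1 and 1.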
async def async_update(self):
"""Update the sensor."""
self._state = self.signal_calc()
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return ICON
@property
def unit_of_measurement(self):
"""Return the unit this state is expressed in."""
return self._unit
@property
def device_state_attributes(self):
"""Return other details about the sensor state."""
return {
"amplitude": self._amp,
"mean": self._mean,
"period": self._period,
"phase": self._phase,
"spread": self._fwhm,
"seed": self._seed,
"relative_to_epoch": self._relative_to_epoch,
}
| sdague/home-assistant | homeassistant/components/simulated/sensor.py | Python | apache-2.0 | 4,535 | 0.000441 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-08 15:45
from __future__ import unicode_literals
from django.db import migrations
import share.robot
class Migration(migrations.Migration):
dependencies = [
('share', '0001_initial'),
('djcelery', '0001_initial'),
]
operations = [
migrations.RunPython(
code=share.robot.RobotUserMigration('org.biorxiv'),
),
migrations.RunPython(
code=share.robot.RobotOauthTokenMigration('org.biorxiv'),
),
migrations.RunPython(
code=share.robot.RobotScheduleMigration('org.biorxiv'),
),
]
| zamattiac/SHARE | providers/org/biorxiv/migrations/0001_initial.py | Python | apache-2.0 | 658 | 0 |
"""
__init__.py
ist303-miye
Copyright (C) 2017
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; either version 2 of the License, or (at your option) any later
version.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc., 59 Temple
Place, Suite 330, Boston, MA 02111-1307 USA
"""
from .cwebview import *
| morpheby/ist303-miye | client/__init__.py | Python | gpl-3.0 | 755 | 0.005298 |
from PyInstaller.utils.hooks import collect_data_files
datas = collect_data_files('vispy')
| informatics-isi-edu/synspy | hook-vispy.py | Python | bsd-3-clause | 111 | 0 |
# -*- coding: utf-8 -*-
import os
import csv
from bs4 import BeautifulSoup
INFO = {'download_path': 'download/docs',
'doc_base_url':
'https://facebook.github.io/react-native/releases/0.40/docs{}',
'out_file': 'output.txt'}
HOME_LINK = 'http://facebook.github.io/react-native/docs/getting-started.html'
"""
This design is based on the python fathead
(zeroclickinfo-fathead/lib/fathead/python)
"""
class Data(object):
"""
    Object responsible for loading raw HTML docs.
"""
def __init__(self, file):
"""
Initialize PythonData object. Load data from HTML.
"""
self.HTML = ""
self.FILE = file
self.load_data()
def load_data(self):
"""
Open the HTML file and load it into the object.
"""
with open(self.FILE, 'r') as data_file:
self.HTML = data_file.read()
def get_raw_data(self):
"""
Returns: The raw HTML that was loaded.
"""
return self.HTML
def get_file(self):
"""
Returns: The file path of the file being used.
"""
return self.FILE
class DataParser(object):
"""
Object responsible for parsing the raw HTML that contains data
"""
def __init__(self, data_object, info):
self.parsed_data = None
self.prop_sections = []
self.method_sections = []
self.intro_text = ''
self.title = ''
self.info = info
self.file_being_used = data_object.get_file()
soup_data = BeautifulSoup(data_object.get_raw_data(), 'html.parser')
self.title = soup_data.title.text
# Extract intro text
        first_paragraph = soup_data.h1.find_next('p')
        # There is only an intro text for the whole component if there is no
        # h2 before the first paragraph
        if first_paragraph.find_previous('h2') is None:
self.intro_text += self._format_output(first_paragraph.text)
        prop_div = soup_data.find('div', {'class': 'props'})
        if prop_div:
            self.prop_sections = prop_div.find_all('div')
        # Methods come after an h3 with the text "Methods"
        for h3 in soup_data.find_all('h3'):
            if h3.text == "Methods #":
                props = h3.parent.find('div', {'class': 'props'})
                self.method_sections = props.find_all('div')
def parse_for_prop_name(self, section):
"""
Returns the function name
Args:
section: A section of parsed HTML that represents a function
definition
Returns:
Name of function
"""
prop_name_h4 = section.find('h4', {'class': 'propTitle'})
# The h4 prop section is consisting of the elements:
# <a class="anchor"> (Anchor-link),
# (optional) <span class="platform"> (platform span element),
# the name of the prop as clear text,
# <a class="hash-link"> (hash link)
link_to_general_props="View props... #"
if prop_name_h4 and prop_name_h4.text != link_to_general_props:
prop_name=prop_name_h4.next.next
if prop_name_h4.find('span', {'class': 'platform'}):
prop_name=prop_name_h4.find(
'span', {'class': 'platform'}).next.next
if not isinstance(prop_name, str):
# The prop_name is not a bs4.element.NavigableString
# It is probably a "ScrollView props..." link or something else
# that does not conform to the general format of the docs.
return None
return prop_name
def parse_for_first_paragraph(self, section):
"""
Returns the first paragraph of text for a given function
Fixes up some weird double spacing and newlines.
Args:
section: A section of parsed HTML that represents a function
definition
Returns:
First paragraph found with text
"""
paragraphs = section.find_all('p')
for paragraph in paragraphs:
if paragraph.text:
return self._format_output(paragraph.text)
return ''
def parse_for_anchor(self, section):
"""
Returns the anchor link to specific function doc
Args:
section: A section of parsed HTML that represents a function
definition
Returns:
The href value of the link to doc
"""
a_tag = section.find('a', {'class': 'anchor'})
if a_tag:
return a_tag['name']
return ''
def parse_for_signature(self, section, titleName):
"""
Returns the signature
Args:
section: A section of parsed HTML that represents a definition of
a property or method
Returns:
The signature
"""
h4 = section.find('h4', {'class': titleName})
        contents = list(h4.strings)
# Remove the last item (and the preceding space), it is a hash link
del contents[-1]
del contents[-1]
# If platform is present, remove it - relevant for Properties
if h4.find('span', {'class': 'platform'}):
del contents[0]
# If there are two spans with class methodType, the first is not wanted,
# because it is "static".
# Relevant for methods section
if len(h4.find_all('span', {'class': 'methodType'})) > 1:
del contents[0]
        if contents:
            signature = ''.join(contents)
return '<pre><code>{}</code></pre>'.format(
self._format_output(signature))
return ''
def parse_for_method_name(self, section):
"""
Returns the name of a method
Args:
section: A section of parsed HTML that represents a method definition
Returns:
The method name
"""
method_name_h4 = section.find('h4', {'class': 'methodTitle'})
# The h4 method name section is consisting of the elements:
# <a class="anchor"> (Anchor-link),
# <span class="methodType"> (method type span element),
# the name of the prop as clear text,
# <span class="methodType"> (method signature span element),
# <a class="hash-link"> (hash link)
        if method_name_h4:
            method_name = method_name_h4.next.next
            nbr_of_methodType_tags_in_h4 = len(method_name_h4.find_all(
                'span', {'class': 'methodType'}))
            if nbr_of_methodType_tags_in_h4 > 1:
                method_name = method_name_h4.find(
                    'span', {'class': 'methodType'}).next.next
return method_name
def create_url(self, anchor):
"""
Helper method to create URL back to document
Args:
anchor: #anchor
Returns:
Full URL to function on the python doc
"""
file_path = self.file_being_used.replace(self.info['download_path'], '')
return self.info['doc_base_url'].format(
'{}#{}'.format(file_path, anchor))
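        # Illustrative trace: for a file 'view.html' under the download path
        # and anchor 'style', this yields
        # 'https://facebook.github.io/react-native/releases/0.40/docs/view.html#style'.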
def parse_for_data(self):
"""
Main gateway into parsing the data. Will retrieve all necessary data
elements.
"""
data = []
if self.intro_text and self.title:
data_elements = {
'module': self.title,
'function': '',
'method_signature': '',
'first_paragraph': self.intro_text,
'url': self.create_url('')
}
data.append(data_elements)
        titleName = 'propTitle'
for prop_section in self.prop_sections:
prop_name = self.parse_for_prop_name(prop_section)
if prop_name:
prop_signature = self.parse_for_signature(prop_section,
titleName)
first_paragraph = self.parse_for_first_paragraph(prop_section)
anchor = self.parse_for_anchor(prop_section)
url = self.create_url(anchor)
data_elements = {
'module': self.title, # "Module" is another name for
# "component" or "API" in this
# fathead
'function': prop_name,
'method_signature': prop_signature,
'first_paragraph': first_paragraph,
'url': url,
}
data.append(data_elements)
        titleName = 'methodTitle'
        for method_section in self.method_sections:
            method_name = self.parse_for_method_name(method_section)
if method_name:
method_signature = self.parse_for_signature(method_section,
titleName)
first_paragraph = self.parse_for_first_paragraph(method_section)
anchor = self.parse_for_anchor(method_section)
url = self.create_url(anchor)
data_elements = {
'module': self.title,
'function': method_name,
'method_signature': method_signature,
'first_paragraph': first_paragraph,
'url': url,
}
data.append(data_elements)
self.parsed_data = data
def get_data(self):
"""
Get the parsed data.
Returns:
self.parsed_data: Dict containing necessary data elements
"""
return self.parsed_data
def _format_output(self, text):
"""
Helper method to format the output appropriately.
"""
return text.replace(' ', ' ').replace('\n', ' ').replace('\\n', r'\\n')
class DataOutput(object):
"""
Object responsible for outputting data into the output.txt file
"""
def __init__(self, data):
self.data = data
self.output = INFO['out_file']
def create_names_from_data(self, data_element):
"""
Figure out the name of the function. Will contain the module name if
one exists.
Args:
data_element: Incoming data dict
Returns:
Name, with whitespace stripped out
"""
module = data_element.get('module')
function = data_element.get('function')
        dotted_name = '{}{}{}'.format(
            module, '.' if module and function else '', function)
spaced_name = '{} {}'.format(module, function)
return dotted_name.strip(), spaced_name.strip()
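        # Illustrative example: module='ScrollView', function='scrollTo'
        # yields ('ScrollView.scrollTo', 'ScrollView scrollTo').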
def create_file(self):
"""
Iterate through the data and create the needed output.txt file,
appending to file as necessary.
"""
with open(self.output, 'a') as output_file:
for data_element in self.data:
if data_element.get('module') or data_element.get('function'):
method_signature = data_element.get('method_signature')
                    first_paragraph_text = data_element.get('first_paragraph')
                    first_paragraph = ''
                    if first_paragraph_text:
                        first_paragraph = '<p>{}</p>'.format(first_paragraph_text)
                    name, redirect = self.create_names_from_data(data_element)
                    abstract = '<section class="prog__container">{}{}</section>'.format(
                        first_paragraph, method_signature)
url = data_element.get('url')
list_of_data = [
name, # unique name
'A', # type is article
'', # no redirect data
'', # ignore
'', # no categories
'', # ignore
'', # no related topics
'', # ignore
HOME_LINK, # add an external link back to react native home
'', # no disambiguation
'', # images
abstract, # abstract
url # url to doc
]
output_file.write('{}\n'.format('\t'.join(list_of_data)))
# Add redirect if we got a redirect name that is different from the original name
if redirect != name:
list_of_data = [
redirect, # unique name
'R', # type is redirect
name, # redirect alias, to the original data
'', # ignore
'', # no categories
'', # ignore
'', # no related topics
'', # ignore
'', # no external link
'', # no disambiguation
'', # images
'', # no abstract
'' # no url
]
output_file.write('{}\n'.format('\t'.join(list_of_data)))
def cleanup(out_file):
"""
Cleanup output.txt's files. Mostly for use during local dev/testing.
"""
if os.path.isfile(out_file):
os.remove(out_file)
if __name__ == "__main__":
cleanup('output.txt')
for dir_path, dir_name, file_names in os.walk(INFO['download_path']):
for file_name in file_names:
if '.html' in file_name:
print("Processing %s " % file_name)
file_path = '/'.join((dir_path, file_name))
data = Data(file_path)
parser = DataParser(data, INFO)
parser.parse_for_data()
output = DataOutput(parser.get_data())
output.create_file()
| rasikapohankar/zeroclickinfo-fathead | lib/fathead/react_native/parse.py | Python | apache-2.0 | 15,135 | 0.004361 |
__author__ = 'canderson'
import os
import webapp2
import jinja2
from google.appengine.ext import db
template_dir = os.path.join(os.path.dirname(__file__), 'templates')
env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_dir),
autoescape=True)
class Handler(webapp2.RequestHandler):
def write(self, *a, **kw):
self.response.out.write(*a, **kw)
def render_str(self, template, **params):
t = env.get_template(template)
return t.render(params)
def render(self, template, **kw):
self.write(self.render_str(template, **kw))
class MainPage(Handler):
def get(self):
#self.write("asciichan!")
self.render('form.html')
app = webapp2.WSGIApplication([('/', MainPage)], debug=True)
| W0mpRat/WebDev03 | UdacityFrameWork.py | Python | unlicense | 781 | 0.002561 |
"""The tests for the Template switch platform."""
from homeassistant.core import callback
from homeassistant import setup
import homeassistant.components as core
from homeassistant.const import STATE_ON, STATE_OFF
from tests.common import (
get_test_home_assistant, assert_setup_component)
class TestTemplateSwitch:
"""Test the Template switch."""
hass = None
calls = None
# pylint: disable=invalid-name
def setup_method(self, method):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.calls = []
@callback
def record_call(service):
"""Track function calls.."""
self.calls.append(service)
self.hass.services.register('test', 'automation', record_call)
def teardown_method(self, method):
"""Stop everything that was started."""
self.hass.stop()
def test_template_state_text(self):
"""Test the state text of a template."""
with assert_setup_component(1, 'switch'):
assert setup.setup_component(self.hass, 'switch', {
'switch': {
'platform': 'template',
'switches': {
'test_template_switch': {
'value_template':
"{{ states.switch.test_state.state }}",
'turn_on': {
'service': 'switch.turn_on',
'entity_id': 'switch.test_state'
},
'turn_off': {
'service': 'switch.turn_off',
'entity_id': 'switch.test_state'
},
}
}
}
})
self.hass.start()
self.hass.block_till_done()
        self.hass.states.set('switch.test_state', STATE_ON)
self.hass.block_till_done()
state = self.hass.states.get('switch.test_template_switch')
assert state.state == STATE_ON
        self.hass.states.set('switch.test_state', STATE_OFF)
self.hass.block_till_done()
state = self.hass.states.get('switch.test_template_switch')
assert state.state == STATE_OFF
def test_template_state_boolean_on(self):
"""Test the setting of the state with boolean on."""
with assert_setup_component(1, 'switch'):
assert setup.setup_component(self.hass, 'switch', {
'switch': {
'platform': 'template',
'switches': {
'test_template_switch': {
'value_template':
"{{ 1 == 1 }}",
'turn_on': {
'service': 'switch.turn_on',
'entity_id': 'switch.test_state'
},
'turn_off': {
'service': 'switch.turn_off',
'entity_id': 'switch.test_state'
},
}
}
}
})
self.hass.start()
self.hass.block_till_done()
state = self.hass.states.get('switch.test_template_switch')
assert state.state == STATE_ON
def test_template_state_boolean_off(self):
"""Test the setting of the state with off."""
with assert_setup_component(1, 'switch'):
assert setup.setup_component(self.hass, 'switch', {
'switch': {
'platform': 'template',
'switches': {
'test_template_switch': {
'value_template':
"{{ 1 == 2 }}",
'turn_on': {
'service': 'switch.turn_on',
'entity_id': 'switch.test_state'
},
'turn_off': {
'service': 'switch.turn_off',
'entity_id': 'switch.test_state'
},
}
}
}
})
self.hass.start()
self.hass.block_till_done()
state = self.hass.states.get('switch.test_template_switch')
assert state.state == STATE_OFF
def test_icon_template(self):
"""Test icon template."""
with assert_setup_component(1, 'switch'):
assert setup.setup_component(self.hass, 'switch', {
'switch': {
'platform': 'template',
'switches': {
'test_template_switch': {
'value_template':
"{{ states.switch.test_state.state }}",
'turn_on': {
'service': 'switch.turn_on',
'entity_id': 'switch.test_state'
},
'turn_off': {
'service': 'switch.turn_off',
'entity_id': 'switch.test_state'
},
'icon_template':
"{% if states.switch.test_state.state %}"
"mdi:check"
"{% endif %}"
}
}
}
})
self.hass.start()
self.hass.block_till_done()
state = self.hass.states.get('switch.test_template_switch')
assert state.attributes.get('icon') == ''
        self.hass.states.set('switch.test_state', STATE_ON)
self.hass.block_till_done()
state = self.hass.states.get('switch.test_template_switch')
assert state.attributes['icon'] == 'mdi:check'
def test_entity_picture_template(self):
"""Test entity_picture template."""
with assert_setup_component(1, 'switch'):
assert setup.setup_component(self.hass, 'switch', {
'switch': {
'platform': 'template',
'switches': {
'test_template_switch': {
'value_template':
"{{ states.switch.test_state.state }}",
'turn_on': {
'service': 'switch.turn_on',
'entity_id': 'switch.test_state'
},
'turn_off': {
'service': 'switch.turn_off',
'entity_id': 'switch.test_state'
},
'entity_picture_template':
"{% if states.switch.test_state.state %}"
"/local/switch.png"
"{% endif %}"
}
}
}
})
self.hass.start()
self.hass.block_till_done()
state = self.hass.states.get('switch.test_template_switch')
assert state.attributes.get('entity_picture') == ''
        self.hass.states.set('switch.test_state', STATE_ON)
self.hass.block_till_done()
state = self.hass.states.get('switch.test_template_switch')
assert state.attributes['entity_picture'] == '/local/switch.png'
def test_template_syntax_error(self):
"""Test templating syntax error."""
with assert_setup_component(0, 'switch'):
assert setup.setup_component(self.hass, 'switch', {
'switch': {
'platform': 'template',
'switches': {
'test_template_switch': {
'value_template':
"{% if rubbish %}",
'turn_on': {
'service': 'switch.turn_on',
'entity_id': 'switch.test_state'
},
'turn_off': {
'service': 'switch.turn_off',
'entity_id': 'switch.test_state'
},
}
}
}
})
self.hass.start()
self.hass.block_till_done()
assert self.hass.states.all() == []
def test_invalid_name_does_not_create(self):
"""Test invalid name."""
with assert_setup_component(0, 'switch'):
assert setup.setup_component(self.hass, 'switch', {
'switch': {
'platform': 'template',
'switches': {
'test INVALID switch': {
'value_template':
"{{ rubbish }",
'turn_on': {
'service': 'switch.turn_on',
'entity_id': 'switch.test_state'
},
'turn_off': {
'service': 'switch.turn_off',
'entity_id': 'switch.test_state'
},
}
}
}
})
self.hass.start()
self.hass.block_till_done()
assert self.hass.states.all() == []
def test_invalid_switch_does_not_create(self):
"""Test invalid switch."""
with assert_setup_component(0, 'switch'):
assert setup.setup_component(self.hass, 'switch', {
'switch': {
'platform': 'template',
'switches': {
'test_template_switch': 'Invalid'
}
}
})
self.hass.start()
self.hass.block_till_done()
assert self.hass.states.all() == []
def test_no_switches_does_not_create(self):
"""Test if there are no switches no creation."""
with assert_setup_component(0, 'switch'):
assert setup.setup_component(self.hass, 'switch', {
'switch': {
'platform': 'template'
}
})
self.hass.start()
self.hass.block_till_done()
assert self.hass.states.all() == []
def test_missing_template_does_not_create(self):
"""Test missing template."""
with assert_setup_component(0, 'switch'):
assert setup.setup_component(self.hass, 'switch', {
'switch': {
'platform': 'template',
'switches': {
'test_template_switch': {
'not_value_template':
"{{ states.switch.test_state.state }}",
'turn_on': {
'service': 'switch.turn_on',
'entity_id': 'switch.test_state'
},
'turn_off': {
'service': 'switch.turn_off',
'entity_id': 'switch.test_state'
},
}
}
}
})
self.hass.start()
self.hass.block_till_done()
assert self.hass.states.all() == []
def test_missing_on_does_not_create(self):
"""Test missing on."""
with assert_setup_component(0, 'switch'):
assert setup.setup_component(self.hass, 'switch', {
'switch': {
'platform': 'template',
'switches': {
'test_template_switch': {
'value_template':
"{{ states.switch.test_state.state }}",
'not_on': {
'service': 'switch.turn_on',
'entity_id': 'switch.test_state'
},
'turn_off': {
'service': 'switch.turn_off',
'entity_id': 'switch.test_state'
},
}
}
}
})
self.hass.start()
self.hass.block_till_done()
assert self.hass.states.all() == []
def test_missing_off_does_not_create(self):
"""Test missing off."""
with assert_setup_component(0, 'switch'):
assert setup.setup_component(self.hass, 'switch', {
'switch': {
'platform': 'template',
'switches': {
'test_template_switch': {
'value_template':
"{{ states.switch.test_state.state }}",
'turn_on': {
'service': 'switch.turn_on',
'entity_id': 'switch.test_state'
},
'not_off': {
'service': 'switch.turn_off',
'entity_id': 'switch.test_state'
},
}
}
}
})
self.hass.start()
self.hass.block_till_done()
assert self.hass.states.all() == []
def test_on_action(self):
"""Test on action."""
assert setup.setup_component(self.hass, 'switch', {
'switch': {
'platform': 'template',
'switches': {
'test_template_switch': {
'value_template':
"{{ states.switch.test_state.state }}",
'turn_on': {
'service': 'test.automation'
},
'turn_off': {
'service': 'switch.turn_off',
'entity_id': 'switch.test_state'
},
}
}
}
})
self.hass.start()
self.hass.block_till_done()
self.hass.states.set('switch.test_state', STATE_OFF)
self.hass.block_till_done()
state = self.hass.states.get('switch.test_template_switch')
assert state.state == STATE_OFF
core.switch.turn_on(self.hass, 'switch.test_template_switch')
self.hass.block_till_done()
assert len(self.calls) == 1
def test_off_action(self):
"""Test off action."""
assert setup.setup_component(self.hass, 'switch', {
'switch': {
'platform': 'template',
'switches': {
'test_template_switch': {
'value_template':
"{{ states.switch.test_state.state }}",
'turn_on': {
'service': 'switch.turn_on',
'entity_id': 'switch.test_state'
},
'turn_off': {
'service': 'test.automation'
},
}
}
}
})
self.hass.start()
self.hass.block_till_done()
self.hass.states.set('switch.test_state', STATE_ON)
self.hass.block_till_done()
state = self.hass.states.get('switch.test_template_switch')
assert state.state == STATE_ON
core.switch.turn_off(self.hass, 'switch.test_template_switch')
self.hass.block_till_done()
assert len(self.calls) == 1
| persandstrom/home-assistant | tests/components/switch/test_template.py | Python | apache-2.0 | 16,305 | 0 |
from __future__ import absolute_import
import os
from celery import Celery
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'komsukomsuhuu.settings')
from django.conf import settings
app = Celery('komsukomsuhuu',
broker='amqp://',
backend='amqp://',
)
# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
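# Illustrative usage (assumes a running broker and worker, not shown here):
#   from komsukomsuhuu.celery import debug_task
#   debug_task.delay()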
@app.task(bind=True)
def debug_task(self):
print('Request: {0!r}'.format(self.request)) | fabteam1/komsukomsuhuhu | komsukomsuhuu/komsukomsuhuu/celery.py | Python | mit | 655 | 0.003053 |
#!/usr/bin/python
# Fibonacci: each element is the sum of the two before it (prints 1 1 2 3 5 8)
a, b = 0, 1
while b < 10:
print b
a, b = b, a + b | Bodidze/21v-python | unit_01/15.py | Python | mit | 114 | 0.026316 |
# coding=utf-8
# Author: CristianBB
# Greetings to Mr. Pine-apple
# URL: https://sick-rage.github.io
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function, unicode_literals
import re
from requests.compat import urljoin
from sickbeard import helpers, logger, tvcache
from sickbeard.bs4_parser import BS4Parser
from sickrage.helper.common import convert_size
from sickrage.providers.torrent.TorrentProvider import TorrentProvider
class newpctProvider(TorrentProvider):
def __init__(self):
TorrentProvider.__init__(self, 'Newpct')
self.onlyspasearch = None
self.url = 'http://www.newpct.com'
self.urls = {'search': urljoin(self.url, 'index.php')}
self.cache = tvcache.TVCache(self, min_time=20)
def search(self, search_strings, age=0, ep_obj=None): # pylint: disable=too-many-locals
"""
Search query:
http://www.newpct.com/index.php?l=doSearch&q=fringe&category_=All&idioma_=1&bus_de_=All
q => Show name
category_ = Category 'Shows' (767)
idioma_ = Language Spanish (1), All
bus_de_ = Date from (All, mes, semana, ayer, hoy)
"""
results = []
        # Determine the show's language; the Spanish-only check below relies on it
lang_info = '' if not ep_obj or not ep_obj.show else ep_obj.show.lang
search_params = {
'l': 'doSearch',
'q': '',
'category_': 'All',
'idioma_': 1,
'bus_de_': 'All'
}
for mode in search_strings:
items = []
logger.log('Search Mode: {0}'.format(mode), logger.DEBUG)
if self.onlyspasearch:
search_params['idioma_'] = 1
else:
search_params['idioma_'] = 'All'
# Only search if user conditions are true
if self.onlyspasearch and lang_info != 'es' and mode != 'RSS':
logger.log('Show info is not spanish, skipping provider search', logger.DEBUG)
continue
search_params['bus_de_'] = 'All' if mode != 'RSS' else 'semana'
for search_string in search_strings[mode]:
if mode != 'RSS':
logger.log('Search string: {0}'.format
(search_string.decode('utf-8')), logger.DEBUG)
search_params['q'] = search_string
data = self.get_url(self.urls['search'], params=search_params, returns='text')
if not data:
continue
with BS4Parser(data, 'html5lib') as html:
torrent_table = html.find('table', id='categoryTable')
torrent_rows = torrent_table('tr') if torrent_table else []
# Continue only if at least one Release is found
if len(torrent_rows) < 3: # Headers + 1 Torrent + Pagination
logger.log('Data returned from provider does not contain any torrents', logger.DEBUG)
continue
# 'Fecha', 'Título', 'Tamaño', ''
# Date, Title, Size
labels = [label.get_text(strip=True) for label in torrent_rows[0]('th')]
for row in torrent_rows[1:-1]:
try:
cells = row('td')
torrent_row = row.find('a')
download_url = torrent_row.get('href', '')
title = self._processTitle(torrent_row.get('title', ''), download_url)
if not all([title, download_url]):
continue
# Provider does not provide seeders/leechers
seeders = 1
leechers = 0
                            # 2 is the 'Tamaño' column.
torrent_size = cells[2].get_text(strip=True)
size = convert_size(torrent_size) or -1
item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': ''}
if mode != 'RSS':
logger.log('Found result: {0}'.format(title), logger.DEBUG)
items.append(item)
except (AttributeError, TypeError):
continue
results += items
return results
def get_url(self, url, post_data=None, params=None, timeout=30, **kwargs): # pylint: disable=too-many-arguments
"""
        Called with returns='content' when torrent info is requested (i.e. by the torrent
        client); in that case the returned page must first be parsed to extract the actual
        .torrent URL.
"""
trickery = kwargs.pop('returns', '')
if trickery == 'content':
kwargs['returns'] = 'text'
data = super(newpctProvider, self).get_url(url, post_data=post_data, params=params, timeout=timeout, **kwargs)
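        # NOTE: the page is assumed to contain exactly one tumejorserie.com
        # torrent link; if the site layout changes, re.search below returns None
        # and .group() raises AttributeError.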
url = re.search(r'http://tumejorserie.com/descargar/.+\.torrent', data, re.DOTALL).group()
url = urljoin(self.url, url.rsplit('=', 1)[-1])
kwargs['returns'] = trickery
return super(newpctProvider, self).get_url(url, post_data=post_data, params=params,
timeout=timeout, **kwargs)
def download_result(self, result):
"""
Save the result to disk.
"""
# check for auth
if not self.login():
return False
urls, filename = self._make_url(result)
for url in urls:
# Search results don't return torrent files directly, it returns show sheets so we must parse showSheet to access torrent.
data = self.get_url(url, returns='text')
url_torrent = re.search(r'http://tumejorserie.com/descargar/.+\.torrent', data, re.DOTALL).group()
if url_torrent.startswith('http'):
self.headers.update({'Referer': '/'.join(url_torrent.split('/')[:3]) + '/'})
logger.log('Downloading a result from {0}'.format(url))
if helpers.download_file(url_torrent, filename, session=self.session, headers=self.headers):
if self._verify_download(filename):
logger.log('Saved result to {0}'.format(filename), logger.INFO)
return True
else:
logger.log('Could not download {0}'.format(url), logger.WARNING)
helpers.remove_file_failed(filename)
if urls:
logger.log('Failed to download any results', logger.WARNING)
return False
@staticmethod
def _processTitle(title, url):
# Remove 'Mas informacion sobre ' literal from title
title = title[22:]
title = re.sub(r'[ ]{2,}', ' ', title, flags=re.I)
# Quality - Use re module to avoid case sensitive problems with replace
title = re.sub(r'\[HDTV 1080p?[^\[]*]', '1080p HDTV x264', title, flags=re.I)
title = re.sub(r'\[HDTV 720p?[^\[]*]', '720p HDTV x264', title, flags=re.I)
title = re.sub(r'\[ALTA DEFINICION 720p?[^\[]*]', '720p HDTV x264', title, flags=re.I)
title = re.sub(r'\[HDTV]', 'HDTV x264', title, flags=re.I)
title = re.sub(r'\[DVD[^\[]*]', 'DVDrip x264', title, flags=re.I)
title = re.sub(r'\[BluRay 1080p?[^\[]*]', '1080p BluRay x264', title, flags=re.I)
title = re.sub(r'\[BluRay Rip 1080p?[^\[]*]', '1080p BluRay x264', title, flags=re.I)
title = re.sub(r'\[BluRay Rip 720p?[^\[]*]', '720p BluRay x264', title, flags=re.I)
title = re.sub(r'\[BluRay MicroHD[^\[]*]', '1080p BluRay x264', title, flags=re.I)
title = re.sub(r'\[MicroHD 1080p?[^\[]*]', '1080p BluRay x264', title, flags=re.I)
title = re.sub(r'\[BLuRay[^\[]*]', '720p BluRay x264', title, flags=re.I)
title = re.sub(r'\[BRrip[^\[]*]', '720p BluRay x264', title, flags=re.I)
title = re.sub(r'\[BDrip[^\[]*]', '720p BluRay x264', title, flags=re.I)
        # Detect hdtv/bluray by url
        # hdtv 1080p example url: http://www.newpct.com/descargar-seriehd/foo/capitulo-610/hdtv-1080p-ac3-5-1/
        # hdtv 720p example url: http://www.newpct.com/descargar-seriehd/foo/capitulo-26/hdtv-720p-ac3-5-1/
        # hdtv example url: http://www.newpct.com/descargar-serie/foo/capitulo-214/hdtv/
        # bluray compilation example url: http://www.newpct.com/descargar-seriehd/foo/capitulo-11/bluray-1080p/
title_hdtv = re.search(r'HDTV', title, flags=re.I)
title_720p = re.search(r'720p', title, flags=re.I)
title_1080p = re.search(r'1080p', title, flags=re.I)
title_x264 = re.search(r'x264', title, flags=re.I)
title_bluray = re.search(r'bluray', title, flags=re.I)
title_serie_hd = re.search(r'descargar\-seriehd', title, flags=re.I)
url_hdtv = re.search(r'HDTV', url, flags=re.I)
url_720p = re.search(r'720p', url, flags=re.I)
url_1080p = re.search(r'1080p', url, flags=re.I)
url_bluray = re.search(r'bluray', url, flags=re.I)
if not title_hdtv and url_hdtv:
title += ' HDTV'
if not title_x264:
title += ' x264'
if not title_bluray and url_bluray:
title += ' BluRay'
if not title_x264:
title += ' x264'
if not title_1080p and url_1080p:
title += ' 1080p'
title_1080p = True
if not title_720p and url_720p:
title += ' 720p'
title_720p = True
if not (title_720p or title_1080p) and title_serie_hd:
title += ' 720p'
# Language
title = re.sub(r'\[Spanish[^\[]*]', 'SPANISH AUDIO', title, flags=re.I)
title = re.sub(r'\[Castellano[^\[]*]', 'SPANISH AUDIO', title, flags=re.I)
title = re.sub(r'\[Español[^\[]*]', 'SPANISH AUDIO', title, flags=re.I)
title = re.sub(r'\[AC3 5\.1 Español[^\[]*]', 'SPANISH AUDIO', title, flags=re.I)
if re.search(r'\[V.O.[^\[]*]', title, flags=re.I):
title += '-NEWPCTVO'
else:
title += '-NEWPCT'
return title.strip()
provider = newpctProvider()
| Arcanemagus/SickRage | sickbeard/providers/newpct.py | Python | gpl-3.0 | 10,924 | 0.00403 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, models
from odoo.tools import pycompat
class Board(models.AbstractModel):
_name = 'board.board'
_description = "Board"
_auto = False
@api.model
def create(self, vals):
return self
@api.model
def fields_view_get(self, view_id=None, view_type='form', toolbar=False, submenu=False):
"""
Overrides orm field_view_get.
@return: Dictionary of Fields, arch and toolbar.
"""
res = super(Board, self).fields_view_get(view_id=view_id, view_type=view_type, toolbar=toolbar, submenu=submenu)
custom_view = self.env['ir.ui.view.custom'].search([('user_id', '=', self.env.uid), ('ref_id', '=', view_id)], limit=1)
if custom_view:
res.update({'custom_view_id': custom_view.id,
'arch': custom_view.arch})
res.update({
'arch': self._arch_preprocessing(res['arch']),
'toolbar': {'print': [], 'action': [], 'relate': []}
})
return res
@api.model
def _arch_preprocessing(self, arch):
from lxml import etree
def remove_unauthorized_children(node):
for child in node.iterchildren():
if child.tag == 'action' and child.get('invisible'):
node.remove(child)
else:
remove_unauthorized_children(child)
return node
archnode = etree.fromstring(arch)
return etree.tostring(remove_unauthorized_children(archnode), pretty_print=True, encoding='unicode')
| Aravinthu/odoo | addons/board/models/board.py | Python | agpl-3.0 | 1,665 | 0.002402 |
import numpy
import pandas
import statsmodels.api as sm
'''
In this exercise, we will perform some rudimentary practices similar to those of
an actual data scientist.
Part of a data scientist's job is to use her or his intuition and insight to
write algorithms and heuristics. A data scientist also creates mathematical models
to make predictions based on some attributes from the data that they are examining.
We would like for you to take your knowledge and intuition about the Titanic
and its passengers' attributes to predict whether or not the passengers survived
or perished. You can read more about the Titanic and specifics about this dataset at:
http://en.wikipedia.org/wiki/RMS_Titanic
http://www.kaggle.com/c/titanic-gettingStarted
In this exercise and the following ones, you are given a list of Titanic passengers
and their associated information. More information about the data can be seen at the
link below:
http://www.kaggle.com/c/titanic-gettingStarted/data.
For this exercise, you need to write a simple heuristic that will use
the passengers' gender to predict if that person survived the Titanic disaster.
Your prediction should be 78% accurate or higher.
Here's a simple heuristic to start off:
1) If the passenger is female, your heuristic should assume that the
passenger survived.
2) If the passenger is male, your heuristic should
assume that the passenger did not survive.
You can access the gender of a passenger via passenger['Sex'].
If the passenger is male, passenger['Sex'] will return a string "male".
If the passenger is female, passenger['Sex'] will return a string "female".
Write your prediction back into the "predictions" dictionary. The
key of the dictionary should be the passenger's id (which can be accessed
via passenger["PassengerId"]) and the associated value should be 1 if the
passenger survived or 0 otherwise.
For example, if a passenger is predicted to have survived:
passenger_id = passenger['PassengerId']
predictions[passenger_id] = 1
And if a passenger is predicted to have perished in the disaster:
passenger_id = passenger['PassengerId']
predictions[passenger_id] = 0
You can also look at the Titanic data that you will be working with
at the link below:
https://www.dropbox.com/s/r5f9aos8p9ri9sa/titanic_data.csv
'''
def simple_heuristic(file_path):
predictions = {}
df = pandas.read_csv(file_path)
for passenger_index, passenger in df.iterrows():
passenger_id = passenger['PassengerId']
if passenger['Sex'] == 'female':
predictions[passenger_id] = 1
else:
predictions[passenger_id] = 0
#print predictions
return predictions
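# A vectorized sketch of the same heuristic (assuming the CSV layout described
# above) could replace the row-wise loop:
#   df = pandas.read_csv(file_path)
#   predictions = dict(zip(df['PassengerId'], (df['Sex'] == 'female').astype(int)))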
| rmhyman/DataScience | Lesson1/titanic_data_heuristic1.py | Python | mit | 2,937 | 0.007831 |
NAME = 'logcrypto'
CFLAGS = []
LDFLAGS = []
LIBS = []
GCC_LIST = ['logcrypto']
| goal/uwsgi | plugins/logcrypto/uwsgiplugin.py | Python | gpl-2.0 | 80 | 0 |
# sde_solvers.py - Collection of numerical methods to solve (vector-valued) SDEs
#
# Author: Stefan Fuertinger [[email protected]]
# Created: February 19 2014
# Last modified: <2017-09-15 11:31:25>
from __future__ import division
import numpy as np
from scipy.stats import norm
def rk_1(func,x0,tsteps,**kwargs):
r"""
Explicit first order (strong and weak) Runge--Kutta method for SDEs with additive/multiplicative (non-)autonomous scalar noise
Parameters
----------
func : callable (X,t,**kwargs)
Returns drift `A` and diffusion `B` of the SDE. See Examples for details.
x0 : NumPy 1darray
Initial condition
tsteps : NumPy 1darray
Sequence of time points for which to solve (including initial time `t0`)
**kwargs : keyword arguments
Additional keyword arguments to be passed on to `func`. See `Examples` for details.
Returns
-------
Y : NumPy 2darray
Approximate solution at timepoints given by `tsteps`. Format is
`Y[:,tk]` approximate solution at time `tk`
Thus `Y` is a `numstate`-by-`timesteps` array
Notes
-----
The general form of an SDE with additive/multiplicative (non-)autonomous scalar noise is
.. math:: (1) \qquad dX_t = A(X_t,t)dt + B(X_t,t)dW_t, \quad X(t_0) = x_0
The method for solving the SDE (1) is described in Sec. 11.1 of
Kloeden, P.E., & Platen, E. (1999). `Numerical Solution of Stochastic Differential Equations.`
Berlin: Springer.
Examples
--------
Consider the SDE system
.. math::
dV_t & = - \alpha t V_t + t Z_t \beta dW_t,\\
dZ_t & = \alpha t Z_t + t V_t \gamma dW_t,\\
V_{t_0} & = 0.5, \quad Z_{t_0} = -0.5, \quad t_0 = 1,
thus with :math:`X_t = (V_t,Z_t)` we have
.. math::
A(X_t,t) & = (-\alpha t V_t,\alpha t Z_t),\\
B(t) & = (t Z_t \beta,t V_t \gamma).
Hence `func` would look like this:
::
import numpy as np
def myrhs(Xt,t,alpha=0.2,beta=0.01,gamma=0.02):
A = np.array([-alpha*t*Xt[0],alpha*t*Xt[1]])
B = np.array([t*Xt[1]*beta,t*Xt[0]*gamma])
return A,B
Thus, the full call to `rk_1` to approximate the SDE system on :math:`[t_0,2]` could be
something like (assuming the function `myrhs` is defined in `myrhs.py`)
>>> import numpy as np
>>> from sde_solvers import rk_1
>>> from myrhs import myrhs
>>> Xt = rk_1(myrhs,np.array([0.5,-0.5]),np.arange(1,2,1e-3),beta=.02)
Hence we used :math:`\beta = 0.02` in `myrhs` instead of the default value 0.01.
See also
--------
pc_1 : an implicit first order strong Runge--Kutta method
(it uses a strong order 0.5 Euler--Maruyama method as predictor and an implicit Runge--Kutta
update formula as corrector) for stiff SDEs
"""
# Check for correctness of input and allocate common tmp variables
Y,dt,sqrtdt,zeta1,zeta2 = checkinput(func,x0,tsteps)
# Generate i.i.d. normal random variables with mean=0 (loc) and std=sqrt(delta) (scale) (Var=std^2)
DW = zeta1*sqrtdt
# Compute solution recursively
for n in xrange(tsteps.size - 1):
# Get drift/diffusion from func
t = tsteps[n]
A, B = func(Y[:,n], t,**kwargs)
BGamma = func(Y[:,n] + A*dt + B*sqrtdt, t,**kwargs)[1]
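        # BGamma re-evaluates the diffusion at the supporting value
        # Y_n + A*dt + B*sqrt(dt); the (BGamma - B) term in the update below is
        # a derivative-free stand-in for the dB/dx term of the Milstein scheme.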
# Compute solution at next time point
Y[:,n+1] = Y[:,n] + A*dt + B*DW[n] + 0.5*(BGamma - B)*(DW[n]**2 - dt)*sqrtdt**(-1)
return Y
def pc_1(func,x0,tsteps,**kwargs):
r"""
Predictor-Corrector solver based on an implicit first order (strong and weak) Runge--Kutta method for SDEs with additive/multiplicative (non-)autonomous scalar noise
Parameters
----------
func : callable (X,t,**kwargs)
Returns drift `A` and diffusion `B` of the SDE. See Examples for details.
x0 : NumPy 1darray
Initial condition
tsteps : NumPy 1darray
Sequence of time points for which to solve (including initial time `t0`)
**kwargs : keyword arguments
Additional keyword arguments to be passed on to `func`. See `Examples` for details.
Returns
-------
Y : NumPy 2darray
Approximate solution at timepoints given by `tsteps`. Format is
`Y[:,tk]` approximate solution at time `tk`
Thus `Y` is a `numstate`-by-`timesteps` array
Notes
-----
The general form of an SDE with additive/multiplicative (non-)autonomous scalar noise is
.. math:: (1) \qquad dX_t = A(X_t,t)dt + B(X_t,t)dW_t, \quad X(t_0) = x_0
The code implements a two-fold approach to approximate solutions of (1). At each time-step
:math:`t_n` an order 0.5 strong Euler--Maruyama method is employed to estimate the solution
at time :math:`t_{n+1}` (predictor). This approximation is then used in the implicit
Runge--Kutta update formula (corrector).
The implicit Runge--Kutta method for solving the SDE (1) is described in Sec. 12.3 of
Kloeden, P.E., & Platen, E. (1999). `Numerical Solution of Stochastic Differential Equations.
Berlin: Springer.` The explicit Euler--Maruyama scheme is detailed in Sec. 9.1 ibid.
Examples
--------
Consider the SDE system
.. math::
dV_t & = - \alpha t V_t + t Z_t \beta dW_t,\\
dZ_t & = \alpha t Z_t + t V_t \gamma dW_t,\\
V_{t_0} & = 0.5, \quad Z_{t_0} = -0.5, \quad t_0 = 1,
thus with :math:`X_t = (V_t,Z_t)` we have
.. math::
A(X_t,t) & = (-\alpha t V_t,\alpha t Z_t),\\
B(t) & = (t Z_t \beta,t V_t \gamma).
Hence `func` would look like this:
::
import numpy as np
def myrhs(Xt,t,alpha=0.2,beta=0.01,gamma=0.02):
A = np.array([-alpha*t*Xt[0],alpha*t*Xt[1]])
B = np.array([t*Xt[1]*beta,t*Xt[0]*gamma])
return A,B
Thus, the full call to `pc_1` to approximate the SDE system on :math:`[t_0,2]` could be
something like (assuming the function `myrhs` is defined in `myrhs.py`)
>>> import numpy as np
>>> from sde_solvers import pc_1
>>> from myrhs import myrhs
>>> Xt = pc_1(myrhs,np.array([0.5,-0.5]),np.arange(1,2,1e-3),beta=.02)
Hence we used :math:`\beta = 0.02` in `myrhs` instead of the default value 0.01.
See also
--------
pc_15 : an implicit order 1.5 order strong Runge--Kutta method (it uses the mehod of ``rk_15``
as predictor and the corresponding implicit update formula as corrector).
"""
# Check for correctness of input and allocate common tmp variables
Y,dt,sqrtdt,zeta1,zeta2 = checkinput(func,x0,tsteps)
# Generate i.i.d. normal random variables with mean=0 (loc) and std=sqrt(delta) (scale) (Var=std^2)
DW = zeta1*sqrtdt
# Compute solution recursively
for n in xrange(tsteps.size - 1):
# Get drift/diffusion from func
t = tsteps[n]
A, B = func(Y[:,n], t,**kwargs)
BGamma = func(Y[:,n] + A*dt + B*sqrtdt, t,**kwargs)[1]
# Explicit Euler-Maruyama step
yt = Y[:,n] + A*dt + B*DW[n]
# Evaluate function at estimate yt and t_n+1
A1 = func(yt, tsteps[n+1],**kwargs)[0]
# Compute solution at next time point
Y[:,n+1] = Y[:,n] + A1*dt + B*DW[n] + 0.5*(BGamma - B)*(DW[n]**2 - dt)*sqrtdt**(-1)
return Y
def rk_15(func,x0,tsteps,**kwargs):
r"""
Explicit order 1.5 strong Runge--Kutta method for SDEs with additive (non-)autonomous scalar noise
Parameters
----------
func : callable (X,t,**kwargs)
Returns drift `A` and diffusion `B` of the SDE. See Examples for details.
x0 : NumPy 1darray
Initial condition
tsteps : NumPy 1darray
Sequence of time points for which to solve (including initial time `t0`)
**kwargs : keyword arguments
Additional keyword arguments to be passed on to `func`. See `Examples` for details.
Returns
-------
Y : NumPy 2darray
Approximate solution at timepoints given by `tsteps`. Format is
`Y[:,tk]` approximate solution at time `tk`
Thus `Y` is a `numstate`-by-`timesteps` array
Notes
-----
The general form of an SDE with additive (non-)autonomous scalar noise is
.. math:: (1) \qquad dX_t = A(X_t,t)dt + B(t)dW_t, \quad X(t_0) = x_0
The method for solving the SDE (1) is described in Sec. 11.2 of
Kloeden, P.E., & Platen, E. (1999). `Numerical Solution of Stochastic Differential Equations.`
Berlin: Springer.
Examples
--------
Consider the SDE system
.. math::
dV_t & = -\alpha \sin(V_t) \cos(t Z_t) + t \beta dW_t,\\
dZ_t & = -\alpha \cos(t V_t) \sin(Z_t) + t \gamma dW_t,\\
V_{t_0} & = 0.5, \quad Z_{t_0} = -0.5, \quad t_0 = 1,
thus with :math:`X_t = (V_t,Z_t)` we have
.. math::
A(X_t,t) & = (-\alpha \sin(V_t) \cos(t Z_t),-\alpha \cos(t V_t) \sin(Z_t)),\\
B(t) & = (t \beta,t \gamma).
Hence `func` would look like this:
::
import numpy as np
def myrhs(Xt,t,alpha=0.2,beta=0.01,gamma=0.02):
A = np.array([-alpha*np.sin(Xt[0])*np.cos(t*Xt[1]),
-alpha*np.cos(t*Xt[0])*np.sin(Xt[1])])
B = np.array([t*beta,t*gamma])
return A,B
Thus, the full call to `rk_15` to approximate the SDE system on :math:`[t_0,2]` could be
something like (assuming the function `myrhs` is defined in `myrhs.py`)
>>> import numpy as np
>>> from sde_solvers import rk_15
>>> from myrhs import myrhs
>>> Xt = rk_15(myrhs,np.array([0.5,-0.5]),np.arange(1,2,1e-3),beta=.02)
Hence we used :math:`\beta = 0.02` in `myrhs` instead of the default value 0.01.
See also
--------
pc_15 : an implicit order 1.5 order strong Runge--Kutta method (it uses the method of ``rk_15``
as predictor and the corresponding implicit update formula as corrector) for stiff SDEs
"""
# Check for correctness of input and allocate common tmp variables
Y,dt,sqrtdt,zeta1,zeta2 = checkinput(func,x0,tsteps)
# Generate pair of correlated normally distributed random variables and a linear combination of them
DW = zeta1*sqrtdt
DZ = 0.5*(zeta1 + np.sqrt(3)**(-1)*zeta2)*sqrtdt**3
DWZ = dt*DW - DZ
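    # DW ~ N(0, dt) is the Wiener increment; DZ approximates the double
    # integral I_(1,0) = int_tn^tn+1 int_tn^s dW_u ds, with Var(DZ) = dt^3/3
    # and E[DW*DZ] = dt^2/2 as required (cf. Kloeden & Platen, Ch. 10).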
# More temp variables
dt1 = dt**(-1)
dt2 = 0.5*sqrtdt**(-1)
dtplus = 0.25*dt + dt2*DZ
dtminus = 0.25*dt - dt2*DZ
# Compute solution recursively
for n in xrange(tsteps.size - 1):
# Get drift/diffusion from func
A, B = func(Y[:,n], tsteps[n],**kwargs)
Atmp = Y[:,n] + A*dt
Btmp = B*sqrtdt
Aplus = func(Atmp + Btmp, tsteps[n+1],**kwargs)[0]
Aminus = func(Atmp - Btmp, tsteps[n+1],**kwargs)[0]
B1 = func(Y[:,n], tsteps[n+1],**kwargs)[1]
# Compute solution at next time point
Y[:,n+1] = Y[:,n] + 0.5*A*dt +B*DW[n] \
+ Aplus*dtplus[n] + Aminus*dtminus[n] \
+ dt1*(B1 - B)*DWZ[n]
return Y
def pc_15(func,x0,tsteps,**kwargs):
r"""
Predictor-Corrector solver based on an implicit order 1.5 strong Runge--Kutta method for SDEs with additive (non-)autonomous scalar noise
Parameters
----------
func : callable (X,t,**kwargs)
Returns drift `A` and diffusion `B` of the SDE. See Examples for details.
x0 : NumPy 1darray
Initial condition
tsteps : NumPy 1darray
Sequence of time points for which to solve (including initial time `t0`)
**kwargs : keyword arguments
Additional keyword arguments to be passed on to `func`. See `Examples` for details.
Returns
-------
Y : NumPy 2darray
Approximate solution at timepoints given by `tsteps`. Format is
`Y[:,tk]` approximate solution at time `tk`
Thus `Y` is a `numstate`-by-`timesteps` array
Notes
-----
The general form of an SDE with additive (non-)autonomous scalar noise is
.. math:: (1) \qquad dX_t = A(X_t,t)dt + B(t)dW_t, \quad X(t_0) = x_0
The code implements a two-fold approach to approximate solutions of (1). At each time-step
:math:`t_n` an explicit order 1.5 strong Runge--Kutta method (compare ``rk_15``) is employed
to estimate the solution at time :math:`t_{n+1}` (predictor). This approximation is then used
in the implicit Runge--Kutta update formula of the same order (corrector).
The implicit Runge--Kutta method for solving the SDE (1) is described in Sec. 12.3 of
Kloeden, P.E., & Platen, E. (1999). `Numerical Solution of Stochastic Differential Equations`.
Berlin: Springer. For details on the explicit Runge--Kutta formula see the documentation of ``rk_15``.
Examples
--------
Consider the SDE system
.. math::
dV_t & = -\alpha \sin(V_t) \cos(t Z_t) + t \beta dW_t,\\
dZ_t & = -\alpha \cos(t V_t) \sin(Z_t) + t \gamma dW_t,\\
V_{t_0} & = 0.5, \quad Z_{t_0} = -0.5, \quad t_0 = 1,
thus with :math:`X_t = (V_t,Z_t)` we have
.. math::
A(X_t,t) & = (-\alpha \sin(V_t) \cos(t Z_t),-\alpha \cos(t V_t) \sin(Z_t)),\\
B(t) & = (t \beta,t \gamma).
Hence `func` would look like this:
::
import numpy as np
def myrhs(Xt,t,alpha=0.2,beta=0.01,gamma=0.02):
A = np.array([-alpha*np.sin(Xt[0])*np.cos(t*Xt[1]),
-alpha*np.cos(t*Xt[0])*np.sin(Xt[1])])
B = np.array([t*beta,t*gamma])
return A,B
Thus, the full call to `pc_15` to approximate the SDE system on :math:`[t_0,2]` could be
something like (assuming the function `myrhs` is defined in `myrhs.py`)
>>> import numpy as np
>>> from sde_solvers import pc_15
>>> from myrhs import myrhs
>>> Xt = pc_15(myrhs,np.array([0.5,-0.5]),np.arange(1,2,1e-3),beta=.02)
Hence we used :math:`\beta = 0.02` in `myrhs` instead of the default value 0.01.
See also
--------
pc_1 : a lower order (but faster) implicit solver
"""
# Check for correctness of input and allocate common tmp variables
Y,dt,sqrtdt,zeta1,zeta2 = checkinput(func,x0,tsteps)
# Generate pair of correlated normally distributed random variables and a linear combination of them
DW = zeta1*sqrtdt
DZ = 0.5*(zeta1 + np.sqrt(3)**(-1)*zeta2)*sqrtdt**3
DWZ = dt*DW - DZ
DWZ2 = DZ - 0.5*dt*DW
# More temp variables
dt1 = dt**(-1)
dt2 = 0.5*sqrtdt**(-1)
dtplus = 0.25*dt + dt2*DZ
dtminus = 0.25*dt - dt2*DZ
# Compute solution recursively
for n in xrange(tsteps.size - 1):
# Get drift/diffusion from func
A, B = func(Y[:,n], tsteps[n],**kwargs)
Atmp = Y[:,n] + A*dt
Btmp = B*sqrtdt
Aplus = func(Atmp + Btmp, tsteps[n+1],**kwargs)[0]
Aminus = func(Atmp - Btmp, tsteps[n+1],**kwargs)[0]
Aplus1 = func(Atmp + Btmp, tsteps[n],**kwargs)[0]
Aminus1 = func(Atmp - Btmp, tsteps[n],**kwargs)[0]
B1 = func(Y[:,n], tsteps[n+1],**kwargs)[1]
# Predictor step: explicit order 1.5 method
Adiff = Aplus*dtplus[n] + Aminus*dtminus[n]
# Adiff = 0.25*(Aplus + Aminus)*dt + dt2*(Aplus - Aminus)*DZ[n]
yt = Y[:,n] + 0.5*A*dt + B*DW[n] + Adiff + dt1*(B1 - B)*DWZ[n]
# Use predicted value to evaluate function at Y_n+1,t_n+1
A1 = func(yt, tsteps[n+1],**kwargs)[0]
# Corrector step: implicit order 1.5 method
Y[:,n+1] = yt + 0.5*A1*dt + dt2*(Aplus1 - Aminus1)*DWZ2[n] - Adiff
return Y
def rk_2(func,x0,tsteps,strato_p=15,strato_q=30,**kwargs):
r"""
Explicit second order strong Runge--Kutta method for SDEs with additive (non-)autonomous scalar noise
Parameters
----------
func : callable (X,t,**kwargs)
Returns drift `A` and diffusion `B` of the SDE. See Examples for details.
x0 : NumPy 1darray
Initial condition.
tsteps : NumPy 1darray
Sequence of time points for which to solve (including initial time `t0`).
strato_p : int
        Approximation order used to estimate the occurring multiple Stratonovich integrals.
        Can be lowered to `strato_p = 1` in many cases, but only change this if you know
        what you are doing.
strato_q : int
        Number of summands in the partial sum approximation of the Stratonovich integral
        coefficients. Can be lowered to `strato_q = 2` in many cases, but only change this
        if you know what you are doing.
**kwargs : keyword arguments
Additional keyword arguments to be passed on to `func`. See `Examples` for details.
Returns
-------
Y : NumPy 2darray
Approximate solution at timepoints given by `tsteps`. Format is
`Y[:,tk]` approximate solution at time `tk`
Thus `Y` is a `numstate`-by-`timesteps` array
Examples
--------
Consider the SDE system
.. math::
dV_t & = -\alpha \sin(V_t) \cos(t Z_t) + t \beta dW_t,\\
dZ_t & = -\alpha \cos(t V_t) \sin(Z_t) + t \gamma dW_t,\\
V_{t_0} & = 0.5, \quad Z_{t_0} = -0.5, \quad t_0 = 1,
thus with :math:`X_t = (V_t,Z_t)` we have
.. math::
A(X_t,t) & = (-\alpha \sin(V_t) \cos(t Z_t),-\alpha \cos(t V_t) \sin(Z_t)),\\
B(t) & = (t \beta,t \gamma).
Hence `func` would look like this:
::
import numpy as np
def myrhs(Xt,t,alpha=0.2,beta=0.01,gamma=0.02):
A = np.array([-alpha*np.sin(Xt[0])*np.cos(t*Xt[1]),
-alpha*np.cos(t*Xt[0])*np.sin(Xt[1])])
B = np.array([t*beta,t*gamma])
return A,B
Thus, the full call to `rk_2` to approximate the SDE system on :math:`[t_0,2]` could be
something like (assuming the function `myrhs` is defined in `myrhs.py`)
>>> import numpy as np
>>> from sde_solvers import rk_2
>>> from myrhs import myrhs
>>> Xt = rk_2(myrhs,np.array([0.5,-0.5]),np.arange(1,2,1e-3),beta=.02)
Hence we used :math:`\beta = 0.02` in `myrhs` instead of the default value 0.01.
Notes
-----
The general form of an SDE with additive (non-)autonomous scalar noise is
.. math:: (1) \qquad dX_t = A(X_t,t)dt + B(t)dW_t, \quad X(t_0) = x_0
The method for solving the SDE (1) is described in Sec. 11.3 of
Kloeden, P.E., & Platen, E. (1999). `Numerical Solution of Stochastic Differential Equations.`
Berlin: Springer.
See also
--------
pc_2 : an implicit second order method (it uses the method of ``rk_2`` as predictor
and the corresponding implicit update formula as corrector).
"""
# Check for correctness of input and allocate common tmp variables
Y,dt,sqrtdt,zeta1,zeta2 = checkinput(func,x0,tsteps)
# Compute Stratonovich integral approximation
J,DW,DZ = get_stratonovich(strato_p,strato_q,dt,zeta1,tsteps.size-1)
# More temp variables
dt1 = dt**(-1)
dt2 = 0.5*dt
DWZ = dt*DW - DZ
# This is going to be multiplied by the diffusion term
Jtmp = np.sqrt(np.abs(2*dt*J - DZ**2))
Jplus = dt1*(DZ + Jtmp)
Jminus = dt1*(DZ - Jtmp)
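    # Jplus/Jminus are the noise weights of the two supporting values at which
    # the drift is re-evaluated; they are constructed so the pair matches the
    # moments of the multiple Stratonovich integral J computed above
    # (cf. Kloeden & Platen, Sec. 11.3).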
# Compute solution recursively
for n in xrange(tsteps.size - 1):
# Get drift/diffusion from func
A, B = func(Y[:,n], tsteps[n],**kwargs)
Atmp = Y[:,n] + A*dt2
Aplus = func(Atmp + B*Jplus[n], tsteps[n] + dt2,**kwargs)[0]
Aminus = func(Atmp + B*Jminus[n], tsteps[n] + dt2,**kwargs)[0]
B1 = func(Y[:,n], tsteps[n+1],**kwargs)[1]
# Solution at t_n+1
Y[:,n+1] = Y[:,n] + (Aplus + Aminus)*dt2 + B*DW[n] + dt1*(B1 - B)*DWZ[n]
return Y
def pc_2(func,x0,tsteps,strato_p=15,strato_q=30,**kwargs):
r"""
Predictor-Corrector solver based on an implicit second order strong Runge--Kutta method for SDEs with additive (non-)autonomous scalar noise
Parameters
----------
func : callable (X,t,**kwargs)
Returns drift `A` and diffusion `B` of the SDE. See Examples for details.
x0 : NumPy 1darray
Initial condition
tsteps : NumPy 1darray
Sequence of time points for which to solve (including initial time `t0`)
**kwargs : keyword arguments
Additional keyword arguments to be passed on to `func`. See `Examples` for details.
Returns
-------
Y : NumPy 2darray
Approximate solution at timepoints given by `tsteps`. Format is
`Y[:,tk]` approximate solution at time `tk`
Thus `Y` is a `numstate`-by-`timesteps` array
Notes
-----
The general form of an SDE with additive (non-)autonomous scalar noise is
.. math:: (1) \qquad dX_t = A(X_t,t)dt + B(t)dW_t, \quad X(t_0) = x_0
The code implements a two-fold approach to approximate solutions of (1). At each time-step
:math:`t_n` an explicit second order strong Runge--Kutta method (compare `rk_2`) is employed to
estimate the solution at time :math:`t_{n+1}` (predictor). This approximation is then used
in the implicit Runge--Kutta update formula of the same order (corrector).
The implicit Runge--Kutta method for solving the SDE (1) is described in Sec. 12.3 of
Kloeden, P.E., & Platen, E. (1999). `Numerical Solution of Stochastic Differential Equations.`
Berlin: Springer. For details on the explicit Runge--Kutta formula see the documentation of `rk_2`.
Examples
--------
Consider the SDE system
.. math::
dV_t & = -\alpha \sin(V_t) \cos(t Z_t) + t \beta dW_t,\\
dZ_t & = -\alpha \cos(t V_t) \sin(Z_t) + t \gamma dW_t,\\
V_{t_0} & = 0.5, \quad Z_{t_0} = -0.5, \quad t_0 = 1,
thus with :math:`X_t = (V_t,Z_t)` we have
.. math::
A(X_t,t) & = (-\alpha \sin(V_t) \cos(t Z_t),-\alpha \cos(t V_t) \sin(Z_t)),\\
B(t) & = (t \beta,t \gamma).
Hence `func` would look like this:
::
import numpy as np
def myrhs(Xt,t,alpha=0.2,beta=0.01,gamma=0.02):
A = np.array([-alpha*np.sin(Xt[0])*np.cos(t*Xt[1]),
-alpha*np.cos(t*Xt[0])*np.sin(Xt[1])])
B = np.array([t*beta,t*gamma])
return A,B
Thus, the full call to `pc_2` to approximate the SDE system on :math:`[t_0,2]` could be
something like (assuming the function `myrhs` is defined in `myrhs.py`)
>>> import numpy as np
>>> from sde_solvers import pc_2
>>> from myrhs import myrhs
>>> Xt = pc_2(myrhs,np.array([0.5,-0.5]),np.arange(1,2,1e-3),beta=.02)
Hence we used :math:`\beta = 0.02` in `myrhs` instead of the default value 0.01.
See also
--------
pc_15 : a lower order (but faster) implicit solver
"""
# Check for correctness of input and allocate common tmp variables
Y,dt,sqrtdt,zeta1,zeta2 = checkinput(func,x0,tsteps)
# Compute Stratonovich integral approximation
J,DW,DZ = get_stratonovich(strato_p,strato_q,dt,zeta1,tsteps.size-1)
# More temp variables
dt1 = dt**(-1)
dt2 = 0.5*dt
DWZ = dt*DW - DZ
DZZ = 0.5*DZ + 0.25*dt*DW
# This is going to be multiplied by the diffusion term
Jtmp = np.sqrt(np.abs(2*dt*J - DZ**2))
Jplus = dt1*(DZ + Jtmp)
Jminus = dt1*(DZ - Jtmp)
Jtmp = np.sqrt(np.abs(dt*J - 0.25*DZ**2 + 0.125*dt**2*(DW**2 + 0.5*(2*DZ*dt**(-1) - DW)**2)))
Jplus1 = dt*(DZZ + Jtmp)
Jminus1 = dt*(DZZ - Jtmp)
# Compute solution recursively
for n in xrange(tsteps.size - 1):
# Get drift/diffusion from func
A, B = func(Y[:,n], tsteps[n],**kwargs)
Atmp = Y[:,n] + A*dt2
Aplus = func(Atmp + B*Jplus[n], tsteps[n] + dt2,**kwargs)[0]
Aminus = func(Atmp + B*Jminus[n], tsteps[n] + dt2,**kwargs)[0]
Aplus1 = func(Atmp + B*Jplus1[n], tsteps[n],**kwargs)[0]
Aminus1 = func(Atmp + B*Jminus1[n], tsteps[n],**kwargs)[0]
B1 = func(Y[:,n], tsteps[n+1],**kwargs)[1]
Atmp = (Aplus + Aminus)*dt2
# Predictor
yt = Y[:,n] + Atmp + B*DW[n] + dt1*(B1 - B)*DWZ[n]
# Evaluate drift at predicted yt
A1 = func(yt, tsteps[n], **kwargs)[0]
# Corrector
Y[:,n+1] = yt - Atmp + dt*(Aplus1 + Aminus1 - 0.5*(A1 + A))
return Y
def get_stratonovich(p,q,dt,zeta1,tsize):
"""
Function used internally by the second order solvers
to approximate multiple Stratonovich integrals
"""
# Sanity checks
    try:
        ptest = (p == int(p))
    except (TypeError, ValueError):
        raise TypeError("Input strato_p must be an integer!")
    if not ptest:
        raise ValueError("Input strato_p must be an integer!")
    if p <= 0:
        raise ValueError("Input strato_p must be >0!")
    try:
        qtest = (q == int(q))
    except (TypeError, ValueError):
        raise TypeError("Input strato_q must be an integer!")
    if not qtest:
        raise ValueError("Input strato_q must be an integer!")
    if q <= 1:
        raise ValueError("Input strato_q must be >1!")
# Coefficients for approximations below
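    # rho and al are the tail-correction constants of the truncated (order p)
    # Fourier expansion of the Brownian bridge; see Kloeden & Platen, Ch. 10,
    # for the underlying series.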
rho = 0
for r in xrange(1,p+1):
rho += r**(-2)
rho = 1/12 - (2*np.pi**2)**(-1)*rho
al = 0
for r in xrange(1,p+1):
al += r**(-4)
al = np.pi**2/180 - (2*np.pi**2)**(-1)*al
# Allocate memory for random variables
Xi = np.zeros((p,tsize))
Eta = np.zeros((p,tsize))
# Generate standard Gaussian variables
sc = np.sqrt(dt/(2*np.pi**2))
dttmp = np.sqrt(2/dt)*np.pi
for r in xrange(1,p+1):
Xi[r-1,:] = dttmp*norm.rvs(size=(tsize,),loc=0,scale=sc*r**(-1))
Eta[r-1,:] = dttmp*norm.rvs(size=(tsize,),loc=0,scale=sc*r**(-1))
mu = np.zeros((tsize,))
phi = np.zeros((tsize,))
for r in xrange(p+1,p+2+q):
mu += norm.rvs(size=(tsize,),loc=0,scale=sc*r**(-1))
phi += norm.rvs(size=(tsize,),loc=0,scale=sc*r**(-2))
mu = np.sqrt(dt*rho)**(-1)*mu
phi = np.sqrt(dt*al)**(-1)*phi
# Approximation of Stratonovich stochastic integrals
a10 = np.zeros((tsize,))
for r in xrange(1,p+1):
a10 += r**(-1)*Xi[r-1,:]
a10 = -np.pi**(-1)*np.sqrt(2*dt)*a10 - 2*np.sqrt(dt*rho)*mu
b1 = np.zeros((tsize,))
for r in xrange(1,p+1):
b1 += r**(-2)*Eta[r-1,:]
b1 = np.sqrt(0.5*dt)*b1 + np.sqrt(dt*al)*phi
C = np.zeros((tsize,))
for r in xrange(1,p+1):
for l in xrange(1,p+1):
if r != l:
C += r/(r**2 - l**2)*(l**(-1)*Xi[r-1,:]*Xi[l-1,:] - l/r*Eta[r-1,:]*Eta[l-1,:])
C = -(2*np.pi**2)**(-1)*C
# Everything so far was done to compute this monster: a double Stratonovich integral...
J = 1/6*dt**2*zeta1**2 + 0.25*dt*a10**2 - (2*np.pi)**(-1)*dt**(1.5)*zeta1*b1 \
+ 0.25*dt**(1.5)*a10*zeta1 - dt**2*C
# zeta1 = zeta1*sqrtdt**(-1)
DW = zeta1*np.sqrt(dt)
DZ = 0.5*dt*(zeta1*np.sqrt(dt) + a10)
return J, DW, DZ
def checkinput(func,x0,tsteps):
"""
Function used internally by all solvers of this module to perform sanity checks and allocate stuff
"""
# Sanity checks
if type(func).__name__ != 'function' and type(func).__name__ != 'builtin_function_or_method':
raise TypeError("First argument has to be a valid Python function!")
try:
x0s = x0.shape
except:
raise TypeError("Input x0 must be a NumPy 1darray, not "+type(x0).__name__+"!")
if len(x0s) > 2 or (len(x0s)==2 and min(x0s)>1):
raise ValueError("Input x0 must be a NumPy 1darray!")
if np.isnan(x0).max()==True or np.isinf(x0).max()==True or np.isreal(x0).min()==False:
raise ValueError('Input x0 must be a real valued NumPy array without Infs or NaNs!')
try:
tstepss = tsteps.shape
except:
raise TypeError("Input tsteps must be a NumPy 1darray, not "+type(tsteps).__name__+"!")
if len(tstepss) > 1:
raise ValueError("Input tsteps must be a NumPy 1darray!")
if np.isnan(tsteps).max()==True or np.isinf(tsteps).max()==True or np.isreal(tsteps).min()==False:
raise ValueError('Input tsteps must be a real valued NumPy array without Infs or NaNs!')
# Allocate temp variables
Y = np.zeros((x0.size,tsteps.size))
# First component of solution is IC
Y[:,0] = x0
# Time step size and its square root (=std of stochastic terms)
dt = tsteps[1] - tsteps[0]
sqrtdt = np.sqrt(dt)
# Generate i.i.d. normal random variables with mean=0 (loc) and std=1 (scale) (Var=std^2)
zeta1 = norm.rvs(size=(tsteps.size-1,),loc=0,scale=1)
zeta2 = norm.rvs(size=(tsteps.size-1,),loc=0,scale=1)
return Y,dt,sqrtdt,zeta1,zeta2
| pantaray/Analytic-Tools | sde_solvers.py | Python | gpl-3.0 | 29,346 | 0.015743 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import hypothesis.strategies as st
from caffe2.python import core, workspace
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import numpy as np
class TestNGramOps(hu.HypothesisTestCase):
@given(
seed=st.integers(0, 2**32 - 1),
N=st.integers(min_value=10, max_value=100),
D=st.integers(min_value=2, max_value=10),
out_of_vcb=st.floats(min_value=0, max_value=0.5),
max_categorical_limit=st.integers(min_value=5, max_value=20),
max_in_vcb_val=st.integers(min_value=1000, max_value=10000),
**hu.gcs_cpu_only
)
def test_ngram_from_categorical_op(
self,
seed,
N,
D,
out_of_vcb,
max_categorical_limit,
max_in_vcb_val,
gc,
dc,
):
np.random.seed(seed)
col_num = max(int(D / 2), 1)
col_ids = np.random.choice(D, col_num, False).astype(np.int32)
categorical_limits = np.random.randint(
2, high=max_categorical_limit, size=col_num
).astype(np.int32)
vcb = [
np.random.choice(max_in_vcb_val, x, False)
for x in categorical_limits
]
vals = np.array([x for l in vcb for x in l], dtype=np.int32)
# Enforce round(floats) to be negative.
floats = np.random.rand(N, D).astype(np.float32) - 2
expected_output = []
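        # Build the expected n-gram ids by mixed-radix encoding: column k
        # contributes r * prod(categorical_limits[:k]) whenever the random draw
        # replaces the float with an in-vocabulary value (prob. 1 - out_of_vcb).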
for i in range(N):
val = 0
for (k, j) in enumerate(col_ids):
base = np.prod(categorical_limits[:k])
r = np.random.randint(categorical_limits[k])
p = np.random.rand()
if p > out_of_vcb:
val += base * r
floats[i][j] = vcb[k][r]
expected_output.append(val)
expected_output = np.array(expected_output, dtype=np.int32)
workspace.ResetWorkspace()
workspace.FeedBlob('floats', floats)
op = core.CreateOperator(
"NGramFromCategorical",
['floats'],
['output'],
col_ids=col_ids,
categorical_limits=categorical_limits,
vals=vals,
)
workspace.RunOperatorOnce(op)
output = workspace.blobs['output']
np.testing.assert_array_equal(output, expected_output)
| ryfeus/lambda-packs | pytorch/source/caffe2/python/operator_test/ngram_ops_test.py | Python | mit | 2,472 | 0.000405 |
#!/usr/bin/env python
"""
Custom test runner
If args or options, we run the testsuite as quickly as possible.
If args but no options, we default to using the spec plugin and aborting on
first error/failure.
If options, we ignore defaults and pass options onto Nose.
Examples:
Run all tests (as fast as possible)
$ ./runtests.py
Run all unit tests (using spec output)
$ ./runtests.py tests/unit
Run all checkout unit tests (using spec output)
$ ./runtests.py tests/unit/checkout
Run all tests relating to shipping
$ ./runtests.py --attr=shipping
Re-run failing tests (needs to be run twice to first build the index)
$ ./runtests.py ... --failed
Drop into pdb when a test fails
$ ./runtests.py ... --pdb-failures
"""
import sys
import logging
import warnings
from tests.config import configure
from django.utils.six.moves import map
# No logging
logging.disable(logging.CRITICAL)
def run_tests(verbosity, *test_args):
from django_nose import NoseTestSuiteRunner
test_runner = NoseTestSuiteRunner(verbosity=verbosity)
if not test_args:
test_args = ['tests']
num_failures = test_runner.run_tests(test_args)
if num_failures:
sys.exit(num_failures)
if __name__ == '__main__':
args = sys.argv[1:]
verbosity = 1
if not args:
# If run with no args, try and run the testsuite as fast as possible.
# That means across all cores and with no high-falutin' plugins.
import multiprocessing
try:
num_cores = multiprocessing.cpu_count()
except NotImplementedError:
num_cores = 4 # Guess
args = ['--nocapture', '--stop', '--processes=%s' % num_cores]
else:
# Some args/options specified. Check to see if any nose options have
# been specified. If they have, then don't set any
has_options = any(map(lambda x: x.startswith('--'), args))
if not has_options:
# Default options:
# --stop Abort on first error/failure
# --nocapture Don't capture STDOUT
args.extend(['--nocapture', '--stop'])
else:
# Remove options as nose will pick these up from sys.argv
for arg in args:
if arg.startswith('--verbosity'):
verbosity = int(arg[-1])
args = [arg for arg in args if not arg.startswith('-')]
configure()
with warnings.catch_warnings():
# The warnings module in default configuration will never cause tests
# to fail, as it never raises an exception. We alter that behaviour by
# turning DeprecationWarnings into exceptions, but exclude warnings
# triggered by third-party libs. Note: The context manager is not thread
# safe. Behaviour with multiple threads is undefined.
warnings.filterwarnings('error', category=DeprecationWarning)
warnings.filterwarnings('error', category=RuntimeWarning)
libs = r'(sorl\.thumbnail.*|bs4.*|webtest.*)'
warnings.filterwarnings(
'ignore', r'.*', DeprecationWarning, libs)
run_tests(verbosity, *args)
| kapt/django-oscar | runtests.py | Python | bsd-3-clause | 3,114 | 0.000321 |
"""Application specific storage."""
import wx
from sound_lib.output import Output
from gmusicapi import Mobileclient
name = 'GMP3'
__version__ = '4.3.0'
db_version = 1
url = 'https://github.com/chrisnorman7/gmp3'
app = wx.App(False)
app.SetAppName(name)
paths = wx.StandardPaths.Get()
output = Output()
api = Mobileclient()
api.android_id = '123456789abcde'
frame = None # The main window.
track = None # The current track.
stream = None # The stream of the currently playing track.
library_size = 0 # The size of the library in bytes.
# Prevent the killer bug that makes the timer try and pop up billions of login
# windows:
logging_in = False
| chrisnorman7/gmp3 | application.py | Python | mpl-2.0 | 686 | 0 |
from decimal import Decimal
import ddt
from babel.numbers import format_currency
from django.conf import settings
from django.utils.translation import get_language, to_locale
from oscar.core.loading import get_model
from oscar.test.factories import * # pylint:disable=wildcard-import,unused-wildcard-import
from ecommerce.courses.tests.factories import CourseFactory
from ecommerce.extensions.catalogue.tests.mixins import CourseCatalogTestMixin
from ecommerce.extensions.offer.utils import _remove_exponent_and_trailing_zeros, format_benefit_value
from ecommerce.tests.testcases import TestCase
Benefit = get_model('offer', 'Benefit')
@ddt.ddt
class UtilTests(CourseCatalogTestMixin, TestCase):
def setUp(self):
super(UtilTests, self).setUp()
self.course = CourseFactory()
self.verified_seat = self.course.create_or_update_seat('verified', False, 100, self.partner)
self.stock_record = StockRecord.objects.filter(product=self.verified_seat).first()
self.seat_price = self.stock_record.price_excl_tax
self._range = RangeFactory(products=[self.verified_seat, ])
self.percentage_benefit = BenefitFactory(type=Benefit.PERCENTAGE, range=self._range, value=35.00)
self.value_benefit = BenefitFactory(type=Benefit.FIXED, range=self._range, value=self.seat_price - 10)
def test_format_benefit_value(self):
""" format_benefit_value(benefit) should format benefit value based on benefit type """
benefit_value = format_benefit_value(self.percentage_benefit)
self.assertEqual(benefit_value, '35%')
benefit_value = format_benefit_value(self.value_benefit)
expected_benefit = format_currency(
Decimal((self.seat_price - 10)), settings.OSCAR_DEFAULT_CURRENCY, format=u'#,##0.00',
locale=to_locale(get_language()))
self.assertEqual(benefit_value, '${expected_benefit}'.format(expected_benefit=expected_benefit))
@ddt.data(
('1.0', '1'),
('5000.0', '5000'),
('1.45000', '1.45'),
('5000.40000', '5000.4'),
)
@ddt.unpack
def test_remove_exponent_and_trailing_zeros(self, value, expected):
"""
_remove_exponent_and_trailing_zeros(decimal) should remove exponent and trailing zeros
from decimal number
"""
decimal = _remove_exponent_and_trailing_zeros(Decimal(value))
self.assertEqual(decimal, Decimal(expected))
| mferenca/HMS-ecommerce | ecommerce/extensions/offer/tests/test_utils.py | Python | agpl-3.0 | 2,446 | 0.004088 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Stephen Fromm <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: group
version_added: "0.0.2"
short_description: Add or remove groups
requirements:
- groupadd
- groupdel
- groupmod
description:
- Manage presence of groups on a host.
- For Windows targets, use the M(win_group) module instead.
options:
name:
description:
- Name of the group to manage.
type: str
required: true
gid:
description:
- Optional I(GID) to set for the group.
type: int
state:
description:
- Whether the group should be present or not on the remote host.
type: str
choices: [ absent, present ]
default: present
system:
description:
- If I(yes), indicates that the group created is a system group.
type: bool
default: no
local:
description:
- Forces the use of "local" command alternatives on platforms that implement it.
- This is useful in environments that use centralized authentication when you want to manipulate the local groups.
(e.g. it uses C(lgroupadd) instead of C(groupadd)).
- This requires that these commands exist on the targeted host, otherwise it will be a fatal error.
type: bool
default: no
version_added: "2.6"
non_unique:
description:
        - This option allows changing the group ID to a non-unique value. Requires C(gid).
- Not supported on macOS or BusyBox distributions.
type: bool
default: no
version_added: "2.8"
seealso:
- module: user
- module: win_group
author:
- Stephen Fromm (@sfromm)
'''
EXAMPLES = '''
- name: Ensure group "somegroup" exists
group:
name: somegroup
state: present
- name: Ensure group "docker" exists with correct gid
group:
name: docker
state: present
gid: 1750
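- name: Ensure system group "monitoring" exists with a non-unique gid (illustrative values)
  group:
    name: monitoring
    state: present
    system: yes
    gid: 999
    non_unique: yes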
'''
RETURN = r'''
gid:
description: Group ID of the group.
returned: When C(state) is 'present'
type: int
sample: 1001
name:
description: Group name
returned: always
type: str
sample: users
state:
description: Whether the group is present or not
returned: always
type: str
sample: 'absent'
system:
description: Whether the group is a system group or not
returned: When C(state) is 'present'
type: bool
sample: False
'''
import grp
import os
from ansible.module_utils._text import to_bytes
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.sys_info import get_platform_subclass
class Group(object):
"""
This is a generic Group manipulation class that is subclassed
based on platform.
A subclass may wish to override the following action methods:-
- group_del()
- group_add()
- group_mod()
All subclasses MUST define platform and distribution (which may be None).
"""
platform = 'Generic'
distribution = None
GROUPFILE = '/etc/group'
def __new__(cls, *args, **kwargs):
new_cls = get_platform_subclass(Group)
return super(cls, new_cls).__new__(new_cls)
def __init__(self, module):
self.module = module
self.state = module.params['state']
self.name = module.params['name']
self.gid = module.params['gid']
self.system = module.params['system']
self.local = module.params['local']
self.non_unique = module.params['non_unique']
def execute_command(self, cmd):
return self.module.run_command(cmd)
def group_del(self):
if self.local:
command_name = 'lgroupdel'
else:
command_name = 'groupdel'
cmd = [self.module.get_bin_path(command_name, True), self.name]
return self.execute_command(cmd)
def _local_check_gid_exists(self):
if self.gid:
for gr in grp.getgrall():
if self.gid == gr.gr_gid and self.name != gr.gr_name:
self.module.fail_json(msg="GID '{0}' already exists with group '{1}'".format(self.gid, gr.gr_name))
def group_add(self, **kwargs):
if self.local:
command_name = 'lgroupadd'
self._local_check_gid_exists()
else:
command_name = 'groupadd'
cmd = [self.module.get_bin_path(command_name, True)]
for key in kwargs:
if key == 'gid' and kwargs[key] is not None:
cmd.append('-g')
cmd.append(str(kwargs[key]))
if self.non_unique:
cmd.append('-o')
elif key == 'system' and kwargs[key] is True:
cmd.append('-r')
cmd.append(self.name)
return self.execute_command(cmd)
def group_mod(self, **kwargs):
if self.local:
command_name = 'lgroupmod'
self._local_check_gid_exists()
else:
command_name = 'groupmod'
cmd = [self.module.get_bin_path(command_name, True)]
info = self.group_info()
for key in kwargs:
if key == 'gid':
if kwargs[key] is not None and info[2] != int(kwargs[key]):
cmd.append('-g')
cmd.append(str(kwargs[key]))
if self.non_unique:
cmd.append('-o')
if len(cmd) == 1:
return (None, '', '')
if self.module.check_mode:
return (0, '', '')
cmd.append(self.name)
return self.execute_command(cmd)
def group_exists(self):
# The grp module does not distinguish between local and directory accounts.
        # Its output cannot be used to determine whether or not a group exists locally.
# It returns True if the group exists locally or in the directory, so instead
# look in the local GROUP file for an existing account.
if self.local:
if not os.path.exists(self.GROUPFILE):
self.module.fail_json(msg="'local: true' specified but unable to find local group file {0} to parse.".format(self.GROUPFILE))
exists = False
name_test = '{0}:'.format(self.name)
with open(self.GROUPFILE, 'rb') as f:
reversed_lines = f.readlines()[::-1]
for line in reversed_lines:
if line.startswith(to_bytes(name_test)):
exists = True
break
if not exists:
self.module.warn(
"'local: true' specified and group was not found in {file}. "
"The local group may already exist if the local group database exists somewhere other than {file}.".format(file=self.GROUPFILE))
return exists
else:
try:
if grp.getgrnam(self.name):
return True
except KeyError:
return False
def group_info(self):
if not self.group_exists():
return False
try:
info = list(grp.getgrnam(self.name))
except KeyError:
return False
return info
# ===========================================
class SunOS(Group):
"""
This is a SunOS Group manipulation class. Solaris doesn't have
the 'system' group concept.
This overrides the following methods from the generic class:-
- group_add()
"""
platform = 'SunOS'
distribution = None
GROUPFILE = '/etc/group'
def group_add(self, **kwargs):
cmd = [self.module.get_bin_path('groupadd', True)]
for key in kwargs:
if key == 'gid' and kwargs[key] is not None:
cmd.append('-g')
cmd.append(str(kwargs[key]))
if self.non_unique:
cmd.append('-o')
cmd.append(self.name)
return self.execute_command(cmd)
# ===========================================
class AIX(Group):
"""
This is a AIX Group manipulation class.
This overrides the following methods from the generic class:-
- group_del()
- group_add()
- group_mod()
"""
platform = 'AIX'
distribution = None
GROUPFILE = '/etc/group'
def group_del(self):
cmd = [self.module.get_bin_path('rmgroup', True), self.name]
return self.execute_command(cmd)
def group_add(self, **kwargs):
cmd = [self.module.get_bin_path('mkgroup', True)]
for key in kwargs:
if key == 'gid' and kwargs[key] is not None:
cmd.append('id=' + str(kwargs[key]))
elif key == 'system' and kwargs[key] is True:
cmd.append('-a')
cmd.append(self.name)
return self.execute_command(cmd)
def group_mod(self, **kwargs):
cmd = [self.module.get_bin_path('chgroup', True)]
info = self.group_info()
for key in kwargs:
if key == 'gid':
if kwargs[key] is not None and info[2] != int(kwargs[key]):
cmd.append('id=' + str(kwargs[key]))
if len(cmd) == 1:
return (None, '', '')
if self.module.check_mode:
return (0, '', '')
cmd.append(self.name)
return self.execute_command(cmd)
# ===========================================
class FreeBsdGroup(Group):
"""
This is a FreeBSD Group manipulation class.
This overrides the following methods from the generic class:-
- group_del()
- group_add()
- group_mod()
"""
platform = 'FreeBSD'
distribution = None
GROUPFILE = '/etc/group'
def group_del(self):
cmd = [self.module.get_bin_path('pw', True), 'groupdel', self.name]
return self.execute_command(cmd)
def group_add(self, **kwargs):
cmd = [self.module.get_bin_path('pw', True), 'groupadd', self.name]
if self.gid is not None:
cmd.append('-g')
cmd.append(str(self.gid))
if self.non_unique:
cmd.append('-o')
return self.execute_command(cmd)
def group_mod(self, **kwargs):
cmd = [self.module.get_bin_path('pw', True), 'groupmod', self.name]
info = self.group_info()
cmd_len = len(cmd)
if self.gid is not None and int(self.gid) != info[2]:
cmd.append('-g')
cmd.append(str(self.gid))
if self.non_unique:
cmd.append('-o')
# modify the group if cmd will do anything
if cmd_len != len(cmd):
if self.module.check_mode:
return (0, '', '')
return self.execute_command(cmd)
return (None, '', '')
class DragonFlyBsdGroup(FreeBsdGroup):
"""
This is a DragonFlyBSD Group manipulation class.
It inherits all behaviors from FreeBsdGroup class.
"""
platform = 'DragonFly'
# ===========================================
class DarwinGroup(Group):
"""
This is a Mac macOS Darwin Group manipulation class.
This overrides the following methods from the generic class:-
- group_del()
- group_add()
- group_mod()
    Group manipulation is done using dseditgroup(1).
"""
platform = 'Darwin'
distribution = None
def group_add(self, **kwargs):
cmd = [self.module.get_bin_path('dseditgroup', True)]
cmd += ['-o', 'create']
if self.gid is not None:
cmd += ['-i', str(self.gid)]
elif 'system' in kwargs and kwargs['system'] is True:
gid = self.get_lowest_available_system_gid()
if gid is not False:
self.gid = str(gid)
cmd += ['-i', str(self.gid)]
cmd += ['-L', self.name]
(rc, out, err) = self.execute_command(cmd)
return (rc, out, err)
def group_del(self):
cmd = [self.module.get_bin_path('dseditgroup', True)]
cmd += ['-o', 'delete']
cmd += ['-L', self.name]
(rc, out, err) = self.execute_command(cmd)
return (rc, out, err)
def group_mod(self, gid=None):
info = self.group_info()
if self.gid is not None and int(self.gid) != info[2]:
cmd = [self.module.get_bin_path('dseditgroup', True)]
cmd += ['-o', 'edit']
if gid is not None:
cmd += ['-i', str(gid)]
cmd += ['-L', self.name]
(rc, out, err) = self.execute_command(cmd)
return (rc, out, err)
return (None, '', '')
def get_lowest_available_system_gid(self):
# check for lowest available system gid (< 500)
try:
cmd = [self.module.get_bin_path('dscl', True)]
cmd += ['/Local/Default', '-list', '/Groups', 'PrimaryGroupID']
(rc, out, err) = self.execute_command(cmd)
lines = out.splitlines()
highest = 0
for group_info in lines:
parts = group_info.split(' ')
if len(parts) > 1:
gid = int(parts[-1])
if gid > highest and gid < 500:
highest = gid
if highest == 0 or highest == 499:
return False
return (highest + 1)
except Exception:
return False
class OpenBsdGroup(Group):
"""
This is a OpenBSD Group manipulation class.
This overrides the following methods from the generic class:-
- group_del()
- group_add()
- group_mod()
"""
platform = 'OpenBSD'
distribution = None
GROUPFILE = '/etc/group'
def group_del(self):
cmd = [self.module.get_bin_path('groupdel', True), self.name]
return self.execute_command(cmd)
def group_add(self, **kwargs):
cmd = [self.module.get_bin_path('groupadd', True)]
if self.gid is not None:
cmd.append('-g')
cmd.append(str(self.gid))
if self.non_unique:
cmd.append('-o')
cmd.append(self.name)
return self.execute_command(cmd)
def group_mod(self, **kwargs):
cmd = [self.module.get_bin_path('groupmod', True)]
info = self.group_info()
if self.gid is not None and int(self.gid) != info[2]:
cmd.append('-g')
cmd.append(str(self.gid))
if self.non_unique:
cmd.append('-o')
        if len(cmd) == 1:
            # no modification options were appended, so there is nothing to do
            return (None, '', '')
if self.module.check_mode:
return (0, '', '')
cmd.append(self.name)
return self.execute_command(cmd)
# ===========================================
class NetBsdGroup(Group):
"""
This is a NetBSD Group manipulation class.
This overrides the following methods from the generic class:-
- group_del()
- group_add()
- group_mod()
"""
platform = 'NetBSD'
distribution = None
GROUPFILE = '/etc/group'
def group_del(self):
cmd = [self.module.get_bin_path('groupdel', True), self.name]
return self.execute_command(cmd)
def group_add(self, **kwargs):
cmd = [self.module.get_bin_path('groupadd', True)]
if self.gid is not None:
cmd.append('-g')
cmd.append(str(self.gid))
if self.non_unique:
cmd.append('-o')
cmd.append(self.name)
return self.execute_command(cmd)
def group_mod(self, **kwargs):
cmd = [self.module.get_bin_path('groupmod', True)]
info = self.group_info()
if self.gid is not None and int(self.gid) != info[2]:
cmd.append('-g')
cmd.append(str(self.gid))
if self.non_unique:
cmd.append('-o')
        if len(cmd) == 1:
            # no modification options were appended, so there is nothing to do
            return (None, '', '')
if self.module.check_mode:
return (0, '', '')
cmd.append(self.name)
return self.execute_command(cmd)
# ===========================================
class BusyBoxGroup(Group):
"""
BusyBox group manipulation class for systems that have addgroup and delgroup.
It overrides the following methods:
- group_add()
- group_del()
- group_mod()
"""
def group_add(self, **kwargs):
cmd = [self.module.get_bin_path('addgroup', True)]
if self.gid is not None:
cmd.extend(['-g', str(self.gid)])
if self.system:
cmd.append('-S')
cmd.append(self.name)
return self.execute_command(cmd)
def group_del(self):
cmd = [self.module.get_bin_path('delgroup', True), self.name]
return self.execute_command(cmd)
def group_mod(self, **kwargs):
# Since there is no groupmod command, modify /etc/group directly
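        # /etc/group entries have the form "name:password:gid:members"; the
        # code below rewrites the "name:x:gid:" prefix in place.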
info = self.group_info()
if self.gid is not None and self.gid != info[2]:
with open('/etc/group', 'rb') as f:
b_groups = f.read()
b_name = to_bytes(self.name)
b_current_group_string = b'%s:x:%d:' % (b_name, info[2])
b_new_group_string = b'%s:x:%d:' % (b_name, self.gid)
            # refuse to change the gid if it is already taken in /etc/group
            if b':%d:' % self.gid in b_groups:
                self.module.fail_json(msg="gid '{gid}' in use".format(gid=self.gid))
if self.module.check_mode:
return 0, '', ''
b_new_groups = b_groups.replace(b_current_group_string, b_new_group_string)
with open('/etc/group', 'wb') as f:
f.write(b_new_groups)
return 0, '', ''
return None, '', ''
class AlpineGroup(BusyBoxGroup):
platform = 'Linux'
distribution = 'Alpine'
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(type='str', default='present', choices=['absent', 'present']),
name=dict(type='str', required=True),
gid=dict(type='int'),
system=dict(type='bool', default=False),
local=dict(type='bool', default=False),
non_unique=dict(type='bool', default=False),
),
supports_check_mode=True,
        required_if=[
            # non_unique=True is only meaningful together with an explicit gid
            ['non_unique', True, ['gid']],
        ],
)
group = Group(module)
module.debug('Group instantiated - platform %s' % group.platform)
if group.distribution:
module.debug('Group instantiated - distribution %s' % group.distribution)
rc = None
out = ''
err = ''
result = {}
result['name'] = group.name
result['state'] = group.state
if group.state == 'absent':
if group.group_exists():
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = group.group_del()
if rc != 0:
module.fail_json(name=group.name, msg=err)
elif group.state == 'present':
if not group.group_exists():
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = group.group_add(gid=group.gid, system=group.system)
else:
(rc, out, err) = group.group_mod(gid=group.gid)
if rc is not None and rc != 0:
module.fail_json(name=group.name, msg=err)
if rc is None:
result['changed'] = False
else:
result['changed'] = True
if out:
result['stdout'] = out
if err:
result['stderr'] = err
if group.group_exists():
info = group.group_info()
result['system'] = group.system
result['gid'] = info[2]
module.exit_json(**result)
if __name__ == '__main__':
main()
| indrajitr/ansible | lib/ansible/modules/group.py | Python | gpl-3.0 | 19,765 | 0.001164 |
import pathlib
import meshio
import numpy as np
import pytest
import meshplex
this_dir = pathlib.Path(__file__).resolve().parent
@pytest.mark.parametrize(
"points,cells,ref",
[
# line
([[0.0], [0.35]], [[0, 1]], [0.35]),
([[0.0], [0.35]], [[1, 0]], [-0.35]),
# triangle
([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]], [[0, 1, 2]], [0.5]),
([[0.0, 0.0], [0.0, 1.0], [1.0, 0.0]], [[0, 1, 2]], [-0.5]),
(
[[0.0, 0.0], [1.0, 0.0], [1.1, 1.0], [0.0, 1.0]],
[[0, 1, 2], [0, 3, 2]],
[0.5, -0.55],
),
# tetra
(
[[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]],
[[0, 1, 2, 3]],
[1 / 6],
),
(
[[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]],
[[0, 1, 3, 2]],
[-1 / 6],
),
],
)
def test_signed_area(points, cells, ref):
mesh = meshplex.Mesh(points, cells)
ref = np.array(ref)
assert mesh.signed_cell_volumes.shape == ref.shape
assert np.all(
np.abs(ref - mesh.signed_cell_volumes) < np.abs(ref) * 1.0e-13 + 1.0e-13
)
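# Note on the reference values above: swapping two nodes of a cell (an odd
# permutation) flips the sign of its signed volume, as the triangle and
# tetrahedron cases with permuted node order illustrate.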
def test_signed_area_pacman():
mesh = meshio.read(this_dir / "meshes" / "pacman.vtu")
assert np.all(np.abs(mesh.points[:, 2]) < 1.0e-15)
X = mesh.points[:, :2]
mesh = meshplex.Mesh(X, mesh.get_cells_type("triangle"))
vols = mesh.signed_cell_volumes
# all cells are positively oriented in this mesh
assert np.all(mesh.signed_cell_volumes > 0.0)
assert np.all(abs(abs(vols) - mesh.cell_volumes) < 1.0e-12 * mesh.cell_volumes)
| nschloe/voropy | tests/test_signed_area.py | Python | mit | 1,654 | 0.002418 |
import json, codecs, re
from abc import ABCMeta, abstractmethod
from PIL import Image, ExifTags
from witica.util import throw, sstr, suni
#regular expressions regarding item ids
RE_METAFILE = r'^meta\/[^\n]+$'
RE_FIRST_ITEMID = r'(?!meta\/)[^\n?@.]+'
RE_ITEMFILE_EXTENSION = r'[^\n?@\/]+'
RE_ITEMID = r'^' + RE_FIRST_ITEMID + '$'
RE_ITEMFILE = r'^' + RE_FIRST_ITEMID + '\.' + RE_ITEMFILE_EXTENSION + '$'
RE_ITEM_SPLIT_ITEMID_EXTENSION = r'^(' + RE_FIRST_ITEMID + ')\.(' + RE_ITEMFILE_EXTENSION + ')$'
RE_ITEM_REFERENCE = r'^!(?:.\/)?' + RE_FIRST_ITEMID + '$'
#regular expressions to be used for md files parsing
RE_MD_SPLIT_JSON_MD = "^\s*({[\s\S]*?})?[\s]*([^}\s][\s\S]*)$" #splits md file into the json metadata and markdown sections as capture groups
RE_MD_SPLIT_TITLE_BODY = "^(?:#(?!#)[\t ]*([\S][^\n\r]*)(?:\n|\r\n?|$))?([\s\S]*)$" #splits markdown section into title and body sections as capture groups
RE_MD_NOBRACKET = r'[^\]\[]*'
RE_MD_BRK = ( r'\[('
+ (RE_MD_NOBRACKET + r'(\[')*6
+ (RE_MD_NOBRACKET+ r'\])*')*6
+ RE_MD_NOBRACKET + r')\]' )
RE_MD_IMAGE_LINK = r'\!' + RE_MD_BRK + r'\s*\((?!\!)(<.*?>|([^")]+"[^"]*"|[^\)]*))\)'
#  or 
#RE_MD_ITEM_LINK = r'\!' + RE_MD_BRK + r'\s*\(\!(<.*?>|([^")]+"[^"]*"|[^\)]*))\)'
#  or 
RE_MD_ITEM_LINK = r'!({[\s\S]*?})?\((![\s\S]+?)\)'
# !{renderparametersjson}(!itemid)
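# A minimal sketch of how RE_MD_SPLIT_JSON_MD is meant to behave on a
# hypothetical source file (shown purely as documentation, not executed):
#
#   m = re.match(RE_MD_SPLIT_JSON_MD, u'{"tags": ["a"]}\n# Title\nBody')
#   m.group(1)  # -> u'{"tags": ["a"]}'   (JSON metadata section)
#   m.group(2)  # -> u'# Title\nBody'     (markdown section)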
registered_extractors = [];
def register(extension, extractor):
"""Register new metadata extractor for file extension"""
for (ext,extr) in registered_extractors:
if extension == ext:
raise ValueError("A metadata extractor for extension '" + extension + "' is already registered.")
#TODO: check type of extractor
registered_extractors.append((extension,extractor))
#print("registered: " + extension + " " + sstr(extractor))
def register_default_extractors():
register("item", JSONExtractor)
register("json", JSONExtractor)
register("md", MDExtractor)
register("txt", MDExtractor)
register("jpg", ImageExtractor)
register("jpeg", ImageExtractor)
def is_supported(extension):
for (ext,extractor) in registered_extractors:
if extension == ext:
return True
return False
def extract_metadata(filename):
extension = filename.rpartition(".")[2]
for (ext,extractor) in registered_extractors:
if extension == ext:
return extractor().extract_metadata(filename)
raise ValueError("Could not extract metadata, because a metadata extractor for extension '" + extension + "' is not registered.")
class MetadataExtractor(object):
__metaclass__ = ABCMeta
"""Abstract class representing a metadata extractor"""
supported_extensions = [];
def __init__(self):
pass
@abstractmethod
def extract_metadata(self, filename):
"""Extract metadata from filename and return metadata as json"""
pass
class JSONExtractor(MetadataExtractor):
__metaclass__ = ABCMeta
"""Extracts metadata from item or json file"""
supported_extensions = ["item", "json"];
def __init__(self):
pass
def extract_metadata(self, filename):
"""Extract metadata from filename and return metadata as json"""
f = codecs.open(filename, mode="r", encoding="utf-8")
return json.loads(f.read())
class MDExtractor(MetadataExtractor):
__metaclass__ = ABCMeta
"""Extracts metadata from markdown file"""
supported_extensions = ["md", "txt"];
def __init__(self):
pass
def extract_metadata(self, filename):
try:
meta = {}
#split into json and markdown part
f = codecs.open(filename, mode="r", encoding="utf-8")
match = re.match(RE_MD_SPLIT_JSON_MD,f.read())
f.close()
if not match:
raise IOError("Extracting metadata from file '" + sstr(filename) + "' failed. Could not split JSON and markdown parts.")
jsonstr, mdstr = match.groups()
#get title string (first heading in markdown string) if available
title = re.match(RE_MD_SPLIT_TITLE_BODY,mdstr).group(1)
if not title == None:
meta["title"] = title
#update with explicit json
if not jsonstr == None:
meta.update(json.loads(jsonstr))
return meta
		except Exception as e:
throw(IOError, "Extracting metadata from file '" + sstr(filename) + "' failed.", e)
class ImageExtractor(MetadataExtractor):
__metaclass__ = ABCMeta
"""Extracts metadata from markdown file"""
supported_extensions = ["jpg", "jpeg"];
def __init__(self):
pass
def extract_metadata(self, filename):
try:
meta = {"type": "image"}
img = Image.open(filename)
exif = {
ExifTags.TAGS[k]: v
for k, v in img._getexif().items()
if k in ExifTags.TAGS
}
if ("ImageDescription" in exif or "UserComment" in exif):
if "UserComment" in exif:
meta["title"] = exif["UserComment"]
if "ImageDescription" in exif:
meta["title"] = exif["ImageDescription"]
if ("Make" in exif or "Model" in exif):
meta["camera"] = (exif["Make"] if "Make" in exif else "") + " " + (exif["Model"] if "Model" in exif else "")
if ("Orientation" in exif):
meta["orientation"] = exif["Orientation"]
if ("Artist" in exif):
meta["author"] = exif["Artist"]
if ("DateTimeOriginal" in exif):
meta["created"] = exif["DateTimeOriginal"] #TODO: convert to unix time
if ("Flash" in exif):
meta["flash"] = exif["Flash"]
if ("GPSInfo" in exif):
lat, lon = self.get_lat_lon(exif["GPSInfo"])
if lat and lon:
meta["lat"] = lat
meta["lon"] = lon
return meta
		except Exception as e:
throw(IOError, "Extracting metadata from file '" + sstr(filename) + "' failed.", e)
	# The remaining functions in the ImageExtractor class are originally by Eran Sandler (MIT license), see https://gist.github.com/erans/983821
def _get_if_exist(self, data, key):
if key in data:
return data[key]
return None
def _convert_to_degress(self, value):
"""Helper function to convert the GPS coordinates stored in the EXIF to degress in float format"""
d0 = value[0][0]
d1 = value[0][1]
d = float(d0) / float(d1)
m0 = value[1][0]
m1 = value[1][1]
m = float(m0) / float(m1)
s0 = value[2][0]
s1 = value[2][1]
s = float(s0) / float(s1)
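		# EXIF stores each coordinate as (degrees, minutes, seconds) rational
		# pairs, e.g. ((52, 1), (30, 1), (0, 1)) -> 52 + 30/60 + 0/3600 = 52.5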
return d + (m / 60.0) + (s / 3600.0)
def get_lat_lon(self, gps_info_exif):
"""Returns the latitude and longitude, if available, from the provided exif_data (obtained through get_exif_data above)"""
lat = None
lon = None
gps_info = {
ExifTags.GPSTAGS[k]: v
for k, v in gps_info_exif.items()
if k in ExifTags.GPSTAGS
}
gps_latitude = self._get_if_exist(gps_info, "GPSLatitude")
gps_latitude_ref = self._get_if_exist(gps_info, 'GPSLatitudeRef')
gps_longitude = self._get_if_exist(gps_info, 'GPSLongitude')
gps_longitude_ref = self._get_if_exist(gps_info, 'GPSLongitudeRef')
if gps_latitude and gps_latitude_ref and gps_longitude and gps_longitude_ref:
lat = self._convert_to_degress(gps_latitude)
if gps_latitude_ref != "N":
lat = 0 - lat
lon = self._convert_to_degress(gps_longitude)
if gps_longitude_ref != "E":
lon = 0 - lon
		return lat, lon
| bitsteller/witica | witica/metadata/extractor.py | Python | mit | 7,072 | 0.035209 |
#!/usr/bin/env python
import os, sys
SCRIPTDIR = os.path.dirname(__file__)
ENGINDIR = os.path.join(SCRIPTDIR, '..', '..', 'engines')
sys.path.append(os.path.abspath(ENGINDIR))
from fpsl_cvxpy import map_inference
PROBLEMDIR = os.path.join(SCRIPTDIR, '..', '..', 'problems', 'paper_review')
sys.path.append(os.path.abspath(PROBLEMDIR))
from grounding import ground
from os.path import join as ojoin
def run_model(data_path, out_path):
rules, hard_rules, _, atoms = ground(data_path)
results = map_inference(rules, hard_rules)
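    # map_inference presumably returns a mapping from ground-atom variable ids
    # to inferred truth values; results[vid] is indexed that way below.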
reviews = atoms['review']
with open(ojoin(out_path, 'POSITIVEREVIEW.txt'), 'w') as f:
for (review, paper), (vid, _) in reviews.items():
print("'%s'\t'%s'\t%f"%(review, paper, results[vid]), file=f)
acceptable = atoms['acceptable']
with open(ojoin(out_path, 'ACCEPTABLE.txt'), 'w') as f:
for paper, (vid, _) in acceptable.items():
print("'%s'\t%f"%(paper, results[vid]), file=f)
presents = atoms['presents']
with open(ojoin(out_path, 'PRESENTS.txt'), 'w') as f:
for author, (vid, _) in presents.items():
print("'%s'\t%f"%(author, results[vid]), file=f)
if __name__ == '__main__':
data_path = ojoin(PROBLEMDIR, 'data', '1')
out_path = ojoin('output', 'fpsl_cvxpy')
run_model(data_path, out_path)
| gfarnadi/FairPSL | debug/compare_map/run_fpsl_cvxpy.py | Python | mit | 1,367 | 0.010241 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('printy', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='PostItModel',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('width', models.FloatField()),
('height', models.FloatField()),
],
),
migrations.AlterField(
model_name='postit',
name='print_page',
field=models.ForeignKey(related_name='posts', to='printy.PrintPage'),
),
migrations.AddField(
model_name='printpage',
name='post_it_model',
field=models.ForeignKey(default=1, to='printy.PostItModel'),
preserve_default=False,
),
]
| jdsolucoes/Ppostit | printy/migrations/0002_auto_20150921_2215.py | Python | apache-2.0 | 967 | 0.002068 |
from discuss.discuss.settings import *
##########################################################################
#
# Server settings
#
##########################################################################
ALLOWED_HOSTS = ["localhost"]
WSGI_APPLICATION = 'discuss.discuss.wsgi_production.application'
##########################################################################
#
# Database settings
#
##########################################################################
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(VAR_DIR, 'db', 'production_db.sqlite3'),
}
}
| siggame/discuss | discuss/discuss/production.py | Python | bsd-3-clause | 642 | 0 |
# -*- encoding: utf-8 -*-
#
# Copyright (c) 2013 Paul Tagliamonte <[email protected]>
# Copyright (c) 2013 Bob Tolbert <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import traceback
from clint.textui import colored
from hy._compat import PY3
class HyError(Exception):
"""
Generic Hy error. All internal Exceptions will be subclassed from this
Exception.
"""
pass
class HyCompileError(HyError):
def __init__(self, exception, traceback=None):
self.exception = exception
self.traceback = traceback
def __str__(self):
if isinstance(self.exception, HyTypeError):
return str(self.exception)
if self.traceback:
tb = "".join(traceback.format_tb(self.traceback)).strip()
else:
tb = "No traceback available. 😟"
return("Internal Compiler Bug 😱\n⤷ %s: %s\nCompilation traceback:\n%s"
% (self.exception.__class__.__name__,
self.exception, tb))
class HyTypeError(TypeError):
def __init__(self, expression, message):
super(HyTypeError, self).__init__(message)
self.expression = expression
self.message = message
self.source = None
self.filename = None
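    # __str__ below renders an annotated source excerpt, roughly of the form
    # (illustrative):
    #
    #   File "foo.hy", line 3, column 4
    #
    #     (foo bar)
    #        ^-----^
    #   HyTypeError: <message>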
def __str__(self):
line = self.expression.start_line
start = self.expression.start_column
end = self.expression.end_column
source = []
if self.source is not None:
source = self.source.split("\n")[line-1:self.expression.end_line]
if line == self.expression.end_line:
length = end - start
else:
length = len(source[0]) - start
result = ""
result += ' File "%s", line %d, column %d\n\n' % (self.filename,
line,
start)
if len(source) == 1:
result += ' %s\n' % colored.red(source[0])
result += ' %s%s\n' % (' '*(start-1),
colored.green('^' + '-'*(length-1) + '^'))
if len(source) > 1:
result += ' %s\n' % colored.red(source[0])
result += ' %s%s\n' % (' '*(start-1),
colored.green('^' + '-'*length))
if len(source) > 2: # write the middle lines
for line in source[1:-1]:
result += ' %s\n' % colored.red("".join(line))
result += ' %s\n' % colored.green("-"*len(line))
# write the last line
result += ' %s\n' % colored.red("".join(source[-1]))
result += ' %s\n' % colored.green('-'*(end-1) + '^')
result += colored.yellow("%s: %s\n\n" %
(self.__class__.__name__,
self.message))
if not PY3:
return result.encode('utf-8')
else:
return result
class HyMacroExpansionError(HyTypeError):
pass
class HyIOError(HyError, IOError):
"""
Trivial subclass of IOError and HyError, to distinguish between
IOErrors raised by Hy itself as opposed to Hy programs.
"""
pass
| paultag/hy | hy/errors.py | Python | mit | 4,271 | 0 |
# -*- coding: utf-8 -*-
"""
Doctest runner for 'birdhousebuilder.recipe.adagucserver'.
"""
__docformat__ = 'restructuredtext'
import os
import sys
import unittest
import zc.buildout.tests
import zc.buildout.testing
from zope.testing import doctest, renormalizing
optionflags = (doctest.ELLIPSIS |
doctest.NORMALIZE_WHITESPACE |
doctest.REPORT_ONLY_FIRST_FAILURE)
def setUp(test):
zc.buildout.testing.buildoutSetUp(test)
# Install the recipe in develop mode
zc.buildout.testing.install_develop('birdhousebuilder.recipe.adagucserver', test)
test.globs['os'] = os
test.globs['sys'] = sys
test.globs['test_dir'] = os.path.dirname(__file__)
def test_suite():
suite = unittest.TestSuite((
doctest.DocFileSuite(
'../../../../README.rst',
setUp=setUp,
tearDown=zc.buildout.testing.buildoutTearDown,
optionflags=optionflags,
checker=renormalizing.RENormalizing([
# If want to clean up the doctest output you
# can register additional regexp normalizers
# here. The format is a two-tuple with the RE
# as the first item and the replacement as the
# second item, e.g.
# (re.compile('my-[rR]eg[eE]ps'), 'my-regexps')
zc.buildout.testing.normalize_path,
]),
),
))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
| bird-house/birdhousebuilder.recipe.adagucserver | birdhousebuilder/recipe/adagucserver/tests/test_docs.py | Python | bsd-3-clause | 1,620 | 0.003086 |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v10.errors",
marshal="google.ads.googleads.v10",
manifest={"TimeZoneErrorEnum",},
)
class TimeZoneErrorEnum(proto.Message):
r"""Container for enum describing possible time zone errors.
"""
class TimeZoneError(proto.Enum):
r"""Enum describing possible currency code errors."""
UNSPECIFIED = 0
UNKNOWN = 1
INVALID_TIME_ZONE = 5
__all__ = tuple(sorted(__protobuf__.manifest))
| googleads/google-ads-python | google/ads/googleads/v10/errors/types/time_zone_error.py | Python | apache-2.0 | 1,124 | 0.00089 |
from django_mongoengine.admin.options import *
from django_mongoengine.admin.sites import site
from django.conf import settings
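# The admin-site override below is opt-in; enable it via a Django settings
# flag, e.g. (illustrative):
#   DJANGO_MONGOENGINE_OVERRIDE_ADMIN = True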
if getattr(settings, 'DJANGO_MONGOENGINE_OVERRIDE_ADMIN', False):
import django.contrib.admin
# copy already registered model admins
# without that the already registered models
# don't show up in the new admin
site._registry = django.contrib.admin.site._registry
    django.contrib.admin.site = site
| seraphlnWu/django-mongoengine | django_mongoengine/admin/__init__.py | Python | bsd-3-clause | 451 | 0.002217 |
from base import Input
from wapiti import get_json
class GoogleNews(Input):
prefix = 'gn'
def fetch(self):
return get_json('http://ajax.googleapis.com/ajax/services/search/news?v=1.0&q=' + self.page_title)
def process(self, f_res):
if f_res['responseStatus'] == 403 or not f_res.get('responseData', {}).get('cursor', {}).get('estimatedResultCount', {}):
return {}
else:
return super(GoogleNews, self).process(f_res['responseData']['cursor']['estimatedResultCount'])
stats = {
'count': lambda f: f
}
class GoogleSearch(Input):
prefix = 'gs'
def fetch(self):
return get_json('http://ajax.googleapis.com/ajax/services/search/web?v=1.0&q=' + self.page_title)
def process(self, f_res):
if f_res['responseStatus'] == 403 or not f_res['responseData']:
return {}
else:
return super(GoogleSearch, self).process(f_res['responseData']['cursor']['estimatedResultCount'])
stats = {
'count': lambda f: f
}
| slaporte/qualityvis | inputs/google.py | Python | gpl-3.0 | 1,054 | 0.004744 |