text (stringlengths 6..947k) | repo_name (stringlengths 5..100) | path (stringlengths 4..231) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 6..947k) | score (float64 0..0.34) |
---|---|---|---|---|---|---|
# Written by Vamei
import os
import multiprocessing
import time
#==================
# input worker
def inputQ(queue, info):
    # info = str(os.getpid()) + '(put):' + str(time.time())
    queue.put(info)
# output worker
def outputQ(queue, lock):
    info = queue.get()
    print(info)
    # lock.acquire()
    # print (str(os.getpid()) + '(get):' + info)
    # lock.release()
#===================
# Main
if __name__ == "__main__":
    record1 = []  # store input processes
    record2 = []  # store output processes
    lock = multiprocessing.Lock()  # To prevent messy print
    queue = multiprocessing.Queue(3)
    a = range(10)
    # input processes
    for i in a:
        process = multiprocessing.Process(target=inputQ, args=(queue, i))
        process.start()
        record1.append(process)
    # output processes
    for i in range(10):
        process = multiprocessing.Process(target=outputQ, args=(queue, lock))
        process.start()
        record2.append(process)
    for p in record1:
        p.join()
    queue.close()  # No more object will come, close the queue
    for p in record2:
        p.join()
| JeffpanUK/NuPyTools | multiprocessTutorial.py | Python | mit | 1,085 | 0.029493 |
from __future__ import print_function
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
from h2o.utils.typechecks import assert_is_type
from h2o.frame import H2OFrame

def h2o_H2OFrame_ascharacter():
    """
    Python API test: h2o.frame.H2OFrame.ascharacter()

    Copied from pyunit_ascharacter.py
    """
    h2oframe = h2o.import_file(path=pyunit_utils.locate("smalldata/junit/cars.csv"))
    newFrame = h2oframe['cylinders'].ascharacter()
    assert_is_type(newFrame, H2OFrame)
    assert newFrame.isstring()[0], "h2o.H2OFrame.ascharacter() command is not working."

if __name__ == "__main__":
    pyunit_utils.standalone_test(h2o_H2OFrame_ascharacter)
else:
    h2o_H2OFrame_ascharacter()
| spennihana/h2o-3 | h2o-py/tests/testdir_apis/Data_Manipulation/pyunit_h2oH2OFrame_ascharacter.py | Python | apache-2.0 | 734 | 0.013624 |
from datetime import datetime

from .base import BaseCarrier


class FakeCarrier(BaseCarrier):
    id = 'fake'
    name = 'Fake Carrier'

    def _track_single(self, object_id):
        package = self.create_package(
            object_id=object_id,
            service_name='Default',
        )

        for i in range(1, 5):
            package.add_tracking_info(
                date=datetime.now(),
                location='City {}'.format(i),
                status='In transit {}'.format(i),
                description='Wow',
            )

        return package
| rpedigoni/trackr | trackr/carriers/fake.py | Python | mit | 572 | 0 |
"""
Provides ValueMeta metaclass - which allows its descendants to override
__instancecheck__ and __subclasscheck__ to be used as *classmethods*
"""
from __future__ import absolute_import
__all__ = [
'ValueMeta',
'ValueABC',
'InterfaceType',
'ExistingDirectory',
'ExistingFile'
]
from .existing_directory import ExistingDirectory
from .existing_file import ExistingFile
from .interface_type import InterfaceType
from .valueabc import ValueABC
from .valuemeta import ValueMeta
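# A minimal sketch (not the package's actual ValueMeta implementation) of the
# idea described in the module docstring: a metaclass can forward isinstance()
# checks to a hook defined on the class itself, so descendants can override the
# check much like a classmethod. The hook name used here is an assumption.
class _ExampleValueMeta(type):
    def __instancecheck__(cls, instance):
        hook = getattr(cls, '__value_instancecheck__', None)
        if hook is not None:
            # Let the class decide what counts as one of its instances.
            return bool(hook(instance))
        return super(_ExampleValueMeta, cls).__instancecheck__(instance)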
| OaklandPeters/pyinterfaces | pyinterfaces/valueabc/__init__.py | Python | mit | 498 | 0 |
try: paraview.simple
except: from paraview.simple import *
import numpy as np
from mpi4py import MPI
import os
import csv
from scipy import interpolate
import gc
import sys
gc.enable()
comm = MPI.COMM_WORLD
label = 'm_25_3b'
labelo = 'm_25_3b'
basename = 'mli'
## READ archive (too many points... somehow)
# args: name, dayi, dayf, days
#label = sys.argv[1]
#basename = sys.argv[2]
#dayi = int(sys.argv[3])
#dayf = int(sys.argv[4])
#days = int(sys.argv[5])
tt = int(sys.argv[1]) - 1
labelo = sys.argv[2]
label = sys.argv[2]
basename = sys.argv[3]
field = sys.argv[4]
resx = int(sys.argv[5])
resy = int(sys.argv[6])
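# Illustrative invocation (hypothetical values; the script expects to run under
# pvpython, typically launched through MPI):
#   mpirun -np 4 pvpython extract_Scalar_temp.py 10 m_25_3b mli Temperature 100 40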
path = '/scratch/jmensa/'+label+'/'
Xlist = np.linspace(0,10000,resx)
Ylist = np.linspace(0,4000,resy)
#Xlist = np.linspace(0,10000,resx)
#Ylist = np.linspace(0,4000,resy)
Zlist = np.linspace(0,-50,51)
dl = [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1]
Zlist = np.cumsum(dl)
[X,Y] = np.meshgrid(Xlist,Ylist)
size = MPI.COMM_WORLD.Get_size()
rank = MPI.COMM_WORLD.Get_rank()
nl = len(Zlist)/size
ll = len(Zlist)%size
mli_pvtu = XMLPartitionedUnstructuredGridReader( FileName=[path+'/'+basename+'_'+str(tt)+'.pvtu'] )
mli_pvtu.PointArrayStatus = [field]
sliceFilter = Slice(mli_pvtu)
sliceFilter.SliceType.Normal = [0,0,1]
if rank == 0:
Tr = np.zeros((len(Ylist),len(Xlist),len(Zlist)))
for n in range(nl+ll):
layer = n+rank*nl
print 'layer:', rank, layer
sliceFilter.SliceType.Origin = [0,0,-1*Zlist[layer]]
DataSliceFile = paraview.servermanager.Fetch(sliceFilter)
points = DataSliceFile.GetPoints()
numPoints = DataSliceFile.GetNumberOfPoints()
#
data=np.zeros((numPoints))
coords=np.zeros((numPoints,3))
#
for x in xrange(numPoints):
data[x] = DataSliceFile.GetPointData().GetArray(field).GetValue(x)
coords[x] = points.GetPoint(x)
Tr[:,:,layer] = interpolate.griddata((coords[:,0],coords[:,1]),data,(X,Y),method='linear')
# print rank, Tr[:,:,:]
if rank > 0:
Tr = np.zeros((len(Ylist),len(Xlist),nl))
for n in xrange(nl):
layer = n+rank*nl
print 'layer:', rank, layer
sliceFilter.SliceType.Origin = [0,0,-1*Zlist[layer]]
DataSliceFile = paraview.servermanager.Fetch(sliceFilter)
points = DataSliceFile.GetPoints()
numPoints = DataSliceFile.GetNumberOfPoints()
#
data=np.zeros((numPoints))
coords=np.zeros((numPoints,3))
#
for x in xrange(numPoints):
data[x] = DataSliceFile.GetPointData().GetArray(field).GetValue(x)
coords[x] = points.GetPoint(x)
Tr[:,:,n] = interpolate.griddata((coords[:,0],coords[:,1]),data,(X,Y),method='linear')
# print rank, Tr[:,:,:]
comm.send(nl*rank+ll, dest=0, tag=10)
comm.send(Tr, dest=0, tag=11)
if rank == 0:
for s in range(size-1):
print 's', s+1
l = comm.recv(source=s+1, tag=10)
print 'l', l
Tr[:,:,l:l+nl] = comm.recv(source=s+1, tag=11)
print Tr
fd = open('./csv/'+field+'_'+labelo+'_'+str(tt)+'.csv','w')
print Tr[:,:,:]
for z in xrange(len(Zlist)):
print z
for j in xrange(len(Ylist)):
for i in xrange(len(Xlist)):
fd.write(str(Tr[j,i,z])+', ')
fd.write('\n')
fd.close()
del mli_pvtu, Tr, coords, data, numPoints, points, DataSliceFile, sliceFilter
gc.collect()
| jungla/ICOM-fluidity-toolbox | 2D/RST/extract_Scalar_temp.py | Python | gpl-2.0 | 3,216 | 0.03949 |
import re

import sublime

from ..versions import version_exclude_prerelease


def filter_releases(package, settings, releases):
    """
    Returns all releases in the list of releases that are compatible with
    the current platform and version of Sublime Text

    :param package:
        The name of the package

    :param settings:
        A dict optionally containing the `install_prereleases` key

    :param releases:
        A list of release dicts

    :return:
        A list of release dicts
    """

    platform_selectors = [sublime.platform() + '-' + sublime.arch(),
                          sublime.platform(), '*']

    install_prereleases = settings.get('install_prereleases')
    allow_prereleases = install_prereleases is True
    if not allow_prereleases and isinstance(install_prereleases, list) and package in install_prereleases:
        allow_prereleases = True

    if not allow_prereleases:
        releases = version_exclude_prerelease(releases)

    output = []
    for release in releases:
        platforms = release.get('platforms', '*')
        if not isinstance(platforms, list):
            platforms = [platforms]

        matched = False
        for selector in platform_selectors:
            if selector in platforms:
                matched = True
                break

        if not matched:
            continue

        # Default to '*' (for legacy reasons), see #604
        if not is_compatible_version(release.get('sublime_text', '*')):
            continue

        output.append(release)

    return output


def is_compatible_version(version_range):
    min_version = float("-inf")
    max_version = float("inf")

    if version_range == '*':
        return True

    gt_match = re.match('>(\d+)$', version_range)
    ge_match = re.match('>=(\d+)$', version_range)
    lt_match = re.match('<(\d+)$', version_range)
    le_match = re.match('<=(\d+)$', version_range)
    range_match = re.match('(\d+) - (\d+)$', version_range)

    if gt_match:
        min_version = int(gt_match.group(1)) + 1
    elif ge_match:
        min_version = int(ge_match.group(1))
    elif lt_match:
        max_version = int(lt_match.group(1)) - 1
    elif le_match:
        max_version = int(le_match.group(1))
    elif range_match:
        min_version = int(range_match.group(1))
        max_version = int(range_match.group(2))
    else:
        return None

    if min_version > int(sublime.version()):
        return False
    if max_version < int(sublime.version()):
        return False

    return True
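# A minimal, self-contained sketch (not part of Package Control) of the same
# version-range convention parsed by is_compatible_version() above, evaluated
# against an explicit build number instead of sublime.version():
def _example_range_matches(version_range, build):
    if version_range == '*':
        return True
    pair = re.match(r'(\d+) - (\d+)$', version_range)
    if pair:
        return int(pair.group(1)) <= build <= int(pair.group(2))
    bound = re.match(r'(>=|>|<=|<)(\d+)$', version_range)
    if not bound:
        return None
    op, num = bound.group(1), int(bound.group(2))
    return {'>': build > num, '>=': build >= num,
            '<': build < num, '<=': build <= num}[op]

# For example, _example_range_matches('>=3000', 3126) is True and
# _example_range_matches('3000 - 3120', 3126) is False.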
| herove/dotfiles | sublime/Packages/Package Control/package_control/providers/release_selector.py | Python | mit | 2,521 | 0.003173 |
import flask
import json
import bson
import os
from flask import request, redirect
import sys
from fontana import twitter
import pymongo
DEFAULT_PORT = 2014
DB = 'fontana'
connection = pymongo.Connection("localhost", 27017)
db = connection[DB]
latest_headers = {}
MODERATED_SIZE = 40
class MongoEncoder(json.JSONEncoder):
def default(self, obj, **kwargs):
if isinstance(obj, bson.ObjectId):
return str(obj)
else:
return json.JSONEncoder.default(self, obj, **kwargs)
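# Illustrative usage (not called anywhere in the app): bson.ObjectId values are
# not JSON-serialisable by default, so the encoder above converts them to their
# string form.
def _example_encode_document():
    doc = {'_id': bson.ObjectId(), 'text': 'hello'}
    return json.dumps(doc, cls=MongoEncoder)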
app = flask.Flask('fontana')
def twitter_authorisation_begin():
"""
Step 1 and 2 of the Twitter oAuth flow.
"""
callback = absolute_url('twitter_signin')
if 'next' in flask.request.args:
callback = '%s?next=%s' % (callback, flask.request.args['next'])
try:
token = twitter.request_token(app.config, callback)
flask.session['twitter_oauth_token'] = token['oauth_token']
flask.session['twitter_oauth_token_secret'] = token['oauth_token_secret']
return flask.redirect(twitter.authenticate_url(token, callback))
except twitter.TwitterException, e:
return flask.abort(403, str(e))
def twitter_authorisation_done():
"""
Step 3 of the Twitter oAuth flow.
"""
if 'oauth_token' in flask.request.args:
token = flask.request.args
if flask.session['twitter_oauth_token'] != token['oauth_token']:
return flask.abort(403, 'oauth_token mismatch!')
auth = twitter.access_token(app.config, token)
flask.session['twitter_oauth_token'] = auth['oauth_token']
flask.session['twitter_oauth_token_secret'] = auth['oauth_token_secret']
flask.session['twitter_user_id'] = auth['user_id']
flask.session['twitter_screen_name'] = auth['screen_name']
if 'next' in flask.request.args:
return flask.redirect(flask.request.args['next'])
else:
return 'OK'
elif 'denied' in flask.request.args:
return flask.abort(403, 'oauth denied')
else:
return flask.abort(403, 'unknown sign in failure')
@app.route('/api/twitter/session/new/')
def twitter_signin():
"""
Handles the Twitter oAuth flow.
"""
args = flask.request.args
if not args or (len(args) == 1 and 'next' in args):
return twitter_authorisation_begin()
else:
return twitter_authorisation_done()
@app.route('/api/twitter/session/')
def twitter_session():
"""
Check for an active Twitter session. Returns a JSON response with the
active sceen name or a 403 if there is no active session.
"""
if not flask.session.get('twitter_user_id'):
return flask.abort(403, 'no active session')
return (json.dumps({
'screen_name': flask.session['twitter_screen_name']
}), 200, {'content-type': 'application/json'})
@app.route('/api/twitter/search/')
def twitter_search():
"""
Perform a Twitter search
"""
global latest_headers
if not flask.session.get('twitter_user_id'):
return flask.abort(403, 'no active session')
token = {
'oauth_token': flask.session['twitter_oauth_token'],
'oauth_token_secret': flask.session['twitter_oauth_token_secret']
}
# Find out last id
last = db['tweets'].aggregate( { '$group': { '_id':"", 'last': { '$max': "$id" } } } )
since_id = long(flask.request.args.get('since_id'))
params = dict(flask.request.args)
if last.get("ok") == 1 and last['result']:
last = long(last['result'][0]['last'])
params['since_id'] = max(last, since_id)
# Query twitter and cache result into DB
(text, status_code, headers) = twitter.search(app.config, token, params)
data = json.loads(text)
for s in data['statuses']:
s['exclude'] = s['text'].startswith('RT ')
s['classes'] = []
if s['text'].startswith('RT '):
s['classes'].append('RT')
if '?' in s['text']:
s['classes'].append('question')
# Use tweet id as _id so that save will replace existing tweets if necessary
s['_id'] = s['id']
db['tweets'].save(s)
latest_headers = dict(headers)
return (text, status_code, headers)
@app.route('/moderated')
def twitter_moderated():
"""
Return moderated posts
"""
return (json.dumps({ 'statuses': [ s for s in db['tweets'].find({ 'exclude': False }).sort([('id', -1)]).limit(MODERATED_SIZE) ]},
indent=None if request.is_xhr else 2,
cls=MongoEncoder),
200,
{'content-type': 'application/json'})
@app.route('/all')
def twitter_all():
"""
Return all cached posts
"""
since_id = long(request.values.get('since_id', 0))
return (json.dumps({ 'statuses': [ s for s in db['tweets'].find({ 'id': { '$gt': since_id } }).sort([ ('id', -1) ]) ]},
indent=None if request.is_xhr else 2,
cls=MongoEncoder),
200,
latest_headers)
@app.route('/exclude/<path:ident>')
def exclude(ident):
"""Exclude given post.
"""
db['tweets'].update( { 'id_str': ident },
{ '$set': { 'exclude': True } })
return redirect('/admin.html')
@app.route('/set_moderated/<int:length>')
def set_moderated_length(length):
"""Set moderated queue length
"""
global MODERATED_SIZE
if length > 2 and length < 100:
MODERATED_SIZE = length
return redirect('/admin.html')
@app.route('/include/<path:ident>')
def include(ident):
"""Include given post.
"""
db['tweets'].update( { 'id_str': ident },
{ '$set': { 'exclude': False } })
return redirect('/admin.html')
@app.route('/api/session/clear/', methods=['POST'])
def signout():
"""
Perform a sign out, clears the user's session.
"""
flask.session.clear()
return 'OK'
def absolute_url(name):
"""
Flask's url_for with added SERVER_NAME
"""
host = app.config['SERVER_NAME'] or ('localhost:' + str(DEFAULT_PORT))
url = flask.url_for(name)
return 'http://%s%s' % (host, url)
def devserver(extra_conf=None):
"""
Start a development server
"""
from werkzeug.wsgi import SharedDataMiddleware
# Load the "example" conf
root = app.root_path.split(os.path.dirname(__file__))[0]
conf = os.path.join(root, 'backend', 'var', 'conf', 'fontana-example.conf')
app.config.from_pyfile(conf)
if extra_conf:
app.config.from_pyfile(os.path.join(root, extra_conf))
# Serve the frontend files
app.wsgi_app = SharedDataMiddleware(app.wsgi_app, {
'/': app.config['STATIC_DIR']
})
# Setup a index.html redirect for convenience sake.
app.route('/')(lambda: flask.redirect('index.html'))
# Run the development or production server
if app.config.get('PROD'):
app.run(debug=False, host='0.0.0.0', port=DEFAULT_PORT)
else:
app.run()
if __name__ == "__main__":
# This will get invoked when you run `python backend/src/fontana.py`
if len(sys.argv) == 2:
devserver(sys.argv[1])
else:
devserver()
| oaubert/TwitterFontana | backend/src/app.py | Python | mit | 7,176 | 0.007664 |
"""Askbot template context processor that makes some parameters
from the django settings, all parameters from the askbot livesettings
and the application available for the templates
"""
import sys
from django.conf import settings
from django.core.urlresolvers import reverse
from django.utils import simplejson
import askbot
from askbot import api
from askbot import models
from askbot import const
from askbot.conf import settings as askbot_settings
from askbot.search.state_manager import SearchState
from askbot.utils import url_utils
from askbot.utils.slug import slugify
from askbot.utils.html import site_url
from askbot.utils.translation import get_language
def application_settings(request):
"""The context processor function"""
#if not request.path.startswith('/' + settings.ASKBOT_URL):
# #todo: this is a really ugly hack, will only work
# #when askbot is installed not at the home page.
# #this will not work for the
# #heavy modders of askbot, because their custom pages
# #will not receive the askbot settings in the context
# #to solve this properly we should probably explicitly
# #add settings to the context per page
# return {}
my_settings = askbot_settings.as_dict()
my_settings['LANGUAGE_CODE'] = getattr(request, 'LANGUAGE_CODE', settings.LANGUAGE_CODE)
my_settings['MULTILINGUAL'] = getattr(settings, 'ASKBOT_MULTILINGUAL', False)
my_settings['LANGUAGES_DICT'] = dict(getattr(settings, 'LANGUAGES', []))
my_settings['ALLOWED_UPLOAD_FILE_TYPES'] = \
settings.ASKBOT_ALLOWED_UPLOAD_FILE_TYPES
my_settings['ASKBOT_URL'] = settings.ASKBOT_URL
my_settings['STATIC_URL'] = settings.STATIC_URL
my_settings['IP_MODERATION_ENABLED'] = getattr(settings, 'ASKBOT_IP_MODERATION_ENABLED', False)
my_settings['USE_LOCAL_FONTS'] = getattr(
settings,
'ASKBOT_USE_LOCAL_FONTS',
False
)
my_settings['CSRF_COOKIE_NAME'] = settings.CSRF_COOKIE_NAME
my_settings['DEBUG'] = settings.DEBUG
my_settings['USING_RUNSERVER'] = 'runserver' in sys.argv
my_settings['ASKBOT_VERSION'] = askbot.get_version()
my_settings['LOGIN_URL'] = url_utils.get_login_url()
my_settings['LOGOUT_URL'] = url_utils.get_logout_url()
if my_settings['EDITOR_TYPE'] == 'tinymce':
tinymce_plugins = settings.TINYMCE_DEFAULT_CONFIG.get('plugins', '').split(',')
my_settings['TINYMCE_PLUGINS'] = map(lambda v: v.strip(), tinymce_plugins)
else:
my_settings['TINYMCE_PLUGINS'] = []
my_settings['LOGOUT_REDIRECT_URL'] = url_utils.get_logout_redirect_url()
my_settings['USE_ASKBOT_LOGIN_SYSTEM'] = 'askbot.deps.django_authopenid' \
in settings.INSTALLED_APPS
current_language = get_language()
#for some languages we will start searching for shorter words
if current_language == 'ja':
#we need to open the search box and show info message about
#the japanese lang search
min_search_word_length = 1
else:
min_search_word_length = my_settings['MIN_SEARCH_WORD_LENGTH']
need_scope_links = askbot_settings.ALL_SCOPE_ENABLED or \
askbot_settings.UNANSWERED_SCOPE_ENABLED or \
(request.user.is_authenticated() and askbot_settings.FOLLOWED_SCOPE_ENABLED)
context = {
'base_url': site_url(''),
'empty_search_state': SearchState.get_empty(),
'min_search_word_length': min_search_word_length,
'current_language_code': current_language,
'settings': my_settings,
'moderation_items': api.get_info_on_moderation_items(request.user),
'need_scope_links': need_scope_links,
'noscript_url': const.DEPENDENCY_URLS['noscript'],
}
if askbot_settings.GROUPS_ENABLED:
#calculate context needed to list all the groups
def _get_group_url(group):
"""calculates url to the group based on its id and name"""
group_slug = slugify(group['name'])
return reverse(
'users_by_group',
kwargs={'group_id': group['id'], 'group_slug': group_slug}
)
#load id's and names of all groups
global_group = models.Group.objects.get_global_group()
groups = models.Group.objects.exclude_personal()
groups = groups.exclude(id=global_group.id)
groups_data = list(groups.values('id', 'name'))
#sort groups_data alphanumerically, but case-insensitive
groups_data = sorted(
groups_data,
lambda x, y: cmp(x['name'].lower(), y['name'].lower())
)
#insert data for the global group at the first position
groups_data.insert(0, {'id': global_group.id, 'name': global_group.name})
#build group_list for the context
group_list = list()
for group in groups_data:
link = _get_group_url(group)
group_list.append({'name': group['name'], 'link': link})
context['group_list'] = simplejson.dumps(group_list)
return context
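# A minimal sketch (an assumed project settings fragment, not part of this
# module) of how a context processor like application_settings() is typically
# enabled in an older Django settings file:
#
# TEMPLATE_CONTEXT_PROCESSORS = (
#     'django.core.context_processors.request',
#     'askbot.context.application_settings',
# )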
| jesonyang001/qarepo | askbot/context.py | Python | gpl-3.0 | 5,245 | 0.004385 |
#!/usr/bin/python
# Find all the three word sequences
import sys

for line in sys.stdin:
    tok = line.strip().split()
    if len(tok) > 2:
        for i in range(1, len(tok)-1):
            word1, word2, word3 = tok[i-1], tok[i], tok[i+1]
            word_str = word1 + " " + word2 + " " + word3
            print(word_str + "\t1")
    else:
        continue
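# Example invocation (hypothetical input, e.g. as a Hadoop Streaming mapper):
#   $ echo "the quick brown fox" | python mapper.py
# emits one tab-separated line per three-word window:
#   "the quick brown\t1" and "quick brown fox\t1"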
| aphaea/exc-MapReduce-UoE | three_wrd_seq_count/mapper.py | Python | mit | 441 | 0.011338 |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Interface documentation.
Maintainer: Itamar Shtull-Trauring
"""
from __future__ import division, absolute_import
from zope.interface import Interface, Attribute
from twisted.python import deprecate
from twisted.python.versions import Version
class IAddress(Interface):
"""
An address, e.g. a TCP C{(host, port)}.
Default implementations are in L{twisted.internet.address}.
"""
### Reactor Interfaces
class IConnector(Interface):
"""
Object used to interface between connections and protocols.
Each L{IConnector} manages one connection.
"""
def stopConnecting():
"""
Stop attempting to connect.
"""
def disconnect():
"""
Disconnect regardless of the connection state.
If we are connected, disconnect, if we are trying to connect,
stop trying.
"""
def connect():
"""
Try to connect to remote address.
"""
def getDestination():
"""
Return destination this will try to connect to.
@return: An object which provides L{IAddress}.
"""
class IResolverSimple(Interface):
def getHostByName(name, timeout = (1, 3, 11, 45)):
"""
Resolve the domain name C{name} into an IP address.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{twisted.internet.defer.Deferred}
@return: The callback of the Deferred that is returned will be
passed a string that represents the IP address of the
specified name, or the errback will be called if the
lookup times out. If multiple types of address records
are associated with the name, A6 records will be returned
in preference to AAAA records, which will be returned in
preference to A records. If there are multiple records of
the type to be returned, one will be selected at random.
@raise twisted.internet.defer.TimeoutError: Raised
(asynchronously) if the name cannot be resolved within the
specified timeout period.
"""
class IResolver(IResolverSimple):
def query(query, timeout=None):
"""
Dispatch C{query} to the method which can handle its type.
@type query: L{twisted.names.dns.Query}
@param query: The DNS query being issued, to which a response is to be
generated.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
def lookupAddress(name, timeout=None):
"""
Perform an A record lookup.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
def lookupAddress6(name, timeout=None):
"""
Perform an A6 record lookup.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
def lookupIPV6Address(name, timeout=None):
"""
Perform an AAAA record lookup.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
def lookupMailExchange(name, timeout=None):
"""
Perform an MX record lookup.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
def lookupNameservers(name, timeout=None):
"""
Perform an NS record lookup.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
def lookupCanonicalName(name, timeout=None):
"""
Perform a CNAME record lookup.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
def lookupMailBox(name, timeout=None):
"""
Perform an MB record lookup.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
def lookupMailGroup(name, timeout=None):
"""
Perform an MG record lookup.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
def lookupMailRename(name, timeout=None):
"""
Perform an MR record lookup.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
def lookupPointer(name, timeout=None):
"""
Perform a PTR record lookup.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
def lookupAuthority(name, timeout=None):
"""
Perform an SOA record lookup.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
def lookupNull(name, timeout=None):
"""
Perform a NULL record lookup.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
def lookupWellKnownServices(name, timeout=None):
"""
Perform a WKS record lookup.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
def lookupHostInfo(name, timeout=None):
"""
Perform a HINFO record lookup.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
def lookupMailboxInfo(name, timeout=None):
"""
Perform an MINFO record lookup.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
def lookupText(name, timeout=None):
"""
Perform a TXT record lookup.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
def lookupResponsibility(name, timeout=None):
"""
Perform an RP record lookup.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
def lookupAFSDatabase(name, timeout=None):
"""
Perform an AFSDB record lookup.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
def lookupService(name, timeout=None):
"""
Perform an SRV record lookup.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
def lookupAllRecords(name, timeout=None):
"""
Perform an ALL_RECORD lookup.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
def lookupSenderPolicy(name, timeout= 10):
"""
Perform a SPF record lookup.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
def lookupNamingAuthorityPointer(name, timeout=None):
"""
Perform a NAPTR record lookup.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
def lookupZone(name, timeout=None):
"""
Perform an AXFR record lookup.
NB This is quite different from other DNS requests. See
U{http://cr.yp.to/djbdns/axfr-notes.html} for more
information.
NB Unlike other C{lookup*} methods, the timeout here is not a
list of ints, it is a single int.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: C{int}
@param timeout: When this timeout expires, the query is
considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances.
The first element of the tuple gives answers.
The second and third elements are always empty.
The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
class IReactorTCP(Interface):
def listenTCP(port, factory, backlog=50, interface=''):
"""
Connects a given protocol factory to the given numeric TCP/IP port.
@param port: a port number on which to listen
@param factory: a L{twisted.internet.protocol.ServerFactory} instance
@param backlog: size of the listen queue
@param interface: The local IPv4 or IPv6 address to which to bind;
defaults to '', ie all IPv4 addresses. To bind to all IPv4 and IPv6
addresses, you must call this method twice.
@return: an object that provides L{IListeningPort}.
@raise CannotListenError: as defined here
L{twisted.internet.error.CannotListenError},
if it cannot listen on this port (e.g., it
cannot bind to the required port number)
"""
def connectTCP(host, port, factory, timeout=30, bindAddress=None):
"""
Connect a TCP client.
@param host: A hostname or an IPv4 or IPv6 address literal.
@type host: L{bytes}
@param port: a port number
@param factory: a L{twisted.internet.protocol.ClientFactory} instance
@param timeout: number of seconds to wait before assuming the
connection has failed.
@param bindAddress: a (host, port) tuple of local address to bind
to, or None.
@return: An object which provides L{IConnector}. This connector will
call various callbacks on the factory when a connection is
made, failed, or lost - see
L{ClientFactory<twisted.internet.protocol.ClientFactory>}
docs for details.
"""
class IReactorSSL(Interface):
def connectSSL(host, port, factory, contextFactory, timeout=30, bindAddress=None):
"""
Connect a client Protocol to a remote SSL socket.
@param host: a host name
@param port: a port number
@param factory: a L{twisted.internet.protocol.ClientFactory} instance
@param contextFactory: a L{twisted.internet.ssl.ClientContextFactory} object.
@param timeout: number of seconds to wait before assuming the
connection has failed.
@param bindAddress: a (host, port) tuple of local address to bind to,
or C{None}.
@return: An object which provides L{IConnector}.
"""
def listenSSL(port, factory, contextFactory, backlog=50, interface=''):
"""
Connects a given protocol factory to the given numeric TCP/IP port.
The connection is a SSL one, using contexts created by the context
factory.
@param port: a port number on which to listen
@param factory: a L{twisted.internet.protocol.ServerFactory} instance
@param contextFactory: a L{twisted.internet.ssl.ContextFactory} instance
@param backlog: size of the listen queue
@param interface: the hostname to bind to, defaults to '' (all)
"""
class IReactorUNIX(Interface):
"""
UNIX socket methods.
"""
def connectUNIX(address, factory, timeout=30, checkPID=0):
"""
Connect a client protocol to a UNIX socket.
@param address: a path to a unix socket on the filesystem.
@param factory: a L{twisted.internet.protocol.ClientFactory} instance
@param timeout: number of seconds to wait before assuming the connection
has failed.
@param checkPID: if True, check for a pid file to verify that a server
is listening. If C{address} is a Linux abstract namespace path,
this must be C{False}.
@return: An object which provides L{IConnector}.
"""
def listenUNIX(address, factory, backlog=50, mode=0o666, wantPID=0):
"""
Listen on a UNIX socket.
@param address: a path to a unix socket on the filesystem.
@param factory: a L{twisted.internet.protocol.Factory} instance.
@param backlog: number of connections to allow in backlog.
@param mode: The mode (B{not} umask) to set on the unix socket. See
platform specific documentation for information about how this
might affect connection attempts.
@type mode: C{int}
@param wantPID: if True, create a pidfile for the socket. If C{address}
is a Linux abstract namespace path, this must be C{False}.
@return: An object which provides L{IListeningPort}.
"""
class IReactorUNIXDatagram(Interface):
"""
Datagram UNIX socket methods.
"""
def connectUNIXDatagram(address, protocol, maxPacketSize=8192, mode=0o666, bindAddress=None):
"""
Connect a client protocol to a datagram UNIX socket.
@param address: a path to a unix socket on the filesystem.
@param protocol: a L{twisted.internet.protocol.ConnectedDatagramProtocol} instance
@param maxPacketSize: maximum packet size to accept
@param mode: The mode (B{not} umask) to set on the unix socket. See
platform specific documentation for information about how this
might affect connection attempts.
@type mode: C{int}
@param bindAddress: address to bind to
@return: An object which provides L{IConnector}.
"""
def listenUNIXDatagram(address, protocol, maxPacketSize=8192, mode=0o666):
"""
Listen on a datagram UNIX socket.
@param address: a path to a unix socket on the filesystem.
@param protocol: a L{twisted.internet.protocol.DatagramProtocol} instance.
@param maxPacketSize: maximum packet size to accept
@param mode: The mode (B{not} umask) to set on the unix socket. See
platform specific documentation for information about how this
might affect connection attempts.
@type mode: C{int}
@return: An object which provides L{IListeningPort}.
"""
class IReactorWin32Events(Interface):
"""
Win32 Event API methods
@since: 10.2
"""
def addEvent(event, fd, action):
"""
Add a new win32 event to the event loop.
@param event: a Win32 event object created using win32event.CreateEvent()
@param fd: an instance of L{twisted.internet.abstract.FileDescriptor}
@param action: a string that is a method name of the fd instance.
This method is called in response to the event.
@return: None
"""
def removeEvent(event):
"""
Remove an event.
@param event: a Win32 event object added using L{IReactorWin32Events.addEvent}
@return: None
"""
class IReactorUDP(Interface):
"""
UDP socket methods.
"""
def listenUDP(port, protocol, interface='', maxPacketSize=8192):
"""
Connects a given L{DatagramProtocol} to the given numeric UDP port.
@param port: A port number on which to listen.
@type port: C{int}
@param protocol: A L{DatagramProtocol} instance which will be
connected to the given C{port}.
@type protocol: L{DatagramProtocol}
@param interface: The local IPv4 or IPv6 address to which to bind;
defaults to '', ie all IPv4 addresses.
@type interface: C{str}
@param maxPacketSize: The maximum packet size to accept.
@type maxPacketSize: C{int}
@return: object which provides L{IListeningPort}.
"""
class IReactorMulticast(Interface):
"""
UDP socket methods that support multicast.
IMPORTANT: This is an experimental new interface. It may change
without backwards compatibility. Suggestions are welcome.
"""
def listenMulticast(port, protocol, interface='', maxPacketSize=8192,
listenMultiple=False):
"""
Connects a given
L{DatagramProtocol<twisted.internet.protocol.DatagramProtocol>} to the
given numeric UDP port.
@param listenMultiple: If set to True, allows multiple sockets to
bind to the same address and port number at the same time.
@type listenMultiple: C{bool}
@returns: An object which provides L{IListeningPort}.
@see: L{twisted.internet.interfaces.IMulticastTransport}
@see: U{http://twistedmatrix.com/documents/current/core/howto/udp.html}
"""
class IReactorSocket(Interface):
"""
Methods which allow a reactor to use externally created sockets.
For example, to use C{adoptStreamPort} to implement behavior equivalent
to that of L{IReactorTCP.listenTCP}, you might write code like this::
from socket import SOMAXCONN, AF_INET, SOCK_STREAM, socket
portSocket = socket(AF_INET, SOCK_STREAM)
# Set FD_CLOEXEC on port, left as an exercise. Then make it into a
# non-blocking listening port:
portSocket.setblocking(False)
portSocket.bind(('192.168.1.2', 12345))
portSocket.listen(SOMAXCONN)
# Now have the reactor use it as a TCP port
port = reactor.adoptStreamPort(
portSocket.fileno(), AF_INET, YourFactory())
# portSocket itself is no longer necessary, and needs to be cleaned
# up by us.
portSocket.close()
# Whenever the server is no longer needed, stop it as usual.
stoppedDeferred = port.stopListening()
Another potential use is to inherit a listening descriptor from a parent
process (for example, systemd or launchd), or to receive one over a UNIX
domain socket.
Some plans for extending this interface exist. See:
- U{http://twistedmatrix.com/trac/ticket/5573}: AF_UNIX SOCK_STREAM ports
- U{http://twistedmatrix.com/trac/ticket/6594}: AF_UNIX SOCK_DGRAM ports
"""
def adoptStreamPort(fileDescriptor, addressFamily, factory):
"""
Add an existing listening I{SOCK_STREAM} socket to the reactor to
monitor for new connections to accept and handle.
@param fileDescriptor: A file descriptor associated with a socket which
is already bound to an address and marked as listening. The socket
must be set non-blocking. Any additional flags (for example,
close-on-exec) must also be set by application code. Application
code is responsible for closing the file descriptor, which may be
done as soon as C{adoptStreamPort} returns.
@type fileDescriptor: C{int}
@param addressFamily: The address family (or I{domain}) of the socket.
For example, L{socket.AF_INET6}.
@param factory: A L{ServerFactory} instance to use to create new
protocols to handle connections accepted via this socket.
@return: An object providing L{IListeningPort}.
@raise twisted.internet.error.UnsupportedAddressFamily: If the
given address family is not supported by this reactor, or
not supported with the given socket type.
@raise twisted.internet.error.UnsupportedSocketType: If the
given socket type is not supported by this reactor, or not
supported with the given socket type.
"""
def adoptStreamConnection(fileDescriptor, addressFamily, factory):
"""
Add an existing connected I{SOCK_STREAM} socket to the reactor to
monitor for data.
Note that the given factory won't have its C{startFactory} and
C{stopFactory} methods called, as there is no sensible time to call
them in this situation.
@param fileDescriptor: A file descriptor associated with a socket which
is already connected. The socket must be set non-blocking. Any
additional flags (for example, close-on-exec) must also be set by
application code. Application code is responsible for closing the
file descriptor, which may be done as soon as
C{adoptStreamConnection} returns.
@type fileDescriptor: C{int}
@param addressFamily: The address family (or I{domain}) of the socket.
For example, L{socket.AF_INET6}.
@param factory: A L{ServerFactory} instance to use to create a new
protocol to handle the connection via this socket.
@raise UnsupportedAddressFamily: If the given address family is not
supported by this reactor, or not supported with the given socket
type.
@raise UnsupportedSocketType: If the given socket type is not supported
by this reactor, or not supported with the given socket type.
"""
def adoptDatagramPort(fileDescriptor, addressFamily, protocol,
maxPacketSize=8192):
"""
Add an existing listening I{SOCK_DGRAM} socket to the reactor to
monitor for read and write readiness.
@param fileDescriptor: A file descriptor associated with a socket which
is already bound to an address and marked as listening. The socket
must be set non-blocking. Any additional flags (for example,
close-on-exec) must also be set by application code. Application
code is responsible for closing the file descriptor, which may be
done as soon as C{adoptDatagramPort} returns.
@type fileDescriptor: C{int}
@param addressFamily: The address family (or I{domain}) of the socket.
For example, L{socket.AF_INET6}.
@type addressFamily: C{int}
@param protocol: A L{DatagramProtocol} instance to connect to
a UDP transport.
@type protocol: L{DatagramProtocol}
@param maxPacketSize: The maximum packet size to accept.
@type maxPacketSize: C{int}
@return: An object providing L{IListeningPort}.
@raise L{UnsupportedAddressFamily}: If the given address family is not
supported by this reactor, or not supported with the given socket
type.
@raise UnsupportedSocketType: If the given socket type is not supported
by this reactor, or not supported with the given socket type.
"""
class IReactorProcess(Interface):
def spawnProcess(processProtocol, executable, args=(), env={}, path=None,
uid=None, gid=None, usePTY=0, childFDs=None):
"""
Spawn a process, with a process protocol.
@type processProtocol: L{IProcessProtocol} provider
@param processProtocol: An object which will be notified of all
events related to the created process.
@param executable: the file name to spawn - the full path should be
used.
@param args: the command line arguments to pass to the process; a
sequence of strings. The first string should be the
executable's name.
@type env: a C{dict} mapping C{str} to C{str}, or C{None}.
@param env: the environment variables to pass to the child process. The
resulting behavior varies between platforms. If
- C{env} is not set:
- On POSIX: pass an empty environment.
- On Windows: pass C{os.environ}.
- C{env} is C{None}:
- On POSIX: pass C{os.environ}.
- On Windows: pass C{os.environ}.
- C{env} is a C{dict}:
- On POSIX: pass the key/value pairs in C{env} as the
complete environment.
- On Windows: update C{os.environ} with the key/value
pairs in the C{dict} before passing it. As a
consequence of U{bug #1640
<http://twistedmatrix.com/trac/ticket/1640>}, passing
keys with empty values in an effort to unset
environment variables I{won't} unset them.
@param path: the path to run the subprocess in - defaults to the
current directory.
@param uid: user ID to run the subprocess as. (Only available on
POSIX systems.)
@param gid: group ID to run the subprocess as. (Only available on
POSIX systems.)
@param usePTY: if true, run this process in a pseudo-terminal.
optionally a tuple of C{(masterfd, slavefd, ttyname)},
in which case use those file descriptors.
(Not available on all systems.)
@param childFDs: A dictionary mapping file descriptors in the new child
process to an integer or to the string 'r' or 'w'.
If the value is an integer, it specifies a file
descriptor in the parent process which will be mapped
to a file descriptor (specified by the key) in the
child process. This is useful for things like inetd
and shell-like file redirection.
If it is the string 'r', a pipe will be created and
attached to the child at that file descriptor: the
child will be able to write to that file descriptor
and the parent will receive read notification via the
L{IProcessProtocol.childDataReceived} callback. This
is useful for the child's stdout and stderr.
If it is the string 'w', similar setup to the previous
case will occur, with the pipe being readable by the
child instead of writeable. The parent process can
write to that file descriptor using
L{IProcessTransport.writeToChild}. This is useful for
the child's stdin.
If childFDs is not passed, the default behaviour is to
use a mapping that opens the usual stdin/stdout/stderr
pipes.
@see: L{twisted.internet.protocol.ProcessProtocol}
@return: An object which provides L{IProcessTransport}.
@raise OSError: Raised with errno C{EAGAIN} or C{ENOMEM} if there are
insufficient system resources to create a new process.
"""
class IReactorTime(Interface):
"""
Time methods that a Reactor should implement.
"""
def seconds():
"""
Get the current time in seconds.
@return: A number-like object of some sort.
"""
def callLater(delay, callable, *args, **kw):
"""
Call a function later.
@type delay: C{float}
@param delay: the number of seconds to wait.
@param callable: the callable object to call later.
@param args: the arguments to call it with.
@param kw: the keyword arguments to call it with.
@return: An object which provides L{IDelayedCall} and can be used to
cancel the scheduled call, by calling its C{cancel()} method.
It also may be rescheduled by calling its C{delay()} or
C{reset()} methods.
"""
def getDelayedCalls():
"""
Retrieve all currently scheduled delayed calls.
@return: A tuple of all L{IDelayedCall} providers representing all
currently scheduled calls. This is everything that has been
returned by C{callLater} but not yet called or canceled.
"""
class IDelayedCall(Interface):
"""
A scheduled call.
There are probably other useful methods we can add to this interface;
suggestions are welcome.
"""
def getTime():
"""
Get time when delayed call will happen.
@return: time in seconds since epoch (a float).
"""
def cancel():
"""
Cancel the scheduled call.
@raises twisted.internet.error.AlreadyCalled: if the call has already
happened.
@raises twisted.internet.error.AlreadyCancelled: if the call has already
been cancelled.
"""
def delay(secondsLater):
"""
Delay the scheduled call.
@param secondsLater: how many seconds from its current firing time to delay
@raises twisted.internet.error.AlreadyCalled: if the call has already
happened.
@raises twisted.internet.error.AlreadyCancelled: if the call has already
been cancelled.
"""
def reset(secondsFromNow):
"""
Reset the scheduled call's timer.
@param secondsFromNow: how many seconds from now it should fire,
equivalent to C{.cancel()} and then doing another
C{reactor.callLater(secondsLater, ...)}
@raises twisted.internet.error.AlreadyCalled: if the call has already
happened.
@raises twisted.internet.error.AlreadyCancelled: if the call has already
been cancelled.
"""
def active():
"""
@return: True if this call is still active, False if it has been
called or cancelled.
"""
class IReactorThreads(Interface):
"""
Dispatch methods to be run in threads.
Internally, this should use a thread pool and dispatch methods to them.
"""
def getThreadPool():
"""
Return the threadpool used by L{callInThread}. Create it first if
necessary.
@rtype: L{twisted.python.threadpool.ThreadPool}
"""
def callInThread(callable, *args, **kwargs):
"""
Run the callable object in a separate thread.
"""
def callFromThread(callable, *args, **kw):
"""
Cause a function to be executed by the reactor thread.
Use this method when you want to run a function in the reactor's thread
from another thread. Calling L{callFromThread} should wake up the main
thread (where L{reactor.run()<reactor.run>} is executing) and run the
given callable in that thread.
If you're writing a multi-threaded application the C{callable} may need
to be thread safe, but this method doesn't require it as such. If you
want to call a function in the next mainloop iteration, but you're in
the same thread, use L{callLater} with a delay of 0.
"""
def suggestThreadPoolSize(size):
"""
Suggest the size of the internal threadpool used to dispatch functions
passed to L{callInThread}.
"""
class IReactorCore(Interface):
"""
Core methods that a Reactor must implement.
"""
running = Attribute(
"A C{bool} which is C{True} from I{during startup} to "
"I{during shutdown} and C{False} the rest of the time.")
def resolve(name, timeout=10):
"""
Return a L{twisted.internet.defer.Deferred} that will resolve a hostname.
"""
def run():
"""
Fire 'startup' System Events, move the reactor to the 'running'
state, then run the main loop until it is stopped with C{stop()} or
C{crash()}.
"""
def stop():
"""
Fire 'shutdown' System Events, which will move the reactor to the
'stopped' state and cause C{reactor.run()} to exit.
"""
def crash():
"""
Stop the main loop *immediately*, without firing any system events.
This is named as it is because this is an extremely "rude" thing to do;
it is possible to lose data and put your system in an inconsistent
state by calling this. However, it is necessary, as sometimes a system
can become wedged in a pre-shutdown call.
"""
def iterate(delay=0):
"""
Run the main loop's I/O polling function for a period of time.
This is most useful in applications where the UI is being drawn "as
fast as possible", such as games. All pending L{IDelayedCall}s will
be called.
The reactor must have been started (via the C{run()} method) prior to
any invocations of this method. It must also be stopped manually
after the last call to this method (via the C{stop()} method). This
method is not re-entrant: you must not call it recursively; in
particular, you must not call it while the reactor is running.
"""
def fireSystemEvent(eventType):
"""
Fire a system-wide event.
System-wide events are things like 'startup', 'shutdown', and
'persist'.
"""
def addSystemEventTrigger(phase, eventType, callable, *args, **kw):
"""
Add a function to be called when a system event occurs.
Each "system event" in Twisted, such as 'startup', 'shutdown', and
'persist', has 3 phases: 'before', 'during', and 'after' (in that
order, of course). These events will be fired internally by the
Reactor.
An implementor of this interface must only implement those events
described here.
Callbacks registered for the "before" phase may return either None or a
Deferred. The "during" phase will not execute until all of the
Deferreds from the "before" phase have fired.
Once the "during" phase is running, all of the remaining triggers must
execute; their return values must be ignored.
@param phase: a time to call the event -- either the string 'before',
'after', or 'during', describing when to call it
relative to the event's execution.
@param eventType: this is a string describing the type of event.
@param callable: the object to call before shutdown.
@param args: the arguments to call it with.
@param kw: the keyword arguments to call it with.
@return: an ID that can be used to remove this call with
removeSystemEventTrigger.
"""
def removeSystemEventTrigger(triggerID):
"""
Removes a trigger added with addSystemEventTrigger.
@param triggerID: a value returned from addSystemEventTrigger.
@raise KeyError: If there is no system event trigger for the given
C{triggerID}.
@raise ValueError: If there is no system event trigger for the given
C{triggerID}.
@raise TypeError: If there is no system event trigger for the given
C{triggerID}.
"""
def callWhenRunning(callable, *args, **kw):
"""
Call a function when the reactor is running.
If the reactor has not started, the callable will be scheduled
to run when it does start. Otherwise, the callable will be invoked
immediately.
@param callable: the callable object to call later.
@param args: the arguments to call it with.
@param kw: the keyword arguments to call it with.
@return: None if the callable was invoked, otherwise a system
event id for the scheduled call.
"""
class IReactorPluggableResolver(Interface):
"""
A reactor with a pluggable name resolver interface.
"""
def installResolver(resolver):
"""
Set the internal resolver to use to for name lookups.
@type resolver: An object implementing the L{IResolverSimple} interface
@param resolver: The new resolver to use.
@return: The previously installed resolver.
"""
class IReactorDaemonize(Interface):
"""
A reactor which provides hooks that need to be called before and after
daemonization.
Notes:
- This interface SHOULD NOT be called by applications.
- This interface should only be implemented by reactors as a workaround
(in particular, it's implemented currently only by kqueue()).
For details please see the comments on ticket #1918.
"""
def beforeDaemonize():
"""
Hook to be called immediately before daemonization. No reactor methods
may be called until L{afterDaemonize} is called.
@return: C{None}.
"""
def afterDaemonize():
"""
Hook to be called immediately after daemonization. This may only be
called after L{beforeDaemonize} had been called previously.
@return: C{None}.
"""
class IReactorFDSet(Interface):
"""
Implement me to be able to use L{IFileDescriptor} type resources.
This assumes that your main-loop uses UNIX-style numeric file descriptors
(or at least similarly opaque IDs returned from a .fileno() method)
"""
def addReader(reader):
"""
I add reader to the set of file descriptors to get read events for.
@param reader: An L{IReadDescriptor} provider that will be checked for
read events until it is removed from the reactor with
L{removeReader}.
@return: C{None}.
"""
def addWriter(writer):
"""
I add writer to the set of file descriptors to get write events for.
@param writer: An L{IWriteDescriptor} provider that will be checked for
write events until it is removed from the reactor with
L{removeWriter}.
@return: C{None}.
"""
def removeReader(reader):
"""
Removes an object previously added with L{addReader}.
@return: C{None}.
"""
def removeWriter(writer):
"""
Removes an object previously added with L{addWriter}.
@return: C{None}.
"""
def removeAll():
"""
Remove all readers and writers.
        Should not remove reactor internals (like a waker).
@return: A list of L{IReadDescriptor} and L{IWriteDescriptor} providers
which were removed.
"""
def getReaders():
"""
Return the list of file descriptors currently monitored for input
events by the reactor.
@return: the list of file descriptors monitored for input events.
@rtype: C{list} of C{IReadDescriptor}
"""
def getWriters():
"""
        Return the list of file descriptors currently monitored for output events
by the reactor.
@return: the list of file descriptors monitored for output events.
@rtype: C{list} of C{IWriteDescriptor}
"""
class IListeningPort(Interface):
"""
A listening port.
"""
def startListening():
"""
Start listening on this port.
@raise CannotListenError: If it cannot listen on this port (e.g., it is
a TCP port and it cannot bind to the required
port number).
"""
def stopListening():
"""
Stop listening on this port.
If it does not complete immediately, will return Deferred that fires
upon completion.
"""
def getHost():
"""
Get the host that this port is listening for.
@return: An L{IAddress} provider.
"""
class ILoggingContext(Interface):
"""
Give context information that will be used to log events generated by
this item.
"""
def logPrefix():
"""
@return: Prefix used during log formatting to indicate context.
@rtype: C{str}
"""
class IFileDescriptor(ILoggingContext):
"""
An interface representing a UNIX-style numeric file descriptor.
"""
def fileno():
"""
@raise: If the descriptor no longer has a valid file descriptor
number associated with it.
@return: The platform-specified representation of a file descriptor
number. Or C{-1} if the descriptor no longer has a valid file
descriptor number associated with it. As long as the descriptor
is valid, calls to this method on a particular instance must
return the same value.
"""
def connectionLost(reason):
"""
Called when the connection was lost.
This is called when the connection on a selectable object has been
lost. It will be called whether the connection was closed explicitly,
an exception occurred in an event handler, or the other end of the
connection closed it first.
See also L{IHalfCloseableDescriptor} if your descriptor wants to be
notified separately of the two halves of the connection being closed.
@param reason: A failure instance indicating the reason why the
connection was lost. L{error.ConnectionLost} and
L{error.ConnectionDone} are of special note, but the
failure may be of other classes as well.
"""
class IReadDescriptor(IFileDescriptor):
"""
An L{IFileDescriptor} that can read.
This interface is generally used in conjunction with L{IReactorFDSet}.
"""
def doRead():
"""
Some data is available for reading on your descriptor.
@return: If an error is encountered which causes the descriptor to
no longer be valid, a L{Failure} should be returned. Otherwise,
C{None}.
"""
class IWriteDescriptor(IFileDescriptor):
"""
An L{IFileDescriptor} that can write.
This interface is generally used in conjunction with L{IReactorFDSet}.
"""
def doWrite():
"""
Some data can be written to your descriptor.
@return: If an error is encountered which causes the descriptor to
no longer be valid, a L{Failure} should be returned. Otherwise,
C{None}.
"""
class IReadWriteDescriptor(IReadDescriptor, IWriteDescriptor):
"""
An L{IFileDescriptor} that can both read and write.
"""
class IHalfCloseableDescriptor(Interface):
"""
A descriptor that can be half-closed.
"""
def writeConnectionLost(reason):
"""
Indicates write connection was lost.
"""
def readConnectionLost(reason):
"""
Indicates read connection was lost.
"""
class ISystemHandle(Interface):
"""
An object that wraps a networking OS-specific handle.
"""
def getHandle():
"""
Return a system- and reactor-specific handle.
This might be a socket.socket() object, or some other type of
object, depending on which reactor is being used. Use and
manipulate at your own risk.
This might be used in cases where you want to set specific
options not exposed by the Twisted APIs.
"""
class IConsumer(Interface):
"""
A consumer consumes data from a producer.
"""
def registerProducer(producer, streaming):
"""
Register to receive data from a producer.
This sets self to be a consumer for a producer. When this object runs
out of data (as when a send(2) call on a socket succeeds in moving the
last data from a userspace buffer into a kernelspace buffer), it will
ask the producer to resumeProducing().
For L{IPullProducer} providers, C{resumeProducing} will be called once
each time data is required.
For L{IPushProducer} providers, C{pauseProducing} will be called
whenever the write buffer fills up and C{resumeProducing} will only be
called when it empties.
@type producer: L{IProducer} provider
@type streaming: C{bool}
@param streaming: C{True} if C{producer} provides L{IPushProducer},
C{False} if C{producer} provides L{IPullProducer}.
@raise RuntimeError: If a producer is already registered.
@return: C{None}
"""
def unregisterProducer():
"""
Stop consuming data from a producer, without disconnecting.
"""
def write(data):
"""
The producer will write data by calling this method.
The implementation must be non-blocking and perform whatever
buffering is necessary. If the producer has provided enough data
for now and it is a L{IPushProducer}, the consumer may call its
C{pauseProducing} method.
"""
class IProducer(Interface):
"""
A producer produces data for a consumer.
    Typically producing is done by calling the write method of a class
implementing L{IConsumer}.
"""
def stopProducing():
"""
Stop producing data.
This tells a producer that its consumer has died, so it must stop
producing data for good.
"""
class IPushProducer(IProducer):
"""
    A push producer, also known as a streaming producer, is expected to
produce (write to this consumer) data on a continuous basis, unless
it has been paused. A paused push producer will resume producing
after its resumeProducing() method is called. For a push producer
which is not pauseable, these functions may be noops.
"""
def pauseProducing():
"""
Pause producing data.
Tells a producer that it has produced too much data to process for
the time being, and to stop until resumeProducing() is called.
"""
def resumeProducing():
"""
Resume producing data.
This tells a producer to re-add itself to the main loop and produce
more data for its consumer.
"""
class IPullProducer(IProducer):
"""
A pull producer, also known as a non-streaming producer, is
expected to produce data each time resumeProducing() is called.
"""
def resumeProducing():
"""
Produce data for the consumer a single time.
This tells a producer to produce data for the consumer once
(not repeatedly, once only). Typically this will be done
by calling the consumer's write() method a single time with
produced data.
"""
class IProtocol(Interface):
def dataReceived(data):
"""
Called whenever data is received.
Use this method to translate to a higher-level message. Usually, some
callback will be made upon the receipt of each complete protocol
message.
@param data: a string of indeterminate length. Please keep in mind
that you will probably need to buffer some data, as partial
(or multiple) protocol messages may be received! I recommend
that unit tests for protocols call through to this method with
differing chunk sizes, down to one byte at a time.
"""
def connectionLost(reason):
"""
Called when the connection is shut down.
Clear any circular references here, and any external references
to this Protocol. The connection has been closed. The C{reason}
Failure wraps a L{twisted.internet.error.ConnectionDone} or
L{twisted.internet.error.ConnectionLost} instance (or a subclass
of one of those).
@type reason: L{twisted.python.failure.Failure}
"""
def makeConnection(transport):
"""
Make a connection to a transport and a server.
"""
def connectionMade():
"""
Called when a connection is made.
This may be considered the initializer of the protocol, because
it is called when the connection is completed. For clients,
this is called once the connection to the server has been
established; for servers, this is called after an accept() call
stops blocking and a socket has been received. If you need to
send any greeting or initial message, do it here.
"""
class IProcessProtocol(Interface):
"""
Interface for process-related event handlers.
"""
def makeConnection(process):
"""
Called when the process has been created.
@type process: L{IProcessTransport} provider
@param process: An object representing the process which has been
created and associated with this protocol.
"""
def childDataReceived(childFD, data):
"""
Called when data arrives from the child process.
@type childFD: C{int}
@param childFD: The file descriptor from which the data was
received.
@type data: C{str}
@param data: The data read from the child's file descriptor.
"""
def childConnectionLost(childFD):
"""
Called when a file descriptor associated with the child process is
closed.
@type childFD: C{int}
@param childFD: The file descriptor which was closed.
"""
def processExited(reason):
"""
Called when the child process exits.
@type reason: L{twisted.python.failure.Failure}
@param reason: A failure giving the reason the child process
terminated. The type of exception for this failure is either
L{twisted.internet.error.ProcessDone} or
L{twisted.internet.error.ProcessTerminated}.
@since: 8.2
"""
def processEnded(reason):
"""
Called when the child process exits and all file descriptors associated
with it have been closed.
@type reason: L{twisted.python.failure.Failure}
@param reason: A failure giving the reason the child process
terminated. The type of exception for this failure is either
L{twisted.internet.error.ProcessDone} or
L{twisted.internet.error.ProcessTerminated}.
"""
class IHalfCloseableProtocol(Interface):
"""
Implemented to indicate they want notification of half-closes.
TCP supports the notion of half-closing the connection, e.g.
closing the write side but still not stopping reading. A protocol
that implements this interface will be notified of such events,
instead of having connectionLost called.
"""
def readConnectionLost():
"""
Notification of the read connection being closed.
        This indicates the peer did a half-close of its write side. It is now
        the responsibility of this protocol to call
loseConnection(). In addition, the protocol MUST make sure a
reference to it still exists (i.e. by doing a callLater with
one of its methods, etc.) as the reactor will only have a
reference to it if it is writing.
If the protocol does not do so, it might get garbage collected
without the connectionLost method ever being called.
"""
def writeConnectionLost():
"""
Notification of the write connection being closed.
This will never be called for TCP connections as TCP does not
support notification of this type of half-close.
"""
class IFileDescriptorReceiver(Interface):
"""
Protocols may implement L{IFileDescriptorReceiver} to receive file
descriptors sent to them. This is useful in conjunction with
L{IUNIXTransport}, which allows file descriptors to be sent between
processes on a single host.
"""
def fileDescriptorReceived(descriptor):
"""
Called when a file descriptor is received over the connection.
@param descriptor: The descriptor which was received.
@type descriptor: C{int}
@return: C{None}
"""
class IProtocolFactory(Interface):
"""
Interface for protocol factories.
"""
def buildProtocol(addr):
"""
Called when a connection has been established to addr.
If None is returned, the connection is assumed to have been refused,
and the Port will close the connection.
@type addr: (host, port)
@param addr: The address of the newly-established connection
@return: None if the connection was refused, otherwise an object
providing L{IProtocol}.
"""
def doStart():
"""
Called every time this is connected to a Port or Connector.
"""
def doStop():
"""
Called every time this is unconnected from a Port or Connector.
"""
class ITransport(Interface):
"""
I am a transport for bytes.
I represent (and wrap) the physical connection and synchronicity
of the framework which is talking to the network. I make no
representations about whether calls to me will happen immediately
or require returning to a control loop, or whether they will happen
in the same or another thread. Consider methods of this class
(aside from getPeer) to be 'thrown over the wall', to happen at some
indeterminate time.
"""
def write(data):
"""
Write some data to the physical connection, in sequence, in a
non-blocking fashion.
If possible, make sure that it is all written. No data will
ever be lost, although (obviously) the connection may be closed
before it all gets through.
"""
def writeSequence(data):
"""
Write a list of strings to the physical connection.
If possible, make sure that all of the data is written to
the socket at once, without first copying it all into a
single string.
"""
def loseConnection():
"""
Close my connection, after writing all pending data.
Note that if there is a registered producer on a transport it
will not be closed until the producer has been unregistered.
"""
def getPeer():
"""
Get the remote address of this connection.
Treat this method with caution. It is the unfortunate result of the
CGI and Jabber standards, but should not be considered reliable for
the usual host of reasons; port forwarding, proxying, firewalls, IP
masquerading, etc.
@return: An L{IAddress} provider.
"""
def getHost():
"""
Similar to getPeer, but returns an address describing this side of the
connection.
@return: An L{IAddress} provider.
"""
class ITCPTransport(ITransport):
"""
A TCP based transport.
"""
def loseWriteConnection():
"""
Half-close the write side of a TCP connection.
If the protocol instance this is attached to provides
IHalfCloseableProtocol, it will get notified when the operation is
        done. When closing the write connection, as with loseConnection this will
        only happen once the buffer has emptied and there is no registered
producer.
"""
def abortConnection():
"""
Close the connection abruptly.
Discards any buffered data, stops any registered producer,
and, if possible, notifies the other end of the unclean
closure.
@since: 11.1
"""
def getTcpNoDelay():
"""
Return if C{TCP_NODELAY} is enabled.
"""
def setTcpNoDelay(enabled):
"""
Enable/disable C{TCP_NODELAY}.
Enabling C{TCP_NODELAY} turns off Nagle's algorithm. Small packets are
sent sooner, possibly at the expense of overall throughput.
"""
def getTcpKeepAlive():
"""
Return if C{SO_KEEPALIVE} is enabled.
"""
def setTcpKeepAlive(enabled):
"""
Enable/disable C{SO_KEEPALIVE}.
Enabling C{SO_KEEPALIVE} sends packets periodically when the connection
is otherwise idle, usually once every two hours. They are intended
to allow detection of lost peers in a non-infinite amount of time.
"""
def getHost():
"""
Returns L{IPv4Address} or L{IPv6Address}.
"""
def getPeer():
"""
Returns L{IPv4Address} or L{IPv6Address}.
"""
class IUNIXTransport(ITransport):
"""
Transport for stream-oriented unix domain connections.
"""
def sendFileDescriptor(descriptor):
"""
Send a duplicate of this (file, socket, pipe, etc) descriptor to the
other end of this connection.
The send is non-blocking and will be queued if it cannot be performed
immediately. The send will be processed in order with respect to other
C{sendFileDescriptor} calls on this transport, but not necessarily with
respect to C{write} calls on this transport. The send can only be
processed if there are also bytes in the normal connection-oriented send
buffer (ie, you must call C{write} at least as many times as you call
C{sendFileDescriptor}).
@param descriptor: An C{int} giving a valid file descriptor in this
process. Note that a I{file descriptor} may actually refer to a
socket, a pipe, or anything else POSIX tries to treat in the same
way as a file.
@return: C{None}
"""
class IOpenSSLServerConnectionCreator(Interface):
"""
A provider of L{IOpenSSLServerConnectionCreator} can create
L{OpenSSL.SSL.Connection} objects for TLS servers.
@see: L{twisted.internet.ssl}
@note: Creating OpenSSL connection objects is subtle, error-prone, and
security-critical. Before implementing this interface yourself,
consider using L{twisted.internet.ssl.CertificateOptions} as your
C{contextFactory}. (For historical reasons, that class does not
actually I{implement} this interface; nevertheless it is usable in all
Twisted APIs which require a provider of this interface.)
"""
def serverConnectionForTLS(tlsProtocol):
"""
Create a connection for the given server protocol.
@param tlsProtocol: the protocol server making the request.
@type tlsProtocol: L{twisted.protocols.tls.TLSMemoryBIOProtocol}.
@return: an OpenSSL connection object configured appropriately for the
given Twisted protocol.
@rtype: L{OpenSSL.SSL.Connection}
"""
class IOpenSSLClientConnectionCreator(Interface):
"""
A provider of L{IOpenSSLClientConnectionCreator} can create
L{OpenSSL.SSL.Connection} objects for TLS clients.
@see: L{twisted.internet.ssl}
@note: Creating OpenSSL connection objects is subtle, error-prone, and
security-critical. Before implementing this interface yourself,
consider using L{twisted.internet.ssl.optionsForClientTLS} as your
C{contextFactory}.
"""
def clientConnectionForTLS(tlsProtocol):
"""
Create a connection for the given client protocol.
@param tlsProtocol: the client protocol making the request.
@type tlsProtocol: L{twisted.protocols.tls.TLSMemoryBIOProtocol}.
@return: an OpenSSL connection object configured appropriately for the
given Twisted protocol.
@rtype: L{OpenSSL.SSL.Connection}
"""
class ITLSTransport(ITCPTransport):
"""
A TCP transport that supports switching to TLS midstream.
Once TLS mode is started the transport will implement L{ISSLTransport}.
"""
def startTLS(contextFactory):
"""
Initiate TLS negotiation.
@param contextFactory: An object which creates appropriately configured
TLS connections.
For clients, use L{twisted.internet.ssl.optionsForClientTLS}; for
servers, use L{twisted.internet.ssl.CertificateOptions}.
@type contextFactory: L{IOpenSSLClientConnectionCreator} or
L{IOpenSSLServerConnectionCreator}, depending on whether this
L{ITLSTransport} is a server or not. If the appropriate interface
is not provided by the value given for C{contextFactory}, it must
be an old-style L{twisted.internet.ssl.ContextFactory} or similar.
"""
class ISSLTransport(ITCPTransport):
"""
A SSL/TLS based transport.
"""
def getPeerCertificate():
"""
Return an object with the peer's certificate info.
"""
class ICipher(Interface):
"""
A TLS cipher.
"""
fullName = Attribute(
"The fully qualified name of the cipher in L{unicode}."
)
class IAcceptableCiphers(Interface):
"""
A list of acceptable ciphers for a TLS context.
"""
def selectCiphers(availableCiphers):
"""
Choose which ciphers to allow to be negotiated on a TLS connection.
@param availableCiphers: A L{list} of L{ICipher} which gives the names
of all ciphers supported by the TLS implementation in use.
@return: A L{list} of L{ICipher} which represents the ciphers
which may be negotiated on the TLS connection. The result is
ordered by preference with more preferred ciphers appearing
earlier.
"""
class IProcessTransport(ITransport):
"""
A process transport.
"""
pid = Attribute(
"From before L{IProcessProtocol.makeConnection} is called to before "
"L{IProcessProtocol.processEnded} is called, C{pid} is an L{int} "
"giving the platform process ID of this process. C{pid} is L{None} "
"at all other times.")
def closeStdin():
"""
Close stdin after all data has been written out.
"""
def closeStdout():
"""
Close stdout.
"""
def closeStderr():
"""
Close stderr.
"""
def closeChildFD(descriptor):
"""
Close a file descriptor which is connected to the child process, identified
by its FD in the child process.
"""
def writeToChild(childFD, data):
"""
Similar to L{ITransport.write} but also allows the file descriptor in
the child process which will receive the bytes to be specified.
@type childFD: C{int}
@param childFD: The file descriptor to which to write.
@type data: C{str}
@param data: The bytes to write.
@return: C{None}
@raise KeyError: If C{childFD} is not a file descriptor that was mapped
in the child when L{IReactorProcess.spawnProcess} was used to create
it.
"""
def loseConnection():
"""
Close stdin, stderr and stdout.
"""
def signalProcess(signalID):
"""
Send a signal to the process.
@param signalID: can be
- one of C{"KILL"}, C{"TERM"}, or C{"INT"}.
These will be implemented in a
cross-platform manner, and so should be used
if possible.
- an integer, where it represents a POSIX
signal ID.
@raise twisted.internet.error.ProcessExitedAlready: If the process has
already exited.
@raise OSError: If the C{os.kill} call fails with an errno different
from C{ESRCH}.
"""
class IServiceCollection(Interface):
"""
An object which provides access to a collection of services.
"""
def getServiceNamed(serviceName):
"""
Retrieve the named service from this application.
Raise a C{KeyError} if there is no such service name.
"""
def addService(service):
"""
Add a service to this collection.
"""
def removeService(service):
"""
Remove a service from this collection.
"""
class IUDPTransport(Interface):
"""
Transport for UDP DatagramProtocols.
"""
def write(packet, addr=None):
"""
Write packet to given address.
@param addr: a tuple of (ip, port). For connected transports must
be the address the transport is connected to, or None.
In non-connected mode this is mandatory.
@raise twisted.internet.error.MessageLengthError: C{packet} was too
long.
"""
def connect(host, port):
"""
Connect the transport to an address.
This changes it to connected mode. Datagrams can only be sent to
this address, and will only be received from this address. In addition
the protocol's connectionRefused method might get called if destination
is not receiving datagrams.
@param host: an IP address, not a domain name ('127.0.0.1', not 'localhost')
@param port: port to connect to.
"""
def getHost():
"""
Get this port's host address.
@return: an address describing the listening port.
@rtype: L{IPv4Address} or L{IPv6Address}.
"""
def stopListening():
"""
Stop listening on this port.
If it does not complete immediately, will return L{Deferred} that fires
upon completion.
"""
def setBroadcastAllowed(enabled):
"""
Set whether this port may broadcast.
@param enabled: Whether the port may broadcast.
@type enabled: L{bool}
"""
def getBroadcastAllowed():
"""
Checks if broadcast is currently allowed on this port.
@return: Whether this port may broadcast.
@rtype: L{bool}
"""
class IUNIXDatagramTransport(Interface):
"""
Transport for UDP PacketProtocols.
"""
def write(packet, address):
"""
Write packet to given address.
"""
def getHost():
"""
Returns L{UNIXAddress}.
"""
class IUNIXDatagramConnectedTransport(Interface):
"""
Transport for UDP ConnectedPacketProtocols.
"""
def write(packet):
"""
Write packet to address we are connected to.
"""
def getHost():
"""
Returns L{UNIXAddress}.
"""
def getPeer():
"""
Returns L{UNIXAddress}.
"""
class IMulticastTransport(Interface):
"""
Additional functionality for multicast UDP.
"""
def getOutgoingInterface():
"""
Return interface of outgoing multicast packets.
"""
def setOutgoingInterface(addr):
"""
Set interface for outgoing multicast packets.
Returns Deferred of success.
"""
def getLoopbackMode():
"""
Return if loopback mode is enabled.
"""
def setLoopbackMode(mode):
"""
Set if loopback mode is enabled.
"""
def getTTL():
"""
Get time to live for multicast packets.
"""
def setTTL(ttl):
"""
Set time to live on multicast packets.
"""
def joinGroup(addr, interface=""):
"""
Join a multicast group. Returns L{Deferred} of success or failure.
If an error occurs, the returned L{Deferred} will fail with
L{error.MulticastJoinError}.
"""
def leaveGroup(addr, interface=""):
"""
Leave multicast group, return L{Deferred} of success.
"""
class IStreamClientEndpoint(Interface):
"""
A stream client endpoint is a place that L{ClientFactory} can connect to.
For example, a remote TCP host/port pair would be a TCP client endpoint.
@since: 10.1
"""
def connect(protocolFactory):
"""
Connect the C{protocolFactory} to the location specified by this
L{IStreamClientEndpoint} provider.
@param protocolFactory: A provider of L{IProtocolFactory}
@return: A L{Deferred} that results in an L{IProtocol} upon successful
connection otherwise a L{Failure} wrapping L{ConnectError} or
L{NoProtocol <twisted.internet.error.NoProtocol>}.
"""
class IStreamServerEndpoint(Interface):
"""
A stream server endpoint is a place that a L{Factory} can listen for
incoming connections.
@since: 10.1
"""
def listen(protocolFactory):
"""
Listen with C{protocolFactory} at the location specified by this
L{IStreamServerEndpoint} provider.
@param protocolFactory: A provider of L{IProtocolFactory}
@return: A L{Deferred} that results in an L{IListeningPort} or an
L{CannotListenError}
"""
class IStreamServerEndpointStringParser(Interface):
"""
An L{IStreamServerEndpointStringParser} is like an
L{IStreamClientEndpointStringParser}, except for L{IStreamServerEndpoint}s
instead of clients. It integrates with L{endpoints.serverFromString} in
much the same way.
"""
prefix = Attribute(
"""
@see: L{IStreamClientEndpointStringParser.prefix}
"""
)
def parseStreamServer(reactor, *args, **kwargs):
"""
Parse a stream server endpoint from a reactor and string-only arguments
and keyword arguments.
@see: L{IStreamClientEndpointStringParser.parseStreamClient}
@return: a stream server endpoint
@rtype: L{IStreamServerEndpoint}
"""
class IStreamClientEndpointStringParser(Interface):
"""
This interface is deprecated since Twisted 14.0; please use the
L{IStreamClientEndpointStringParserWithReactor} interface instead.
An L{IStreamClientEndpointStringParser} is a parser which can convert
a set of string C{*args} and C{**kwargs} into an L{IStreamClientEndpoint}
provider.
This interface is really only useful in the context of the plugin system
for L{endpoints.clientFromString}. See the document entitled "I{The
Twisted Plugin System}" for more details on how to write a plugin.
If you place an L{IStreamClientEndpointStringParser} plugin in the
C{twisted.plugins} package, that plugin's C{parseStreamClient} method will
be used to produce endpoints for any description string that begins with
the result of that L{IStreamClientEndpointStringParser}'s prefix attribute.
If a L{IStreamClientEndpointStringParserWithReactor} plugin and
L{IStreamClientEndpointStringParser} plugin share the same prefix, the
L{IStreamClientEndpointStringParserWithReactor} plugin will be preferred.
"""
prefix = Attribute(
"""
A C{str}, the description prefix to respond to. For example, an
L{IStreamClientEndpointStringParser} plugin which had C{"foo"} for its
C{prefix} attribute would be called for endpoint descriptions like
C{"foo:bar:baz"} or C{"foo:"}.
"""
)
def parseStreamClient(*args, **kwargs):
"""
This method is invoked by L{endpoints.clientFromString}, if the type of
        endpoint matches the value of this
        L{IStreamClientEndpointStringParser}'s C{prefix} attribute.
@param args: The string arguments, minus the endpoint type, in the
endpoint description string, parsed according to the rules
described in L{endpoints.quoteStringArgument}. For example, if the
description were C{"my-type:foo:bar:baz=qux"}, C{args} would be
C{('foo','bar')}
@param kwargs: The string arguments from the endpoint description
passed as keyword arguments. For example, if the description were
C{"my-type:foo:bar:baz=qux"}, C{kwargs} would be
C{dict(baz='qux')}.
@return: a client endpoint
@rtype: L{IStreamClientEndpoint}
"""
deprecate.deprecatedModuleAttribute(
Version("Twisted", 14, 0, 0),
"This interface has been superseded by "
"IStreamClientEndpointStringParserWithReactor.",
__name__,
"IStreamClientEndpointStringParser")
class IStreamClientEndpointStringParserWithReactor(Interface):
"""
An L{IStreamClientEndpointStringParserWithReactor} is a parser which can
convert a set of string C{*args} and C{**kwargs} into an
L{IStreamClientEndpoint} provider. It's much like
L{IStreamClientEndpointStringParser}, except that the reactor is passed
along to L{parseStreamClient} too.
This interface is really only useful in the context of the plugin system
for L{endpoints.clientFromString}. See the document entitled "I{The
Twisted Plugin System}" for more details on how to write a plugin.
If you place an L{IStreamClientEndpointStringParserWithReactor} plugin in
the C{twisted.plugins} package, that plugin's C{parseStreamClient} method
will be used to produce endpoints for any description string that begins
with the result of that L{IStreamClientEndpointStringParserWithReactor}'s
prefix attribute.
If a L{IStreamClientEndpointStringParserWithReactor} plugin and
L{IStreamClientEndpointStringParser} plugin share the same prefix, the
L{IStreamClientEndpointStringParserWithReactor} plugin will be preferred.
"""
prefix = Attribute(
"""
L{bytes}, the description prefix to respond to. For example, an
L{IStreamClientEndpointStringParserWithReactor} plugin which had
C{b"foo"} for its C{prefix} attribute would be called for endpoint
descriptions like C{b"foo:bar:baz"} or C{b"foo:"}.
"""
)
def parseStreamClient(reactor, *args, **kwargs):
"""
This method is invoked by L{endpoints.clientFromString}, if the type of
        endpoint matches the value of this
        L{IStreamClientEndpointStringParserWithReactor}'s C{prefix} attribute.
@param reactor: The reactor passed to L{endpoints.clientFromString}.
@param args: The byte string arguments, minus the endpoint type, in the
endpoint description string, parsed according to the rules
described in L{endpoints.quoteStringArgument}. For example, if the
description were C{b"my-type:foo:bar:baz=qux"}, C{args} would be
C{(b'foo', b'bar')}
@param kwargs: The byte string arguments from the endpoint description
passed as keyword arguments. For example, if the description were
C{b"my-type:foo:bar:baz=qux"}, C{kwargs} would be
C{dict(baz=b'qux')}.
@return: a client endpoint
@rtype: a provider of L{IStreamClientEndpoint}
"""
| engdan77/edoAutoHomeMobile | twisted/internet/interfaces.py | Python | mit | 90,290 | 0.001207 |
# for the numeric types we have the following operators:
# + - * / % **
print "Integers:"
x = 10
y = 3
print x, "+", y, "=", x + y
print x, "-", y, "=", x - y
print x, "*", y, "=", x*y
print x, "/", y, "=", x/y # note how the result is an integer
print x, "%", y, "=", x % y # this is the remainder of the division
print x, "**", y, "=", x**y # this is the power operator, x raised to the power of y
print x, "(",bin(x),") & ",y,"(",bin(y),") =", x&y # bitwise AND operator
print x, "(",bin(x),") | ",y,"(",bin(y),") =", x|y # bitwise OR operator
print x, "(",bin(x),") ^ ",y,"(",bin(y),") =", x^y # bitwise XOR operator
print x," equal to ",y,"? ", x==y
print x," different from ",y,"? ", x!=y
print x," greater than ",y,"? ", x>y
print x," less than ",y,"? ", x<y
print x," greater than or equal to ",y,"? ", x>=y
print x," less than or equal to ",y,"? ", x<=y
print "\nFloating point numbers: "
x = 10.0
y = 3.0
print x, "+", y, "=", x + y
print x, "-", y, "=", x - y
print x, "*", y, "=", x*y
print x, "/", y, "=", x/y # now the result is a real number
print x, "%", y, "=", x % y # this is the remainder of the division
print x, "**", y, "=", x**y # this is the power operator, x raised to the power of y
print "\nComplex numbers:"
x = 1 + 1j
y = 2 + 1j
print x, "+", y, "=", x + y
print x, "-", y, "=", x - y
print x, "*", y, "=", x*y
print x, "/", y, "=", x/y # division also works on complex numbers
print x, "%", y, "=", x % y # this is the remainder of the division
print x, "**", y, "=", x**y # this is the power operator, x raised to the power of y
print "\nBoolean variables:"
# now x is a boolean (logical) variable
x = True
y = False
print "Not ", x, "=", not x
print x," or ",y,"=",x or y
print x," and ",y,"=",x and y
x = 10
y = 3
print x, " greater than ", y, " OR ", x, " less than ", y, "? ", x>y or x<y
print x, " greater than ", y, " AND ", x, " less than ", y, "? ", x>y and x<y
print "\nString operations:"
x = "Ola "
y = "Mundo"
print x," + ",y," = ",x+y
print x," *2 = ",x*2
print x,"*2 + ",y," = ",x*2 + y
print "Letter at position 0 of x = ",x[0]
print "Concatenating the first 3 letters of x with y = ",x[0:3] + y
# Membership operators (in / not in)
print "Is there an 'a' in Ola? ", "a" in x
print "Is there no 'b' in Ola? ", "b" not in x
| folivetti/PI-UFABC | AULA_01/Python/operadores.py | Python | mit | 2,187 | 0.038866 |
# -*- coding: utf-8 -*-
# =================================================================
#
# Authors: Tom Kralidis <[email protected]>
# Angelos Tzotsos <[email protected]>
#
# Copyright (c) 2015 Tom Kralidis
# Copyright (c) 2015 Angelos Tzotsos
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
import logging
from pycsw.core import util
from pycsw.core.etree import etree
from pycsw.ogc.gml import gml3
LOGGER = logging.getLogger(__name__)
MODEL = {
'Conformance': {
'values': [
'ImplementsQuery',
'ImplementsAdHocQuery',
'ImplementsFunctions',
            'ImplementsResourceId',
'ImplementsMinStandardFilter',
'ImplementsStandardFilter',
'ImplementsMinSpatialFilter',
'ImplementsSpatialFilter',
'ImplementsMinTemporalFilter',
'ImplementsTemporalFilter',
'ImplementsVersionNav',
'ImplementsSorting',
'ImplementsExtendedOperators',
'ImplementsMinimumXPath',
'ImplementsSchemaElementFunc'
]
},
'GeometryOperands': {
'values': gml3.TYPES
},
'SpatialOperators': {
'values': ['BBOX', 'Beyond', 'Contains', 'Crosses', 'Disjoint',
'DWithin', 'Equals', 'Intersects', 'Overlaps', 'Touches', 'Within']
},
'ComparisonOperators': {
'fes20:PropertyIsBetween': {'opname': 'PropertyIsBetween', 'opvalue': 'and'},
'fes20:PropertyIsEqualTo': {'opname': 'PropertyIsEqualTo', 'opvalue': '='},
'fes20:PropertyIsGreaterThan': {'opname': 'PropertyIsGreaterThan', 'opvalue': '>'},
'fes20:PropertyIsGreaterThanOrEqualTo': {
'opname': 'PropertyIsGreaterThanOrEqualTo', 'opvalue': '>='},
'fes20:PropertyIsLessThan': {'opname': 'PropertyIsLessThan', 'opvalue': '<'},
'fes20:PropertyIsLessThanOrEqualTo': {
'opname': 'PropertyIsLessThanOrEqualTo', 'opvalue': '<='},
'fes20:PropertyIsLike': {'opname': 'PropertyIsLike', 'opvalue': 'like'},
'fes20:PropertyIsNotEqualTo': {'opname': 'PropertyIsNotEqualTo', 'opvalue': '!='},
'fes20:PropertyIsNull': {'opname': 'PropertyIsNull', 'opvalue': 'is null'},
},
'Functions': {
'length': {'returns': 'xs:string'},
'lower': {'returns': 'xs:string'},
'ltrim': {'returns': 'xs:string'},
'rtrim': {'returns': 'xs:string'},
'trim': {'returns': 'xs:string'},
'upper': {'returns': 'xs:string'},
},
'Ids': {
'values': ['csw30:id']
}
}
def parse(element, queryables, dbtype, nsmap, orm='sqlalchemy', language='english', fts=False):
"""OGC Filter object support"""
boq = None
is_pg = dbtype.startswith('postgresql')
tmp = element.xpath('fes20:And|fes20:Or|fes20:Not', namespaces=nsmap)
if len(tmp) > 0: # this is binary logic query
element_name = etree.QName(tmp[0]).localname
boq = ' %s ' % element_name.lower()
LOGGER.debug('Binary logic detected; operator=%s', boq)
tmp = tmp[0]
else:
tmp = element
pvalue_serial = [0]
def assign_param():
if orm == 'django':
return '%s'
param = ':pvalue%d' % pvalue_serial[0]
pvalue_serial[0] += 1
return param
def _get_comparison_expression(elem):
"""return the SQL expression based on Filter query"""
fname = None
matchcase = elem.attrib.get('matchCase')
wildcard = elem.attrib.get('wildCard')
singlechar = elem.attrib.get('singleChar')
expression = None
if wildcard is None:
wildcard = '%'
if singlechar is None:
singlechar = '_'
if (elem.xpath('child::*')[0].tag ==
util.nspath_eval('fes20:Function', nsmap)):
LOGGER.debug('fes20:Function detected')
if (elem.xpath('child::*')[0].attrib['name'] not in
MODEL['Functions']):
raise RuntimeError('Invalid fes20:Function: %s' %
(elem.xpath('child::*')[0].attrib['name']))
fname = elem.xpath('child::*')[0].attrib['name']
try:
LOGGER.debug('Testing existence of fes20:ValueReference')
pname = queryables[elem.find(util.nspath_eval('fes20:Function/fes20:ValueReference', nsmap)).text]['dbcol']
except Exception as err:
raise RuntimeError('Invalid PropertyName: %s. %s' % (elem.find(util.nspath_eval('fes20:Function/fes20:ValueReference', nsmap)).text, str(err))) from err
else:
try:
LOGGER.debug('Testing existence of fes20:ValueReference')
pname = queryables[elem.find(
util.nspath_eval('fes20:ValueReference', nsmap)).text]['dbcol']
except Exception as err:
raise RuntimeError('Invalid PropertyName: %s. %s' %
(elem.find(util.nspath_eval('fes20:ValueReference',
nsmap)).text, str(err))) from err
if (elem.tag != util.nspath_eval('fes20:PropertyIsBetween', nsmap)):
if elem.tag in [util.nspath_eval('fes20:%s' % n, nsmap) for n in
MODEL['SpatialOperators']['values']]:
boolean_true = '\'true\''
boolean_false = '\'false\''
if dbtype == 'mysql':
boolean_true = 'true'
boolean_false = 'false'
return "%s = %s" % (_get_spatial_operator(queryables['pycsw:BoundingBox'], elem, dbtype, nsmap), boolean_true)
else:
pval = elem.find(util.nspath_eval('fes20:Literal', nsmap)).text
com_op = _get_comparison_operator(elem)
LOGGER.debug('Comparison operator: %s', com_op)
# if this is a case insensitive search
# then set the DB-specific LIKE comparison operator
LOGGER.debug('Setting csw:AnyText property')
anytext = queryables['csw:AnyText']['dbcol']
if ((matchcase is not None and matchcase == 'false') or
pname == anytext):
com_op = 'ilike' if is_pg else 'like'
if (elem.tag == util.nspath_eval('fes20:PropertyIsBetween', nsmap)):
com_op = 'between'
lower_boundary = elem.find(
util.nspath_eval('fes20:LowerBoundary/fes20:Literal',
nsmap)).text
upper_boundary = elem.find(
util.nspath_eval('fes20:UpperBoundary/fes20:Literal',
nsmap)).text
expression = "%s %s %s and %s" % \
(pname, com_op, assign_param(), assign_param())
values.append(lower_boundary)
values.append(upper_boundary)
else:
if pname == anytext and is_pg and fts:
LOGGER.debug('PostgreSQL FTS specific search')
# do nothing, let FTS do conversion (#212)
pvalue = pval
else:
LOGGER.debug('PostgreSQL non-FTS specific search')
pvalue = pval.replace(wildcard, '%').replace(singlechar, '_')
if pname == anytext: # pad anytext with wildcards
LOGGER.debug('PostgreSQL non-FTS specific anytext search')
LOGGER.debug('old value: %s', pval)
pvalue = '%%%s%%' % pvalue.rstrip('%').lstrip('%')
LOGGER.debug('new value: %s', pvalue)
values.append(pvalue)
if boq == ' not ':
if fname is not None:
expression = "%s is null or not %s(%s) %s %s" % \
(pname, fname, pname, com_op, assign_param())
elif pname == anytext and is_pg and fts:
LOGGER.debug('PostgreSQL FTS specific search')
expression = ("%s is null or not plainto_tsquery('%s', %s) @@ anytext_tsvector" %
(anytext, language, assign_param()))
else:
LOGGER.debug('PostgreSQL non-FTS specific search')
expression = "%s is null or not %s %s %s" % \
(pname, pname, com_op, assign_param())
else:
if fname is not None:
expression = "%s(%s) %s %s" % \
(fname, pname, com_op, assign_param())
elif pname == anytext and is_pg and fts:
LOGGER.debug('PostgreSQL FTS specific search')
expression = ("plainto_tsquery('%s', %s) @@ anytext_tsvector" %
(language, assign_param()))
else:
LOGGER.debug('PostgreSQL non-FTS specific search')
expression = "%s %s %s" % (pname, com_op, assign_param())
return expression
queries = []
queries_nested = []
values = []
LOGGER.debug('Scanning children elements')
for child in tmp.xpath('child::*'):
com_op = ''
boolean_true = '\'true\''
boolean_false = '\'false\''
if dbtype == 'mysql':
boolean_true = 'true'
boolean_false = 'false'
if child.tag == util.nspath_eval('fes20:Not', nsmap):
LOGGER.debug('fes20:Not query detected')
child_not = child.xpath('child::*')[0]
if child_not.tag in \
[util.nspath_eval('fes20:%s' % n, nsmap) for n in
MODEL['SpatialOperators']['values']]:
LOGGER.debug('fes20:Not / spatial operator detected: %s', child.tag)
queries.append("%s = %s" %
(_get_spatial_operator(
queryables['pycsw:BoundingBox'],
child.xpath('child::*')[0], dbtype, nsmap),
boolean_false))
else:
LOGGER.debug('fes20:Not / comparison operator detected: %s', child.tag)
queries.append('not %s' % _get_comparison_expression(child_not))
elif child.tag in \
[util.nspath_eval('fes20:%s' % n, nsmap) for n in
MODEL['SpatialOperators']['values']]:
LOGGER.debug('spatial operator detected: %s', child.tag)
if boq is not None and boq == ' not ':
                # for fes20:Not spatial queries in PostGIS we must explicitly
# test that pycsw:BoundingBox is null as well
# TODO: Do we need the same for 'postgresql+postgis+native'???
if dbtype == 'postgresql+postgis+wkt':
LOGGER.debug('Setting bbox is null test in PostgreSQL')
queries.append("%s = %s or %s is null" %
(_get_spatial_operator(
queryables['pycsw:BoundingBox'],
child, dbtype, nsmap), boolean_false,
queryables['pycsw:BoundingBox']))
else:
queries.append("%s = %s" %
(_get_spatial_operator(
queryables['pycsw:BoundingBox'],
child, dbtype, nsmap), boolean_false))
else:
queries.append("%s = %s" %
(_get_spatial_operator(
queryables['pycsw:BoundingBox'],
child, dbtype, nsmap), boolean_true))
elif child.tag == util.nspath_eval('fes20:FeatureId', nsmap):
LOGGER.debug('fes20:FeatureId filter detected')
queries.append("%s = %s" % (queryables['pycsw:Identifier'], assign_param()))
values.append(child.attrib.get('fid'))
else: # comparison operator
LOGGER.debug('Comparison operator processing')
child_tag_name = etree.QName(child).localname
tagname = ' %s ' % child_tag_name.lower()
if tagname in [' or ', ' and ']: # this is a nested binary logic query
LOGGER.debug('Nested binary logic detected; operator=%s', tagname)
for child2 in child.xpath('child::*'):
queries_nested.append(_get_comparison_expression(child2))
queries.append('(%s)' % tagname.join(queries_nested))
else:
queries.append(_get_comparison_expression(child))
where = boq.join(queries) if (boq is not None and boq != ' not ') \
else queries[0]
return where, values
def _get_spatial_operator(geomattr, element, dbtype, nsmap, postgis_geometry_column='wkb_geometry'):
"""return the spatial predicate function"""
property_name = element.find(util.nspath_eval('fes20:ValueReference', nsmap))
distance = element.find(util.nspath_eval('fes20:Distance', nsmap))
distance = 'false' if distance is None else distance.text
LOGGER.debug('Scanning for spatial property name')
if property_name is None:
raise RuntimeError('Missing fes20:ValueReference in spatial filter')
if (property_name.text.find('BoundingBox') == -1 and
property_name.text.find('Envelope') == -1):
raise RuntimeError('Invalid fes20:ValueReference in spatial filter: %s' %
property_name.text)
geometry = gml3.Geometry(element, nsmap)
#make decision to apply spatial ranking to results
set_spatial_ranking(geometry)
spatial_predicate = etree.QName(element).localname.lower()
LOGGER.debug('Spatial predicate: %s', spatial_predicate)
if dbtype == 'mysql': # adjust spatial query for MySQL
LOGGER.debug('Adjusting spatial query for MySQL')
if spatial_predicate == 'bbox':
spatial_predicate = 'intersects'
if spatial_predicate == 'beyond':
spatial_query = "ifnull(distance(geomfromtext(%s), \
geomfromtext('%s')) > convert(%s, signed),false)" % \
(geomattr, geometry.wkt, distance)
elif spatial_predicate == 'dwithin':
spatial_query = "ifnull(distance(geomfromtext(%s), \
geomfromtext('%s')) <= convert(%s, signed),false)" % \
(geomattr, geometry.wkt, distance)
else:
spatial_query = "ifnull(%s(geomfromtext(%s), \
geomfromtext('%s')),false)" % \
(spatial_predicate, geomattr, geometry.wkt)
elif dbtype == 'postgresql+postgis+wkt': # adjust spatial query for PostGIS with WKT geometry column
LOGGER.debug('Adjusting spatial query for PostgreSQL+PostGIS+WKT')
if spatial_predicate == 'bbox':
spatial_predicate = 'intersects'
if spatial_predicate == 'beyond':
spatial_query = "not st_dwithin(st_geomfromtext(%s), \
st_geomfromtext('%s'), %f)" % \
(geomattr, geometry.wkt, float(distance))
elif spatial_predicate == 'dwithin':
spatial_query = "st_dwithin(st_geomfromtext(%s), \
st_geomfromtext('%s'), %f)" % \
(geomattr, geometry.wkt, float(distance))
else:
spatial_query = "st_%s(st_geomfromtext(%s), \
st_geomfromtext('%s'))" % \
(spatial_predicate, geomattr, geometry.wkt)
elif dbtype == 'postgresql+postgis+native': # adjust spatial query for PostGIS with native geometry
LOGGER.debug('Adjusting spatial query for PostgreSQL+PostGIS+native')
if spatial_predicate == 'bbox':
spatial_predicate = 'intersects'
if spatial_predicate == 'beyond':
spatial_query = "not st_dwithin(%s, \
st_geomfromtext('%s',4326), %f)" % \
(postgis_geometry_column, geometry.wkt, float(distance))
elif spatial_predicate == 'dwithin':
spatial_query = "st_dwithin(%s, \
st_geomfromtext('%s',4326), %f)" % \
(postgis_geometry_column, geometry.wkt, float(distance))
else:
spatial_query = "st_%s(%s, \
st_geomfromtext('%s',4326))" % \
(spatial_predicate, postgis_geometry_column, geometry.wkt)
else:
LOGGER.debug('Adjusting spatial query')
spatial_query = "query_spatial(%s,'%s','%s','%s')" % \
(geomattr, geometry.wkt, spatial_predicate, distance)
return spatial_query
def _get_comparison_operator(element):
"""return the SQL operator based on Filter query"""
element_name = etree.QName(element).localname
return MODEL['ComparisonOperators']['fes20:%s' % element_name]['opvalue']
def set_spatial_ranking(geometry):
"""Given that we have a spatial query in fes20:Filter we check the type of geometry
and set the ranking variables"""
if util.ranking_enabled:
if geometry.type in ['Polygon', 'Envelope']:
util.ranking_pass = True
util.ranking_query_geometry = geometry.wkt
elif geometry.type in ['LineString', 'Point']:
from shapely.geometry.base import BaseGeometry
from shapely.geometry import box
            from shapely.wkt import loads, dumps
ls = loads(geometry.wkt)
b = ls.bounds
if geometry.type == 'LineString':
tmp_box = box(b[0],b[1],b[2],b[3])
tmp_wkt = dumps(tmp_box)
if tmp_box.area > 0:
util.ranking_pass = True
util.ranking_query_geometry = tmp_wkt
elif geometry.type == 'Point':
tmp_box = box((float(b[0])-1.0),(float(b[1])-1.0),(float(b[2])+1.0),(float(b[3])+1.0))
tmp_wkt = dumps(tmp_box)
util.ranking_pass = True
util.ranking_query_geometry = tmp_wkt
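# A minimal usage sketch (hypothetical queryables mapping; filter_xml stands in
# for an fes20:Filter XML document): parse() returns a SQL where-clause string
# plus the bind values referenced by its :pvalueN placeholders.
#
#     from pycsw.core.etree import etree
#
#     nsmap = {'fes20': 'http://www.opengis.net/fes/2.0',
#              'gml': 'http://www.opengis.net/gml'}
#     flt = etree.fromstring(filter_xml)          # an <fes20:Filter> element
#     where, values = parse(flt, queryables, 'sqlite', nsmap)
#     # e.g. where == "anytext like :pvalue0", values == ['%metadata%']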
| tomkralidis/pycsw | pycsw/ogc/fes/fes2.py | Python | mit | 19,080 | 0.002358 |
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
# Copyright (C) 2006 Lukáš Lalinský
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import os.path
import sys
from PyQt5 import QtGui
if sys.platform == 'win32':
_search_paths = []
else:
_search_paths = [
os.path.expanduser('~/.icons'),
os.path.join(os.environ.get('XDG_DATA_DIRS', '/usr/share'), 'icons'),
'/usr/share/pixmaps',
]
_current_theme = None
if 'XDG_CURRENT_DESKTOP' in os.environ:
desktop = os.environ['XDG_CURRENT_DESKTOP'].lower()
if desktop in ('gnome', 'unity'):
_current_theme = (os.popen('gsettings get org.gnome.desktop.interface icon-theme').read().strip()[1:-1]
or None)
elif os.environ.get('KDE_FULL_SESSION'):
_current_theme = (os.popen("kreadconfig --file kdeglobals --group Icons --key Theme --default crystalsvg").read().strip()
or None)
ICON_SIZE_MENU = ('16x16',)
ICON_SIZE_TOOLBAR = ('22x22',)
ICON_SIZE_ALL = ('22x22', '16x16')
def lookup(name, size=ICON_SIZE_ALL):
icon = QtGui.QIcon()
if _current_theme:
for path in _search_paths:
for subdir in ('actions', 'places', 'devices'):
fullpath = os.path.join(path, _current_theme, size[0], subdir, name)
if os.path.exists(fullpath + '.png'):
icon.addFile(fullpath + '.png')
for s in size[1:]:
icon.addFile(os.path.join(path, _current_theme, s, subdir, name) + '.png')
return icon
for s in size:
icon.addFile('/'.join([':', 'images', s, name]) + '.png')
return icon
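# A minimal usage sketch (icon availability depends on the installed theme and
# on a Qt application existing; `button` is any QtWidgets widget):
#
#     icon = lookup('media-optical', size=ICON_SIZE_MENU)
#     button.setIcon(icon)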
| mineo/picard | picard/util/icontheme.py | Python | gpl-2.0 | 2,350 | 0.00213 |
#######################################################
# Copyright (c) 2015, ArrayFire
# All rights reserved.
#
# This file is distributed under 3-clause BSD license.
# The complete license agreement can be obtained at:
# http://arrayfire.com/licenses/BSD-3-Clause
########################################################
"""
Statistical algorithms (mean, var, stdev, etc).
"""
from .library import *
from .array import *
def mean(a, weights=None, dim=None):
"""
Calculate mean along a given dimension.
Parameters
----------
a: af.Array
The input array.
weights: optional: af.Array. default: None.
           Array of weights for computing a weighted mean. Must match the
           size of the input array.
dim: optional: int. default: None.
The dimension for which to obtain the mean from input data.
Returns
-------
output: af.Array
Array containing the mean of the input array along a given
dimension.
"""
if dim is not None:
out = Array()
if weights is None:
safe_call(backend.get().af_mean(c_pointer(out.arr), a.arr, c_int_t(dim)))
else:
safe_call(backend.get().af_mean_weighted(c_pointer(out.arr), a.arr, weights.arr, c_int_t(dim)))
return out
else:
real = c_double_t(0)
imag = c_double_t(0)
if weights is None:
safe_call(backend.get().af_mean_all(c_pointer(real), c_pointer(imag), a.arr))
else:
safe_call(backend.get().af_mean_all_weighted(c_pointer(real), c_pointer(imag), a.arr, weights.arr))
real = real.value
imag = imag.value
return real if imag == 0 else real + imag * 1j
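# A minimal usage sketch (assumes an ArrayFire backend is available at runtime):
#
#     import arrayfire as af
#     a = af.randu(5, 3)
#     print(af.mean(a))                 # scalar mean over all elements
#     print(af.mean(a, dim=0))          # per-column means as an af.Array
#     w = af.constant(1, 5, 3)
#     print(af.mean(a, weights=w))      # weighted mean (uniform weights here)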
def var(a, isbiased=False, weights=None, dim=None):
"""
Calculate variance along a given dimension.
Parameters
----------
a: af.Array
The input array.
isbiased: optional: Boolean. default: False.
Boolean denoting population variance (false) or sample
variance (true).
weights: optional: af.Array. default: None.
         Array to calculate the weighted mean. Must match size of
         the input array.
dim: optional: int. default: None.
The dimension for which to obtain the variance from input data.
Returns
-------
output: af.Array
Array containing the variance of the input array along a given
dimension.
"""
if dim is not None:
out = Array()
if weights is None:
safe_call(backend.get().af_var(c_pointer(out.arr), a.arr, isbiased, c_int_t(dim)))
else:
safe_call(backend.get().af_var_weighted(c_pointer(out.arr), a.arr, weights.arr, c_int_t(dim)))
return out
else:
real = c_double_t(0)
imag = c_double_t(0)
if weights is None:
safe_call(backend.get().af_var_all(c_pointer(real), c_pointer(imag), a.arr, isbiased))
else:
safe_call(backend.get().af_var_all_weighted(c_pointer(real), c_pointer(imag), a.arr, weights.arr))
real = real.value
imag = imag.value
return real if imag == 0 else real + imag * 1j
def meanvar(a, weights=None, bias=VARIANCE.DEFAULT, dim=-1):
"""
Calculate mean and variance along a given dimension.
Parameters
----------
a: af.Array
The input array.
weights: optional: af.Array. default: None.
         Array to calculate the weighted mean. Must match size of
         the input array.
bias: optional: af.VARIANCE. default: DEFAULT.
population variance(VARIANCE.POPULATION) or
sample variance(VARIANCE.SAMPLE).
dim: optional: int. default: -1.
The dimension for which to obtain the variance from input data.
Returns
-------
mean: af.Array
Array containing the mean of the input array along a given
dimension.
variance: af.Array
Array containing the variance of the input array along a given
dimension.
"""
mean_out = Array()
var_out = Array()
if weights is None:
weights = Array()
safe_call(backend.get().af_meanvar(c_pointer(mean_out.arr), c_pointer(var_out.arr),
a.arr, weights.arr, bias.value, c_int_t(dim)))
return mean_out, var_out
def stdev(a, dim=None):
"""
Calculate standard deviation along a given dimension.
Parameters
----------
a: af.Array
The input array.
dim: optional: int. default: None.
The dimension for which to obtain the standard deviation from
input data.
Returns
-------
output: af.Array
Array containing the standard deviation of the input array
along a given dimension.
"""
if dim is not None:
out = Array()
safe_call(backend.get().af_stdev(c_pointer(out.arr), a.arr, c_int_t(dim)))
return out
else:
real = c_double_t(0)
imag = c_double_t(0)
safe_call(backend.get().af_stdev_all(c_pointer(real), c_pointer(imag), a.arr))
real = real.value
imag = imag.value
return real if imag == 0 else real + imag * 1j
def cov(a, isbiased=False, dim=None):
"""
Calculate covariance along a given dimension.
Parameters
----------
a: af.Array
The input array.
isbiased: optional: Boolean. default: False.
Boolean denoting whether biased estimate should be taken.
dim: optional: int. default: None.
The dimension for which to obtain the covariance from input data.
Returns
-------
output: af.Array
Array containing the covariance of the input array along a
given dimension.
"""
if dim is not None:
out = Array()
safe_call(backend.get().af_cov(c_pointer(out.arr), a.arr, isbiased, c_int_t(dim)))
return out
else:
real = c_double_t(0)
imag = c_double_t(0)
safe_call(backend.get().af_cov_all(c_pointer(real), c_pointer(imag), a.arr, isbiased))
real = real.value
imag = imag.value
return real if imag == 0 else real + imag * 1j
def median(a, dim=None):
"""
Calculate median along a given dimension.
Parameters
----------
a: af.Array
The input array.
dim: optional: int. default: None.
The dimension for which to obtain the median from input data.
Returns
-------
output: af.Array
Array containing the median of the input array along a
given dimension.
"""
if dim is not None:
out = Array()
safe_call(backend.get().af_median(c_pointer(out.arr), a.arr, c_int_t(dim)))
return out
else:
real = c_double_t(0)
imag = c_double_t(0)
safe_call(backend.get().af_median_all(c_pointer(real), c_pointer(imag), a.arr))
real = real.value
imag = imag.value
return real if imag == 0 else real + imag * 1j
def corrcoef(x, y):
"""
Calculate the correlation coefficient of the input arrays.
Parameters
----------
x: af.Array
The first input array.
y: af.Array
The second input array.
Returns
-------
output: af.Array
Array containing the correlation coefficient of the input arrays.
"""
real = c_double_t(0)
imag = c_double_t(0)
safe_call(backend.get().af_corrcoef(c_pointer(real), c_pointer(imag), x.arr, y.arr))
real = real.value
imag = imag.value
return real if imag == 0 else real + imag * 1j
def topk(data, k, dim=0, order=TOPK.DEFAULT):
"""
Return top k elements along a single dimension.
Parameters
----------
data: af.Array
Input array to return k elements from.
    k: scalar.
        The number of elements to return from the input array.
dim: optional: scalar. default: 0
The dimension along which the top k elements are
extracted. Note: at the moment, topk() only supports the
extraction of values along the first dimension.
order: optional: af.TOPK. default: af.TOPK.DEFAULT
The ordering of k extracted elements. Defaults to top k max values.
Returns
-------
values: af.Array
Top k elements from input array.
indices: af.Array
Corresponding index array to top k elements.
"""
values = Array()
indices = Array()
safe_call(backend.get().af_topk(c_pointer(values.arr), c_pointer(indices.arr), data.arr, k, c_int_t(dim), order.value))
    return values, indices
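# Illustrative usage sketch, not part of the original module: af.randu and the
# TOPK enum (DEFAULT/MIN) are assumed to be re-exported by the top-level
# arrayfire package; only extraction along the first dimension is supported,
# as noted in the docstring above.
def _example_topk_usage():
    import arrayfire as af
    data = af.randu(10)
    top_values, top_indices = af.topk(data, 3)                     # 3 largest
    low_values, low_indices = af.topk(data, 3, order=af.TOPK.MIN)  # 3 smallest
    return top_values, top_indices, low_values, low_indices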
| arrayfire/arrayfire_python | arrayfire/statistics.py | Python | bsd-3-clause | 8,560 | 0.003271 |
"""user permissions management
Revision ID: bd67c88713b8
Revises: 10023013f155
Create Date: 2021-03-31 21:31:47.278834
"""
# revision identifiers, used by Alembic.
import datetime
from sqlalchemy import orm, text
from sqlalchemy.engine.reflection import Inspector
revision = "bd67c88713b8"
down_revision = "10023013f155"
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
ADMIN_EMAIL = "[email protected]"
def get_tables_names() -> [str]:
conn = op.get_bind()
inspector = Inspector.from_engine(conn)
tables = inspector.get_table_names()
return tables
def upgrade():
    # In case of a downgrade followed by an upgrade these tables will no longer exist - and so the transaction would fail
tables_names = get_tables_names()
for table_name in [
"roles_users",
"roles",
"report_preferences",
"general_preferences",
]:
if table_name in tables_names:
op.drop_table(table_name)
if "user_oauth" in tables_names:
if "users" in tables_names:
op.drop_table("users")
op.rename_table("user_oauth", "users")
op.create_table(
"roles",
sa.Column("id", sa.Integer(), primary_key=True, autoincrement=True, nullable=False),
sa.Column("name", sa.String(127), unique=True, index=True, nullable=False),
sa.Column("description", sa.String(255)),
sa.Column("create_date", sa.DateTime(), nullable=False, server_default=text("now()")),
)
op.create_table(
"users_to_roles",
sa.Column(
"user_id", sa.BigInteger(), sa.ForeignKey("users.id"), index=True, nullable=False
),
sa.Column("role_id", sa.Integer(), sa.ForeignKey("roles.id"), index=True, nullable=False),
sa.Column("create_date", sa.DateTime(), nullable=False, server_default=text("now()")),
sa.PrimaryKeyConstraint("user_id", "role_id"),
)
from anyway.models import Roles, Users, users_to_roles
bind = op.get_bind()
session = orm.Session(bind=bind)
role_admins = Roles(
name="admins",
description="This is the default admin role.",
create_date=datetime.datetime.now(),
)
session.add(role_admins)
res = session.query(Users).with_entities(Users.email).filter(Users.email == ADMIN_EMAIL).first()
if res is None:
user = Users(
user_register_date=datetime.datetime.now(),
user_last_login_date=datetime.datetime.now(),
email=ADMIN_EMAIL,
oauth_provider_user_name=ADMIN_EMAIL,
is_active=True,
oauth_provider="google",
is_user_completed_registration=True,
oauth_provider_user_id="unknown-manual-insert",
)
session.add(user)
user_id = (
session.query(Users).with_entities(Users.id).filter(Users.email == ADMIN_EMAIL).first()
)
role_id = session.query(Roles).with_entities(Roles.id).filter(Roles.name == "admins").first()
insert_users_to_roles = users_to_roles.insert().values(
user_id=user_id.id,
role_id=role_id.id,
create_date=datetime.datetime.now(),
)
session.execute(insert_users_to_roles)
session.commit()
def downgrade():
op.drop_table("users_to_roles")
op.drop_table("roles")
# Some of the changes are irreversible
| hasadna/anyway | alembic/versions/bd67c88713b8_user_permissions_management.py | Python | mit | 3,369 | 0.003562 |
#!/usr/bin/python
# -*- coding: utf8 -*-
from bs4 import BeautifulSoup as Soup
import urls
import re
import proxy
from datetime import *
import time
from time import mktime
import functions
def materials ( config ):
url = "https://www.lectio.dk/lectio/%s/MaterialOverview.aspx?holdelement_id=%s" % ( str(config["school_id"]), str(config["team_element_id"]) )
cookies = {}
# Insert User-agent headers and the cookie information
headers = {
"User-Agent" : "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1665.2 Safari/537.36",
"Content-Type" : "application/x-www-form-urlencoded",
"Host" : "www.lectio.dk",
"Origin" : "https://www.lectio.dk",
"Cookie" : functions.implode(cookies, "{{index}}={{value}}", "; ")
}
response = proxy.session.get(url, headers=headers)
html = response.text
soup = Soup(html)
if soup.find("table", attrs={"id" : "m_Content_MaterialsStudents"}) is None:
return {
"status" : False,
"error" : "Data not found"
}
rows = soup.find("table", attrs={"id" : "m_Content_MaterialsStudents"}).findAll("tr")
materialsList = []
if len(rows) > 1:
rows.pop(0)
titleProg = re.compile(ur"(?P<authors>.*): (?P<title>.*), (?P<publisher>.*)")
for row in rows:
elements = row.findAll("td")
title = unicode(elements[0].text.replace("\n", ""))
titleGroups = titleProg.match(title)
materialsList.append({
"title_text" : title,
"title" : titleGroups.group("title") if not titleGroups is None else title,
"publisher" : titleGroups.group("publisher") if not titleGroups is None else "",
"authors" : titleGroups.group("authors").split(", ") if not titleGroups is None else "",
"type" : "book" if unicode(elements[1].text.replace("\n", "")) == u"Bog" else unicode(elements[1].text.replace("\n", "")),
"book_storage" : True if elements[2].text.replace("\n", "") == "Ja" else False,
"comment" : unicode(elements[3].text.strip("\n").replace("\n", "")),
"ebook" : elements[4].text.strip("\n").replace("\n", "")
})
return {
"status" : "ok",
"materials" : materialsList
} | boh1996/LectioAPI | scrapers/materials.py | Python | mit | 2,102 | 0.03568 |
"""
@name: Modules/Web/web_internet.py
@author: D. Brian Kimmel
@contact: [email protected]
@copyright: (c) 2013-2020 by D. Brian Kimmel
@license: MIT License
@note: Created on Jun 3, 2013
@summary: Handle the "Internet" information for a house.
"""
__updated__ = '2020-01-02'
# Import system type stuff
from datetime import datetime
from nevow import athena
from nevow import loaders
import os
# Import PyMh files and modules.
from Modules.Core.data_objects import InternetConnectionInformation
from Modules.Computer.Web.web_utils import GetJSONComputerInfo
from Modules.Core import logging_pyh as Logger
from Modules.Core.Utilities import json_tools
# Handy helper for finding external resources nearby.
webpath = os.path.join(os.path.split(__file__)[0])
templatepath = os.path.join(webpath, 'template')
g_debug = 0
LOG = Logger.getLogger('PyHouse.webInternet ')
class InternetElement(athena.LiveElement):
""" a 'live' internet element.
"""
docFactory = loaders.xmlfile(os.path.join(templatepath, 'internetElement.html'))
jsClass = u'internet.InternetWidget'
def __init__(self, p_workspace_obj, _p_params):
self.m_workspace_obj = p_workspace_obj
self.m_pyhouse_obj = p_workspace_obj.m_pyhouse_obj
@athena.expose
def getInternetData(self):
l_computer = GetJSONComputerInfo(self.m_pyhouse_obj)
return l_computer
# ## END DBK
| DBrianKimmel/PyHouse | Project/src/Modules/Computer/Web/web_internet.py | Python | mit | 1,421 | 0.000704 |
# Copyright (c) 2015, Matt Layman
from ConfigParser import ConfigParser, NoOptionError, NoSectionError
import os
import sys
import requests
API_URL = 'https://www.transifex.com/api/2'
LANGUAGES = [
'es',
'fr',
'it',
'nl',
]
def fetch_po_for(language, username, password):
print 'Downloading po file for {0} ...'.format(language)
po_api = '/project/tappy/resource/tappypot/translation/{0}/'.format(
language)
po_url = API_URL + po_api
params = {'file': '1'}
r = requests.get(po_url, auth=(username, password), params=params)
if r.status_code == 200:
r.encoding = 'utf-8'
output_file = os.path.join(
here, 'tap', 'locale', language, 'LC_MESSAGES', 'tappy.po')
with open(output_file, 'wb') as out:
out.write(r.text.encode('utf-8'))
else:
print('Something went wrong fetching the {0} po file.'.format(
language))
def get_auth_from_conf(here):
transifex_conf = os.path.join(here, '.transifex.ini')
config = ConfigParser()
try:
with open(transifex_conf, 'r') as conf:
config.readfp(conf)
except IOError as ex:
sys.exit('Failed to load authentication configuration file.\n'
'{0}'.format(ex))
try:
username = config.get('auth', 'username')
password = config.get('auth', 'password')
except (NoOptionError, NoSectionError) as ex:
sys.exit('Oops. Incomplete configuration file: {0}'.format(ex))
return username, password
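# The [auth] lookups above imply a .transifex.ini of roughly this shape next to
# this script (an illustrative assumption; the values are placeholders, not
# real credentials):
#
#   [auth]
#   username = my-transifex-username
#   password = my-transifex-password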
if __name__ == '__main__':
here = os.path.abspath(os.path.dirname(__file__))
username, password = get_auth_from_conf(here)
for language in LANGUAGES:
fetch_po_for(language, username, password)
| blakev/tappy | transifex.py | Python | bsd-2-clause | 1,750 | 0.000571 |
from django.db import models
from django_extensions.db.fields import CreationDateTimeField
from tower import ugettext_lazy as _
from .managers import NoticeLiveManager
class Notice(models.Model):
LIVE = 1
REMOVED = 2
STATUS_CHOICES = (
(LIVE, _('Published')),
(REMOVED, _('Unpublished')),
)
title = models.CharField(max_length=255)
body = models.TextField()
created = CreationDateTimeField()
status = models.IntegerField(choices=STATUS_CHOICES, default=LIVE)
end_date = models.DateTimeField(blank=True, null=True,
                                    help_text='Optional. Determines when the '
                                              'notice disappears')
# managers
objects = models.Manager()
live = NoticeLiveManager()
class Meta:
ordering = ('-created',)
def __unicode__(self):
return u'Notice: %s' % self.title
| mozilla/popcorn_maker | popcorn_gallery/notifications/models.py | Python | bsd-3-clause | 906 | 0 |
__source__ = 'https://leetcode.com/problems/binary-tree-cameras/'
# Time: O(N)
# Space: O(H)
#
# Description: Leetcode # 968. Binary Tree Cameras
#
# Given a binary tree, we install cameras on the nodes of the tree.
#
# Each camera at a node can monitor its parent, itself, and its immediate children.
#
# Calculate the minimum number of cameras needed to monitor all nodes of the tree.
#
# Example 1:
#
# Input: [0,0,null,0,0]
# Output: 1
# Explanation: One camera is enough to monitor all nodes if placed as shown.
#
# Example 2:
#
# Input: [0,0,null,0,null,0,null,null,0]
# Output: 2
# Explanation: At least two cameras are needed to monitor all nodes of the tree.
# The above image shows one of the valid configurations of camera placement.
#
# Note:
# The number of nodes in the given tree will be in the range [1, 1000].
# Every node has value 0.
#
import unittest
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
# 76ms 100%
class Solution(object):
def minCameraCover(self, root):
"""
:type root: TreeNode
:rtype: int
"""
def solve(node):
# 0: Strict ST; All nodes below this are covered, but not this one
# 1: Normal ST; All nodes below and incl this are covered - no camera
# 2: Placed camera; All nodes below this are covered, plus camera here
if not node: return 0, 0, float('inf')
L = solve(node.left)
R = solve(node.right)
dp0 = L[1] + R[1]
dp1 = min(L[2] + min(R[1:]), R[2] + min(L[1:]))
dp2 = 1 + min(L) + min(R)
return dp0, dp1, dp2
return min(solve(root)[1:])
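# Hand-computed trace of the three DP states for Example 1 ([0,0,null,0,0]),
# i.e. root -> one child -> two leaves (illustration, not part of the solution):
#   each leaf returns (0, inf, 1): its strict subtree is covered for free, it
#   cannot cover itself without a camera, and a camera on it costs 1.
#   the leaves' parent returns (inf, 2, 1): one camera on the parent (dp2 = 1)
#   beats covering it via cameras on its leaves (dp1 = 2).
#   the root returns (2, 1, 2), so min(solve(root)[1:]) = min(1, 2) = 1,
#   matching the expected output of one camera.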
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
if __name__ == '__main__':
unittest.main()
Java = '''
# Thought: https://leetcode.com/problems/binary-tree-cameras/solution/
#
Approach 1: Dynamic Programming
Complexity Analysis
Time Complexity: O(N), where N is the number of nodes in the given tree.
Space Complexity: O(H), where H is the height of the given tree.
/**
* Definition for a binary tree node.
* public class TreeNode {
* int val;
* TreeNode left;
* TreeNode right;
* TreeNode(int x) { val = x; }
* }
*/
# 10ms 100%
class Solution {
public int minCameraCover(TreeNode root) {
int[] ans = solve(root);
return Math.min(ans[1], ans[2]);
}
// 0: Strict ST; All nodes below this are covered, but not this one
// 1: Normal ST; All nodes below and incl this are covered - no camera
// 2: Placed camera; All nodes below this are covered, plus camera here
public int[] solve(TreeNode node) {
if (node == null) return new int[]{ 0, 0, 99999 };
int[] L = solve(node.left);
int[] R = solve(node.right);
int mL12 = Math.min(L[1], L[2]);
int mR12 = Math.min(R[1], R[2]);
int d0 = L[1] + R[1];
int d1 = Math.min(L[2] + mR12, R[2] + mL12);
int d2 = 1 + Math.min(L[0], mL12) + Math.min(R[0], mR12);
return new int[]{d0, d1, d2};
}
}
# https://leetcode.com/problems/binary-tree-cameras/discuss/211180/JavaC%2B%2BPython-Greedy-DFS
# Explanation:
# Apply a recursion function dfs.
# Return 0 if it's a leaf.
# Return 1 if it's a parent of a leaf, with a camera on this node.
# Return 2 if it's covered, without a camera on this node.
#
# For each node,
# if it has a child which is a leaf (returns 0), then it needs a camera.
# if it has a child which is the parent of a leaf (returns 1), then it's covered.
#
# If it needs a camera, then res++ and we return 1.
# If it's covered, we return 2.
# Otherwise, we return 0.
# 9ms 100%
class Solution {
int res = 0;
public int minCameraCover(TreeNode root) {
return (dfs(root) < 1 ? 1: 0) + res;
}
private int dfs(TreeNode root) {
int left = root.left == null ? 2 : dfs(root.left),
right = root.right == null ? 2 : dfs(root.right);
if (left == 0 || right == 0) {
res++;
return 1;
}
return left == 1 || right == 1 ? 2 : 0;
}
}
'''
| JulyKikuAkita/PythonPrac | cs15211/BinaryTreeCameras.py | Python | apache-2.0 | 4,264 | 0.003752 |
# Download the Python helper library from twilio.com/docs/python/install
import os
from twilio.rest import Client
from datetime import datetime
# Your Account Sid and Auth Token from twilio.com/user/account
# To set up environmental variables, see http://twil.io/secure
account_sid = os.environ['TWILIO_ACCOUNT_SID']
auth_token = os.environ['TWILIO_AUTH_TOKEN']
client = Client(account_sid, auth_token)
data = {
'date_updated': str(datetime.now()),
'movie_title': "On The Line",
'show_times': ["12:30:00Z", "14:45:00Z", "15:30:00Z", "17:45:00Z"],
'starring': ["Lance Bass", "Joey Fatone"],
'genre': "Romance"
}
document = client.sync \
.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.documents \
.create(unique_name="MyFirstDocument",
data=data,
ttl=1814400) # expires in 21 days
print(document.sid)
| TwilioDevEd/api-snippets | sync/rest/documents/create-document/create-document.7.x.py | Python | mit | 864 | 0 |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RNcbit(RPackage):
"""Making NCBI taxonomic data locally available and searchable as an R
object."""
homepage = "https://cran.r-project.org/package=ncbit"
url = "https://cran.r-project.org/src/contrib/ncbit_2013.03.29.tar.gz"
list_url = "https://cran.r-project.org/src/contrib/Archive/ncbit"
version('2013.03.29', '29582d7e5c8bbf9683c57c4f6ac3e891')
| skosukhin/spack | var/spack/repos/builtin/packages/r-ncbit/package.py | Python | lgpl-2.1 | 1,646 | 0.000608 |
# -*- coding:utf-8 -*-
# This code is automatically transpiled by Saklient Translator
import six
from ...errors.httpnotfoundexception import HttpNotFoundException
import saklient
str = six.text_type
# module saklient.cloud.errors.resourcepathnotfoundexception
class ResourcePathNotFoundException(HttpNotFoundException):
    ## The target cannot be found. The path is incorrect.
## @param {int} status
# @param {str} code=None
# @param {str} message=""
def __init__(self, status, code=None, message=""):
super(ResourcePathNotFoundException, self).__init__(status, code, "対象が見つかりません。パスに誤りがあります。" if message is None or message == "" else message)
| sakura-internet/saklient.python | saklient/cloud/errors/resourcepathnotfoundexception.py | Python | mit | 742 | 0.009174 |
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from datetime import datetime, timedelta
from django.core.management.base import NoArgsCommand
from django.utils.translation import ugettext as _
from desktop.lib.paths import get_apps_root
from hbased.ttypes import AlreadyExists
from hbase.api import HbaseApi
LOG = logging.getLogger(__name__)
class Command(NoArgsCommand):
help = 'Create and fill some demo tables in the first configured cluster.'
def handle_noargs(self, **options):
api = HbaseApi()
cluster_name = api.getClusters()[0]['name'] # Currently pick first configured cluster
# Check connectivity
api.connectCluster(cluster_name)
self.create_analytics_table(api, cluster_name)
self.load_analytics_table(api, cluster_name)
self.create_binary_table(api, cluster_name)
self.load_binary_table(api, cluster_name)
def create_analytics_table(self, api, cluster_name):
try:
api.createTable(cluster_name, 'analytics_demo', [{'properties': {'name': 'hour'}}, {'properties': {'name': 'day'}}, {'properties': {'name': 'total'}}])
except AlreadyExists:
pass
def load_analytics_table(self, api, cluster_name):
table_data = os.path.join(get_apps_root(), 'hbase', 'example', 'analytics', 'hbase-analytics.tsv')
api.bulkUpload(cluster_name, 'analytics_demo', open(table_data))
def create_binary_table(self, api, cluster_name):
try:
api.createTable(cluster_name, 'document_demo', [{'properties': {'name': 'doc'}}])
except AlreadyExists:
pass
def load_binary_table(self, api, cluster_name):
today = datetime.now().strftime('%Y%m%d')
tomorrow = (datetime.now() + timedelta(days=1)).strftime('%Y%m%d')
api.putRow(cluster_name, 'document_demo', today, {'doc:txt': 'Hue is awesome!'})
api.putRow(cluster_name, 'document_demo', today, {'doc:json': '{"user": "hue", "coolness": "extra"}'})
api.putRow(cluster_name, 'document_demo', tomorrow, {'doc:version': '<xml>I like HBase</xml>'})
api.putRow(cluster_name, 'document_demo', tomorrow, {'doc:version': '<xml>I LOVE HBase</xml>'})
root = os.path.join(get_apps_root(), 'hbase', 'example', 'documents')
api.putRow(cluster_name, 'document_demo', today, {'doc:img': open(root + '/hue-logo.png', "rb").read()})
api.putRow(cluster_name, 'document_demo', today, {'doc:html': open(root + '/gethue.com.html', "rb").read()})
api.putRow(cluster_name, 'document_demo', today, {'doc:pdf': open(root + '/gethue.pdf', "rb").read()})
| yongshengwang/builthue | apps/hbase/src/hbase/management/commands/hbase_setup.py | Python | apache-2.0 | 3,274 | 0.007025 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import base64
import logging
from email.utils import formataddr
from urlparse import urljoin
from openerp import api, tools
from openerp import SUPERUSER_ID
from openerp.addons.base.ir.ir_mail_server import MailDeliveryException
from openerp.osv import fields, osv
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.translate import _
import openerp.tools as tools
_logger = logging.getLogger(__name__)
class mail_mail(osv.Model):
""" Model holding RFC2822 email messages to send. This model also provides
facilities to queue and send new email messages. """
_name = 'mail.mail'
_description = 'Outgoing Mails'
_inherits = {'mail.message': 'mail_message_id'}
_order = 'id desc'
_rec_name = 'subject'
_columns = {
'mail_message_id': fields.many2one('mail.message', 'Message', required=True, ondelete='cascade', auto_join=True),
'state': fields.selection([
('outgoing', 'Outgoing'),
('sent', 'Sent'),
('received', 'Received'),
('exception', 'Delivery Failed'),
('cancel', 'Cancelled'),
], 'Status', readonly=True, copy=False),
'auto_delete': fields.boolean('Auto Delete',
help="Permanently delete this email after sending it, to save space"),
'references': fields.text('References', help='Message references, such as identifiers of previous messages', readonly=1),
'email_to': fields.text('To', help='Message recipients (emails)'),
'recipient_ids': fields.many2many('res.partner', string='To (Partners)'),
'email_cc': fields.char('Cc', help='Carbon copy message recipients'),
'body_html': fields.text('Rich-text Contents', help="Rich-text/HTML message"),
'headers': fields.text('Headers', copy=False),
'failure_reason': fields.text('Failure Reason', help="Failure reason. This is usually the exception thrown by the email server, stored to ease the debugging of mailing issues.", readonly=1),
# Auto-detected based on create() - if 'mail_message_id' was passed then this mail is a notification
# and during unlink() we will not cascade delete the parent and its attachments
'notification': fields.boolean('Is Notification',
help='Mail has been created to notify people of an existing mail.message'),
}
_defaults = {
'state': 'outgoing',
}
def default_get(self, cr, uid, fields, context=None):
# protection for `default_type` values leaking from menu action context (e.g. for invoices)
# To remove when automatic context propagation is removed in web client
if context and context.get('default_type') and context.get('default_type') not in self._all_columns['type'].column.selection:
context = dict(context, default_type=None)
return super(mail_mail, self).default_get(cr, uid, fields, context=context)
def create(self, cr, uid, values, context=None):
# notification field: if not set, set if mail comes from an existing mail.message
if 'notification' not in values and values.get('mail_message_id'):
values['notification'] = True
return super(mail_mail, self).create(cr, uid, values, context=context)
def unlink(self, cr, uid, ids, context=None):
# cascade-delete the parent message for all mails that are not created for a notification
ids_to_cascade = self.search(cr, uid, [('notification', '=', False), ('id', 'in', ids)])
parent_msg_ids = [m.mail_message_id.id for m in self.browse(cr, uid, ids_to_cascade, context=context)]
res = super(mail_mail, self).unlink(cr, uid, ids, context=context)
self.pool.get('mail.message').unlink(cr, uid, parent_msg_ids, context=context)
return res
def mark_outgoing(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'outgoing'}, context=context)
def cancel(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'cancel'}, context=context)
@api.cr_uid
def process_email_queue(self, cr, uid, ids=None, context=None):
"""Send immediately queued messages, committing after each
message is sent - this is not transactional and should
not be called during another transaction!
:param list ids: optional list of emails ids to send. If passed
no search is performed, and these ids are used
instead.
:param dict context: if a 'filters' key is present in context,
this value will be used as an additional
filter to further restrict the outgoing
messages to send (by default all 'outgoing'
messages are sent).
"""
if context is None:
context = {}
if not ids:
filters = [('state', '=', 'outgoing')]
if 'filters' in context:
filters.extend(context['filters'])
ids = self.search(cr, uid, filters, context=context)
res = None
try:
# Force auto-commit - this is meant to be called by
# the scheduler, and we can't allow rolling back the status
# of previously sent emails!
res = self.send(cr, uid, ids, auto_commit=True, context=context)
except Exception:
_logger.exception("Failed processing mail queue")
return res
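    # Illustrative call sketch, not part of the original model: a scheduler or
    # shell session could narrow a queue run through the optional 'filters'
    # context key, which is appended to the default outgoing-state domain
    # (assuming a valid cr/uid pair), e.g.:
    #
    #   self.pool['mail.mail'].process_email_queue(
    #       cr, uid, context={'filters': [('auto_delete', '=', False)]})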
def _postprocess_sent_message(self, cr, uid, mail, context=None, mail_sent=True):
"""Perform any post-processing necessary after sending ``mail``
successfully, including deleting it completely along with its
attachment if the ``auto_delete`` flag of the mail was set.
Overridden by subclasses for extra post-processing behaviors.
:param browse_record mail: the mail that was just sent
:return: True
"""
if mail_sent and mail.auto_delete:
# done with SUPERUSER_ID to avoid giving large unlink access rights
self.unlink(cr, SUPERUSER_ID, [mail.id], context=context)
return True
#------------------------------------------------------
# mail_mail formatting, tools and send mechanism
#------------------------------------------------------
def _get_partner_access_link(self, cr, uid, mail, partner=None, context=None):
"""Generate URLs for links in mails: partner has access (is user):
link to action_mail_redirect action that will redirect to doc or Inbox """
if context is None:
context = {}
if partner and partner.user_ids:
base_url = self.pool.get('ir.config_parameter').get_param(cr, SUPERUSER_ID, 'web.base.url')
mail_model = mail.model or 'mail.thread'
url = urljoin(base_url, self.pool[mail_model]._get_access_link(cr, uid, mail, partner, context=context))
return "<span class='oe_mail_footer_access'><small>%(access_msg)s <a style='color:inherit' href='%(portal_link)s'>%(portal_msg)s</a></small></span>" % {
'access_msg': _('about') if mail.record_name else _('access'),
'portal_link': url,
'portal_msg': '%s %s' % (context.get('model_name', ''), mail.record_name) if mail.record_name else _('your messages'),
}
else:
return None
def send_get_mail_subject(self, cr, uid, mail, force=False, partner=None, context=None):
"""If subject is void, set the subject as 'Re: <Resource>' or
'Re: <mail.parent_id.subject>'
:param boolean force: force the subject replacement
"""
if (force or not mail.subject) and mail.record_name:
return 'Re: %s' % (mail.record_name)
elif (force or not mail.subject) and mail.parent_id and mail.parent_id.subject:
return 'Re: %s' % (mail.parent_id.subject)
return mail.subject
def send_get_mail_body(self, cr, uid, mail, partner=None, context=None):
"""Return a specific ir_email body. The main purpose of this method
is to be inherited to add custom content depending on some module."""
body = mail.body_html or ''
# generate access links for notifications or emails linked to a specific document with auto threading
link = None
if mail.notification or (mail.model and mail.res_id and not mail.no_auto_thread):
link = self._get_partner_access_link(cr, uid, mail, partner, context=context)
if link:
body = tools.append_content_to_html(body, link, plaintext=False, container_tag='div')
return body
def send_get_mail_to(self, cr, uid, mail, partner=None, context=None):
"""Forge the email_to with the following heuristic:
- if 'partner', recipient specific (Partner Name <email>)
- else fallback on mail.email_to splitting """
if partner:
email_to = [formataddr((partner.name, partner.email))]
else:
email_to = tools.email_split(mail.email_to)
return email_to
def send_get_email_dict(self, cr, uid, mail, partner=None, context=None):
"""Return a dictionary for specific email values, depending on a
partner, or generic to the whole recipients given by mail.email_to.
:param browse_record mail: mail.mail browse_record
:param browse_record partner: specific recipient partner
"""
body = self.send_get_mail_body(cr, uid, mail, partner=partner, context=context)
body_alternative = tools.html2plaintext(body)
res = {
'body': body,
'body_alternative': body_alternative,
'subject': self.send_get_mail_subject(cr, uid, mail, partner=partner, context=context),
'email_to': self.send_get_mail_to(cr, uid, mail, partner=partner, context=context),
}
return res
def send(self, cr, uid, ids, auto_commit=False, raise_exception=False, context=None):
""" Sends the selected emails immediately, ignoring their current
state (mails that have already been sent should not be passed
unless they should actually be re-sent).
Emails successfully delivered are marked as 'sent', and those
that fail to be deliver are marked as 'exception', and the
corresponding error mail is output in the server logs.
:param bool auto_commit: whether to force a commit of the mail status
after sending each mail (meant only for scheduler processing);
should never be True during normal transactions (default: False)
:param bool raise_exception: whether to raise an exception if the
email sending process has failed
:return: True
"""
context = dict(context or {})
ir_mail_server = self.pool.get('ir.mail_server')
ir_attachment = self.pool['ir.attachment']
for mail in self.browse(cr, SUPERUSER_ID, ids, context=context):
try:
# TDE note: remove me when model_id field is present on mail.message - done here to avoid doing it multiple times in the sub method
if mail.model:
model_id = self.pool['ir.model'].search(cr, SUPERUSER_ID, [('model', '=', mail.model)], context=context)[0]
model = self.pool['ir.model'].browse(cr, SUPERUSER_ID, model_id, context=context)
else:
model = None
if model:
context['model_name'] = model.name
# load attachment binary data with a separate read(), as prefetching all
                # `datas` (binary field) could bloat the browse cache, triggering
# soft/hard mem limits with temporary data.
attachment_ids = [a.id for a in mail.attachment_ids]
attachments = [(a['datas_fname'], base64.b64decode(a['datas']))
for a in ir_attachment.read(cr, SUPERUSER_ID, attachment_ids,
['datas_fname', 'datas'])]
# specific behavior to customize the send email for notified partners
email_list = []
if mail.email_to:
email_list.append(self.send_get_email_dict(cr, uid, mail, context=context))
for partner in mail.recipient_ids:
email_list.append(self.send_get_email_dict(cr, uid, mail, partner=partner, context=context))
# headers
headers = {}
bounce_alias = self.pool['ir.config_parameter'].get_param(cr, uid, "mail.bounce.alias", context=context)
catchall_domain = self.pool['ir.config_parameter'].get_param(cr, uid, "mail.catchall.domain", context=context)
if bounce_alias and catchall_domain:
if mail.model and mail.res_id:
headers['Return-Path'] = '%s-%d-%s-%d@%s' % (bounce_alias, mail.id, mail.model, mail.res_id, catchall_domain)
else:
headers['Return-Path'] = '%s-%d@%s' % (bounce_alias, mail.id, catchall_domain)
if mail.headers:
try:
headers.update(eval(mail.headers))
except Exception:
pass
# Writing on the mail object may fail (e.g. lock on user) which
# would trigger a rollback *after* actually sending the email.
# To avoid sending twice the same email, provoke the failure earlier
mail.write({
'state': 'exception',
                    'failure_reason': _('Error without exception. Probably due to sending an email without computed recipients.'),
})
mail_sent = False
# build an RFC2822 email.message.Message object and send it without queuing
res = None
for email in email_list:
msg = ir_mail_server.build_email(
email_from=mail.email_from,
email_to=email.get('email_to'),
subject=email.get('subject'),
body=email.get('body'),
body_alternative=email.get('body_alternative'),
email_cc=tools.email_split(mail.email_cc),
reply_to=mail.reply_to,
attachments=attachments,
message_id=mail.message_id,
references=mail.references,
object_id=mail.res_id and ('%s-%s' % (mail.res_id, mail.model)),
subtype='html',
subtype_alternative='plain',
headers=headers)
try:
res = ir_mail_server.send_email(cr, uid, msg,
mail_server_id=mail.mail_server_id.id,
context=context)
except AssertionError as error:
if error.message == ir_mail_server.NO_VALID_RECIPIENT:
# No valid recipient found for this particular
# mail item -> ignore error to avoid blocking
# delivery to next recipients, if any. If this is
# the only recipient, the mail will show as failed.
_logger.info("Ignoring invalid recipients for mail.mail %s: %s",
mail.message_id, email.get('email_to'))
else:
raise
if res:
mail.write({'state': 'sent', 'message_id': res, 'failure_reason': False})
mail_sent = True
# /!\ can't use mail.state here, as mail.refresh() will cause an error
# see revid:[email protected] in 6.1
if mail_sent:
_logger.info('Mail with ID %r and Message-Id %r successfully sent', mail.id, mail.message_id)
self._postprocess_sent_message(cr, uid, mail, context=context, mail_sent=mail_sent)
except MemoryError:
# prevent catching transient MemoryErrors, bubble up to notify user or abort cron job
# instead of marking the mail as failed
_logger.exception('MemoryError while processing mail with ID %r and Msg-Id %r. '\
'Consider raising the --limit-memory-hard startup option',
mail.id, mail.message_id)
raise
except Exception as e:
failure_reason = tools.ustr(e)
_logger.exception('failed sending mail (id: %s) due to %s', mail.id, failure_reason)
mail.write({'state': 'exception', 'failure_reason': failure_reason})
self._postprocess_sent_message(cr, uid, mail, context=context, mail_sent=False)
if raise_exception:
if isinstance(e, AssertionError):
# get the args of the original error, wrap into a value and throw a MailDeliveryException
# that is an except_orm, with name and value as arguments
value = '. '.join(e.args)
raise MailDeliveryException(_("Mail Delivery Failed"), value)
raise
if auto_commit is True:
cr.commit()
return True
| cdrooom/odoo | addons/mail/mail_mail.py | Python | agpl-3.0 | 18,908 | 0.004337 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 Smile (<http://www.smile.fr>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import test_access_control
import test_users
import test_groups
| ovnicraft/odoo_addons | smile_access_control/tests/__init__.py | Python | agpl-3.0 | 1,040 | 0 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.addons.website_event.tests.common import TestEventOnlineCommon
class TestEventExhibitorCommon(TestEventOnlineCommon):
@classmethod
def setUpClass(cls):
super(TestEventExhibitorCommon, cls).setUpClass()
# Sponsorship data
cls.sponsor_type_0 = cls.env['event.sponsor.type'].create({
'name': 'GigaTop',
'sequence': 1,
})
cls.sponsor_0_partner = cls.env['res.partner'].create({
'name': 'EventSponsor',
'country_id': cls.env.ref('base.be').id,
'email': '[email protected]',
'phone': '04856112233',
})
cls.sponsor_0 = cls.env['event.sponsor'].create({
'partner_id': cls.sponsor_0_partner.id,
'event_id': cls.event_0.id,
'sponsor_type_id': cls.sponsor_type_0.id,
'hour_from': 8.0,
'hour_to': 18.0,
})
| jeremiahyan/odoo | addons/website_event_exhibitor/tests/common.py | Python | gpl-3.0 | 1,021 | 0 |
from unittest import TestCase
from tcontrol.discretization import c2d
from ..transferfunction import tf
from ..model_conversion import *
from ..statespace import StateSpace
import numpy as np
from .tools.test_utility import assert_ss_equal
class TestDiscretization(TestCase):
def setUp(self):
self.s1 = tf([1], [1, 0, 1])
self.zoh = tf([0.4597, 0.4597], [1, 1.0806, 1], dt=1)
self.ss = tf2ss(tf([1], [1, 0, 1]))
def test_c2d_zoh(self):
d_sys = c2d(self.s1, 1, 'zoh')
self.assertLessEqual(np.max(np.abs(d_sys.num - self.zoh.num)), 1e-4)
def test_c2d_foh(self):
a = c2d(self.ss, 1, 'foh')
b = StateSpace([[0.540302, 0.841471], [-0.841471, 0.540302]],
[[0.773644], [0.49675]],
[[1, 0]], [[0.158529]], dt=1)
assert_ss_equal(a, b)
def test_c2d_tustin(self):
d_sys = c2d(self.s1, 1, 'tustin')
error = np.abs(d_sys.num - np.array([0.2, 0.4, 0.2]))
self.assertLessEqual(np.max(error), 1e-4)
def test_c2d_matched(self):
d_sys = c2d(self.s1, 1, 'matched')
error = np.abs(d_sys.num - np.array([0.2298, 0.4597, 0.2298]))
self.assertLessEqual(np.max(error), 1e-4)
| DaivdZhang/tinyControl | tcontrol/tests/test_discretization.py | Python | bsd-3-clause | 1,236 | 0 |
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
from client import *
from base_asset import BaseAsset
from cache_decorator import memoized
from special_class_methods import special_classes
from none_deref import NoneDeref
from string_utils import split_attribute
class V1Meta(object):
def __init__(self, *args, **kw):
self.server = V1Server(*args, **kw)
self.global_cache = {}
self.dirtylist = []
def __getattr__(self, attr):
"Dynamically build asset type classes when someone tries to get attrs "
"that we don't have."
return self.asset_class(attr)
def __enter__(self):
return self
def __exit__(self, *args, **kw):
self.commit()
@memoized
def asset_class(self, asset_type_name):
xmldata = self.server.get_meta_xml(asset_type_name)
class_members = {
'_v1_v1meta': self,
'_v1_asset_type_name': asset_type_name,
}
for operation in xmldata.findall('Operation'):
opname = operation.get('name')
def operation_func(myself, opname2=opname):
myself._v1_execute_operation(opname2)
class_members[opname] = operation_func
for attribute in xmldata.findall('AttributeDefinition'):
attr = attribute.get("name")
if attribute.get('attributetype') == 'Relation':
if attribute.get('ismultivalue') == 'True':
def getter(self, attr=attr):
return self._v1_getattr(attr)
def setter(self, value, attr=attr):
return self._v1_setattr(attr, list(value))
def deleter(self, attr=attr):
raise NotImplementedError
else:
def getter(self, attr=attr):
v = self._v1_getattr(attr)
if v:
return self._v1_getattr(attr)[0]
else:
return NoneDeref()
def setter(self, value, attr=attr):
return self._v1_setattr(attr, value)
def deleter(self, attr=attr):
raise NotImplementedError
else:
def getter(self, attr=attr):
return self._v1_getattr(attr)
def setter(self, value, attr=attr):
return self._v1_setattr(attr, value)
def deleter(self, attr=attr):
raise NotImplementedError
class_members[attr] = property(getter, setter, deleter)
bases = [BaseAsset,]
# mix in any special methods
if asset_type_name in special_classes:
mixin = special_classes[asset_type_name]
bases.append(mixin)
new_asset_class = type(asset_type_name, tuple(bases), class_members)
return new_asset_class
def add_to_dirty_list(self, asset_instance):
self.dirtylist.append(asset_instance)
def commit(self):
errors = []
for asset in self.dirtylist:
try:
asset._v1_commit()
except V1Error, e:
errors.append(e)
self.dirtylist = []
return errors
def generate_update_doc(self, newdata):
update_doc = Element('Asset')
for attrname, newvalue in newdata.items():
if newvalue is None: # single relation was removed
node = Element('Relation')
node.set('name', attrname)
node.set('act', 'set')
elif isinstance(newvalue, BaseAsset): # single relation was changed
node = Element('Relation')
node.set('name', attrname)
node.set('act', 'set')
ra = Element('Asset')
ra.set('idref', newvalue.idref)
node.append(ra)
elif isinstance(newvalue, list): # multi relation was changed
node = Element('Relation')
node.set('name', attrname)
for item in newvalue:
child = Element('Asset')
child.set('idref', item.idref)
child.set('act', 'add')
node.append(child)
else: # Not a relation
node = Element('Attribute')
node.set('name', attrname)
node.set('act', 'set')
if isinstance(newvalue, unicode) != True:
node.text = str(newvalue).decode('utf-8')
else:
node.text = newvalue
update_doc.append(node)
return update_doc
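    # Illustrative sketch, not part of the original class: for one plain value
    # and one single-relation value, generate_update_doc() yields roughly
    #
    #   <Asset>
    #     <Attribute name="Name" act="set">New story name</Attribute>
    #     <Relation name="Scope" act="set"><Asset idref="Scope:1" /></Relation>
    #   </Asset>
    #
    # where "Name", "Scope" and "Scope:1" are hypothetical attribute, relation
    # and oid values used only for this example.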
def create_asset(self, asset_type_name, newdata):
update_doc = self.generate_update_doc(newdata)
new_asset_xml = self.server.create_asset(asset_type_name, update_doc)
asset_type, asset_oid, asset_moment = new_asset_xml.get('id').split(':')
return self.asset_class(asset_type)(asset_oid)
def update_asset(self, asset_type_name, asset_oid, newdata):
update_doc = self.generate_update_doc(newdata)
return self.server.update_asset(asset_type_name, asset_oid, update_doc)
def execute_operation(self, asset_type_name, oid, opname):
return self.server.execute_operation(asset_type_name, oid, opname)
def get_attr(self, asset_type_name, oid, attrname, moment=None):
xml = self.server.get_attr(asset_type_name, oid, attrname, moment)
dummy_asset = ElementTree.Element('Asset')
dummy_asset.append(xml)
return self.unpack_asset(dummy_asset)[attrname]
def query(self, asset_type_name, wherestring, selstring):
return self.server.get_query_xml(asset_type_name, wherestring, selstring)
def read_asset(self, asset_type_name, asset_oid, moment=None):
xml = self.server.get_asset_xml(asset_type_name, asset_oid, moment)
return self.unpack_asset(xml)
def unpack_asset(self, xml):
output = {}
self.unpack_asset_relations(output, xml)
self.unpack_asset_attributes(output, xml)
return output
def unpack_asset_attributes(self, output, xml):
for attribute in xml.findall('Attribute'):
#key = attribute.get('name').replace('.','_')
key = attribute.get('name')
values = [v.text for v in attribute.findall('Value')]
if len(values) == 0:
values = [attribute.text]
self.add_attribute_to_output(output, key, values)
def unpack_asset_relations(self, output, xml):
# we sort relations in order to insert the shortest ones first, so that
# containing relations are added before leaf ones.
for relation in sorted(xml.findall('Relation'), key=lambda x: x.get('name')):
key = relation.get('name')
related_asset_elements = relation.findall('Asset')
rellist = []
for value_element in related_asset_elements:
relation_idref = value_element.get('idref')
value = self.asset_from_oid(relation_idref)
rellist.append(value)
self.add_relation_to_output(output, key, rellist)
def add_relation_to_output(self, output, relation, assets):
if self.is_attribute_qualified(relation):
(container, leaf) = self.split_relation_to_container_and_leaf(relation)
asset = self.get_related_asset(output, container)
# asset may be unset because the reference is broken
if asset:
asset.with_data({leaf: assets})
else:
output[relation] = assets
def add_attribute_to_output(self, output, relation, values):
if self.is_attribute_qualified(relation):
(container, leaf) = self.split_relation_to_container_and_leaf(relation)
for (asset, value) in zip(self.get_related_assets(output, container), values):
# for calculated values it is not an asset so take the value directly
if hasattr(asset, 'with_data'):
asset.with_data({leaf: value})
else:
output[relation] = value
else:
output[relation] = values[0]
def is_attribute_qualified(self, relation):
parts = split_attribute(relation)
return len(parts) > 1
def split_relation_to_container_and_leaf(self, relation):
parts = split_attribute(relation)
return ('.'.join(parts[:-1]), parts[-1])
def get_related_assets(self, output, relation):
if self.is_attribute_qualified(relation):
parts = split_attribute(relation)
assets = output[parts[0]]
for part in parts[1:]:
try:
asset = assets[0]
except IndexError:
return []
assets = asset._v1_getattr(part)
return assets
else:
return output[relation]
def get_related_asset(self, output, relation):
assets = self.get_related_assets(output, relation)
try:
return assets[0]
except IndexError:
return None
def asset_from_oid(self, oidtoken):
oid_parts = oidtoken.split(":")
(asset_type, asset_id, moment) = oid_parts if len(oid_parts)>2 else (oid_parts[0], oid_parts[1], None)
AssetClass = self.asset_class(asset_type)
instance = AssetClass(asset_id, moment)
return instance
def set_attachment_blob(self, attachment, data=None):
intid = attachment.intid if isinstance(attachment, BaseAsset) else attachment
return self.server.set_attachment_blob(intid, data)
get_attachment_blob = set_attachment_blob
#type_converters = dict(
# Boolean = bool
# Numeric = float,
# Date = iso8601.parse_date,
# Duration = str,
# Text = str,
# LongText = str,
# Relation = str,
# Rank = str,
# AssetType = str,
# Opaque = str,
# State = int,
# Password = str,
# Blob = str,
#)
| versionone/VersionOne.SDK.Python | v1pysdk/v1meta.py | Python | bsd-3-clause | 9,214 | 0.018016 |
##########################################################################
# Copyright (C) 2009 - 2014 Huygens ING & Gerbrandy S.R.L.
#
# This file is part of bioport.
#
# bioport is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this program. If not, see
# <http://www.gnu.org/licenses/gpl-3.0.html>.
##########################################################################
from formencode.validators import Email
from formencode.api import Invalid
def check_email(email):
try:
Email().to_python(email)
return True
except Invalid, error:
return False
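if __name__ == '__main__':
    # Minimal self-check sketch, not part of the original module: the first
    # address is syntactically valid for formencode's Email validator with its
    # default settings, the second is not.
    assert check_email('[email protected]')
    assert not check_email('not-an-address')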
| jellegerbrandy/bioport-site | bioport/mail_validation.py | Python | gpl-3.0 | 1,103 | 0.004533 |
from ddt import ddt, data
from django.test import TestCase
from six.moves import mock
from waldur_core.core import utils
from waldur_core.structure import tasks
from waldur_core.structure.tests import factories, models
class TestDetectVMCoordinatesTask(TestCase):
@mock.patch('requests.get')
def test_task_sets_coordinates(self, mock_request_get):
ip_address = "127.0.0.1"
expected_latitude = 20
expected_longitude = 20
instance = factories.TestNewInstanceFactory()
mock_request_get.return_value.ok = True
response = {"ip": ip_address, "latitude": expected_latitude, "longitude": expected_longitude}
mock_request_get.return_value.json.return_value = response
tasks.detect_vm_coordinates(utils.serialize_instance(instance))
instance.refresh_from_db()
self.assertEqual(instance.latitude, expected_latitude)
self.assertEqual(instance.longitude, expected_longitude)
@mock.patch('requests.get')
def test_task_does_not_set_coordinates_if_response_is_not_ok(self, mock_request_get):
instance = factories.TestNewInstanceFactory()
mock_request_get.return_value.ok = False
tasks.detect_vm_coordinates(utils.serialize_instance(instance))
instance.refresh_from_db()
self.assertIsNone(instance.latitude)
self.assertIsNone(instance.longitude)
@ddt
class ThrottleProvisionTaskTest(TestCase):
@data(
dict(size=tasks.ThrottleProvisionTask.DEFAULT_LIMIT + 1, retried=True),
dict(size=tasks.ThrottleProvisionTask.DEFAULT_LIMIT - 1, retried=False),
)
def test_if_limit_is_reached_provisioning_is_delayed(self, params):
link = factories.TestServiceProjectLinkFactory()
factories.TestNewInstanceFactory.create_batch(
size=params['size'],
state=models.TestNewInstance.States.CREATING,
service_project_link=link)
vm = factories.TestNewInstanceFactory(
state=models.TestNewInstance.States.CREATION_SCHEDULED,
service_project_link=link)
serialized_vm = utils.serialize_instance(vm)
mocked_retry = mock.Mock()
tasks.ThrottleProvisionTask.retry = mocked_retry
tasks.ThrottleProvisionTask().si(
serialized_vm,
'create',
state_transition='begin_starting').apply()
self.assertEqual(mocked_retry.called, params['retried'])
| opennode/nodeconductor | waldur_core/structure/tests/unittests/test_tasks.py | Python | mit | 2,443 | 0.001228 |
# coding=utf-8
"""InaSAFE Disaster risk tool by Australian Aid - Volcano Polygon on Population
Metadata Definitions.
Contact : [email protected]
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
from safe.common.utilities import OrderedDict
from safe.defaults import (
default_minimum_needs,
default_gender_postprocessor,
age_postprocessor,
minimum_needs_selector)
from safe.impact_functions.impact_function_metadata import \
ImpactFunctionMetadata
from safe.utilities.i18n import tr
from safe.definitions import (
layer_mode_classified,
layer_mode_continuous,
layer_geometry_polygon,
layer_geometry_raster,
hazard_volcano,
volcano_vector_hazard_classes,
hazard_category_multiple_event,
hazard_category_single_event,
exposure_population,
count_exposure_unit,
volcano_name_field,
)
class VolcanoPolygonPopulationFunctionMetadata(ImpactFunctionMetadata):
"""Metadata for VolcanoPolygonPopulationFunctionMetadata.
.. versionadded:: 2.1
We only need to re-implement as_dict(), all other behaviours
are inherited from the abstract base class.
"""
@staticmethod
def as_dict():
"""Return metadata as a dictionary.
This is a static method. You can use it to get the metadata in
dictionary format for an impact function.
:returns: A dictionary representing all the metadata for the
concrete impact function.
:rtype: dict
"""
dict_meta = {
'id': 'VolcanoPolygonPopulationFunction',
'name': tr('Polygon volcano on population'),
'impact': tr('Need evacuation'),
'title': tr('Need evacuation'),
'function_type': 'old-style',
'author': 'AIFDR',
'date_implemented': 'N/A',
'hazard_input': tr(
'The hazard layer must be a polygon layer. This layer '
'must have an attribute representing the volcano hazard '
'zone that can be specified in the impact function option. '
'There are three classes low, medium, and high. The default '
'values are "Kawasan Rawan Bencana I" for low, "Kawasan Rawan '
'Bencana II" for medium, and "Kawasan Rawan Bencana III for '
'high." If you want to see the name of the volcano in the '
'result, you need to specify the volcano name attribute in '
'the Impact Function options.'),
'exposure_input': tr(
'An exposure raster layer where each cell represents a '
'population count for that cell.'),
'output': tr(
'A vector layer containing people affected per hazard zone '
'and the minimum needs based on the number of people '
'affected.'),
'actions': tr(
'Provide details about the number of people that are within '
'each hazard zone.'),
'limitations': [],
'citations': [],
'overview': tr(
'To assess the impact of a volcano eruption on people.'),
'detailed_description': '',
'layer_requirements': {
'hazard': {
'layer_mode': layer_mode_classified,
'layer_geometries': [layer_geometry_polygon],
'hazard_categories': [
hazard_category_multiple_event,
hazard_category_single_event
],
'hazard_types': [hazard_volcano],
'continuous_hazard_units': [],
'vector_hazard_classifications': [
volcano_vector_hazard_classes],
'raster_hazard_classifications': [],
'additional_keywords': [
volcano_name_field]
},
'exposure': {
'layer_mode': layer_mode_continuous,
'layer_geometries': [layer_geometry_raster],
'exposure_types': [exposure_population],
'exposure_units': [count_exposure_unit],
'exposure_class_fields': [],
'additional_keywords': []
}
},
'parameters': OrderedDict([
('postprocessors', OrderedDict([
('Gender', default_gender_postprocessor()),
('Age', age_postprocessor()),
('MinimumNeeds', minimum_needs_selector()),
])),
('minimum needs', default_minimum_needs())
])
}
return dict_meta
| cchristelis/inasafe | safe/impact_functions/volcanic/volcano_polygon_population/metadata_definitions.py | Python | gpl-3.0 | 4,986 | 0 |
"""
PLPY helper module for applying row events from pgq.logutriga().
"""
import plpy
import pkgloader
pkgloader.require('skytools', '3.0')
import skytools
## TODO: automatic fkey detection
# find FK columns
FK_SQL = """
SELECT (SELECT array_agg( (SELECT attname::text FROM pg_attribute
WHERE attrelid = conrelid AND attnum = conkey[i]))
FROM generate_series(1, array_upper(conkey, 1)) i) AS kcols,
(SELECT array_agg( (SELECT attname::text FROM pg_attribute
WHERE attrelid = confrelid AND attnum = confkey[i]))
FROM generate_series(1, array_upper(confkey, 1)) i) AS fcols,
confrelid::regclass::text AS ftable
FROM pg_constraint
WHERE conrelid = {tbl}::regclass AND contype='f'
"""
class DataError(Exception):
"Invalid data"
def colfilter_full(rnew, rold):
return rnew
def colfilter_changed(rnew, rold):
    """Keep only the columns whose value differs from the old row."""
    res = {}
    for k, v in rnew.items():
        if rold[k] != v:
            res[k] = v
    return res
def canapply_dummy(rnew, rold):
return True
def canapply_tstamp_helper(rnew, rold, tscol):
tnew = rnew[tscol]
told = rold[tscol]
if not tnew[0].isdigit():
raise DataError('invalid timestamp')
if not told[0].isdigit():
raise DataError('invalid timestamp')
return tnew > told
def applyrow(tblname, ev_type, new_row,
backup_row = None,
alt_pkey_cols = None,
fkey_cols = None,
fkey_ref_table = None,
fkey_ref_cols = None,
fn_canapply = canapply_dummy,
fn_colfilter = colfilter_full):
"""Core logic. Actual decisions will be done in callback functions.
- [IUD]: If row referenced by fkey does not exist, event is not applied
- If pkey does not exist but alt_pkey does, row is not applied.
@param tblname: table name, schema-qualified
@param ev_type: [IUD]:pkey1,pkey2
    @param alt_pkey_cols: list of alternative columns to consider
@param fkey_cols: columns in this table that refer to other table
@param fkey_ref_table: other table referenced here
@param fkey_ref_cols: column in other table that must match
@param fn_canapply: callback function, gets new and old row, returns whether the row should be applied
@param fn_colfilter: callback function, gets new and old row, returns dict of final columns to be applied
"""
gd = None
# parse ev_type
tmp = ev_type.split(':', 1)
if len(tmp) != 2 or tmp[0] not in ('I', 'U', 'D'):
raise DataError('Unsupported ev_type: '+repr(ev_type))
if not tmp[1]:
raise DataError('No pkey in event')
cmd = tmp[0]
pkey_cols = tmp[1].split(',')
qtblname = skytools.quote_fqident(tblname)
# parse ev_data
fields = skytools.db_urldecode(new_row)
if ev_type.find('}') >= 0:
raise DataError('Really suspicious activity')
if ",".join(fields.keys()).find('}') >= 0:
raise DataError('Really suspicious activity 2')
# generate pkey expressions
tmp = ["%s = {%s}" % (skytools.quote_ident(k), k) for k in pkey_cols]
pkey_expr = " and ".join(tmp)
alt_pkey_expr = None
if alt_pkey_cols:
tmp = ["%s = {%s}" % (skytools.quote_ident(k), k) for k in alt_pkey_cols]
alt_pkey_expr = " and ".join(tmp)
log = "data ok"
#
# Row data seems fine, now apply it
#
if fkey_ref_table:
tmp = []
for k, rk in zip(fkey_cols, fkey_ref_cols):
tmp.append("%s = {%s}" % (skytools.quote_ident(rk), k))
fkey_expr = " and ".join(tmp)
q = "select 1 from only %s where %s" % (
skytools.quote_fqident(fkey_ref_table),
fkey_expr)
res = skytools.plpy_exec(gd, q, fields)
if not res:
return "IGN: parent row does not exist"
log += ", fkey ok"
# fetch old row
if alt_pkey_expr:
q = "select * from only %s where %s for update" % (qtblname, alt_pkey_expr)
res = skytools.plpy_exec(gd, q, fields)
if res:
oldrow = res[0]
# if altpk matches, but pk not, then delete
need_del = 0
for k in pkey_cols:
# fixme: proper type cmp?
if fields[k] != str(oldrow[k]):
need_del = 1
break
if need_del:
log += ", altpk del"
q = "delete from only %s where %s" % (qtblname, alt_pkey_expr)
skytools.plpy_exec(gd, q, fields)
res = None
else:
log += ", altpk ok"
else:
# no altpk
q = "select * from only %s where %s for update" % (qtblname, pkey_expr)
res = skytools.plpy_exec(None, q, fields)
# got old row, with same pk and altpk
if res:
oldrow = res[0]
log += ", old row"
ok = fn_canapply(fields, oldrow)
if ok:
log += ", new row better"
if not ok:
# ignore the update
return "IGN:" + log + ", current row more up-to-date"
else:
log += ", no old row"
oldrow = None
if res:
if cmd == 'I':
cmd = 'U'
else:
if cmd == 'U':
cmd = 'I'
# allow column changes
if oldrow:
fields2 = fn_colfilter(fields, oldrow)
for k in pkey_cols:
if k not in fields2:
fields2[k] = fields[k]
fields = fields2
# apply change
if cmd == 'I':
q = skytools.mk_insert_sql(fields, tblname, pkey_cols)
elif cmd == 'U':
q = skytools.mk_update_sql(fields, tblname, pkey_cols)
elif cmd == 'D':
q = skytools.mk_delete_sql(fields, tblname, pkey_cols)
else:
plpy.error('Huh')
plpy.execute(q)
return log
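# Illustrative usage sketch (editor's example, not part of the original
# module).  applyrow() is meant to be called from a PL/Python conflict
# handler with event fields produced by pgq.logutriga(); the table, column
# and value names below are hypothetical.
#
#   applyrow('public.orders', 'U:id', 'id=1&status=shipped',
#            alt_pkey_cols=['order_code'],
#            fn_canapply=canapply_dummy,
#            fn_colfilter=colfilter_changed)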
def ts_conflict_handler(gd, args):
"""Conflict handling based on timestamp column."""
conf = skytools.db_urldecode(args[0])
timefield = conf['timefield']
ev_type = args[1]
ev_data = args[2]
ev_extra1 = args[3]
ev_extra2 = args[4]
ev_extra3 = args[5]
ev_extra4 = args[6]
altpk = None
if 'altpk' in conf:
altpk = conf['altpk'].split(',')
def ts_canapply(rnew, rold):
return canapply_tstamp_helper(rnew, rold, timefield)
return applyrow(ev_extra1, ev_type, ev_data,
backup_row = ev_extra2,
alt_pkey_cols = altpk,
fkey_ref_table = conf.get('fkey_ref_table'),
fkey_ref_cols = conf.get('fkey_ref_cols'),
fkey_cols = conf.get('fkey_cols'),
fn_canapply = ts_canapply)
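# Hedged note (editor's example): args[0] is an urlencoded configuration
# string, e.g. 'timefield=modified_date&altpk=username', followed by ev_type,
# ev_data and the four ev_extra fields supplied by the queue consumer.  The
# column names in this example are assumptions, not part of the original
# module.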
| overdrive3000/skytools | python/skytools/plpy_applyrow.py | Python | isc | 6,707 | 0.006113 |
# -*- coding: UTF-8 -*-
from __future__ import print_function
import csv
import os
ignored_views = set(["HHB", "FFO", "FFOI"])
seen_views = set([])
seen_aliases = set([])
seen_groups = set([])
tpl = "check_journal(u'{1}', u'{4}', u'{11}', u'{10}')"
print("""# -*- coding: UTF-8 -*-
from __future__ import print_function
from lino.api import rt
ledger = rt.models.ledger
finan = rt.models.finan
vatless = rt.models.vatless
def check_journal(ref, name, view, group):
if ledger.Journal.objects.filter(ref=ref).count():
print("Journal", ref, "exists")
return
if not group:
return
if view == "REG":
voucher_type = 'vatless.ProjectInvoicesByJournal'
elif view == "AAW":
voucher_type = 'finan.DisbursementOrdersByJournal'
elif view == "KAS":
voucher_type = 'finan.BankStatementsByJournal'
elif view == "ZAU":
voucher_type = 'finan.PaymentOrdersByJournal'
else:
return
grp = ledger.JournalGroups.get_by_name(group.lower())
obj = ledger.Journal(ref=ref, name=name, voucher_type=voucher_type,
journal_group=grp)
obj.full_clean()
# uncomment the following line when ready:
# obj.save()
print("Journal", ref, "has been created")
""")
with open(os.path.expanduser('~/Downloads/JNL.csv'), 'r') as csvfile:
reader = csv.reader(csvfile, delimiter=';', quotechar='"')
for row in reader:
row = [x.strip() for x in row]
alias = row[2].strip()
group = row[10].strip()
view = row[11].strip()
if alias in ["IMP"]:
if view not in ignored_views:
seen_views.add(view)
seen_aliases.add(alias)
seen_groups.add(group)
print(tpl.format(*row))
# print(', '.join(row))
#print("# Seen aliases:", seen_aliases)
print("# Seen views:", seen_views)
print("# Seen groups:", seen_groups)
| lsaffre/blog | docs/blog/2016/0305.py | Python | agpl-3.0 | 1,941 | 0.00103 |
# Copyright 2013 Dell Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo.config import cfg
from neutron.agent.common import config as a_cfg
from neutron.tests import base
from neutron.tests.unit import test_api_v2
import neutron_fwaas.services.firewall.drivers.linux.iptables_fwaas as fwaas
_uuid = test_api_v2._uuid
FAKE_SRC_PREFIX = '10.0.0.0/24'
FAKE_DST_PREFIX = '20.0.0.0/24'
FAKE_PROTOCOL = 'tcp'
FAKE_SRC_PORT = 5000
FAKE_DST_PORT = 22
FAKE_FW_ID = 'fake-fw-uuid'
class IptablesFwaasTestCase(base.BaseTestCase):
def setUp(self):
super(IptablesFwaasTestCase, self).setUp()
cfg.CONF.register_opts(a_cfg.ROOT_HELPER_OPTS, 'AGENT')
self.utils_exec_p = mock.patch(
'neutron.agent.linux.utils.execute')
self.utils_exec = self.utils_exec_p.start()
self.iptables_cls_p = mock.patch(
'neutron.agent.linux.iptables_manager.IptablesManager')
self.iptables_cls_p.start()
self.firewall = fwaas.IptablesFwaasDriver()
def _fake_rules_v4(self, fwid, apply_list):
rule_list = []
rule1 = {'enabled': True,
'action': 'allow',
'ip_version': 4,
'protocol': 'tcp',
'destination_port': '80',
'source_ip_address': '10.24.4.2'}
rule2 = {'enabled': True,
'action': 'deny',
'ip_version': 4,
'protocol': 'tcp',
'destination_port': '22'}
ingress_chain = ('iv4%s' % fwid)[:11]
egress_chain = ('ov4%s' % fwid)[:11]
for router_info_inst in apply_list:
v4filter_inst = router_info_inst.iptables_manager.ipv4['filter']
v4filter_inst.chains.append(ingress_chain)
v4filter_inst.chains.append(egress_chain)
rule_list.append(rule1)
rule_list.append(rule2)
return rule_list
def _fake_firewall_no_rule(self):
rule_list = []
fw_inst = {'id': FAKE_FW_ID,
'admin_state_up': True,
'tenant_id': 'tenant-uuid',
'firewall_rule_list': rule_list}
return fw_inst
def _fake_firewall(self, rule_list):
fw_inst = {'id': FAKE_FW_ID,
'admin_state_up': True,
'tenant_id': 'tenant-uuid',
'firewall_rule_list': rule_list}
return fw_inst
def _fake_firewall_with_admin_down(self, rule_list):
fw_inst = {'id': FAKE_FW_ID,
'admin_state_up': False,
'tenant_id': 'tenant-uuid',
'firewall_rule_list': rule_list}
return fw_inst
def _fake_apply_list(self, router_count=1, distributed=False,
distributed_mode=None):
apply_list = []
while router_count > 0:
iptables_inst = mock.Mock()
router_inst = {'distributed': distributed}
v4filter_inst = mock.Mock()
v6filter_inst = mock.Mock()
v4filter_inst.chains = []
v6filter_inst.chains = []
iptables_inst.ipv4 = {'filter': v4filter_inst}
iptables_inst.ipv6 = {'filter': v6filter_inst}
router_info_inst = mock.Mock()
router_info_inst.iptables_manager = iptables_inst
router_info_inst.snat_iptables_manager = iptables_inst
if distributed_mode == 'dvr':
router_info_inst.dist_fip_count = 1
router_info_inst.router = router_inst
apply_list.append(router_info_inst)
router_count -= 1
return apply_list
def _setup_firewall_with_rules(self, func, router_count=1,
distributed=False, distributed_mode=None):
apply_list = self._fake_apply_list(router_count=router_count,
distributed=distributed, distributed_mode=distributed_mode)
rule_list = self._fake_rules_v4(FAKE_FW_ID, apply_list)
firewall = self._fake_firewall(rule_list)
if distributed:
if distributed_mode == 'dvr_snat':
if_prefix = 'sg-+'
if distributed_mode == 'dvr':
if_prefix = 'rfp-+'
else:
if_prefix = 'qr-+'
distributed_mode = 'legacy'
func(distributed_mode, apply_list, firewall)
invalid_rule = '-m state --state INVALID -j DROP'
est_rule = '-m state --state ESTABLISHED,RELATED -j ACCEPT'
rule1 = '-p tcp --dport 80 -s 10.24.4.2 -j ACCEPT'
rule2 = '-p tcp --dport 22 -j DROP'
ingress_chain = 'iv4%s' % firewall['id']
egress_chain = 'ov4%s' % firewall['id']
bname = fwaas.iptables_manager.binary_name
ipt_mgr_ichain = '%s-%s' % (bname, ingress_chain[:11])
ipt_mgr_echain = '%s-%s' % (bname, egress_chain[:11])
for router_info_inst in apply_list:
v4filter_inst = router_info_inst.iptables_manager.ipv4['filter']
calls = [mock.call.remove_chain('iv4fake-fw-uuid'),
mock.call.remove_chain('ov4fake-fw-uuid'),
mock.call.remove_chain('fwaas-default-policy'),
mock.call.add_chain('fwaas-default-policy'),
mock.call.add_rule('fwaas-default-policy', '-j DROP'),
mock.call.add_chain(ingress_chain),
mock.call.add_rule(ingress_chain, invalid_rule),
mock.call.add_rule(ingress_chain, est_rule),
mock.call.add_chain(egress_chain),
mock.call.add_rule(egress_chain, invalid_rule),
mock.call.add_rule(egress_chain, est_rule),
mock.call.add_rule(ingress_chain, rule1),
mock.call.add_rule(egress_chain, rule1),
mock.call.add_rule(ingress_chain, rule2),
mock.call.add_rule(egress_chain, rule2),
mock.call.add_rule('FORWARD',
'-o %s -j %s' % (if_prefix,
ipt_mgr_ichain)),
mock.call.add_rule('FORWARD',
'-i %s -j %s' % (if_prefix,
ipt_mgr_echain)),
mock.call.add_rule('FORWARD',
'-o %s -j %s-fwaas-defau' % (if_prefix,
bname)),
mock.call.add_rule('FORWARD',
'-i %s -j %s-fwaas-defau' % (if_prefix,
bname))]
v4filter_inst.assert_has_calls(calls)
def test_create_firewall_no_rules(self):
apply_list = self._fake_apply_list()
firewall = self._fake_firewall_no_rule()
self.firewall.create_firewall('legacy', apply_list, firewall)
invalid_rule = '-m state --state INVALID -j DROP'
est_rule = '-m state --state ESTABLISHED,RELATED -j ACCEPT'
bname = fwaas.iptables_manager.binary_name
for ip_version in (4, 6):
ingress_chain = ('iv%s%s' % (ip_version, firewall['id']))
egress_chain = ('ov%s%s' % (ip_version, firewall['id']))
calls = [mock.call.remove_chain(
'iv%sfake-fw-uuid' % ip_version),
mock.call.remove_chain(
'ov%sfake-fw-uuid' % ip_version),
mock.call.remove_chain('fwaas-default-policy'),
mock.call.add_chain('fwaas-default-policy'),
mock.call.add_rule('fwaas-default-policy', '-j DROP'),
mock.call.add_chain(ingress_chain),
mock.call.add_rule(ingress_chain, invalid_rule),
mock.call.add_rule(ingress_chain, est_rule),
mock.call.add_chain(egress_chain),
mock.call.add_rule(egress_chain, invalid_rule),
mock.call.add_rule(egress_chain, est_rule),
mock.call.add_rule('FORWARD',
'-o qr-+ -j %s-fwaas-defau' % bname),
mock.call.add_rule('FORWARD',
'-i qr-+ -j %s-fwaas-defau' % bname)]
if ip_version == 4:
v4filter_inst = apply_list[0].iptables_manager.ipv4['filter']
v4filter_inst.assert_has_calls(calls)
else:
v6filter_inst = apply_list[0].iptables_manager.ipv6['filter']
v6filter_inst.assert_has_calls(calls)
def test_create_firewall_with_rules(self):
self._setup_firewall_with_rules(self.firewall.create_firewall)
def test_create_firewall_with_rules_two_routers(self):
self._setup_firewall_with_rules(self.firewall.create_firewall,
router_count=2)
def test_update_firewall_with_rules(self):
self._setup_firewall_with_rules(self.firewall.update_firewall)
def test_delete_firewall(self):
apply_list = self._fake_apply_list()
firewall = self._fake_firewall_no_rule()
self.firewall.delete_firewall('legacy', apply_list, firewall)
ingress_chain = 'iv4%s' % firewall['id']
egress_chain = 'ov4%s' % firewall['id']
calls = [mock.call.remove_chain(ingress_chain),
mock.call.remove_chain(egress_chain),
mock.call.remove_chain('fwaas-default-policy')]
apply_list[0].iptables_manager.ipv4['filter'].assert_has_calls(calls)
def test_create_firewall_with_admin_down(self):
apply_list = self._fake_apply_list()
rule_list = self._fake_rules_v4(FAKE_FW_ID, apply_list)
firewall = self._fake_firewall_with_admin_down(rule_list)
self.firewall.create_firewall('legacy', apply_list, firewall)
calls = [mock.call.remove_chain('iv4fake-fw-uuid'),
mock.call.remove_chain('ov4fake-fw-uuid'),
mock.call.remove_chain('fwaas-default-policy'),
mock.call.add_chain('fwaas-default-policy'),
mock.call.add_rule('fwaas-default-policy', '-j DROP')]
apply_list[0].iptables_manager.ipv4['filter'].assert_has_calls(calls)
def test_create_firewall_with_rules_dvr_snat(self):
self._setup_firewall_with_rules(self.firewall.create_firewall,
distributed=True, distributed_mode='dvr_snat')
def test_update_firewall_with_rules_dvr_snat(self):
self._setup_firewall_with_rules(self.firewall.update_firewall,
distributed=True, distributed_mode='dvr_snat')
def test_create_firewall_with_rules_dvr(self):
self._setup_firewall_with_rules(self.firewall.create_firewall,
distributed=True, distributed_mode='dvr')
def test_update_firewall_with_rules_dvr(self):
self._setup_firewall_with_rules(self.firewall.update_firewall,
distributed=True, distributed_mode='dvr')
| citrix-openstack-build/neutron-fwaas | neutron_fwaas/tests.skip/unit/services/firewall/drivers/linux/test_iptables_fwaas.py | Python | apache-2.0 | 11,580 | 0.00095 |
# -*- coding: utf-8 -*-
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Robert Layton <[email protected]>
# Andreas Mueller <[email protected]>
# Philippe Gervais <[email protected]>
# Lars Buitinck
# Joel Nothman <[email protected]>
# License: BSD 3 clause
import itertools
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from ..utils import check_array
from ..utils import gen_even_slices
from ..utils import gen_batches
from ..utils.fixes import partial
from ..utils.extmath import row_norms, safe_sparse_dot
from ..preprocessing import normalize
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.joblib.parallel import cpu_count
from .pairwise_fast import _chi2_kernel_fast, _sparse_manhattan
# Utility Functions
def _return_float_dtype(X, Y):
"""
1. If dtype of X and Y is float32, then dtype float32 is returned.
2. Else dtype float is returned.
"""
if not issparse(X) and not isinstance(X, np.ndarray):
X = np.asarray(X)
if Y is None:
Y_dtype = X.dtype
elif not issparse(Y) and not isinstance(Y, np.ndarray):
Y = np.asarray(Y)
Y_dtype = Y.dtype
else:
Y_dtype = Y.dtype
if X.dtype == Y_dtype == np.float32:
dtype = np.float32
else:
dtype = np.float
return X, Y, dtype
def check_pairwise_arrays(X, Y, precomputed=False, dtype=None):
""" Set X and Y appropriately and checks inputs
If Y is None, it is set as a pointer to X (i.e. not a copy).
If Y is given, this does not happen.
All distance metrics should use this function first to assert that the
given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats (or dtype if provided). Finally, the function
checks that the size of the second dimension of the two arrays is equal, or
the equivalent check for a precomputed distance matrix.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
precomputed : bool
True if X is to be treated as precomputed distances to the samples in
Y.
dtype : string, type, list of types or None (default=None)
Data type required for X and Y. If None, the dtype will be an
appropriate float type selected by _return_float_dtype.
.. versionadded:: 0.18
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y, dtype_float = _return_float_dtype(X, Y)
warn_on_dtype = dtype is not None
estimator = 'check_pairwise_arrays'
if dtype is None:
dtype = dtype_float
if Y is X or Y is None:
X = Y = check_array(X, accept_sparse='csr', dtype=dtype,
warn_on_dtype=warn_on_dtype, estimator=estimator)
else:
X = check_array(X, accept_sparse='csr', dtype=dtype,
warn_on_dtype=warn_on_dtype, estimator=estimator)
Y = check_array(Y, accept_sparse='csr', dtype=dtype,
warn_on_dtype=warn_on_dtype, estimator=estimator)
if precomputed:
if X.shape[1] != Y.shape[0]:
raise ValueError("Precomputed metric requires shape "
"(n_queries, n_indexed). Got (%d, %d) "
"for %d indexed." %
(X.shape[0], X.shape[1], Y.shape[0]))
elif X.shape[1] != Y.shape[1]:
raise ValueError("Incompatible dimension for X and Y matrices: "
"X.shape[1] == %d while Y.shape[1] == %d" % (
X.shape[1], Y.shape[1]))
return X, Y
def check_paired_arrays(X, Y):
""" Set X and Y appropriately and checks inputs for paired distances
All paired distance metrics should use this function first to assert that
the given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the dimensions of the two arrays are equal.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y = check_pairwise_arrays(X, Y)
if X.shape != Y.shape:
raise ValueError("X and Y should be of same shape. They were "
"respectively %r and %r long." % (X.shape, Y.shape))
return X, Y
# Pairwise distances
def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False,
X_norm_squared=None):
"""
Considering the rows of X (and Y=X) as vectors, compute the
distance matrix between each pair of vectors.
For efficiency reasons, the euclidean distance between a pair of row
vector x and y is computed as::
dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))
This formulation has two advantages over other ways of computing distances.
First, it is computationally efficient when dealing with sparse data.
Second, if one argument varies but the other remains unchanged, then
`dot(x, x)` and/or `dot(y, y)` can be pre-computed.
However, this is not the most precise way of doing this computation, and
the distance matrix returned by this function may not be exactly
symmetric as required by, e.g., ``scipy.spatial.distance`` functions.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_1, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_2, n_features)
Y_norm_squared : array-like, shape (n_samples_2, ), optional
Pre-computed dot-products of vectors in Y (e.g.,
``(Y**2).sum(axis=1)``)
squared : boolean, optional
Return squared Euclidean distances.
X_norm_squared : array-like, shape = [n_samples_1], optional
Pre-computed dot-products of vectors in X (e.g.,
``(X**2).sum(axis=1)``)
Returns
-------
distances : {array, sparse matrix}, shape (n_samples_1, n_samples_2)
Examples
--------
>>> from sklearn.metrics.pairwise import euclidean_distances
>>> X = [[0, 1], [1, 1]]
>>> # distance between rows of X
>>> euclidean_distances(X, X)
array([[ 0., 1.],
[ 1., 0.]])
>>> # get distance to origin
>>> euclidean_distances(X, [[0, 0]])
array([[ 1. ],
[ 1.41421356]])
See also
--------
paired_distances : distances betweens pairs of elements of X and Y.
"""
X, Y = check_pairwise_arrays(X, Y)
if X_norm_squared is not None:
XX = check_array(X_norm_squared)
if XX.shape == (1, X.shape[0]):
XX = XX.T
elif XX.shape != (X.shape[0], 1):
raise ValueError(
"Incompatible dimensions for X and X_norm_squared")
else:
XX = row_norms(X, squared=True)[:, np.newaxis]
if X is Y: # shortcut in the common case euclidean_distances(X, X)
YY = XX.T
elif Y_norm_squared is not None:
YY = np.atleast_2d(Y_norm_squared)
if YY.shape != (1, Y.shape[0]):
raise ValueError(
"Incompatible dimensions for Y and Y_norm_squared")
else:
YY = row_norms(Y, squared=True)[np.newaxis, :]
distances = safe_sparse_dot(X, Y.T, dense_output=True)
distances *= -2
distances += XX
distances += YY
np.maximum(distances, 0, out=distances)
if X is Y:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
distances.flat[::distances.shape[0] + 1] = 0.0
return distances if squared else np.sqrt(distances, out=distances)
def pairwise_distances_argmin_min(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance). The minimal distances are
also returned.
This is mostly equivalent to calling:
(pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis),
pairwise_distances(X, Y=Y, metric=metric).min(axis=axis))
but uses much less memory, and is faster for large arrays.
Parameters
----------
X, Y : {array-like, sparse matrix}
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable, default 'euclidean'
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict, optional
Keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
distances : numpy.ndarray
distances[i] is the distance between the i-th row in X and the
argmin[i]-th row in Y.
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin
"""
dist_func = None
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
dist_func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif not callable(metric) and not isinstance(metric, str):
raise ValueError("'metric' must be a string or a callable")
X, Y = check_pairwise_arrays(X, Y)
if metric_kwargs is None:
metric_kwargs = {}
if axis == 0:
X, Y = Y, X
# Allocate output arrays
indices = np.empty(X.shape[0], dtype=np.intp)
values = np.empty(X.shape[0])
values.fill(np.infty)
for chunk_x in gen_batches(X.shape[0], batch_size):
X_chunk = X[chunk_x, :]
for chunk_y in gen_batches(Y.shape[0], batch_size):
Y_chunk = Y[chunk_y, :]
if dist_func is not None:
if metric == 'euclidean': # special case, for speed
d_chunk = safe_sparse_dot(X_chunk, Y_chunk.T,
dense_output=True)
d_chunk *= -2
d_chunk += row_norms(X_chunk, squared=True)[:, np.newaxis]
d_chunk += row_norms(Y_chunk, squared=True)[np.newaxis, :]
np.maximum(d_chunk, 0, d_chunk)
else:
d_chunk = dist_func(X_chunk, Y_chunk, **metric_kwargs)
else:
d_chunk = pairwise_distances(X_chunk, Y_chunk,
metric=metric, **metric_kwargs)
# Update indices and minimum values using chunk
min_indices = d_chunk.argmin(axis=1)
min_values = d_chunk[np.arange(chunk_x.stop - chunk_x.start),
min_indices]
flags = values[chunk_x] > min_values
indices[chunk_x][flags] = min_indices[flags] + chunk_y.start
values[chunk_x][flags] = min_values[flags]
if metric == "euclidean" and not metric_kwargs.get("squared", False):
np.sqrt(values, values)
return indices, values
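# Hedged usage sketch (editor's example, not part of the original module):
#
#   >>> import numpy as np
#   >>> from sklearn.metrics.pairwise import pairwise_distances_argmin_min
#   >>> X = np.array([[0., 0.], [3., 3.]])
#   >>> Y = np.array([[0., 1.], [2., 2.]])
#   >>> pairwise_distances_argmin_min(X, Y)
#   (array([0, 1]), array([ 1.        ,  1.41421356]))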
def pairwise_distances_argmin(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance).
This is mostly equivalent to calling:
pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis)
but uses much less memory, and is faster for large arrays.
This function works with dense 2D arrays only.
Parameters
----------
X : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
Y : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict
keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin_min
"""
if metric_kwargs is None:
metric_kwargs = {}
return pairwise_distances_argmin_min(X, Y, axis, metric, batch_size,
metric_kwargs)[0]
def manhattan_distances(X, Y=None, sum_over_features=True,
size_threshold=5e8):
""" Compute the L1 distances between the vectors in X and Y.
With sum_over_features equal to False it returns the componentwise
distances.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like
An array with shape (n_samples_X, n_features).
Y : array_like, optional
An array with shape (n_samples_Y, n_features).
sum_over_features : bool, default=True
If True the function returns the pairwise distance matrix
else it returns the componentwise L1 pairwise-distances.
Not supported for sparse matrix inputs.
size_threshold : int, default=5e8
Unused parameter.
Returns
-------
D : array
If sum_over_features is False shape is
(n_samples_X * n_samples_Y, n_features) and D contains the
componentwise L1 pairwise-distances (ie. absolute difference),
else shape is (n_samples_X, n_samples_Y) and D contains
the pairwise L1 distances.
Examples
--------
>>> from sklearn.metrics.pairwise import manhattan_distances
>>> manhattan_distances([[3]], [[3]])#doctest:+ELLIPSIS
array([[ 0.]])
>>> manhattan_distances([[3]], [[2]])#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[2]], [[3]])#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[1, 2], [3, 4]],\
[[1, 2], [0, 3]])#doctest:+ELLIPSIS
array([[ 0., 2.],
[ 4., 4.]])
>>> import numpy as np
>>> X = np.ones((1, 2))
>>> y = 2 * np.ones((2, 2))
>>> manhattan_distances(X, y, sum_over_features=False)#doctest:+ELLIPSIS
array([[ 1., 1.],
[ 1., 1.]]...)
"""
X, Y = check_pairwise_arrays(X, Y)
if issparse(X) or issparse(Y):
if not sum_over_features:
raise TypeError("sum_over_features=%r not supported"
" for sparse matrices" % sum_over_features)
X = csr_matrix(X, copy=False)
Y = csr_matrix(Y, copy=False)
D = np.zeros((X.shape[0], Y.shape[0]))
_sparse_manhattan(X.data, X.indices, X.indptr,
Y.data, Y.indices, Y.indptr,
X.shape[1], D)
return D
if sum_over_features:
return distance.cdist(X, Y, 'cityblock')
D = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
D = np.abs(D, D)
return D.reshape((-1, X.shape[1]))
def cosine_distances(X, Y=None):
"""Compute cosine distance between samples in X and Y.
Cosine distance is defined as 1.0 minus the cosine similarity.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like, sparse matrix
with shape (n_samples_X, n_features).
Y : array_like, sparse matrix (optional)
with shape (n_samples_Y, n_features).
Returns
-------
distance matrix : array
An array with shape (n_samples_X, n_samples_Y).
See also
--------
sklearn.metrics.pairwise.cosine_similarity
scipy.spatial.distance.cosine (dense matrices only)
"""
# 1.0 - cosine_similarity(X, Y) without copy
S = cosine_similarity(X, Y)
S *= -1
S += 1
return S
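# Hedged example (editor's sketch): identical directions give distance 0,
# orthogonal vectors give distance 1.
#
#   >>> from sklearn.metrics.pairwise import cosine_distances
#   >>> cosine_distances([[1., 0.]], [[1., 0.], [0., 1.]])
#   array([[ 0.,  1.]])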
# Paired distances
def paired_euclidean_distances(X, Y):
"""
Computes the paired euclidean distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
return row_norms(X - Y)
def paired_manhattan_distances(X, Y):
"""Compute the L1 distances between the vectors in X and Y.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
diff = X - Y
if issparse(diff):
diff.data = np.abs(diff.data)
return np.squeeze(np.array(diff.sum(axis=1)))
else:
return np.abs(diff).sum(axis=-1)
def paired_cosine_distances(X, Y):
"""
Computes the paired cosine distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray, shape (n_samples, )
Notes
    -----
    The cosine distance is equivalent to half the squared euclidean
    distance if each sample is normalized to unit norm.
"""
X, Y = check_paired_arrays(X, Y)
return .5 * row_norms(normalize(X) - normalize(Y), squared=True)
PAIRED_DISTANCES = {
'cosine': paired_cosine_distances,
'euclidean': paired_euclidean_distances,
'l2': paired_euclidean_distances,
'l1': paired_manhattan_distances,
'manhattan': paired_manhattan_distances,
'cityblock': paired_manhattan_distances}
def paired_distances(X, Y, metric="euclidean", **kwds):
"""
Computes the paired distances between X and Y.
Computes the distances between (X[0], Y[0]), (X[1], Y[1]), etc...
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : ndarray (n_samples, n_features)
Array 1 for distance computation.
Y : ndarray (n_samples, n_features)
Array 2 for distance computation.
metric : string or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
specified in PAIRED_DISTANCES, including "euclidean",
"manhattan", or "cosine".
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
Returns
-------
distances : ndarray (n_samples, )
Examples
--------
>>> from sklearn.metrics.pairwise import paired_distances
>>> X = [[0, 1], [1, 1]]
>>> Y = [[0, 1], [2, 1]]
>>> paired_distances(X, Y)
array([ 0., 1.])
See also
--------
pairwise_distances : pairwise distances.
"""
if metric in PAIRED_DISTANCES:
func = PAIRED_DISTANCES[metric]
return func(X, Y)
elif callable(metric):
# Check the matrix first (it is usually done by the metric)
X, Y = check_paired_arrays(X, Y)
distances = np.zeros(len(X))
for i in range(len(X)):
distances[i] = metric(X[i], Y[i])
return distances
else:
raise ValueError('Unknown distance %s' % metric)
# Kernels
def linear_kernel(X, Y=None):
"""
Compute the linear kernel between X and Y.
Read more in the :ref:`User Guide <linear_kernel>`.
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
return safe_sparse_dot(X, Y.T, dense_output=True)
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
"""
Compute the polynomial kernel between X and Y::
K(X, Y) = (gamma <X, Y> + coef0)^degree
Read more in the :ref:`User Guide <polynomial_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
degree : int, default 3
gamma : float, default None
        if None, defaults to 1.0 / n_features
coef0 : int, default 1
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
K **= degree
return K
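# Hedged example (editor's sketch): with the default gamma of 1/n_features,
# degree=3 and coef0=1 on 2-dimensional inputs:
#
#   >>> from sklearn.metrics.pairwise import polynomial_kernel
#   >>> polynomial_kernel([[0., 1.], [1., 1.]])
#   array([[ 3.375,  3.375],
#          [ 3.375,  8.   ]])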
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
"""
Compute the sigmoid kernel between X and Y::
K(X, Y) = tanh(gamma <X, Y> + coef0)
Read more in the :ref:`User Guide <sigmoid_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
gamma : float, default None
If None, defaults to 1.0 / n_samples_1
coef0 : int, default 1
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
np.tanh(K, K) # compute tanh in-place
return K
def rbf_kernel(X, Y=None, gamma=None):
"""
Compute the rbf (gaussian) kernel between X and Y::
K(x, y) = exp(-gamma ||x-y||^2)
for each pair of rows x in X and y in Y.
Read more in the :ref:`User Guide <rbf_kernel>`.
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default None
        If None, defaults to 1.0 / n_features
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = euclidean_distances(X, Y, squared=True)
K *= -gamma
np.exp(K, K) # exponentiate K in-place
return K
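# Hedged example (editor's sketch): with gamma=1, two points at squared
# euclidean distance 2 get the kernel value exp(-2) ~= 0.1353.
#
#   >>> from sklearn.metrics.pairwise import rbf_kernel
#   >>> rbf_kernel([[0., 0.], [1., 1.]], gamma=1.)
#   array([[ 1.        ,  0.13533528],
#          [ 0.13533528,  1.        ]])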
def laplacian_kernel(X, Y=None, gamma=None):
"""Compute the laplacian kernel between X and Y.
The laplacian kernel is defined as::
K(x, y) = exp(-gamma ||x-y||_1)
for each pair of rows x in X and y in Y.
Read more in the :ref:`User Guide <laplacian_kernel>`.
.. versionadded:: 0.17
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default None
        If None, defaults to 1.0 / n_features
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = -gamma * manhattan_distances(X, Y)
np.exp(K, K) # exponentiate K in-place
return K
def cosine_similarity(X, Y=None, dense_output=True):
"""Compute cosine similarity between samples in X and Y.
Cosine similarity, or the cosine kernel, computes similarity as the
normalized dot product of X and Y:
K(X, Y) = <X, Y> / (||X||*||Y||)
On L2-normalized data, this function is equivalent to linear_kernel.
Read more in the :ref:`User Guide <cosine_similarity>`.
Parameters
----------
X : ndarray or sparse array, shape: (n_samples_X, n_features)
Input data.
Y : ndarray or sparse array, shape: (n_samples_Y, n_features)
Input data. If ``None``, the output will be the pairwise
similarities between all samples in ``X``.
dense_output : boolean (optional), default True
Whether to return dense output even when the input is sparse. If
``False``, the output is sparse if both input arrays are sparse.
.. versionadded:: 0.17
parameter ``dense_output`` for dense output.
Returns
-------
kernel matrix : array
An array with shape (n_samples_X, n_samples_Y).
"""
# to avoid recursive import
X, Y = check_pairwise_arrays(X, Y)
X_normalized = normalize(X, copy=True)
if X is Y:
Y_normalized = X_normalized
else:
Y_normalized = normalize(Y, copy=True)
K = safe_sparse_dot(X_normalized, Y_normalized.T, dense_output=dense_output)
return K
def additive_chi2_kernel(X, Y=None):
"""Computes the additive chi-squared kernel between observations in X and Y
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = -Sum [(x - y)^2 / (x + y)]
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Notes
-----
As the negative of a distance, this kernel is only conditionally positive
definite.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://research.microsoft.com/en-us/um/people/manik/projects/trade-off/papers/ZhangIJCV06.pdf
See also
--------
chi2_kernel : The exponentiated version of the kernel, which is usually
preferable.
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to this kernel.
"""
if issparse(X) or issparse(Y):
raise ValueError("additive_chi2 does not support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if (X < 0).any():
raise ValueError("X contains negative values.")
if Y is not X and (Y < 0).any():
raise ValueError("Y contains negative values.")
result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
_chi2_kernel_fast(X, Y, result)
return result
def chi2_kernel(X, Y=None, gamma=1.):
"""Computes the exponential chi-squared kernel X and Y.
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default=1.
Scaling parameter of the chi2 kernel.
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://research.microsoft.com/en-us/um/people/manik/projects/trade-off/papers/ZhangIJCV06.pdf
See also
--------
additive_chi2_kernel : The additive version of this kernel
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to the additive version of this kernel.
"""
K = additive_chi2_kernel(X, Y)
K *= gamma
return np.exp(K, K)
# Helper functions - distance
PAIRWISE_DISTANCE_FUNCTIONS = {
# If updating this dictionary, update the doc in both distance_metrics()
# and also in pairwise_distances()!
'cityblock': manhattan_distances,
'cosine': cosine_distances,
'euclidean': euclidean_distances,
'l2': euclidean_distances,
'l1': manhattan_distances,
'manhattan': manhattan_distances,
'precomputed': None, # HACK: precomputed is always allowed, never called
}
def distance_metrics():
"""Valid metrics for pairwise_distances.
This function simply returns the valid pairwise distance metrics.
It exists to allow for a description of the mapping for
each of the valid strings.
The valid distance metrics, and the function they map to, are:
============ ====================================
metric Function
============ ====================================
'cityblock' metrics.pairwise.manhattan_distances
'cosine' metrics.pairwise.cosine_distances
'euclidean' metrics.pairwise.euclidean_distances
'l1' metrics.pairwise.manhattan_distances
'l2' metrics.pairwise.euclidean_distances
'manhattan' metrics.pairwise.manhattan_distances
============ ====================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_DISTANCE_FUNCTIONS
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
"""Break the pairwise matrix in n_jobs even slices
and compute them in parallel"""
if n_jobs < 0:
n_jobs = max(cpu_count() + 1 + n_jobs, 1)
if Y is None:
Y = X
if n_jobs == 1:
# Special case to avoid picklability checks in delayed
return func(X, Y, **kwds)
# TODO: in some cases, backend='threading' may be appropriate
fd = delayed(func)
ret = Parallel(n_jobs=n_jobs, verbose=0)(
fd(X, Y[s], **kwds)
for s in gen_even_slices(Y.shape[0], n_jobs))
return np.hstack(ret)
def _pairwise_callable(X, Y, metric, **kwds):
"""Handle the callable case for pairwise_{distances,kernels}
"""
X, Y = check_pairwise_arrays(X, Y)
if X is Y:
# Only calculate metric for upper triangle
out = np.zeros((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.combinations(range(X.shape[0]), 2)
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
# Make symmetric
# NB: out += out.T will produce incorrect results
out = out + out.T
# Calculate diagonal
# NB: nonzero diagonals are allowed for both metrics and kernels
for i in range(X.shape[0]):
x = X[i]
out[i, i] = metric(x, x, **kwds)
else:
# Calculate all cells
out = np.empty((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.product(range(X.shape[0]), range(Y.shape[0]))
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
return out
_VALID_METRICS = ['euclidean', 'l2', 'l1', 'manhattan', 'cityblock',
'braycurtis', 'canberra', 'chebyshev', 'correlation',
'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean', 'yule', "wminkowski"]
def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=1, **kwds):
""" Compute the distance matrix from a vector array X and optional Y.
This method takes either a vector array or a distance matrix, and returns
a distance matrix. If the input is a vector array, the distances are
computed. If the input is a distances matrix, it is returned instead.
This method provides a safe way to take a distance matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
distance between the arrays from both X and Y.
Valid values for metric are:
- From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']. These metrics support sparse matrix inputs.
- From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics. These metrics do not support sparse matrix inputs.
Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
valid scipy.spatial.distance metrics), the scikit-learn implementation
will be used, which is faster and has support for sparse matrices (except
for 'cityblock'). For a verbose description of the metrics from
scikit-learn, see the __doc__ of the sklearn.pairwise.distance_metrics
function.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
Y : array [n_samples_b, n_features], optional
An optional second feature array. Only allowed if metric != "precomputed".
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A distance matrix D such that D_{i, j} is the distance between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then D_{i, j} is the distance between the ith array
from X and the jth array from Y.
"""
if (metric not in _VALID_METRICS and
not callable(metric) and metric != "precomputed"):
raise ValueError("Unknown metric %s. "
"Valid metrics are %s, or 'precomputed', or a "
"callable" % (metric, _VALID_METRICS))
if metric == "precomputed":
X, _ = check_pairwise_arrays(X, Y, precomputed=True)
return X
elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
if issparse(X) or issparse(Y):
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
dtype = bool if metric in PAIRWISE_BOOLEAN_FUNCTIONS else None
X, Y = check_pairwise_arrays(X, Y, dtype=dtype)
if n_jobs == 1 and X is Y:
return distance.squareform(distance.pdist(X, metric=metric,
**kwds))
func = partial(distance.cdist, metric=metric, **kwds)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
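# Hedged usage sketch (editor's example): the manhattan distance between
# (0, 0) and (0, 3) is 3.
#
#   >>> from sklearn.metrics import pairwise_distances
#   >>> pairwise_distances([[0., 0.], [0., 3.]], metric='manhattan')
#   array([[ 0.,  3.],
#          [ 3.,  0.]])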
# These distances require boolean arrays when using scipy.spatial.distance
PAIRWISE_BOOLEAN_FUNCTIONS = [
'dice',
'jaccard',
'kulsinski',
'matching',
'rogerstanimoto',
'russellrao',
'sokalmichener',
'sokalsneath',
'yule',
]
# Helper functions - kernels
PAIRWISE_KERNEL_FUNCTIONS = {
    # If updating this dictionary, update the doc in both kernel_metrics()
    # and also in pairwise_kernels()!
'additive_chi2': additive_chi2_kernel,
'chi2': chi2_kernel,
'linear': linear_kernel,
'polynomial': polynomial_kernel,
'poly': polynomial_kernel,
'rbf': rbf_kernel,
'laplacian': laplacian_kernel,
'sigmoid': sigmoid_kernel,
'cosine': cosine_similarity, }
def kernel_metrics():
""" Valid metrics for pairwise_kernels
    This function simply returns the valid pairwise kernel metrics.
    It exists, however, to allow for a verbose description of the mapping for
    each of the valid strings.
    The valid kernel metrics, and the function they map to, are:
=============== ========================================
metric Function
=============== ========================================
'additive_chi2' sklearn.pairwise.additive_chi2_kernel
'chi2' sklearn.pairwise.chi2_kernel
'linear' sklearn.pairwise.linear_kernel
'poly' sklearn.pairwise.polynomial_kernel
'polynomial' sklearn.pairwise.polynomial_kernel
'rbf' sklearn.pairwise.rbf_kernel
'laplacian' sklearn.pairwise.laplacian_kernel
'sigmoid' sklearn.pairwise.sigmoid_kernel
'cosine' sklearn.pairwise.cosine_similarity
=============== ========================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_KERNEL_FUNCTIONS
KERNEL_PARAMS = {
"additive_chi2": (),
"chi2": (),
"cosine": (),
"exp_chi2": frozenset(["gamma"]),
"linear": (),
"poly": frozenset(["gamma", "degree", "coef0"]),
"polynomial": frozenset(["gamma", "degree", "coef0"]),
"rbf": frozenset(["gamma"]),
"laplacian": frozenset(["gamma"]),
"sigmoid": frozenset(["gamma", "coef0"]),
}
def pairwise_kernels(X, Y=None, metric="linear", filter_params=False,
n_jobs=1, **kwds):
"""Compute the kernel between arrays X and optional array Y.
This method takes either a vector array or a kernel matrix, and returns
a kernel matrix. If the input is a vector array, the kernels are
computed. If the input is a kernel matrix, it is returned instead.
This method provides a safe way to take a kernel matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
kernel between the arrays from both X and Y.
Valid values for metric are::
['rbf', 'sigmoid', 'polynomial', 'poly', 'linear', 'cosine']
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise kernels between samples, or a feature array.
Y : array [n_samples_b, n_features]
A second feature array only if X has shape [n_samples_a, n_features].
metric : string, or callable
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
filter_params : boolean
Whether to filter invalid parameters or not.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the kernel function.
Returns
-------
K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A kernel matrix K such that K_{i, j} is the kernel between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then K_{i, j} is the kernel between the ith array
from X and the jth array from Y.
Notes
-----
If metric is 'precomputed', Y is ignored and X is returned.
"""
# import GPKernel locally to prevent circular imports
from ..gaussian_process.kernels import Kernel as GPKernel
if metric == "precomputed":
X, _ = check_pairwise_arrays(X, Y, precomputed=True)
return X
elif isinstance(metric, GPKernel):
func = metric.__call__
elif metric in PAIRWISE_KERNEL_FUNCTIONS:
if filter_params:
kwds = dict((k, kwds[k]) for k in kwds
if k in KERNEL_PARAMS[metric])
func = PAIRWISE_KERNEL_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
raise ValueError("Unknown kernel %r" % metric)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
| giorgiop/scikit-learn | sklearn/metrics/pairwise.py | Python | bsd-3-clause | 46,491 | 0.000043 |
from __future__ import unicode_literals
from django.utils import six
from reviewboard.attachments.models import FileAttachment
from reviewboard.webapi.base import WebAPIResource
class BaseFileAttachmentResource(WebAPIResource):
"""A base resource representing file attachments."""
added_in = '1.6'
model = FileAttachment
name = 'file_attachment'
fields = {
'id': {
'type': int,
'description': 'The numeric ID of the file.',
},
'caption': {
'type': six.text_type,
'description': "The file's descriptive caption.",
},
'filename': {
'type': six.text_type,
'description': "The name of the file.",
},
'absolute_url': {
'type': six.text_type,
'description': "The absolute URL of the file, for downloading "
"purposes.",
'added_in': '2.0',
},
'icon_url': {
'type': six.text_type,
'description': 'The URL to a 24x24 icon representing this file. '
'The use of these icons is deprecated and this '
'property will be removed in a future version.',
'deprecated_in': '2.5',
},
'mimetype': {
'type': six.text_type,
'description': 'The mimetype for the file.',
'added_in': '2.0',
},
'thumbnail': {
'type': six.text_type,
'description': 'A thumbnail representing this file.',
'added_in': '1.7',
},
}
uri_object_key = 'file_attachment_id'
def serialize_absolute_url_field(self, obj, request, **kwargs):
return request.build_absolute_uri(obj.get_absolute_url())
def serialize_caption_field(self, obj, **kwargs):
# We prefer 'caption' here, because when creating a new file
# attachment, it won't be full of data yet (and since we're posting
# to file-attachments/, it doesn't hit DraftFileAttachmentResource).
# DraftFileAttachmentResource will prefer draft_caption, in case people
# are changing an existing one.
return obj.caption or obj.draft_caption
| sgallagher/reviewboard | reviewboard/webapi/resources/base_file_attachment.py | Python | mit | 2,246 | 0 |
from cms.app_base import CMSApp
from cms.apphook_pool import apphook_pool
from django.utils.translation import ugettext_lazy as _
class ProductApphook(CMSApp):
name = _("Product Apphook")
urls = ["wlansi_store.urls"]
apphook_pool.register(ProductApphook) | matevzmihalic/wlansi-store | wlansi_store/cms_app.py | Python | agpl-3.0 | 264 | 0.011364 |
# -*- coding: UTF-8 -*-
"""
Package-wide constants.
"""
CALL = 'C'
PUT = 'P'
| zzzoidberg/landscape | finance/consts.py | Python | mit | 78 | 0 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding unique constraint on 'Vendeur', fields ['code_permanent']
db.create_unique(u'encefal_vendeur', ['code_permanent'])
def backwards(self, orm):
# Removing unique constraint on 'Vendeur', fields ['code_permanent']
db.delete_unique(u'encefal_vendeur', ['code_permanent'])
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'encefal.exemplaire': {
'Meta': {'object_name': 'Exemplaire'},
'actif': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'date_creation': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modification': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'etat': ('django.db.models.fields.CharField', [], {'default': "'VENT'", 'max_length': '4'}),
'facture': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'exemplaires'", 'null': 'True', 'db_column': "'facture'", 'to': u"orm['encefal.Facture']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'livre': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'exemplaires'", 'db_column': "'livre'", 'to': u"orm['encefal.Livre']"}),
'prix': ('django.db.models.fields.IntegerField', [], {}),
'vendeur': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'exemplaires'", 'db_column': "'vendeur'", 'to': u"orm['encefal.Vendeur']"})
},
u'encefal.facture': {
'Meta': {'object_name': 'Facture'},
'actif': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'date_creation': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modification': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'employe': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'factures'", 'blank': 'True', 'db_column': "'employe'", 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'session': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'factures'", 'blank': 'True', 'db_column': "'session'", 'to': u"orm['encefal.Session']"})
},
u'encefal.livre': {
'Meta': {'object_name': 'Livre'},
'actif': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'auteur': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'date_creation': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modification': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'edition': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isbn': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '13', 'blank': 'True'}),
'titre': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'vendeur': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'livres'", 'symmetrical': 'False', 'through': u"orm['encefal.Exemplaire']", 'db_column': "'vendeur'", 'to': u"orm['encefal.Vendeur']"})
},
u'encefal.session': {
'Meta': {'object_name': 'Session'},
'actif': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'date_creation': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_debut': ('django.db.models.fields.DateField', [], {}),
'date_fin': ('django.db.models.fields.DateField', [], {}),
'date_modification': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nom': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
u'encefal.vendeur': {
'Meta': {'object_name': 'Vendeur'},
'actif': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'code_permanent': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '12'}),
'date_creation': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modification': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nom': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'prenom': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'telephone': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
}
}
complete_apps = ['encefal'] | nilovna/EnceFAL | project/encefal/migrations/0003_auto__add_unique_vendeur_code_permanent.py | Python | gpl-3.0 | 8,651 | 0.007629 |
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Defines an enum for classifying RPC methods by streaming semantics."""
import enum
@enum.unique
class Cardinality(enum.Enum):
"""Describes the streaming semantics of an RPC method."""
UNARY_UNARY = 'request-unary/response-unary'
UNARY_STREAM = 'request-unary/response-streaming'
STREAM_UNARY = 'request-streaming/response-unary'
STREAM_STREAM = 'request-streaming/response-streaming'
| kidaa/kythe | third_party/grpc/src/python/src/grpc/framework/common/cardinality.py | Python | apache-2.0 | 1,930 | 0.002591 |
from matplotlib import pyplot as plt
path = "C:/Temp/mnisterrors/chunk" + str(input("chunk: ")) + ".txt"
with open(path, "r") as f:
errorhistory = [float(line.rstrip('\n')) for line in f]
plt.plot(errorhistory)
plt.show()
| jabumaho/MNIST-neural-network | plot_error.py | Python | gpl-3.0 | 235 | 0.004255 |
'''OpenGL extension ARB.texture_gather
This module customises the behaviour of the
OpenGL.raw.GL.ARB.texture_gather to provide a more
Python-friendly API
Overview (from the spec)
This extension provides a new set of texture functions
(textureGather) to the shading language that determine 2x2 footprint
that are used for linear filtering in a texture lookup, and return a
vector consisting of the first component from each of the four
texels in the footprint.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/ARB/texture_gather.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ARB.texture_gather import *
from OpenGL.raw.GL.ARB.texture_gather import _EXTENSION_NAME
def glInitTextureGatherARB():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION | stack-of-tasks/rbdlpy | tutorial/lib/python2.7/site-packages/OpenGL/GL/ARB/texture_gather.py | Python | lgpl-3.0 | 1,072 | 0.015858 |
"""Collects and displays information to the user."""
from __future__ import print_function
import collections
import logging
import os
import Queue
import sys
import textwrap
import zope.interface
from letsencrypt import interfaces
from letsencrypt import le_util
logger = logging.getLogger(__name__)
class Reporter(object):
"""Collects and displays information to the user.
:ivar `Queue.PriorityQueue` messages: Messages to be displayed to
the user.
"""
zope.interface.implements(interfaces.IReporter)
HIGH_PRIORITY = 0
"""High priority constant. See `add_message`."""
MEDIUM_PRIORITY = 1
"""Medium priority constant. See `add_message`."""
LOW_PRIORITY = 2
"""Low priority constant. See `add_message`."""
_msg_type = collections.namedtuple('ReporterMsg', 'priority text on_crash')
def __init__(self):
self.messages = Queue.PriorityQueue()
def add_message(self, msg, priority, on_crash=True):
"""Adds msg to the list of messages to be printed.
:param str msg: Message to be displayed to the user.
:param int priority: One of `HIGH_PRIORITY`, `MEDIUM_PRIORITY`,
or `LOW_PRIORITY`.
:param bool on_crash: Whether or not the message should be
printed if the program exits abnormally.
"""
assert self.HIGH_PRIORITY <= priority <= self.LOW_PRIORITY
self.messages.put(self._msg_type(priority, msg, on_crash))
logger.info("Reporting to user: %s", msg)
def atexit_print_messages(self, pid=os.getpid()):
"""Function to be registered with atexit to print messages.
:param int pid: Process ID
"""
# This ensures that messages are only printed from the process that
# created the Reporter.
if pid == os.getpid():
self.print_messages()
def print_messages(self):
"""Prints messages to the user and clears the message queue.
If there is an unhandled exception, only messages for which
``on_crash`` is ``True`` are printed.
"""
bold_on = False
if not self.messages.empty():
no_exception = sys.exc_info()[0] is None
bold_on = sys.stdout.isatty()
if bold_on:
print(le_util.ANSI_SGR_BOLD)
print('IMPORTANT NOTES:')
first_wrapper = textwrap.TextWrapper(
initial_indent=' - ', subsequent_indent=(' ' * 3))
next_wrapper = textwrap.TextWrapper(
initial_indent=first_wrapper.subsequent_indent,
subsequent_indent=first_wrapper.subsequent_indent)
while not self.messages.empty():
msg = self.messages.get()
if no_exception or msg.on_crash:
if bold_on and msg.priority > self.HIGH_PRIORITY:
sys.stdout.write(le_util.ANSI_SGR_RESET)
bold_on = False
lines = msg.text.splitlines()
print(first_wrapper.fill(lines[0]))
if len(lines) > 1:
print("\n".join(
next_wrapper.fill(line) for line in lines[1:]))
if bold_on:
sys.stdout.write(le_util.ANSI_SGR_RESET)
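# A hedged usage sketch (illustrative, not part of the upstream module): queue
# two notes at different priorities and flush them immediately. The message
# strings here are made up for the example.
if __name__ == "__main__":
    demo_reporter = Reporter()
    demo_reporter.add_message("First note.", Reporter.HIGH_PRIORITY)
    demo_reporter.add_message("Second note.", Reporter.LOW_PRIORITY, on_crash=False)
    demo_reporter.print_messages()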
| xgin/letsencrypt | letsencrypt/reporter.py | Python | apache-2.0 | 3,253 | 0 |
"""
Create a nearly orthogonal quad mesh by solving for stream function
and velocity potential inside a given boundary.
Still trying to improve the formulation of the Laplacian. Psi
(stream function) and phi (velocity potential) are solved
simultaneously. There are other options, and each field
uniquely constrains the other field to within a constant offset.
A block matrix is constructed which solves the Laplacian for
each of psi and phi. The boundaries of the domain are restricted
to be contours of either phi or psi, and these edges meet at right
angles.
For a grid with N nodes there are 2N unknowns.
Each interior node implies 2 constraints via the Laplacian on psi and phi.
For boundary nodes not at corner, one of psi/phi implies a no-flux boundary
and d phi/dn=0 or d psi/dn=0.
The question is how to constrain the remaining boundary nodes. These boundaries
are contours of the respective field. For a boundary segment of
s nodes, inclusive of corners, that yields s-1 constraints.
TODO:
- enforce monotonic sliding of nodes on the same segment. Currently it's possible
for nodes to slide over each other resulting in an invalid (but sometimes salvageable)
intermediate grid.
- Allow ragged edges.
- Allow multiple patches that can be solved simultaneously.
- Depending on how patches go, may allow for overlaps in phi/psi space if they are distinct in geographic space.
- allow patch connections that are rotated (psi on one side matches phi on the other, or a
   full inversion)
"""
import numpy as np
from collections import defaultdict
from shapely import geometry, ops
from scipy import sparse, signal
import matplotlib.pyplot as plt
from matplotlib.tri import LinearTriInterpolator,TriFinder,TrapezoidMapTriFinder
from matplotlib import colors
import itertools
from . import unstructured_grid, exact_delaunay,orthogonalize, triangulate_hole
from .. import utils, filters
from ..spatial import field, linestring_utils
from . import front
import logging
log=logging.getLogger('quad_laplacian')
import six
##
# A hack for linear interpolation on g_int. Nodes outside the triangulation
# take their value from the nearest cell.
class PermissiveFinder(TrapezoidMapTriFinder):
def __init__(self,grid):
self.grid=grid
self.mp_tri=grid.mpl_triangulation()
super(PermissiveFinder,self).__init__(self.mp_tri)
def __call__(self, x, y):
base=super(PermissiveFinder,self).__call__(x,y)
missing=np.nonzero(base==-1)[0]
for i in missing:
base[i]=self.grid.select_cells_nearest( [x[i],y[i]] )
return base
# borrow codes as in front.py
RIGID=front.AdvancingFront.RIGID
class NodeDiscretization(object):
def __init__(self,g):
self.g=g
def construct_matrix(self,op='laplacian',dirichlet_nodes={},
zero_tangential_nodes=[],
gradient_nodes={},
skip_dirichlet=True):
"""
Construct a matrix and rhs for the given operation.
dirichlet_nodes: boundary node id => value
zero_tangential_nodes: list of lists. each list gives a set of
nodes which should be equal to each other, allowing specifying
a zero tangential gradient BC.
gradient_nodes: boundary node id => gradient unit vector [dx,dy]
skip_dirichlet: should dirichlet nodes be omitted from other BCs?
"""
g=self.g
# Adjust tangential node data structure for easier use
# in matrix construction
tangential_nodes={}
for grp in zero_tangential_nodes:
leader=grp[0]
for member in grp:
# NB: This includes leader=>leader
assert member not in tangential_nodes
tangential_nodes[member]=leader
# Now I want to allow multiple BCs to constrain the same node.
# How many rows will I end up with?
# First count up the nodes that will get a regular laplacian
# row. This includes boundary nodes that have a no-flux BC.
# (because that's the behavior of the discretization on a
# boundary)
nlaplace_rows=0
laplace_nodes={}
for n in range(g.Nnodes()):
if skip_dirichlet and (n in dirichlet_nodes): continue
if n in gradient_nodes: continue
if n in tangential_nodes: continue
laplace_nodes[n]=True
nlaplace_rows+=1
ndirichlet_nodes=len(dirichlet_nodes)
# Each group of tangential gradient nodes provides len-1 constraints
ntangential_nodes=len(tangential_nodes) - len(zero_tangential_nodes)
ngradient_nodes=len(gradient_nodes)
nrows=nlaplace_rows + ndirichlet_nodes + ntangential_nodes + ngradient_nodes
log.info(f"row breakdown: Lap: {nlaplace_rows} "
f"Dir: {ndirichlet_nodes} Tan: {ntangential_nodes} "
f"({len(zero_tangential_nodes)} grps) Grad: {ngradient_nodes}")
log.info(f"nrows={nrows} N={g.Nnodes()}")
B=np.zeros(nrows,np.float64)
M=sparse.dok_matrix( (nrows,g.Nnodes()),np.float64)
row=0
for n in laplace_nodes:
nodes,alphas,rhs=self.node_discretization(n,op=op)
B[row]=rhs
for node,alpha in zip(nodes,alphas):
M[row,node]=alpha
row+=1
for n in dirichlet_nodes:
B[row]=dirichlet_nodes[n]
M[row,n]=1
row+=1
for n in gradient_nodes:
vec=gradient_nodes[n] # The direction of the gradient
normal=[vec[1],-vec[0]] # direction of zero gradient
dx_nodes,dx_alphas,_=self.node_discretization(n,op='dx')
dy_nodes,dy_alphas,_=self.node_discretization(n,op='dy')
assert np.all(dx_nodes==dy_nodes),"Have to be cleverer"
nodes=dx_nodes
# So if vec = [1,0], then normal=[0,-1]
# and I want dx*norma[0]+dy*normal[1] = 0
alphas=np.array(dx_alphas)*normal[0] + np.array(dy_alphas)*normal[1]
B[row]=0
for node,alpha in zip(nodes,alphas):
M[row,node]=alpha
row+=1
for n in tangential_nodes:
leader=tangential_nodes[n]
if n==leader:
# print("skip leader")
continue
M[row,n]=1
M[row,leader]=-1
B[row]=0.0
row+=1
assert row==nrows
return M,B
def node_laplacian(self,n0):
return self.node_discretization(n0,'laplacian')
def node_dx(self,n0):
return self.node_discretization(n0,'dx')
def node_dy(self,n0):
return self.node_discretization(n0,'dy')
def node_discretization(self,n0,op='laplacian'):
def beta(c):
return 1.0
N=self.g.angle_sort_adjacent_nodes(n0)
P=len(N)
is_boundary=int(self.g.is_boundary_node(n0))
M=len(N) - is_boundary
if is_boundary:
# roll N to start and end on boundary nodes:
nbr_boundary=[self.g.is_boundary_node(n)
for n in N]
while not (nbr_boundary[0] and nbr_boundary[-1]):
N=np.roll(N,1)
nbr_boundary=np.roll(nbr_boundary,1)
# area of the triangles
A=[]
for m in range(M):
tri=[n0,N[m],N[(m+1)%P]]
Am=utils.signed_area( self.g.nodes['x'][tri] )
assert Am!=0.0
A.append(Am)
AT=np.sum(A)
alphas=[]
x=self.g.nodes['x'][N,0]
y=self.g.nodes['x'][N,1]
x0,y0=self.g.nodes['x'][n0]
for n in range(P):
n_m_e=(n-1)%M
n_m=(n-1)%P
n_p=(n+1)%P
a=0
if op=='laplacian':
if n>0 or P==M: # nm<M
a+=-beta(n_m_e)/(4*A[n_m_e]) * ( (y[n_m]-y[n])*(y0-y[n_m]) + (x[n] -x[n_m])*(x[n_m]-x0))
if n<M:
a+= -beta(n)/(4*A[n]) * ( (y[n]-y[n_p])*(y[n_p]-y0) + (x[n_p]-x[n ])*(x0 - x[n_p]))
elif op=='dx':
if n>0 or P==M: # nm<M
a+= beta(n_m_e)/(2*AT) * (y0-y[n_m])
if n<M:
a+= beta(n)/(2*AT) * (y[n_p]-y0)
elif op=='dy':
if n>0 or P==M: # nm<M
a+= beta(n_m_e)/(2*AT) * (x[n_m]-x0)
if n<M:
a+= beta(n)/(2*AT) * (x0 - x[n_p])
else:
raise Exception('bad op')
alphas.append(a)
alpha0=0
for e in range(M):
ep=(e+1)%P
if op=='laplacian':
alpha0+= - beta(e)/(4*A[e]) * ( (y[e]-y[ep])**2 + (x[ep]-x[e])**2 )
elif op=='dx':
alpha0+= beta(e)/(2*AT)*(y[e]-y[ep])
elif op=='dy':
alpha0+= beta(e)/(2*AT)*(x[ep]-x[e])
else:
raise Exception('bad op')
if op=='laplacian' and P>M:
norm_grad=0 # no flux bc
L01=np.sqrt( (x[0]-x0)**2 + (y0-y[0])**2 )
L0P=np.sqrt( (x[0]-x[-1])**2 + (y0-y[-1])**2 )
gamma=3/AT * ( beta(0) * norm_grad * L01/2
+ beta(P-1) * norm_grad * L0P/2 )
else:
gamma=0
assert np.isfinite(alpha0)
return ([n0]+list(N),
[alpha0]+list(alphas),
-gamma)
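def _node_discretization_sketch(g_tri,n_lo,n_hi):
    """
    Hedged usage sketch, not called anywhere in this module: assemble the
    nodal Laplacian on a triangular grid g_tri, pin two hypothetical nodes
    n_lo/n_hi to -1 and +1, and solve directly. This mirrors how
    construct_matrix() and spsolve() are used by QuadGen further below.
    """
    nd=NodeDiscretization(g_tri)
    M,B=nd.construct_matrix(op='laplacian',
                            dirichlet_nodes={n_lo:-1.0, n_hi:1.0})
    return sparse.linalg.spsolve(M.tocsr(),B)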
def classify_nodes(g,gen):
"""
Find fixed vs. free nodes in g by matching up with gen.nodes['fixed']
"""
n_fixed=[]
for gen_n in np.nonzero( gen.nodes['fixed'] )[0]:
n=g.select_nodes_nearest( gen.nodes['x'][gen_n], max_dist=0.001)
if n is not None:
n_fixed.append(n)
n_free=[n for n in g.valid_node_iter() if n not in n_fixed]
return n_fixed, n_free
def snap_angles(gen):
"""
gen: unstructured grid
will populate turn_fwd/turn_rev fields on the edges, by iterating over
cells and setting the smallest 4 internal angles in each cell to 90, and the
rest to 180. Internal angles not associated with cells are set to 0
"""
gen.add_edge_field('turn_fwd',np.zeros(gen.Nedges(),np.float64),on_exists='overwrite')
gen.add_edge_field('turn_rev',np.zeros(gen.Nedges(),np.float64),on_exists='overwrite')
def he_angle(he):
# Calculate absolute angle of the half edge
xy=he.grid.nodes['x']
seg=xy[ [he.node_rev(), he.node_fwd()] ]
dxy=seg[1] - seg[0]
return np.arctan2(dxy[1],dxy[0])
def set_angle(he,angle):
# set turn_fwd/rev for the half edge
if he.orient==0:
gen.edges['turn_fwd'][he.j]=angle
else:
gen.edges['turn_rev'][he.j]=angle
for c in gen.valid_cell_iter():
angle_and_he=[]
he0=he=gen.cell_to_halfedge(c,0)
while 1:
he_nxt=he.fwd()
angle0=he_angle(he)
angle1=he_angle(he_nxt)
turn=(angle1-angle0)*180/np.pi
turn_d=(turn + 180) % 360 - 180
# Now turn is 0 for straight ahead, 90 for a left
angle=180-turn_d
angle_and_he.append( [angle,he] )
if he_nxt==he0:
break
else:
he=he_nxt
order=np.argsort( [angle for angle,he in angle_and_he] )
for idx in order[:4]:
set_angle(angle_and_he[idx][1],90)
for idx in order[4:]:
set_angle(angle_and_he[idx][1],180)
def prepare_angles_halfedge(gen):
"""
Move turn angles from half edges to absolute angles of edges.
    This used to be done later in the game, and removed the internal edges
at the same time.
"""
# at this stage, angles are absolute, and reflect the natural direction
# of the edge
edge_angles=np.nan*np.zeros(gen.Nedges(),np.float32)
def he_angle(he,val=None):
if val is not None:
edge_angles[he.j] = (val + 180*he.orient) % 360
return (edge_angles[he.j] + 180 * he.orient) % 360
j_turns=np.c_[ gen.edges['turn_fwd'],
gen.edges['turn_rev'] ]
j_turns[ j_turns==0.0 ] = 180
j_turns[ np.isnan(j_turns) ]=180
# And convert all to delta angle, not internal angle
j_turns=180-j_turns
# Have to loop over all cells in case there are disconnected areas.
for c0 in gen.valid_cell_iter():
# Graph traversal to set edge angles:
he0=gen.cell_to_halfedge(c0,0)
if np.isfinite(he_angle(he0)):
continue
stack=[ (he0,0.0) ]
while stack:
he,angle=stack.pop()
if he.cell()<0: continue
existing_angle=he_angle(he)
if np.isfinite(existing_angle):
if existing_angle!=angle:
plt.figure(2).clf()
gen.plot_edges(labeler=lambda j,r: ["",edge_angles[j]][int(np.isfinite(edge_angles[j]))])
gen.plot_nodes(labeler='id')
edge_tans=utils.to_unit( np.diff(gen.nodes['x'][gen.edges['nodes']],axis=1)[:,0,:] )
ecs=gen.edges_center()
plt.quiver(ecs[:,0],ecs[:,1],edge_tans[:,0],edge_tans[:,1],
color='red',scale=20,width=0.01)
plt.axis('tight')
plt.axis('equal')
gen.plot_edges(mask=[he.j],color='r',lw=3)
raise Exception("Angle mismatch")
continue
else:
# Set it
he_angle(he,angle)
he_fwd=he.fwd()
angle_fwd=(angle+j_turns[he.j,he.orient])%360
# print(f" fwd: he={he_fwd} angle_fwd={angle_fwd}")
stack.append( (he_fwd,angle_fwd) )
he_opp=he.opposite()
if he_opp.cell()<0: continue
he_rev=he_opp.fwd()
angle_rev=(angle+180+j_turns[he.j,1-he.orient])%360
# print(f" rev: he={he_rev} angle_fwd={angle_rev}")
stack.append( (he_rev,angle_rev) )
gen.add_edge_field('angle',edge_angles,on_exists='overwrite')
def adjusted_scale(gen, scale, nom_res=None):
"""
Replace positive scale values on shared edges with negative counts
"""
scale=scale.copy()
e2c=gen.edge_to_cells()
for j in np.nonzero( (e2c.min(axis=1)>=0) )[0]:
if scale[j]<0: continue # already good
L=gen.edges_length(j)
if scale[j]==0.0:
if nom_res is not None:
res=nom_res
else:
continue
else:
res=scale[j]
scale[j]=-max(1,np.round(L/res))
print("Mapped edge scale %g to %g"%(res,scale[j]))
return scale
def linear_scales(gen,method='adhoc'):
"""
Infer scales from the edges of the generating grid
Create a scale field for the two canonical coordinates.
method: 'adhoc' reverts to a simple extrapolation approach, rather
than solving a proper laplacian. The Laplacian code turned out be
brittle on the very coarse triangulation of gen.
"""
scales=gen.edges['scale']
scales=np.where( np.isfinite(scales), scales, 0.0)
i_edges=np.nonzero( (scales!=0) & (gen.edges['angle']%180== 0) )[0]
j_edges=np.nonzero( (scales!=0) & (gen.edges['angle']%180==90) )[0]
gen_tri=exact_delaunay.Triangulation()
gen_tmp=gen.copy()
gen_tmp.renumber_edges()
gen_tmp.renumber_nodes()
gen_tri.init_from_grid(gen_tmp,set_valid=True)
gen_tri=gen_tri.copy()
for c in np.nonzero(~gen_tri.cells['valid'])[0]:
if not gen_tri.cells['deleted'][c]:
gen_tri.delete_cell(c)
gen_tri.delete_orphan_edges()
gen_tri.delete_orphan_nodes()
gen_tri.renumber()
# First, the i scale:
extraps=[]
el=gen.edges_length()
for edge_list in [i_edges,j_edges]:
dirich={} # nodes of gen => dirichlet BC
for j in edge_list:
scale=scales[j]
if scale<0:
scale=el[j]/(-scale)
for n in gen.edges['nodes'][j]:
if n in dirich:
dirich[n] = 0.5*(scale+dirich[n])
else:
dirich[n]=scale
mapped_dirich={}
for n in dirich:
n_tri=gen_tri.select_nodes_nearest(gen.nodes['x'][n],max_dist=0.0)
assert n_tri is not None
mapped_dirich[n_tri]=dirich[n]
if method=='adhoc':
# This really shouldn't be necessary, but I'm having issues with
# negative results coming out of NodeDiscretization.
# This is at least positive definite (right?)
N=gen_tri.Nnodes()
M=sparse.dok_matrix((N,N))
b=np.zeros( gen_tri.Nnodes())
for n in range(gen_tri.Nnodes()):
if n in mapped_dirich:
M[n,n]=1
b[n]=mapped_dirich[n]
else:
nbrs=gen_tri.node_to_nodes(n)
f=1./len(nbrs)
M[n,n]=1
for nbr in nbrs:
M[n,nbr]=-f
soln=sparse.linalg.spsolve(M.tocsr(),b)
else:
nd=NodeDiscretization(gen_tri)
M,b=nd.construct_matrix(op='laplacian',dirichlet_nodes=mapped_dirich)
soln=sparse.linalg.spsolve(M.tocsr(),b)
extraps.append(soln)
mp_tri=gen_tri.mpl_triangulation()
i_field=field.XYZField(X=gen_tri.nodes['x'],F=extraps[0])
i_field._tri = mp_tri
j_field=field.XYZField(X=gen_tri.nodes['x'],F=extraps[1])
j_field._tri = mp_tri
# As it stands, it's possible for the input grid to have disconnected
# sets of cells, such that the node discretization yields 0 in the
# disconnected areas (unless they have their own scale data).
bad=(extraps[0]<=0) | (extraps[1]<=0)
if np.any(bad):
idxs=np.nonzero(bad)[0]
log.error("Bad nodes: %s"%str(gen_tri.nodes['x'][idxs]))
raise Exception("Probably disconnected cells with no scale")
return i_field, j_field
def add_bezier(gen):
"""
Generate bezier control points for each edge. Uses angles (in ij
space) in the generating grid to calculate angles at each vertex, and then
choose bezier control points to achieve that angle.
This version is standalone, and can handle nodes with degree>2.
"""
# Need to force the internal angles at nodes to match xy space
# angles and ij space angles.
order=3 # cubic bezier curves
bez=np.nan*np.zeros( (gen.Nedges(),order+1,2) )
bez[:,0,:] = gen.nodes['x'][gen.edges['nodes'][:,0]]
bez[:,order,:] = gen.nodes['x'][gen.edges['nodes'][:,1]]
gen.add_edge_field('bez', bez, on_exists='overwrite')
for n in gen.valid_node_iter():
nbrs=gen.angle_sort_adjacent_nodes(n)
if len(nbrs)==0:
continue
hes=[gen.nodes_to_halfedge(n,nbr) for nbr in nbrs]
# Filter out non-quad grid edges in there, which will have angle=nan
hes=[he for he in hes if np.isfinite(gen.edges['angle'][he.j])]
if len(hes)==0:
continue
# Each of those half-edges has an angle in ij space, relative
# to natural edge direction.
def he_ij_angle(he):
return (gen.edges['angle'][he.j]+(he.orient*180.))%360.
def he_xy_angle(he):
A=gen.nodes['x'][he.node_rev()]
B=gen.nodes['x'][he.node_fwd()]
delta=B-A
return 180/np.pi * np.arctan2(delta[1],delta[0])
ij_angles=np.array( [he_ij_angle(he) for he in hes] )
xy_angles=np.array( [he_xy_angle(he) for he in hes] )
# overall rotation -
# 0 degress in xy space is rot degrees from 0 degrees in
# ij space.
xy_to_ij=np.pi/180 * (ij_angles-xy_angles)
        # when a node is part of multiple cells, but those
# cells have no edges in common, treat it as two separate
# groups of angles
# label nodes:
node_groups=np.arange(len(hes))
for a in range(len(hes)):
b=(a+1)%len(hes)
he_a=hes[a]
he_b=hes[b]
if he_a.cell()>=0 and he_a.cell()==he_b.cell_opp():
node_groups[ node_groups==node_groups[b] ] = node_groups[a]
xy_tgts=0*ij_angles
for grp in np.arange(len(hes)):
sel=node_groups==grp
if np.all(~sel): continue
rot=180/np.pi * np.arctan2( np.sin(xy_to_ij[sel]).mean(),
np.cos(xy_to_ij[sel]).mean())
xy_tgts[sel]=ij_angles[sel] - rot
xy_errs=xy_angles - xy_tgts
for he,xy_err in zip(hes,xy_errs):
vec=gen.nodes['x'][he.node_fwd()] - gen.nodes['x'][he.node_rev()]
cp=gen.nodes['x'][n] + utils.rot(-xy_err*np.pi/180., 1./3 * vec)
gen.edges['bez'][he.j,1+he.orient]=cp
def plot_gen_bezier(gen,num=10):
fig=plt.figure(num)
fig.clf()
ax=fig.add_subplot(1,1,1)
gen.plot_edges(lw=0.3,color='k',alpha=0.5,ax=ax)
gen.plot_nodes(alpha=0.5,ax=ax,zorder=3,color='orange')
for j in gen.valid_edge_iter():
n0=gen.edges['nodes'][j,0]
nN=gen.edges['nodes'][j,1]
bez=gen.edges['bez'][j]
t=np.linspace(0,1,21)
B0=(1-t)**3
B1=3*(1-t)**2 * t
B2=3*(1-t)*t**2
B3=t**3
points = B0[:,None]*bez[0] + B1[:,None]*bez[1] + B2[:,None]*bez[2] + B3[:,None]*bez[3]
ax.plot(points[:,0],points[:,1],'b-',zorder=2,lw=1.5)
ax.plot(bez[:,0],bez[:,1],'r-o',zorder=1,alpha=0.5,lw=1.5)
return fig,ax
class QuadGen(object):
# The cell spacing in geographic coordinates for the nominal, isotropic grid
nom_res=4.0
gmsh_path="gmsh"
scales=None
# Minimum number of edges along a boundary segment in the nominal isotropic grid
min_steps=2
# The additional constraints that link psi and phi create an over-determined
# system. Still trying to understand when this is a problem. The solution
# can in some cases become invalid when the gradient terms are either too
# strong (maybe) or too loose (true).
# 'scaled' will scale the gradient terms according to the number of extra dofs.
    # Possible that the scaled code was a weak attempt to fix something that was
# really a bug elsewhere, and that 1.0 is the best choice
gradient_scale=1.0
# 'rebay', 'front', 'gmsh'. When intermediate is 'tri', this chooses the method for
# generating the intermediate triangular grid
triangle_method='rebay'
    # How internal angles are specified: 'halfedge' derives them from the
    # turn_fwd/turn_rev half-edge fields; 'existing' assumes gen.edges['angle']
    # is already populated.
    angle_source='halfedge' # 'halfedge' or 'existing'
# Whether to smooth and relax the resulting grid
smooth=True
def __init__(self,gen,execute=True,cells=None,**kw):
"""
gen: the design grid. cells of this grid will be filled in with
quads.
nodes should have separate i and j fields.
        i,j are interpreted as x,y indices in the reference frame of the quad grid.
execute: if True, run the full generation process. Otherwise preprocess
inputs but do not solve.
"""
utils.set_keywords(self,kw)
gen=gen.copy()
gen.modify_max_sides( gen.Nnodes() )
# Process angles on the whole quad grid, so we can also get
# scales
if self.angle_source=='halfedge':
prepare_angles_halfedge(gen)
elif self.angle_source=='existing':
pass
else:
raise Exception("bad angle source: %s"%self.angle_source)
add_bezier(gen)
if self.scales is None:
if 'scale' in gen.edges.dtype.names:
self.scales=linear_scales(gen)
else:
self.scales=[field.ConstantField(self.nom_res),
field.ConstantField(self.nom_res)]
if cells is not None:
for c in range(gen.Ncells()):
if (c not in cells) and (not gen.cells['deleted'][c]):
gen.delete_cell(c)
gen.delete_orphan_edges()
gen.delete_orphan_nodes()
gen.renumber(reorient_edges=False)
self.gen=gen
# [ [node_a,node_b,angle], ...]
# node indices reference gen, which provide
# additional groupings of nodes.
self.internal_edges=[]
if execute:
self.execute()
def add_internal_edge(self,nodes,angle=None):
self.internal_edges.append( [nodes[0], nodes[1], angle] )
def execute(self):
self.process_internal_edges(self.gen)
self.g_int=self.create_intermediate_grid_tri()
self.calc_psi_phi()
self.g_final=self.create_final_by_patches()
if self.smooth:
self.smooth_to_scale(self.g_final)
return self.g_final
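    # Hedged usage sketch (comment only; 'gen_grid' and the cell index are
    # assumptions, not names defined in this module):
    #   qg=QuadGen(gen_grid, cells=[0], execute=False, nom_res=5.0)
    #   g_quad=qg.execute()   # the quad grid, also stored as qg.g_final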
def set_scales_diffusion(self):
# Probably not what I'll end up with, but try a diffusion approach
i_scale_dir={}
j_scale_dir={}
for j in np.nonzero( self.g_int.edges['gen_j']>= 0)[0]:
gen_j=self.g_int.edges['gen_j'][j]
scale=self.gen.edges['scale'][gen_j]
            if scale==0 or np.isnan(scale): continue # NaN never matches via 'in'; test explicitly
orient=self.gen.edges['angle'][gen_j] % 180
if orient==0:
# Add to i scale (?)
for n in self.g_int.edges['nodes'][j]:
i_scale_dir[n]=scale
elif orient==90:
# Add to j scale (?)
for n in self.g_int.edges['nodes'][j]:
j_scale_dir[n]=scale
nd=self.nd
M,B=nd.construct_matrix(op='laplacian',
dirichlet_nodes=i_scale_dir,
skip_dirichlet=True)
i_scale=sparse.linalg.spsolve(M.tocsr(),B)
M,B=nd.construct_matrix(op='laplacian',
dirichlet_nodes=j_scale_dir,
skip_dirichlet=True)
j_scale=sparse.linalg.spsolve(M.tocsr(),B)
self.i_scale=field.XYZField(X=self.g_int.nodes['x'],F=i_scale)
self.j_scale=field.XYZField(X=self.g_int.nodes['x'],F=j_scale)
self.scales=[self.i_scale,self.j_scale]
def create_intermediate_grid_tri_boundary(self,scale=None):
"""
Create the boundaries for the intermediate grid, upsampling the bezier edges
and assigning 'ij' along the way for fixed nodes.
"""
if scale is None:
scale=field.ConstantField(self.nom_res)
g=unstructured_grid.UnstructuredGrid(max_sides=3,
extra_edge_fields=[ ('gen_j',np.int32) ],
extra_node_fields=[ ('gen_n',np.int32) ])
g.edges['gen_j']=-1
g.nodes['gen_n']=-1
g.edge_defaults['gen_j']=-1
g.node_defaults['gen_n']=-1
gen=self.gen
for j in gen.valid_edge_iter():
# Just to get the length
points=self.gen_bezier_linestring(j=j,samples_per_edge=10,span_fixed=False)
dist=utils.dist_along(points)[-1]
local_res=scale(points).min(axis=0) # min=>conservative
assert local_res>0
assert np.isfinite(dist)
N=max( self.min_steps, int(dist/local_res))
points=self.gen_bezier_linestring(j=j,samples_per_edge=N,span_fixed=False)
nodes=[g.add_or_find_node(x=p,tolerance=0.1)
for p in points]
g.nodes['gen_n'][nodes[0]] =gen.edges['nodes'][j,0]
g.nodes['gen_n'][nodes[-1]]=gen.edges['nodes'][j,1]
for a,b in zip(nodes[:-1],nodes[1:]):
g.add_edge(nodes=[a,b],gen_j=j)
return g
def create_intermediate_grid_tri(self):
"""
        Create a triangular grid for solving psi/phi.
        The boundary is built by create_intermediate_grid_tri_boundary(),
        which upsamples the bezier edges of gen, and the interior is
        triangulated with the method named by self.triangle_method
        ('gmsh', or a triangulate_hole method such as 'rebay' or 'front').
"""
g=self.create_intermediate_grid_tri_boundary()
if self.triangle_method=='gmsh':
fn='tmp.geo'
g.write_gmsh_geo(fn)
import subprocess
subprocess.run([self.gmsh_path,fn,'-2'])
try:
g_gmsh=unstructured_grid.UnstructuredGrid.read_gmsh('tmp.msh')
except:
log.error("Intermediate grid generation failed. Check for self-intersection in bezier grid")
raise
g.add_grid(g_gmsh,merge_nodes='auto',tol=1e-3)
gnew=g
else:
nodes=g.find_cycles(max_cycle_len=g.Nnodes()+1)[0]
gnew=triangulate_hole.triangulate_hole(g,nodes=nodes,hole_rigidity='all',
method=self.triangle_method)
gnew.add_node_field('rigid',
(gnew.nodes['gen_n']>=0) & (self.gen.nodes['fixed'][gnew.nodes['gen_n']]))
# Really it should be sufficient to have edge_defaults give -1 for gen_j, but that's
# getting lost. easiest to just fix non-boundary edges:
internal=np.all( gnew.edge_to_cells()>=0, axis=1)
gnew.edges['gen_j'][internal]=-1
return gnew
def plot_intermediate(self,num=1):
plt.figure(num).clf()
fig,ax=plt.subplots(num=num)
self.gen.plot_edges(lw=1.5,color='b',ax=ax)
self.g_int.plot_edges(lw=0.5,color='k',ax=ax)
self.g_int.plot_nodes(mask=self.g_int.nodes['rigid']>0)
ax.axis('tight')
ax.axis('equal')
def plot_gen_bezier(self,num=10):
fig,ax=plot_gen_bezier(self.gen)
for n12 in self.internal_edges:
ax.plot( self.gen.nodes['x'][n12[:2],0],
self.gen.nodes['x'][n12[:2],1], 'g-')
i_angle=self.internal_edge_angle(n12)
mid=self.gen.nodes['x'][n12[:2],:].mean(axis=0)
ax.text( mid[0],mid[1], str(i_angle),color='g')
def gen_bezier_curve(self,j=None,samples_per_edge=10,span_fixed=True):
"""
j: make a curve for gen.edges[j], instead of the full boundary cycle.
samples_per_edge: how many samples to use to approximate each bezier
segment
span_fixed: if j is specified, create a curve that includes j and
adjacent edges until a fixed node is encountered
"""
points=self.gen_bezier_linestring(j=j,samples_per_edge=samples_per_edge,
span_fixed=span_fixed)
if j is None:
return front.Curve(points,closed=True)
else:
return front.Curve(points,closed=False)
def gen_bezier_linestring(self,j=None,samples_per_edge=10,span_fixed=True):
"""
Calculate an up-sampled linestring for the bezier boundary of self.gen
j: limit the curve to a single generating edge if given.
span_fixed: see gen_bezier_curve()
"""
gen=self.gen
# need to know which ij coordinates are used in order to know what is
# fixed. So far fixed is the same whether IJ or ij, so not making this
# a parameter yet.
src='IJ'
if j is None:
            bound_nodes=self.gen.boundary_cycle() # probably eating a lot of time.
            node_pairs=zip(bound_nodes,np.roll(bound_nodes,-1))
else:
if not span_fixed:
node_pairs=[ self.gen.edges['nodes'][j] ]
else:
nodes=[]
# Which coord is changing on j? I.e. which fixed should
# we consult?
# A little shaky here. Haven't tested this with nodes
# that are fixed in only coordinate.
j_coords=self.gen.nodes[src][ self.gen.edges['nodes'][j] ]
if j_coords[0,0] == j_coords[1,0]:
coord=1
elif j_coords[0,1]==j_coords[1,1]:
coord=0
else:
raise Exception("Neither coordinate is constant on this edge??")
trav=self.gen.halfedge(j,0)
while 1: # FWD
n=trav.node_fwd()
nodes.append(n)
if self.gen.nodes[src+'_fixed'][n,coord]:
break
trav=trav.fwd()
nodes=nodes[::-1]
trav=self.gen.halfedge(j,0)
while 1: # REV
n=trav.node_rev()
nodes.append(n)
if self.gen.nodes[src+'_fixed'][n,coord]:
break
trav=trav.rev()
node_pairs=zip( nodes[:-1], nodes[1:])
points=[]
for a,b in node_pairs:
            # Use a distinct name so the parameter j is not shadowed
            # (the check after this loop relies on it).
            j_ab=gen.nodes_to_edge(a,b)
            n0=gen.edges['nodes'][j_ab,0]
            nN=gen.edges['nodes'][j_ab,1]
            bez=gen.edges['bez'][j_ab]
t=np.linspace(0,1,1+samples_per_edge)
if n0==b: # have to flip order
t=t[::-1]
B0=(1-t)**3
B1=3*(1-t)**2 * t
B2=3*(1-t)*t**2
B3=t**3
edge_points = B0[:,None]*bez[0] + B1[:,None]*bez[1] + B2[:,None]*bez[2] + B3[:,None]*bez[3]
points.append(edge_points[:-1])
if j is not None:
# When the curve isn't closed, then be inclusive of both
# ends
points.append(edge_points[-1:])
return np.concatenate(points,axis=0)
def calc_bc_gradients(self,gtri):
"""
Calculate gradient vectors for psi and phi along
the boundary.
"""
bcycle=gtri.boundary_cycle()
# First calculate psi gradient per edge:
j_grad_psi=np.zeros( (len(bcycle),2), np.float64)
j_angles=self.gen.edges['angle'][ gtri.edges['gen_j'] ] * np.pi/180.
# trial and error correction
j_angles-=np.pi/2
for ji,(n1,n2) in enumerate( zip(bcycle[:-1],bcycle[1:]) ):
j=gtri.nodes_to_edge(n1,n2)
tang_xy=utils.to_unit( gtri.nodes['x'][n2] - gtri.nodes['x'][n1] )
tang_ij=np.r_[ np.cos(j_angles[j]), np.sin(j_angles[j])]
# Construct a rotation R such that R.dot(tang_ij)=[1,0],
# then apply to tang_xy
Rpsi=np.array([[tang_ij[0], tang_ij[1]],
[-tang_ij[1], tang_ij[0]] ] )
j_grad_psi[ji,:]=Rpsi.dot(tang_xy)
# Interpolate to nodes
bc_grad_psi=np.zeros( (len(bcycle),2), np.float64)
N=len(bcycle)
for ni in range(N):
bc_grad_psi[ni,:]=0.5*( j_grad_psi[ni,:] +
j_grad_psi[(ni-1)%N,:] )
bc_grad_phi=np.zeros( (len(bcycle),2), np.float64)
# 90 CW from psi
bc_grad_phi[:,0]=bc_grad_psi[:,1]
bc_grad_phi[:,1]=-bc_grad_psi[:,0]
# Convert to dicts:
grad_psi={}
grad_phi={}
for ni,n in enumerate(bcycle):
grad_psi[n]=bc_grad_psi[ni,:]
grad_phi[n]=bc_grad_phi[ni,:]
return grad_psi,grad_phi
def process_internal_edges(self,gen):
"""
Remove and save internal edges.
Flip angle for remaining edge to reflect orientation
along boundary cycle.
Add 'fixed' and 'turn' field to gen.nodes
"""
e2c=gen.edge_to_cells()
internal=(e2c.min(axis=1)>=0)
for j in np.nonzero(internal)[0]:
# Only when there are parallel edges on both
# sides do we actually record the internal
# edge
angle=gen.edges['angle'][j]
parallel_count=0
for nbr in gen.edges['nodes'][j]:
for j_nbr in gen.node_to_edges(nbr):
if internal[j_nbr]: # j or another internal edge
continue
if (gen.edges['angle'][j_nbr] - angle)%180==0:
parallel_count+=1
break # parallel. good
if parallel_count<2:
log.info(f"Will skip potential internal edge {j}")
else:
self.add_internal_edge(gen.edges['nodes'][j],
gen.edges['angle'][j])
gen.merge_cells(j=j)
cycles=gen.find_cycles(max_cycle_len=1000)
assert len(cycles)==1,"For now, cannot handle multiple cycles"
cycle=cycles[0]
last_angle=None
gen.add_node_field('fixed',np.zeros(gen.Nnodes(),np.bool8))
for a,b in zip( cycle, np.roll(cycle,-1) ):
j=gen.nodes_to_edge(a,b)
assert j is not None
if np.all(gen.edges['nodes'][j]==[a,b]):
pass
else:
assert np.all(gen.edges['nodes'][j]==[b,a])
gen.edges['angle'][j]=(gen.edges['angle'][j]+180)%360
gen.add_node_field('turn',np.zeros(gen.Nnodes(),np.float64), on_exists='overwrite')
for prv,n,nxt in zip( cycle, np.roll(cycle,-1),np.roll(cycle,-2) ):
jprv=gen.nodes_to_edge(prv,n)
jnxt=gen.nodes_to_edge(n,nxt)
assert jprv is not None
assert jnxt is not None
angle_prv=gen.edges['angle'][jprv]
angle_nxt=gen.edges['angle'][jnxt]
gen.nodes['fixed'][n] = angle_prv!=angle_nxt
# These are internal angles
# funky 1 is for internal angle of 360
gen.nodes['turn'][n] = (180 - (angle_nxt-angle_prv) -1 ) % 360 + 1
def internal_edge_angle(self,gen_edge):
"""
        Return the angle recorded for the internal edge (gen_edge[2]).
        The commented-out heuristic below used adjacent edges to estimate
        the +i tangent vector and compared it to the angle of gen_edge,
        returning 0 or 90 (0 vs 180 is not unique). Currently an explicit
        angle is required.
"""
# Use the specified angle if it's set:
assert gen_edge[2] is not None
return gen_edge[2]
# gen=self.gen
# e2c=gen.edge_to_cells()
# i_tan_vecs=[]
# for n in gen_edge[:2]:
# for j in gen.node_to_edges(n):
# angle=gen.edges['angle'][j]
# tan_vec=np.diff(gen.nodes['x'][ gen.edges['nodes'][j] ],axis=0)[0]
# tan_vec=utils.to_unit(tan_vec)
# if e2c[j,0]<0:
# # This edge goes opposite the cycle direction
# tan_vec*=-1
# i_tan_vec=utils.rot(-angle*np.pi/180.,tan_vec)
# i_tan_vecs.append(i_tan_vec)
# i_tan=utils.to_unit( np.mean(i_tan_vecs,axis=0) )
# j_tan=np.array( [i_tan[1],-i_tan[0]] ) # sign may be off, no worries
#
# d_gen_edge= np.diff(gen.nodes['x'][gen_edge[:2]],axis=0)[0]
#
# j_score=np.dot(j_tan,d_gen_edge)
# i_score=np.dot(i_tan,d_gen_edge)
#
# if np.abs(j_score)>np.abs(i_score):
# return 90
# else:
# return 0
def calc_psi_phi(self):
if False:
self.psi_phi_setup(n_j_dirichlet=1)
self.psi_phi_solve_coupled()
else:
self.psi_phi_setup(n_j_dirichlet=2)
self.psi_phi_solve_separate()
i_nf_cells=None
j_nf_cells=None
def psi_phi_solve_separate(self):
"""
Solve psi and phi fields separately, each fully determined.
Assumes that psi_phi_setup() has already been called, and with
n_j_dirichlet=2 specified (so that the phi system is fully
determined)
"""
for coord in [0,1]:
# signify we're working on psi vs. phi
nf_cells=[]
if coord==0:
grad_nodes=dict(self.i_grad_nodes)
dirichlet_nodes=dict(self.i_dirichlet_nodes)
tan_groups=self.i_tan_groups
else:
grad_nodes=dict(self.j_grad_nodes)
dirichlet_nodes=dict(self.j_dirichlet_nodes)
tan_groups=self.j_tan_groups
# Find these automatically.
# For ragged edges: not sure, but punt by dropping the
            # gradient BC on the acute end (node 520)
noflux_tris=[]
for n in np.nonzero(self.g_int.nodes['rigid'])[0]:
gen_n=self.g_int.nodes['gen_n'][n]
assert gen_n>=0
gen_turn=self.gen.nodes['turn'][gen_n]
# For now, ignore non-cartesian, and 90
# degree doesn't count
if (gen_turn>90) and (gen_turn<180):
# A ragged edge -- try out removing the gradient BC
# here
if n in grad_nodes:
# This is maybe causing a problem with phi in cell 1.
log.info(f"n {n}: angle={gen_turn} Dropping gradient BC")
del grad_nodes[n]
continue
if gen_turn not in [270,360]: continue
if gen_turn==270:
log.info(f"n {n}: turn=270")
elif gen_turn==360:
log.info(f"n {n}: turn=360")
js=self.g_int.node_to_edges(n)
e2c=self.g_int.edge_to_cells()
for j in js:
if (e2c[j,0]>=0) and (e2c[j,1]>=0): continue
gen_j=self.g_int.edges['gen_j'][j]
angle=self.gen.edges['angle'][gen_j]
if self.g_int.edges['nodes'][j,0]==n:
nbr=self.g_int.edges['nodes'][j,1]
else:
nbr=self.g_int.edges['nodes'][j,0]
log.info(f"j={j} {n} -- {nbr} angle={angle} coord={coord}")
                    # Does this edge's angle make it a no-flux edge for this coordinate?
if (angle + 90*coord)%180. == 90.:
log.info(" YES")
c=e2c[j,:].max()
tri=self.g_int.cells['nodes'][c]
nf_cells.append(c)
while tri[2] in [n,nbr]:
tri=np.roll(tri,1)
noflux_tris.append( (n,tri) )
else:
log.info(" NO")
if coord==0:
self.i_nf_cells=nf_cells
joins=self.i_tan_joins
else:
self.j_nf_cells=nf_cells
joins=self.j_tan_joins
# Drop an nf_cell constraint for every internal edge
log.info(f"About to process joins, starting with {len(noflux_tris)} nf tris")
for join in joins:
found=False
slim_noflux_tris=[]
log.info(f"Looking for an nf_tri to drop for join {join[0]}--{join[1]}")
for idx,(n,tri) in enumerate(noflux_tris):
if (not found) and (n in join):
log.info(f" will drop {n}")
# Skip this, and copy the rest
found=True
else:
slim_noflux_tris.append( (n,tri) )
if not found:
log.warning(f" Uh-oh! couldn't find a no-flux tri to drop for this internal edge")
noflux_tris=slim_noflux_tris
nf_block=sparse.dok_matrix( (len(noflux_tris),self.g_int.Nnodes()), np.float64)
nf_rhs=np.zeros( len(noflux_tris) )
node_xy=self.g_int.nodes['x'][:,:]
for idx,(n,tri) in enumerate(noflux_tris):
target_dof=idx # just controls where the row is written
d01=node_xy[tri[1],:] - node_xy[tri[0],:]
d02=node_xy[tri[2],:] - node_xy[tri[0],:]
# Derivation in sympy below
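                # In effect this row forces the linear (P1) gradient of the
                # solution on this triangle to have zero component normal to
                # the boundary edge (tri[0],tri[1]), i.e. a discrete no-flux
                # condition, up to a scale factor.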
nf_block[target_dof,:]=0 # clear old
nf_block[target_dof,tri[0]]= -d01[0]**2 + d01[0]*d02[0] - d01[1]**2 + d01[1]*d02[1]
nf_block[target_dof,tri[1]]= -d01[0]*d02[0] - d01[1]*d02[1]
nf_block[target_dof,tri[2]]= d01[0]**2 + d01[1]**2
nf_rhs[target_dof]=0
M_Lap,B_Lap=self.nd.construct_matrix(op='laplacian',
dirichlet_nodes=dirichlet_nodes,
skip_dirichlet=False,
zero_tangential_nodes=tan_groups,
gradient_nodes=grad_nodes)
M=sparse.bmat( [ [M_Lap],[nf_block]] )
B=np.concatenate( [B_Lap,nf_rhs] )
if M.shape[0] != M.shape[1]:
log.error(f"M.shape: {M.shape}")
self.M_Lap=M_Lap
self.B_Lap=B_Lap
self.nf_block=nf_block
self.nf_rhs=nf_rhs
raise Exception("Matrix did not end up square!")
# Direct solve is reasonably fast and gave better results.
soln=sparse.linalg.spsolve(M.tocsr(),B)
assert np.all(np.isfinite(soln))
for grp in tan_groups:
# Making the tangent groups exact helps in contour tracing later
soln[grp]=soln[grp].mean()
if coord==0:
self.psi=soln
else:
self.phi=soln
def psi_phi_setup(self,n_j_dirichlet=1):
"""
Build the lists of BCs for solving psi/phi.
n_j_dirichlet: whether to include just a location BC or both location and scale
for the phi/j field.
"""
# Record internal edges that actually led to a join.
self.i_tan_joins=[]
self.j_tan_joins=[]
gtri=self.g_int
self.nd=nd=NodeDiscretization(gtri)
e2c=gtri.edge_to_cells()
# check boundaries and determine where Laplacian BCs go
boundary=e2c.min(axis=1)<0
i_dirichlet_nodes={} # for psi
j_dirichlet_nodes={} # for phi
# Block of nodes with a zero-tangential-gradient BC
i_tan_groups=[]
j_tan_groups=[]
# i_tan_groups_i=[] # the input i value
# j_tan_groups_j=[] # the input j value
# Try zero-tangential-gradient nodes. Current code will be under-determined
# without the derivative constraints.
bcycle=gtri.boundary_cycle()
n1=bcycle[-1]
i_grp=None
j_grp=None
psi_gradients,phi_gradients=self.calc_bc_gradients(gtri)
psi_gradient_nodes={} # node => unit vector of gradient direction
phi_gradient_nodes={} # node => unit vector of gradient direction
j_angles=self.gen.edges['angle'][ gtri.edges['gen_j'] ]
for n2 in bcycle:
j=gtri.nodes_to_edge(n1,n2)
imatch=j_angles[j] % 180==0
jmatch=j_angles[j] % 180==90
if imatch:
if i_grp is None:
i_grp=[n1]
i_tan_groups.append(i_grp)
# i_tan_groups_i.append(i1)
i_grp.append(n2)
else:
i_grp=None
if jmatch:
if j_grp is None:
j_grp=[n1]
j_tan_groups.append(j_grp)
# j_tan_groups_j.append(j1)
j_grp.append(n2)
else:
j_grp=None
if not (imatch or jmatch):
# Register gradient BC for n1
psi_gradient_nodes[n1]=psi_gradients[n1]
psi_gradient_nodes[n2]=psi_gradients[n2]
phi_gradient_nodes[n1]=phi_gradients[n1]
phi_gradient_nodes[n2]=phi_gradients[n2]
n1=n2
# bcycle likely starts in the middle of either a j_tan_group or i_tan_group.
# see if first and last need to be merged
if i_tan_groups[0][0]==i_tan_groups[-1][-1]:
i_tan_groups[0].extend( i_tan_groups.pop()[:-1] )
if j_tan_groups[0][0]==j_tan_groups[-1][-1]:
j_tan_groups[0].extend( j_tan_groups.pop()[:-1] )
        # a turn=360 node should induce a tan_group (to drop its no-flux BC)
for n in np.nonzero(self.g_int.nodes['rigid'])[0]:
gen_n=self.g_int.nodes['gen_n'][n]
assert gen_n>=0
gen_turn=self.gen.nodes['turn'][gen_n]
if gen_turn!=360: continue
idx=np.nonzero(bcycle==n)[0][0]
N=len(bcycle)
prv=bcycle[(idx-1)%N]
nxt=bcycle[(idx+1)%N]
jprv=gtri.nodes_to_edge(prv,n)
jnxt=gtri.nodes_to_edge(n,nxt)
assert (jprv is not None) and (jnxt is not None)
if (j_angles[jprv]%180==90) and (j_angles[jnxt]%180==90):
i_tan_groups.append([n])
phi_gradient_nodes[n]=phi_gradients[n]
elif (j_angles[jprv]%180==0) and (j_angles[jnxt]%180==0):
j_tan_groups.append([n])
psi_gradient_nodes[n]=psi_gradients[n]
else:
log.error('Yikes - turn is 360, but not axis-aligned')
# Use the internal_edges to combine tangential groups
def join_groups(groups,nA,nB):
grp_result=[]
grpA=grpB=None
for grp in groups:
if nA in grp:
assert grpA is None
grpA=grp
elif nB in grp:
assert grpB is None
grpB=grp
else:
grp_result.append(grp)
assert grpA is not None
assert grpB is not None
grp_result.append( list(grpA) + list(grpB) )
return grp_result
for gen_edge in self.internal_edges:
internal_angle=self.internal_edge_angle(gen_edge)
edge=[gtri.select_nodes_nearest(x)
for x in self.gen.nodes['x'][gen_edge[:2]]]
if np.any( self.gen.nodes['turn'][gen_edge[:2]]==180.0):
# This may still be too lenient. Might be better to
# check whether the angle of the internal edge is parallel
# to edges for the two nodes. Or rather than joining groups
# create a new group. The code as is assumes that both
# nodes of gen_edge[:2] have adjacent edges parallel to
# the internal edge, such that they both have existing groups
# that can be joined.
log.info("Internal edge connects straight boundaries. No join")
continue
if internal_angle%180==0: # join on i
log.info("Joining two i_tan_groups")
i_tan_groups=join_groups(i_tan_groups,edge[0],edge[1])
self.i_tan_joins.append( edge )
elif internal_angle%180==90: # join on j
log.info("Joining two j_tan_groups")
j_tan_groups=join_groups(j_tan_groups,edge[0],edge[1])
self.j_tan_joins.append( edge )
else:
#import pdb
#pdb.set_trace()
raise Exception("Internal edge doesn't appear to join same-valued contours")
# find longest consecutive stretch of the target_angle,
# bounded by edges that are perpendicular to the target_angle
# Possible that a domain could fail to have this! If it had lots of
# weird ragged edges
el=gtri.edges_length()
def longest_straight(target_angle):
longest=(0,None,None)
# start at a nice corner
cycle=np.roll( bcycle,-np.nonzero( gtri.nodes['rigid'][bcycle])[0][0] )
j_cycle=[gtri.nodes_to_edge(na,nb) for na,nb in zip(cycle, np.roll(cycle,-1))]
j_angles=self.gen.edges['angle'][gtri.edges['gen_j'][j_cycle]]
N=len(j_angles)
breaks=np.r_[0, 1+np.nonzero(np.diff(j_angles))[0], N]
for run_start,run_stop in zip(breaks[:-1],breaks[1:]):
run_angle=j_angles[run_start]
prv_angle=j_angles[(run_start-1)%N]
nxt_angle=j_angles[(run_stop+1)%N]
# Look for runs aligned with target_angle
if run_angle != target_angle:
continue
# And the ends must be perpendicular to target
# specifically they need to be part of a tan_group, I think
# Actually that's too restrictive. They just need to *not*
                # be laplacian nodes, and any node with a turn is not laplacian,
                # so we're set.
if False:
if (prv_angle%180) != ( (target_angle+90)%180):
continue
if (nxt_angle%180) != ( (target_angle+90)%180):
continue
dist=el[j_cycle[run_start:run_stop]].sum()
if dist>longest[0]:
# nth edge joins the nth node and n+1th node
n_start=cycle[run_start]
n_stop =cycle[run_stop%N] # maybe?
longest=(dist,n_start,n_stop)
assert longest[1] is not None
assert longest[2] is not None
return longest
# Can I really decide the sign here? As long as they are consistent with each
# other. The longest stretches need to be oriented, not just parallel
i_longest=longest_straight(90)
j_longest=longest_straight(0)
i_dirichlet_nodes[i_longest[1]]=-1
i_dirichlet_nodes[i_longest[2]]=1
j_dirichlet_nodes[j_longest[1]]=-1
if n_j_dirichlet==2:
# When the signs were opposite this, tracing failed on phi
j_dirichlet_nodes[j_longest[2]]=1
self.i_dirichlet_nodes=i_dirichlet_nodes
self.i_tan_groups=i_tan_groups
self.i_grad_nodes=psi_gradient_nodes
self.j_dirichlet_nodes=j_dirichlet_nodes
self.j_tan_groups=j_tan_groups
self.j_grad_nodes=phi_gradient_nodes
def psi_phi_solve(self):
        gtri=self.g_int
        # Node-based discretization of the intermediate grid supplies the
        # laplacian/derivative operators assembled below.
        nd=NodeDiscretization(gtri)
Mblocks=[]
Bblocks=[]
if 1: # PSI
M_psi_Lap,B_psi_Lap=nd.construct_matrix(op='laplacian',
dirichlet_nodes=self.i_dirichlet_nodes,
zero_tangential_nodes=self.i_tan_groups,
gradient_nodes=self.i_grad_nodes)
Mblocks.append( [M_psi_Lap,None] )
Bblocks.append( B_psi_Lap )
if 1: # PHI
# including phi_gradient_nodes, and the derivative links below
# is redundant but balanced.
M_phi_Lap,B_phi_Lap=nd.construct_matrix(op='laplacian',
dirichlet_nodes=self.j_dirichlet_nodes,
zero_tangential_nodes=self.j_tan_groups,
gradient_nodes=self.j_grad_nodes)
Mblocks.append( [None,M_phi_Lap] )
Bblocks.append( B_phi_Lap )
if 1:
# Not sure what the "right" value is here.
# When the grid is coarse and irregular, the
# error in these blocks can overwhelm the BCs
# above. This scaling decreases the weight of
# these blocks.
# 0.1 was okay
# Try normalizing based on degrees of freedom.
# how many dofs are we short?
# This assumes that the scale of the rows above is of
# the same order as the scale of a derivative row below.
# each of those rows constrains 1 dof, and I want the
# set of derivative rows to constrain dofs. And there
# are 2*Nnodes() rows.
# Hmmm. Had a case where it needed to be bigger (lagoon)
# Not sure why.
# Extra degrees of freedom:
# Each tangent group leaves an extra dof (a zero row)
# and the above BCs constrain 3 of those
            dofs=len(self.i_tan_groups) + len(self.j_tan_groups) - 3
assert dofs>0
if self.gradient_scale=='scaled':
gradient_scale = dofs / (2*gtri.Nnodes())
else:
gradient_scale=self.gradient_scale
# PHI-PSI relationship
# When full dirichlet is used, this doesn't help, but if
# just zero-tangential-gradient is used, this is necessary.
Mdx,Bdx=nd.construct_matrix(op='dx')
Mdy,Bdy=nd.construct_matrix(op='dy')
if gradient_scale!=1.0:
Mdx *= gradient_scale
Mdy *= gradient_scale
Bdx *= gradient_scale
Bdy *= gradient_scale
Mblocks.append( [Mdy,-Mdx] )
Mblocks.append( [Mdx, Mdy] )
Bblocks.append( np.zeros(Mdx.shape[1]) )
Bblocks.append( np.zeros(Mdx.shape[1]) )
self.Mblocks=Mblocks
self.Bblocks=Bblocks
bigM=sparse.bmat( Mblocks )
rhs=np.concatenate( Bblocks )
psi_phi,*rest=sparse.linalg.lsqr(bigM,rhs)
self.psi_phi=psi_phi
self.psi=psi_phi[:gtri.Nnodes()]
self.phi=psi_phi[gtri.Nnodes():]
# Using the tan_groups, set the values to be exact
        for i_grp in self.i_tan_groups:
            self.psi[i_grp]=self.psi[i_grp].mean()
        for j_grp in self.j_tan_groups:
            self.phi[j_grp]=self.phi[j_grp].mean()
def plot_psi_phi_setup(self,num=11):
"""
Plot the BCs that went into the psi_phi calculation:
"""
plt.figure(num).clf()
fig,ax=plt.subplots(num=num)
self.g_int.plot_edges(color='k',lw=0.5,ax=ax,alpha=0.2)
ax.axis('off')
for i_d in self.i_dirichlet_nodes:
            ax.annotate( rf"$\psi$={self.i_dirichlet_nodes[i_d]}",
self.g_int.nodes['x'][i_d], va='top',
arrowprops=dict(arrowstyle='simple',alpha=0.4))
for j_d in self.j_dirichlet_nodes:
            ax.annotate( rf"$\phi$={self.j_dirichlet_nodes[j_d]}",
self.g_int.nodes['x'][j_d], va='bottom' ,
arrowprops=dict(arrowstyle='simple',alpha=0.4))
from matplotlib import cm
from itertools import cycle
group_colors=cycle( list(colors.TABLEAU_COLORS.values()) )
for idx,i_grp in enumerate(self.i_tan_groups):
ax.plot( self.g_int.nodes['x'][i_grp,0],self.g_int.nodes['x'][i_grp,1],
'.',color=next(group_colors),label=f"i grp {idx}")
for idx,j_grp in enumerate(self.j_tan_groups):
ax.plot( self.g_int.nodes['x'][j_grp,0],self.g_int.nodes['x'][j_grp,1],
'+',color=next(group_colors),label=f"j grp {idx}")
ax.legend(loc='upper right')
i_quivs=np.array( [ [self.g_int.nodes['x'][n], self.i_grad_nodes[n] ]
for n in self.i_grad_nodes] )
j_quivs=np.array( [ [self.g_int.nodes['x'][n], self.j_grad_nodes[n] ]
for n in self.j_grad_nodes] )
if len(i_quivs):
i_qkey=ax.quiver(i_quivs[:,0,0], i_quivs[:,0,1],
i_quivs[:,1,0], i_quivs[:,1,1],
color='k')
ax.quiverkey(i_qkey,0.15,0.95,1.0,"I gradient",coordinates='figure')
if len(j_quivs):
j_qkey=ax.quiver(j_quivs[:,0,0], j_quivs[:,0,1],
j_quivs[:,1,0], j_quivs[:,1,1],
color='r')
ax.quiverkey(j_qkey,0.3,0.95,1.0,"J gradient",coordinates='figure')
if self.i_nf_cells:
self.g_int.plot_cells(mask=self.i_nf_cells,color='r',alpha=0.4)
if self.j_nf_cells:
self.g_int.plot_cells(mask=self.j_nf_cells,color='0.6',alpha=0.4)
ax.set_position([0,0,1,1])
return fig,ax
def plot_psi_phi(self,num=4,thinning=2,ax=None):
if ax is None:
plt.figure(num).clf()
fig,ax=plt.subplots(num=num)
#di,dj=np.nanmax(self.gen.nodes['ij'],axis=0) - np.nanmin(self.gen.nodes['ij'],axis=0)
di=self.psi.max() - self.psi.min()
dj=self.phi.max() - self.phi.min()
delta=max(di,dj)/30 # 30 contours in the larger dimension
di/=delta
dj/=delta
self.g_int.plot_edges(color='k',lw=0.5,alpha=0.2)
cset_psi=self.g_int.contour_node_values(self.psi,int(di/thinning),
linewidths=1.5,linestyles='solid',colors='orange',
ax=ax)
cset_phi=self.g_int.contour_node_values(self.phi,int(dj/thinning),
linewidths=1.5,linestyles='solid',colors='blue',
ax=ax)
ax.axis('tight')
ax.axis('equal')
        ax.clabel(cset_psi, fmt=r"$\psi$=%g", fontsize=10, inline=False, use_clabeltext=True)
        ax.clabel(cset_phi, fmt=r"$\phi$=%g", fontsize=10, inline=False, use_clabeltext=True)
def plot_result(self,num=5):
plt.figure(num).clf()
fig,ax=plt.subplots(num=num)
self.g_final.plot_edges(color='k',lw=0.5,ax=ax)
self.g_final.plot_cells(color='0.85',lw=0,zorder=-2,ax=ax)
ax.set_position([0,0,1,1])
ax.axis('off')
ax.axis('tight')
ax.axis('equal')
return fig,ax
def psiphi_to_ij(self,gen,g_int,src='ij',inverse=False):
"""
Return a mapping of psi=>i and phi=>j
This is built from fixed nodes of gen, and self.psi,self.phi defined
on all of the nodes of g_int.
src defines what field is taken from gen.
Nodes are matched by nearest node search.
For now, this assumes the mapping is independent for the two coordinates.
For more complicated domains this mapping will have to become a
function [psi x phi] => [i x j].
Currently it's psi=>i, phi=>j.
Returns a function that takes [N,2] in psi/phi space, and returns [N,2]
in ij space (or the inverse of that if inverse is True)
"""
for coord in [0,1]: # i,j
gen_valid=(~gen.nodes['deleted'])&(gen.nodes[src+'_fixed'][:,coord])
# subset of gtri nodes that map to fixed gen nodes
gen_to_int_nodes=[g_int.select_nodes_nearest(x)
for x in gen.nodes['x'][gen_valid]]
# i or j coord:
all_coord=gen.nodes[src][gen_valid,coord]
if coord==0:
all_field=self.psi[gen_to_int_nodes]
else:
all_field=self.phi[gen_to_int_nodes]
# Build the 1-D mapping of i/j to psi/phi
# [ {i or j value}, {mean of psi or phi at that i/j value} ]
coord_to_field=np.array( [ [k,np.mean(all_field[elts])]
for k,elts in utils.enumerate_groups(all_coord)] )
if coord==0:
i_psi=coord_to_field
else:
j_phi=coord_to_field
# the mapping isn't necessarily monotonic at this point, but it
# needs to be.. so force it.
# enumerate_groups will put k in order, but not the field values
# Note that phi is sorted decreasing
i_psi[:,1] = np.sort(i_psi[:,1])
j_phi[:,1] = np.sort(j_phi[:,1])[::-1]
def mapper(psiphi,i_psi=i_psi,j_phi=j_phi):
ij=np.zeros_like(psiphi)
ij[:,0]=np.interp(psiphi[:,0],i_psi[:,1],i_psi[:,0])
ij[:,1]=np.interp(psiphi[:,1],j_phi[::-1,1],j_phi[::-1,0])
return ij
def mapper_inv(ij,i_psi=i_psi,j_phi=j_phi):
psiphi=np.zeros_like(ij)
psiphi[:,0]=np.interp(ij[:,0],i_psi[:,0],i_psi[:,1])
psiphi[:,1]=np.interp(ij[:,1],j_phi[:,0],j_phi[:,1])
return psiphi
if inverse:
return mapper_inv
else:
return mapper
def remap_ij(self,g,src='ij'):
"""
g: grid with a nodes['ij'] field
src: a differently scaled 'ij' field on self.gen
returns an array like g.node['ij'], but mapped to self.gen.nodes[src].
In particular, this is useful for calculating what generating ij values
would be on a nominal resolution grid (i.e. where the grid nodes and edges
are uniform in IJ space).
"""
# The nodes of g are defined on IJ, and I want
# to map those IJ to ij in a local way. Local in the sense that
# I may map to different i in different parts of the domain.
IJ_in=g.nodes['ij'] # g may be generated in IJ space, but the field is still 'ij'
# Make a hash to ease navigation
IJ_to_n={ tuple(IJ_in[n]):n
for n in g.valid_node_iter() }
ij_out=np.zeros_like(IJ_in)*np.nan
for coord in [0,1]: # psi/i, phi/j
fixed=np.nonzero( self.gen.nodes[src+'_fixed'][:,coord] )[0]
for gen_n in fixed:
val=self.gen.nodes[src][gen_n,coord]
# match that with a node in g
n=g.select_nodes_nearest( self.gen.nodes['x'][gen_n] )
# Should be a very good match. Could also search
# based on IJ, and get an exact match
assert np.allclose( g.nodes['x'][n], self.gen.nodes['x'][gen_n] ), "did not find a good match g~gen, based on x"
ij_out[n,coord]=val
# Traverse in IJ space (i.e. along g grid lines)
for incr in [1,-1]:
IJ_trav=IJ_in[n].copy()
while True:
# 1-coord, as we want to move along the constant contour of coord.
IJ_trav[1-coord]+=incr
if tuple(IJ_trav) in IJ_to_n:
n_trav=IJ_to_n[tuple(IJ_trav)]
if np.isfinite( ij_out[n_trav,coord] ):
assert ij_out[n_trav,coord]==val,"Encountered incompatible IJ"
else:
ij_out[n_trav,coord]=val
else:
break
# just one coordinate at a time
valid=np.isfinite( ij_out[:,coord] )
interp_IJ_to_ij=utils.LinearNDExtrapolator(IJ_in[valid,:], ij_out[valid,coord])
ij_out[~valid,coord] = interp_IJ_to_ij(IJ_in[~valid,:])
return ij_out
# --- Patch Construction ---
def map_fixed_int_to_gen(self,g_int,gen):
"""
Return a dictionary mapping nodes of self.g_int to fixed nodes of
self.gen
This is more specific than just looking at gen's nodes['fixed'].
Omit nodes that have an angle of 180
"""
# This code assumes that either ij are both fixed, or neither fixed.
fixed_int_to_gen={}
for n in g_int.valid_node_iter():
g_n=g_int.nodes['gen_n'][n]
#if (g_n>=0) and (gen.nodes['fixed'][g_n]):
if (g_n>=0) and (gen.nodes['turn'][g_n]!=180):
fixed_int_to_gen[n]=g_n
return fixed_int_to_gen
def create_final_by_patches(self):
fixed_int_to_gen = self.map_fixed_int_to_gen(self.g_int,self.gen)
n_fixed=list(fixed_int_to_gen.keys())
g_int=self.g_int
angles=np.zeros(g_int.Nedges(),np.float32)
angles=np.where( g_int.edges['gen_j']>=0,
self.gen.edges['angle'][g_int.edges['gen_j']],
np.nan )
g_int.add_edge_field('angle',angles,on_exists='overwrite')
# misnomer. Not final. Just for finding exact intersections
g_final=exact_delaunay.Triangulation(extra_edge_fields=[
('angle',np.float64),
('psiphi',np.float64,2)])
# g_final.edge_defaults['dij']=np.nan
# Not great - when edges get split, this will at least leave the fields as nan
# instead of 0.
# g_final.edge_defaults['ij']=np.nan
g_final.edge_defaults['psiphi']=np.nan
g_final.edge_defaults['angle']=np.nan
def trace_contour(b,angle):
"""
angle: 0 is constant psi, with psi increasing to left
"""
if angle==90:
# trace constant phi
node_field=self.phi # the field to trace a contour of
cval_pos='right' # guess and check
elif angle==270:
node_field=self.phi # the field to trace a contour of
cval_pos='left'
elif angle==0:
node_field=self.psi
cval_pos='left' # guess and check
elif angle==180:
node_field=self.psi
cval_pos='right'
else:
raise Exception("what?")
cval=node_field[b]
return g_int.trace_node_contour(n0=b,cval=cval,
node_field=node_field,
pos_side=cval_pos,
return_full=True)
        # node_exits: g_final node index =>
        #   list of (exit angle, 'internal'|'boundary') tuples, with the angle
        #   taken from the perspective of leaving the node
node_exits=defaultdict(list)
def insert_contour(trace_items,angle=None,
psiphi0=[np.nan,np.nan]):
assert np.isfinite(psiphi0[0]) or np.isfinite(psiphi0[1])
trace_points=np.array( [pnt
for typ,idx,pnt in trace_items
if pnt is not None])
# Check whether the ends need to be forced into the boundary
# but here we preemptively doctor up the ends
for i in [0,-1]:
if trace_items[i][0]=='edge':
# When it hits a non-cartesian edge this will fail (which is okay)
# Feels a bit fragile:
j_int=trace_items[i][1].j # it's a halfedge
j_gen=g_int.edges['gen_j'][j_int] # from this original edge
angle_gen=self.gen.edges['angle'][j_gen]
if angle_gen%90 != 0:
log.info("Not worrying about contour hitting diagonal")
continue
# Force that point into an existing constrained edge of g_final
pnt=trace_points[i]
best=[None,np.inf]
for j in np.nonzero(g_final.edges['constrained'] & (~g_final.edges['deleted']))[0]:
d=utils.point_segment_distance( pnt,
g_final.nodes['x'][g_final.edges['nodes'][j]] )
if d<best[1]:
best=[j,d]
j,d=best
# Typ. 1e-10 when using UTM coordinates
assert d<1e-5
if d>0.0:
n_new=g_final.split_constraint(x=pnt,j=j)
trace_nodes,trace_edges=g_final.add_constrained_linestring(trace_points,on_intersection='insert',
on_exists='stop')
if angle is not None:
g_final.edges['angle'][trace_edges]=angle
#if ij0 is not None:
# g_final.edges['ij'][trace_edges]=ij0
if psiphi0 is not None:
g_final.edges['psiphi'][trace_edges]=psiphi0
# Update node_exits:
assert angle is not None,"Pretty sure this should always be supplied"
exit_angle=angle
for a in trace_nodes[:-1]:
node_exits[a].append( (exit_angle,'internal') )
exit_angle=(exit_angle+180)%360
for b in trace_nodes[1:]:
node_exits[b].append( (exit_angle,'internal') )
def trace_and_insert_contour(b,angle):
# does dij_angle fall between the angles formed by the boundary, including
# a little slop.
log.info(f"{angle} looks good")
gn=fixed_int_to_gen[b] # below we already check to see that b is in there.
# ij0=self.gen.nodes['ij'][gn].copy()
# only pass the one constant along the contour
if angle%180==0:
psiphi0=[self.psi[b],np.nan]
elif angle%180==90:
psiphi0=[np.nan,self.phi[b]]
trace_items=trace_contour(b,angle=angle)
return insert_contour(trace_items,angle=angle,
psiphi0=psiphi0)
def trace_and_insert_boundaries(cycle):
for a,b in utils.progress( zip( cycle, np.roll(cycle,-1) )):
j=g_int.nodes_to_edge(a,b)
angle=g_int.edges['angle'][j] # angle from a to b
if angle%90!=0: continue # ragged edge
trace_points=g_int.nodes['x'][[a,b]]
trace_nodes,trace_edges=g_final.add_constrained_linestring(trace_points,on_intersection='insert')
g_final.edges['angle'][trace_edges]=angle
# Update node_exits, which are referenced by nodes in g_final
for a_fin in trace_nodes[:-1]:
node_exits[a_fin].append( (angle,'boundary') )
opp_angle=(angle+180)%360
for b_fin in trace_nodes[1:]:
node_exits[b_fin].append( (opp_angle,'boundary') )
# This used to also fill in ij, but we don't have that now.
# need to update psiphi for these edges, too.
if angle%180==0: # psi constant
psiphi=[self.psi[a],np.nan]
elif angle%180==90:
psiphi=[np.nan, self.phi[a]]
else:
assert False
g_final.edges['psiphi'][trace_edges]=psiphi
# Add boundaries when they coincide with contours
cycle=g_int.boundary_cycle() # could be multiple eventually...
log.info("Bulk init with boundaries")
# Bulk init with the points, then come back to fix metadata
# Don't add the constraints, since that would mean adding
# ragged edges potentially too early. This saves a huge amount
# of time in building the DT, and the constraint handling is
# relatively fast.
g_final.bulk_init(g_int.nodes['x'][cycle])
log.info("Tracing boundaries...")
trace_and_insert_boundaries(cycle)
log.info("done")
# Add internal contours
# return with internal.
for a,b,c in zip(cycle,
np.roll(cycle,-1),
np.roll(cycle,-2)):
# if b in [331,434]: # side-channel
# plt.figure(1).clf()
# g_int.plot_edges()
# g_int.plot_nodes(mask=g_int.nodes['rigid'],labeler='id')
# g_int.plot_nodes(mask=[a,c], labeler='id')
# g_int.plot_edges(mask=[j_ab,j_bc],labeler='angle')
# zoom=(552573.3257994705, 552606.492118541, 4124415.575118965, 4124440.2893760786)
# plt.axis(zoom)
# plt.draw()
# import pdb
# pdb.set_trace()
if b not in fixed_int_to_gen: continue
j_ab=g_int.nodes_to_edge(a,b)
j_bc=g_int.nodes_to_edge(b,c)
# flip to be the exit angle
angle_ba = (180+g_int.edges['angle'][j_ab])%360
angle_bc = g_int.edges['angle'][j_bc]
b_final=None # lazy lookup
for angle in [0,90,180,270]:
# is angle into the domain?
# cruft trace=None
# if angle is left of j_ab and right of j_bc,
# then it should be into the domain and can be traced
# careful with sting angles
# a,b,c are ordered CCW on the cycle, domain is to the
# left.
# so I want bc - angle - ba to be ordered CCW
if ( ((angle_bc==angle_ba) and (angle!=angle_bc))
or (angle-angle_bc)%360 < ((angle_ba-angle_bc)%360) ):
if b_final is None:
b_final=g_final.select_nodes_nearest(g_int.nodes['x'][b],max_dist=0.0)
dupe=False
assert b_final is not None # should be in there from trace_boundaries
for exit_angle,exit_type in node_exits[b_final]:
if exit_angle==angle:
dupe=True
log.info("Duplicate exit for internal trace from %d. Skip"%b)
break
if not dupe:
trace_and_insert_contour(b,angle)
def tri_to_grid(g_final):
g_final2=g_final.copy()
for c in g_final2.valid_cell_iter():
g_final2.delete_cell(c)
for j in np.nonzero( (~g_final2.edges['deleted']) & (~g_final2.edges['constrained']))[0]:
g_final2.delete_edge(j)
g_final2.modify_max_sides(2000)
g_final2.make_cells_from_edges()
return g_final2
g_final2=tri_to_grid(g_final)
if 1:
# Add any diagonal edges here, so that ragged edges
# get cells in g_final2, too.
ragged=np.isfinite(g_int.edges['angle']) & (g_int.edges['angle']%90!=0.0)
j_ints=np.nonzero( ragged )[0]
for j_int in j_ints:
nodes=[g_final2.add_or_find_node(g_int.nodes['x'][n])
for n in g_int.edges['nodes'][j_int]]
j_fin2=g_final2.nodes_to_edge(nodes)
angle=g_int.edges['angle'][j_int]
if j_fin2 is None:
j_fin2=g_final2.add_edge(nodes=nodes,constrained=True,
angle=angle,
psiphi=[np.nan,np.nan])
g_final2.make_cells_from_edges()
# DBG
self.g_not_final=g_final
self.g_final2=g_final2
# import pdb
# pdb.set_trace()
#print("Bailing early")
#return
# /DBG
# --- Compile Swaths ---
e2c=g_final2.edge_to_cells(recalc=True)
i_adj=np.zeros( (g_final2.Ncells(), g_final2.Ncells()), np.bool8)
j_adj=np.zeros( (g_final2.Ncells(), g_final2.Ncells()), np.bool8)
# tag ragged cells
j_ragged=g_final2.edges['angle']%90 != 0.0
c_ragged=np.unique( g_final2.edge_to_cells()[j_ragged,:] )
c_ragged=c_ragged[c_ragged>=0]
for j in g_final2.valid_edge_iter():
c1,c2=e2c[j,:]
if c1<0 or c2<0: continue
# if the di of dij is 0, the edge joins cell in i_adj
# I think angle==0 is the same as dij=[1,0]
# Need to ignore ragged edges here -- if a contour intersects
# a ragged edge, the join is not guaranteed at the ragged
# boundary, and that can create a ragged cell that erroneously
# joins two parallel swaths. Not sure how this will affect
# downstream handling of ragged cells, but try omitting them
# entirely here.
if (c1 in c_ragged) or (c2 in c_ragged):
continue
if g_final2.edges['angle'][j] % 180==0: # guess failed.
i_adj[c1,c2]=i_adj[c2,c1]=True
elif g_final2.edges['angle'][j] % 180==90:
j_adj[c1,c2]=j_adj[c2,c1]=True
else:
log.warning("What? Ragged edge okay, but it shouldn't have both cell neighbors")
n_comp_i,labels_i=sparse.csgraph.connected_components(i_adj.astype(np.int32),directed=False)
n_comp_j,labels_j=sparse.csgraph.connected_components(j_adj,directed=False)
# preprocessing for contour placement
nd=NodeDiscretization(g_int)
Mdx,Bdx=nd.construct_matrix(op='dx')
Mdy,Bdy=nd.construct_matrix(op='dy')
psi_dx=Mdx.dot(self.psi)
psi_dy=Mdy.dot(self.psi)
phi_dx=Mdx.dot(self.phi)
phi_dy=Mdy.dot(self.phi)
# These should be about the same. And they are, but
# keep them separate in case the psi_phi solution procedure
# evolves.
psi_grad=np.sqrt( psi_dx**2 + psi_dy**2)
phi_grad=np.sqrt( phi_dx**2 + phi_dy**2)
pp_grad=[psi_grad,phi_grad]
# Just figures out the contour values and sets them on the patches.
patch_to_contour=[{},{}] # coord, cell index=>array of contour values
self.patch_to_contour=patch_to_contour
def add_swath_contours_new(comp_cells,node_field,coord,scale):
if len(comp_cells)==1 and comp_cells[0] in c_ragged:
# Ragged cells are handled afterwards by compiling
# contours from neighboring cells
return
# Check all of the nodes to find the range ij
comp_nodes=[ g_final2.cell_to_nodes(c) for c in comp_cells ]
comp_nodes=np.unique( np.concatenate(comp_nodes) )
comp_ijs=[] # Certainly could have kept this info along while building...
field_values=[]
# To do this, need to do it over all cells, not just picking comp_cells[0]
field_min=np.inf
field_max=-np.inf
for comp_cell in comp_cells:
comp_pp=np.array(g_final2.edges['psiphi'][ g_final2.cell_to_edges(comp_cell) ])
# it's actually the other coordinate that we want to consider.
field_min=min(field_min,np.nanmin( comp_pp[:,1-coord] ))
field_max=max(field_max,np.nanmax( comp_pp[:,1-coord] ))
# Could do this more directly from topology if it mattered..
swath_poly=ops.cascaded_union( [g_final2.cell_polygon(c) for c in comp_cells] )
swath_nodes=g_int.select_nodes_intersecting(swath_poly)
swath_vals=node_field[swath_nodes]
swath_grad=pp_grad[1-coord][swath_nodes] # right?
order=np.argsort(swath_vals)
o_vals=swath_vals[order]
o_dval_ds=swath_grad[order]
o_ds_dval=1./o_dval_ds
uniform_scale=False
if not uniform_scale:
# Rather than using s as a distance in geographic space, which
# scaled by a single value of local_scale, here normalize
# ds by variable scale in o_scales, and consider s a distance in grid space
o_scales=scale( g_int.nodes['x'][swath_nodes])[order]
o_ds_dval /= o_scales
# trapezoid rule integration
d_vals=np.diff(o_vals)
# Particularly near the ends there are a lot of
# duplicate swath_vals.
# Try a bit of lowpass to even things out.
if 1:
winsize=int(len(o_vals)/5)
if winsize>1:
o_ds_dval=filters.lowpass_fir(o_ds_dval,winsize)
s=np.cumsum(d_vals*0.5*(o_ds_dval[:-1]+o_ds_dval[1:]))
s=np.r_[0,s]
# HERE -- calculate this from resolution
# Scale is under-utilized here.
# This is just calculating a mean scale over the whole swath
if uniform_scale:
local_scale=scale( g_int.nodes['x'][swath_nodes] ).mean(axis=0)
else:
local_scale=1.0
# s gives the average geographic distance along the swath.
# this takes the average geographic length of the swath,
# and divides by local scale to get the number of cells
n_swath_cells=int(np.round( (s.max() - s.min())/local_scale))
n_swath_cells=max(1,n_swath_cells)
# Evenly divide up geographic space into the desired number of cells
s_contours=np.linspace(s[0],s[-1],1+n_swath_cells)
# Then map back to the node field values.
adj_contours=np.interp( s_contours,
s,o_vals)
adj_contours[0]=field_min
adj_contours[-1]=field_max
assert np.all(np.diff(adj_contours)>0),"should be monotonic, right?"
for c in comp_cells:
patch_to_contour[coord][c]=adj_contours
if 1: # Swath processing
for coord in [0,1]: # i/j
log.info("Coord: %s"%coord)
if coord==0:
labels=labels_i
n_comp=n_comp_i
node_field=self.phi # feels backwards.. it's right, just misnamed
else:
labels=labels_j
n_comp=n_comp_j
node_field=self.psi
for comp in range(n_comp):
log.info("Swath: %s"%comp)
comp_cells=np.nonzero(labels==comp)[0]
add_swath_contours_new(comp_cells,node_field,coord,self.scales[coord])
# come back to handle ragged cells
for c in c_ragged:
c_contours=[[],[]]
for j in g_final2.cell_to_edges(c):
c1,c2 = g_final2.edges['cells'][j]
if min(c1,c2)<0: continue # only internal edges matter
if c1==c:
c_nbr=c2
elif c2==c:
c_nbr=c1
else:
raise Exception("Sanity lost")
if c_nbr in c_ragged:
log.info("Brave! Two ragged cells adjacent to each other")
continue
# similar logic as above
orient=g_final2.edges['angle'][j] % 180
if orient==0:
c_contours[0].append( patch_to_contour[0][c_nbr] )
elif orient==90:
c_contours[1].append( patch_to_contour[1][c_nbr] )
# import pdb
# pdb.set_trace()
patch_to_contour[0][c]=np.unique(np.concatenate(c_contours[0]))
patch_to_contour[1][c]=np.unique(np.concatenate(c_contours[1]))
# Direct grid gen from contour specifications:
self.patch_grids=patch_grids=[]
g_int.edge_to_cells()
for c in utils.progress(g_final2.valid_cell_iter()):
psi_cvals=patch_to_contour[1][c]
phi_cvals=patch_to_contour[0][c]
g_patch=unstructured_grid.UnstructuredGrid(max_sides=4)
g_patch.add_rectilinear( [0,0], [len(psi_cvals)-1,len(phi_cvals)-1],
len(psi_cvals),len(phi_cvals))
g_patch.add_node_field( 'ij', g_patch.nodes['x'].astype(np.int32))
pp=np.c_[ psi_cvals[g_patch.nodes['ij'][:,0]],
phi_cvals[g_patch.nodes['ij'][:,1]] ]
g_patch.add_node_field( 'pp', pp)
x0=g_final2.cells_centroid([c])[0]
for n in g_patch.valid_node_iter():
x=g_int.fields_to_xy(g_patch.nodes['pp'][n],
node_fields=[self.psi,self.phi],
x0=x0)
if np.isnan(x[0]):
# If it's a ragged cell, probably okay.
edge_angles=g_final2.edges['angle'][ g_final2.cell_to_edges(c) ]
ragged_js=(edge_angles%90!=0.0)
if np.any(ragged_js):
log.info("fields_to_xy() failed, but cell is ragged.")
g_patch.delete_node_cascade(n)
continue
else:
log.error("ERROR: fields_to_xy() failed. Cell not ragged.")
g_patch.nodes['x'][n]=x
# Hmm -
# When it works, this probably reduces the search time considerably,
# but there is the possibility, particularly at corners, that
# this x will be a corner, that corner will lead to the cell *around*
# the corner, and then we get stuck.
# Even the centroid isn't great since it might not even fall inside
# the cell.
# x0=x
# Cells are coming out CW, probably because phi has the opposite sign
# relative j.
# Maybe a better fix is to transpose the grid above.
g_patch.orient_cells()
patch_grids.append(g_patch)
g=patch_grids[0].copy()
for g_next in patch_grids[1:]:
g.add_grid(g_next,merge_nodes='auto',tol=1e-6)
return g
def label_edge_orientation_and_scale(self,g):
"""
Expects a 'pp' node field on g, uses that to
add an 'orient' field {0,90} to edges. Then
add target_l to each edge based on orientation and
self.scales
"""
j_orients=np.zeros( g.Nedges(), np.int32) - 1
jns=g.edges['nodes']
psi_match=(g.nodes['pp'][jns[:,0],0]==g.nodes['pp'][jns[:,1],0])
phi_match=(g.nodes['pp'][jns[:,0],1]==g.nodes['pp'][jns[:,1],1])
assert np.all( psi_match | phi_match )
j_orients[psi_match]=0
j_orients[phi_match]=90
g.add_edge_field('orient',j_orients,on_exists='overwrite')
ec=g.edges_center()
target_scales=np.zeros(g.Nedges(),np.float64)
target_scales[psi_match]=self.scales[0]( ec[psi_match] )
target_scales[phi_match]=self.scales[1]( ec[phi_match] )
assert np.all(target_scales>0)
g.add_edge_field('target_l',target_scales,on_exists='overwrite')
return target_scales
def smooth_to_scale(self,g=None,smooth_iters=3,nudge_iters=2):
"""
Wrapper for orthogonalize.Tweaker.smooth_to_scale
"""
from . import orthogonalize
if g is None:
g=self.g_final
target_scales=self.label_edge_orientation_and_scale(g)
n_fixed,n_free=classify_nodes(g,self.gen)
tweaker=orthogonalize.Tweaker(g)
tweaker.smooth_to_scale(n_free,target_scales,
smooth_iters=smooth_iters,
nudge_iters=nudge_iters)
class SimpleQuadGen(object):
"""
A streamline quad generator that is more usable.
- each cell must map to a rectangle, and the smallest 4 internal angles
will be automatically labeled as 90-degree turns, the rest 180.
- all edges shared by two cells must have a specific count of nodes, given
by a negative scale value.
"""
nom_res=3.5 # needs to be adaptive..
triangle_method='gmsh'
gmsh_path='gmsh'
merge_tol=0.01
def __init__(self,gen,cells=None,execute=True,adjust_scale=True,**kw):
"""
adjust_scale: make a copy of the edge array and update scale to be sure that
shared edges always have a given edge count
"""
utils.set_keywords(self,kw)
self.gen=gen
self.cells=cells
if adjust_scale:
self.gen.edges=self.gen.edges.copy()
self.gen.edges['scale']=adjusted_scale(self.gen,self.gen.edges['scale'],self.nom_res)
if execute:
self.execute()
def execute(self):
snap_angles(self.gen)
prepare_angles_halfedge(self.gen)
add_bezier(self.gen)
self.grids=[]
self.qgs=[]
for c in self.cells:
qg=self.process_one_cell(c)
self.grids.append(qg.g_final)
if self.grids:
self.g_final=self.merge_grids()
return self.g_final
def merge_grids(self):
g=unstructured_grid.UnstructuredGrid(max_sides=4)
for sub_g in self.grids:
g.add_grid(sub_g,merge_nodes='auto',tol=self.merge_tol)
return g
def process_one_cell(self,c,**kw):
kwargs=dict(cell=c,
triangle_method=self.triangle_method,
gmsh_path=self.gmsh_path,
nom_res=self.nom_res)
kwargs.update(kw)
qg=SimpleSingleQuadGen(self.gen,**kwargs)
self.qgs.append(qg)
qg.execute()
return qg
def patch_contours(g_int,node_field,scale,count=None,Mdx=None,Mdy=None,
lowpass_ds_dval=True):
"""
Given g_int, a node field (psi/phi) defined on g_int, a scale field, and
a count of edges, return the contour values of the node field which
would best approximate the requested scale.
g_int: UnstructuredGrid
node_field: a psi or phi field defined on the nodes of g_int
scale: length scale Field with domain include g_int
count: if specified, the number of nodes in the resulting discretization
Mdx,Mdy: matrix operators to calculate derivatives of the node field.
by default create from scratch
returns the contour values (one more than the number of edges)
"""
field_min=node_field.min()
field_max=node_field.max()
# original swath code had to pull out a subset of node in the node
# field, but now we can assume that g_int is congruent to the target
# patch.
swath_nodes=np.arange(g_int.Nnodes())
swath_vals=node_field[swath_nodes]
# preprocessing for contour placement
nd=NodeDiscretization(g_int)
if Mdx is None:
Mdx,_=nd.construct_matrix(op='dx')
if Mdy is None:
Mdy,_=nd.construct_matrix(op='dy')
field_dx=Mdx.dot(node_field)
field_dy=Mdy.dot(node_field)
field_grad=np.sqrt( field_dx**2 + field_dy**2 )
swath_grad=field_grad
order=np.argsort(swath_vals)
o_vals=swath_vals[order]
o_dval_ds=swath_grad[order] # s: coordinate perpendicular to contours
local_scale=scale( g_int.nodes['x'][swath_nodes[order]] )
# local_scale is ds/di or ds/dj, so now s is approx. grid index
o_ds_dval=1./(o_dval_ds*local_scale)
# trapezoid rule integration
d_vals=np.diff(o_vals)
# Particularly near the ends there are a lot of
# duplicate swath_vals.
# Try a bit of lowpass to even things out.
if lowpass_ds_dval:
# HERE this needs to be scale-aware!!
# could use count, but we may not have it, and
# it ends up being evenly spread out, which isn't
# ideal.
# How bad is it to just drop this? Does have an effect,
# particularly in the lateral
if count is None:
# There is probably something more clever to do using the actual
# swath vals.
winsize=int(len(o_vals)/10)
else:
winsize=int(len(o_vals)/count)
if winsize>1:
o_ds_dval=filters.lowpass_fir(o_ds_dval,winsize)
else:
print("No lowpass on ds_dval")
s=np.cumsum(d_vals*0.5*(o_ds_dval[:-1]+o_ds_dval[1:]))
s=np.r_[0,s]
# calculate this from resolution
# might have i/j swapped. range of s is 77m, and field
# is 1. to 1.08. better now..
if count is None:
count=max(2,int(np.round(s.max())))
s_contours=np.linspace(s[0],s[-1],count)
adj_contours=np.interp( s_contours,
s,o_vals)
adj_contours[0]=field_min
adj_contours[-1]=field_max
assert np.all(np.diff(adj_contours)>0),"should be monotonic, right?"
return adj_contours
class SimpleSingleQuadGen(QuadGen):
"""
Rewrite of portions of QuadGen to handle the local single-cell portion.
"""
triangle_method='gmsh'
angle_source='existing'
# The lowpass seems like it would defeat some of the scale handling.
lowpass_ds_dval=True
smooth_patch=True
def __init__(self,gen,cell,**kw):
super(SimpleSingleQuadGen,self).__init__(gen,execute=False,cells=[cell],
**kw)
def discretize_perimeter(self):
"""
up-sample the bezier curves of the generating grid,
populating
self.perimeter: [N,{x,y}]
self.node_to_idx: node index of self.gen mapped to index into perimeter
self.angle_to_segments: map each of the 4 cardinal angles to a list of segments,
proceeding CCW around the cell. each segment is a start node end node, and a count
if count was specified in the input (i.e. gen.edges['scale'] has negative value)
self.perimeter_scale: [N] - linearly interpolated scale along the perimeter nodes
"""
def he_angle(he):
# return (he.grid.edges['angle'][he.j] + 180*he.orient)%360.0
# Since this is being used after the internal edges handling,
# angles are oriented to the cycle of the cell, not the natural
# edge orientation.
return he.grid.edges['angle'][he.j]
# Start at a corner
he=self.gen.cell_to_halfedge(0,0)
while 1:
he_fwd=he.fwd()
corner= he_angle(he) != he_angle(he_fwd)
he=he_fwd
if corner:
break
he0=he
idx=0 # current location into list of perimeter samples
perimeter=[]
perimeter_scales=[] # [N,2] scale interpolated along boundary
node_to_idx={}
angle_to_segments={0:[],
90:[],
180:[],
270:[]}
last_fixed_node=he.node_rev()
# scale here is just for edges with exact number of
# cells (negative).
if 'scale' in self.gen.edges.dtype.names:
edge_scale=self.gen.edges['scale']
else:
edge_scale=np.zeros(self.gen.Nedges())
while 1:
pnts=self.gen_bezier_linestring(he.j,span_fixed=False)
if he.orient:
pnts=pnts[::-1]
perimeter.append(pnts[:-1])
nA=he.node_rev()
nB=he.node_fwd()
# It's possible to trace the nodes here back to nodes of the original
# gen, and then query the scale values directly. But note that
# these are not the same node indices, since gen has been renumbered
# after selecting a single cell. For now, just interpolate:
scales0=np.linspace( self.scales[0](pnts[0]), self.scales[0](pnts[-1]), len(pnts))
scales1=np.linspace( self.scales[1](pnts[0]), self.scales[1](pnts[-1]), len(pnts))
scales=np.c_[scales0,scales1]
perimeter_scales.append(scales[:-1])
node_to_idx[nA]=idx
idx+=len(pnts)-1
he_fwd=he.fwd()
angle=he_angle(he)
angle_fwd=he_angle(he_fwd)
if ( (angle!=angle_fwd) # a corner
or (edge_scale[he.j]<0)
or (edge_scale[he_fwd.j]<0) ):
if edge_scale[he.j]<0:
count=-int( edge_scale[he.j] )
else:
count=0
angle_to_segments[angle].append( [last_fixed_node,he.node_fwd(),count] )
last_fixed_node=he.node_fwd()
he=he_fwd
if he==he0:
break
self.perimeter=np.concatenate(perimeter)
self.perimeter_scales=np.concatenate(perimeter_scales)
self.angle_to_segments=angle_to_segments
self.node_to_idx=node_to_idx
def discretize_string(self,string,density):
"""
string: a node string with counts,
[ (start node, end node, count), ... ]
where a count of 0 means use the provided density
        density: density values pre-evaluated at the perimeter points (formerly a density field)
returns: (N,2) discretized linestring and (N,) bool array of
rigid-ness.
"""
result=[]
rigids=[]
for a,b,count in string:
if count==0:
idx_a=self.node_to_idx[a]
idx_b=self.node_to_idx[b]
if idx_a<idx_b:
pnts=self.perimeter[idx_a:idx_b+1]
dens_pc=density[idx_a:idx_b+1]
else:
pnts=np.concatenate( [self.perimeter[idx_a:],
self.perimeter[:idx_b+1]] )
dens_pc=np.concatenate( [density[idx_a:],
density[:idx_b+1]] )
assert len(pnts)>0
# To defend against discontinuity in the density field,
# use a precalculated density continuous along the linestring,
# dens_pc=density(pnts) # old usage
seg=linestring_utils.resample_linearring(pnts,dens_pc,closed_ring=0)
rigid=np.zeros(len(seg),np.bool8)
rigid[0]=rigid[-1]=True
else:
pnt_a=self.gen.nodes['x'][a]
pnt_b=self.gen.nodes['x'][b]
j=self.gen.nodes_to_edge(a,b)
if j is None:
log.warning("Rigid segment doesn't match a single edge in gen?")
seg=np.c_[ np.linspace(pnt_a[0], pnt_b[0], count+1),
np.linspace(pnt_a[1], pnt_b[1], count+1) ]
else:
# Try using bezier:
# need to be sure that the result doesn't have any dependence on which of the
# two cells we're processing, so that each patch will get the same
# nodes here (bit-equal not necessary since the grids will be merged
# with some small tolerance, but we should be talking machine-precision)
# Should be okay.
# count is correct, shouldn't have +1 here.
seg=self.gen_bezier_linestring(j=j,samples_per_edge=count,span_fixed=False)
if self.gen.edges['nodes'][j,1]==a:
seg=seg[::-1]
rigid=np.ones(len(seg),np.bool8)
result.append(seg[:-1])
rigids.append(rigid[:-1])
result.append( seg[-1:] )
rigids.append( rigid[-1:] )
result=np.concatenate(result)
rigids=np.concatenate(rigids)
return result,rigids
def calculate_coord_count(self,left,right,density):
"""
Shift node densities to get the count on opposite sides to match up.
left,right: lists of segments as in self.angle_to_segments (node start, stop, count)
density: the coordinate-specific density field, already evaluated at perimeter points.
returns: (points along left, rigid markers for left), (points along right, rigid markers for right)
NB: the assumption is that left and right have opposite orientations (i.e. both are CCW, so anti-parallel)
the right sequences are reversed to make them parallel.
"""
af_low=-5
af_high=5
while 1:
af=(af_low+af_high)/2
assert abs(af)<4.9
if not af_high-af_low>1e-8:
raise Exception("Calculate coord count failed to converge")
#import pdb
#pdb.set_trace()
pnts0,rigid0=self.discretize_string(left,(2**af)*density)
pnts1,rigid1=self.discretize_string(right,(0.5**af)*density)
c0=len(pnts0)
c1=len(pnts1)
if c0==c1:
break
# 0,180: positive af makes c1 larger
if c0>c1: # af should be larger
af_low=af
continue
if c0<c1:
af_high=af
continue
return (pnts0,rigid0),(pnts1[::-1],rigid1[::-1])
def select_node_counts(self):
"""
Solve for the count of nodes
        First get the full perimeter at 10 points per bezier segment.
Group edges by angle.
Within each group, consecutive edges with non-negative scale
are treated as a unit.
af is an asymmetry factor.
It starts at 0. We go through the grouped edges, count up
the number of nodes, and see if opposite edges agree. If they
don't agree, af is adjusted.
Sets (left_i,left_i_rigid),(right_i,right_i_rigid) and
(left_j,left_j_rigid),(right_j,right_j_rigid)
"""
self.discretize_perimeter()
# Precalculate density fields along perimeter:
(left_i,left_i_rigid),(right_i,right_i_rigid)=self.calculate_coord_count(self.angle_to_segments[0],
self.angle_to_segments[180],
self.perimeter_scales[:,0])
(left_j,left_j_rigid),(right_j,right_j_rigid)=self.calculate_coord_count(self.angle_to_segments[90],
self.angle_to_segments[270],
self.perimeter_scales[:,1])
# Necessary to have order match grid order below
left_i=left_i[::-1]
left_i_rigid=left_i_rigid[::-1]
right_i=right_i[::-1]
right_i_rigid=right_i_rigid[::-1]
self.left_i=left_i
self.left_i_rigid=left_i_rigid
self.left_j=left_j
self.left_j_rigid=left_j_rigid
self.right_i=right_i
self.right_i_rigid=right_i_rigid
self.right_j=right_j
self.right_j_rigid=right_j_rigid
def create_patch(self):
"""
Using the count information in self.left_{i,j}, create the rectilinear patch,
and return the cell/node map, too.
"""
patch=unstructured_grid.UnstructuredGrid(max_sides=4,
extra_node_fields=[('rigid',np.bool8),
('pp',np.float64,2)],
extra_edge_fields=[('orient',np.float32)])
elts=patch.add_rectilinear( [0,0],
[len(self.left_i)-1, len(self.left_j)-1],
len(self.left_i), len(self.left_j),
reverse_cells=True)
# Fill in orientation
segs=patch.nodes['x'][ patch.edges['nodes'] ]
deltas=segs[:,1,:] - segs[:,0,:]
patch.edges['orient'][deltas[:,0]==0]=90 # should be consistent with gen.edges['angle']
patch.edges['orient'][deltas[:,1]==0]=0
return patch,elts
def field_interpolators(self):
"""
Return two interpolating function that map X=>psi/phi.
Some care is taken to return reasonable values when X is slightly outside
self.g_int.
"""
finder=PermissiveFinder(self.g_int)
psi_interp=LinearTriInterpolator(finder.mp_tri,self.psi,finder)
phi_interp=LinearTriInterpolator(finder.mp_tri,self.phi,finder)
psi_field=lambda x: psi_interp(x[...,0],x[...,1]).filled(np.nan)
phi_field=lambda x: phi_interp(x[...,0],x[...,1]).filled(np.nan)
return psi_field,phi_field
def execute(self):
# Note that self.gen is *not* the same grid as gen that is passed in.
# self.gen has a single cell, while gen is the original input with potentially
# many cells
self.process_internal_edges(self.gen) # N.B. this flips angles
self.g_int=self.create_intermediate_grid_tri()
self.calc_psi_phi()
# got a nice psi/phi field.
# I have some edges with negative scale. Other edges
# will get just the ambient scale.
self.select_node_counts()
# The target contour values, if there weren't any fixed nodes aside from corners:
nd=NodeDiscretization(self.g_int)
Mdx,_=nd.construct_matrix(op='dx') # discard rhs, it's all 0.
Mdy,_=nd.construct_matrix(op='dy') # discard rhs
self.i_contours=patch_contours(self.g_int,self.phi,self.scales[0], len(self.left_i),
Mdx=Mdx,Mdy=Mdy,lowpass_ds_dval=self.lowpass_ds_dval)
self.j_contours=patch_contours(self.g_int,self.psi,self.scales[1], len(self.left_j),
Mdx=Mdx,Mdy=Mdy,lowpass_ds_dval=self.lowpass_ds_dval)
# Now I have the target psi/phi contours
# I know which nodes should be rigid, and their locations.
self.patch,self.patch_elts=self.create_patch()
self.g_final=self.patch
self.psi_field,self.phi_field=self.field_interpolators()
self.position_patch_nodes()
self.patch.cells['_center']=np.nan # otherwise all 0...
if self.smooth_patch:
self.smooth_patch_psiphi_implicit()
self.patch.cells['_area']=np.nan # force recalculation
# heavy handed -- could be done more gracefully during construction
# in fact, this shouldn't be necessary at all anymore.
self.patch.orient_cells()
return self.patch
def position_patch_nodes(self):
"""
Use i_contours,j_contours to set psi/phi for non-rigid nodes.
Use left_i/right_i/left_j/right_j to set x for rigid nodes.
Fill in nodes['pp'] and ['x']. Non-rigid nodes get a target pp, from which
we calculate x. Rigid nodes get a prescribed x, from which we calculate pp.
"""
for i in range(len(self.left_i)):
for j in range(len(self.left_j)):
n=self.patch_elts['nodes'][i,j]
rigid=True
if i==0 and self.left_j_rigid[j]:
x=self.left_j[j]
elif i+1==len(self.left_i) and self.right_j_rigid[j]:
x=self.right_j[j]
elif j==0 and self.left_i_rigid[i]:
x=self.left_i[i]
elif j+1==len(self.left_j) and self.right_i_rigid[i]:
x=self.right_i[i]
else:
rigid=False
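                    # Non-rigid node: target the precomputed psi/phi contour values;
                    # `x` from the previously placed node (still in scope) seeds the
                    # inverse mapping from psi/phi back to geographic coordinates.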
pp=[self.j_contours[j],
self.i_contours[-i-1]] # I think I have to reverse i
x=self.g_int.fields_to_xy(pp,[self.psi,self.phi],x)
if rigid:
pp=[min(1,max(-1,self.psi_field(x))),
min(1,max(-1,self.phi_field(x)))]
self.patch.nodes['x'][n]=x
self.patch.nodes['pp'][n]=pp
self.patch.nodes['rigid'][n]=rigid
def smooth_patch_psiphi_implicit(self,aniso=0.02,monotonify=True):
"""
Anisotropic, implicit smoothing of psi/phi values.
Using the target contours in self.{i,j}_contours
and the existing values in patch.nodes['pp'], adjust
patch.nodes['pp'] to smoothly transition between the
rigid and non-rigid areas.
aniso controls how much smaller the off-axis smoothing
is. "On-axis" means how much each grid line follows a
contour, and "off-axis" means how much the spacing between
grid lines is evened out.
monotonify: if True, then check for monotonicity of coordinates
and iteratively decrease anisotropy until monotonic.
This is a hack fallback for boundary spacing that's not even,
when the default anisotropy would cause self-intersections.
There might be a better solution where the requested resolution
information could be incorporated into the psiphi setup. The
goal of this hack is to make sure the generation step completes.
"""
patch=self.patch
patch_nodes=self.patch_elts['nodes']
target_pp=np.zeros( (patch.Nnodes(),2),np.float64)
target_pp[patch_nodes,0]=self.j_contours[None,:]
target_pp[patch_nodes,1]=self.i_contours[::-1,None]
dpp=patch.nodes['pp']-target_pp
rigid_r=patch.nodes['rigid'][patch_nodes]
rigid_r0=rigid_r.copy() # coordinate 0
rigid_r1=rigid_r.copy()
rigid_r0[:,0]=True
rigid_r0[:,-1]=True
rigid_r1[0,:]=True
rigid_r1[-1,:]=True
N=patch.Nnodes()
dpp_smooths=[None,None]
nrows,ncols=patch_nodes.shape
# dc/dt= d/dx Kx dc/dx + d/dy Ky dc/dy
# steady state, uniform K, dx=dy=1
# 0 = Kx c[-1,0] + Kx c[1,0] + Ky c[0,-1] + Ky c[0,1] - (2Kx+2Ky) c[0,0]
for coord in [0,1]:
adj_aniso=aniso
while 1: # monotonify loop
M=sparse.dok_matrix( (N,N), np.float64)
b=np.zeros(N,np.float64)
K=[1,1]
K[1-coord]*=adj_aniso
rigid_this_coord=[rigid_r0,rigid_r1][coord]
dirich=dpp[:,coord]
for row in range(nrows):
for col in range(ncols):
n=patch_nodes[row,col]
if rigid_this_coord[row,col]:
M[n,n]=1
b[n]=dirich[n]
else:
b[n]=0.0
M[n,n]=-2*(K[0]+K[1])
if row==0:
M[n,patch_nodes[row+1,col]]=2*K[0]
elif row==nrows-1:
M[n,patch_nodes[row-1,col]]=2*K[0]
else:
M[n,patch_nodes[row-1,col]]=K[0]
M[n,patch_nodes[row+1,col]]=K[0]
if col==0:
M[n,patch_nodes[row,col+1]]=2*K[1]
elif col==ncols-1:
M[n,patch_nodes[row,col-1]]=2*K[1]
else:
M[n,patch_nodes[row,col-1]]=K[1]
M[n,patch_nodes[row,col+1]]=K[1]
dpp_smooths[coord]=sparse.linalg.spsolve(M.tocsr(),b)
if monotonify:
# Check for monotonicity
result=target_pp[...,coord] + dpp_smooths[coord]
result_r=result[patch_nodes]
if coord==0:
if np.all(np.diff(result_r,axis=1-coord)>0):
break
else: # coord==1
if np.all(np.diff(result_r,axis=1-coord)<0):
break
adj_aniso*=2
if adj_aniso>=1:
log.warning("Could not adjust anisotropy enough to regain monotonicity")
break
else:
log.warning("Will smooth with decreased anisotropy (%g)"%adj_aniso)
else:
break
dpp_smooth=np.c_[dpp_smooths[0],dpp_smooths[1]]
# Copy back to pp
sel=~patch.nodes['rigid']
patch.nodes['pp'][sel] = target_pp[sel] + dpp_smooth[sel]
# And remap those nodes:
for n in np.nonzero(~patch.nodes['rigid'])[0]:
x_orig=patch.nodes['x'][n]
patch.nodes['x'][n]=self.g_int.fields_to_xy(patch.nodes['pp'][n],
[self.psi,self.phi],
x_orig)
def nudge_boundaries_monotonic(self):
"""
Check that boundary nodes have monotonic psi/phi, and
nudge any violating nodes to be linearly interpolated between
the okay nodes.
Updates pp and x for offending nodes.
This was a halfway solution for non-monotonic nodes before
the smoothing code above was adapted to adjust for non-monotonicity.
Currently nothing uses this method.
"""
nodes_r=self.patch_elts['nodes']
for nlist,coord,sign in [ (nodes_r[:,0],1,-1),
(nodes_r[:,-1],1,-1),
(nodes_r[0,:],0,1),
(nodes_r[-1,:],0,1)]:
# nlist: node indices into patch
# coord: which coordinate of pp to adjust
# sign: +1 for increasing, -1 for decreasing
vals=self.patch.nodes['pp'][nlist,coord]
if np.all(sign*np.diff(vals)>0): continue
rigid=self.patch.nodes['rigid'][nlist]
# At least the rigid ones better be monotonic.
            assert np.all(sign*np.diff(vals[rigid])>0)
i=np.arange(len(vals))
# each entry is the index of the next rigid node, self included.
i_next=i[rigid][ np.searchsorted(i[rigid],i) ]
# each entry is the index of the previous rigid node, self included
i_prev=i[rigid][::-1][ np.searchsorted(-i[rigid][::-1],-i) ]
assert np.all( i_prev[ i[rigid] ] == i[rigid] )
assert np.all( i_next[ i[rigid] ] == i[rigid] )
bad= (~rigid) & ( (sign*vals[i_prev] >= sign*vals) | (sign*vals[i_next]<=sign*vals))
            log.debug("nudge_boundaries_monotonic: interpolating %d non-monotonic boundary nodes"%bad.sum())
vals[bad] = np.interp(i[bad], i[~bad], vals[~bad] )
self.patch.nodes['pp'][nlist,coord]=vals
for n in nlist[bad]:
x=self.g_int.fields_to_xy(self.patch.nodes['pp'][n],[self.psi,self.phi],
self.patch.nodes['x'][n])
self.patch.nodes['x'][n]=x
def smooth_patch_psiphi(self,n_iter=10):
"""
Smooth out the deviations from target i_contour and j_contour, blending
from rigid nodes to the smooth interior
"""
elts=self.patch_elts
patch=self.patch
target_pp=np.zeros( (patch.Nnodes(),2),np.float64)
target_pp[elts['nodes'],0]=self.j_contours[None,:]
target_pp[elts['nodes'],1]=self.i_contours[::-1,None]
dpp=patch.nodes['pp']-target_pp
# Move to 2D array to streamline the smoothing
dpp_r=dpp[elts['nodes']].copy()
rigid_r=patch.nodes['rigid'][elts['nodes']]
rigid_r0=rigid_r.copy() # coordinate 0
rigid_r1=rigid_r.copy()
rigid_r0[:,0]=True # I think this is correct.
rigid_r0[:,-1]=True
rigid_r1[0,:]=True
rigid_r1[-1,:]=True
# could also use a larger window...
#win=np.array([0.5,0,0.5])
win=np.ones(3)/3.
for it in range(n_iter):
smooth=0*dpp_r
# Smoothing is only along the respective coordinate. I.e. phi
# anomalies are smoothed along contours of phi, and psi anomalies
# are smoothed along contours of psi.
smooth[...,0]=signal.fftconvolve(dpp_r[...,0],win[:,None],mode='same')
smooth[...,1]=signal.fftconvolve(dpp_r[...,1],win[None,:],mode='same')
if 1: # well, in some cases might need this...
smooth[...,0]=signal.fftconvolve(smooth[...,0],win[None,:],mode='same')
smooth[...,1]=signal.fftconvolve(smooth[...,1],win[:,None],mode='same')
# Just update the non-rigid nodes:
dpp_r[~rigid_r0,0]=smooth[~rigid_r0,0]
dpp_r[~rigid_r1,1]=smooth[~rigid_r1,1]
dpp=0*patch.nodes['pp']
dpp[elts['nodes']] = dpp_r
# Copy back to pp
sel=~patch.nodes['rigid']
patch.nodes['pp'][sel] = target_pp[sel] + dpp[sel]
# And remap those nodes:
for n in np.nonzero(~patch.nodes['rigid'])[0]:
x_orig=patch.nodes['x'][n]
patch.nodes['x'][n]=self.g_int.fields_to_xy(patch.nodes['pp'][n],
[self.psi,self.phi],
x_orig)
# Invalid circumcenters. May need to invalidate other geometry, too.
patch.cells['_center']=np.nan
##
# # HERE: try some relaxation approaches.
# # First, pin the known fixed nodes, and don't worry about
# # trying to keep everybody on the bezier boundary.
#
# # rigid-ness is carried through from the discretized nodestrings,
# # with nodes with negative scale and corner nodes set as rigid
#
# from stompy.grid import orthogonalize
# tweaker=orthogonalize.Tweaker(patch)
#
# # First, just nudge everybody towards orthogonal:
# # BAD. Too far out of orthogonal.
# for n in patch.valid_node_iter():
# if patch.nodes['rigid'][n]: continue
# tweaker.nudge_node_orthogonal(n)
#
# plt.figure(1)
# plt.cla()
# patch.plot_nodes(mask=patch.nodes['rigid'],color='r',sizes=30)
# patch.plot_edges()
# #plt.axis( (552066.9646997608, 552207.1805374735, 4124548.347825134, 4124660.504092434) )
# plt.axis( (552447.0573990112, 552507.7547532236, 4124244.839335523, 4124293.3901183335) )
#
# ##
# from stompy.grid import orthogonalize
# tweaker=orthogonalize.Tweaker(patch)
#
# n_free=np.nonzero(~patch.nodes['rigid'])[0]
# edge_scales=np.zeros(patch.Nedges(),np.float64)
# ec=patch.edges_center()
#
# for orient,scale in zip( [0,90], qg.scales):
# sel=patch.edges['orient']==orient
# edge_scales[sel] = scale(ec[sel])
#
# ##
#
# # This produces okay results, but it's going to be super slow
# # to converge.
# tweaker.smooth_to_scale( n_free, edge_scales,
# smooth_iters=1,nudge_iters=1)
#
# plt.figure(1)
# plt.cla()
# patch.plot_nodes(mask=patch.nodes['rigid'],color='r',sizes=30)
# patch.plot_edges()
# plt.axis( (552066.9646997608, 552207.1805374735, 4124548.347825134, 4124660.504092434) )
#
# ##
| rustychris/stompy | stompy/grid/quad_laplacian.py | Python | mit | 123,597 | 0.021538 |
"""
Goal: Define the routes for general pages
@authors:
Andrei Sura <[email protected]>
Ruchi Vivek Desai <[email protected]>
Sanath Pasumarthy <[email protected]>
@see https://flask-login.readthedocs.org/en/latest/
@see https://pythonhosted.org/Flask-Principal/
"""
import hashlib
import base64
import datetime
import uuid
from flask import current_app
from flask import redirect
from flask import render_template
from flask import request
from flask import session
from flask import url_for
from app.models.log_entity import LogEntity
from app.models.web_session_entity import WebSessionEntity
from app.models.user_agent_entity import UserAgentEntity
from wtforms import Form, TextField, PasswordField, HiddenField, validators
from flask_login import LoginManager
from flask_login import login_user, logout_user, current_user
from flask_principal import \
Identity, AnonymousIdentity, identity_changed, identity_loaded, RoleNeed
from app.main import app
from app import utils
from app.models.user_entity import UserEntity
# set the login manager for the app
login_manager = LoginManager(app)
# Possible options: strong, basic, None
login_manager.session_protection = "strong"
login_manager.login_message = ""
login_manager.login_message_category = "info"
@login_manager.user_loader
def load_user(user_id):
"""Return the user from the database"""
return UserEntity.get_by_id(user_id)
@login_manager.unauthorized_handler
def unauthorized():
""" Returns a message for the unauthorized users """
return 'Please <a href="{}">login</a> first.'.format(url_for('index'))
@app.errorhandler(403)
def page_not_found(e):
"""
    Redirect to the login page if probing a protected resource before login
"""
return redirect(url_for('index') + "?next={}".format(request.url))
class LoginForm(Form):
""" Declare the validation rules for the login form """
next = HiddenField(default='')
# email = TextField('Email')
email = TextField('Email',
[validators.Required(),
validators.Length(min=4, max=25)])
password = PasswordField('Password',
[validators.Required(),
validators.Length(min=6, max=25)])
def get_user_agent():
"""Find an existing user agent or insert a new one"""
# The raw user agent string received from the browser
uag = request.user_agent
hash = utils.compute_text_md5(uag.string)
# The entity representing the user agent
user_agent = UserAgentEntity.get_by_hash(hash)
if user_agent is None:
platform = uag.platform if uag.platform is not None else ''
browser = uag.browser if uag.browser is not None else ''
version = uag.version if uag.version is not None else ''
language = uag.language if uag.language is not None else ''
user_agent = UserAgentEntity.create(user_agent=uag.string,
hash=hash,
platform=platform,
browser=browser,
version=version,
language=language)
return user_agent
@app.before_request
def check_session_id():
"""
Generate a UUID and store it in the session
as well as in the WebSession table.
"""
user_agent = get_user_agent()
if 'uuid' not in session:
session['uuid'] = str(uuid.uuid4())
WebSessionEntity.create(session_id=session['uuid'],
user_id=current_user.get_id(),
ip=request.remote_addr,
date_time=datetime.datetime.now(),
user_agent=user_agent)
return
if current_user.is_authenticated():
# update the user_id on the first request after login is completed
session_id = session['uuid']
web_session = WebSessionEntity.get_by_session_id(session_id)
if web_session is not None:
web_session = WebSessionEntity.update(
web_session,
user_id=current_user.get_id())
else:
app.logger.error("No row found for sess_id: {}".format(session_id))
@app.route('/', methods=['POST', 'GET'])
def index():
""" Render the login page"""
if app.config['LOGIN_USING_SHIB_AUTH']:
return render_login_shib()
return render_login_local()
def render_login_local():
""" Render the login page with username/pass
@see #index()
@see #render_login_shib()
"""
if current_user.is_authenticated():
return redirect(get_role_landing_page())
uuid = session['uuid']
form = LoginForm(request.form)
if request.method == 'POST' and form.validate():
        email = form.email.data.strip() if form.email.data else ""
        password = form.password.data.strip() if form.password.data else ""
        # Only log the email being checked (never the plaintext password)
        app.logger.debug("Checking email: {}".format(email))
user = UserEntity.query.filter_by(email=email).first()
if user:
app.logger.debug("Found user object: {}".format(user))
else:
utils.flash_error("No such email: {}".format(email))
            LogEntity.login_error(uuid, "No such email: {}".format(email))
return redirect(url_for('index'))
        # TODO: validate the submitted password against the stored hash, e.g.:
        # if utils.is_valid_auth(app.config['SECRET_KEY'], auth.uathSalt,
        #                        password, auth.uathPassword):
        # Placeholder check: only accounts with no stored password hash are accepted.
        if '' == user.password_hash:
app.logger.info('Log login event for: {}'.format(user))
LogEntity.login(uuid, 'Successful login via email/password')
login_user(user, remember=False, force=False)
# Tell Flask-Principal that the identity has changed
identity_changed.send(current_app._get_current_object(),
identity=Identity(user.get_id()))
return redirect(get_role_landing_page())
else:
app.logger.info('Incorrect pass for: {}'.format(user))
LogEntity.login_error(uuid, 'Incorrect pass for: {}'.format(user))
# When sending a GET request render the login form
return render_template('index.html', form=form,
next_page=request.args.get('next'))
@app.route('/loginExternalAuth', methods=['POST', 'GET'])
def shibb_redirect():
"""
Redirect to the local shibboleth instance where
we can pass the return path.
This route is reached when the user clicks the "Login" button.
Note: This is equivalent to Apache's syntax:
Redirect seeother /loginExternalAuth /Shibboleth.sso/Login?target=...
@see #index()
@see #shibb_return()
"""
next_page = "/Shibboleth.sso/Login?target={}"\
.format(url_for('shibb_return'))
return redirect(next_page)
@app.route('/loginExternalAuthReturn', methods=['POST', 'GET'])
def shibb_return():
"""
Read the Shibboleth headers returned by the IdP after
the user entered the username/password.
If the `eduPersonPrincipalName` (aka Eppn) for the user matches the
usrEmail of an active user then let the user in,
otherwise let them see the login page.
@see #shibb_redirect()
"""
if current_user.is_authenticated():
# next_page = request.args.get('next') or get_role_landing_page()
return redirect(get_role_landing_page())
# fresh login...
uuid = session['uuid']
email = request.headers['Mail']
glid = request.headers['Glid'] # Gatorlink ID
app.logger.debug("Checking if email: {} is registered for glid: {}"
.format(email, glid))
user = UserEntity.query.filter_by(email=email).first()
if not user:
utils.flash_error("No such user: {}".format(email))
LogEntity.login_error(uuid,
"Shibboleth user is not registered for this app")
return redirect(url_for('index'))
if not user.is_active():
utils.flash_error("Inactive user: {}".format(email))
LogEntity.login_error(uuid, 'Inactive user tried to login')
return redirect(url_for('index'))
if user.is_expired():
utils.flash_error("User account for {} expired on {}"
.format(email, user.access_expires_at))
LogEntity.login_error(uuid, 'Expired user tried to login')
return redirect(url_for('index'))
# Log it
app.logger.info('Successful login via Shibboleth for: {}'.format(user))
LogEntity.login(uuid, 'Successful login via Shibboleth')
login_user(user, remember=False, force=False)
# Tell Flask-Principal that the identity has changed
identity_changed.send(current_app._get_current_object(),
identity=Identity(user.get_id()))
next_page = get_role_landing_page()
return redirect(next_page)
def render_login_shib():
""" Render the login page with button redirecting to
Shibboleth /loginExternalAuth path
"""
return render_template('login_shib.html', form=request.form)
def get_role_landing_page():
"""
Get the landing page for a user with specific role
:return None if the user has no roles
"""
next_page = request.form.get('next')
# Per Chris's request all users land on the same page
if next_page is not None and next_page != 'None':
return next_page
return url_for('dashboard')
@identity_loaded.connect_via(app)
def on_identity_loaded(sender, identity):
""" Describe what `needs` does this identity provide
@TODO: add unit tests
http://stackoverflow.com/questions/16712321/unit-testing-a-flask-principal-application
"""
    if current_user.__class__.__name__ == 'AnonymousUserMixin':
return
identity.user = current_user
if hasattr(current_user, 'roles'):
for role in current_user.roles:
# app.logger.debug("Provide role: {}".format(role))
identity.provides.add(RoleNeed(role.name))
@login_manager.request_loader
def load_user_from_request(req):
""" To support login from both a url argument and from Basic Auth
using the Authorization header
@TODO: use for api requests?
Need to add column `UserAuth.uathApiKey`
"""
# first, try to login using the api_key url arg
api_key = req.args.get('api_key')
if not api_key:
# next, try to login using Basic Auth
api_key = req.headers.get('Authorization')
if api_key:
api_key = api_key.replace('Basic ', '', 1)
try:
api_key = base64.b64decode(api_key)
except TypeError:
pass
if api_key:
md5 = hashlib.md5()
md5.update(api_key)
app.logger.debug("trying api_key: {}".format(md5.digest()))
user = UserEntity.query.filter_by(api_key=api_key).first()
return user
# finally, return None if neither of the api_keys is valid
return None
@app.route('/logout')
def logout():
""" Destroy the user session and redirect to the home page
Shib:
https://shib.ncsu.edu/docs/logout.html
https://wiki.shibboleth.net/confluence/display/CONCEPT/SLOIssues
"""
# Log the logout
if 'uuid' in session:
LogEntity.logout(session['uuid'])
logout_user()
# Remove session keys set by Flask-Principal, and `uuid` key set manually
for key in ('identity.name', 'identity.auth_type', 'uuid'):
session.pop(key, None)
# Tell Flask-Principal the user is anonymous
identity_changed.send(current_app._get_current_object(),
identity=AnonymousIdentity())
return redirect('/')
| ctsit/barebones-flask-app | app/routes/pages.py | Python | bsd-3-clause | 11,826 | 0 |
import json
import requests
from transliterate import translit
_eng_chars = "~!@#$%^&qwertyuiop[]asdfghjkl;'zxcvbnm,./QWERTYUIOP{}ASDFGHJKL:\"|ZXCVBNM<>?"
_rus_chars = "ё!\"№;%:?йцукенгшщзхъфывапролджэячсмитьбю.ЙЦУКЕНГШЩЗХЪФЫВАПРОЛДЖЭ/ЯЧСМИТЬБЮ,"
_trans_table = dict(zip(_eng_chars, _rus_chars))
def _fix_layout(s):
return "".join([_trans_table.get(c, c) for c in s])
def es_predict(es_url, keywords):
query = set(
keywords +
[_fix_layout(word) for word in keywords] +
[translit(word, "ru") for word in keywords]
)
post_data = json.dumps({
"size": 5,
"query": {
"simple_query_string": {
"query": "|".join(query),
"flags": "OR|PREFIX"
}
}
})
response = requests.post(es_url + "/_search", data=post_data).json()
if "hits" in response and "hits" in response["hits"]:
for it in response["hits"]["hits"]:
if "_source" in it and "query" in it["_source"]:
yield it["_source"]["query"]
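# Minimal usage sketch (illustrative only; the index URL and document layout
# are assumptions, not part of this module):
#
#     for suggestion in es_predict("http://localhost:9200/queries", ["привет"]):
#         print(suggestion)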
| bashkirtsevich/autocode | text_preprocessing/es_predict.py | Python | gpl-3.0 | 1,121 | 0.001898 |
#!/usr/bin/env python
# import the necessary packages
from picamera.array import PiRGBArray
from picamera import PiCamera
import time
import cv2
import logging
from helper.pi_tool import PiTool
logging.basicConfig(filename='log/test_email.log', level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
# initialize the camera and grab a reference to the raw camera capture
camera = PiCamera()
camera.resolution = PiTool.RESOLUTION_SQ_L
rawCapture = PiRGBArray(camera)
# allow the camera to warmup
time.sleep(0.3)
# grab an image from the camera
camera.capture(rawCapture, format="bgr")
#image = PiTool.get_roi_doorhole(rawCapture.array)
image = rawCapture.array
image = PiTool.get_doorhole_roi(image)
# display the image on screen and wait for a keypress
#cv2.imshow("Image", image)
#cv2.waitKey(0)
PiTool.save_and_email(image, "test_email")
| m1ojk/nicedoormat | test_email.py | Python | mit | 898 | 0.004454 |
# type command prints file contents
from lib.utils import *
def _help():
usage = '''
Usage: type (file)
Print content of (file)
Use '%' in front of global
vars to use value as file
name.
'''
print(usage)
def main(argv):
if len(argv) < 1 or '-h' in argv:
_help()
return
    # The shell doesn't send the
# command name in the arg list
# so the next line is not needed
# anymore
# argv.pop(0)
# The shell does the work of replacing
# vars already. Code segment below
# is not required anymore.
# argv=replace_vars(argv)
argv = make_s(argv)
path = get_path() + argv
if os.path.isfile(path):
with open(path) as f:
data = f.readlines()
print('_________________<START>_________________\n')
print(make_s2(data))
print('__________________<END>__________________\n')
return
elif os.path.isdir(path):
err(3, add=argv + ' is a directory')
else:
err(2, path)
| nayas360/pyterm | bin/type.py | Python | mit | 1,002 | 0 |
'''
This is a New BSD License.
http://www.opensource.org/licenses/bsd-license.php
Copyright (c) 2008-2009, Jonathan Hartley ([email protected])
Copyright (c) 2012, Christian Fobel ([email protected])
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of Jonathan Hartley nor the names of contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
import warnings
from collections import OrderedDict
from lxml import etree
from path_helpers import path
from .path_parser import PathParser, ParseError
from ..loop import Loop
from ..geo_path import Path
class SvgParseError(Exception):
pass
def parse_warning(*args):
filename, tag, message = args
msg = 'Error parsing %s:%d, %s\n %s'
if filename:
filename = filename.name
warnings.warn(msg % (filename, tag.sourceline, message,
etree.tostring(tag)), RuntimeWarning)
class Svg(object):
'''
Maintains an ordered list of paths, each one corresponding to a path tag
    from an SVG file. Creates a pyglet Batch containing all these paths, for
rendering as a single OpenGL GL_TRIANGLES indexed vert primitive.
'''
def __init__(self):
self.paths = OrderedDict()
def add_path(self, id, path):
self.paths[id] = path
def add_to_batch(self, batch):
'''
Adds paths to the given batch object. They are all added as
GL_TRIANGLES, so the batch will aggregate them all into a single OpenGL
primitive.
'''
for name in self.paths:
svg_path = self.paths[name]
svg_path.add_to_batch(batch)
def get_bounding_box(self):
points = list(self.all_verts())
x_vals = zip(*points)[0]
y_vals = zip(*points)[1]
min_x, min_y = min(x_vals), min(y_vals)
max_x, max_y = max(x_vals), max(y_vals)
return Loop([(min_x, min_y), (min_x, max_y), (max_x, max_y),
(max_x, min_y)])
def get_boundary(self):
if 'boundary' in self.paths:
boundary = self.paths['boundary']
else:
boundary = Path([self.get_bounding_box()])
return boundary
def all_verts(self):
for svg_path in self.paths.itervalues():
for loop in svg_path.loops:
for vert in loop.verts:
yield vert
class SvgParser(object):
'''
parse(filename) returns an Svg object, populated from the <path> tags
in the file.
'''
def parse_file(self, filename, on_error=None):
self.filename = path(filename)
xml_root = etree.parse(self.filename)
return self.parse(xml_root, on_error)
def parse(self, xml_root, on_error=None):
'''
Parse all <path> elements from xml_root.
Optional on_error arg specifies a callback function to be run when
an error occurs during parsing.
The specified on_error function must accept 3 arguments:
<svg filename>, <path_tag>, <error message>
An example on_error handler is provided as svg_load.svg_parser.parse_warning(),
where all SvgParseErrors are converted to warning messages. See usage below:
>>> import re
>>> svg_parser = SvgParser()
>>> path_tag = etree.XML("""
... <path
... xmlns="http://www.w3.org/2000/svg"
... xmlns:dc="http://purl.org/dc/elements/1.1/"
... xmlns:cc="http://creativecommons.org/ns#"
... xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
... xmlns:svg="http://www.w3.org/2000/svg"
... xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
... xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
... id="path13231"
... d="M8 4 l-4,4"
... linecap="square"
... stroke="#000000"
... stroke-width="0.25"
... />""")
>>> with warnings.catch_warnings(record=True) as w:
... svg = svg_parser.parse(path_tag, on_error=parse_warning)
>>> print w[-1].category
<type 'exceptions.RuntimeWarning'>
>>> match = re.search(r'^Error parsing None:\d+, unsupported svg path command: l', str(w[-1].message))
>>> print match is None
False
>>> path_tag = etree.XML("""
... <path
... xmlns="http://www.w3.org/2000/svg" xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
... xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:cc="http://creativecommons.org/ns#"
... xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
... xmlns:svg="http://www.w3.org/2000/svg"
... xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
... style="fill:#0000ff;stroke:#ff0000;stroke-width:0.10000000000000001;stroke-miterlimit:4;stroke-dasharray:none"
... id="path18327"
... d="M 636.0331,256.9345 L 636.0331,256.9345"
... inkscape:connector-curvature="0"/>""")
>>> with warnings.catch_warnings(record=True) as w:
... svg = svg_parser.parse(path_tag, on_error=parse_warning)
>>> print w[-1].category
<type 'exceptions.RuntimeWarning'>
>>> match = re.search(r'^Error parsing None:\d+, loop needs 3 or more verts', str(w[-1].message))
>>> print match is None
False
'''
svg = Svg()
svg_namespace = {'svg': 'http://www.w3.org/2000/svg'}
path_tags = xml_root.xpath('(/svg:svg|/svg:svg/svg:g)/svg:path',
namespaces=svg_namespace)
parser = PathParser()
for path_tag in path_tags:
try:
id, svg_path = parser.parse(path_tag)
if svg_path.loops:
svg.add_path(id, svg_path)
except (ParseError, ), why:
filename = getattr(self, 'filename', None)
args = (filename, path_tag, why.message)
if on_error:
on_error(*args)
else:
raise SvgParseError(*args)
if svg.paths:
x, y = svg.get_boundary().get_center()
for svg_path in svg.paths.values():
svg_path.offset(-x, -y)
return svg
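# Minimal usage sketch (illustrative; the file name is an assumption):
#
#     svg = SvgParser().parse_file('device.svg', on_error=parse_warning)
#     for path_id, svg_path in svg.paths.iteritems():
#         print path_id, len(svg_path.loops)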
| wheeler-microfluidics/svg_model | svg_model/svgload/svg_parser.py | Python | lgpl-2.1 | 7,624 | 0.002361 |
class Solution(object):
def isPowerOfFour(self, num):
"""
:type num: int
:rtype: bool
"""
if num < 1:
return False
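        # num is a power of four iff it is a power of two (num & (num - 1) == 0)
        # and its single set bit sits at an even position; 0x55555555 masks the
        # even bit positions (1, 4, 16, ...).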
return num & (num - 1) == 0 and num & 0x55555555 > 0
| Jacy-Wang/MyLeetCode | PowerofFour342.py | Python | gpl-2.0 | 232 | 0 |
from .mim import Connection, match, MatchDoc, MatchList, BsonArith
| rick446/MongoTools | mongotools/mim/__init__.py | Python | mit | 67 | 0 |
from __future__ import print_function
from __future__ import absolute_import
# System modules
import os
# Third-party modules
# LLDB modules
import lldb
from .lldbtest import *
from . import configuration
from . import lldbutil
from .decorators import *
def source_type(filename):
_, extension = os.path.splitext(filename)
return {
'.c': 'C_SOURCES',
'.cpp': 'CXX_SOURCES',
'.cxx': 'CXX_SOURCES',
'.cc': 'CXX_SOURCES',
'.m': 'OBJC_SOURCES',
'.mm': 'OBJCXX_SOURCES'
}.get(extension, None)
class CommandParser:
def __init__(self):
self.breakpoints = []
def parse_one_command(self, line):
parts = line.split('//%')
command = None
new_breakpoint = True
if len(parts) == 2:
command = parts[1].strip() # take off whitespace
new_breakpoint = parts[0].strip() != ""
return (command, new_breakpoint)
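    # Example of the inline command syntax handled above (illustrative only):
    #     int x = foo(); //% self.expect("expression x", substrs=["42"])
    #     //% self.runCmd("bt")
    # A line whose only content is a //% comment continues the previous
    # breakpoint's command list rather than starting a new breakpoint.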
def parse_source_files(self, source_files):
for source_file in source_files:
file_handle = open(source_file)
lines = file_handle.readlines()
line_number = 0
            # non-None means we're looking through whitespace to find
# additional commands
current_breakpoint = None
for line in lines:
line_number = line_number + 1 # 1-based, so we do this first
(command, new_breakpoint) = self.parse_one_command(line)
if new_breakpoint:
current_breakpoint = None
if command is not None:
if current_breakpoint is None:
current_breakpoint = {}
current_breakpoint['file_name'] = source_file
current_breakpoint['line_number'] = line_number
current_breakpoint['command'] = command
self.breakpoints.append(current_breakpoint)
else:
current_breakpoint['command'] = current_breakpoint[
'command'] + "\n" + command
def set_breakpoints(self, target):
for breakpoint in self.breakpoints:
breakpoint['breakpoint'] = target.BreakpointCreateByLocation(
breakpoint['file_name'], breakpoint['line_number'])
def handle_breakpoint(self, test, breakpoint_id):
for breakpoint in self.breakpoints:
if breakpoint['breakpoint'].GetID() == breakpoint_id:
test.execute_user_command(breakpoint['command'])
return
class InlineTest(TestBase):
# Internal implementation
def BuildMakefile(self):
makefilePath = self.getBuildArtifact("Makefile")
if os.path.exists(makefilePath):
return
categories = {}
for f in os.listdir(self.getSourceDir()):
t = source_type(f)
if t:
if t in list(categories.keys()):
categories[t].append(f)
else:
categories[t] = [f]
makefile = open(makefilePath, 'w+')
level = os.sep.join(
[".."] * len(self.mydir.split(os.sep))) + os.sep + "make"
makefile.write("LEVEL = " + level + "\n")
for t in list(categories.keys()):
line = t + " := " + " ".join(categories[t])
makefile.write(line + "\n")
if ('OBJCXX_SOURCES' in list(categories.keys())) or (
'OBJC_SOURCES' in list(categories.keys())):
makefile.write(
"LDFLAGS = $(CFLAGS) -lobjc -framework Foundation\n")
if ('CXX_SOURCES' in list(categories.keys())):
makefile.write("CXXFLAGS += -std=c++11\n")
makefile.write("include $(LEVEL)/Makefile.rules\n")
makefile.write("\ncleanup:\n\trm -f Makefile *.d\n\n")
makefile.flush()
makefile.close()
def _test(self):
self.BuildMakefile()
self.build()
self.do_test()
def execute_user_command(self, __command):
exec(__command, globals(), locals())
def do_test(self):
exe = self.getBuildArtifact("a.out")
source_files = [f for f in os.listdir(self.getSourceDir())
if source_type(f)]
target = self.dbg.CreateTarget(exe)
parser = CommandParser()
parser.parse_source_files(source_files)
parser.set_breakpoints(target)
process = target.LaunchSimple(None, None, self.get_process_working_directory())
hit_breakpoints = 0
while lldbutil.get_stopped_thread(process, lldb.eStopReasonBreakpoint):
hit_breakpoints += 1
thread = lldbutil.get_stopped_thread(
process, lldb.eStopReasonBreakpoint)
breakpoint_id = thread.GetStopReasonDataAtIndex(0)
parser.handle_breakpoint(self, breakpoint_id)
process.Continue()
self.assertTrue(hit_breakpoints > 0,
"inline test did not hit a single breakpoint")
# Either the process exited or the stepping plan is complete.
self.assertTrue(process.GetState() in [lldb.eStateStopped,
lldb.eStateExited],
PROCESS_EXITED)
# Utilities for testcases
def check_expression(self, expression, expected_result, use_summary=True):
value = self.frame().EvaluateExpression(expression)
self.assertTrue(value.IsValid(), expression + "returned a valid value")
if self.TraceOn():
print(value.GetSummary())
print(value.GetValue())
if use_summary:
answer = value.GetSummary()
else:
answer = value.GetValue()
report_str = "%s expected: %s got: %s" % (
expression, expected_result, answer)
self.assertTrue(answer == expected_result, report_str)
def ApplyDecoratorsToFunction(func, decorators):
tmp = func
if isinstance(decorators, list):
for decorator in decorators:
tmp = decorator(tmp)
elif hasattr(decorators, '__call__'):
tmp = decorators(tmp)
return tmp
def MakeInlineTest(__file, __globals, decorators=None):
# Adjust the filename if it ends in .pyc. We want filenames to
# reflect the source python file, not the compiled variant.
if __file is not None and __file.endswith(".pyc"):
# Strip the trailing "c"
__file = __file[0:-1]
# Derive the test name from the current file name
file_basename = os.path.basename(__file)
InlineTest.mydir = TestBase.compute_mydir(__file)
test_name, _ = os.path.splitext(file_basename)
test_func = ApplyDecoratorsToFunction(InlineTest._test, decorators)
# Build the test case
test_class = type(test_name, (InlineTest,), dict(test=test_func, name=test_name))
# Add the test case to the globals, and hide InlineTest
__globals.update({test_name: test_class})
# Keep track of the original test filename so we report it
# correctly in test results.
test_class.test_filename = __file
return test_class
| youtube/cobalt | third_party/llvm-project/lldb/packages/Python/lldbsuite/test/lldbinline.py | Python | bsd-3-clause | 7,168 | 0.000279 |
#!/usr/bin/python
import json
import logging
import sys
from datetime import datetime
import csv
if __name__ == '__main__':
_loggingLevel = logging.DEBUG ## How much trace
logger = logging.getLogger(__name__)
logging.basicConfig(level=_loggingLevel)
a = {}
altmetricFile = sys.argv[1]
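    # Load the altmetric file (one URL per line) into dict keys for fast lookup.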
with open(altmetricFile) as afData:
for line in afData:
data = line.rstrip('\n')
a[data] = 0
with open(sys.argv[2], 'rb') as csvfile:
spamreader = csv.reader(csvfile, delimiter='$', quotechar='\'')
for line in spamreader:
id = line[0]
title = line[1]
url = line[2]
dateP = line[3]
restP = line[4]
actorId = line[5]
actorUrl = line[6]
actorDisplayName = line[7]
verb = line[8]
objectId = line[9]
objectActorId = line[10]
objectActorDisplayName = line[11]
objectContent = line[12]
if url not in a.keys():
in_altmetric = "N"
else:
in_altmetric = "Y"
print("%s$%s$%s$%s$%s$%s$%s$%s$%s$%s$%s$%s$%s$%r" %(dateP, restP, id, title, in_altmetric, url, verb, actorDisplayName, actorId, actorUrl, objectId, objectActorId, objectActorDisplayName, objectContent))
| sinharrajesh/dbtools | google-plus-analysis/clarify.py | Python | apache-2.0 | 1,344 | 0.004464 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for remote execution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import random
import time
from absl.testing import parameterized
import numpy as np
import six
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute.cluster_resolver.cluster_resolver import SimpleClusterResolver
from tensorflow.python.eager import cancellation
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import remote
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.training import server_lib
from tensorflow.python.training.server_lib import ClusterSpec
class SingleWorkerTest(test.TestCase, parameterized.TestCase):
def setUp(self):
super(SingleWorkerTest, self).setUp()
workers, _ = test_util.create_local_cluster(1, 0)
remote.connect_to_remote_host(workers[0].target)
def tearDown(self):
super(SingleWorkerTest, self).tearDown()
# Clear the current device scope to avoid polluting other test cases.
ops.device(None).__enter__()
# Reset the context to avoid polluting other test cases.
context._reset_context()
@test_util.eager_lazy_remote_copy_on_and_off
def testMultiDeviceFunctionBasic(self):
@def_function.function
def basic(i):
with ops.device('/job:localhost/replica:0/task:0/cpu:0'):
a = constant_op.constant([2]) + i
with ops.device('/job:worker/replica:0/task:0/cpu:0'):
b = constant_op.constant([1])
return a + b
self.assertAllEqual(basic(constant_op.constant([2])).numpy(), [5])
self.assertAllEqual(basic(constant_op.constant([1])).numpy(), [4])
@test_util.eager_lazy_remote_copy_on_and_off
def testMultiDeviceFunctionVariable(self):
with ops.device('/job:worker/replica:0/task:0/cpu:0'):
variable_b = variables.Variable(1)
@def_function.function
def with_variable(i):
return i + variable_b
self.assertAllEqual(with_variable(constant_op.constant([2])).numpy(), [3])
def testMultiDeviceFunctionRemoteOutput(self):
with ops.device('/job:worker/replica:0/task:0/cpu:0'):
variable_b = variables.Variable(1)
@def_function.function
def remote_output(i):
with ops.device('/job:worker/replica:0/task:0/cpu:0'):
c = variable_b + 1
return i + variable_b, c
rets = remote_output(constant_op.constant([1]))
self.assertAllEqual(rets[0].numpy(), [2])
self.assertAllEqual(rets[1].numpy(), 2)
self.assertEqual(rets[0].backing_device,
'/job:localhost/replica:0/task:0/device:CPU:0')
self.assertEqual(rets[1].backing_device,
'/job:worker/replica:0/task:0/device:CPU:0')
def testMultiDeviceFunctionAmbiguousDevice(self):
@def_function.function
def ambiguous_device(i):
with ops.device('cpu:0'):
return i + constant_op.constant([2])
with self.assertRaises(errors.InvalidArgumentError) as cm:
with ops.device('/job:worker/replica:0/task:0/cpu:0'):
ambiguous_device(constant_op.constant([2])).numpy()
self.assertIn('the output node must match exactly one device',
cm.exception.message)
def testStreaming(self):
"""A mini stress test for streaming - issuing many RPCs back to back."""
with ops.device('job:worker/replica:0/task:0/device:CPU:0'):
x = array_ops.ones([2, 2])
y = array_ops.zeros([2, 2])
num_iters = 200
for _ in range(num_iters):
y = x + y
# Ask for y's shape after every 10 additions on average.
# This exercises waiting for remote shape logic in TensorHandle.
if random.randint(1, 10) == 1:
_ = y.shape
np.testing.assert_array_equal(
[[num_iters, num_iters], [num_iters, num_iters]], y.numpy())
def testShapeError_OpByOp(self):
with ops.device('job:worker/replica:0/task:0/device:CPU:0'):
x = array_ops.ones([2, 3])
y = array_ops.zeros([2, 2])
with self.assertRaises(errors.InvalidArgumentError) as cm:
math_ops.matmul(x, y)
self.assertIn('Dimensions must be equal', cm.exception.message)
@test_util.eager_lazy_remote_copy_on_and_off
def testShapeError_Function(self):
@def_function.function
def matmul_func(x, y):
return math_ops.matmul(x, y)
x = array_ops.ones([2, 3])
y = array_ops.zeros([2, 2])
with ops.device('job:worker/replica:0/task:0/device:CPU:0'):
with self.assertRaises(ValueError) as cm:
matmul_func(x, y)
if six.PY2:
self.assertIn('Dimensions must be equal', cm.exception.message)
else:
self.assertIn('Dimensions must be equal', cm.exception.args[0])
  def testClientVariable(self):
var = variables.Variable(initial_value=0)
@def_function.function
def func():
with ops.device('/job:localhost/task:0'):
read = var.read_value()
return read + 1
with ops.device('/job:worker/task:0'):
self.assertAllEqual(func(), 1)
@test_util.eager_lazy_remote_copy_on_and_off
def testRemoteCall(self):
@def_function.function(
input_signature=[tensor_spec.TensorSpec([], dtypes.int32)])
def _remote_fn(x):
return constant_op.constant(1) + x
remote_fn = _remote_fn.get_concrete_function()
@def_function.function
def func(x):
return functional_ops.remote_call(
args=[x],
Tout=[dtypes.int32],
f=remote_fn,
target='/job:worker/task:0')
with ops.device('/job:localhost/task:0'):
self.assertAllEqual(func(constant_op.constant(1)), [2])
class RemoteAsyncTest(test.TestCase):
def setUp(self):
super(RemoteAsyncTest, self).setUp()
workers, _ = test_util.create_local_cluster(1, 0)
remote.connect_to_remote_host(workers[0].target)
def tearDown(self):
super(RemoteAsyncTest, self).tearDown()
# Reset the context to avoid polluting other test cases.
context._reset_context()
def test_out_of_range_with_while_loop(self):
with ops.device('/job:worker/task:0'):
dataset = dataset_ops.Dataset.from_tensor_slices([1.0, 2.0])
dataset = dataset.batch(1, drop_remainder=False)
iterator = iter(dataset)
v = variables.Variable(1.0)
@def_function.function
def train_step(iterator):
i = next(iterator)
v.assign_add(math_ops.reduce_mean(i))
while True:
try:
with ops.device('/job:worker/task:0'):
train_step(iterator)
except (errors.OutOfRangeError, errors.InternalError):
context.async_clear_error()
break
self.assertAllEqual(v.numpy(), 4.0)
def test_out_of_range_with_for_loop(self):
with ops.device('/job:worker/task:0'):
dataset = dataset_ops.Dataset.from_tensor_slices([1.0, 2.0])
dataset = dataset.batch(1, drop_remainder=False)
iterator = iter(dataset)
v = variables.Variable(1.0)
@def_function.function
def train_step(iterator):
i = next(iterator)
v.assign_add(math_ops.reduce_mean(i))
num_steps = 3
for i in range(num_steps):
try:
with ops.device('/job:worker/task:0'):
train_step(iterator)
if i == num_steps - 1:
context.async_wait()
except errors.OutOfRangeError:
context.async_clear_error()
break
self.assertAllEqual(v.numpy(), 4.0)
def test_out_of_range_with_async_scope(self):
with ops.device('/job:worker/task:0'):
dataset = dataset_ops.Dataset.from_tensor_slices([1.0, 2.0])
dataset = dataset.batch(1, drop_remainder=False)
iterator = iter(dataset)
v = variables.Variable(1.0)
@def_function.function
def train_step(iterator):
i = next(iterator)
v.assign_add(math_ops.reduce_mean(i))
num_steps = 3
try:
with context.async_scope():
for _ in range(num_steps):
with ops.device('/job:worker/task:0'):
train_step(iterator)
except errors.OutOfRangeError:
context.async_clear_error()
self.assertAllEqual(v.numpy(), 4.0)
class MultiWorkersTest(test.TestCase, parameterized.TestCase):
def setUp(self):
super(MultiWorkersTest, self).setUp()
workers, _ = test_util.create_local_cluster(3, 0)
remote.connect_to_remote_host(
[workers[0].target, workers[1].target, workers[2].target])
def tearDown(self):
super(MultiWorkersTest, self).tearDown()
# Clear the current device scope to avoid polluting other test cases.
ops.device(None).__enter__()
# Reset the context to avoid polluting other test cases.
context._reset_context()
@test_util.eager_lazy_remote_copy_on_and_off
def testReturnRemoteArgument(self):
@def_function.function
def local_func(i):
return i
with ops.device('/job:worker/replica:0/task:0'):
x = constant_op.constant([2, 1])
with ops.device('/job:worker/replica:0/task:1'):
self.assertAllEqual(local_func(x), [2, 1])
  # Note that the following tests for remote function cancellation only work
  # when streaming RPC is disabled. We need to disable streaming explicitly and
  # restore this config to its initial value at the end of each test case.
def testCancelRemoteFunctionBeforeExecution(self):
remote_async_env_var = 'TF_ENABLE_EAGER_CLIENT_STREAMING_ENQUEUE'
default_streaming = os.environ.get(remote_async_env_var)
os.environ[remote_async_env_var] = str(False)
q = data_flow_ops.FIFOQueue(1, dtypes.int32)
@def_function.function
def f():
return q.dequeue()
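    # The queue is never fed, so dequeue() would block indefinitely; this gives
    # the cancellation manager a call that can only finish by being cancelled.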
c_mgr = cancellation.CancellationManager()
cancelable_func = c_mgr.get_cancelable_function(f.get_concrete_function())
c_mgr.start_cancel()
with self.assertRaises(errors.CancelledError):
with ops.device('/job:worker/replica:0/task:1'):
cancelable_func()
if default_streaming is None:
del os.environ[remote_async_env_var]
else:
os.environ[remote_async_env_var] = default_streaming
def testCancelRemoteFunctionDuringExecution(self):
remote_async_env_var = 'TF_ENABLE_EAGER_CLIENT_STREAMING_ENQUEUE'
default_streaming = os.environ.get(remote_async_env_var)
os.environ[remote_async_env_var] = str(False)
q = data_flow_ops.FIFOQueue(1, dtypes.int32)
@def_function.function
def f():
return q.dequeue()
c_mgr = cancellation.CancellationManager()
cancelable_func = c_mgr.get_cancelable_function(f.get_concrete_function())
def cancel_thread():
time.sleep(0.5)
c_mgr.start_cancel()
t = self.checkedThread(cancel_thread)
t.start()
with self.assertRaises(errors.CancelledError):
with ops.device('/job:worker/replica:0/task:1'):
cancelable_func()
t.join()
if default_streaming is None:
del os.environ[remote_async_env_var]
else:
os.environ[remote_async_env_var] = default_streaming
@test_util.eager_lazy_remote_copy_on_and_off
def testMultiDeviceFunctionOnLocalDevice(self):
with ops.device('/job:worker/replica:0/task:1'):
variable_b = variables.Variable(1.0)
@def_function.function
def remote_function(i):
with ops.device('/job:worker/replica:0/task:0'):
a = i + variable_b
c = a + 1.0
return c
self.assertAllEqual(remote_function(constant_op.constant([1.0])), [3.0])
def testMultiDeviceFunctionExecutionOrderingWithPackedInput(self):
shape = [2]
with ops.device('/job:worker/replica:0/task:2/device:CPU:0'):
# Send 20 remote requests to simulate heavy load on worker:2.
unused_values = []
for _ in range(20):
unused_values.append(array_ops.zeros(shape))
func_input = array_ops.zeros(shape)
packed_input = ops.pack_eager_tensors([func_input])
@def_function.function
def func(packed_input):
# When worker:2 receives the component function request, packed_input
# should be ready on worker:2.
with ops.device('/job:worker/replica:0/task:2/device:CPU:0'):
ret = packed_input + constant_op.constant(1.0)
return ret + constant_op.constant(1.0)
# Run the function on a worker:1
with ops.device('/job:worker/replica:0/task:1/device:CPU:0'):
self.assertAllEqual(func(packed_input).numpy(),
array_ops.ones(shape).numpy() * 2)
def testMultiDeviceFunctionWithPackedVariable(self):
with ops.device('/job:worker/replica:0/task:0/device:CPU:0'):
var0 = resource_variable_ops.ResourceVariable(1.0)
with ops.device('/job:worker/replica:0/task:1/device:CPU:0'):
var1 = resource_variable_ops.ResourceVariable(2.0)
packed_var = ops.pack_eager_tensors([var0.handle, var1.handle])
self.assertEqual(packed_var.device,
'/job:localhost/replica:0/task:0/device:COMPOSITE:0')
self.assertEqual(packed_var.backing_device,
'/job:localhost/replica:0/task:0/device:COMPOSITE:0')
@def_function.function
def add_variables():
with ops.device('/job:worker/replica:0/task:0/device:CPU:0'):
read0 = resource_variable_ops.read_variable_op(
packed_var, dtype=dtypes.float32)
with ops.device('/job:worker/replica:0/task:1/device:CPU:0'):
read1 = resource_variable_ops.read_variable_op(
packed_var, dtype=dtypes.float32)
return read0 + read1
# Run the function on a remote device
with ops.device('/job:worker/replica:0/task:0'):
self.assertAllEqual(add_variables().numpy(), 3.0)
# Run the function on a local worker
self.assertAllEqual(add_variables().numpy(), 3.0)
@test_util.eager_lazy_remote_copy_on_and_off
def testMultiDeviceFunctionOnRemoteDeviceWithWait(self):
with ops.device('/job:worker/replica:0/task:1'):
variable_b = variables.Variable([1.0])
@def_function.function
def remote_function(i):
x = array_ops.ones([1000, 1000])
for _ in range(1, 1000):
x = x * x
variable_b.assign_add(i)
a = 1.0 + variable_b
return a
@def_function.function
def remote_function2(i):
variable_b.assign_add(i)
a = 1.0 + variable_b
return a
# Runs first function:
# - on remote device
# - needs remote input
    # - has side effects
# - runs much slower
with ops.device('/job:worker/replica:0/task:0'):
remote_function(constant_op.constant([2.0]))
# Runs second function:
# - on remote device
    # - has side effects
# There should be a sync point here and the next function will be executed
# only after the first function has completed.
with ops.device('/job:worker/replica:0/task:2'):
self.assertAllEqual(remote_function2(constant_op.constant([3.0])), [7.0])
@test_util.eager_lazy_remote_copy_on_and_off
def testMultiDeviceFunctionOnRemoteDevice(self):
with ops.device('/job:worker/replica:0/task:1'):
variable_b = variables.Variable(1.0)
@def_function.function
def remote_function(i):
with ops.device('/job:worker/replica:0/task:0'):
a = i + variable_b
c = a + 1.0
return c
with ops.device('/job:worker/replica:0/task:0'):
self.assertAllEqual(remote_function(constant_op.constant([1.0])), [3.0])
if test_util.is_gpu_available():
with ops.device('/job:worker/replica:0/task:0/device:GPU:0'):
self.assertAllEqual(remote_function(constant_op.constant([1.0])), [3.0])
def testMultiDeviceFunctionRemoteOutput(self):
with ops.device('/job:worker/replica:0/task:1/cpu:0'):
variable_b = variables.Variable(1)
@def_function.function
def remote_output(i):
with ops.device('/job:worker/replica:0/task:1/cpu:0'):
c = variable_b + 1
return i + variable_b, c
with ops.device('/job:worker/replica:0/task:0/cpu:0'):
rets = remote_output(constant_op.constant([1]))
self.assertEqual(rets[0].backing_device,
'/job:worker/replica:0/task:0/device:CPU:0')
self.assertEqual(rets[1].backing_device,
'/job:worker/replica:0/task:1/device:CPU:0')
self.assertAllEqual(rets[0].numpy(), [2])
self.assertAllEqual(rets[1].numpy(), 2)
@test_util.eager_lazy_remote_copy_on_and_off
def testMultiDeviceWhileLoopOnRemoteDevice(self):
with ops.device('/job:worker/replica:0/task:1'):
variable_b = variables.Variable(1.0)
@def_function.function
def remote_function(i):
def body(i, _):
with ops.device('/job:worker/replica:0/task:0'):
a = i + variable_b
return a + 1.0, 1
return control_flow_ops.while_loop_v2(lambda _, d: d < 1, body, [i, 0])[0]
with ops.device('/job:worker/replica:0/task:0'):
self.assertAllEqual(remote_function(constant_op.constant([1.0])), [3.0])
if test_util.is_gpu_available():
with ops.device('/job:worker/replica:0/task:0/device:GPU:0'):
self.assertAllEqual(remote_function(constant_op.constant([1.0])), [3.0])
@test_util.eager_lazy_remote_copy_on_and_off
def testSimpleParameterServer(self):
with ops.device('/job:worker/task:2/device:CPU:0'):
v1 = variables.Variable(initial_value=0)
v2 = variables.Variable(initial_value=10)
@def_function.function
def worker_fn():
v1.assign_add(1)
v2.assign_sub(2)
return v1.read_value() + v2.read_value()
with ops.device('/job:worker/task:0/device:CPU:0'):
self.assertAllEqual(worker_fn(), 9)
with ops.device('/job:worker/task:1/device:CPU:0'):
self.assertAllEqual(worker_fn(), 8)
_GRPC_PREFIX = 'grpc://'
class MultiJobsTest(test.TestCase, parameterized.TestCase):
def setUp(self):
super(MultiJobsTest, self).setUp()
workers, ps = test_util.create_local_cluster(num_workers=2, num_ps=2)
cluster = {
'my_worker': [_strip_prefix(t.target, _GRPC_PREFIX) for t in workers],
'my_ps': [_strip_prefix(t.target, _GRPC_PREFIX) for t in ps],
}
self._cluster = server_lib.ClusterSpec(cluster)
self._cluster_resolver = SimpleClusterResolver(
cluster_spec=self._cluster, master=ps[0].target)
def tearDown(self):
super(MultiJobsTest, self).tearDown()
# Clear the current device scope to avoid polluting other test cases.
ops.device(None).__enter__()
# Reset the context to avoid polluting other test cases.
context._reset_context()
@test_util.eager_lazy_remote_copy_on_and_off
def testSimpleParameterServer(self):
remote.connect_to_cluster(self._cluster)
with ops.device('/job:my_ps/task:0/device:CPU:0'):
v1 = variables.Variable(initial_value=0)
v2 = variables.Variable(initial_value=10)
@def_function.function
def worker_fn():
v1.assign_add(1)
v2.assign_sub(2)
return v1.read_value() + v2.read_value()
with ops.device('/job:my_worker/task:0/device:CPU:0'):
self.assertAllEqual(worker_fn(), 9)
with ops.device('/job:my_worker/task:1/device:CPU:0'):
self.assertAllEqual(worker_fn(), 8)
# TODO(b/152224115): Re-enable this test.
@test_util.eager_lazy_remote_copy_on_and_off
def DISABLED_testSimpleParameterServerWithDeviceFilters(self):
cluster_device_filters = server_lib.ClusterDeviceFilters()
for i in range(2):
cluster_device_filters.set_device_filters('my_worker', i, ['/job:my_ps'])
cluster_device_filters.set_device_filters('my_ps', i, ['/job:my_worker'])
remote.connect_to_cluster(
self._cluster, cluster_device_filters=cluster_device_filters)
with ops.device('/job:my_ps/task:0/device:CPU:0'):
v1 = variables.Variable(initial_value=0)
with ops.device('/job:my_ps/task:1/device:CPU:0'):
v2 = variables.Variable(initial_value=10)
@def_function.function
def worker_fn():
v1.assign_add(1)
v2.assign_sub(2)
return v1.read_value() + v2.read_value()
with ops.device('/job:my_worker/task:0/device:CPU:0'):
self.assertAllEqual(worker_fn(), 9)
with ops.device('/job:my_worker/task:1/device:CPU:0'):
self.assertAllEqual(worker_fn(), 8)
# The following remote call would fail because the ps nodes cannot see each
# other due to the device filters.
with self.assertRaises(errors.InvalidArgumentError) as cm:
with ops.device('/job:my_ps/task:0/device:CPU:0'):
worker_fn().numpy()
self.assertIn('/job:my_ps/replica:0/task:1/device:CPU:0 unknown device',
cm.exception.message)
with self.assertRaises(errors.InvalidArgumentError) as cm:
with ops.device('/job:my_ps/task:1/device:CPU:0'):
worker_fn().numpy()
self.assertIn('/job:my_ps/replica:0/task:0/device:CPU:0 unknown device',
cm.exception.message)
with ops.device('/job:my_worker/task:0/device:CPU:0'):
self.assertAllEqual(worker_fn(), 7)
with ops.device('/job:my_worker/task:1/device:CPU:0'):
self.assertAllEqual(worker_fn(), 6)
# Explicitly delete variables to avoid triggering errors when being GC'ed in
# subsequent tests.
del v1, v2
@test_util.eager_lazy_remote_copy_on_and_off
def testConnectWithClusterResolver(self):
remote.connect_to_cluster(self._cluster_resolver)
v1 = variables.Variable(initial_value=0)
v2 = variables.Variable(initial_value=10)
@def_function.function
def worker_fn():
v1.assign_add(1)
v2.assign_sub(2)
return v1.read_value() + v2.read_value()
with ops.device('/job:my_worker/task:0/device:CPU:0'):
self.assertAllEqual(worker_fn(), 9)
with ops.device('/job:my_worker/task:1/device:CPU:0'):
self.assertAllEqual(worker_fn(), 8)
@test_util.eager_lazy_remote_copy_on_and_off
def testConnectToClusterTwiceOk(self):
remote.connect_to_cluster(self._cluster_resolver)
remote.connect_to_cluster(self._cluster_resolver)
@test_util.eager_lazy_remote_copy_on_and_off
def testConnectToClusterOnMismatchedDevice(self):
remote.connect_to_cluster(self._cluster_resolver)
# enter into another device scope.
ops.device('/job:my_worker/task:0/device:CPU:0').__enter__()
with self.assertRaises(ValueError):
remote.connect_to_cluster(self._cluster_resolver)
@test_util.eager_lazy_remote_copy_on_and_off
def testConnectToClusterWithLocalMaster(self):
local_resolver = SimpleClusterResolver(ClusterSpec({}), master='local')
remote.connect_to_cluster(local_resolver)
@test_util.eager_lazy_remote_copy_on_and_off
def testConnectToClusterInGraphModeWillFail(self):
ops.disable_eager_execution()
with self.assertRaises(ValueError):
remote.connect_to_cluster(self._cluster_resolver)
ops.enable_eager_execution()
def _strip_prefix(s, prefix):
return s[len(prefix):] if s.startswith(prefix) else s
if __name__ == '__main__':
test.main()
| cxxgtxy/tensorflow | tensorflow/python/eager/remote_test.py | Python | apache-2.0 | 24,106 | 0.008919 |
# Copyright 2022 The jax3d Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for jax3d.projects.nesf.nerfstatic.utils.camera_utils."""
import chex
from jax3d.projects.nesf.nerfstatic.utils import camera_utils
from jax3d.projects.nesf.nerfstatic.utils import types
import numpy as np
import pytest
def _batched_array(val, dtype=None):
"""Returns the array with leading `1` dimension."""
return np.array(val, dtype=dtype)[None, ...]
# TODO(epot): Support the np.array case. Camera should work for both batched
# and non-batched arrays.
@pytest.mark.parametrize('np_array', [_batched_array])
def test_camera(np_array):
camera = camera_utils.Camera.from_position_and_quaternion(
positions=np_array([2., 0., 1.,]),
quaternions=np_array([0.1, 0.2, 0.3, 0.4]),
resolution=(2, 2),
focal_px_length=280.,
)
rays = camera.pixel_centers2rays()
expected_rays = types.Rays(
scene_id=None,
origin=np_array([
[[2., 0., 1.],
[2., 0., 1.]],
[[2., 0., 1.],
[2., 0., 1.]],
]),
direction=np_array([
[[-0.27698026, -0.24996764, -0.92779206],
[-0.2750864, -0.24938536, -0.92851193]],
[[-0.27663719, -0.25217938, -0.92729576],
[-0.27474675, -0.25160123, -0.92801457]],
]),
)
chex.assert_tree_all_close(rays, expected_rays, ignore_nones=True)
| google-research/jax3d | jax3d/projects/nesf/nerfstatic/utils/camera_utils_test.py | Python | apache-2.0 | 1,892 | 0.0037 |
# ===============================================================================
# Copyright 2015 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from traits.api import Float, Property
# ============= standard library imports ========================
# ============= local library imports ==========================
from pychron.hardware.core.abstract_device import AbstractDevice
class LinearAxis(AbstractDevice):
position = Property(depends_on='_position')
_position = Float
min_value = Float(0.0)
max_value = Float(100.0)
min_limit = Property(depends_on='_position')
max_limit = Property(depends_on='_position')
_slewing = False
def set_home(self):
if self._cdevice:
self._cdevice.set_home()
def set_position(self, v, **kw):
if self._cdevice:
self._cdevice.set_position(v, **kw)
# self.add_consumable((self._cdevice.set_position, v, kw))
# def relative_move(self, v):
# self.set_position(self._position + v)
def is_slewing(self):
return self._slewing
def is_stalled(self):
if self._cdevice:
return self._cdevice.stalled()
def slew(self, modifier):
if self._cdevice:
self._slewing = True
self._cdevice.slew(modifier)
def stop(self):
if self._cdevice:
self._slewing = False
self._cdevice.stop_drive()
def _get_min_limit(self):
return abs(self._position - self.min_value) < 1e-5
def _get_max_limit(self):
return abs(self._position - self.max_value) < 1e-5
def _get_position(self):
return float('{:0.3f}'.format(self._position))
def _set_position(self, v):
self._position = v
if self._cdevice:
self.set_position(v)
# ============= EOF =============================================
| UManPychron/pychron | pychron/hardware/linear_axis.py | Python | apache-2.0 | 2,566 | 0 |
#!/usr/bin/env python
#--------Include modules---------------
from copy import copy
import rospy
from visualization_msgs.msg import Marker
from std_msgs.msg import String
from geometry_msgs.msg import Point
from nav_msgs.msg import OccupancyGrid
import actionlib_msgs.msg
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
import actionlib
import tf
from os import system
from random import random
from numpy import array,concatenate,vstack,delete,floor,ceil
from numpy import linalg as LA
from numpy import all as All
from time import time
#-----------------------------------------------------
# Subscribers' callbacks------------------------------
mapData=OccupancyGrid()
def mapCallBack(data):
global mapData
mapData=data
# Node----------------------------------------------
def node():
rospy.init_node('distanceCounter1', anonymous=False)
#-------------------------------------------
rate = rospy.Rate(50)
listener = tf.TransformListener()
listener.waitForTransform('/robot_1/odom', '/robot_1/base_link', rospy.Time(0),rospy.Duration(50.0))
try:
(trans,rot) = listener.lookupTransform('/robot_1/odom', '/robot_1/base_link', rospy.Time(0))
except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
trans=[0,0]
xinx=trans[0]
xiny=trans[1]
xprev=array([xinx,xiny])
distance=0
t0=time()
    #-------------------------------Main loop------------------------------------
while not rospy.is_shutdown():
(trans,rot)=listener.lookupTransform('/robot_1/odom', '/robot_1/base_link', rospy.Time(0))
xinx=int(trans[0]*1000)/1000.0
xiny=int(trans[1]*1000)/1000.0
xnew=array([xinx,xiny])
distance+=LA.norm(xnew-xprev)
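        # Accumulate total travelled distance as the Euclidean norm between
        # successive base_link positions.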
print distance," elapsed ",(time()-t0)," sec"
xprev=array([xinx,xiny])
rate.sleep()
#_____________________________________________________________________________
if __name__ == '__main__':
try:
node()
except rospy.ROSInterruptException:
pass
| hasauino/multi_kobuki_gazebo | scripts/tot_r1.py | Python | mit | 2,113 | 0.056791 |
#!/usr/bin/env python
# encoding: utf-8
"""
Waf tool for defining ardupilot's submodules, so that they are kept up to date.
Submodules can be considered dynamic sources, since they are updated during the
build. Furthermore, they can be used to generate other dynamic sources (mavlink
headers generation, for example). Thus, the correct use of this tool should
have three build groups: first one for updating the submodules, second for
generating any dynamic source from them, and the last one for the build. And
post_mode should be set to POST_LAZY. Example::
def build(bld):
bld.post_mode = waflib.Build.POST_LAZY
bld.add_group('git_submodules')
# gtest submodule
bld(
features='git_submodule'
git_submodule='gtest',
)
# mavlink submodule with syntactic sugar
bld.git_submodule('mavlink')
...
# now, for the dynamic sources
bld.add_group('dynamic_sources')
...
# now, below go the task generators for normal build process
bld.add_group('build')
...
"""
from waflib import Context, Task, Utils
from waflib.Configure import conf
from waflib.TaskGen import before_method, feature, taskgen_method
import os.path
class update_submodule(Task.Task):
color = 'BLUE'
run_str = '${GIT} -C ${SRC_ROOT} submodule update --init -- ${SUBMODULE_PATH}'
def runnable_status(self):
e = self.env.get_flat
cmd = e('GIT'), '-C', e('SRC_ROOT'), 'submodule', 'status', '--', e('SUBMODULE_PATH')
out = self.generator.bld.cmd_and_log(cmd, quiet=Context.BOTH)
# git submodule status uses a blank prefix for submodules that are up
# to date
if out[0] != ' ':
return Task.RUN_ME
return Task.SKIP_ME
def uid(self):
if not hasattr(self, 'uid_'):
m = Utils.md5()
m.update(self.__class__.__name__)
m.update(self.env.get_flat('SUBMODULE_PATH'))
self.uid_ = m.digest()
return self.uid_
def __str__(self):
return 'Submodule update: %s' % self.submodule
def configure(cfg):
cfg.find_program('git')
_submodules_tasks = {}
@taskgen_method
def git_submodule_update(self, name):
if name not in _submodules_tasks:
module_node = self.bld.srcnode.make_node(os.path.join('modules', name))
tsk = self.create_task('update_submodule', submodule=name)
tsk.env.SRC_ROOT = self.bld.srcnode.abspath()
tsk.env.SUBMODULE_PATH = module_node.abspath()
_submodules_tasks[name] = tsk
return _submodules_tasks[name]
@feature('git_submodule')
@before_method('process_source')
def process_module_dependencies(self):
self.git_submodule = getattr(self, 'git_submodule', '')
if not self.git_submodule:
self.bld.fatal('git_submodule: empty or missing git_submodule argument')
self.git_submodule_update(self.git_submodule)
@conf
def git_submodule(bld, git_submodule, **kw):
kw['git_submodule'] = git_submodule
kw['features'] = Utils.to_list(kw.get('features', ''))
kw['features'].append('git_submodule')
return bld(**kw)
| aesaae/ardupilot_str | Tools/ardupilotwaf/git_submodule.py | Python | gpl-3.0 | 3,169 | 0.002524 |
import os
import unittest
from shutil import copyfile
from ruamel.yaml import YAML
from ukbrest.common.utils.auth import PasswordHasher
from tests.utils import get_repository_path
class WSGIFunctions(unittest.TestCase):
def load_data(self, filepath):
yaml = YAML()
with open(filepath, 'r') as f:
return yaml.load(f)
def test_process_users_file_test00(self):
# prepare
orig_user_file = get_repository_path('wsgi/test00/users.txt')
users_file = orig_user_file + '.bak'
copyfile(orig_user_file, users_file)
orig_users = self.load_data(orig_user_file)
# run
ph = PasswordHasher(users_file, method='pbkdf2:sha256')
ph.process_users_file()
# evaluate
assert os.path.isfile(users_file)
users = self.load_data(users_file)
assert len(users) == 3
for user, password in users.items():
assert user in orig_users.keys(), user
assert password != orig_users[user], password + ' / ' + orig_users[user]
assert 90 < len(password) < 96, (len(password), password)
os.remove(users_file)
def test_process_users_file_file_does_not_exist_test00(self):
# prepare
users_file = get_repository_path('no/existing/file/here.txt')
# run
ph = PasswordHasher(users_file, method='pbkdf2:sha256')
ph.process_users_file()
def test_process_users_file_already_hashed_test00(self):
# prepare
orig_user_file = get_repository_path('wsgi/test00/users.txt')
users_file = orig_user_file + '.bak'
copyfile(orig_user_file, users_file)
orig_users = self.load_data(orig_user_file)
ph = PasswordHasher(users_file, method='pbkdf2:sha256')
ph.process_users_file()
users = self.load_data(users_file)
# run
ph = PasswordHasher(users_file, method='pbkdf2:sha256')
ph.process_users_file()
# evaluate
assert os.path.isfile(users_file)
new_users = self.load_data(users_file)
assert len(users) == 3
for user, password in new_users.items():
assert user in orig_users.keys(), user
assert password == users[user], password + ' / ' + users[user]
assert 90 < len(password) < 96, (len(password), password)
os.remove(users_file)
def test_process_users_file_one_password_hashed_rest_not_test01(self):
# prepare
orig_user_file = get_repository_path('wsgi/test01/users.txt')
users_file = orig_user_file + '.bak'
copyfile(orig_user_file, users_file)
orig_users = self.load_data(orig_user_file)
# run
ph = PasswordHasher(users_file, method='pbkdf2:sha256')
ph.process_users_file()
# evaluate
assert os.path.isfile(users_file)
users = self.load_data(users_file)
assert len(users) == 3
for user, password in users.items():
assert user in orig_users.keys(), user
if user != 'adams':
assert password != orig_users[user], user + ' / ' + password + ' / ' + orig_users[user]
else:
assert password == users[user], user + password + ' / ' + users[user]
assert 90 < len(password) < 96, (len(password), password)
os.remove(users_file)
def test_verify_password_test01(self):
# prepare
orig_user_file = get_repository_path('wsgi/test01/users.txt')
users_file = orig_user_file + '.bak'
copyfile(orig_user_file, users_file)
ph = PasswordHasher(users_file, method='pbkdf2:sha256')
ph.process_users_file()
# evaluate
assert os.path.isfile(users_file)
assert not ph.verify_password('milton', 'whatever')
assert ph.verify_password('john', 'mypassword')
assert ph.verify_password('adams', 'anotherpassword')
assert ph.verify_password('james', 'mypassword')
os.remove(users_file)
def test_verify_password_users_file_does_not_exist_test01(self):
# prepare
users_file = get_repository_path('no/existing/file/here.txt')
ph = PasswordHasher(users_file, method='pbkdf2:sha256')
ph.process_users_file()
# evaluate
assert not ph.verify_password('milton', 'whatever')
assert not ph.verify_password('john', 'mypassword')
assert not ph.verify_password('adams', 'anotherpassword')
assert not ph.verify_password('james', 'mypassword')
def test_verify_password_users_file_empty_test01(self):
# prepare
orig_user_file = get_repository_path('wsgi/test02/users.txt')
users_file = orig_user_file + '.bak'
ph = PasswordHasher(users_file, method='pbkdf2:sha256')
ph.process_users_file()
# evaluate
assert not ph.verify_password('milton', 'whatever')
assert not ph.verify_password('john', 'mypassword')
assert not ph.verify_password('adams', 'anotherpassword')
assert not ph.verify_password('james', 'mypassword')
def test_verify_password_users_file_none_test01(self):
# prepare
ph = PasswordHasher(None, method='pbkdf2:sha256')
ph.process_users_file()
# evaluate
assert ph.verify_password('milton', 'whatever')
assert ph.verify_password('john', 'mypassword')
assert ph.verify_password('adams', 'anotherpassword')
assert ph.verify_password('james', 'mypassword')
| miltondp/ukbrest | tests/test_password_hasher.py | Python | gpl-3.0 | 5,529 | 0.000723 |
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/dev/howto/deployment/checklist/
ADMINS = (
("David Barragán", "[email protected]"),
)
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '0q)_&-!hu%%en55a&cx!a2c^7aiw*7*+^zg%_&vk9&4&-4&qg#'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ['*']
# Database
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/dev/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/dev/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
# Media files
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'monarch.base',
'monarch.documents',
'monarch.users',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'monarch.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'debug': False,
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'wsgi.application'
# Password validation
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
| bameda/monarch | back/settings/common.py | Python | agpl-3.0 | 3,078 | 0.00195 |
"""Caching Library using redis."""
import logging
from functools import wraps
from flask import current_app
from walrus import Walrus
import api
import hashlib
import pickle
from api import PicoException
log = logging.getLogger(__name__)
__redis = {
"walrus": None,
"cache": None,
"zsets": {"scores": None},
}
def get_conn():
"""Get a redis connection, reusing one if it exists."""
global __redis
if __redis.get("walrus") is None:
conf = current_app.config
try:
__redis["walrus"] = Walrus(
host=conf["REDIS_ADDR"],
port=conf["REDIS_PORT"],
password=conf["REDIS_PW"],
db=conf["REDIS_DB_NUMBER"],
)
except Exception as error:
raise PicoException(
"Internal server error. " + "Please contact a system administrator.",
data={"original_error": error},
)
return __redis["walrus"]
def get_cache():
"""Get a walrus cache, reusing one if it exists."""
global __redis
if __redis.get("cache") is None:
__redis["cache"] = get_conn().cache(default_timeout=0)
return __redis["cache"]
def get_score_cache():
global __redis
if __redis["zsets"].get("scores") is None:
__redis["zsets"]["scores"] = get_conn().ZSet("scores")
return __redis["zsets"]["scores"]
def get_scoreboard_cache(**kwargs):
global __redis
scoreboard_name = "scoreboard:{}".format(_hash_key((), kwargs))
if __redis["zsets"].get(scoreboard_name) is None:
__redis["zsets"][scoreboard_name] = get_conn().ZSet(scoreboard_name)
return __redis["zsets"][scoreboard_name]
def clear():
global __redis
if __redis.get("walrus") is not None:
__redis["walrus"].flushdb()
def __insert_cache(f, *args, **kwargs):
"""
Directly upserting without first invalidating, thus keeping a memoized
value available without lapse
"""
if f == api.stats.get_score:
raise PicoException("Error: Do not manually reset_cache get_score")
else:
key = "%s:%s" % (f.__name__, _hash_key(args, kwargs))
value = f(*args, **kwargs)
get_cache().set(key, value)
return value
def memoize(_f=None, **cached_kwargs):
"""walrus.Cache.cached wrapper that reuses shared cache."""
def decorator(f):
@wraps(f)
def wrapper(*args, **kwargs):
if kwargs.get("reset_cache", False):
kwargs.pop("reset_cache", None)
return __insert_cache(f, *args, **kwargs)
else:
return get_cache().cached(**cached_kwargs)(f)(*args, **kwargs)
return wrapper
if _f is None:
return decorator
else:
return decorator(_f)
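# Usage sketch for the decorator above (the function name and its argument are
# hypothetical; only `memoize` and the `reset_cache` keyword come from this
# module):
#
#     @memoize
#     def get_problem(pid):
#         ...                                  # expensive lookup, cached in redis
#
#     get_problem("abc123")                    # first call populates the cache
#     get_problem("abc123")                    # later calls are served from redis
#     get_problem("abc123", reset_cache=True)  # recompute and upsert in place via
#                                              # __insert_cache(), so the key never
#                                              # disappears between calls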
def _hash_key(a, k):
return hashlib.md5(pickle.dumps((a, k))).hexdigest()
def get_scoreboard_key(team):
# For lack of better idea of delimiter, use '>' illegal team name char
return "{}>{}>{}".format(team["team_name"], team["affiliation"], team["tid"])
def decode_scoreboard_item(item, with_weight=False, include_key=False):
"""
:param item: tuple of ZSet (key, score)
:param with_weight: keep decimal weighting of score, or return as int
:param include_key: whether to include to raw key
:return: dict of scoreboard item
"""
key = item[0].decode("utf-8")
data = key.split(">")
score = item[1]
if not with_weight:
score = int(score)
output = {"name": data[0], "affiliation": data[1], "tid": data[2], "score": score}
if include_key:
output["key"] = key
return output
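# Example of the key layout used above (team name, affiliation and tid are
# hypothetical): a ZSet member "Team Rocket>Some School>abc123" with score
# 1500.000123 decodes to
#     {'name': 'Team Rocket', 'affiliation': 'Some School',
#      'tid': 'abc123', 'score': 1500}
# (the score stays a float only when with_weight=True).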
def search_scoreboard_cache(scoreboard, pattern):
"""
:param scoreboard: scoreboard cache ZSet
:param pattern: text pattern to search team names and affiliations,
not including wildcards
:return: sorted list of scoreboard entries
"""
# Trailing '*>' avoids search on last token, tid
results = [
decode_scoreboard_item(item, with_weight=True, include_key=True)
for item in list(scoreboard.search("*{}*>*".format(pattern)))
]
return sorted(results, key=lambda item: item["score"], reverse=True)
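# e.g. search_scoreboard_cache(board, "acme"), with board coming from
# get_scoreboard_cache(), expands to the glob "*acme*>*": "acme" must appear in
# the name or affiliation part of the key (a hit only inside the trailing tid
# token has no '>' after it and does not match), and the results come back
# decoded and sorted by score, highest first.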
def invalidate(f, *args, **kwargs):
"""
Clunky way to replicate busting behavior due to awkward wrapping of walrus
cached decorator
"""
if f == api.stats.get_score:
key = args[0]
get_score_cache().remove(key)
else:
key = "%s:%s" % (f.__name__, _hash_key(args, kwargs))
get_cache().delete(key)
| royragsdale/picoCTF | picoCTF-web/api/cache.py | Python | mit | 4,558 | 0.000658 |
from .Lifeform import Lifeform
from erukar.system.engine import Indexer
from erukar.ext.math.Distance import Distance
class Player(Lifeform, Indexer):
def __init__(self, world=None):
Indexer.__init__(self)
super().__init__(world)
self.faction = 'iurian'
self.uid = '' # Player UID
self.credits = 0
self.define_level(1)
def alias(self):
return self.uid
def lifeform(self):
return self
def generate_tile(self, dimensions, tile_id):
h, w = dimensions
radius = int(w/3)-1
circle = list(Distance.points_in_circle(radius, (int(h/2),int(w/2))))
inner_circle = list(Distance.points_in_circle(int(w/4)-1, (int(h/2),int(w/2))))
for y in range(h):
for x in range(w):
if (x,y) in circle:
if (x,y) not in inner_circle:
yield {'r':0,'g':0,'b':0,'a':1}
else:
yield {'r':0,'g':255,'b':0,'a':1}
else: yield {'r':0,'g':0,'b':0,'a':0}
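    # Sketch of the generator above: for dimensions == (h, w) it yields one
    # RGBA dict per pixel, row by row -- a green disc with a black ring around
    # it on a transparent background.  tile_id is accepted but unused here.
    #
    #     pixels = list(player.generate_tile((32, 32), tile_id=None))
    #     assert len(pixels) == 32 * 32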
| etkirsch/legends-of-erukar | erukar/system/engine/lifeforms/Player.py | Python | agpl-3.0 | 1,072 | 0.027052 |
''' Test_RSS_Policy_JobEfficiencyPolicy
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import DIRAC.ResourceStatusSystem.Policy.JobEfficiencyPolicy as moduleTested
################################################################################
class JobEfficiencyPolicy_TestCase( unittest.TestCase ):
def setUp( self ):
'''
Setup
'''
self.moduleTested = moduleTested
self.testClass = self.moduleTested.JobEfficiencyPolicy
def tearDown( self ):
'''
Tear down
'''
del self.moduleTested
del self.testClass
################################################################################
class JobEfficiencyPolicy_Success( JobEfficiencyPolicy_TestCase ):
def test_instantiate( self ):
''' tests that we can instantiate one object of the tested class
'''
module = self.testClass()
self.assertEqual( 'JobEfficiencyPolicy', module.__class__.__name__ )
def test_evaluate( self ):
''' tests the method _evaluate
'''
module = self.testClass()
res = module._evaluate( { 'OK' : False, 'Message' : 'Bo!' } )
self.assertTrue(res['OK'])
self.assertEqual( 'Error', res[ 'Value' ][ 'Status' ] )
self.assertEqual( 'Bo!', res[ 'Value' ][ 'Reason' ] )
res = module._evaluate( { 'OK' : True, 'Value' : None } )
self.assertTrue(res['OK'])
self.assertEqual( 'Unknown', res[ 'Value' ][ 'Status' ] )
self.assertEqual( 'No values to take a decision', res[ 'Value' ][ 'Reason' ] )
res = module._evaluate( { 'OK' : True, 'Value' : [] } )
self.assertTrue(res['OK'])
self.assertEqual( 'Unknown', res[ 'Value' ][ 'Status' ] )
self.assertEqual( 'No values to take a decision', res[ 'Value' ][ 'Reason' ] )
res = module._evaluate( { 'OK' : True, 'Value' : [{}] } )
self.assertTrue(res['OK'])
self.assertEqual( 'Unknown', res[ 'Value' ][ 'Status' ] )
self.assertEqual( 'No values to take a decision', res[ 'Value' ][ 'Reason' ] )
res = module._evaluate( { 'OK' : True, 'Value' : [{ 'Completed' : 0, 'Done' : 0, 'Failed' : 0 }] } )
self.assertTrue(res['OK'])
self.assertEqual( 'Unknown', res[ 'Value' ][ 'Status' ] )
self.assertEqual( 'Not enough jobs to take a decision', res[ 'Value' ][ 'Reason' ] )
res = module._evaluate( { 'OK' : True, 'Value' : [{ 'Completed' : 0, 'Done' : 0, 'Failed' : 1 }] } )
self.assertTrue(res['OK'])
self.assertEqual( 'Unknown', res[ 'Value' ][ 'Status' ] )
self.assertEqual( 'Not enough jobs to take a decision', res[ 'Value' ][ 'Reason' ] )
res = module._evaluate( { 'OK' : True, 'Value' : [{ 'Completed' : 0, 'Done' : 0, 'Failed' : 10 }] } )
self.assertTrue(res['OK'])
self.assertEqual( 'Banned', res[ 'Value' ][ 'Status' ] )
self.assertEqual( 'Jobs Efficiency of 0.00', res[ 'Value' ][ 'Reason' ] )
res = module._evaluate( { 'OK' : True, 'Value' : [{ 'Completed' : 0, 'Done' : 8, 'Failed' : 2 }] } )
self.assertTrue(res['OK'])
self.assertEqual( 'Degraded', res[ 'Value' ][ 'Status' ] )
self.assertEqual( 'Jobs Efficiency of 0.80', res[ 'Value' ][ 'Reason' ] )
res = module._evaluate( { 'OK' : True, 'Value' : [{ 'Completed' : 10, 'Done' : 9, 'Failed' : 1 }] } )
self.assertTrue(res['OK'])
self.assertEqual( 'Active', res[ 'Value' ][ 'Status' ] )
self.assertEqual( 'Jobs Efficiency of 0.95', res[ 'Value' ][ 'Reason' ] )
################################################################################
if __name__ == '__main__':
suite = unittest.defaultTestLoader.loadTestsFromTestCase( JobEfficiencyPolicy_TestCase )
suite.addTest( unittest.defaultTestLoader.loadTestsFromTestCase( JobEfficiencyPolicy_Success ) )
testResult = unittest.TextTestRunner( verbosity = 2 ).run( suite )
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
| yujikato/DIRAC | src/DIRAC/ResourceStatusSystem/Policy/test/Test_RSS_Policy_JobEfficiencyPolicy.py | Python | gpl-3.0 | 3,905 | 0.06274 |
"""Random variable generators.
integers
--------
uniform within range
sequences
---------
pick random element
pick random sample
generate random permutation
distributions on the real line:
------------------------------
uniform
triangular
normal (Gaussian)
lognormal
negative exponential
gamma
beta
pareto
Weibull
distributions on the circle (angles 0 to 2pi)
---------------------------------------------
circular uniform
von Mises
General notes on the underlying Mersenne Twister core generator:
* The period is 2**19937-1.
* It is one of the most extensively tested generators in existence.
* Without a direct way to compute N steps forward, the semantics of
jumpahead(n) are weakened to simply jump to another distant state and rely
on the large period to avoid overlapping sequences.
* The random() method is implemented in C, executes in a single Python step,
and is, therefore, threadsafe.
"""
from __future__ import division
from warnings import warn as _warn
from types import MethodType as _MethodType, BuiltinMethodType as _BuiltinMethodType
from math import log as _log, exp as _exp, pi as _pi, e as _e, ceil as _ceil
from math import sqrt as _sqrt, acos as _acos, cos as _cos, sin as _sin
from os import urandom as _urandom
from binascii import hexlify as _hexlify
__all__ = ["Random","seed","random","uniform","randint","choice","sample",
"randrange","shuffle","normalvariate","lognormvariate",
"expovariate","vonmisesvariate","gammavariate","triangular",
"gauss","betavariate","paretovariate","weibullvariate",
"getstate","setstate","jumpahead", "WichmannHill", "getrandbits",
"SystemRandom"]
NV_MAGICCONST = 4 * _exp(-0.5)/_sqrt(2.0)
TWOPI = 2.0*_pi
LOG4 = _log(4.0)
SG_MAGICCONST = 1.0 + _log(4.5)
BPF = 53 # Number of bits in a float
RECIP_BPF = 2**-BPF
# Translated by Guido van Rossum from C source provided by
# Adrian Baddeley. Adapted by Raymond Hettinger for use with
# the Mersenne Twister and os.urandom() core generators.
import _random
class Random(_random.Random):
"""Random number generator base class used by bound module functions.
Used to instantiate instances of Random to get generators that don't
share state. Especially useful for multi-threaded programs, creating
a different instance of Random for each thread, and using the jumpahead()
method to ensure that the generated sequences seen by each thread don't
overlap.
Class Random can also be subclassed if you want to use a different basic
generator of your own devising: in that case, override the following
methods: random(), seed(), getstate(), setstate() and jumpahead().
Optionally, implement a getrandbits() method so that randrange() can cover
arbitrarily large ranges.
"""
VERSION = 3 # used by getstate/setstate
def __init__(self, x=None):
"""Initialize an instance.
Optional argument x controls seeding, as for Random.seed().
"""
self.seed(x)
self.gauss_next = None
def seed(self, a=None):
"""Initialize internal state from hashable object.
None or no argument seeds from current time or from an operating
system specific randomness source if available.
        If a is not None and is not an int or long, hash(a) is used instead.
"""
if a is None:
try:
a = long(_hexlify(_urandom(16)), 16)
except NotImplementedError:
import time
a = long(time.time() * 256) # use fractional seconds
super(Random, self).seed(a)
self.gauss_next = None
def getstate(self):
"""Return internal state; can be passed to setstate() later."""
return self.VERSION, super(Random, self).getstate(), self.gauss_next
def setstate(self, state):
"""Restore internal state from object returned by getstate()."""
version = state[0]
if version == 3:
version, internalstate, self.gauss_next = state
super(Random, self).setstate(internalstate)
elif version == 2:
version, internalstate, self.gauss_next = state
# In version 2, the state was saved as signed ints, which causes
# inconsistencies between 32/64-bit systems. The state is
# really unsigned 32-bit ints, so we convert negative ints from
# version 2 to positive longs for version 3.
try:
internalstate = tuple( long(x) % (2**32) for x in internalstate )
except ValueError, e:
raise TypeError, e
super(Random, self).setstate(internalstate)
else:
raise ValueError("state with version %s passed to "
"Random.setstate() of version %s" %
(version, self.VERSION))
## ---- Methods below this point do not need to be overridden when
## ---- subclassing for the purpose of using a different core generator.
## -------------------- pickle support -------------------
def __getstate__(self): # for pickle
return self.getstate()
def __setstate__(self, state): # for pickle
self.setstate(state)
def __reduce__(self):
return self.__class__, (), self.getstate()
## -------------------- integer methods -------------------
def randrange(self, start, stop=None, step=1, int=int, default=None,
maxwidth=1L<<BPF):
"""Choose a random item from range(start, stop[, step]).
This fixes the problem with randint() which includes the
endpoint; in Python this is usually not what you want.
Do not supply the 'int', 'default', and 'maxwidth' arguments.
"""
# This code is a bit messy to make it fast for the
# common case while still doing adequate error checking.
istart = int(start)
if istart != start:
raise ValueError, "non-integer arg 1 for randrange()"
if stop is default:
if istart > 0:
if istart >= maxwidth:
return self._randbelow(istart)
return int(self.random() * istart)
raise ValueError, "empty range for randrange()"
# stop argument supplied.
istop = int(stop)
if istop != stop:
raise ValueError, "non-integer stop for randrange()"
width = istop - istart
if step == 1 and width > 0:
# Note that
# int(istart + self.random()*width)
# instead would be incorrect. For example, consider istart
# = -2 and istop = 0. Then the guts would be in
# -2.0 to 0.0 exclusive on both ends (ignoring that random()
# might return 0.0), and because int() truncates toward 0, the
# final result would be -1 or 0 (instead of -2 or -1).
# istart + int(self.random()*width)
# would also be incorrect, for a subtler reason: the RHS
# can return a long, and then randrange() would also return
# a long, but we're supposed to return an int (for backward
# compatibility).
if width >= maxwidth:
return int(istart + self._randbelow(width))
return int(istart + int(self.random()*width))
if step == 1:
raise ValueError, "empty range for randrange() (%d,%d, %d)" % (istart, istop, width)
# Non-unit step argument supplied.
istep = int(step)
if istep != step:
raise ValueError, "non-integer step for randrange()"
if istep > 0:
n = (width + istep - 1) // istep
elif istep < 0:
n = (width + istep + 1) // istep
else:
raise ValueError, "zero step for randrange()"
if n <= 0:
raise ValueError, "empty range for randrange()"
if n >= maxwidth:
return istart + istep*self._randbelow(n)
return istart + istep*int(self.random() * n)
def randint(self, a, b):
"""Return random integer in range [a, b], including both end points.
"""
return self.randrange(a, b+1)
def _randbelow(self, n, _log=_log, int=int, _maxwidth=1L<<BPF,
_Method=_MethodType, _BuiltinMethod=_BuiltinMethodType):
"""Return a random int in the range [0,n)
Handles the case where n has more bits than returned
by a single call to the underlying generator.
"""
try:
getrandbits = self.getrandbits
except AttributeError:
pass
else:
# Only call self.getrandbits if the original random() builtin method
# has not been overridden or if a new getrandbits() was supplied.
# This assures that the two methods correspond.
if type(self.random) is _BuiltinMethod or type(getrandbits) is _Method:
k = int(1.00001 + _log(n-1, 2.0)) # 2**k > n-1 > 2**(k-2)
r = getrandbits(k)
while r >= n:
r = getrandbits(k)
return r
if n >= _maxwidth:
_warn("Underlying random() generator does not supply \n"
"enough bits to choose from a population range this large")
return int(self.random() * n)
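    # Worked example of the wide-range path above: for n = 10**20 (about 67
    # bits, more than BPF = 53), k = int(1.00001 + _log(n-1, 2.0)) == 67, and
    # getrandbits(67) is redrawn until the value falls below n, which keeps
    # the result uniform instead of introducing a modulo bias.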
## -------------------- sequence methods -------------------
def choice(self, seq):
"""Choose a random element from a non-empty sequence."""
return seq[int(self.random() * len(seq))] # raises IndexError if seq is empty
def shuffle(self, x, random=None, int=int):
"""x, random=random.random -> shuffle list x in place; return None.
Optional arg random is a 0-argument function returning a random
float in [0.0, 1.0); by default, the standard random.random.
"""
if random is None:
random = self.random
for i in reversed(xrange(1, len(x))):
# pick an element in x[:i+1] with which to exchange x[i]
j = int(random() * (i+1))
x[i], x[j] = x[j], x[i]
def sample(self, population, k):
"""Chooses k unique random elements from a population sequence.
Returns a new list containing elements from the population while
leaving the original population unchanged. The resulting list is
in selection order so that all sub-slices will also be valid random
samples. This allows raffle winners (the sample) to be partitioned
into grand prize and second place winners (the subslices).
Members of the population need not be hashable or unique. If the
population contains repeats, then each occurrence is a possible
selection in the sample.
To choose a sample in a range of integers, use xrange as an argument.
This is especially fast and space efficient for sampling from a
large population: sample(xrange(10000000), 60)
"""
# XXX Although the documentation says `population` is "a sequence",
# XXX attempts are made to cater to any iterable with a __len__
# XXX method. This has had mixed success. Examples from both
# XXX sides: sets work fine, and should become officially supported;
# XXX dicts are much harder, and have failed in various subtle
# XXX ways across attempts. Support for mapping types should probably
# XXX be dropped (and users should pass mapping.keys() or .values()
# XXX explicitly).
# Sampling without replacement entails tracking either potential
# selections (the pool) in a list or previous selections in a set.
# When the number of selections is small compared to the
# population, then tracking selections is efficient, requiring
# only a small set and an occasional reselection. For
# a larger number of selections, the pool tracking method is
# preferred since the list takes less space than the
# set and it doesn't suffer from frequent reselections.
n = len(population)
if not 0 <= k <= n:
raise ValueError, "sample larger than population"
random = self.random
_int = int
result = [None] * k
setsize = 21 # size of a small set minus size of an empty list
if k > 5:
setsize += 4 ** _ceil(_log(k * 3, 4)) # table size for big sets
if n <= setsize or hasattr(population, "keys"):
# An n-length list is smaller than a k-length set, or this is a
# mapping type so the other algorithm wouldn't work.
pool = list(population)
for i in xrange(k): # invariant: non-selected at [0,n-i)
j = _int(random() * (n-i))
result[i] = pool[j]
pool[j] = pool[n-i-1] # move non-selected item into vacancy
else:
try:
selected = set()
selected_add = selected.add
for i in xrange(k):
j = _int(random() * n)
while j in selected:
j = _int(random() * n)
selected_add(j)
result[i] = population[j]
except (TypeError, KeyError): # handle (at least) sets
if isinstance(population, list):
raise
return self.sample(tuple(population), k)
return result
## -------------------- real-valued distributions -------------------
## -------------------- uniform distribution -------------------
def uniform(self, a, b):
"""Get a random number in the range [a, b)."""
return a + (b-a) * self.random()
## -------------------- triangular --------------------
def triangular(self, low=0.0, high=1.0, mode=None):
"""Triangular distribution.
Continuous distribution bounded by given lower and upper limits,
and having a given mode value in-between.
http://en.wikipedia.org/wiki/Triangular_distribution
"""
u = self.random()
c = 0.5 if mode is None else (mode - low) / (high - low)
if u > c:
u = 1.0 - u
c = 1.0 - c
low, high = high, low
return low + (high - low) * (u * c) ** 0.5
## -------------------- normal distribution --------------------
def normalvariate(self, mu, sigma):
"""Normal distribution.
mu is the mean, and sigma is the standard deviation.
"""
# mu = mean, sigma = standard deviation
# Uses Kinderman and Monahan method. Reference: Kinderman,
# A.J. and Monahan, J.F., "Computer generation of random
# variables using the ratio of uniform deviates", ACM Trans
# Math Software, 3, (1977), pp257-260.
random = self.random
while 1:
u1 = random()
u2 = 1.0 - random()
z = NV_MAGICCONST*(u1-0.5)/u2
zz = z*z/4.0
if zz <= -_log(u2):
break
return mu + z*sigma
## -------------------- lognormal distribution --------------------
def lognormvariate(self, mu, sigma):
"""Log normal distribution.
If you take the natural logarithm of this distribution, you'll get a
normal distribution with mean mu and standard deviation sigma.
mu can have any value, and sigma must be greater than zero.
"""
return _exp(self.normalvariate(mu, sigma))
## -------------------- exponential distribution --------------------
def expovariate(self, lambd):
"""Exponential distribution.
lambd is 1.0 divided by the desired mean. It should be
nonzero. (The parameter would be called "lambda", but that is
a reserved word in Python.) Returned values range from 0 to
positive infinity if lambd is positive, and from negative
infinity to 0 if lambd is negative.
"""
# lambd: rate lambd = 1/mean
# ('lambda' is a Python reserved word)
random = self.random
u = random()
while u <= 1e-7:
u = random()
return -_log(u)/lambd
## -------------------- von Mises distribution --------------------
def vonmisesvariate(self, mu, kappa):
"""Circular data distribution.
mu is the mean angle, expressed in radians between 0 and 2*pi, and
kappa is the concentration parameter, which must be greater than or
equal to zero. If kappa is equal to zero, this distribution reduces
to a uniform random angle over the range 0 to 2*pi.
"""
# mu: mean angle (in radians between 0 and 2*pi)
# kappa: concentration parameter kappa (>= 0)
# if kappa = 0 generate uniform random angle
# Based upon an algorithm published in: Fisher, N.I.,
# "Statistical Analysis of Circular Data", Cambridge
# University Press, 1993.
# Thanks to Magnus Kessler for a correction to the
# implementation of step 4.
random = self.random
if kappa <= 1e-6:
return TWOPI * random()
a = 1.0 + _sqrt(1.0 + 4.0 * kappa * kappa)
b = (a - _sqrt(2.0 * a))/(2.0 * kappa)
r = (1.0 + b * b)/(2.0 * b)
while 1:
u1 = random()
z = _cos(_pi * u1)
f = (1.0 + r * z)/(r + z)
c = kappa * (r - f)
u2 = random()
if u2 < c * (2.0 - c) or u2 <= c * _exp(1.0 - c):
break
u3 = random()
if u3 > 0.5:
theta = (mu % TWOPI) + _acos(f)
else:
theta = (mu % TWOPI) - _acos(f)
return theta
## -------------------- gamma distribution --------------------
def gammavariate(self, alpha, beta):
"""Gamma distribution. Not the gamma function!
Conditions on the parameters are alpha > 0 and beta > 0.
"""
# alpha > 0, beta > 0, mean is alpha*beta, variance is alpha*beta**2
# Warning: a few older sources define the gamma distribution in terms
# of alpha > -1.0
if alpha <= 0.0 or beta <= 0.0:
raise ValueError, 'gammavariate: alpha and beta must be > 0.0'
random = self.random
if alpha > 1.0:
# Uses R.C.H. Cheng, "The generation of Gamma
# variables with non-integral shape parameters",
# Applied Statistics, (1977), 26, No. 1, p71-74
ainv = _sqrt(2.0 * alpha - 1.0)
bbb = alpha - LOG4
ccc = alpha + ainv
while 1:
u1 = random()
if not 1e-7 < u1 < .9999999:
continue
u2 = 1.0 - random()
v = _log(u1/(1.0-u1))/ainv
x = alpha*_exp(v)
z = u1*u1*u2
r = bbb+ccc*v-x
if r + SG_MAGICCONST - 4.5*z >= 0.0 or r >= _log(z):
return x * beta
elif alpha == 1.0:
# expovariate(1)
u = random()
while u <= 1e-7:
u = random()
return -_log(u) * beta
else: # alpha is between 0 and 1 (exclusive)
# Uses ALGORITHM GS of Statistical Computing - Kennedy & Gentle
while 1:
u = random()
b = (_e + alpha)/_e
p = b*u
if p <= 1.0:
x = p ** (1.0/alpha)
else:
x = -_log((b-p)/alpha)
u1 = random()
if p > 1.0:
if u1 <= x ** (alpha - 1.0):
break
elif u1 <= _exp(-x):
break
return x * beta
## -------------------- Gauss (faster alternative) --------------------
def gauss(self, mu, sigma):
"""Gaussian distribution.
mu is the mean, and sigma is the standard deviation. This is
slightly faster than the normalvariate() function.
Not thread-safe without a lock around calls.
"""
# When x and y are two variables from [0, 1), uniformly
# distributed, then
#
# cos(2*pi*x)*sqrt(-2*log(1-y))
# sin(2*pi*x)*sqrt(-2*log(1-y))
#
# are two *independent* variables with normal distribution
# (mu = 0, sigma = 1).
# (Lambert Meertens)
# (corrected version; bug discovered by Mike Miller, fixed by LM)
# Multithreading note: When two threads call this function
# simultaneously, it is possible that they will receive the
# same return value. The window is very small though. To
# avoid this, you have to use a lock around all calls. (I
# didn't want to slow this down in the serial case by using a
# lock here.)
random = self.random
z = self.gauss_next
self.gauss_next = None
if z is None:
x2pi = random() * TWOPI
g2rad = _sqrt(-2.0 * _log(1.0 - random()))
z = _cos(x2pi) * g2rad
self.gauss_next = _sin(x2pi) * g2rad
return mu + z*sigma
## -------------------- beta --------------------
## See
## http://sourceforge.net/bugs/?func=detailbug&bug_id=130030&group_id=5470
## for Ivan Frohne's insightful analysis of why the original implementation:
##
## def betavariate(self, alpha, beta):
## # Discrete Event Simulation in C, pp 87-88.
##
## y = self.expovariate(alpha)
## z = self.expovariate(1.0/beta)
## return z/(y+z)
##
## was dead wrong, and how it probably got that way.
def betavariate(self, alpha, beta):
"""Beta distribution.
Conditions on the parameters are alpha > 0 and beta > 0.
Returned values range between 0 and 1.
"""
# This version due to Janne Sinkkonen, and matches all the std
# texts (e.g., Knuth Vol 2 Ed 3 pg 134 "the beta distribution").
y = self.gammavariate(alpha, 1.)
if y == 0:
return 0.0
else:
return y / (y + self.gammavariate(beta, 1.))
## -------------------- Pareto --------------------
def paretovariate(self, alpha):
"""Pareto distribution. alpha is the shape parameter."""
# Jain, pg. 495
u = 1.0 - self.random()
return 1.0 / pow(u, 1.0/alpha)
## -------------------- Weibull --------------------
def weibullvariate(self, alpha, beta):
"""Weibull distribution.
alpha is the scale parameter and beta is the shape parameter.
"""
# Jain, pg. 499; bug fix courtesy Bill Arms
u = 1.0 - self.random()
return alpha * pow(-_log(u), 1.0/beta)
## -------------------- Wichmann-Hill -------------------
class WichmannHill(Random):
VERSION = 1 # used by getstate/setstate
def seed(self, a=None):
"""Initialize internal state from hashable object.
None or no argument seeds from current time or from an operating
system specific randomness source if available.
        If a is not None and is not an int or long, hash(a) is used instead.
If a is an int or long, a is used directly. Distinct values between
0 and 27814431486575L inclusive are guaranteed to yield distinct
internal states (this guarantee is specific to the default
Wichmann-Hill generator).
"""
if a is None:
try:
a = long(_hexlify(_urandom(16)), 16)
except NotImplementedError:
import time
a = long(time.time() * 256) # use fractional seconds
if not isinstance(a, (int, long)):
a = hash(a)
a, x = divmod(a, 30268)
a, y = divmod(a, 30306)
a, z = divmod(a, 30322)
self._seed = int(x)+1, int(y)+1, int(z)+1
self.gauss_next = None
def random(self):
"""Get the next random number in the range [0.0, 1.0)."""
        # Wichmann-Hill random number generator.
#
# Wichmann, B. A. & Hill, I. D. (1982)
# Algorithm AS 183:
# An efficient and portable pseudo-random number generator
# Applied Statistics 31 (1982) 188-190
#
# see also:
# Correction to Algorithm AS 183
# Applied Statistics 33 (1984) 123
#
# McLeod, A. I. (1985)
# A remark on Algorithm AS 183
# Applied Statistics 34 (1985),198-200
# This part is thread-unsafe:
# BEGIN CRITICAL SECTION
x, y, z = self._seed
x = (171 * x) % 30269
y = (172 * y) % 30307
z = (170 * z) % 30323
self._seed = x, y, z
# END CRITICAL SECTION
# Note: on a platform using IEEE-754 double arithmetic, this can
# never return 0.0 (asserted by Tim; proof too long for a comment).
return (x/30269.0 + y/30307.0 + z/30323.0) % 1.0
def getstate(self):
"""Return internal state; can be passed to setstate() later."""
return self.VERSION, self._seed, self.gauss_next
def setstate(self, state):
"""Restore internal state from object returned by getstate()."""
version = state[0]
if version == 1:
version, self._seed, self.gauss_next = state
else:
raise ValueError("state with version %s passed to "
"Random.setstate() of version %s" %
(version, self.VERSION))
def jumpahead(self, n):
"""Act as if n calls to random() were made, but quickly.
n is an int, greater than or equal to 0.
Example use: If you have 2 threads and know that each will
consume no more than a million random numbers, create two Random
objects r1 and r2, then do
r2.setstate(r1.getstate())
r2.jumpahead(1000000)
Then r1 and r2 will use guaranteed-disjoint segments of the full
period.
"""
if not n >= 0:
raise ValueError("n must be >= 0")
x, y, z = self._seed
x = int(x * pow(171, n, 30269)) % 30269
y = int(y * pow(172, n, 30307)) % 30307
z = int(z * pow(170, n, 30323)) % 30323
self._seed = x, y, z
def __whseed(self, x=0, y=0, z=0):
"""Set the Wichmann-Hill seed from (x, y, z).
These must be integers in the range [0, 256).
"""
if not type(x) == type(y) == type(z) == int:
raise TypeError('seeds must be integers')
if not (0 <= x < 256 and 0 <= y < 256 and 0 <= z < 256):
raise ValueError('seeds must be in range(0, 256)')
if 0 == x == y == z:
# Initialize from current time
import time
t = long(time.time() * 256)
t = int((t&0xffffff) ^ (t>>24))
t, x = divmod(t, 256)
t, y = divmod(t, 256)
t, z = divmod(t, 256)
# Zero is a poor seed, so substitute 1
self._seed = (x or 1, y or 1, z or 1)
self.gauss_next = None
def whseed(self, a=None):
"""Seed from hashable object's hash code.
None or no argument seeds from current time. It is not guaranteed
that objects with distinct hash codes lead to distinct internal
states.
This is obsolete, provided for compatibility with the seed routine
used prior to Python 2.1. Use the .seed() method instead.
"""
if a is None:
self.__whseed()
return
a = hash(a)
a, x = divmod(a, 256)
a, y = divmod(a, 256)
a, z = divmod(a, 256)
x = (x + a) % 256 or 1
y = (y + a) % 256 or 1
z = (z + a) % 256 or 1
self.__whseed(x, y, z)
## --------------- Operating System Random Source ------------------
class SystemRandom(Random):
"""Alternate random number generator using sources provided
by the operating system (such as /dev/urandom on Unix or
CryptGenRandom on Windows).
Not available on all systems (see os.urandom() for details).
"""
def random(self):
"""Get the next random number in the range [0.0, 1.0)."""
return (long(_hexlify(_urandom(7)), 16) >> 3) * RECIP_BPF
def getrandbits(self, k):
"""getrandbits(k) -> x. Generates a long int with k random bits."""
if k <= 0:
raise ValueError('number of bits must be greater than zero')
if k != int(k):
raise TypeError('number of bits should be an integer')
bytes = (k + 7) // 8 # bits / 8 and rounded up
x = long(_hexlify(_urandom(bytes)), 16)
return x >> (bytes * 8 - k) # trim excess bits
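    # e.g. getrandbits(10): bytes = (10 + 7) // 8 == 2, so 16 random bits are
    # read from os.urandom() and the low 16 - 10 == 6 bits are shifted away,
    # leaving a uniform 10-bit value.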
def _stub(self, *args, **kwds):
"Stub method. Not used for a system random number generator."
return None
seed = jumpahead = _stub
def _notimplemented(self, *args, **kwds):
"Method should not be called for a system random number generator."
raise NotImplementedError('System entropy source does not have state.')
getstate = setstate = _notimplemented
## -------------------- test program --------------------
def _test_generator(n, func, args):
import time
print n, 'times', func.__name__
total = 0.0
sqsum = 0.0
smallest = 1e10
largest = -1e10
t0 = time.time()
for i in range(n):
x = func(*args)
total += x
sqsum = sqsum + x*x
smallest = min(x, smallest)
largest = max(x, largest)
t1 = time.time()
print round(t1-t0, 3), 'sec,',
avg = total/n
stddev = _sqrt(sqsum/n - avg*avg)
print 'avg %g, stddev %g, min %g, max %g' % \
(avg, stddev, smallest, largest)
def _test(N=2000):
_test_generator(N, random, ())
_test_generator(N, normalvariate, (0.0, 1.0))
_test_generator(N, lognormvariate, (0.0, 1.0))
_test_generator(N, vonmisesvariate, (0.0, 1.0))
_test_generator(N, gammavariate, (0.01, 1.0))
_test_generator(N, gammavariate, (0.1, 1.0))
_test_generator(N, gammavariate, (0.1, 2.0))
_test_generator(N, gammavariate, (0.5, 1.0))
_test_generator(N, gammavariate, (0.9, 1.0))
_test_generator(N, gammavariate, (1.0, 1.0))
_test_generator(N, gammavariate, (2.0, 1.0))
_test_generator(N, gammavariate, (20.0, 1.0))
_test_generator(N, gammavariate, (200.0, 1.0))
_test_generator(N, gauss, (0.0, 1.0))
_test_generator(N, betavariate, (3.0, 3.0))
_test_generator(N, triangular, (0.0, 1.0, 1.0/3.0))
# Create one instance, seeded from current time, and export its methods
# as module-level functions. The functions share state across all uses
#(both in the user's code and in the Python libraries), but that's fine
# for most programs and is easier for the casual user than making them
# instantiate their own Random() instance.
_inst = Random()
seed = _inst.seed
random = _inst.random
uniform = _inst.uniform
triangular = _inst.triangular
randint = _inst.randint
choice = _inst.choice
randrange = _inst.randrange
sample = _inst.sample
shuffle = _inst.shuffle
normalvariate = _inst.normalvariate
lognormvariate = _inst.lognormvariate
expovariate = _inst.expovariate
vonmisesvariate = _inst.vonmisesvariate
gammavariate = _inst.gammavariate
gauss = _inst.gauss
betavariate = _inst.betavariate
paretovariate = _inst.paretovariate
weibullvariate = _inst.weibullvariate
getstate = _inst.getstate
setstate = _inst.setstate
jumpahead = _inst.jumpahead
getrandbits = _inst.getrandbits
if __name__ == '__main__':
_test()
| DmitryADP/diff_qc750 | vendor/nvidia/tegra/3rdparty/python-support-files/src/Lib/random.py | Python | gpl-2.0 | 31,938 | 0.002536 |
#! /usr/bin/python
"""
Runs GenomeMapper on single-end or paired-end data.
"""
import optparse, os, sys, tempfile
def stop_err( msg ):
sys.stderr.write( "%s\n" % msg )
sys.exit()
def __main__():
#Parse Command Line
parser = optparse.OptionParser()
parser.add_option('', '--threads', dest='threads', help='The number of threads to run')
parser.add_option('', '--input1', dest='input1', help='The (forward or single-end) reads file in Sanger FASTQ format')
parser.add_option('', '--input2', dest='input2', help='The reverse reads file in Sanger FASTQ format')
parser.add_option('', '--output', dest='output', help='The output file')
parser.add_option('', '--paired', dest='paired', help='Whether the data is single- or paired-end')
parser.add_option('', '--genomeSource', dest='genomeSource', help='The type of reference provided')
parser.add_option('', '--ref', dest='ref', help='The reference genome to use or index')
parser.add_option('', '--indexSettings', dest='index_settings', help='Whether or not indexing options are to be set')
parser.add_option('', '--params', dest='params', help='Whether to use default or specified parameters')
parser.add_option('', '--seedlength', dest='seedlength', help='GenomeMapper Index Seed Length')
parser.add_option('', '--alignseedlength', dest='alignseedlength', help='GenomeMapper Alignment Seed Length')
parser.add_option('', '--format', dest='format', help='Output format (bed or shore)')
parser.add_option('', '--maxmismatches', dest='maxmismatches', help='Maximal number of mismatches')
parser.add_option('', '--maxgaps', dest='maxgaps', help='Maximal number of gaps')
parser.add_option('', '--maxedits', dest='maxedits', help='Maximal number of edit operations')
parser.add_option('', '--reportall', dest='reportall', help='Report all hits')
(options, args) = parser.parse_args()
# index if necessary
if options.genomeSource == 'history':
# set up commands
if options.index_settings =='index_pre_set':
indexing_cmds = ''
else:
try:
indexing_cmds = '%s ' % \
                            (('', '-s %s' % options.seedlength)[options.seedlength not in (None, 'None') and int(options.seedlength) >= 1])
except ValueError:
indexing_cmds = ''
# make temp directory for placement of indices and copy reference file there
tmp_dir = tempfile.gettempdir()
try:
os.system('cp %s %s' % (options.ref, tmp_dir))
except Exception, erf:
stop_err('Error creating temp directory for indexing purposes\n' + str(erf))
options.ref = os.path.join(tmp_dir, os.path.split(options.ref)[1])
cmd1 = 'gmindex -v -i %s %s' % (options.ref, indexing_cmds)
try:
os.system(cmd1)
except Exception, erf:
stop_err('Error indexing reference sequence\n' + str(erf))
if options.params == 'pre_set':
aligning_cmds = '-v '
else:
try:
print options
aligning_cmds = '%s %s %s %s %s %s -v ' % \
(('','-f %s' % options.format)[options.format!='None'],
('','-a')[options.reportall!='None'],
('','-M %s' % options.maxmismatches)[options.maxmismatches!='None'],
('','-G %s' % options.maxgaps)[options.maxgaps!='None'],
('','-E %s' % options.maxedits)[options.maxedits!='None'],
('','-l %s' % options.alignseedlength)[options.alignseedlength!='None'])
except ValueError, erf:
stop_err('Something is wrong with the alignment parameters and the alignment could not be run\n' + str(erf))
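    # The ('', '-X %s' % ...)[condition] expressions above index a 2-tuple with
    # a boolean, e.g. ('', '-M 3')[True] -> '-M 3' and ('', '-M 3')[False] -> '',
    # so each flag is added to the command line only when the corresponding
    # option was actually supplied (i.e. is not the string 'None').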
# prepare actual aligning commands
if options.paired == 'paired':
print "Sorry, paired end alignments are not implemented yet"
return
#cmd2 = 'genomemapper %s %s -1 %s -2 %s > %s ' % (options.ref, options.input1, options.input2, options.output)
else:
cmd2 = 'genomemapper %s -i %s -q %s -o %s ' % (aligning_cmds, options.ref, options.input1, options.output)
# align
try:
print cmd2
os.system(cmd2)
except Exception, erf:
stop_err("Error aligning sequence\n" + str(erf))
if __name__=="__main__": __main__()
| vipints/oqtans | oqtans_tools/PALMapper/0.5/galaxy/genomemapper_wrapper.py | Python | bsd-3-clause | 4,421 | 0.013798 |
# Configure SP Shasta CTC machine support
#
# Extensively uses the jmri.jmrit.ussctc package capabilities
#
# Author: Bob Jacobsen, copyright 2016-17
#
import jmri
from jmri.jmrit.ussctc import *
import jarray
import java.util
def arrayList(contents) :
retval = java.util.ArrayList()
for item in contents :
retval.add(item)
return retval
# When the call-on turnout is set THROWN, show restricting on signals
class ForceRestrictingWhenCallOn(java.beans.PropertyChangeListener):
def set(self, callOnName, groupList) :
self.callon = turnouts.getTurnout(callOnName)
self.groupNames = groupList
# set up listeners
self.callon.addPropertyChangeListener(self)
for name in self.groupNames :
signals.getSignalHead(name).addPropertyChangeListener(self) # need to fix it if held
return
def propertyChange(self, event):
if (event.source == self.callon) :
if (self.callon.state == THROWN) :
for name in self.groupNames :
logic = jmri.jmrit.blockboss.BlockBossLogic.getExisting(signals.getSignalHead(name))
print "Setting logic", logic
logic.setRestrictingSpeed1(True)
logic.setRestrictingSpeed2(True)
signals.getSignalHead(name).setHeld(False) # sets output too
else :
for name in self.groupNames :
logic = jmri.jmrit.blockboss.BlockBossLogic.getExisting(signals.getSignalHead(name))
logic.setRestrictingSpeed1(False)
logic.setRestrictingSpeed2(False)
signals.getSignalHead(name).setHeld(True) # sets output too
else :
if (event.propertyName == "Held") :
if (self.callon.state == THROWN and event.source.held != False) : event.source.setHeld(False)
return
class ConfigureCtcControlLogic(jmri.jmrit.automat.AbstractAutomaton) :
def init(self):
return
def handle(self):
# delay long enough for debug init to run if present, polling to start, turnouts to be in place, plus a bit more
self.waitMsec(1000+8000+2000+500) # time is in milliseconds
print "ConfigureCtcControlLogic starts"
# The code line is shared by all Stations
codeline = CodeLine("CTC Code Indication Driver", "CTC Code Send Driver", "IT CODE MOD 1", "IT CODE MOD 2", "IT CODE MOD 3", "IT CODE MOD 4")
bell = PhysicalBell("CTC Bell")
vbell = VetoedBell("CTC Bell Cutout", bell)
# ===== Set up Station 1/2 ===== (TODO: NOT FULLY CONFIGURED)
button = CodeButton("CTC 02 Code A", "CTC 02 Code")
station = Station("1/2", codeline, button)
CombineTurnouts().set("CTC TC 01", ["CTC TC 01A"]) # right-side repeater
CombineTurnouts().set("CTC TC 02", ["CTC TC 02A"]) # right-side repeater
station.add(TrackCircuitSection("TC 01","CTC TC 01", station)) # -1 main
station.add(TrackCircuitSection("TC 02","CTC TC 02", station)) # -3 siding
station.add(TrackCircuitSection("TC 03","CTC TC 03", station)) # 1 OS
turnout1 = TurnoutSection("Helix Level 2 B", "CTC 01 N", "CTC 01 R", "CTC 01 N", "CTC 01 R", station)
station.add(turnout1)
# ===== Set up Station 3/4 ===== (TODO: NOT FULLY CONFIGURED)
button = CodeButton("CTC 04 Code A", "CTC 04 Code")
station = Station("3/4", codeline, button)
station.add(TrackCircuitSection("TC 04","CTC TC 04", station)) # 3 OS
turnout3 = TurnoutSection("Helix Level 2 A", "CTC 03 N", "CTC 03 R", "CTC 03 N", "CTC 03 R", station)
station.add(turnout3)
# ===== Set up Station 5/6 ===== (TODO: NOT FULLY CONFIGURED)
button = CodeButton("CTC 06 Code A", "CTC 06 Code")
station = Station("5/6", codeline, button)
station.add(TrackCircuitSection("TC 05","CTC TC 05", station)) # 1-5 siding
station.add(TrackCircuitSection("TC 06","CTC TC 06", station)) # 3-5 main
station.add(TrackCircuitSection("TC 07","CTC TC 07", station, bell)) # 5 OS
turnout5 = TurnoutSection("Helix Level 1", "CTC 05 N", "CTC 05 R", "CTC 05 N", "CTC 05 R", station)
station.add(turnout5)
station.add(MaintainerCallSection("CTC 06 Call","MC 6", station))
# ===== Set up Station 7/8 =====
button = CodeButton("CTC 08 Code A", "CTC 08 Code")
station = Station("7/8", codeline, button)
station.add(TrackCircuitSection("TC 08","CTC TC 08", station)) # 5-7 track
station.add(TrackCircuitSection("TC 09","CTC TC 09", station, bell)) # Redding approach
station.add(TrackCircuitSection("TC 10","CTC TC 10", station, vbell)) # OS 7
turnout7 = TurnoutSection("TO 07", "CTC 07 N", "CTC 07 R", "CTC 07 N", "CTC 07 R", station)
station.add(turnout7)
rightward = arrayList(["08 R from Helix", "08 R from Staging"])
leftward = arrayList(["08 L Upper", "08 L Lower"])
signal = SignalHeadSection(rightward, leftward, "CTC 08 L", "CTC 08 C", "CTC 08 R", "CTC 08 L", "CTC 08 R", station);
station.add(signal)
occupancyLock = OccupancyLock("TC 10")
routeLock = RouteLock(["08 R from Helix", "08 R from Staging", "08 L Upper", "08 L Lower"]);
turnout7.addLocks(java.util.Arrays.asList([occupancyLock, routeLock, TimeLock(signal)]));
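        # The same locking pattern repeats for every interlocked turnout below:
        # roughly, OccupancyLock keeps the switch from moving while its OS
        # circuit is occupied, RouteLock while any of the listed signals is
        # cleared over it, and TimeLock while running time is held after a
        # signal has been taken away, e.g.
        #     turnoutN.addLocks(java.util.Arrays.asList(
        #         [occupancyLock, routeLock, TimeLock(signal)]))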
# ===== Set up Station 9/10 =====
button = CodeButton("CTC 10 Code A", "CTC 10 Code")
station = Station("9/10", codeline, button)
station.add(TrackCircuitSection("TC 11","CTC TC 11", station)) # 7-9
station.add(TrackCircuitSection("TC 12","CTC TC 12", station, vbell)) # OS 9
turnout9 = TurnoutSection("TO 09", "CTC 09 N", "CTC 09 R", "CTC 09 N", "CTC 09 R", station)
station.add(turnout9)
rightward = arrayList(["10 R Upper", "10 R Lower"])
leftward = arrayList(["10 L Main", "10 L Siding"])
signal = SignalHeadSection(rightward, leftward, "CTC 10 L", "CTC 10 C", "CTC 10 R", "CTC 10 L", "CTC 10 R", station);
station.add(signal)
occupancyLock = OccupancyLock("TC 12")
routeLock = RouteLock(["10 R Upper", "10 R Lower", "10 L Main", "10 L Siding"]);
turnout9.addLocks(java.util.Arrays.asList([occupancyLock, routeLock, TimeLock(signal)]));
# ===== Set up Station 13/14/16 =====
button = CodeButton("CTC 14 Code A", "CTC 14 Code")
station = Station("13/14/16", codeline, button)
station.add(TrackCircuitSection("TC 13","CTC TC 13", station)) # 9-13 siding
station.add(TrackCircuitSection("TC 14","CTC TC 14", station)) # 9-13 main
station.add(TrackCircuitSection("TC 15","CTC TC 15", station, vbell)) # OS 13 siding
station.add(TrackCircuitSection("TC 16","CTC TC 16", station, vbell)) # OS 13 main
turnout13 = TurnoutSection("TO 13", "CTC 13 N", "CTC 13 R", "CTC 13 N", "CTC 13 R", station)
station.add(turnout13)
rightward = arrayList(["14 R Main"])
leftward = arrayList(["14 L Main"])
signal1 = SignalHeadSection(rightward, leftward, "CTC 14 L", "CTC 14 C", "CTC 14 R", "CTC 14 L", "CTC 14 R", station);
station.add(signal1)
rightward = arrayList(["16 R Siding"])
leftward = arrayList(["16 L Bridge Upper", "16 L Bridge Lower", "16 L Siding"])
signal2 = SignalHeadSection(rightward, leftward, "CTC 16 L", "CTC 16 C", "CTC 16 R", "CTC 16 L", "CTC 16 R", station);
station.add(signal2)
occupancyLock = CombinedLock([OccupancyLock("TC 15"), OccupancyLock("TC 16")])
routeLock = RouteLock(["14 R Main", "16 R Siding", "14 L Main", "16 L Bridge Upper", "16 L Bridge Lower", "16 L Siding"]);
turnout13.addLocks(java.util.Arrays.asList([occupancyLock, routeLock, TimeLock(signal1), TimeLock(signal2)]));
# ===== Set up Station 17/18/20 =====
button = CodeButton("CTC 18 Code A", "CTC 18 Code")
station = Station("17/18/20", codeline, button)
station.add(TrackCircuitSection("TC 17","CTC TC 17", station)) # 13-17 main
station.add(TrackCircuitSection("TC 18","CTC TC 18", station)) # 13-17 siding
station.add(TrackCircuitSection("TC 19","CTC TC 19", station, vbell)) # OS 17 main
station.add(TrackCircuitSection("TC 20","CTC TC 20", station, vbell)) # OS 17 siding
turnout17 = TurnoutSection("TO 17", "CTC 17 N", "CTC 17 R", "CTC 17 N", "CTC 17 R", station)
station.add(turnout17)
rightward = arrayList(["18 R"])
leftward = arrayList([])
signal1 = SignalHeadSection(rightward, leftward, "CTC 18 L", "CTC 18 C", "CTC 18 R", "CTC 18 L", "CTC 18 R", station);
station.add(signal1)
rightward = arrayList(["20 R Upper", "20 R Lower"])
leftward = arrayList([])
signal2 = SignalHeadSection(rightward, leftward, "CTC 20 L", "CTC 20 C", "CTC 20 R", "CTC 20 L", "CTC 20 R", station);
station.add(signal2)
station.add(MaintainerCallSection("CTC 18 Call","MC 18", station))
occupancyLock = CombinedLock([OccupancyLock("TC 19"), OccupancyLock("TC 20")])
routeLock = RouteLock(["20 R Upper", "20 R Lower", "18 R"]);
turnout17.addLocks(java.util.Arrays.asList([occupancyLock, routeLock, TimeLock(signal1), TimeLock(signal2)]));
# ===== Set up Station 21/22 =====
button = CodeButton("CTC 22 Code A", "CTC 22 Code")
station = Station("21/22", codeline, button)
station.add(TrackCircuitSection("TC 21","CTC TC 21", station)) # siding 17 - xover
station.add(TrackCircuitSection("TC 22","CTC TC 22", station)) # 17-21 main
station.add(TrackCircuitSection("TC 23","CTC TC 23", station)) # xover - 21 siding
station.add(TrackCircuitSection("TC 24","CTC TC 24", station, vbell)) # OS 21
turnout21 = TurnoutSection("TO 21", "CTC 21 N", "CTC 21 R", "CTC 21 N", "CTC 21 R", station)
station.add(turnout21)
rightward = arrayList(["22 R Main", "22 R Siding"])
leftward = arrayList(["22 L Upper", "22 L Lower"])
signal = SignalHeadSection(rightward, leftward, "CTC 22 L", "CTC 22 C", "CTC 22 R", "CTC 22 L", "CTC 22 R", station);
station.add(signal)
occupancyLock = OccupancyLock("TC 24")
routeLock = RouteLock(["22 R Main", "22 R Siding", "22 L Upper", "22 L Lower"]);
turnout21.addLocks(java.util.Arrays.asList([occupancyLock, routeLock, TimeLock(signal)]));
# ===== Set up Station 25/26 =====
button = CodeButton("CTC 26 Code A", "CTC 26 Code")
station = Station("25/26", codeline, button)
CombineSensors().set("TC 25", ["TC 25 Lower (bridge)","TC 25 Upper (shasta)"])
station.add(TrackCircuitSection("TC 25","CTC TC 25", station)) # 21-25
station.add(TrackCircuitSection("TC 26","CTC TC 26", station, vbell)) # OS 25
turnout25 = TurnoutSection("TO 25", "CTC 25 N", "CTC 25 R", "CTC 25 N", "CTC 25 R", station)
station.add(turnout25)
rightward = arrayList(["26 R Upper", "26 R Lower"])
leftward = arrayList(["26 L Main", "26 L Siding"])
signal26 = SignalHeadSection(rightward, leftward, "CTC 26 L", "CTC 26 C", "CTC 26 R", "CTC 26 L", "CTC 26 R", station);
station.add(signal26)
station.add(MaintainerCallSection("CTC 26 Call","MC 26/28", station))
occupancyLock = OccupancyLock("TC 26")
routeLock = RouteLock(["26 R Upper", "26 R Lower", "26 L Main", "26 L Siding"]);
turnout25.addLocks(java.util.Arrays.asList([occupancyLock, routeLock, TimeLock(signal26)]));
# ===== Set up Station 27/28 =====
button = CodeButton("CTC 28 Code A", "CTC 28 Code")
station = Station("27/28", codeline, button)
station.add(TrackCircuitSection("TC 27","CTC TC 27", station)) # 25-27 main
station.add(TrackCircuitSection("TC 28","CTC TC 28", station)) # 25-27 siding
station.add(TrackCircuitSection("TC 29","CTC TC 29", station, vbell)) # OS 27
turnout27 = TurnoutSection("TO 27", "CTC 27 N", "CTC 27 R", "CTC 27 N", "CTC 27 R", station)
station.add(turnout27)
rightward = arrayList(["28 R Main", "28 R Siding"])
leftward = arrayList(["28 L Upper", "28 L Lower"])
signal28 = SignalHeadSection(rightward, leftward, "CTC 28 L", "CTC 28 C", "CTC 28 R", "CTC 28 L", "CTC 28 R", station);
station.add(signal28)
station.add(MaintainerCallSection("CTC 28 Call","MC 26/28", station))
occupancyLock = OccupancyLock("TC 29")
routeLock = RouteLock(["28 R Main", "28 R Siding", "28 L Upper", "28 L Lower"]);
turnout27.addLocks(java.util.Arrays.asList([occupancyLock, routeLock, TimeLock(signal28)]));
viaMain = TrafficLock(signal28, SignalHeadSection.CODE_LEFT, [jmri.BeanSetting(turnouts.getTurnout("TO 25"), THROWN), jmri.BeanSetting(turnouts.getTurnout("TO 27"), THROWN)])
viaSiding = TrafficLock(signal28, SignalHeadSection.CODE_LEFT, [jmri.BeanSetting(turnouts.getTurnout("TO 25"), CLOSED), jmri.BeanSetting(turnouts.getTurnout("TO 27"), CLOSED)])
signal26.addRightwardLocks([viaMain,viaSiding])
viaMain = TrafficLock(signal26, SignalHeadSection.CODE_RIGHT, [jmri.BeanSetting(turnouts.getTurnout("TO 25"), THROWN), jmri.BeanSetting(turnouts.getTurnout("TO 27"), THROWN)])
viaSiding = TrafficLock(signal26, SignalHeadSection.CODE_RIGHT, [jmri.BeanSetting(turnouts.getTurnout("TO 25"), CLOSED), jmri.BeanSetting(turnouts.getTurnout("TO 27"), CLOSED)])
signal28.addLeftwardLocks([viaMain,viaSiding])
# ===== Set up Station 29/30 =====
button = CodeButton("CTC 30 Code A", "CTC 30 Code")
station = Station("29/30", codeline, button)
station.add(TrackCircuitSection("TC 30","CTC TC 30", station)) # 27-29
station.add(TrackCircuitSection("TC 31","CTC TC 31", station, vbell)) # OS 29
turnout29 = TurnoutSection("TO 29", "CTC 29 N", "CTC 29 R", "CTC 29 N", "CTC 29 R", station)
station.add(turnout29)
rightward = arrayList(["30 R Upper", "30 R Lower"])
leftward = arrayList(["30 L Main", "30 L Siding"])
signal30 = SignalHeadSection(rightward, leftward, "CTC 30 L", "CTC 30 C", "CTC 30 R", "CTC 30 L", "CTC 30 R", station);
station.add(signal30)
station.add(MaintainerCallSection("CTC 30 Call","MC 30", station))
occupancyLock = OccupancyLock("TC 31")
routeLock = RouteLock(["30 R Upper", "30 R Lower", "30 L Main", "30 L Siding"]);
turnout29.addLocks(java.util.Arrays.asList([occupancyLock, routeLock, TimeLock(signal30)]));
signal28.addRightwardLocks(java.util.Arrays.asList([TrafficLock(signal30, SignalHeadSection.CODE_LEFT)]))
signal30.addLeftwardLocks(java.util.Arrays.asList([TrafficLock(signal28, SignalHeadSection.CODE_RIGHT)]))
# ===== Set up Station 31/32/34 =====
button = CodeButton("CTC 32 Code A", "CTC 32 Code")
station = Station("31/23/34", codeline, button)
station.add(TrackCircuitSection("TC 32","CTC TC 32", station)) # 29-31 main
station.add(TrackCircuitSection("TC 33","CTC TC 33", station)) # 29-31 siding
station.add(TrackCircuitSection("TC 34","CTC TC 34", station, vbell)) # OS 31 west
station.add(TrackCircuitSection("TC 35","CTC TC 35", station, vbell)) # OS 31 west
turnouts.getTurnout("TO 31").setCommandedState(CLOSED)
CombineTurnouts().set("TO 31", ["TO 31A","TO 31B"])
turnout31 = TurnoutSection("TO 31", "CTC 31 N", "CTC 31 R", "CTC 31 N", "CTC 31 R", station)
station.add(turnout31)
rightward = arrayList(["32 R Lower", "32 R Upper"])
leftward = arrayList(["32 L Siding"])
signal1 = SignalHeadSection(rightward, leftward, "CTC 32 L", "CTC 32 C", "CTC 32 R", "CTC 32 L", "CTC 32 R", station);
station.add(signal1)
rightward = arrayList(["34 R Siding"])
leftward = arrayList(["34 L Lower", "34 L Upper"])
signal2 = SignalHeadSection(rightward, leftward, "CTC 34 L", "CTC 34 C", "CTC 34 R", "CTC 34 L", "CTC 34 R", station);
station.add(signal2)
station.add(MaintainerCallSection("CTC 32 Call","MC 32", station))
occupancyLock = CombinedLock([OccupancyLock("TC 34"), OccupancyLock("TC 35")])
routeLock = RouteLock(["32 R Lower", "32 R Upper", "34 R Siding", "34 L Lower", "34 L Upper", "32 L Siding"]);
turnout31.addLocks(java.util.Arrays.asList([occupancyLock, routeLock, TimeLock(signal1), TimeLock(signal2)]));
# ===== Set up Station 35/36/38 =====
button = CodeButton("CTC 36 Code A", "CTC 36 Code")
station = Station("35/36/38", codeline, button)
station.add(TrackCircuitSection("TC 36","CTC TC 36", station)) # 31-35 siding
station.add(TrackCircuitSection("TC 37","CTC TC 37", station)) # 31-35 main
station.add(TrackCircuitSection("TC 38","CTC TC 38", station, vbell)) # OS 35 east
station.add(TrackCircuitSection("TC 101","CTC TC 101", station, vbell)) # OS 35 west
turnout35 = TurnoutSection("TO 35A", "CTC 35 N", "CTC 35 R", "CTC 35 N", "CTC 35 R", station)
station.add(turnout35)
callOnLock = TurnoutLock("Call On Mode 38", CLOSED)
rightward = arrayList(["36 R Azalea Main Upper", "36 R Azalea Main Lower"])
leftward = arrayList(["36 L Azalea Bypass"])
signal1 = SignalHeadSection(rightward, leftward, "CTC 36 L", "CTC 36 C", "CTC 36 R", "CTC 36 L", "CTC 36 R", station);
station.add(signal1)
rightward = arrayList(["38 R Siding"])
leftward = arrayList(["38 L Lower", "38 L Upper"])
signal2 = SignalHeadSection(rightward, leftward, "CTC 38 L", "CTC 38 C", "CTC 38 R", "CTC 38 L", "CTC 38 R", station);
station.add(signal2)
station.add(MaintainerCallSection("CTC 36 Call","MC 36", station))
station.add(MaintainerCallSection("CTC 38 Call","Call On Mode 38", station)) # Internal turnout to hold state: Call On Mode 38
# call-on implementation is via Logix
occupancyLock = CombinedLock([OccupancyLock("TC 38"), OccupancyLock("TC 101")])
routeLock = RouteLock(["36 R Azalea Main Upper", "36 R Azalea Main Lower", "38 R Siding", "38 L Lower", "38 L Upper", "36 L Azalea Bypass"]);
turnout35.addLocks(java.util.Arrays.asList([occupancyLock, routeLock, TimeLock(signal1), TimeLock(signal2), callOnLock]));
# ===== Set up Station 39/40/41/42 =====
button = CodeButton("CTC 40 Code A", "CTC 40 Code")
station = Station("39/40/41/42", codeline, button)
station.add(TrackCircuitSection("TC 39","CTC TC 39", station)) # 35-39 main
station.add(TrackCircuitSection("TC 102","CTC TC 102", station)) # 35-39 siding
station.add(TrackCircuitSection("TC 40","CTC TC 40", station, vbell)) # OS 39 west
station.add(TrackCircuitSection("TC 41","CTC TC 41", station, bell)) # 39 to BB main
station.add(TrackCircuitSection("TC 42","CTC TC 42", station, bell)) # 41 to BB siding
station.add(TrackCircuitSection("TC 43","CTC TC 43", station, bell)) # 41 to Weed
station.add(TrackCircuitSection("TC 103","CTC TC 103", station, vbell)) # OS 39 east
turnouts.getTurnout("TO 39").setCommandedState(CLOSED)
CombineTurnouts().set("TO 39", ["TO 39A","TO 39B"])
turnout39 = TurnoutSection("TO 39", "CTC 39 N", "CTC 39 R", "CTC 39 N", "CTC 39 R", station)
station.add(turnout39)
turnout41 = TurnoutSection("TO 41", "CTC 41 N", "CTC 41 R", "CTC 41 N", "CTC 41 R", station)
station.add(turnout41)
callOnLock = TurnoutLock("Call On Mode 40", CLOSED)
# "40 R 2nd on main" is an ABS signal protecting engineers from running over the back-set turnout at end of bypass
rightward = arrayList(["40 R Upper", "40 R Middle", "40 R Lower"])
leftward = arrayList(["40 L Weed", "40 L Siding"]) # bit of a hack with 42 L Upper, allows traffic over 39
signal1 = SignalHeadSection(rightward, leftward, "CTC 40 L", "CTC 40 C", "CTC 40 R", "CTC 40 L", "CTC 40 R", station);
station.add(signal1)
rightward = arrayList(["42 R Bypass"])
leftward = arrayList(["42 L Black Butte Main Upper", "42 L Black Butte Main Lower"]) # no 42 L Upper as that's traffic over 39 in 40
signal2 = SignalHeadSection(rightward, leftward, "CTC 42 L", "CTC 42 C", "CTC 42 R", "CTC 42 L", "CTC 42 R", station);
station.add(signal2)
station.add(MaintainerCallSection("CTC 40 Call","Call On Mode 40", station)) # Internal turnout to hold state: Call On Mode 40
# call-on implementation is via Logix
station.add(MaintainerCallSection("CTC 42 Call","MC 42", station))
occupancyLock = CombinedLock([OccupancyLock("TC 40"), OccupancyLock("TC 103")])
routeLock = RouteLock(["40 R Upper", "40 R Middle", "40 R Lower", "40 L Weed", "40 L Siding", "42 R Bypass", "42 L Black Butte Main Upper", "42 L Black Butte Main Lower"]);
turnout39.addLocks(java.util.Arrays.asList([occupancyLock, routeLock, TimeLock(signal1), TimeLock(signal2), callOnLock]));
occupancyLock = OccupancyLock("TC 40")
routeLock = RouteLock(["40 L Weed", "40 L Siding"]);
routeLock2 = RouteLock(["40 R Upper", "40 R Middle", "40 R Lower"], [jmri.BeanSetting(turnouts.getTurnout("TO 39"), THROWN)]);
turnout41.addLocks(java.util.Arrays.asList([occupancyLock, routeLock, routeLock2, TimeLock(signal1), TimeLock(signal2), callOnLock]));
# ===== Handle Call Ons =====
# See the ForceFlashing class at top
ForceRestrictingWhenCallOn().set("Call On Mode 38",["38 R Siding","38 L Lower"])
ForceRestrictingWhenCallOn().set("Call On Mode 40",["40 L Weed","40 L Siding"])
# ===== Final Items =====
# set the device timings
execfile(jmri.util.FileUtil.getExternalFilename("preference:SetDurations.py"))
memories.getMemory("IMUSS CTC:CODELINE:1:LOG").setValue('Configuration Done')
print "ConfigureCtcControlLogic done"
ConfigureCtcControlLogic().start() # create one of these, and start it running
| bobjacobsen/SPShasta | userfiles/ConfigureCtcControlLogic.py | Python | gpl-2.0 | 21,226 | 0.015076 |
"""
WSGI config for saleor project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'saleor.settings')
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
from .health_check import health_check
application = health_check(application, '/health/')
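# A typical deployment (sketch only, not part of this project's configuration)
# points any WSGI server at ``saleor.wsgi:application``, for example:
#
#     gunicorn saleor.wsgi:application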
| tfroehlich82/saleor | saleor/wsgi/__init__.py | Python | bsd-3-clause | 1,225 | 0.001633 |
from ._decorators import singleton,\
hide_field
__license__ = "MIT"
__version__ = "0.2.1"
__author__ = "Nikolay Romashchenko"
__maintainer__ = "Nikolay Romashchenko"
__email__ = "[email protected]"
__status__ = "Development"
| nromashchenko/amquery | amquery/utils/decorators/__init__.py | Python | mit | 265 | 0 |
import inspect
import os
import sys
import scapy
# -- Linkcode resolver -----------------------------------------------------
# This is HEAVILY inspired by numpy's
# https://github.com/numpy/numpy/blob/73fe877ff967f279d470b81ad447b9f3056c1335/doc/source/conf.py#L390
# Copyright (c) 2005-2020, NumPy Developers.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of the NumPy Developers nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
def linkcode_resolve(domain, info):
"""
Determine the URL corresponding to Python object
"""
if domain != 'py':
return None
modname = info['module']
fullname = info['fullname']
submod = sys.modules.get(modname)
if submod is None:
return None
obj = submod
for part in fullname.split('.'):
try:
obj = getattr(obj, part)
except Exception:
return None
# strip decorators, which would resolve to the source of the decorator
# possibly an upstream bug in getsourcefile, bpo-1764286
try:
unwrap = inspect.unwrap
except AttributeError:
pass
else:
obj = unwrap(obj)
fn = None
lineno = None
try:
fn = inspect.getsourcefile(obj)
except Exception:
fn = None
if not fn:
return None
try:
source, lineno = inspect.getsourcelines(obj)
except Exception:
lineno = None
fn = os.path.relpath(fn, start=os.path.dirname(scapy.__file__))
if lineno:
linespec = "#L%d-L%d" % (lineno, lineno + len(source) - 1)
else:
linespec = ""
if 'dev' in scapy.__version__:
return "https://github.com/secdev/scapy/blob/master/scapy/%s%s" % (
fn, linespec)
else:
return "https://github.com/secdev/scapy/blob/v%s/scapy/%s%s" % (
scapy.__version__, fn, linespec)
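# Sketch of how a Sphinx conf.py would consume this resolver (the wiring below
# is an assumption about the docs setup, not shown in this file): enable the
# stock ``sphinx.ext.linkcode`` extension and expose ``linkcode_resolve`` at
# conf.py module level, e.g.
#
#     # conf.py
#     extensions = [..., 'sphinx.ext.linkcode']
#     from linkcode_res import linkcode_resolve  # noqa: F401
#
# Sphinx then calls linkcode_resolve(domain, info) for every documented object
# and renders a "[source]" link pointing at the returned GitHub URL.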
| gpotter2/scapy | doc/scapy/_ext/linkcode_res.py | Python | gpl-2.0 | 3,293 | 0.002126 |
#!/usr/bin/env python
"""
Example of a model fitting script.
The user should modify the model
according to the characteristics of
the signal of interest.
Part of the code is taken from the
kmpfit examples:
http://www.astro.rug.nl/software/kapteyn/kmpfittutorial.html#profile-fitting
"""
import numpy as np
import argparse
from lmfit import Model
from scipy.special import wofz
from crrlpy import crrls
from functools import reduce  # not a builtin on Python 3; harmless on Python 2.6+
ln2 = np.log(2)
def voigt(x, y):
# The Voigt function is also the real part of
# w(z) = exp(-z^2) erfc(iz), the complex probability function,
# which is also known as the Faddeeva function. Scipy has
# implemented this function under the name wofz()
z = x + 1j*y
I = wofz(z).real
return I
def Voigt(nu, alphaD, alphaL, nu_0, A, a, b):
"""
The Voigt line shape in terms of its physical parameters
nu: independent variable
alphaD: FWHM of the Gaussian
alphaL: FWHM of the Lorentzian
nu_0: the line center
A: the line area
a, b: background parameters. bkgd = a + b*nu
"""
f = np.sqrt(ln2)
x = (nu - nu_0)/alphaD * f
y = alphaL/alphaD * f
bkgd = a + b*nu
V = A*f/(alphaD*np.sqrt(np.pi)) * voigt(x, y) + bkgd
return V
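# Illustrative call with made-up values that mirror the starting guesses used
# in main() below: a line centred at nu_0 = -47 km/s with a 3 km/s Doppler
# FWHM, a 1 km/s Lorentzian FWHM and no baseline.
#
#     nu = np.linspace(-60, -30, 300)
#     tau = Voigt(nu, alphaD=3.0, alphaL=1.0, nu_0=-47.0, A=-1e-2, a=0.0, b=0.0)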
def funcV(x, p):
# Compose the Voigt line-shape
alphaD, alphaL, nu_0, I, a, b = p
return Voigt(x, alphaD, alphaL, nu_0, I, a, b)
def main(spec, output, plot):
"""
"""
dD = 3 # 3 km/s Doppler FWHM for the lines
data = np.loadtxt(spec)
x = data[:,0]
y = data[:,1]
w = data[:,2]
# Catch NaNs and invalid values:
mask_x = np.ma.masked_equal(x, -9999).mask
mask_y = np.isnan(y)
mask = np.array(reduce(np.logical_or, [mask_x, mask_y]))
mx = x[~mask]
my = y[~mask]
mw = w[~mask]
# Create the model and set the parameters
mod1 = Model(Voigt, prefix='V1_')
pars = mod1.make_params()
#mod2 = Model(Voigt, prefix='V2_')
#pars += mod2.make_params()
mod = mod1 #+ mod2
# Edit the model parameter starting values, conditions, etc...
# Background parameters
pars['V1_a'].set(value=0, expr='', vary=False)
pars['V1_b'].set(value=0, expr='', vary=False)
#pars['V2_a'].set(value=0, expr='', vary=False)
#pars['V2_b'].set(value=0, expr='', vary=False)
# Line center
pars['V1_nu_0'].set(value=-47., vary=True, min=-50, max=-45)
#pars['V2_nu_0'].set(value=-37.6, expr='V1_nu_0+9.4', vary=False)
# Line area
pars['V1_A'].set(value=-1e-2, max=-1e-8)
#pars['V2_A'].set(value=-1e-2, max=-1e-8)
# Line width
pars['V1_alphaD'].set(value=dD, vary=True, min=0.)
#pars['V2_alphaD'].set(value=dD, vary=True, min=0.)
pars['V1_alphaL'].set(value=1, vary=True, min=0, max=250.)
#pars['V2_alphaL'].set(value=1, vary=True, min=0, max=250.)
# Fit the model using a weight
fit = mod.fit(my, pars, nu=mx, weights=mw)
fit1 = Voigt(mx, fit.params['V1_alphaD'].value, fit.params['V1_alphaL'].value,
fit.params['V1_nu_0'].value, fit.params['V1_A'].value, fit.params['V1_a'].value, 0)
#fit2 = Voigt(mx, fit.params['V2_alphaD'].value, fit.params['V2_alphaL'].value,
#fit.params['V2_nu_0'].value, fit.params['V2_A'].value, 0, 0)
fit3 = fit.best_fit
#mody = np.array([fit1, fit2, fit3])
mody = np.array([fit1, fit3])
#modx = np.array([mx, mx, mx])
modx = np.array([mx, mx])
# Disable for now, and check this: http://stackoverflow.com/questions/4931376/generating-matplotlib-graphs-without-a-running-x-server
    # for a possible fix.
crrls.plot_model(mx, my, modx, mody, plot)
np.savetxt(output, np.c_[mx, fit.best_fit])
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('spec', type=str,
help="Spectrum to be fit. (string)")
parser.add_argument('output', type=str,
help="Name of the output file with the best fit model. (string)")
parser.add_argument('plot', type=str,
help="Name of the output figure. (string)")
args = parser.parse_args()
main(args.spec, args.output, args.plot)
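# Hypothetical command-line invocation (file names are placeholders):
#
#     python makemodel.py stack.ascii stack_fit.ascii stack_fit.png
#
# The input spectrum is read with np.loadtxt() and must provide three columns:
# the velocity/frequency axis, the intensity and the fit weights.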
| astrofle/CRRLpy | examples/synth_spec/makemodel.py | Python | mit | 4,292 | 0.011883 |
# -*- coding: utf-8 -*-
from typing import Tuple
from asyncpg import Connection
from starlette.requests import Request
from starlette.responses import JSONResponse, RedirectResponse
from starlette.routing import Route
from qllr.app import App
from qllr.endpoints import Endpoint, HTTPEndpoint
from qllr.templating import templates
from .methods import get_best_match_of_player, get_player_info, get_player_info_mod_date
bp = App()
class PlayerEndpoint(Endpoint):
async def get_last_doc_modified(self, request: Request, con: Connection) -> Tuple:
return await get_player_info_mod_date(
con, request.path_params["steam_id"], request.path_params.get("gametype_id")
)
class PlayerJson(PlayerEndpoint):
async def get_document(self, request: Request, con: Connection):
steam_id = request.path_params["steam_id"]
return JSONResponse(await get_player_info(con, steam_id))
class PlayerHtml(PlayerEndpoint):
async def get_document(self, request: Request, con: Connection):
steam_id = request.path_params["steam_id"]
context = await get_player_info(con, steam_id)
context["request"] = request
context["steam_id"] = str(steam_id)
return templates.TemplateResponse("player_stats.html", context)
class PlayerMatchesDeprecatedRoute(HTTPEndpoint):
async def get(self, request: Request):
return RedirectResponse(
request.url_for(
"MatchesHtml",
steam_id=request.path_params["steam_id"],
page=request.path_params.get("page"),
gametype=request.path_params.get("gametype"),
),
status_code=308,
)
class BestMatchOfPlayerRedirect(PlayerEndpoint):
async def get_document(self, request: Request, con: Connection):
steam_id = request.path_params["steam_id"]
gametype_id = request.path_params["gametype_id"]
match_id = await get_best_match_of_player(con, steam_id, gametype_id)
return RedirectResponse(request.url_for("ScoreboardHtml", match_id=match_id))
routes = [
Route("/{steam_id:int}.json", endpoint=PlayerJson),
Route("/{steam_id:int}", endpoint=PlayerHtml),
Route("/{steam_id:int}/matches", endpoint=PlayerMatchesDeprecatedRoute),
Route("/{steam_id:int}/matches/", endpoint=PlayerMatchesDeprecatedRoute),
Route("/{steam_id:int}/matches/{page:int}/", endpoint=PlayerMatchesDeprecatedRoute),
Route("/{steam_id:int}/matches/{gametype}/", endpoint=PlayerMatchesDeprecatedRoute),
Route(
"/{steam_id:int}/matches/{gametype}/{page:int}/",
endpoint=PlayerMatchesDeprecatedRoute,
),
Route("/{steam_id:int}/best_match/{gametype}", endpoint=BestMatchOfPlayerRedirect),
]
| em92/pickup-rating | qllr/blueprints/player/__init__.py | Python | mit | 2,763 | 0.002533 |
"""Module with Caffe models."""
from django.db import models
from employees.models import Employee
class Caffe(models.Model):
"""Stores one cafe."""
name = models.CharField(max_length=100, unique=True)
city = models.CharField(max_length=100)
street = models.CharField(max_length=100)
# CharField for extra characters like '-'
postal_code = models.CharField(max_length=20)
    # CharFields in case of house numbers like '1A'
building_number = models.CharField(max_length=10)
house_number = models.CharField(max_length=10, blank=True)
created_on = models.TimeField(auto_now_add=True)
creator = models.ForeignKey(Employee,
related_name='my_caffe',
default=None,
blank=False,
null=True)
def __str__(self):
        return '{}, {}'.format(self.name, self.city)
| VirrageS/io-kawiarnie | caffe/caffe/models.py | Python | mit | 931 | 0 |
import unittest
import os.path
import sys
import test.support
from ctypes import *
from ctypes.util import find_library
# On some systems, loading the OpenGL libraries needs the RTLD_GLOBAL mode.
class Test_OpenGL_libs(unittest.TestCase):
@classmethod
def setUpClass(cls):
lib_gl = lib_glu = lib_gle = None
if sys.platform == "win32":
lib_gl = find_library("OpenGL32")
lib_glu = find_library("Glu32")
elif sys.platform == "darwin":
lib_gl = lib_glu = find_library("OpenGL")
else:
lib_gl = find_library("GL")
lib_glu = find_library("GLU")
lib_gle = find_library("gle")
## print, for debugging
if test.support.verbose:
print("OpenGL libraries:")
for item in (("GL", lib_gl),
("GLU", lib_glu),
("gle", lib_gle)):
print("\t", item)
cls.gl = cls.glu = cls.gle = None
if lib_gl:
try:
cls.gl = CDLL(lib_gl, mode=RTLD_GLOBAL)
except OSError:
pass
if lib_glu:
try:
cls.glu = CDLL(lib_glu, RTLD_GLOBAL)
except OSError:
pass
if lib_gle:
try:
cls.gle = CDLL(lib_gle)
except OSError:
pass
@classmethod
def tearDownClass(cls):
cls.gl = cls.glu = cls.gle = None
def test_gl(self):
if self.gl is None:
self.skipTest('lib_gl not available')
self.gl.glClearIndex
def test_glu(self):
if self.glu is None:
self.skipTest('lib_glu not available')
self.glu.gluBeginCurve
def test_gle(self):
if self.gle is None:
self.skipTest('lib_gle not available')
self.gle.gleGetJoinStyle
def test_shell_injection(self):
result = find_library('; echo Hello shell > ' + test.support.TESTFN)
self.assertFalse(os.path.lexists(test.support.TESTFN))
self.assertIsNone(result)
@unittest.skipUnless(sys.platform.startswith('linux'),
'Test only valid for Linux')
class LibPathFindTest(unittest.TestCase):
def test_find_on_libpath(self):
import subprocess
import tempfile
try:
p = subprocess.Popen(['gcc', '--version'], stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL)
out, _ = p.communicate()
except OSError:
raise unittest.SkipTest('gcc, needed for test, not available')
with tempfile.TemporaryDirectory() as d:
# create an empty temporary file
srcname = os.path.join(d, 'dummy.c')
libname = 'py_ctypes_test_dummy'
dstname = os.path.join(d, 'lib%s.so' % libname)
with open(srcname, 'w') as f:
pass
self.assertTrue(os.path.exists(srcname))
# compile the file to a shared library
cmd = ['gcc', '-o', dstname, '--shared',
'-Wl,-soname,lib%s.so' % libname, srcname]
out = subprocess.check_output(cmd)
self.assertTrue(os.path.exists(dstname))
# now check that the .so can't be found (since not in
# LD_LIBRARY_PATH)
self.assertIsNone(find_library(libname))
# now add the location to LD_LIBRARY_PATH
with test.support.EnvironmentVarGuard() as env:
KEY = 'LD_LIBRARY_PATH'
if KEY not in env:
v = d
else:
v = '%s:%s' % (env[KEY], d)
env.set(KEY, v)
# now check that the .so can be found (since in
# LD_LIBRARY_PATH)
self.assertEqual(find_library(libname), 'lib%s.so' % libname)
if __name__ == "__main__":
unittest.main()
| yotchang4s/cafebabepy | src/main/python/ctypes/test/test_find.py | Python | bsd-3-clause | 3,948 | 0.000507 |
# -*- coding: utf-8 -*-
#
# Streaker documentation build configuration file, created by
# sphinx-quickstart on Sun Mar 6 12:34:57 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Streaker'
copyright = u'2016, Aldi Alimucaj'
author = u'Aldi Alimucaj'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.0.1'
# The full version, including alpha/beta/rc tags.
release = u'0.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Streakerdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Streaker.tex', u'Streaker Documentation',
u'Aldi Alimucaj', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'streaker', u'Streaker Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Streaker', u'Streaker Documentation',
author, 'Streaker', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
#epub_basename = project
# The HTML theme for the epub output. Since the default themes are not
# optimized for small screen space, using the same theme for HTML and epub
# output is usually not wise. This defaults to 'epub', a theme designed to save
# visual space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
| aldialimucaj/Streaker | docs/source/conf.py | Python | mit | 11,449 | 0.006376 |
import re
import yaml
import subprocess
import os
import datetime
from . import AbstractCamera
from ..utils.logger import has_logger
from ..utils.config import load_config
from ..utils.indi import PanIndi
@has_logger
class Camera(AbstractCamera):
def __init__(self, device_name, client=PanIndi(), config=dict(), *args, **kwargs):
assert client.devices[device_name] is not None
super().__init__(config=config, *args, **kwargs)
self.client = client
self.name = device_name
self.device = client.devices[device_name]
self.last_start_time = None
def connect(self):
'''
        Connect to the camera through the INDI client, enable BLOB transfers
        for image data and then apply the default settings via init().
'''
self.logger.info('Connecting to camera')
# connect to device
self.client.connectDevice(self.device.getDeviceName())
self.client.connectDevice(self.device.getDeviceName())
# set BLOB mode to BLOB_ALSO
self.client.setBLOBMode(1, self.name, None)
self.logger.info("Connected to camera")
self.init()
def init(self):
self.logger.info("Setting defaults for camera")
self.client.get_property_value(self.name, 'UPLOAD_MODE')
# self.client.sendNewText(self.name, 'UPLOAD_MODE', 'Local', 'On')
self.client.sendNewText(self.name, 'CCD_ISO', '100', 'On')
# result = self.set('Auto Power Off', 0) # Don't power off
# result = self.set('/main/settings/reviewtime', 0) # Screen off
# result = self.set('/main/settings/capturetarget', 1) # SD Card
# result = self.set('/main/settings/ownername', 'Project PANOPTES')
# result = self.set('/main/settings/copyright', 'Project PANOPTES 2015')
#
# result = self.set('/main/status/lensname', 'Rokinon 85mm')
#
# result = self.set('/main/imgsettings/imageformat', 9) # RAW
# result = self.set('/main/imgsettings/imageformatsd', 9) # RAW
# result = self.set('/main/imgsettings/imageformatcf', 9) # RAW
# result = self.set('/main/imgsettings/iso', 1) # ISO 100
# result = self.set('/main/imgsettings/colorspace', 0) # sRGB
#
# result = self.set('/main/capturesettings/focusmode', 0) # Manual
# result = self.set('/main/capturesettings/autoexposuremode', 3) # 3 - Manual; 4 - Bulb
# result = self.set('/main/capturesettings/drivemode', 0) # Single exposure
# result = self.set('/main/capturesettings/picturestyle', 1) # Standard
#
# result = self.set('/main/capturesettings/shutterspeed', 0) # Bulb
#
# result = self.set('/main/actions/syncdatetime', 1) # Sync date and time to computer
# result = self.set('/main/actions/uilock', 1) # Don't let the UI change
#
# # Get Camera Properties
# self.get_serial_number()
# -------------------------------------------------------------------------
# Generic Panoptes Camera Methods
# -------------------------------------------------------------------------
def start_cooling(self):
'''
This does nothing for a Canon DSLR as it does not have cooling.
'''
self.logger.info('No camera cooling available')
self.cooling = True
def stop_cooling(self):
'''
This does nothing for a Canon DSLR as it does not have cooling.
'''
self.logger.info('No camera cooling available')
self.cooling = False
def is_exposing(self):
'''
'''
pass
# -------------------------------------------------------------------------
# Actions Specific to Canon / gphoto
# -------------------------------------------------------------------------
def get_serial_number(self):
''' Gets the 'EOS Serial Number' property
Populates the self.serial_number property
Returns:
str: The serial number
'''
self.serial_number = self.get('Serial Number')
return self.serial_number
def get_iso(self):
'''
Queries the camera for the ISO setting and populates the self.iso
property with a string containing the ISO speed.
'''
self.iso = self.get('ISO Speed')
return self.iso
def set_iso(self, iso):
'''
Sets the ISO speed of the camera after checking that the input value (a
string or in) is in the list of allowed values in the self.iso_options
dictionary.
'''
if not iso:
iso = 400
print(iso)
self.get_iso()
self.set('ISO Speed', iso)
def get_model(self):
'''
Gets the Camera Model string from the camera and populates the
self.model property.
'''
self.model = self.get('Camera Model')
return self.model
def get_shutter_count(self):
'''
Gets the shutter count value and populates the self.shutter_count
property.
'''
self.shutter_count = self.get('Shutter Counter')
return self.shutter_count
def construct_filename(self):
'''
Use the filename_pattern from the camera config file to construct the
filename for an image from this camera
'''
if self.last_start_time:
filename = self.last_start_time.strftime('image_%Y%m%dat%H%M%S.cr2')
else:
            filename = 'image.cr2'
return filename
def take_exposure(self, exptime=5):
""" Take an exposure """
self.logger.info("<<<<<<<< Exposure >>>>>>>>>")
self.logger.info('Taking {} second exposure'.format(exptime))
self.last_start_time = datetime.datetime.now()
#get current exposure time
exp = self.device.getNumber("CCD_EXPOSURE")
# set exposure time to 5 seconds
exp[0].value = exptime
# send new exposure time to server/device
self.client.sendNewNumber(exp)
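# Minimal usage sketch. The import path is inferred from the relative imports
# above and the device name is only an example; an INDI server exporting the
# camera is assumed to be running already.
#
#     from panoptes.utils.indi import PanIndi
#     cam = Camera('Canon DSLR EOS 600D', client=PanIndi())
#     cam.connect()
#     cam.take_exposure(exptime=5)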
# -----------------------------------------------------------------------------
# Functions
# -----------------------------------------------------------------------------
| fmin2958/POCS | panoptes/camera/canon_indi.py | Python | mit | 6,347 | 0.002521 |
# -*- coding: utf-8 -*-
'''
Created on 22 Jan 2013
@author: tedlaz
'''
from PyQt4 import QtGui, QtCore
from gui import ui_pro
class dlg(QtGui.QDialog):
def __init__(self, args=None, parent=None):
super(dlg, self).__init__(parent)
self.ui = ui_pro.Ui_Dialog()
self.ui.setupUi(self)
self.makeConnections()
if parent:
self.db = parent.db
else:
self.db = ''
def makeConnections(self):
QtCore.QObject.connect(self.ui.b_save, QtCore.SIGNAL("clicked()"),self.saveToDb)
def saveToDb(self):
from utils.dbutils import commitToDb
sql = "INSERT INTO m12_pro(prod,fpr_id,coy_id,eid_id,proy,aptyp_id,apod) VALUES('%s',%s,%s,%s,%s,%s,%s)"
ar = []
ar.append(self.ui.le_prod.text())
ar.append(self.ui.le_fpr_id.text())
ar.append(self.ui.le_coy_id.text())
ar.append(self.ui.le_eid_id.text())
ar.append(self.ui.le_proy.text())
ar.append(self.ui.le_aptyp_id.text())
ar.append(self.ui.le_apod.text())
if self.db:
try:
noId = commitToDb(sql % tuple(ar),self.db)
QtGui.QMessageBox.warning(self,u'Επιτυχής αποθήκευση',u'Η εγγραφή αποθηκεύτηκε με αριθμό : %s' % noId)
#self.ui.le_id.setText(noId)
except Exception:
QtGui.QMessageBox.warning(self,u'Λάθος κατά την αποθήκευση',u'Υπάρχει ήδη φυσικό πρόσωπο με αυτά τα στοιχεία')
else:
QtGui.QMessageBox.critical(self,u'Λάθος !!!',u'Δεν υπάρχει σύνδεση με Βάση Δεδομένων')
if __name__ == '__main__':
import sys
app = QtGui.QApplication(sys.argv)
form = dlg(sys.argv)
form.show()
    app.exec_()
| tedlaz/pyted | misthodosia/m13/f_pro.py | Python | gpl-3.0 | 1,976 | 0.013122 |
"""
:mod:`Selectors` -- selection methods module
==============================================================
This module have the *selection methods*, like roulette wheel, tournament, ranking, etc.
"""
import random
import Consts
def GRankSelector(population, **args):
""" The Rank Selector - This selector will pick the best individual of
the population every time.
"""
count = 0
if args["popID"] != GRankSelector.cachePopID:
best_fitness = population.bestFitness().fitness
for index in xrange(1, len(population.internalPop)):
if population[index].fitness == best_fitness:
count += 1
GRankSelector.cachePopID = args["popID"]
GRankSelector.cacheCount = count
else:
count = GRankSelector.cacheCount
return population[random.randint(0, count)]
GRankSelector.cachePopID = None
GRankSelector.cacheCount = None
def GUniformSelector(population, **args):
""" The Uniform Selector """
return population[random.randint(0, len(population) - 1)]
def GTournamentSelector(population, **args):
""" The Tournament Selector
It accepts the *tournamentPool* population parameter.
.. note::
the Tournament Selector uses the Roulette Wheel to
pick individuals for the pool
"""
choosen = None
poolSize = population.getParam("tournamentPool", Consts.CDefTournamentPoolSize)
tournament_pool = [GRouletteWheel(population, **args) for i in xrange(poolSize)]
choosen = min(tournament_pool, key=lambda ind: ind.fitness)
return choosen
def GTournamentSelectorAlternative(population, **args):
""" The alternative Tournament Selector
This Tournament Selector don't uses the Roulette Wheel
It accepts the *tournamentPool* population parameter.
"""
pool_size = population.getParam("tournamentPool", Consts.CDefTournamentPoolSize)
len_pop = len(population)
tournament_pool = [population[random.randint(0, len_pop - 1)] for i in xrange(pool_size)]
choosen = min(tournament_pool, key=lambda ind: ind.fitness)
return choosen
def GRouletteWheel(population, **args):
""" The Roulette Wheel selector """
psum = None
if args["popID"] != GRouletteWheel.cachePopID:
GRouletteWheel.cachePopID = args["popID"]
psum = GRouletteWheel_PrepareWheel(population)
GRouletteWheel.cacheWheel = psum
else:
psum = GRouletteWheel.cacheWheel
cutoff = random.random()
lower = 0
upper = len(population) - 1
while(upper >= lower):
i = lower + ((upper - lower) / 2)
if psum[i] > cutoff:
upper = i - 1
else:
lower = i + 1
lower = min(len(population) - 1, lower)
lower = max(0, lower)
return population.bestFitness(lower)
GRouletteWheel.cachePopID = None
GRouletteWheel.cacheWheel = None
def GRouletteWheel_PrepareWheel(population):
""" A preparation for Roulette Wheel selection """
len_pop = len(population)
psum = [i for i in xrange(len_pop)]
population.statistics()
pop_fitMax = population.stats["fitMax"]
pop_fitMin = population.stats["fitMin"]
if pop_fitMax == pop_fitMin:
for index in xrange(len_pop):
psum[index] = (index + 1) / float(len_pop)
elif (pop_fitMax > 0 and pop_fitMin >= 0) or (pop_fitMax <= 0 and pop_fitMin < 0):
population.sort()
psum[0] = -population[0].fitness + pop_fitMax + pop_fitMin
for i in xrange(1, len_pop):
psum[i] = -population[i].fitness + pop_fitMax + pop_fitMin + psum[i - 1]
for i in xrange(len_pop):
psum[i] /= float(psum[len_pop - 1])
return psum
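# Calling convention (inferred from the selectors above rather than documented
# elsewhere in this module): the GA engine passes the current population plus
# keyword arguments, notably a ``popID`` that changes once per generation so
# the per-population caches in GRankSelector and GRouletteWheel get refreshed:
#
#     winner = GTournamentSelector(population, popID=current_generation)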
| UdeM-LBIT/GAPol | lib/ga/evolve/Selectors.py | Python | gpl-3.0 | 3,696 | 0.0046 |
#!/usr/bin/env python
"""
crate_anon/linkage/bulk_hash.py
===============================================================================
Copyright (C) 2015-2021 Rudolf Cardinal ([email protected]).
This file is part of CRATE.
CRATE is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CRATE is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CRATE. If not, see <https://www.gnu.org/licenses/>.
===============================================================================
Tool to hash multiple IDs from the command line.
Test code to look at different types of digest:
.. code-block:: python
import hashlib
import hmac
msg = "This is an ex-parrot!"
key = "voom"
key_bytes = str(key).encode('utf-8')
msg_bytes = str(msg).encode('utf-8')
digestmod = hashlib.sha256
hmac_obj = hmac.new(key=key_bytes, msg=msg_bytes, digestmod=digestmod)
# These are the two default kinds of digest:
print(hmac_obj.digest()) # 8-bit binary
print(hmac_obj.hexdigest()) # hexadecimal
# Hex carries 4 bits per character. There are other possibilities,
# notably:
# - Base64 with 6 bits per character;
# - Base32 with 5 bits per character.
"""
import argparse
import logging
from typing import Optional, TextIO
from cardinal_pythonlib.file_io import (
gen_noncomment_lines,
smart_open,
writeline_nl,
)
from cardinal_pythonlib.logs import main_only_quicksetup_rootlogger
from cardinal_pythonlib.hash import (
HashMethods,
make_hasher,
)
log = logging.getLogger(__name__)
def get_first_noncomment_line(filename: str) -> Optional[str]:
try:
with open(filename) as f:
return next(gen_noncomment_lines(f))
except StopIteration:
return None
def bulk_hash(input_filename: str,
output_filename: str,
hash_method: str,
key: str,
keep_id: bool = True):
"""
Hash lines from one file to another.
Args:
input_filename:
input filename, or "-" for stdin
output_filename:
output filename, or "-" for stdin
hash_method:
method to use; e.g. ``HMAC_SHA256``
key:
secret key for hasher
keep_id:
produce CSV with ``hash,id`` pairs, rather than just lines with
the hashes?
Note that the hash precedes the ID with the ``keep_id`` option, which
works best if the ID might contain commas.
"""
log.info(f"Reading from: {input_filename}")
log.info(f"Writing to: {output_filename}")
log.info(f"Using hash method: {hash_method}")
log.info(f"keep_id: {keep_id}")
log.debug(f"Using key: {key!r}") # NB security warning in help
hasher = make_hasher(hash_method=hash_method, key=key)
with smart_open(input_filename, "rt") as i: # type: TextIO
with smart_open(output_filename, "wt") as o: # type: TextIO
for line in gen_noncomment_lines(i):
hashed = hasher.hash(line) if line else ""
outline = f"{hashed},{line}" if keep_id else hashed
# log.debug(f"{line!r} -> {hashed!r}")
writeline_nl(o, outline)
def main() -> None:
"""
Command-line entry point.
"""
# noinspection PyTypeChecker
parser = argparse.ArgumentParser(
description="Hash IDs in bulk, using a cryptographic hash function.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'infile', type=str,
help="Input file, or '-' for stdin. "
"Use one line per thing to be hashed. "
"Comments (marked with '#') and blank lines are ignored. "
"Lines have whitespace stripped left and right.")
parser.add_argument(
'--outfile', type=str, default="-",
help="Output file, or '-' for stdout. "
"One line will be written for every input line. "
"Blank lines will be written for commented or blank input.")
parser.add_argument(
'--key', type=str,
help="Secret key for hasher (warning: may be visible in process list; "
"see also --keyfile)")
parser.add_argument(
'--keyfile', type=str,
help="File whose first noncomment line contains the secret key for "
"the hasher. (It will be whitespace-stripped right and left.)")
parser.add_argument(
'--method', choices=[HashMethods.HMAC_MD5,
HashMethods.HMAC_SHA256,
HashMethods.HMAC_SHA512],
default=HashMethods.HMAC_MD5,
help="Hash method")
parser.add_argument(
'--keepid', action="store_true",
help="Produce CSV output with (hash,id) rather than just the hash")
parser.add_argument(
'--verbose', '-v', action="store_true",
help="Be verbose (NB will write key to stderr)")
args = parser.parse_args()
main_only_quicksetup_rootlogger(logging.DEBUG if args.verbose
else logging.INFO)
assert bool(args.key) != bool(args.keyfile), (
"Specify either --key or --keyfile (and not both)."
)
if args.keyfile:
key = get_first_noncomment_line(args.keyfile)
assert key, f"No key found in keyfile: {args.keyfile}"
else:
key = args.key
bulk_hash(
input_filename=args.infile,
output_filename=args.outfile,
hash_method=args.method,
key=key,
keep_id=args.keepid,
)
if __name__ == "__main__":
main()
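# Example invocation (assuming the crate_anon package is importable; the file
# names are placeholders):
#
#     python -m crate_anon.linkage.bulk_hash ids.txt --outfile hashed.csv \
#         --keyfile secret_key.txt --keepid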
| RudolfCardinal/crate | crate_anon/linkage/bulk_hash.py | Python | gpl-3.0 | 6,042 | 0 |
"""
flask.ext.acl
=============
This extension provides an Access Control implementation for `tipfy <http://www.tipfy.org/>`_.
Links
-----
* `Documentation <http://www.tipfy.org/wiki/extensions/acl/>`_
* `Source Code Repository <http://code.google.com/p/tipfy-ext-acl/>`_
* `Issue Tracker <http://code.google.com/p/tipfy-ext-acl/issues/list>`_
About tipfy
-----------
* `Home page <http://www.tipfy.org/>`_
* `Extension list <http://www.tipfy.org/wiki/extensions/>`_
* `Discussion Group <http://groups.google.com/group/tipfy>`_
"""
from setuptools import setup
setup(
name = 'flask.ext.acl',
version = '0.6',
license = 'BSD',
url = 'https://github.com/guotie/flask-acl',
description = 'Access Control extension for flask',
long_description = __doc__,
author = 'guotie',
author_email = '[email protected]',
zip_safe = False,
platforms = 'any',
packages = [
'flask',
'flask.ext',
],
namespace_packages = [
'flask',
'flask.ext',
],
include_package_data = True,
install_requires = [
'flask',
'flask.ext.sqlalchemy',
'flask.ext.cache',
],
classifiers = [
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules',
]
)
| guotie/flask-acl | setup.py | Python | mit | 1,588 | 0.019521 |
from django.conf.urls import patterns, include, url
try:
from djcelery.views import apply
urlpatterns = patterns('',
url(r'^apply/(?P<task_name>.+?)/', apply, name='celery-apply'),
url(r'^celery/', include('djcelery.urls')),
)
except ImportError:
urlpatterns = patterns('')
| PolicyStat/jobtastic | test_projects/django/testproj/urls.py | Python | mit | 307 | 0.003257 |
from miro.test.framework import MiroTestCase
from miro.frontends.widgets.widgetstatestore import WidgetStateStore
from miro.frontends.widgets.itemlist import SORT_KEY_MAP
class WidgetStateConstants(MiroTestCase):
def setUp(self):
MiroTestCase.setUp(self)
self.display_types = set(WidgetStateStore.get_display_types())
def test_view_types(self):
# test that all view types are different
view_types = (WidgetStateStore.get_list_view_type(),
WidgetStateStore.get_standard_view_type(),
WidgetStateStore.get_album_view_type())
for i in range(len(view_types)):
for j in range(i + 1, len(view_types)):
self.assertNotEqual(view_types[i], view_types[j])
def test_default_view_types(self):
display_types = set(WidgetStateStore.DEFAULT_VIEW_TYPE)
self.assertEqual(self.display_types, display_types)
def test_default_column_widths(self):
# test that all available columns have widths set for them
# calculate all columns that available for some display/view
# combination
available_columns = set()
display_id = None # this isn't used yet, just set it to a dummy value
for display_type in self.display_types:
for view_type in (WidgetStateStore.get_list_view_type(),
WidgetStateStore.get_standard_view_type(),
WidgetStateStore.get_album_view_type()):
available_columns.update(
WidgetStateStore.get_columns_available(
display_type, display_id, view_type))
# make sure that we have widths for those columns
self.assertEqual(available_columns,
set(WidgetStateStore.DEFAULT_COLUMN_WIDTHS.keys()))
def test_default_sort_column(self):
display_types = set(WidgetStateStore.DEFAULT_SORT_COLUMN)
self.assertEqual(self.display_types, display_types)
def test_default_columns(self):
display_types = set(WidgetStateStore.DEFAULT_COLUMNS)
self.assertEqual(self.display_types, display_types)
def test_available_columns(self):
# Currently what get_display_types() uses. Testing it anyway.
display_types = set(WidgetStateStore.AVAILABLE_COLUMNS)
self.assertEqual(self.display_types, display_types)
def test_sort_key_map(self):
columns = set(WidgetStateStore.DEFAULT_COLUMN_WIDTHS)
sort_keys = set(SORT_KEY_MAP)
self.assertEqual(sort_keys, columns)
| debugger06/MiroX | lib/test/widgetstateconstantstest.py | Python | gpl-2.0 | 2,572 | 0.002722 |
# Copyright 2014 Michael Frank <[email protected]>
#
# This file is part of Mandelbrot.
#
# Mandelbrot is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Mandelbrot is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Mandelbrot. If not, see <http://www.gnu.org/licenses/>.
import datetime
import pyparsing as pp
from mandelbrot.model.timestamp import Timestamp, UTC
EpochDateTime = pp.Word(pp.srange('[1-9]'), pp.srange('[0-9]'))
def parseEpochDateTime(tokens):
return datetime.datetime.fromtimestamp(int(tokens[0]), UTC)
EpochDateTime.setParseAction(parseEpochDateTime)
ISODateTimeUTC = pp.Regex(r'\d{4}-\d{2}-\d{2}T\d{2}\:\d{2}\:\d{2}Z')
def parseISODateTime(tokens):
return datetime.datetime.strptime(tokens[0], '%Y-%m-%dT%H:%M:%SZ')
ISODateTimeUTC.setParseAction(parseISODateTime)
ISODateTimeAndOffset = pp.Regex(r'\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}[+-]\d{2}:\d{2}')
def parseISODateTimeAndOffset(tokens):
return datetime.datetime.strptime(tokens[0], '%Y-%m-%dT%H:%M:%S%z')
ISODateTimeAndOffset.setParseAction(parseISODateTimeAndOffset)
ISODateTime = ISODateTimeUTC | ISODateTimeAndOffset
TimeValue = pp.Word(pp.srange('[1-9]'), pp.srange('[0-9]'))
UnitDays = pp.CaselessKeyword('day') | pp.CaselessKeyword('days')
UnitDays.setParseAction(lambda x: lambda hours: hours * 60 * 60 * 24)
UnitHours = pp.CaselessKeyword('hour') | pp.CaselessKeyword('hours')
UnitHours.setParseAction(lambda x: lambda hours: hours * 60 * 60)
UnitMinutes = pp.CaselessKeyword('minute') | pp.CaselessKeyword('minutes')
UnitMinutes.setParseAction(lambda x: lambda minutes: minutes * 60)
UnitSeconds = pp.CaselessKeyword('second') | pp.CaselessKeyword('seconds')
UnitSeconds.setParseAction(lambda x: lambda seconds: seconds)
TimeUnit = UnitDays | UnitHours | UnitMinutes | UnitSeconds
DirectionAgo = pp.CaselessLiteral("ago")
DirectionAgo.setParseAction(lambda x: lambda point,delta: point - delta)
DirectionAhead = pp.CaselessLiteral("ahead")
DirectionAhead.setParseAction(lambda x: lambda point,delta: point + delta)
RelativeDirection = DirectionAgo | DirectionAhead
RelativeDateTime = TimeValue + TimeUnit + RelativeDirection
def parseRelativeDateTime(tokens):
value = int(tokens[0])
magnify = tokens[1]
shift = tokens[2]
seconds = magnify(value)
return shift(datetime.datetime.now(UTC), datetime.timedelta(seconds=seconds))
RelativeDateTime.setParseAction(parseRelativeDateTime)
DateTime = ISODateTime | RelativeDateTime | EpochDateTime
ClosedDateTimeRange = DateTime + pp.Suppress(pp.Literal('..')) + DateTime
LeftOpenDateTimeRange = pp.Literal('..') + DateTime
RightOpenDateTimeRange = DateTime + pp.Literal('..')
DateTimeRange = ClosedDateTimeRange | LeftOpenDateTimeRange | RightOpenDateTimeRange
DateTimePlusDelta = DateTime + pp.Suppress(pp.Literal('+')) + TimeValue + TimeUnit
def parseDateTimePlusDelta(tokens):
start = tokens[0]
value = int(tokens[1])
magnify = tokens[2]
delta = datetime.timedelta(seconds=magnify(value))
return [start, start + delta]
DateTimePlusDelta.setParseAction(parseDateTimePlusDelta)
NowPlusDelta = pp.Suppress(pp.Literal('+')) + TimeValue + TimeUnit
def parseNowPlusDelta(tokens):
start = datetime.datetime.now(UTC)
value = int(tokens[0])
magnify = tokens[1]
delta = datetime.timedelta(seconds=magnify(value))
return [start, start + delta]
NowPlusDelta.setParseAction(parseNowPlusDelta)
DateTimeWindow = ClosedDateTimeRange | DateTimePlusDelta | NowPlusDelta
def datetime_to_timestamp(dt):
timestamp = Timestamp()
timestamp.set_datetime(dt)
return timestamp
def parse_datetime(string):
"""
Parse a datetime string. Datetimes may be specified using the following formats:
ISOFORMAT ISO-8601 format, e.g. "2015-05-01T12:45:00Z"
RELATIVE some magnitude relative to now, e.g. "2 hours ago" or "15 days ahead"
EPOCH seconds since the UNIX epoch
:param string: The timerange to parse
:type string: str
:returns: the datetime as a Timestamp
:rtype: Timestamp
:raises ValueError: the timerange could not be parsed
"""
try:
return datetime_to_timestamp(DateTime.parseString(string, parseAll=True).asList()[0])
except Exception as e:
raise ValueError("failed to parse datetime '%s'" % string)
def parse_timerange(string):
"""
Parse a timerange string. Timeranges may be specified using the following formats:
START..END between START and END
START.. from START to infinity
..END from -infinity to END
:param string: The timerange to parse
:type string: str
:returns: A 2-tuple containing the start and end timestamps
:rtype: tuple[Timestamp,Timestamp]
:raises ValueError: the timerange could not be parsed
"""
try:
start,end = DateTimeRange.parseString(string, parseAll=True).asList()
start = None if start == ".." else datetime_to_timestamp(start)
end = None if end == ".." else datetime_to_timestamp(end)
return (start,end)
except Exception as e:
raise ValueError("failed to parse timerange '%s'" % string)
def parse_timewindow(string):
"""
Parse a timewindow. Timewindows may be specified using the following
formats:
START..END
START+DELTA
+DELTA
:param string: The timewindow to parse
:type string: str
:returns: A 2-tuple containing the start and end timestamps
:rtype: tuple[Timestamp,Timestamp]
:raises ValueError: the timewindow could not be parsed
"""
try:
start,end = DateTimeWindow.parseString(string, parseAll=True).asList()
return (datetime_to_timestamp(start), datetime_to_timestamp(end))
except Exception as e:
raise ValueError("failed to parse timewindow '%s'" % string)
| msfrank/mandelbrot | mandelbrot/timerange.py | Python | gpl-3.0 | 6,251 | 0.004799 |
#
# Licensed under the GNU General Public License Version 3
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright 2013 Aron Parsons <[email protected]>
# Copyright (c) 2011--2015 Red Hat, Inc.
#
# NOTE: the 'self' variable is an instance of SpacewalkShell
# wildcard import
# pylint: disable=W0401,W0614
# unused argument
# pylint: disable=W0613
# invalid function name
# pylint: disable=C0103
import logging
import readline
import shlex
from getpass import getpass
from ConfigParser import NoOptionError
from spacecmd.utils import *
from time import sleep
import xmlrpclib
# list of system selection options for the help output
HELP_SYSTEM_OPTS = '''<SYSTEMS> can be any of the following:
name
ssm (see 'help ssm')
search:QUERY (see 'help system_search')
group:GROUP
channel:CHANNEL
'''
HELP_TIME_OPTS = '''Dates can be any of the following:
Explicit Dates:
Dates can be expressed as explicit date strings in the YYYYMMDD[HHMM]
format. The year, month and day are required, while the hours and
minutes are not; the hours and minutes will default to 0000 if no
values are provided.
Deltas:
Dates can be expressed as delta values. For example, '2h' would
mean 2 hours in the future. You can also use negative values to
express times in the past (e.g., -7d would be one week ago).
Units:
s -> seconds
m -> minutes
h -> hours
d -> days
'''
####################
# life of caches in seconds
SYSTEM_CACHE_TTL = 3600
PACKAGE_CACHE_TTL = 86400
ERRATA_CACHE_TTL = 86400
MINIMUM_API_VERSION = 10.8
SEPARATOR = '\n' + '#' * 30 + '\n'
####################
ENTITLEMENTS = ['enterprise_entitled',
'virtualization_host'
]
SYSTEM_SEARCH_FIELDS = ['id', 'name', 'ip', 'hostname',
'device', 'vendor', 'driver', 'uuid']
####################
def help_systems(self):
print HELP_SYSTEM_OPTS
def help_time(self):
print HELP_TIME_OPTS
####################
def help_clear(self):
print 'clear: clear the screen'
print 'usage: clear'
def do_clear(self, args):
os.system('clear')
####################
def help_clear_caches(self):
print 'clear_caches: Clear the internal caches kept for systems' + \
' and packages'
print 'usage: clear_caches'
def do_clear_caches(self, args):
self.clear_system_cache()
self.clear_package_cache()
self.clear_errata_cache()
####################
def help_get_apiversion(self):
print 'get_apiversion: Display the API version of the server'
print 'usage: get_apiversion'
def do_get_apiversion(self, args):
print self.client.api.getVersion()
####################
def help_get_serverversion(self):
print 'get_serverversion: Display the version of the server'
print 'usage: get_serverversion'
def do_get_serverversion(self, args):
print self.client.api.systemVersion()
####################
def help_get_certificateexpiration(self):
print 'get_certificateexpiration: Print the expiration date of the'
print " server's entitlement certificate"
print 'usage: get_certificateexpiration'
def do_get_certificateexpiration(self, args):
date = self.client.satellite.getCertificateExpirationDate(self.session)
print date
####################
def help_list_proxies(self):
    print 'list_proxies: List the proxies within the user\'s organization'
print 'usage: list_proxies'
def do_list_proxies(self, args):
proxies = self.client.satellite.listProxies(self.session)
print proxies
####################
def help_get_session(self):
print 'get_session: Show the current session string'
print 'usage: get_session'
def do_get_session(self, args):
if self.session:
print self.session
else:
logging.error('No session found')
####################
def help_help(self):
print 'help: Show help for the given command'
print 'usage: help COMMAND'
####################
def help_history(self):
print 'history: List your command history'
print 'usage: history'
def do_history(self, args):
for i in range(1, readline.get_current_history_length()):
print '%s %s' % (str(i).rjust(4), readline.get_history_item(i))
####################
def help_toggle_confirmations(self):
print 'toggle_confirmations: Toggle confirmation messages on/off'
print 'usage: toggle_confirmations'
def do_toggle_confirmations(self, args):
if self.options.yes:
self.options.yes = False
print 'Confirmation messages are enabled'
else:
self.options.yes = True
logging.warning('Confirmation messages are DISABLED!')
####################
def help_login(self):
print 'login: Connect to a Spacewalk server'
print 'usage: login [USERNAME] [SERVER]'
def do_login(self, args):
(args, _options) = parse_arguments(args)
# logout before logging in again
if len(self.session):
logging.warning('You are already logged in')
return True
    # an argument passed to the function gets precedence
if len(args) == 2:
server = args[1]
else:
# use the server we were already using
server = self.config['server']
    # bail out if no server was given
if not server:
logging.warning('No server specified')
return False
# load the server-specific configuration
self.load_config_section(server)
    # an argument passed to the function gets precedence
if len(args):
username = args[0]
elif self.config.has_key('username'):
# use the username from before
username = self.config['username']
elif self.options.username:
        # use the username passed on the command line
username = self.options.username
else:
username = ''
# set the protocol
if self.config.has_key('nossl') and self.config['nossl']:
proto = 'http'
else:
proto = 'https'
server_url = '%s://%s/rpc/api' % (proto, server)
# this will enable spewing out all client/server traffic
verbose_xmlrpc = False
if self.options.debug > 1:
verbose_xmlrpc = True
# connect to the server
logging.debug('Connecting to %s', server_url)
self.client = xmlrpclib.Server(server_url, verbose=verbose_xmlrpc)
# check the API to verify connectivity
try:
self.api_version = self.client.api.getVersion()
logging.debug('Server API Version = %s', self.api_version)
except xmlrpclib.Fault, e:
if self.options.debug > 0:
logging.exception(e)
logging.error('Failed to connect to %s', server_url)
self.client = None
return False
# ensure the server is recent enough
if self.api_version < self.MINIMUM_API_VERSION:
logging.error('API (%s) is too old (>= %s required)',
self.api_version, self.MINIMUM_API_VERSION)
self.client = None
return False
# store the session file in the server's own directory
session_file = os.path.join(self.conf_dir, server, 'session')
# retrieve a cached session
if os.path.isfile(session_file) and not self.options.password:
try:
sessionfile = open(session_file, 'r')
# read the session (format = username:session)
for line in sessionfile:
parts = line.split(':')
# if a username was passed, make sure it matches
if len(username):
if parts[0] == username:
self.session = parts[1]
else:
# get the username from the cache if one
# wasn't passed by the user
username = parts[0]
self.session = parts[1]
sessionfile.close()
except IOError:
logging.error('Could not read %s', session_file)
# check the cached credentials by doing an API call
if self.session:
try:
logging.debug('Using cached credentials from %s', session_file)
self.client.user.listAssignableRoles(self.session)
except xmlrpclib.Fault:
logging.warning('Cached credentials are invalid')
self.current_user = ''
self.session = ''
# attempt to login if we don't have a valid session yet
if not len(self.session):
if len(username):
logging.info('Spacewalk Username: %s', username)
else:
username = prompt_user('Spacewalk Username:', noblank=True)
if self.options.password:
password = self.options.password
# remove this from the options so that if 'login' is called
# again, the user is prompted for the information
self.options.password = None
elif self.config.has_key('password'):
password = self.config['password']
else:
password = getpass('Spacewalk Password: ')
# login to the server
try:
self.session = self.client.auth.login(username, password)
# don't keep the password around
password = None
except xmlrpclib.Fault:
logging.error('Invalid credentials')
return False
try:
# make sure ~/.spacecmd/<server> exists
conf_dir = os.path.join(self.conf_dir, server)
if not os.path.isdir(conf_dir):
os.mkdir(conf_dir, 0700)
# add the new cache to the file
line = '%s:%s\n' % (username, self.session)
# write the new cache file out
sessionfile = open(session_file, 'w')
sessionfile.write(line)
sessionfile.close()
except IOError:
logging.error('Could not write session file')
# load the system/package/errata caches
self.load_caches(server)
# keep track of who we are and who we're connected to
self.current_user = username
self.server = server
logging.info('Connected to %s as %s', server_url, username)
return True
####################
def help_logout(self):
print 'logout: Disconnect from the server'
print 'usage: logout'
def do_logout(self, args):
if self.session:
self.client.auth.logout(self.session)
self.session = ''
self.current_user = ''
self.server = ''
self.do_clear_caches('')
####################
def help_whoami(self):
print 'whoami: Print the name of the currently logged in user'
print 'usage: whoami'
def do_whoami(self, args):
if len(self.current_user):
print self.current_user
else:
logging.warning("You are not logged in")
####################
def help_whoamitalkingto(self):
print 'whoamitalkingto: Print the name of the server'
print 'usage: whoamitalkingto'
def do_whoamitalkingto(self, args):
if len(self.server):
print self.server
else:
logging.warning('Yourself')
####################
def tab_complete_errata(self, text):
options = self.do_errata_list('', True)
options.append('search:')
return tab_completer(options, text)
def tab_complete_systems(self, text):
if re.match('group:', text):
# prepend 'group' to each item for tab completion
groups = ['group:%s' % g for g in self.do_group_list('', True)]
return tab_completer(groups, text)
elif re.match('channel:', text):
# prepend 'channel' to each item for tab completion
channels = ['channel:%s' % s
for s in self.do_softwarechannel_list('', True)]
return tab_completer(channels, text)
elif re.match('search:', text):
# prepend 'search' to each item for tab completion
fields = ['search:%s:' % f for f in self.SYSTEM_SEARCH_FIELDS]
return tab_completer(fields, text)
else:
options = self.get_system_names()
# add our special search options
options.extend(['group:', 'channel:', 'search:'])
return tab_completer(options, text)
def remove_last_history_item(self):
last = readline.get_current_history_length() - 1
if last >= 0:
readline.remove_history_item(last)
def clear_errata_cache(self):
self.all_errata = {}
self.errata_cache_expire = datetime.now()
self.save_errata_cache()
def get_errata_names(self):
return sorted([e.get('advisory_name') for e in self.all_errata])
def get_erratum_id(self, name):
if name in self.all_errata:
return self.all_errata[name]['id']
def get_erratum_name(self, erratum_id):
for erratum in self.all_errata:
if self.all_errata[erratum]['id'] == erratum_id:
return erratum
def generate_errata_cache(self, force=False):
if not force and datetime.now() < self.errata_cache_expire:
return
if not self.options.quiet:
# tell the user what's going on
self.replace_line_buffer('** Generating errata cache **')
channels = self.client.channel.listSoftwareChannels(self.session)
channels = [c.get('label') for c in channels]
for c in channels:
try:
errata = \
self.client.channel.software.listErrata(self.session, c)
except xmlrpclib.Fault:
logging.debug('No access to %s', c)
continue
for erratum in errata:
if erratum.get('advisory_name') not in self.all_errata:
self.all_errata[erratum.get('advisory_name')] = \
{'id': erratum.get('id'),
'advisory_name': erratum.get('advisory_name'),
'advisory_type': erratum.get('advisory_type'),
'date': erratum.get('date'),
'advisory_synopsis': erratum.get('advisory_synopsis')}
self.errata_cache_expire = \
datetime.now() + timedelta(self.ERRATA_CACHE_TTL)
self.save_errata_cache()
if not self.options.quiet:
# restore the original line buffer
self.replace_line_buffer()
def save_errata_cache(self):
save_cache(self.errata_cache_file,
self.all_errata,
self.errata_cache_expire)
def clear_package_cache(self):
self.all_packages_short = {}
self.all_packages = {}
self.all_packages_by_id = {}
self.package_cache_expire = datetime.now()
self.save_package_caches()
def generate_package_cache(self, force=False):
if not force and datetime.now() < self.package_cache_expire:
return
if not self.options.quiet:
# tell the user what's going on
self.replace_line_buffer('** Generating package cache **')
channels = self.client.channel.listSoftwareChannels(self.session)
channels = [c.get('label') for c in channels]
for c in channels:
try:
packages = \
self.client.channel.software.listAllPackages(self.session, c)
except xmlrpclib.Fault:
logging.debug('No access to %s', c)
continue
for p in packages:
if not p.get('name') in self.all_packages_short:
self.all_packages_short[p.get('name')] = ''
longname = build_package_names(p)
if not longname in self.all_packages:
self.all_packages[longname] = [p.get('id')]
else:
self.all_packages[longname].append(p.get('id'))
# keep a reverse dictionary so we can lookup package names by ID
self.all_packages_by_id = {}
for (k, v) in self.all_packages.iteritems():
for i in v:
self.all_packages_by_id[i] = k
self.package_cache_expire = \
datetime.now() + timedelta(seconds=self.PACKAGE_CACHE_TTL)
self.save_package_caches()
if not self.options.quiet:
# restore the original line buffer
self.replace_line_buffer()
def save_package_caches(self):
# store the cache to disk to speed things up
save_cache(self.packages_short_cache_file,
self.all_packages_short,
self.package_cache_expire)
save_cache(self.packages_long_cache_file,
self.all_packages,
self.package_cache_expire)
save_cache(self.packages_by_id_cache_file,
self.all_packages_by_id,
self.package_cache_expire)
# create a global list of all available package names
def get_package_names(self, longnames=False):
self.generate_package_cache()
if longnames:
return self.all_packages.keys()
else:
return self.all_packages_short
def get_package_id(self, name):
self.generate_package_cache()
try:
return set(self.all_packages[name])
except KeyError:
return
def get_package_name(self, package_id):
self.generate_package_cache()
try:
return self.all_packages_by_id[package_id]
except KeyError:
return
def clear_system_cache(self):
self.all_systems = {}
self.system_cache_expire = datetime.now()
self.save_system_cache()
def generate_system_cache(self, force=False, delay=0):
if not force and datetime.now() < self.system_cache_expire:
return
if not self.options.quiet:
# tell the user what's going on
self.replace_line_buffer('** Generating system cache **')
# we might need to wait for some systems to delete
if delay:
sleep(delay)
systems = self.client.system.listSystems(self.session)
self.all_systems = {}
for s in systems:
self.all_systems[s.get('id')] = s.get('name')
self.system_cache_expire = \
datetime.now() + timedelta(seconds=self.SYSTEM_CACHE_TTL)
self.save_system_cache()
if not self.options.quiet:
# restore the original line buffer
self.replace_line_buffer()
def save_system_cache(self):
save_cache(self.system_cache_file,
self.all_systems,
self.system_cache_expire)
def load_caches(self, server):
conf_dir = os.path.join(self.conf_dir, server)
try:
if not os.path.isdir(conf_dir):
os.mkdir(conf_dir, 0700)
except OSError:
logging.error('Could not create directory %s', conf_dir)
return
self.ssm_cache_file = os.path.join(conf_dir, 'ssm')
self.system_cache_file = os.path.join(conf_dir, 'systems')
self.errata_cache_file = os.path.join(conf_dir, 'errata')
self.packages_long_cache_file = os.path.join(conf_dir, 'packages_long')
self.packages_by_id_cache_file = \
os.path.join(conf_dir, 'packages_by_id')
self.packages_short_cache_file = \
os.path.join(conf_dir, 'packages_short')
# load self.ssm from disk
(self.ssm, _ignore) = load_cache(self.ssm_cache_file)
# update the prompt now that we loaded the SSM
self.postcmd(False, '')
# load self.all_systems from disk
(self.all_systems, self.system_cache_expire) = \
load_cache(self.system_cache_file)
# load self.all_errata from disk
(self.all_errata, self.errata_cache_expire) = \
load_cache(self.errata_cache_file)
# load self.all_packages_short from disk
(self.all_packages_short, self.package_cache_expire) = \
load_cache(self.packages_short_cache_file)
# load self.all_packages from disk
(self.all_packages, self.package_cache_expire) = \
load_cache(self.packages_long_cache_file)
# load self.all_packages_by_id from disk
(self.all_packages_by_id, self.package_cache_expire) = \
load_cache(self.packages_by_id_cache_file)
def get_system_names(self):
self.generate_system_cache()
return self.all_systems.values()
# check for duplicate system names and return the system ID
def get_system_id(self, name):
self.generate_system_cache()
try:
        # check if we were passed a system ID instead of a name
system_id = int(name)
if system_id in self.all_systems:
return system_id
except ValueError:
pass
# get a set of matching systems to check for duplicate names
systems = []
for system_id in self.all_systems:
if name == self.all_systems[system_id]:
systems.append(system_id)
if len(systems) == 1:
return systems[0]
elif not len(systems):
logging.warning("Can't find system ID for %s", name)
return 0
else:
logging.warning('Duplicate system profile names found!')
logging.warning("Please reference systems by ID or resolve the")
logging.warning("underlying issue with 'system_delete' or 'system_rename'")
id_list = '%s = ' % name
for system_id in systems:
id_list = id_list + '%i, ' % system_id
logging.warning('')
logging.warning(id_list[:-2])
return 0
def get_system_name(self, system_id):
self.generate_system_cache()
try:
return self.all_systems[system_id]
except KeyError:
return
def get_org_id(self, name):
details = self.client.org.getDetails(self.session, name)
return details.get('id')
def expand_errata(self, args):
if not isinstance(args, list):
args = args.split()
self.generate_errata_cache()
if len(args) == 0:
return self.all_errata
errata = []
for item in args:
if re.match('search:', item):
item = re.sub('search:', '', item)
errata.extend(self.do_errata_search(item, True))
else:
errata.append(item)
matches = filter_results(self.all_errata, errata)
return matches
def expand_systems(self, args):
if not isinstance(args, list):
args = shlex.split(args)
systems = []
system_ids = []
for item in args:
if re.match('ssm', item, re.I):
systems.extend(self.ssm)
elif re.match('group:', item):
item = re.sub('group:', '', item)
members = self.do_group_listsystems("'%s'" % item, True)
if len(members):
systems.extend([re.escape(m) for m in members])
else:
logging.warning('No systems in group %s', item)
elif re.match('search:', item):
query = item.split(':', 1)[1]
results = self.do_system_search(query, True)
if len(results):
systems.extend([re.escape(r) for r in results])
elif re.match('channel:', item):
item = re.sub('channel:', '', item)
members = self.do_softwarechannel_listsystems(item, True)
if len(members):
systems.extend([re.escape(m) for m in members])
else:
logging.warning('No systems subscribed to %s', item)
else:
# translate system IDs that the user passes
try:
sys_id = int(item)
system_ids.append(sys_id)
except ValueError:
# just a system name
systems.append(item)
matches = filter_results(self.get_system_names(), systems)
return list(set(matches + system_ids))
def list_base_channels(self):
all_channels = self.client.channel.listSoftwareChannels(self.session)
base_channels = []
for c in all_channels:
if not c.get('parent_label'):
base_channels.append(c.get('label'))
return base_channels
def list_child_channels(self, system=None, parent=None, subscribed=False):
channels = []
if system:
system_id = self.get_system_id(system)
if not system_id:
return
if subscribed:
channels = \
self.client.system.listSubscribedChildChannels(self.session,
system_id)
else:
channels = self.client.system.listSubscribableChildChannels(
self.session, system_id)
elif parent:
all_channels = \
self.client.channel.listSoftwareChannels(self.session)
for c in all_channels:
if parent == c.get('parent_label'):
channels.append(c)
else:
# get all channels that have a parent
all_channels = \
self.client.channel.listSoftwareChannels(self.session)
for c in all_channels:
if c.get('parent_label'):
channels.append(c)
return [c.get('label') for c in channels]
def user_confirm(self, prompt='Is this ok [y/N]:', nospacer=False,
integer=False, ignore_yes=False):
if self.options.yes and not ignore_yes:
return True
if nospacer:
answer = prompt_user('%s' % prompt)
else:
answer = prompt_user('\n%s' % prompt)
if re.match('y', answer, re.I):
if integer:
return 1
else:
return True
else:
if integer:
return 0
else:
return False
# check if the available API is recent enough
def check_api_version(self, want):
want_parts = [int(i) for i in want.split('.')]
have_parts = [int(i) for i in self.api_version.split('.')]
if len(have_parts) == 2 and len(want_parts) == 2:
if have_parts[0] == want_parts[0]:
# compare minor versions if majors are the same
return have_parts[1] >= want_parts[1]
else:
# only compare major versions if they differ
return have_parts[0] >= want_parts[0]
else:
# compare the whole value
return float(self.api_version) >= float(want)
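# Worked examples of the comparison above (illustrative values):
#   with self.api_version = '10.15', check_api_version('10.8') -> True
#     (same major version, so minors are compared: 15 >= 8)
#   with self.api_version = '10.15', check_api_version('11.1') -> False
#     (major versions differ: 10 < 11)
#   with self.api_version = '11.1', check_api_version('10.8') -> True
#     (major versions differ: 11 >= 10)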
# replace the current line buffer
def replace_line_buffer(self, msg=None):
# restore the old buffer if we weren't given a new line
if not msg:
msg = readline.get_line_buffer()
# don't print a prompt if there wasn't one to begin with
if len(readline.get_line_buffer()):
new_line = '%s%s' % (self.prompt, msg)
else:
new_line = '%s' % msg
# clear the current line
self.stdout.write('\r'.ljust(len(self.current_line) + 1))
self.stdout.flush()
# write the new line
self.stdout.write('\r%s' % new_line)
self.stdout.flush()
# keep track of what is displayed so we can clear it later
self.current_line = new_line
def load_config_section(self, section):
config_opts = ['server', 'username', 'password', 'nossl']
if not self.config_parser.has_section(section):
logging.debug('Configuration section [%s] does not exist', section)
return
logging.debug('Loading configuration section [%s]', section)
for key in config_opts:
# don't override command-line options
if self.options.__dict__[key]:
# set the config value to the command-line argument
self.config[key] = self.options.__dict__[key]
else:
try:
self.config[key] = self.config_parser.get(section, key)
except NoOptionError:
pass
# handle the nossl boolean
if self.config.has_key('nossl') and isinstance(self.config['nossl'], str):
if re.match('^1|y|true$', self.config['nossl'], re.I):
self.config['nossl'] = True
else:
self.config['nossl'] = False
# Obfuscate the password with asterisks
config_debug = self.config.copy()
if config_debug.has_key('password'):
config_debug['password'] = "*" * len(config_debug['password'])
logging.debug('Current Configuration: %s', config_debug)
| xkollar/spacewalk | spacecmd/src/lib/misc.py | Python | gpl-2.0 | 28,096 | 0.000285 |
import json
from kolekto.printer import printer
from kolekto.commands import Command
class Restore(Command):
""" Restore metadata from a json dump.
"""
help = 'Restore metadata from a json dump'
def prepare(self):
self.add_arg('file', help='The json dump file to restore')
def run(self, args, config):
mdb = self.get_metadata_db(args.tree)
with open(args.file) as fdump:
dump = json.load(fdump)
for movie in dump:
mdb.save(movie['hash'], movie['movie'])
printer.verbose('Loaded {hash}', hash=movie['hash'])
printer.p('Loaded {nb} movies.', nb=len(dump))
| NaPs/Kolekto | kolekto/commands/restore.py | Python | mit | 657 | 0 |
# proxy module
from __future__ import absolute_import
from apptools.type_manager.hook import *
| enthought/etsproxy | enthought/type_manager/hook.py | Python | bsd-3-clause | 95 | 0 |
from __future__ import print_function
from argparse import ArgumentParser, Namespace
TEMPLATE_SEPARATOR = '#####'
OUTPUT_PREFIX = '''
// This file is the result of rendering `{filepath}`.
// You should make changes to this code by editing that template; not
// this file.
'''
def main(argv):
args = parse_args(argv[1:])
with open(args.output, 'w') as output:
print(render(args.filepath, args.limit), file=output)
return 0
def parse_args(raw_args):
p = ArgumentParser(
description=('Renders the specified template file with the given '
'arity limit. The template file should contain a line '
'containing just `{}`, with the template text above '
'that separator, and the context generation code '
'below. The code should define a `context` function '
'that generates a dict. The template text is then '
'rendered by: '
'`text.format(limit=limit, **(context(limit))`')
.format(TEMPLATE_SEPARATOR))
p.add_argument('limit', type=int)
p.add_argument('filepath', type=str)
p.add_argument('-o', '--output', default='/dev/stdout',
help='The path to the file to write the rendered template to.')
return p.parse_args(raw_args)
def render(filepath, limit):
text = read_file(filepath)
template, code = text.split('\n' + TEMPLATE_SEPARATOR + '\n', 2)
context_func = execute(code, filepath)['context']
context = context_func(limit)
return (OUTPUT_PREFIX.format(filepath=filepath)
+ template.format(limit=limit, **context))
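# For illustration, a minimal template file (hypothetical content) could be:
#
#   #define PP_LIMIT {limit}
#   #####
#   def context(limit):
#       return {}
#
# Rendering it with limit=8 would emit the OUTPUT_PREFIX comment followed by
# "#define PP_LIMIT 8".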
def execute(code, filepath):
code_locals = {}
code_obj = compile(code, filepath, 'exec')
exec(code_obj, {}, code_locals)
return code_locals
def read_file(path):
with open(path, 'r') as f:
return f.read()
if __name__ == '__main__':
import sys
sys.exit(main(sys.argv))
| mcinglis/libpp | templates/render.py | Python | agpl-3.0 | 2,004 | 0.001497 |
"""
Wind Turbine Company - 2013
Author: Stephan Rayner
Email: [email protected]
"""
import time
from test.Base_Test import Base_Test
class Maintenance_153Validation(Base_Test):
def setUp(self):
self.WindSpeedValue = "4.5"
self.interface.reset()
self.interface.write("Yaw_Generation", "2")
self.interface.expect("Emergency_Stop", "off")
self.interface.expect("Maintenance_Mode", "off")
def test_MaintenanceSD46(self):
'''
        Moving into Maintenance Mode while the turbine is running (State 2 or
        higher) causes SD_46 to fire before any other shutdowns. In other words,
        SD_46 should fire, and only SD_46.
'''
self._State2Setup()
self.interface.expect("Maintenance_Mode", "on")
self.TEST_CONDITION = self.interface.Shutdown.read(self.interface.Shutdown_List, return_onlyHigh = True)
print self.TEST_CONDITION
self.assertTrue("@GV.SD_46" in self.TEST_CONDITION,"Shutdown 46 did not fire")
self.assertEqual(self.TEST_CONDITION.keys()[0], "@GV.SD_46","Shutdown did not fire first")
        self.assertEqual(len(self.TEST_CONDITION), 1,"More than one shutdown is present.")
self.TEST_CONDITION = self.interface.read("Turbine_State")
self.assertEqual(self.TEST_CONDITION,"0")
def test_MaintenanceHardwareControl(self):
'''
        DO_BypLineProtRelMaintMode and DO_BypassRotorOverSpeed should be 0.
        Then, when Maintenance Mode is activated, SD_46 goes high and 1 minute later
        DO_BypLineProtRelMaintMode and DO_BypassRotorOverSpeed should be 1.
        //I am using a running counter with a read to check time, not a wait and read.
        //This maintains that the values don't flip early.
'''
self._State2Setup()
read_Vars = ["@GV.DO_BypLineProtRelMaintMode","@GV.DO_BypassRotorOverSpeed"]
#
self.assertEqual(self._readpxUtils(read_Vars),["0","0"])
self.interface.expect("Maintenance_Mode","on")
elapseTime = 0.0
initialTime = time.time()
self.TEST_CONDITION = self.interface.Shutdown.read(self.interface.Shutdown_List, return_onlyHigh = True)
#
self.assertTrue("@GV.SD_46" in self.TEST_CONDITION,"Shutdown 46 did not fire")
print "\nPlease Wait One Minute\n"
while((self._readpxUtils(read_Vars) == ["0","0"]) and (elapseTime < 120)):
elapseTime = time.time() - initialTime
expectedRunningTime = 60
        tolerance = 10
self.TEST_CONDITION = self._readpxUtils(read_Vars)
#
self.assertEqual(self.TEST_CONDITION,["1","1"])
#
        self.assertLessEqual(abs(expectedRunningTime-elapseTime),tolerance,"The hardware does not retain control over the UPR and the Smartplug until the brakes apply as expected:\nElapsed Time: %s\n%s : %s\n%s : %s\n" % (str(elapseTime), read_Vars[0], self.TEST_CONDITION[0], read_Vars[1], self.TEST_CONDITION[1]))
#Helper Functions
def _State2Setup(self):
self.interface.write("Wind_Speed",self.WindSpeedValue)
self.interface.write("Yaw_Generation", "2")
print ("Waiting for 2 minutes")
time.sleep(70)# must hold this here for the Minute averages to hold
self.interface.Shutdown.bypass([24, 31])
self.interface.Shutdown.reset()
self.interface.start()
def _readpxUtils(self,List):
a = self.interface.mcc.read(List)
tmp=[]
for x in List:
tmp.append(a[x])
return tmp
| stephan-rayner/HIL-TestStation | Tests/e3120/Maintenance/TransitionIntoState0.py | Python | mit | 3,533 | 0.012171 |
from allauth.socialaccount.providers.oauth2.urls import default_urlpatterns
from provider import StackExchangeProvider
urlpatterns = default_urlpatterns(StackExchangeProvider)
| HackerEarth/django-allauth | allauth/socialaccount/providers/stackexchange/urls.py | Python | mit | 178 | 0.005618 |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = ['L1Decay', 'L2Decay']
import paddle.fluid as fluid
class L1Decay(fluid.regularizer.L1Decay):
r"""
Implement the L1 Weight Decay Regularization, which encourages the weights to be sparse.
It can be set in :ref:`api_paddle_ParamAttr` or ``optimizer`` (such as :ref:`api_paddle_optimizer_Momentum` ).
When set in ``ParamAttr`` , it only takes effect for trainable parameters in this layer. When set in
``optimizer`` , it takes effect for all trainable parameters. When set together, ``ParamAttr`` has
higher priority than ``optimizer`` , which means that for a trainable parameter, if regularizer is defined
in its ParamAttr, then the regularizer in Optimizer will be ignored. Otherwise the regularizer
in Optimizer will be used.
In the implementation, the loss function of L1 Weight Decay Regularization is as follows:
.. math::
loss = coeff * reduce\_sum(abs(x))
Args:
coeff(float, optional): regularization coeff. Default:0.0.
Examples:
.. code-block:: python
# Example1: set Regularizer in optimizer
import paddle
from paddle.regularizer import L1Decay
import numpy as np
linear = paddle.nn.Linear(10, 10)
inp = paddle.rand(shape=[10, 10], dtype="float32")
out = linear(inp)
loss = paddle.mean(out)
beta1 = paddle.to_tensor([0.9], dtype="float32")
beta2 = paddle.to_tensor([0.99], dtype="float32")
momentum = paddle.optimizer.Momentum(
learning_rate=0.1,
parameters=linear.parameters(),
weight_decay=L1Decay(0.0001))
back = out.backward()
momentum.step()
momentum.clear_grad()
# Example2: set Regularizer in parameters
# Set L1 regularization in parameters.
# Global regularizer does not take effect on my_conv2d for this case.
from paddle.nn import Conv2D
from paddle import ParamAttr
from paddle.regularizer import L2Decay
my_conv2d = Conv2D(
in_channels=10,
out_channels=10,
kernel_size=1,
stride=1,
padding=0,
weight_attr=ParamAttr(regularizer=L2Decay(coeff=0.01)),
bias_attr=False)
"""
def __init__(self, coeff=0.0):
super(L1Decay, self).__init__(coeff)
class L2Decay(fluid.regularizer.L2Decay):
r"""
Implement the L2 Weight Decay Regularization, which helps to prevent the model over-fitting.
It can be set in :ref:`api_paddle_ParamAttr` or ``optimizer`` (such as :ref:`api_paddle_optimizer_Momentum` ).
When set in ``ParamAttr`` , it only takes effect for trainable parameters in this layer. When set in
``optimizer`` , it takes effect for all trainable parameters. When set together, ``ParamAttr`` has
higher priority than ``optimizer`` , which means that for a trainable parameter, if regularizer is defined
in its ParamAttr, then the regularizer in Optimizer will be ignored. Otherwise the regularizer
in Optimizer will be used.
In the implementation, the loss function of L2 Weight Decay Regularization is as follows:
.. math::
loss = 0.5 * coeff * reduce\_sum(square(x))
Args:
regularization_coeff(float, optional): regularization coeff. Default:0.0
Examples:
.. code-block:: python
# Example1: set Regularizer in optimizer
import paddle
from paddle.regularizer import L2Decay
import numpy as np
linear = paddle.nn.Linear(10, 10)
inp = paddle.rand(shape=[10, 10], dtype="float32")
out = linear(inp)
loss = paddle.mean(out)
beta1 = paddle.to_tensor([0.9], dtype="float32")
beta2 = paddle.to_tensor([0.99], dtype="float32")
momentum = paddle.optimizer.Momentum(
learning_rate=0.1,
parameters=linear.parameters(),
weight_decay=L2Decay(0.0001))
back = out.backward()
momentum.step()
momentum.clear_grad()
# Example2: set Regularizer in parameters
# Set L2 regularization in parameters.
# Global regularizer does not take effect on my_conv2d for this case.
from paddle.nn import Conv2D
from paddle import ParamAttr
from paddle.regularizer import L2Decay
my_conv2d = Conv2D(
in_channels=10,
out_channels=10,
kernel_size=1,
stride=1,
padding=0,
weight_attr=ParamAttr(regularizer=L2Decay(coeff=0.01)),
bias_attr=False)
"""
def __init__(self, coeff=0.0):
super(L2Decay, self).__init__(coeff)
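# Worked illustration of the penalties documented above (not part of the API):
# for a parameter x = [1.0, -2.0, 3.0] with coeff = 0.01, L1Decay adds
# 0.01 * (1 + 2 + 3) = 0.06 to the loss, while L2Decay adds
# 0.5 * 0.01 * (1 + 4 + 9) = 0.07.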
| luotao1/Paddle | python/paddle/regularizer.py | Python | apache-2.0 | 5,630 | 0.007282 |
import time
import RPi.GPIO as GPIO
LED_VERDE = 22
LED_VERMELHO = 18
GPIO.setmode(GPIO.BOARD)
GPIO.setup(LED_VERDE, GPIO.OUT)
GPIO.output(LED_VERDE, GPIO.LOW)
GPIO.setup(LED_VERMELHO, GPIO.OUT)
GPIO.output(LED_VERMELHO, GPIO.LOW)
while True:
GPIO.output(LED_VERDE, GPIO.HIGH)
GPIO.output(LED_VERMELHO, GPIO.LOW)
time.sleep(0.5)
GPIO.output(LED_VERDE, GPIO.LOW)
GPIO.output(LED_VERMELHO, GPIO.HIGH)
time.sleep(0.5)
| ocarneiro/minecraft-pi | lab/blinky.py | Python | apache-2.0 | 441 | 0 |
from ._terminator import Terminator
__all__ = ("Terminator",)
| nikitanovosibirsk/vedro | vedro/plugins/terminator/__init__.py | Python | apache-2.0 | 63 | 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"Fully test this module's functionality through the use of fixtures."
from megacosm.generators import Generator
import unittest2 as unittest
import json
from mock import Mock, patch, MagicMock
import fakeredis
import fixtures
from config import TestConfiguration
from pprint import pprint
class TestGenerator(unittest.TestCase):
def setUp(self):
self.redis = fakeredis.FakeRedis()
fixtures.generator.import_fixtures(self)
def tearDown(self):
self.redis.flushall()
def test_missing_feature(self):
""" Test a feature that doesn't exist."""
generator = Generator(self.redis)
with self.assertRaises(AttributeError):
generator.bananasmissingfeature
def test_static_seed(self):
''' Ensure a static seed can be set. '''
generator = Generator(self.redis, {'seed':1337})
self.assertEqual(generator.seed, 1337)
def test_randomseed(self):
        ''' Ensure a seed that is an integer is created. '''
generator = Generator(self.redis)
self.assertIs(type(generator.seed), int)
#FIXME these were taken from megacosm.util.Seed.Seed values. Don't hardcode them.
self.assertGreaterEqual(generator.seed,1)
self.assertLessEqual(generator.seed, 10000000)
def test_select_by_roll(self):
''' Select the bogus_size greater than or equal to the rolled number.'''
generator = Generator(self.redis, {'seed': 1007, 'bogus_size_roll': 37})
self.assertEqual({u'score': 40, u'name': u'large', u'multiplier': 1.0} ,generator.select_by_roll('bogus_size'))
def test_select_by_roll_key_doesnt_exist(self):
        ''' Try to select funion for a roll, only to find it doesn't exist.'''
generator = Generator(self.redis)
with self.assertRaisesRegexp(ValueError, 'The key funion does not exist.'):
generator.select_by_roll('funion')
self.assertNotEqual('', generator.select_by_roll('bogus_size'))
def test_select_by_roll_highmin(self):
''' Test rolling outside our limits of 0-100. '''
generator = Generator(self.redis, { 'bogus_size_roll': 1037})
self.assertEquals({u'score': 100, u'name': u'giant', u'multiplier': 2.0},
generator.select_by_roll('bogus_size'))
generator = Generator(self.redis, {'bogus_size_roll': -1037})
self.assertEquals({u'score': 1, u'name': u'tiny', u'multiplier': 0.5},
generator.select_by_roll('bogus_size'))
def test_select_by_roll_key_wrong_type(self):
'''Intentionally try to roll on the wrong datatype.'''
generator = Generator(self.redis, {'seed': 1007, 'bogus_mylist_roll': 37})
with self.assertRaisesRegexp(Exception,
"The key bogus_mylist is not a zset; the type is list."):
generator.select_by_roll('bogus_mylist')
def test_random_list_value(self):
''' Find a random list value '''
generator = Generator(self.redis)
self.assertIn(generator.rand_value('bogus_mylist'), ['1','2','3','4'])
def test_rand_value_key_wrong_type(self):
''' Try to use a zset as a list. '''
generator = Generator(self.redis)
with self.assertRaisesRegexp(Exception,
"the key \(bogus_size\) doesn't appear to exist or isn't a list \(zset\)."):
generator.rand_value('bogus_size')
def test_rand_value_key_doesnt_exist(self):
''' Try to generate a rand_value from a key that doesn't exist at all. '''
generator = Generator(self.redis)
with self.assertRaisesRegexp(Exception, "the key \(somekey\) doesn't appear to exist or isn't a list"):
generator.rand_value('somekey')
def test_dump_vars(self):
'''Ensure that the generator dumps properly. '''
generator = Generator(self.redis, {'seed': 1007})
self.assertIn('seed', generator.dump_vars())
self.assertEqual(vars(generator), generator.dump_vars())
def test_generate_features(self):
'''test Feature Generation from a namekey'''
generator = Generator(self.redis, {'bogus_size_roll': 1})
self.assertNotIn('bogus', generator.dump_vars())
generator.generate_features('bogus')
self.assertIn('booyahfeature', generator.dump_vars())
self.assertEqual('Booyah',generator.booyahfeature)
self.assertEqual('tiny', generator.size['name'])
'''Ensure misslist from other was not included. '''
with self.assertRaises(AttributeError):
generator.misslist
def test_generate_feature_chance_100(self):
'''test Feature Generation from a namekey with 100% chance.'''
generator = Generator(self.redis, {'chnc_size_roll': 1})
self.assertNotIn('chnc', generator.dump_vars())
generator.generate_features('chnc')
self.assertIn('mylist', generator.dump_vars())
self.assertIn(generator.mylist, ['1','2','3','4'])
self.assertEqual('tiny', generator.size['name'])
'''Ensure misslist from other was not included. '''
with self.assertRaises(AttributeError):
generator.misslist
def test_generate_feature_chance_roll(self):
'''test Feature Generation from a namekey with 0% chance.'''
generator = Generator(self.redis, {'nochnc_size_roll': 1, 'nochnc_size_chance':5,'nochnc_mylist_chance':5 })
self.assertNotIn('mylist_chance', generator.dump_vars())
generator.generate_features('nochnc')
self.assertIn('mylist_chance', generator.dump_vars())
'''Ensure misslist from other was not included. '''
with self.assertRaises(AttributeError):
generator.misslist
def test_kind_description(self):
'''Ensure that kind description JSON is loaded properly.'''
generator = Generator(self.redis)
self.assertNotIn('kind', generator.dump_vars())
generator.generate_features('myknd')
self.assertIn('kind', generator.dump_vars())
def test_bad_kind_description(self):
'''Ensure that kind description with bad JSON throws an error.'''
generator = Generator(self.redis)
self.assertNotIn('kind', generator.dump_vars())
with self.assertRaises(ValueError):
generator.generate_features('mybadknd')
def test_error_handling_roll(self):
'''Ensure that select_by_roll handles errors properly.'''
generator = Generator(self.redis, {'incompleteset_size_roll':10 })
with self.assertRaises(ValueError) as cm:
generator.select_by_roll('derpderp_size')
self.assertEqual(str(cm.exception), "The key derpderp_size does not exist.")
with self.assertRaises(LookupError) as cm:
generator.select_by_roll('incompleteset_size')
self.assertEqual(str(cm.exception), 'The key (incompleteset_size) appears to be empty for a roll of 10- This should never happen.')
with self.assertRaises(ValueError) as cm:
generator.select_by_roll('badjson_widget')
self.assertEqual(str(cm.exception), '("JSON parsing error: Couldn\'t read json", \'waffles not json\')')
def test_bogus_generator(self):
        '''Ensure that features are generated when a namekey is passed to the constructor.'''
generator = Generator(self.redis,{},'bogus')
self.assertIn('booyahfeature',generator.dump_vars())
def test_generate_feature(self):
        '''Exercise generate_feature when the feature value is already set in params.'''
generator = Generator(self.redis,{'mylist':'foobar'})
generator.generate_feature( 'bogus', 'bogus_mylist')
generator = Generator(self.redis,{'kind':'small', 'kind_description':'foobar'})
generator.generate_feature( 'myknd', 'myknd_kind')
def test_render_template(self):
        '''Ensure that a template renders using values from params.'''
generator = Generator(self.redis,{'test_value':'a bigger string'})
self.assertEqual('A large string, a bigger string.',generator.render_template("A large string, {{params.test_value}}."))
| CityGenerator/Megacosm-Generator | tests/test_generator.py | Python | gpl-2.0 | 8,144 | 0.007859 |
#!/usr/bin/env python2
print "Hello World!"
print "Hello Again"
print "I like typing this."
print "This is fun."
print 'Yay! Printing.'
print "I'd much rather you 'not'."
print 'I "said" do not touch this.'
print "Some text"
| christianheinrichs/learning-lpthw | ex01/ex1-sd1.py | Python | gpl-3.0 | 226 | 0 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('website', '0016_auto_20151128_2006'),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('content', models.TextField()),
('service_comment_id', models.IntegerField()),
('username', models.CharField(max_length=255)),
('created', models.DateTimeField()),
('updated', models.DateTimeField()),
('issue', models.ForeignKey(to='website.Issue')),
],
),
]
| atuljain/coderbounty | website/migrations/0017_comment.py | Python | agpl-3.0 | 819 | 0.001221 |
import logging
from shapely.geometry import *
from lib.raster import Raster
import numpy as np
from os import path
import sys
sys.path.append(path.abspath(path.join(path.dirname(__file__), "../../..")))
from lib.shapefileloader import Shapefile
from lib.exception import DataException, MissingException
from lib.metrics import CHaMPMetric
import argparse
"""
/Users/work/Projects/CHaMP/tools/tmp/2011/Asotin/ASW00001-NF-F1P2BR/VISIT_228/Topo/GISLayers/Thalweg.shp
/Users/work/Projects/CHaMP/tools/tmp/2011/Asotin/ASW00001-NF-F1P2BR/VISIT_228/Topo/GISLayers/DEM.tif
1.0
"""
class ThalwegMetrics(CHaMPMetric):
TEMPLATE = {
'Min': None,
'Max': None,
'Mean': None,
'StDev': None,
'Count': None,
'Length': None,
'WSGradientRatio': None,
'WSGradientPC': None,
'Sinuosity': None,
'CV': None,
'ThalwegToCenterlineRatio': None
}
def calc(self, sThalwegshp, sDepthRaster, sWaterSurfaceRaster, fDist, visitMetrics):
if not path.isfile(sThalwegshp):
raise MissingException("Thalweg shapefile missing")
if not path.isfile(sDepthRaster):
raise MissingException("Depth raster missing")
if not path.isfile(sWaterSurfaceRaster):
raise MissingException("Surface raster missing")
wettedMainstemLength = visitMetrics['Wetted']['Centerline']['MainstemLength']
if wettedMainstemLength is None:
raise MissingException("No wetted mainstem length found in visit metrics")
sfile = Shapefile(sThalwegshp).featuresToShapely()
if len(sfile) < 1:
raise DataException("Thalweg shapefile has no features")
thalweg = sfile[0]['geometry']
depthRaster = Raster(sDepthRaster)
waterSurfaceRaster = Raster(sWaterSurfaceRaster)
samplepts = ThalwegMetrics.interpolateRasterAlongLine(thalweg, fDist)
results = ThalwegMetrics.lookupRasterValues(samplepts, depthRaster)['values']
# Get the elevation at the first (downstream) point on the Thalweg
dsElev = waterSurfaceRaster.getPixelVal(thalweg.coords[0])
usElev = waterSurfaceRaster.getPixelVal(thalweg.coords[-1])
if (np.isnan(dsElev)):
raise DataException('nodata detected in the raster for downstream point on the thalweg')
elif np.isnan(usElev):
raise DataException('nodata detected in the raster for upstream point on the thalweg')
waterSurfaceGradientRatio = (usElev - dsElev) / thalweg.length
waterSurfaceGradientPC = waterSurfaceGradientRatio * 100.0
# Thalweg straight length and sinuosity
firstPoint = Point(thalweg.coords[0])
lastPoint = Point(thalweg.coords[-1])
straightLength = firstPoint.distance(lastPoint)
sinuosity = thalweg.length / straightLength
self.metrics = {
'Min': np.nanmin(results),
'Max': np.nanmax(results),
'Mean': np.mean(results),
'StDev': np.std(results),
'Count': np.count_nonzero(results),
'Length': thalweg.length,
'WSGradientRatio': waterSurfaceGradientRatio,
'WSGradientPC': waterSurfaceGradientPC,
'Sinuosity': sinuosity,
'CV': 0.0,
'ThalwegToCenterlineRatio': thalweg.length / wettedMainstemLength
#, 'Values': results.data
}
if self.metrics['StDev'] != 0 and self.metrics['Mean'] != 0:
self.metrics['CV'] = self.metrics['StDev'] / self.metrics['Mean']
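    # Worked illustration of the metrics above (hypothetical numbers): a thalweg
    # 120 m long whose endpoints are 100 m apart has Sinuosity 120/100 = 1.2; if
    # the water surface rises 0.6 m from the downstream to the upstream end,
    # WSGradientRatio is 0.6/120 = 0.005 and WSGradientPC is 0.5.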
@staticmethod
def interpolateRasterAlongLine(line, fStationInterval):
"""
        Given a cross section or thalweg (LineString) and a station spacing, return regularly
        spaced points along that line
        :param line: shapely LineString to sample along
        :param fStationInterval: spacing distance between consecutive sample points
        :return: list of shapely Points, including the line's endpoint
"""
points = [line.interpolate(currDist) for currDist in np.arange(0, line.length, fStationInterval)]
# Add the endpoint if it doesn't already exist
if points[-1] != line.coords[-1]:
points.append(Point(line.coords[-1]))
return points
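    # Usage illustration (hypothetical values): for a LineString from (0, 0) to
    # (10, 0) with fStationInterval = 2.5, this yields points at distances
    # 0, 2.5, 5 and 7.5 along the line, plus the appended endpoint at (10, 0).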
@staticmethod
def lookupRasterValues(points, raster):
"""
        Given a list of points with real-world coordinates, look up values in the raster,
        then mask out any nan/nodata values
        :param points: list of shapely Points in the raster's coordinate system
        :param raster: Raster object to sample
        :return: dict with the input 'points' and the masked raster 'values'
"""
pointsdict = { "points": points, "values": [] }
for pt in pointsdict['points']:
pointsdict['values'].append(raster.getPixelVal(pt.coords[0]))
# Mask out the np.nan values
pointsdict['values'] = np.ma.masked_invalid(pointsdict['values'])
return pointsdict
if __name__ == "__main__":
logfmt = "[%(asctime)s - %(levelname)s] - %(message)s"
dtfmt = "%Y-%m-%d %I:%M:%S"
logging.basicConfig(filename='raster_metrics.log', level=logging.DEBUG, format=logfmt, datefmt=dtfmt)
# parse command line options
parser = argparse.ArgumentParser()
parser.add_argument('thalweg',
help='Path to the thalweg',
type=argparse.FileType('r'))
parser.add_argument('depthraster',
help='Path to the depth raster',
type=argparse.FileType('r'))
parser.add_argument('watersurfaceraster',
                        help='Path to the water surface raster',
type=argparse.FileType('r'))
parser.add_argument('dist',
help='interval spacing between raster measurements',
type=float)
args = parser.parse_args()
if not args.depthraster:
print "ERROR: Missing arguments"
parser.print_help()
exit(0)
if not args.watersurfaceraster:
print "ERROR: Missing arguments"
parser.print_help()
exit(0)
try:
dMetrics = ThalwegMetrics(args.thalweg.name, args.depthraster.name, args.watersurfaceraster.name, args.dist)
except AssertionError as e:
sys.exit(0)
except Exception as e:
raise
sys.exit(0)
| SouthForkResearch/CHaMP_Metrics | tools/topometrics/methods/thalweg.py | Python | gpl-3.0 | 6,178 | 0.003237 |
from collections import OrderedDict
import logging
import os
import shutil
import subprocess
import simtk.unit as units
from intermol.desmond.desmond_parser import load, save
DES_PATH = ''
logger = logging.getLogger('InterMolLog')
# terms we are ignoring for now.
#'en': 'Raw Potential',
#'E_x': 'Extended En.',
unwanted = ['E_x','E_n','E_k','constraints',]
key_dict = {
'E_p': 'Potential',
'stretch': 'Bond',
'angle': 'Angle',
'dihedral': 'All dihedrals',
'pair_vdw': 'LJ-14',
'pair_elec': 'Coulomb-14',
'nonbonded_vdw': 'LJ (SR)',
}
def standardize_key(in_key):
if in_key in key_dict:
out_key = key_dict[in_key]
else:
out_key = in_key
return out_key
def get_desmond_energy_from_file(energy_file):
"""Parses the desmond energy file. """
with open(energy_file, 'r') as f:
data = []
types = []
# First line of enegrp.dat file contains total energy terms.
line = f.readline()
# Just to make sure the line is what we think it is.
if line.startswith('time=0.000000'):
terms = line.split()
terms = terms[1:-2] # Exclude time, pressure, and volume.
for term in terms:
key, value = term.split('=')
types.append(standardize_key(key))
data.append(float(value))
        # Parse rest of file for individual energy groups.
for line in f:
if '(0.000000)' in line: # Time = 0.0
words = line.split()
if words[-1] == 'total':
continue
key = standardize_key(words[0])
if key:
types.append(key)
data.append(words[-1])
data = [float(value) * units.kilocalories_per_mole for value in data]
e_out = OrderedDict(zip(types, data))
# Discard non-energy terms.
for group in unwanted:
if group in e_out:
del e_out[group]
return e_out
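# For illustration (hypothetical values), the first enegrp.dat line parsed above
# is expected to look roughly like:
#   time=0.000000 E_p=-9000.0 stretch=120.0 angle=80.0 ... pressure=1.0 volume=250000.0
# i.e. space-separated key=value pairs, where the leading time entry and the
# trailing pressure/volume entries are excluded from the returned energies.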
def energies(cms, cfg, des_path):
"""Evalutes energies of DESMOND files
Args:
cms (str): Path to .cms file.
cfg (str): Path to .cfg file.
des_path (str): Path to DESMOND binaries.
Returns:
tot_energy:
energy_file:
"""
logger.info('Evaluating energy of {0}'.format(cms))
cms = os.path.abspath(cms)
cfg = os.path.abspath(cfg)
direc, cms_filename = os.path.split(cms)
cwd = os.getcwd()
name = os.path.splitext(cms_filename)[0]
energy_file = '%s/%s.enegrp.dat' % (direc, name)
if des_path and not (des_path == ''):
desmond_bin = os.path.join(des_path,'desmond')
elif os.environ.get('SCHRODINGER'):
desmond_bin = os.path.join(os.environ.get('SCHRODINGER'), 'desmond')
else:
raise Exception('Desmond binary not found')
# Use DESMOND To evaluate energy
# cd to directory of cms file so that files generated by desmond
# don't clog the working directory
os.chdir(direc)
if os.path.exists('trj'):
shutil.rmtree('trj')
cmd = [desmond_bin, '-WAIT', '-P', '1', '-in', cms, '-JOBNAME', name, '-c', cfg]
logger.debug('Running DESMOND with command:\n %s' % ' '.join(cmd))
with open('desmond_stdout.txt', 'w') as out, open('desmond_stderr.txt', 'w') as err:
exit = subprocess.call(cmd, stdout=out, stderr=err)
if exit:
logger.error('Energy evaluation failed. See %s/desmond_stderr.txt' % direc)
os.chdir(cwd) # return directory up a level again
raise Exception('Energy evaluation failed for {0}'.format(cms))
tot_energy = get_desmond_energy_from_file(energy_file)
# for now, remove the desmond '-out.cms' file.
outcms = cms[:-4] + '-out' + cms[-4:]
os.remove(outcms)
os.chdir(cwd) # return directory up a level again
return tot_energy, energy_file
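# Example call (paths and Desmond install location are hypothetical):
#   energy, enegrp_file = energies('system.cms', 'onepoint.cfg', '/opt/desmond')
#   print(energy['Potential'])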
| ctk3b/InterMol | intermol/desmond/__init__.py | Python | mit | 3,879 | 0.003094 |
from tests.unit.dataactcore.factories.staging import DetachedAwardFinancialAssistanceFactory
from tests.unit.dataactvalidator.utils import number_of_errors, query_columns
_FILE = 'fabs45_detached_award_financial_assistance'
def test_column_headers(database):
expected_subset = {'row_number', 'indirect_federal_sharing', 'federal_action_obligation',
'uniqueid_AssistanceTransactionUniqueKey'}
actual = set(query_columns(_FILE, database))
assert expected_subset == actual
def test_success(database):
""" Test when both are provided, IndirectCostFederalShareAmount should be less than or equal to
FederalActionObligation.
"""
# One or both not provided, rule ignored
det_award_1 = DetachedAwardFinancialAssistanceFactory(indirect_federal_sharing=None, federal_action_obligation=None)
det_award_2 = DetachedAwardFinancialAssistanceFactory(indirect_federal_sharing=123, federal_action_obligation=None)
det_award_3 = DetachedAwardFinancialAssistanceFactory(indirect_federal_sharing=None, federal_action_obligation=123)
# ICFSA is 0, rule ignored
det_award_4 = DetachedAwardFinancialAssistanceFactory(indirect_federal_sharing=0, federal_action_obligation=123)
# Both have the same sign and are appropriately valued
det_award_5 = DetachedAwardFinancialAssistanceFactory(indirect_federal_sharing=-1, federal_action_obligation=-1)
det_award_6 = DetachedAwardFinancialAssistanceFactory(indirect_federal_sharing=5, federal_action_obligation=6)
# Ignore when CorrectionDeleteIndicator is D
det_award_7 = DetachedAwardFinancialAssistanceFactory(indirect_federal_sharing=123, federal_action_obligation=0,
correction_delete_indicatr='d')
errors = number_of_errors(_FILE, database, models=[det_award_1, det_award_2, det_award_3, det_award_4, det_award_5,
det_award_6, det_award_7])
assert errors == 0
def test_failure(database):
""" Test failure when both are provided, IndirectCostFederalShareAmount should be less than or equal to
FederalActionObligation.
"""
# ICFSA is not 0 but FAO is
det_award_1 = DetachedAwardFinancialAssistanceFactory(indirect_federal_sharing=123, federal_action_obligation=0)
# Differing signs
det_award_2 = DetachedAwardFinancialAssistanceFactory(indirect_federal_sharing=-1, federal_action_obligation=1)
det_award_3 = DetachedAwardFinancialAssistanceFactory(indirect_federal_sharing=1, federal_action_obligation=-1)
# Same sign, absolute value incorrect
det_award_4 = DetachedAwardFinancialAssistanceFactory(indirect_federal_sharing=5, federal_action_obligation=4)
det_award_5 = DetachedAwardFinancialAssistanceFactory(indirect_federal_sharing=-5, federal_action_obligation=-4)
errors = number_of_errors(_FILE, database, models=[det_award_1, det_award_2, det_award_3, det_award_4, det_award_5])
assert errors == 5
| fedspendingtransparency/data-act-broker-backend | tests/unit/dataactvalidator/test_fabs45_detached_award_financial_assistance.py | Python | cc0-1.0 | 3,014 | 0.006636 |
#!/usr/bin/env python
from __future__ import print_function
import argparse
from subprocess import check_call, CalledProcessError
import shlex
import sys
import logging
log = logging.getLogger( __name__ )
def novo_sort( bam_filename, output_filename ):
cmdline_str = "novosort -c 8 -m 8G -s -f {} -o {}".format( bam_filename, output_filename )
cmdline = newSplit(cmdline_str)
try:
check_call(cmdline)
except CalledProcessError:
print("Error running the nova-sort", file=sys.stderr)
def newSplit(value):
lex = shlex.shlex(value)
lex.quotes = '"'
lex.whitespace_split = True
lex.commenters = ''
return list(lex)
def main():
parser = argparse.ArgumentParser(description="Re-sorting aligned files by read position")
parser.add_argument('output_filename')
parser.add_argument('--bam_filename')
args = parser.parse_args()
novo_sort(args.bam_filename, args.output_filename)
if __name__ == "__main__":
main()
| SANBI-SA/tools-sanbi-uwc | tools/novo_sort/novo_sort.py | Python | gpl-3.0 | 986 | 0.008114 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='NoticeQueueBatch',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('pickled_data', models.TextField()),
],
),
migrations.CreateModel(
name='NoticeSetting',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('medium', models.CharField(max_length=1, verbose_name='medium', choices=[(0, b'email')])),
('send', models.BooleanField(verbose_name='send')),
],
options={
'verbose_name': 'notice setting',
'verbose_name_plural': 'notice settings',
},
),
migrations.CreateModel(
name='NoticeType',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('label', models.CharField(max_length=40, verbose_name='label')),
('display', models.CharField(max_length=50, verbose_name='display')),
('description', models.CharField(max_length=100, verbose_name='description')),
('default', models.IntegerField(verbose_name='default')),
],
options={
'verbose_name': 'notice type',
'verbose_name_plural': 'notice types',
},
),
migrations.AddField(
model_name='noticesetting',
name='notice_type',
field=models.ForeignKey(verbose_name='notice type', to='notification.NoticeType'),
),
migrations.AddField(
model_name='noticesetting',
name='user',
field=models.ForeignKey(verbose_name='user', to=settings.AUTH_USER_MODEL),
),
migrations.AlterUniqueTogether(
name='noticesetting',
unique_together=set([('user', 'notice_type', 'medium')]),
),
]
| GeoNode/geonode-notification | notification/migrations/0001_initial.py | Python | mit | 2,390 | 0.003766 |