text (string, lengths 4 – 1.02M) | meta (dict)
---|---|
import time
def knows_how_to_generate(output_file, another_input_dep):
f = open(output_file, "w")
print >>f, "#define GENERATED \"%s\"" % (time.ctime(), )
print >>f, "#define ANOTHER_GENERATED \"%s\"" % (open(another_input_dep).read().strip(), )
f.close()
| {
"content_hash": "c12bc8279389bb4d8a77e6adbf0953ae",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 94,
"avg_line_length": 39,
"alnum_prop": 0.608058608058608,
"repo_name": "da-x/crumb",
"id": "dcbe8d4ea46690bfc7377499956623325b598878",
"size": "273",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/arena1/someimport.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "51147"
},
{
"name": "Python",
"bytes": "17725"
},
{
"name": "Shell",
"bytes": "447"
}
],
"symlink_target": ""
} |
import json
import random
import string
from flask import Flask, request, jsonify
from flask.ext.pymongo import PyMongo
from pymongo.errors import DuplicateKeyError
from crossdomain import crossdomain
app = Flask(__name__)
app.config['MONGO_DBNAME'] = 'kasm'
mongo = PyMongo(app)
@app.route('/alias/test')
def hello_world():
return "API is a go"
@app.route('/alias/create', methods=['POST'])
def create_alias():
def id_generator(size, chars=string.ascii_lowercase + string.digits):
return ''.join(random.choice(chars) for _ in xrange(size))
email = request.form['email']
inserted = False
while not inserted:
try:
alias = id_generator(8)
mongo.db.redirects.insert({'email': email, 'alias': alias})
except DuplicateKeyError:
pass
else:
inserted = True
to_return = {'email': email,
'encrypt': 'lolwut',
'original': email,
'response': 'success',
'username': alias,
}
return jsonify(to_return)
@app.route('/alias/deactivate', methods=['POST'])
def deactivate_alias():
alias = request.form['alias']
print 'Received request to deactivate alias "%s".' % alias
result = mongo.db.redirects.update({'alias': alias},
{'$set': {'deactivated': True}})
print 'Result:'
print json.dumps(result, sort_keys=True, indent=3)
to_return = {'alias': alias}
to_return['response'] = 'success' if result['n'] > 0 else 'fail'
return jsonify(to_return)
if __name__ == '__main__':
app.run(host="0.0.0.0", debug=True)
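# Illustrative client calls for the two POST routes above (a sketch only; the
# host and port assume the Flask development server default of port 5000
# started by app.run(), and the example values are made up):
#
#   curl -X POST -d 'email=user@example.com' http://localhost:5000/alias/create
#   curl -X POST -d 'alias=abcd1234' http://localhost:5000/alias/deactivate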
| {
"content_hash": "006433917fc47c27ddacc1f53a98fe42",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 73,
"avg_line_length": 28.74137931034483,
"alnum_prop": 0.5944811037792441,
"repo_name": "clayadavis/OpenKasm",
"id": "62b9e75f048a39ddbbc70d3e4a619598339344bb",
"size": "1667",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flask/kasm_server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "108526"
},
{
"name": "JavaScript",
"bytes": "30612"
},
{
"name": "Python",
"bytes": "9647"
}
],
"symlink_target": ""
} |
import ctypes
import platform
import os
print(platform.system())
if platform.system() == "Linux":
testlib = ctypes.CDLL("./libRunningDict.so")
else:
if platform.architecture()[0] == '64bit':
dllpath = "libRunningDict64.dll"
else:
dllpath = "libRunningDict32.dll"
print("Loading: " + dllpath)
testlib = ctypes.CDLL(dllpath)
testlib.search.restype = ctypes.c_char_p
testlib.addDictionary.restype = ctypes.c_bool
testlib.addDictionaryForce.restype = ctypes.c_bool
print("Loading dicts...")
# print (testlib.addDictionaryForce(b'test.dict', True))
print (testlib.addDictionaryForce(b'freedict-deu-eng.dict', False))
# print (testlib.addDictionaryForce(b'test.dict', True))
print("...done")
testlib.setNumberOfThreads(ctypes.c_int(8))
import json
howMany = 40
sumOfTimes = 0
print("Testing:")
for x in range(0,howMany):
# rawBytes = testlib.search(b'CKeine_katze_CANTO_Tag_RSUUnit_KatzeUnd ihre')
rawBytes = testlib.search(b'CKeineKatz ihre')
decodeData = rawBytes.decode("utf-8")
jsonResult = json.loads(decodeData)
sumOfTimes = sumOfTimes + jsonResult["speed"]
print(" " + str(jsonResult["speed"]) + "s") # +", " + str(sumOfTimes))
print (json.dumps(jsonResult, sort_keys=True, indent=4, separators=(',', ': ')))
print("\navg time = " + str(sumOfTimes/howMany) + "s")
| {
"content_hash": "272e855d936b8b3bb13985893c423361",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 80,
"avg_line_length": 29.043478260869566,
"alnum_prop": 0.6931137724550899,
"repo_name": "kracejic/runningDict",
"id": "a2bb6ede00236be341359fc1c489808c5186899f",
"size": "1339",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1152"
},
{
"name": "C++",
"bytes": "97576"
},
{
"name": "CMake",
"bytes": "20607"
},
{
"name": "CSS",
"bytes": "465785"
},
{
"name": "Python",
"bytes": "8226"
},
{
"name": "Ruby",
"bytes": "76"
},
{
"name": "Shell",
"bytes": "1349"
}
],
"symlink_target": ""
} |
from msrest.serialization import Model
class ConnStringValueTypePair(Model):
"""Database connection string value to type pair.
:param value: Value of pair.
:type value: str
:param type: Type of database. Possible values include: 'MySql',
'SQLServer', 'SQLAzure', 'Custom', 'NotificationHub', 'ServiceBus',
'EventHub', 'ApiHub', 'DocDb', 'RedisCache', 'PostgreSQL'
:type type: str or ~azure.mgmt.web.models.ConnectionStringType
"""
_validation = {
'value': {'required': True},
'type': {'required': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': 'str'},
'type': {'key': 'type', 'type': 'ConnectionStringType'},
}
def __init__(self, value, type):
super(ConnStringValueTypePair, self).__init__()
self.value = value
self.type = type
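# Minimal usage sketch (illustrative only, not part of the generated SDK
# module); 'SQLAzure' is one of the ConnectionStringType values listed in the
# class docstring above, and the connection string is a made-up placeholder:
#
#   pair = ConnStringValueTypePair(
#       value='Server=tcp:example.database.windows.net;Database=mydb',
#       type='SQLAzure')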
| {
"content_hash": "14a8d76e1d5bbd603f3243afa55d6de8",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 72,
"avg_line_length": 30.678571428571427,
"alnum_prop": 0.6030267753201397,
"repo_name": "lmazuel/azure-sdk-for-python",
"id": "a06aab9a003c21fe5a1382a8e7bed3f307b49f0e",
"size": "1333",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-web/azure/mgmt/web/models/conn_string_value_type_pair.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42572767"
}
],
"symlink_target": ""
} |
import twitter
import urllib2
import time
import re
import gdbm
opener = urllib2.build_opener()
urllib2.install_opener(opener)
api = twitter.Api(consumer_key="", consumer_secret="",access_token_key="", access_token_secret="",proxy ={})
def get_proxy_urllib(Proxy=None):
if not Proxy:
proxy = urllib2.ProxyHandler({}) # your proxy here
else:
proxy = urllib2.ProxyHandler(Proxy)
opener = urllib2.build_opener(proxy)
urllib2.install_opener(opener)
return urllib2
def get_user(string):
'''Given a username, returns the corresponding twitter.User object'''
user = api.GetUser(string)
return user
def getfollowers():
'''Returns a list containing all followers as twitter.User'''
followers = api.GetFollowers()
return followers
def getfollowing():
'''Returns a list containing all followings/friends as twitter.User'''
following = api.GetFriends()
return following
def get_user_pic(user):
'''Returns the URL of display picture of the twitter.User'''
image_url = user.profile_image_url
page = opener.open(image_url)
f = open(user.screen_name+'.jpg','wb')
f.write(page.read())
f.close()
return
def get_user_status(user):
'''Returns the status as twitter.Status of twitter.User'''
return user.status
def get_status_detail(status):
'''Returns a tuple (status.id, status.text,status.location,status.user,status.urls,status.user_mentions,status.hashtags) of twitter.Status'''
return(status.id, status.text,status.location,status.user,status.urls,status.user_mentions,status.hashtags)
def show_friends_timeline(since_ids=None, hashtag_list = None, hashtag_db_name=None, tweet_db_name = None):
'''since_ids - twitter.Status.id (the timeline will consist of all the tweets after the tweet with id as since_ids)
hashtag_list - A list of hashtags if you want to search for particular hashtags
hashtag_db_name - Provide a string name if you want to save the hashtags and their counts. They will be stored in a gdbm file.
tweet_db_name - Provide a string name if you want to save the tweets containing hashtags, keyed by tweet id. They will be stored in a gdbm file.
Returns the tweet id of the latest tweet.
'''
timeline = api.GetFriendsTimeline(since_id = since_ids)
if not timeline:
return since_ids
hashtag_timeline_db = None
tweet_db = None
if hashtag_db_name:
hashtag_timeline_db = gdbm.open(hashtag_db_name,'c')
if tweet_db_name:
tweet_db = gdbm.open(tweet_db_name,'c')
since_ids = show_timeline(timeline, hashtag_db = hashtag_timeline_db, tweet_db = tweet_db ,hashtag_list = hashtag_list)
if hashtag_db_name:
hashtag_timeline_db.close()
if tweet_db_name:
tweet_db.close()
return since_ids
def set_continuous_timeline(since_ids=None, hashtag_list = None, hashtag_db_name = None, tweet_db_name = None):
'''
since_ids - twitter.Status.id (the timeline will consist of all the tweets after the tweet with id as since_ids)
hashtag_list - A list of hashtags if you want to search for particular hashtags
hashtag_db_name - Provide a string name if you want to save the hashtags and their counts. They will be stored in a gdbm file.
tweet_db_name - Provide a string name if you want to save the tweets containing hashtags, keyed by tweet id. They will be stored in a gdbm file.
It will run indefinitely until a KeyboardInterrupt (^C) is received.
Returns the tweet id of the latest tweet.
'''
try:
if not since_ids:
since_ids = None
while 1:
since_ids = show_friends_timeline(since_ids, hashtag_list = hashtag_list, hashtag_db_name = hashtag_db_name, tweet_db_name = tweet_db_name )
time.sleep(15)
except KeyboardInterrupt:
return since_ids
def show_user_timeline(user, since_ids = None, hashtag_list = None, hashtag_db_name = None, tweet_db_name = None):
'''
user - twitter.User object
since_ids - twitter.Status.id (the timeline will consist of all the tweets after the tweet with id as since_ids)
hashtag_list - A list of hashtags if you want to search for particular hashtags
hashtag_db_name - Provide a string name if you want to save the hashtags and their counts. They will be stored in a gdbm file.
tweet_db_name - Provide a string name if you want to save the tweets containing hashtags, keyed by tweet id. They will be stored in a gdbm file.
Returns the tweet id of the latest tweet.
'''
if not user:
return since_ids
if not user.protected:
try:
timeline = api.GetUserTimeline(user.id, since_id = since_ids)
except ValueError:
print 'ValueError'
else:
return since_ids
if not timeline:
return since_ids
hashtag_user_db = None
tweet_user_db = None  # ensure defined even when tweet_db_name is not given
if hashtag_db_name:
# print hashtag_db_name
hashtag_user_db = gdbm.open(hashtag_db_name+'_hashtag','c')
if tweet_db_name:
tweet_user_db = gdbm.open(tweet_db_name+'_tweets','c')
since_ids = show_timeline(timeline, hashtag_db = hashtag_user_db, tweet_db = tweet_user_db, hashtag_list = hashtag_list)
if hashtag_db_name:
hashtag_user_db.close()
if tweet_db_name:
tweet_user_db.close()
return since_ids
def set_continuous_user_timeline(user, since_ids = None, hashtag_list = None, hashtag_db_name = None, tweet_db_name = None ):
'''
user - twitter.User object
since_ids - twitter.Status.id (the timeline will consist of all the tweets after the tweet with id as since_ids)
hashtag_list - A list of hashtags if you want to search for particular hashtags
hashtag_db_name - Provide a string name if you want to save the hashtags and their counts. They will be stored in a gdbm file.
tweet_db_name - Provide a string name if you want to save the tweets containing hashtags, keyed by tweet id. They will be stored in a gdbm file.
It will run indefinitely until a KeyboardInterrupt (^C) is received.
Returns the tweet id of the latest tweet.
'''
if not user:
return since_ids
try:
while 1:
# print hashtag_db_name
since_ids = show_user_timeline(user, since_ids, hashtag_list = hashtag_list, hashtag_db_name = hashtag_db_name, tweet_db_name = tweet_db_name)
time.sleep(30)
except KeyboardInterrupt:
return since_ids
def show_public_timeline(since_ids = None, hashtag_list = None, hashtag_db_name = None, tweet_db_name = None):
'''
since_ids - twitter.Status.id (the timeline will consist of all the tweets after the tweet with id as since_ids)
hashtag_list - A list of hashtags if you want to search for particular hashtags
hashtag_db_name - Provide a string name if you want to save the hashtags and their counts. They will be stored in a gdbm file.
tweet_db_name - Provide a string name if you want to save the tweets containing hashtags, keyed by tweet id. They will be stored in a gdbm file.
Returns the tweet id of the latest tweet.
'''
timeline = api.GetPublicTimeline(since_id = since_ids)
if not timeline:
return since_ids
hashtag_public_db = None
tweet_db = None
if hashtag_db_name:
hashtag_public_db = gdbm.open(hashtag_db_name,'c')
if tweet_db_name:
tweet_db = gdbm.open(tweet_db_name,'c')
since_ids = show_timeline(timeline, hashtag_list = hashtag_list, hashtag_db = hashtag_public_db, tweet_db = tweet_db)
if hashtag_db_name:
hashtag_public_db.close()
if tweet_db_name:
tweet_db.close()
return since_ids
def set_continuous_public_timeline(since_ids = None, hashtag_list = None, hashtag_db_name = None, tweet_db_name = None):
'''
since_ids - twitter.Status.id (the timeline will consist of all the tweets after the tweet with id as since_ids)
hashtag_list - A list of hashtags if you want to search for particular hashtags
hashtag_db_name - Provide a string name if you want to save the hashtags and their counts. They will be stored in a gdbm file.
tweet_db_name - Provide a string name if you want to save the tweets containing hashtags, keyed by tweet id. They will be stored in a gdbm file.
It will run indefinitely until a KeyboardInterrupt (^C) is received.
Returns the tweet id of the latest tweet.
'''
try:
count = 0
if not since_ids:
since_ids = None
while 1:
since_ids = show_public_timeline(since_ids, hashtag_list = hashtag_list, hashtag_db_name = hashtag_db_name, tweet_db_name = tweet_db_name)
count = count+1
time.sleep(1)
if count > 60:
break
except KeyboardInterrupt:
return since_ids
def show_timeline(timeline, hashtag_db=None, tweet_db = None, hashtag_list=None):
for i in range(len(timeline)-1,-1,-1):
ids = timeline[i].id
screen_name = '@'+timeline[i].user.screen_name
user_name = timeline[i].user.name
text = timeline[i].text
tweet = screen_name+' ('+user_name+') '+': '+text
print tweet
res = get_hashtag(text)
if hashtag_list:
for j in range(len(hashtag_list)):
if not hashtag_list[j].startswith('#'):
hashtag_list[j]='#'+hashtag_list[j]
if hashtag_list[j] in res:
# print "opening",hashtag_list[j]+"_hashtag"
py_db = gdbm.open(hashtag_list[j]+'_hashtag','c')
py_db[str(timeline[i].id)] = repr(tweet)
py_db.close()
if res:
# print hashtag_db
if hashtag_db is not None:
# print 'save_hashtag'
hashtag_db = save_hashtag(res, hashtag_db)
if tweet_db is not None:
tweet_db = save_tweet(ids, tweet, tweet_db)
return timeline[0].id
def get_hashtag(tweet):
hashtag = re.compile(u"#\w+")
res = re.findall(hashtag, tweet)
for i in range(len(res)):
res[i] = res[i].lower()
print res
return res
def save_hashtag(res, db):
for i in range(len(res)):
try:
count = int(db[res[i]])
count = count + 1
db[res[i]] = str(count)
except KeyError:
db[res[i]] = '1'
return db
def save_tweet(ids, tweet, db):
print tweet
try:
db[str(ids)] = tweet
except TypeError:
print 'typeerror'
return db
def search_hashtags(hashtag_list, flag = 1, hashtag_db_flag = 1, ids = None, user = None, hashtag_db_name = None, tweet_db_name = None):
'''
hashtag_list - A list of hashtags(must be string)
flag - flag = 1 : Search hashtags in timeline.
flag = 2 : Search hashtags in user's timeline.
flag = 3 : Search hashtags in public timeline
hashtag_db_flag - flag = 0 : Doesn't store hashtags
flag != 0 : Store hashtags
ids - twitter.Status.id (the hashtags will be searched in tweets after the tweet with id as since_ids)
user - if flag == 2: twitter.User object
hashtag_db_name -
if flag == 1:
if hashtag_db_flag != 0 : Store hashtags and counts in a gdbm file with the given string. If None, hashtag_db_name defaults to 'hashtags_timeline'
if flag == 2:
if hashtag_db_flag != 0 : Store hashtags and counts in a gdbm file with the given string. If None, hashtag_db_name defaults to the user's screen name
if flag == 3:
if hashtag_db_flag != 0 : Store hashtags and counts in a gdbm file with the given string. If None, hashtag_db_name defaults to 'hashtags_public'
tweet_db_name - If provided, it will store all the tweets containing the provided hashtags in a gdbm file with tweet ids.
else, it will not store the tweets.
It will run indefinitely until a KeyboardInterrupt (^C) is received.
Returns the id of the latest tweet.
'''
if hashtag_list:
for i in range(len(hashtag_list)):
hashtag_list[i] = hashtag_list[i].lower()
if not hashtag_list[i].startswith('#'):
hashtag_list[i] = '#'+hashtag_list[i]
if flag == 1:
if hashtag_db_flag:
if not hashtag_db_name:
hashtag_db_name = 'hashtags_timeline'
ids = set_continuous_timeline(ids, hashtag_list, hashtag_db_name = hashtag_db_name, tweet_db_name = tweet_db_name)
if flag == 2:
print 'user hashtags'
if not user:
print 'No user provided'
return ids
if hashtag_db_flag:
if hashtag_db_name is not None:
hashtag_db_name = hashtag_db_name
else:
hashtag_db_name = user.screen_name
else:
hashtag_db_name = None
# print hashtag_db_name
ids = set_continuous_user_timeline(user, ids, hashtag_list, hashtag_db_name = hashtag_db_name, tweet_db_name = tweet_db_name)
if flag == 3:
if hashtag_db_flag:
if not hashtag_db_name:
hashtag_db_name = 'hashtags_public'
ids = set_continuous_public_timeline(ids, hashtag_list = hashtag_list, hashtag_db_name = hashtag_db_name, tweet_db_name = tweet_db_name)
return ids
def get_conv(user_list, hashtag_db_name, tweet_db_name = None) :
'''
user_list - A list containing twitter.User objects
hashtag_db_name - A string name for the gdbm file that will save all the hashtags and their counts.
tweet_db_name - If a string is provided, all the tweets containing hashtags will be stored in the gdbm file, keyed by tweet id.
It will run indefinitely until a KeyboardInterrupt (^C) is received.
Returns nothing
'''
if not user_list:
return
try:
ids = len(user_list)*[None]
while 1:
for i in range(len(user_list)):
time.sleep(2)
ids[i] = show_user_timeline(user=user_list[i], since_ids = ids[i], hashtag_db_name = hashtag_db_name, tweet_db_name = tweet_db_name, hashtag_list = None)
#ids[i] = search_hashtags(ids = ids[i], flag=2,hashtag_db_flag=1,user=user_list[i],hashtag_db_name = hashtag_db_name)
except KeyboardInterrupt:
return
def get_user_profile(user):
user_id = user.id
name = user.name
screen_name = user.screen_name
des = user.description
protected = user.protected
image_url = user.profile_image_url
user_url = user.url
status = user.status
status_count = user.statuses_count
followers_count = user.followers_count
friends_count = user.friends_count
return(user, user_id, name, screen_name, des, protected, image_url, user_url, status, status_count, followers_count, friends_count)
def get_tweet_profile(status):
t = status.created_at
f = status.favorited
in_reply_to = (status.in_reply_to_screen_name, status.in_reply_to_user_id, status.in_reply_to_status_id)
source = status.source
status_id = status.id
tweet = status.text
user = status.user
user_mentions = status.user_mentions
return(status_id, tweet, user, in_reply_to, user_mentions, f, t)
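# Usage sketch (illustrative only; it assumes the twitter.Api credentials at
# the top of this module are filled in, and the screen name and db file names
# below are made up). Runs until interrupted with ^C:
#
#   user = get_user('some_screen_name')
#   last_id = search_hashtags(['python', 'datamining'], flag=2, user=user,
#                             hashtag_db_name='example_hashtags',
#                             tweet_db_name='example_tweets')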
| {
"content_hash": "819c48d83ba3dc5fab2b0fbc6a792cbd",
"timestamp": "",
"source": "github",
"line_count": 363,
"max_line_length": 169,
"avg_line_length": 41.892561983471076,
"alnum_prop": 0.6406260274873413,
"repo_name": "jayrambhia/Twitter-Data-Mining",
"id": "009128085d0ad2a9e56593fad0baf46174d911f5",
"size": "15232",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jweepy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "132793"
}
],
"symlink_target": ""
} |
"""
Testing the rst files generator
"""
from __future__ import division, absolute_import, print_function
import pytest
from sphinx.errors import ExtensionError
import sphinx_gallery.backreferences as sg
from sphinx_gallery.py_source_parser import split_code_and_text_blocks
from sphinx_gallery.gen_rst import _sanitize_rst
REFERENCE = r"""
.. raw:: html
<div class="sphx-glr-thumbcontainer" tooltip="{0}">
.. only:: html
.. figure:: /fake_dir/images/thumb/sphx_glr_test_file_thumb.png
:alt: test title
:ref:`sphx_glr_fake_dir_test_file.py`
.. raw:: html
</div>{1}
"""
@pytest.mark.parametrize('content, tooltip, is_backref', [
# HTML sanitizing
('<"test">', '<"test">', False),
# backref support
('test formating', 'test formating', True),
# RST sanitizing
('1 :class:`~a.b`. 2 :class:`a.b` 3 :ref:`whatever <better name>`',
'1 b. 2 a.b 3 better name', False),
('use :meth:`mne.io.Raw.plot_psd` to',
'use mne.io.Raw.plot_psd to', False),
('`this` and ``that``; and `these things` and ``those things``',
'this and that; and these things and those things', False),
])
def test_thumbnail_div(content, tooltip, is_backref):
"""Test if the thumbnail div generates the correct string."""
with pytest.raises(ExtensionError, match='internal Sphinx-Gallery thumb'):
html_div = sg._thumbnail_div('fake_dir', '', 'test_file.py',
'<"test">', '<"title">')
content = _sanitize_rst(content)
title = 'test title'
html_div = sg._thumbnail_div('fake_dir', '', 'test_file.py',
content, title, is_backref=is_backref,
check=False)
if is_backref:
extra = """
.. only:: not html
* :ref:`sphx_glr_fake_dir_test_file.py`"""
else:
extra = ''
reference = REFERENCE.format(tooltip, extra)
assert html_div == reference
def test_identify_names(unicode_sample):
"""Test name identification."""
expected = {
'os.path.join':
[{
'name': 'join',
'module': 'os.path',
'module_short': 'os.path',
'is_class': False,
}],
'br.identify_names':
[{
'name': 'identify_names',
'module': 'sphinx_gallery.back_references',
'module_short': 'sphinx_gallery.back_references',
'is_class': False,
}],
'identify_names':
[{
'name': 'identify_names',
'module': 'sphinx_gallery.back_references',
'module_short': 'sphinx_gallery.back_references',
'is_class': False,
}],
}
_, script_blocks = split_code_and_text_blocks(unicode_sample)
res = sg.identify_names(script_blocks)
assert expected == res
def test_identify_names2(tmpdir):
"""Test more name identification."""
code_str = b"""
'''
Title
-----
This is an example.
'''
# -*- coding: utf-8 -*-
# \xc3\x9f
from a.b import c
import d as e
import h.i
print(c)
e.HelloWorld().f.g
h.i.j()
"""
expected = {
'c':
[{
'name': 'c',
'module': 'a.b',
'module_short': 'a.b',
'is_class': False,
}],
'e.HelloWorld':
[{
'name': 'HelloWorld',
'module': 'd',
'module_short': 'd',
'is_class': False,
}],
'h.i.j':
[{
'name': 'j',
'module': 'h.i',
'module_short': 'h.i',
'is_class': False,
}],
}
fname = tmpdir.join("identify_names.py")
fname.write(code_str, 'wb')
_, script_blocks = split_code_and_text_blocks(fname.strpath)
res = sg.identify_names(script_blocks)
assert expected == res
code_str = b"""
'''
Title
-----
This example uses :func:`k.l` and :meth:`~m.n`.
'''
""" + code_str.split(b"'''")[-1]
expected['k.l'] = [{u'module': u'k', u'module_short': u'k', u'name': u'l',
'is_class': False}]
expected['m.n'] = [{u'module': u'm', u'module_short': u'm', u'name': u'n',
'is_class': False}]
fname = tmpdir.join("identify_names.py")
fname.write(code_str, 'wb')
_, script_blocks = split_code_and_text_blocks(fname.strpath)
res = sg.identify_names(script_blocks)
assert expected == res
| {
"content_hash": "add9995da59a3f448b63298fe9fe5036",
"timestamp": "",
"source": "github",
"line_count": 164,
"max_line_length": 78,
"avg_line_length": 27.304878048780488,
"alnum_prop": 0.5290308173291648,
"repo_name": "Eric89GXL/sphinx-gallery",
"id": "5afb819a5044b1c6c9d498e471b735e90752113c",
"size": "4551",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sphinx_gallery/tests/test_backreferences.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "3981"
},
{
"name": "Python",
"bytes": "113604"
},
{
"name": "Shell",
"bytes": "1280"
}
],
"symlink_target": ""
} |
"""
A cross-platform library to retrieve the current user's special folders (like
Images, Documents, Videos etc.).
"""
import sys
import locale
VERSION = '0.1.0'
__encoding__ = locale.getdefaultlocale()[1]
if sys.platform.startswith("win"):
from win32com.shell import shell, shellcon
__mapping__ = {
'DESKTOP': 'CSIDL_DESKTOP',
'DOCUMENTS': 'CSIDL_PERSONAL',
'PICTURES': 'CSIDL_MYPICTURES',
'MUSIC': 'CSIDL_MYMUSIC',
'VIDEOS': 'CSIDL_MYVIDEO', # None on XP
# TODO: "downloads" folder on windows ?
}
def _get_folder(typ):
"""
Windows implementation.
"""
try:
typ = __mapping__[typ]
return shell.SHGetFolderPath(0, getattr(shellcon, typ), None, 0)
except:
return None
elif sys.platform.startswith("darwin"):
import AppKit
__mapping__ = {
'DESKTOP': 'NSDesktopDirectory',
'DOCUMENTS': 'NSDocumentDirectory',
'PICTURES': 'NSPicturesDirectory',
'MUSIC': 'NSMusicDirectory',
'VIDEOS': 'NSMoviesDirectory',
'DOWNLOAD': 'NSDownloadsDirectory',
}
def _get_folder(typ):
"""
MacOSX implementation.
"""
try:
typ = __mapping__[typ]
return unicode(
AppKit.NSSearchPathForDirectoriesInDomains(
getattr(AppKit, typ),
AppKit.NSUserDomainMask,
True
)[0],
__encoding__
)
except:
return None
else:
import subprocess
def _get_folder(typ):
"""
Unices implementation.
"""
try:
proc = subprocess.Popen(
['xdg-user-dir', typ],
stdout=subprocess.PIPE
)
return unicode(proc.communicate()[0].strip(), __encoding__)
except:
return None
def get_desktop_folder():
"""
Returns the "Desktop" folder path.
"""
return _get_folder('DESKTOP')
def get_documents_folder():
"""
Returns the "Documents" folder path.
"""
return _get_folder('DOCUMENTS')
def get_pictures_folder():
"""
Returns the "Pictures" folder path.
"""
return _get_folder('PICTURES')
def get_music_folder():
"""
Returns the "Music" folder path.
"""
return _get_folder('MUSIC')
def get_videos_folder():
"""
Returns the "Music" folder path.
"""
return _get_folder('VIDEOS')
def get_downloads_folder():
"""
Returns the "Downloads" folder path.
"""
return _get_folder('DOWNLOAD')
def get_all_special_folders():
"""
Returns all special folders in a dictionary.
"""
return {
'desktop': get_desktop_folder(),
'documents': get_documents_folder(),
'pictures': get_pictures_folder(),
'music': get_music_folder(),
'videos': get_videos_folder(),
'downloads': get_downloads_folder(),
}
if __name__ == '__main__':
print 'Special folders on platform', sys.platform
print 'Desktop folder', get_desktop_folder()
print 'Documents folder', get_documents_folder()
print 'Pictures folder', get_pictures_folder()
print 'Music folder', get_music_folder()
print 'Videos folder', get_videos_folder()
print 'Downloads folder', get_downloads_folder()
print 'All folders', get_all_special_folders()
| {
"content_hash": "265f5712f91f6df42aace55d2ca30328",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 77,
"avg_line_length": 24.62142857142857,
"alnum_prop": 0.5590368436321439,
"repo_name": "mediaodyssee/specialfolders",
"id": "6ee22b4a2d23d1c277a36c94e0259e2264b31eec",
"size": "3494",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "specialfolders.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11644"
},
{
"name": "Shell",
"bytes": "4527"
}
],
"symlink_target": ""
} |
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='ErrorHandling.proto',
package='pb',
serialized_pb=_b('\n\x13\x45rrorHandling.proto\x12\x02pb\"p\n\x18StackTraceElementMessage\x12\x17\n\x0f\x64\x65\x63laring_class\x18\x01 \x01(\t\x12\x13\n\x0bmethod_name\x18\x02 \x01(\t\x12\x11\n\tfile_name\x18\x03 \x01(\t\x12\x13\n\x0bline_number\x18\x04 \x01(\x05\"\x7f\n\x17GenericExceptionMessage\x12\x12\n\nclass_name\x18\x01 \x01(\t\x12\x0f\n\x07message\x18\x02 \x01(\t\x12\x12\n\nerror_info\x18\x03 \x01(\x0c\x12+\n\x05trace\x18\x04 \x03(\x0b\x32\x1c.pb.StackTraceElementMessage\"a\n\x17\x46oreignExceptionMessage\x12\x0e\n\x06source\x18\x01 \x01(\t\x12\x36\n\x11generic_exception\x18\x02 \x01(\x0b\x32\x1b.pb.GenericExceptionMessageBF\n*org.apache.hadoop.hbase.protobuf.generatedB\x13\x45rrorHandlingProtosH\x01\xa0\x01\x01')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_STACKTRACEELEMENTMESSAGE = _descriptor.Descriptor(
name='StackTraceElementMessage',
full_name='pb.StackTraceElementMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='declaring_class', full_name='pb.StackTraceElementMessage.declaring_class', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='method_name', full_name='pb.StackTraceElementMessage.method_name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='file_name', full_name='pb.StackTraceElementMessage.file_name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='line_number', full_name='pb.StackTraceElementMessage.line_number', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=27,
serialized_end=139,
)
_GENERICEXCEPTIONMESSAGE = _descriptor.Descriptor(
name='GenericExceptionMessage',
full_name='pb.GenericExceptionMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='class_name', full_name='pb.GenericExceptionMessage.class_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='message', full_name='pb.GenericExceptionMessage.message', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='error_info', full_name='pb.GenericExceptionMessage.error_info', index=2,
number=3, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='trace', full_name='pb.GenericExceptionMessage.trace', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=141,
serialized_end=268,
)
_FOREIGNEXCEPTIONMESSAGE = _descriptor.Descriptor(
name='ForeignExceptionMessage',
full_name='pb.ForeignExceptionMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='source', full_name='pb.ForeignExceptionMessage.source', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='generic_exception', full_name='pb.ForeignExceptionMessage.generic_exception', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=270,
serialized_end=367,
)
_GENERICEXCEPTIONMESSAGE.fields_by_name['trace'].message_type = _STACKTRACEELEMENTMESSAGE
_FOREIGNEXCEPTIONMESSAGE.fields_by_name['generic_exception'].message_type = _GENERICEXCEPTIONMESSAGE
DESCRIPTOR.message_types_by_name['StackTraceElementMessage'] = _STACKTRACEELEMENTMESSAGE
DESCRIPTOR.message_types_by_name['GenericExceptionMessage'] = _GENERICEXCEPTIONMESSAGE
DESCRIPTOR.message_types_by_name['ForeignExceptionMessage'] = _FOREIGNEXCEPTIONMESSAGE
StackTraceElementMessage = _reflection.GeneratedProtocolMessageType('StackTraceElementMessage', (_message.Message,), dict(
DESCRIPTOR = _STACKTRACEELEMENTMESSAGE,
__module__ = 'ErrorHandling_pb2'
# @@protoc_insertion_point(class_scope:pb.StackTraceElementMessage)
))
_sym_db.RegisterMessage(StackTraceElementMessage)
GenericExceptionMessage = _reflection.GeneratedProtocolMessageType('GenericExceptionMessage', (_message.Message,), dict(
DESCRIPTOR = _GENERICEXCEPTIONMESSAGE,
__module__ = 'ErrorHandling_pb2'
# @@protoc_insertion_point(class_scope:pb.GenericExceptionMessage)
))
_sym_db.RegisterMessage(GenericExceptionMessage)
ForeignExceptionMessage = _reflection.GeneratedProtocolMessageType('ForeignExceptionMessage', (_message.Message,), dict(
DESCRIPTOR = _FOREIGNEXCEPTIONMESSAGE,
__module__ = 'ErrorHandling_pb2'
# @@protoc_insertion_point(class_scope:pb.ForeignExceptionMessage)
))
_sym_db.RegisterMessage(ForeignExceptionMessage)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n*org.apache.hadoop.hbase.protobuf.generatedB\023ErrorHandlingProtosH\001\240\001\001'))
# @@protoc_insertion_point(module_scope)
| {
"content_hash": "8c61d3711a231869f1e97df3300a5a42",
"timestamp": "",
"source": "github",
"line_count": 193,
"max_line_length": 734,
"avg_line_length": 39.95854922279793,
"alnum_prop": 0.7244553941908713,
"repo_name": "CurleySamuel/PyBase",
"id": "d6e84432601f1fbd26a12e70d4d878b4a66eceeb",
"size": "7802",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pb/ErrorHandling_pb2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Protocol Buffer",
"bytes": "72330"
},
{
"name": "Python",
"bytes": "103140"
},
{
"name": "Shell",
"bytes": "4058"
}
],
"symlink_target": ""
} |
import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BOARD)
GPIO.setup(13, GPIO.OUT)
GPIO.output(13, False)
time.sleep(5)
GPIO.cleanup()
| {
"content_hash": "512dcb4232059867e7d68b522a155eb6",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 24,
"avg_line_length": 14,
"alnum_prop": 0.75,
"repo_name": "EchoFUN/raspi",
"id": "1b6d95af3a89d8772b3dfd220670e486ed2a07bc",
"size": "317",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/fan.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22633"
}
],
"symlink_target": ""
} |
import os
import re
from django import template
from django.conf import settings
from django.contrib.staticfiles import finders
from django.contrib.staticfiles.templatetags.staticfiles import static
register = template.Library()
DEFAULT_HTML_TAGS = {
'.css': '<link rel="stylesheet" href="{}">',
'.js': '<script src="{}"></script>'
}
HTML_TAGS = getattr(settings, 'GRUNTED_ASSETS_HTML_TAGS', DEFAULT_HTML_TAGS)
DEFAULT_HTML_TAGS_INLINE = {
'.css': '<style>{}\n</style>',
'.js': '<script>{}\n</script>'
}
HTML_TAGS_INLINE = getattr(settings, 'GRUNTED_ASSETS_HTML_TAGS_INLINE',
DEFAULT_HTML_TAGS_INLINE)
DEFAULT_DIRS = {
True: '.tmp',
False: 'dist'
}
CURRENT_STATIC_DIR = getattr(settings, 'GRUNTED_ASSETS_DIR',
DEFAULT_DIRS[settings.DEBUG])
def find_asset(filename):
# TODO: cache this?
filename_re = re.compile(filename, re.IGNORECASE)
found_files = []
for finder in finders.get_finders():
for file in finder.list('xyz'):
if file[0].startswith(CURRENT_STATIC_DIR):
if filename_re.match(os.path.basename(file[0])):
found_files.append(file)
if not found_files:
raise IOError('Could not find any file matching {} in {}'.format(
filename, CURRENT_STATIC_DIR))
if len(found_files) > 1:
raise IOError('Found more than one file matching {} in {}: {}'.format(
filename,
CURRENT_STATIC_DIR,
', '.join([f[0] for f in found_files])))
return found_files[0]
@register.simple_tag
def link_asset(filename_re):
"""
The `{% link_asset "<filename_re>" %}` tag is used to get a specific asset
from either the development or production asset output folders (by default
`.tmp` and `dist` respectively). You can use a filename regex that will
match the file both in development and in production, for example:
`'tail.*\.js'`, matching your `tail.js` in development and
`tail.f23r0df0se.js` in production.
Raises an error when zero or multiple files are found.
"""
asset = find_asset(filename_re)[0]
base, ext = os.path.splitext(asset)
if ext not in HTML_TAGS.keys():
raise IOError('Found a file matching "{}" ({}), but no known html tag '
'found for this extension "{}"'.format(filename_re,
asset,
ext))
return HTML_TAGS[ext].format(static(asset))
@register.simple_tag
def inline_asset(filename_re):
"""
The `{% inline_asset "<filename_re>" %}` tag is used to inline a specific
asset. File finding is implemented the same as the `link_asset` tag does.
Raises an error when zero or multiple files are found.
"""
asset, storage = find_asset(filename_re)
base, ext = os.path.splitext(asset)
if ext not in HTML_TAGS_INLINE.keys():
raise IOError('Found a file matching "{}" ({}), but no known inline '
'html tag found for the extension "{}"'.format(filename_re,
asset,
ext))
return HTML_TAGS_INLINE[ext].format(storage.open(asset).read())
@register.simple_tag
def asset_path(filename_re):
"""Return just the path, so you can use it in other tags."""
asset, storage = find_asset(filename_re)
return static(asset)
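# Template usage sketch (hypothetical file name patterns; each regex only has
# to match both the development and the hashed production artifact):
#
#   {% load grunted_assets %}
#   {% link_asset 'head.*\.css' %}
#   {% inline_asset 'critical.*\.css' %}
#   <img src="{% asset_path 'logo.*\.svg' %}">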
| {
"content_hash": "19da8b6983828f59884a2005e364f01e",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 81,
"avg_line_length": 37.71578947368421,
"alnum_prop": 0.5819145967066703,
"repo_name": "tino/django-grunted-assets",
"id": "208e7e136bf199588824cdeb8a9015b9af97c97f",
"size": "3583",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "grunted_assets/templatetags/grunted_assets.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "64"
},
{
"name": "CoffeeScript",
"bytes": "2265"
},
{
"name": "Python",
"bytes": "4814"
}
],
"symlink_target": ""
} |
"""
Utility methods for working with WSGI servers
"""
import errno
import json
import logging
import os
import signal
import sys
import time
import eventlet
from eventlet.green import socket
from eventlet.green import ssl
import eventlet.greenio
import eventlet.wsgi
from oslo_config import cfg
import oslo_i18n
from oslo_utils import importutils
from paste import deploy
import routes
import routes.middleware
import six
import webob.dec
import webob.exc
from senlin.common import exception
from senlin.common.i18n import _
from senlin.common.i18n import _LE
from senlin.common.i18n import _LI
from senlin.common.i18n import _LW
from senlin.common import serializers
URL_LENGTH_LIMIT = 50000
api_opts = [
cfg.StrOpt('bind_host', default='0.0.0.0',
help=_('Address to bind the server. Useful when '
'selecting a particular network interface.'),
deprecated_group='DEFAULT'),
cfg.IntOpt('bind_port', default=8778,
help=_('The port on which the server will listen.'),
deprecated_group='DEFAULT'),
cfg.IntOpt('backlog', default=4096,
help=_("Number of backlog requests "
"to configure the socket with."),
deprecated_group='DEFAULT'),
cfg.StrOpt('cert_file',
help=_("Location of the SSL certificate file "
"to use for SSL mode."),
deprecated_group='DEFAULT'),
cfg.StrOpt('key_file',
help=_("Location of the SSL key file to use "
"for enabling SSL mode."),
deprecated_group='DEFAULT'),
cfg.IntOpt('workers', default=0,
help=_("Number of workers for Senlin service."),
deprecated_group='DEFAULT'),
]
api_group = cfg.OptGroup('senlin_api')
cfg.CONF.register_group(api_group)
cfg.CONF.register_opts(api_opts,
group=api_group)
json_size_opt = cfg.IntOpt('max_json_body_size',
default=1048576,
help='Maximum raw byte size of JSON request body.'
' Should be larger than max_template_size.')
cfg.CONF.register_opt(json_size_opt)
def list_opts():
yield None, [json_size_opt]
yield 'senlin_api', api_opts
class WritableLogger(object):
"""A thin wrapper that responds to `write` and logs."""
def __init__(self, LOG, level=logging.DEBUG):
self.LOG = LOG
self.level = level
def write(self, msg):
self.LOG.log(self.level, msg.strip("\n"))
def get_bind_addr(conf, default_port=None):
"""Return the host and port to bind to."""
return (conf.bind_host, conf.bind_port or default_port)
def get_socket(conf, default_port):
'''Bind socket to bind ip:port in conf
note: Mostly comes from Swift with a few small changes...
:param conf: a cfg.ConfigOpts object
:param default_port: port to bind to if none is specified in conf
:returns : a socket object as returned from socket.listen or
ssl.wrap_socket if conf specifies cert_file
'''
bind_addr = get_bind_addr(conf, default_port)
# TODO(jaypipes): eventlet's greened socket module does not actually
# support IPv6 in getaddrinfo(). We need to get around this in the
# future or monitor upstream for a fix
address_family = [addr[0] for addr in socket.getaddrinfo(bind_addr[0],
bind_addr[1], socket.AF_UNSPEC, socket.SOCK_STREAM)
if addr[0] in (socket.AF_INET, socket.AF_INET6)][0]
cert_file = conf.cert_file
key_file = conf.key_file
use_ssl = cert_file or key_file
if use_ssl and (not cert_file or not key_file):
raise RuntimeError(_("When running server in SSL mode, you must "
"specify both a cert_file and key_file "
"option value in your configuration file"))
sock = None
retry_until = time.time() + 30
while not sock and time.time() < retry_until:
try:
sock = eventlet.listen(bind_addr, backlog=conf.backlog,
family=address_family)
if use_ssl:
sock = ssl.wrap_socket(sock, certfile=cert_file,
keyfile=key_file)
except socket.error as err:
if err.args[0] != errno.EADDRINUSE:
raise
eventlet.sleep(0.1)
if not sock:
raise RuntimeError(_("Could not bind to %(bind_addr)s"
"after trying for 30 seconds")
% {'bind_addr': bind_addr})
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# in my experience, sockets can hang around forever without keepalive
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
# This option isn't available in the OS X version of eventlet
if hasattr(socket, 'TCP_KEEPIDLE'):
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 600)
return sock
class Server(object):
"""Server class to manage multiple WSGI sockets and applications."""
def __init__(self, threads=1000):
self.threads = threads
self.children = []
self.running = True
def start(self, application, conf, default_port):
'''Run a WSGI server with the given application.
:param application: The application to run in the WSGI server
:param conf: a cfg.ConfigOpts object
:param default_port: Port to bind to if none is specified in conf
'''
def kill_children(*args):
"""Kills the entire process group."""
self.LOG.error(_LE('SIGTERM received'))
signal.signal(signal.SIGTERM, signal.SIG_IGN)
self.running = False
os.killpg(0, signal.SIGTERM)
def hup(*args):
# Shuts down the server(s), but allows running requests to complete
self.LOG.error(_LE('SIGHUP received'))
signal.signal(signal.SIGHUP, signal.SIG_IGN)
os.killpg(0, signal.SIGHUP)
signal.signal(signal.SIGHUP, hup)
# Note: may need to make this configurable
eventlet.wsgi.MAX_HEADER_LINE = 16384
self.application = application
self.sock = get_socket(conf, default_port)
self.LOG = logging.getLogger('eventlet.wsgi.server')
if conf.workers == 0:
# Useful for profiling, test, debug etc.
self.pool = eventlet.GreenPool(size=self.threads)
self.pool.spawn_n(self._single_run, application, self.sock)
return
self.LOG.info(_LI("Starting %d workers") % conf.workers)
signal.signal(signal.SIGTERM, kill_children)
signal.signal(signal.SIGHUP, hup)
while len(self.children) < conf.workers:
self.run_child()
def wait_on_children(self):
while self.running:
try:
pid, status = os.wait()
if os.WIFEXITED(status) or os.WIFSIGNALED(status):
self.LOG.error(_LE('Removing dead child %s') % pid)
self.children.remove(pid)
self.run_child()
except OSError as err:
if err.errno not in (errno.EINTR, errno.ECHILD):
raise
except KeyboardInterrupt:
self.LOG.info(_LI('Caught keyboard interrupt. Exiting.'))
os.killpg(0, signal.SIGTERM)
break
eventlet.greenio.shutdown_safe(self.sock)
self.sock.close()
self.LOG.debug('Exited')
def wait(self):
"""Wait until all servers have completed running."""
try:
if self.children:
self.wait_on_children()
else:
self.pool.waitall()
except KeyboardInterrupt:
pass
def run_child(self):
pid = os.fork()
if pid == 0:
signal.signal(signal.SIGHUP, signal.SIG_DFL)
signal.signal(signal.SIGTERM, signal.SIG_DFL)
self.run_server()
self.LOG.info(_LI('Child %d exiting normally') % os.getpid())
return
else:
self.LOG.info(_LI('Started child %s') % pid)
self.children.append(pid)
def run_server(self):
"""Run a WSGI server."""
eventlet.wsgi.HttpProtocol.default_request_version = "HTTP/1.0"
eventlet.hubs.use_hub('poll')
eventlet.patcher.monkey_patch(all=False, socket=True)
self.pool = eventlet.GreenPool(size=self.threads)
try:
eventlet.wsgi.server(self.sock,
self.application,
custom_pool=self.pool,
url_length_limit=URL_LENGTH_LIMIT,
log=WritableLogger(self.LOG),
debug=cfg.CONF.debug)
except socket.error as err:
if err[0] != errno.EINVAL:
raise
self.pool.waitall()
def _single_run(self, application, sock):
"""Start a WSGI server in a new green thread."""
self.LOG.info(_LI("Starting single process server"))
eventlet.wsgi.server(sock, application,
custom_pool=self.pool,
url_length_limit=URL_LENGTH_LIMIT,
log=WritableLogger(self.LOG),
debug=cfg.CONF.debug)
class Middleware(object):
'''Base WSGI middleware wrapper.
These classes require an application to be initialized that will be called
next. By default the middleware will simply call its wrapped app, or you
can override __call__ to customize its behavior.
'''
def __init__(self, application):
self.application = application
def process_request(self, req):
'''Called on each request.
If this returns None, the next application down the stack will be
executed. If it returns a response then that response will be returned
and execution will stop here.
'''
return None
def process_response(self, response):
"""Do whatever you'd like to the response."""
return response
@webob.dec.wsgify
def __call__(self, req):
response = self.process_request(req)
if response:
return response
response = req.get_response(self.application)
return self.process_response(response)
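# Example subclass (illustrative only, not part of this module): a middleware
# that rejects requests missing a hypothetical header and otherwise falls
# through to the wrapped application.
#
#   class RequireTokenMiddleware(Middleware):
#       def process_request(self, req):
#           if 'X-Example-Token' not in req.headers:
#               return webob.exc.HTTPForbidden()
#           return None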
class Debug(Middleware):
'''Helper class that can be inserted into any WSGI application chain
to get information about the request and response.
'''
@webob.dec.wsgify
def __call__(self, req):
print(("*" * 40) + " REQUEST ENVIRON")
for key, value in req.environ.items():
print(key, "=", value)
print
resp = req.get_response(self.application)
print(("*" * 40) + " RESPONSE HEADERS")
for (key, value) in six.iteritems(resp.headers):
print(key, "=", value)
print
resp.app_iter = self.print_generator(resp.app_iter)
return resp
@staticmethod
def print_generator(app_iter):
# Iterator that prints the contents of the wrapped string iterator
# when iterated.
print(("*" * 40) + " BODY")
for part in app_iter:
sys.stdout.write(part)
sys.stdout.flush()
yield part
print
def debug_filter(app, conf, **local_conf):
return Debug(app)
class Router(object):
'''WSGI middleware that maps incoming requests to WSGI apps.'''
def __init__(self, mapper):
'''Create a router for the given routes.Mapper.
Each route in `mapper` must specify a 'controller', which is a
WSGI app to call. You'll probably want to specify an 'action' as
well and have your controller be a wsgi.Controller, who will route
the request to the action method.
Examples:
mapper = routes.Mapper()
sc = ServerController()
# Explicit mapping of one route to a controller+action
mapper.connect(None, "/svrlist", controller=sc, action="list")
# Actions are all implicitly defined
mapper.resource("server", "servers", controller=sc)
# Pointing to an arbitrary WSGI app. You can specify the
# {path_info:.*} parameter so the target app can be handed just that
# section of the URL.
mapper.connect(None, "/v1.0/{path_info:.*}", controller=BlogApp())
'''
self.map = mapper
self._router = routes.middleware.RoutesMiddleware(self._dispatch,
self.map)
@webob.dec.wsgify
def __call__(self, req):
'''Route the incoming request to a controller based on self.map.
If no match, return a 404.
'''
return self._router
@staticmethod
@webob.dec.wsgify
def _dispatch(req):
'''Called by self._router after matching the incoming request to
a route and putting the information into req.environ.
Either returns 404 or the routed WSGI app's response.
'''
match = req.environ['wsgiorg.routing_args'][1]
if not match:
return webob.exc.HTTPNotFound()
app = match['controller']
return app
class Request(webob.Request):
"""Add some OpenStack API-specific logic to the base webob.Request."""
def best_match_content_type(self):
"""Determine the requested response content-type."""
supported = ('application/json',)
bm = self.accept.best_match(supported)
return bm or 'application/json'
def get_content_type(self, allowed_content_types):
"""Determine content type of the request body."""
if "Content-Type" not in self.headers:
raise exception.InvalidContentType(content_type=None)
content_type = self.content_type
if content_type not in allowed_content_types:
raise exception.InvalidContentType(content_type=content_type)
else:
return content_type
def best_match_language(self):
"""Determines best available locale from the Accept-Language header.
:returns: the best language match or None if the 'Accept-Language'
header was not available in the request.
"""
if not self.accept_language:
return None
all_languages = oslo_i18n.get_available_languages('senlin')
return self.accept_language.best_match(all_languages)
def is_json_content_type(request):
if request.method == 'GET':
try:
aws_content_type = request.params.get("ContentType")
except Exception:
aws_content_type = None
# respect aws_content_type when both are available
content_type = aws_content_type or request.content_type
else:
content_type = request.content_type
# bug #1887882
# for backward compatibility when the content type is missing or text/plain
if not content_type or content_type.startswith('text/plain'):
content_type = 'application/json'
if content_type in ('JSON', 'application/json')\
and request.body.startswith('{'):
return True
return False
class JSONRequestDeserializer(object):
def has_body(self, request):
'''Returns whether a Webob.Request object will possess an entity body.
:param request: Webob.Request object
'''
if request.content_length > 0 and is_json_content_type(request):
return True
return False
def from_json(self, datastring):
try:
if len(datastring) > cfg.CONF.max_json_body_size:
msg = _('JSON body size (%(len)s bytes) exceeds maximum '
'allowed size (%(limit)s bytes).') % \
{'len': len(datastring),
'limit': cfg.CONF.max_json_body_size}
raise exception.RequestLimitExceeded(message=msg)
return json.loads(datastring)
except ValueError as ex:
raise webob.exc.HTTPBadRequest(six.text_type(ex))
def default(self, request):
if self.has_body(request):
return {'body': self.from_json(request.body)}
else:
return {}
class Resource(object):
'''WSGI app that handles (de)serialization and controller dispatch.
Reads routing information supplied by RoutesMiddleware and calls
the requested action method upon its deserializer, controller,
and serializer. Those three objects may implement any of the basic
controller action methods (create, update, show, index, delete)
along with any that may be specified in the api router. A 'default'
method may also be implemented to be used in place of any
non-implemented actions. Deserializer methods must accept a request
argument and return a dictionary. Controller methods must accept a
request argument. Additionally, they must also accept keyword
arguments that represent the keys returned by the Deserializer. They
may raise a webob.exc exception or return a dict, which will be
serialized by requested content type.
'''
def __init__(self, controller, deserializer, serializer=None):
"""Initializer.
:param controller: object that implements methods created by the routes lib
:param deserializer: object that supports webob request deserialization
through controller-like actions
:param serializer: object that supports webob response serialization
through controller-like actions
"""
self.controller = controller
self.deserializer = deserializer
self.serializer = serializer
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, request):
"""WSGI method that controls (de)serialization and method dispatch."""
action_args = self.get_action_args(request.environ)
action = action_args.pop('action', None)
content_type = request.params.get("ContentType")
try:
deserialized_request = self.dispatch(self.deserializer,
action, request)
action_args.update(deserialized_request)
logging.debug(
('Calling %(controller)s : %(action)s'),
{'controller': self.controller, 'action': action})
action_result = self.dispatch(self.controller, action,
request, **action_args)
except TypeError as err:
logging.error(_LE('Exception handling resource: %s') % err)
msg = _('The server could not comply with the request since '
'it is either malformed or otherwise incorrect.')
err = webob.exc.HTTPBadRequest(msg)
http_exc = translate_exception(err, request.best_match_language())
# NOTE(luisg): We disguise HTTP exceptions, otherwise they will be
# treated by wsgi as responses ready to be sent back and they
# won't make it into the pipeline app that serializes errors
raise exception.HTTPExceptionDisguise(http_exc)
except webob.exc.HTTPException as err:
if not isinstance(err, webob.exc.HTTPError):
# Some HTTPException are actually not errors, they are
# responses ready to be sent back to the users, so we don't
# error log, disguise or translate those
raise
if isinstance(err, webob.exc.HTTPServerError):
logging.error(
_LE("Returning %(code)s to user: %(explanation)s"),
{'code': err.code, 'explanation': err.explanation})
http_exc = translate_exception(err, request.best_match_language())
raise exception.HTTPExceptionDisguise(http_exc)
except exception.SenlinException as err:
raise translate_exception(err, request.best_match_language())
except Exception as err:
log_exception(err, sys.exc_info())
raise translate_exception(err, request.best_match_language())
serializer = self.serializer or serializers.JSONResponseSerializer()
try:
response = webob.Response(request=request)
self.dispatch(serializer, action, response, action_result)
return response
# return unserializable result (typically an exception)
except Exception:
if content_type == "JSON":
try:
err_body = action_result.get_unserialized_body()
serializer.default(action_result, err_body)
except Exception:
logging.warning(_LW("Unable to serialize exception "
"response"))
return action_result
def dispatch(self, obj, action, *args, **kwargs):
"""Find action-specific method on self and call it."""
try:
method = getattr(obj, action)
except AttributeError:
method = getattr(obj, 'default')
return method(*args, **kwargs)
def get_action_args(self, request_environment):
"""Parse dictionary created by routes library."""
try:
args = request_environment['wsgiorg.routing_args'][1].copy()
except Exception:
return {}
try:
del args['controller']
except KeyError:
pass
try:
del args['format']
except KeyError:
pass
return args
def log_exception(err, exc_info):
args = {'exc_info': exc_info} if cfg.CONF.verbose or cfg.CONF.debug else {}
logging.error(_LE("Unexpected error occurred serving API: %s") % err,
**args)
def translate_exception(exc, locale):
"""Translates all translatable elements of the given exception."""
if isinstance(exc, exception.SenlinException):
exc.message = oslo_i18n.translate(exc.message, locale)
else:
exc.message = oslo_i18n.translate(six.text_type(exc), locale)
if isinstance(exc, webob.exc.HTTPError):
exc.explanation = oslo_i18n.translate(exc.explanation, locale)
exc.detail = oslo_i18n.translate(getattr(exc, 'detail', ''), locale)
return exc
class BasePasteFactory(object):
"""A base class for paste app and filter factories.
Sub-classes must override the KEY class attribute and provide
a __call__ method.
"""
KEY = None
def __init__(self, conf):
self.conf = conf
def __call__(self, global_conf, **local_conf):
raise NotImplementedError
def _import_factory(self, local_conf):
"""Import an app/filter class.
Lookup the KEY from the PasteDeploy local conf and import the
class named there. This class can then be used as an app or
filter factory.
Note we support the <module>:<class> format.
Note also that if you do e.g.
key =
value
then ConfigParser returns a value with a leading newline, so
we strip() the value before using it.
"""
class_name = local_conf[self.KEY].replace(':', '.').strip()
return importutils.import_class(class_name)
class AppFactory(BasePasteFactory):
"""A Generic paste.deploy app factory.
This requires senlin.app_factory to be set to a callable which returns a
WSGI app when invoked. The format of the name is <module>:<callable> e.g.
[app:apiv1app]
paste.app_factory = senlin.common.wsgi:app_factory
senlin.app_factory = senlin.api.v1:API
The WSGI app constructor must accept a ConfigOpts object and a local config
dict as its two arguments.
"""
KEY = 'senlin.app_factory'
def __call__(self, global_conf, **local_conf):
"""The actual paste.app_factory protocol method."""
factory = self._import_factory(local_conf)
return factory(self.conf, **local_conf)
class FilterFactory(AppFactory):
"""A Generic paste.deploy filter factory.
This requires senlin.filter_factory to be set to a callable which returns a
WSGI filter when invoked. The format is <module>:<callable> e.g.
[filter:cache]
paste.filter_factory = senlin.common.wsgi:filter_factory
senlin.filter_factory = senlin.api.middleware.cache:CacheFilter
The WSGI filter constructor must accept a WSGI app, a ConfigOpts object and
a local config dict as its three arguments.
"""
KEY = 'senlin.filter_factory'
def __call__(self, global_conf, **local_conf):
"""The actual paste.filter_factory protocol method."""
factory = self._import_factory(local_conf)
def filter(app):
return factory(app, self.conf, **local_conf)
return filter
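# A minimal sketch of a filter that satisfies the contract described above;
# the class name "NoopFilter" is illustrative and not part of senlin:
#
#     class NoopFilter(object):
#         def __init__(self, app, conf, **local_conf):
#             self.app = app    # the wrapped WSGI app
#             self.conf = conf  # ConfigOpts object supplied by FilterFactory
#
#         def __call__(self, environ, start_response):
#             return self.app(environ, start_response)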
def setup_paste_factories(conf):
"""Set up the generic paste app and filter factories.
Set things up so that:
paste.app_factory = senlin.common.wsgi:app_factory
and
paste.filter_factory = senlin.common.wsgi:filter_factory
work correctly while loading PasteDeploy configuration.
The app factories are constructed at runtime to allow us to pass a
ConfigOpts object to the WSGI classes.
:param conf: a ConfigOpts object
"""
global app_factory, filter_factory
app_factory = AppFactory(conf)
filter_factory = FilterFactory(conf)
def teardown_paste_factories():
"""Reverse the effect of setup_paste_factories()."""
global app_factory, filter_factory
del app_factory
del filter_factory
def paste_deploy_app(paste_config_file, app_name, conf):
"""Load a WSGI app from a PasteDeploy configuration.
Use deploy.loadapp() to load the app from the PasteDeploy configuration,
ensuring that the supplied ConfigOpts object is passed to the app and
filter constructors.
:param paste_config_file: a PasteDeploy config file
:param app_name: the name of the app/pipeline to load from the file
:param conf: a ConfigOpts object to supply to the app and its filters
:returns: the WSGI app
"""
setup_paste_factories(conf)
try:
return deploy.loadapp("config:%s" % paste_config_file, name=app_name)
finally:
teardown_paste_factories()
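# Illustrative call; the config file path and pipeline name below are
# hypothetical, not taken from senlin's packaging:
#
#     app = paste_deploy_app('/etc/senlin/api-paste.ini', 'senlin-api', cfg.CONF)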
| {
"content_hash": "ee4a884da6678d74ca0e5bb36bb18827",
"timestamp": "",
"source": "github",
"line_count": 753,
"max_line_length": 79,
"avg_line_length": 35.26029216467464,
"alnum_prop": 0.6104854807728523,
"repo_name": "tengqm/senlin",
"id": "b747791c605ffc43c676be3a22cdc2e503af5566",
"size": "27312",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "senlin/common/wsgi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "628"
},
{
"name": "Python",
"bytes": "1060167"
},
{
"name": "Shell",
"bytes": "12694"
}
],
"symlink_target": ""
} |
with open("primes.txt") as f:
    primes = f.readlines()
total = 0
for prime in primes:
total += int(prime)
print(total)
| {
"content_hash": "ff890212b62157735f04ba1fff8d4438",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 29,
"avg_line_length": 14.666666666666666,
"alnum_prop": 0.6136363636363636,
"repo_name": "bobismijnnaam/bobe-euler",
"id": "b6f6d4d7255fb69c41cc47633dba00dba7a920c3",
"size": "132",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "10/compute.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "1215"
},
{
"name": "CSS",
"bytes": "42929"
},
{
"name": "HTML",
"bytes": "364582"
},
{
"name": "JavaScript",
"bytes": "98081"
},
{
"name": "Python",
"bytes": "456636"
}
],
"symlink_target": ""
} |
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='proto/api/internal/bb/swarming_bb.proto',
package='bb',
syntax='proto3',
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\'proto/api/internal/bb/swarming_bb.proto\x12\x02\x62\x62\x1a\x1egoogle/protobuf/duration.proto\"q\n\nCacheEntry\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04path\x18\x02 \x01(\t\x12\x36\n\x13wait_for_warm_cache\x18\x03 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x0f\n\x07\x65nv_var\x18\x04 \x01(\t\"\xf3\x01\n\x15SwarmingBackendConfig\x12\x10\n\x08priority\x18\x01 \x01(\x05\x12\x1a\n\x12\x62ot_ping_tolerance\x18\x02 \x01(\x03\x12\x15\n\rparent_run_id\x18\x03 \x01(\t\x12\x17\n\x0fservice_account\x18\x04 \x01(\t\x12\x19\n\x11wait_for_capacity\x18\x05 \x01(\x08\x12\x1d\n\x15\x61gent_binary_cipd_pkg\x18\x06 \x01(\t\x12\x1e\n\x16\x61gent_binary_cipd_vers\x18\x07 \x01(\t\x12\"\n\x1a\x61gent_binary_cipd_filename\x18\x08 \x01(\tb\x06proto3'
,
dependencies=[google_dot_protobuf_dot_duration__pb2.DESCRIPTOR,])
_CACHEENTRY = _descriptor.Descriptor(
name='CacheEntry',
full_name='bb.CacheEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='bb.CacheEntry.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='path', full_name='bb.CacheEntry.path', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='wait_for_warm_cache', full_name='bb.CacheEntry.wait_for_warm_cache', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='env_var', full_name='bb.CacheEntry.env_var', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=79,
serialized_end=192,
)
_SWARMINGBACKENDCONFIG = _descriptor.Descriptor(
name='SwarmingBackendConfig',
full_name='bb.SwarmingBackendConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='priority', full_name='bb.SwarmingBackendConfig.priority', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='bot_ping_tolerance', full_name='bb.SwarmingBackendConfig.bot_ping_tolerance', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='parent_run_id', full_name='bb.SwarmingBackendConfig.parent_run_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='service_account', full_name='bb.SwarmingBackendConfig.service_account', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='wait_for_capacity', full_name='bb.SwarmingBackendConfig.wait_for_capacity', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='agent_binary_cipd_pkg', full_name='bb.SwarmingBackendConfig.agent_binary_cipd_pkg', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='agent_binary_cipd_vers', full_name='bb.SwarmingBackendConfig.agent_binary_cipd_vers', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='agent_binary_cipd_filename', full_name='bb.SwarmingBackendConfig.agent_binary_cipd_filename', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=195,
serialized_end=438,
)
_CACHEENTRY.fields_by_name['wait_for_warm_cache'].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
DESCRIPTOR.message_types_by_name['CacheEntry'] = _CACHEENTRY
DESCRIPTOR.message_types_by_name['SwarmingBackendConfig'] = _SWARMINGBACKENDCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
CacheEntry = _reflection.GeneratedProtocolMessageType('CacheEntry', (_message.Message,), {
'DESCRIPTOR' : _CACHEENTRY,
'__module__' : 'proto.api.internal.bb.swarming_bb_pb2'
# @@protoc_insertion_point(class_scope:bb.CacheEntry)
})
_sym_db.RegisterMessage(CacheEntry)
SwarmingBackendConfig = _reflection.GeneratedProtocolMessageType('SwarmingBackendConfig', (_message.Message,), {
'DESCRIPTOR' : _SWARMINGBACKENDCONFIG,
'__module__' : 'proto.api.internal.bb.swarming_bb_pb2'
# @@protoc_insertion_point(class_scope:bb.SwarmingBackendConfig)
})
_sym_db.RegisterMessage(SwarmingBackendConfig)
# @@protoc_insertion_point(module_scope)
| {
"content_hash": "d7694716704cd9596109ffe4ce071fe1",
"timestamp": "",
"source": "github",
"line_count": 180,
"max_line_length": 767,
"avg_line_length": 47.2,
"alnum_prop": 0.7183380414312618,
"repo_name": "luci/luci-py",
"id": "17dfa59e75f765e66891cf07362f1b658e82f3dd",
"size": "8629",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "appengine/swarming/proto/api/internal/bb/swarming_bb_pb2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "5576"
},
{
"name": "HTML",
"bytes": "1900972"
},
{
"name": "JavaScript",
"bytes": "113046"
},
{
"name": "Makefile",
"bytes": "11718"
},
{
"name": "Python",
"bytes": "5885612"
},
{
"name": "Shell",
"bytes": "5183"
}
],
"symlink_target": ""
} |
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import cts
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'numpydoc',
'sphinx.ext.doctest',
'sphinx.ext.mathjax',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pyCTS'
copyright = u'2016, Google Inc.'
author = u'Marc G. Bellemare'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.1'
# The full version, including alpha/beta/rc tags.
release = u'0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'pyCTS v0.1'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'pyCTSdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pyCTS.tex', u'pyCTS Documentation',
u'Marc G. Bellemare', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, will not define \strong, \code, \titleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pycts', u'pyCTS Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pyCTS', u'pyCTS Documentation',
author, 'pyCTS', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
| {
"content_hash": "95c71424e3a10bee890e9683b580aafa",
"timestamp": "",
"source": "github",
"line_count": 326,
"max_line_length": 80,
"avg_line_length": 28.208588957055216,
"alnum_prop": 0.6854066985645934,
"repo_name": "mgbellemare/SkipCTS",
"id": "aa5f8cbc6e95f34d2709bc7ae9cf768dea10b234",
"size": "9854",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/docs/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "3522"
},
{
"name": "C++",
"bytes": "91203"
},
{
"name": "CMake",
"bytes": "1408"
},
{
"name": "Jupyter Notebook",
"bytes": "1226704"
},
{
"name": "Makefile",
"bytes": "7602"
},
{
"name": "Python",
"bytes": "31563"
}
],
"symlink_target": ""
} |
DOCUMENTATION = '''
Bifrost Inventory Module
========================
This is a dynamic inventory module intended to provide a platform for
consistent inventory information for Bifrost.
The inventory currently supplies two distinct groups:
- localhost
- baremetal
The localhost group is required for Bifrost to perform local actions,
such as installing Ironic.
The baremetal group contains the hosts defined by the data source along with
variables extracted from the data source. The variables are defined on a
per-host level which allows explicit actions to be taken based upon the
variables.
Presently, the base mode of operation reads a CSV file in the format
originally utilized by bifrost and returns structured JSON that is
interpretted by Ansible. This has since been extended to support the
parsing of JSON and YAML data if they are detected in the file.
Conceivably, this inventory module can be extended to allow for direct
processing of inventory data from other data sources such as a configuration
management database or other inventory data source to provide a consistent
user experience.
How to use?
-----------
export BIFROST_INVENTORY_SOURCE=/tmp/baremetal.[csv|json|yaml]
ansible-playbook playbook.yaml -i inventory/bifrost_inventory.py
One can also just directly invoke bifrost_inventory.py in order to see the
resulting JSON output. This module also has a feature to support the
pass-through of a pre-existing JSON document, which receives updates and
formatting to be supplied to Ansible. Ultimately the use of JSON will be
far more flexible and should be the preferred path forward.
Example JSON Element:
{
"node1": {
"uuid": "a8cb6624-0d9f-c882-affc-046ebb96ec01",
"driver_info": {
"power": {
"ipmi_target_channel": "0",
"ipmi_username": "ADMIN",
"ipmi_address": "192.168.122.1",
"ipmi_target_address": "0",
"ipmi_password": "undefined",
"ipmi_bridging": "single"
}
},
"nics": [
{
"mac": "00:01:02:03:04:05"
      },
{
"mac": "00:01:02:03:04:06"
}
],
"driver": "agent_ipmitool",
"ipv4_address": "192.168.122.2",
"properties": {
"cpu_arch": "x86_64",
"ram": "3072",
"disk_size": "10",
"cpus": "1"
},
"name": "node1"
}
}
Utilizing ironic as the data source
-----------------------------------
The functionality exists to allow a user to query an existing ironic
installation for the inventory data. This is an advanced feature,
as the node may not have sufficient information to allow for node
deployment or automated testing, unless DHCP reservations are used.
This setting can be invoked by setting the source to "ironic"::
export BIFROST_INVENTORY_SOURCE=ironic
Known Issues
------------
At present, this module only supports inventory list mode and is not
intended to support specific host queries.
'''
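# Illustrative CSV-to-JSON conversion using the --convertcsv option registered
# below (file paths are placeholders):
#
#     export BIFROST_INVENTORY_SOURCE=/tmp/baremetal.csv
#     python bifrost_inventory.py --convertcsv > /tmp/baremetal.json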
import csv
import json
import os
import six
import sys
import yaml
from oslo_config import cfg
from oslo_log import log
try:
import shade
SHADE_LOADED = True
except ImportError:
SHADE_LOADED = False
LOG = log.getLogger(__name__)
opts = [
cfg.BoolOpt('list',
default=True,
help='List active hosts'),
cfg.BoolOpt('convertcsv',
default=False,
help='Converts a CSV inventory to JSON'),
]
def _parse_config():
config = cfg.ConfigOpts()
log.register_options(config)
config.register_cli_opts(opts)
config(prog='bifrost_inventory.py')
log.set_defaults()
log.setup(config, "bifrost_inventory.py")
return config
def _prepare_inventory():
hostvars = {}
groups = {}
groups.update({'baremetal': {'hosts': []}})
groups.update({'localhost': {'hosts': ["127.0.0.1"]}})
return (groups, hostvars)
def _val_or_none(array, location):
"""Return any value that has a length"""
try:
if len(array[location]) > 0:
return array[location]
return None
except IndexError:
LOG.debug(("Out of range value encountered. Requested "
"field %s Had: %s" % (location, array)))
def _process_baremetal_data(data_source, groups, hostvars):
"""Process data through as pre-formatted data"""
with open(data_source, 'rb') as file_object:
try:
file_data = json.load(file_object)
except Exception as e:
LOG.debug("Attempting to parse JSON: %s" % e)
try:
file_object.seek(0)
file_data = yaml.load(file_object)
except Exception as e:
LOG.debug("Attempting to parse YAML: %s" % e)
raise Exception("Failed to parse JSON and YAML")
for name in file_data:
host = file_data[name]
# Perform basic validation
if ('ipv4_address' not in host or
not host['ipv4_address']):
host['addressing_mode'] = "dhcp"
else:
host['ansible_ssh_host'] = host['ipv4_address']
if ('provisioning_ipv4_address' not in host and
'addressing_mode' not in host):
host['provisioning_ipv4_address'] = host['ipv4_address']
# Add each host to the values to be returned.
groups['baremetal']['hosts'].append(host['name'])
hostvars.update({host['name']: host})
return (groups, hostvars)
def _process_baremetal_csv(data_source, groups, hostvars):
"""Process legacy baremetal.csv format"""
with open(data_source, 'r') as file_data:
for row in csv.reader(file_data, delimiter=','):
if not row:
break
            if len(row) == 1:
LOG.debug("Single entry line found when attempting "
"to parse CSV file contents. Breaking "
"out of processing loop.")
raise Exception("Invalid CSV file format detected, "
"line ends with a single element")
host = {}
driver = None
driver_info = {}
power = {}
properties = {}
host['nics'] = [{
'mac': _val_or_none(row, 0)}]
# Temporary variables for ease of reading
management_username = _val_or_none(row, 1)
management_password = _val_or_none(row, 2)
management_address = _val_or_none(row, 3)
properties['cpus'] = _val_or_none(row, 4)
properties['ram'] = _val_or_none(row, 5)
properties['disk_size'] = _val_or_none(row, 6)
# Default CPU Architecture
properties['cpu_arch'] = "x86_64"
host['uuid'] = _val_or_none(row, 9)
host['name'] = _val_or_none(row, 10)
host['ipv4_address'] = _val_or_none(row, 11)
if ('ipv4_address' not in host or
not host['ipv4_address']):
host['addressing_mode'] = "dhcp"
host['provisioning_ipv4_address'] = None
else:
host['ansible_ssh_host'] = host['ipv4_address']
# Note(TheJulia): We can't assign ipv4_address if we are
# using DHCP.
if (len(row) > 17 and 'addressing_mode' not in host):
host['provisioning_ipv4_address'] = row[18]
else:
host['provisioning_ipv4_address'] = host['ipv4_address']
# Default Driver unless otherwise defined or determined.
host['driver'] = "agent_ssh"
if len(row) > 15:
driver = _val_or_none(row, 16)
if driver:
host['driver'] = driver
if "ipmi" in host['driver']:
# Set agent_ipmitool by default
host['driver'] = "agent_ipmitool"
power['ipmi_address'] = management_address
power['ipmi_username'] = management_username
power['ipmi_password'] = management_password
if len(row) > 12:
power['ipmi_target_channel'] = _val_or_none(row, 12)
power['ipmi_target_address'] = _val_or_none(row, 13)
if (power['ipmi_target_channel'] and
power['ipmi_target_address']):
power['ipmi_bridging'] = 'single'
if len(row) > 14:
power['ipmi_transit_channel'] = _val_or_none(row, 14)
power['ipmi_transit_address'] = _val_or_none(row, 15)
if (power['ipmi_transit_channel'] and
power['ipmi_transit_address']):
power['ipmi_bridging'] = 'dual'
if "ssh" in host['driver']:
# Under another model, a user would define
# and value translations to load these
# values. Since we're supporting the base
# model bifrost was developed with, then
# we need to make sure these are present as
# they are expected values.
power['ssh_virt_type'] = "virsh"
power['ssh_address'] = management_address
power['ssh_port'] = 22
# NOTE: The CSV format is desynced from the enrollment
# playbook at present, so we're hard coding ironic here
# as that is what the test is known to work with.
power['ssh_username'] = "ironic"
power['ssh_key_filename'] = "/home/ironic/.ssh/id_rsa"
# Group variables together under host.
# NOTE(TheJulia): Given the split that this demonstrates, where
# deploy details could possible be imported from a future
# inventory file format
driver_info['power'] = power
host['driver_info'] = driver_info
host['properties'] = properties
groups['baremetal']['hosts'].append(host['name'])
hostvars.update({host['name']: host})
return (groups, hostvars)
def _identify_shade_auth():
"""Return shade credentials"""
# Note(TheJulia): A logical progression is to support a user defining
# an environment variable that triggers use of os-client-config to allow
# environment variables or clouds.yaml auth configuration. This could
# potentially be passed in as variables which could then be passed
# to modules for authentication allowing the basic tooling to be
# utilized in the context of a larger cloud supporting ironic.
options = dict(
auth_type="None",
auth=dict(endpoint="http://localhost:6385/",)
)
return options
def _process_shade(groups, hostvars):
"""Retrieve inventory utilizing Shade"""
options = _identify_shade_auth()
cloud = shade.operator_cloud(**options)
machines = cloud.list_machines()
for machine in machines:
if 'properties' not in machine:
machine = cloud.get_machine(machine['uuid'])
if machine['name'] is None:
name = machine['uuid']
else:
name = machine['name']
new_machine = {}
for key, value in six.iteritems(machine):
            # NOTE(TheJulia): We don't want to pass informational links
# nor do we want to pass links about the ports since they
# are API endpoint URLs.
if key not in ['links', 'ports']:
new_machine[key] = value
# NOTE(TheJulia): Collect network information, enumerate through
# and extract important values, presently MAC address. Once done,
# return the network information to the inventory.
nics = cloud.list_nics_for_machine(machine['uuid'])
new_nics = []
new_nic = {}
for nic in nics:
if 'address' in nic:
new_nic['mac'] = nic['address']
new_nics.append(new_nic)
new_machine['nics'] = new_nics
new_machine['addressing_mode'] = "dhcp"
groups['baremetal']['hosts'].append(name)
hostvars.update({name: new_machine})
return (groups, hostvars)
def main():
"""Generate a list of hosts."""
config = _parse_config()
if not config.list:
LOG.error("This program must be executed in list mode.")
sys.exit(1)
(groups, hostvars) = _prepare_inventory()
if 'BIFROST_INVENTORY_SOURCE' not in os.environ:
        LOG.error('Please define a BIFROST_INVENTORY_SOURCE environment '
                  'variable with a comma separated list of data sources')
sys.exit(1)
try:
data_source = os.environ['BIFROST_INVENTORY_SOURCE']
if os.path.isfile(data_source):
try:
(groups, hostvars) = _process_baremetal_data(
data_source,
groups,
hostvars)
except Exception as e:
LOG.error("File does not appear to be JSON or YAML - %s" % e)
try:
(groups, hostvars) = _process_baremetal_csv(
data_source,
groups,
hostvars)
except Exception as e:
LOG.debug("CSV fallback processing failed, "
"received: &s" % e)
LOG.error("BIFROST_INVENTORY_SOURCE does not define "
"a file that could be processed: "
"Tried JSON, YAML, and CSV formats")
sys.exit(1)
elif "ironic" in data_source:
if SHADE_LOADED:
(groups, hostvars) = _process_shade(groups, hostvars)
else:
LOG.error("BIFROST_INVENTORY_SOURCE is set to ironic "
"however the shade library failed to load, and may "
"not be present.")
sys.exit(1)
else:
LOG.error('BIFROST_INVENTORY_SOURCE does not define a file')
sys.exit(1)
except Exception as error:
LOG.error('Failed processing: %s' % error)
sys.exit(1)
# General Data Conversion
if not config.convertcsv:
inventory = {'_meta': {'hostvars': hostvars}}
inventory.update(groups)
print(json.dumps(inventory, indent=2))
else:
print(json.dumps(hostvars, indent=2))
if __name__ == '__main__':
main()
| {
"content_hash": "ee67b1cf7e0ba290799649acf9792883",
"timestamp": "",
"source": "github",
"line_count": 403,
"max_line_length": 78,
"avg_line_length": 36.15632754342432,
"alnum_prop": 0.5707226683137739,
"repo_name": "bcornec/bifrost",
"id": "276371cf0a452b35fc1a12b5e51a7f73992a2496",
"size": "15206",
"binary": false,
"copies": "2",
"ref": "refs/heads/docker",
"path": "bifrost/inventory.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "51294"
},
{
"name": "Shell",
"bytes": "23824"
}
],
"symlink_target": ""
} |
__author__ = "Mike Belov"
__copyright__ = "Copyright (C) Nginx, Inc. All rights reserved."
__license__ = ""
__maintainer__ = "Mike Belov"
__email__ = "[email protected]"
def import_class(qualname):
module_name, class_name = qualname.rsplit('.', 1)
module = import_module(module_name)
return getattr(module, class_name)
def import_module(name):
mod = __import__(name)
components = name.split('.')
for comp in components[1:]:
mod = getattr(mod, comp)
return mod
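# Illustrative usage with standard-library names; any importable dotted path
# behaves the same way:
#
#     OrderedDict = import_class('collections.OrderedDict')
#     json_module = import_module('json')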
| {
"content_hash": "4e1c7deac6e0f7946c2d5c3997994163",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 64,
"avg_line_length": 26.157894736842106,
"alnum_prop": 0.6257545271629779,
"repo_name": "nginxinc/nginx-amplify-agent",
"id": "c56bd85997c482875a9a71c39faf9a597c33b45f",
"size": "522",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "amplify/agent/common/util/loader.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "1229"
},
{
"name": "Python",
"bytes": "530971"
},
{
"name": "Shell",
"bytes": "67763"
}
],
"symlink_target": ""
} |
bl_info = {
"name": "Bonsai",
"author": "Dolf Veenvliet",
"version": 1,
"blender": (2, 5, 6),
"api": 31847,
"location": "object > Bonsai ",
"description": "Build a bonsai",
"warning": "",
"wiki_url": "",
"tracker_url": "",
"category": "Object"}
"""
Usage:
Launch from Object menu
Additional links:
Author Site: http://www.macouno.com
e-mail: dolf {at} macouno {dot} com
"""
import bpy, mathutils, math, cProfile, colorsys, datetime, time
from mathutils import geometry
from bpy.props import StringProperty, IntProperty, BoolProperty
from macouno import mesh_extras, misc, colour, select_faces, falloff_curve, liberty
# Make it as a class
class Bonsai():
# Initialise the class
def __init__(self, context, dnaString, subdivide, keepgroups, finish, run):
if not run:
return
		# Start by setting up some default vars and such (in a separate function because it's a bit much)
self.setup(context, dnaString, keepgroups)
# GO make the DNA strings
self.createDNA()
# Make the base group
baseGroups = self.makeBaseGroup()
for sc, string in enumerate(self.dna['strings']):
self.executeDNA(string, baseGroups, 1.0, sc)
# Make sure we're shaded smoothly
bpy.ops.object.mode_set(mode='OBJECT')
bpy.ops.object.shade_smooth()
# Add self shadow (after the first subdivision)!
bpy.ops.object.mode_set(mode='VERTEX_PAINT')
bpy.ops.paint.self_shadow(contrast=3.0,method='EDG',normalize=True)
# Subsurf the first time if required
if subdivide:
# Split the edges!
'''
bpy.ops.object.modifier_add(type='EDGE_SPLIT')
mod = self.ob.modifiers[0]
mod.use_edge_angle = False
mod.use_edge_sharp = True
bpy.ops.object.modifier_apply(apply_as='DATA', modifier="EdgeSplit")
'''
bpy.ops.object.mode_set(mode='OBJECT')
bpy.ops.object.modifier_add(type='SUBSURF')
mod = self.ob.modifiers[0]
mod.levels = subdivide
bpy.ops.object.modifier_apply(apply_as='DATA', modifier="Subsurf")
bpy.ops.object.mode_set(mode='EDIT')
if finish:
self.finish(context)
else:
bpy.ops.object.mode_set(mode='OBJECT')
self.reset(context)
# Go grow something!
def executeDNA(self, string, baseGroups, baseWeight, subCount):
''''
if string['number'] >= 1:
#if string['number'] in [0,1,3]:
return
elif string['number'] == 5 or string['number'] == 6:
return
'''
# Redraw hack
#bpy.ops.wm.redraw_timer(type='DRAW_WIN_SWAP', iterations=1)
newGroups, formmatrix, growmatrices = self.makeAffectedGroups(string, baseGroups, subCount)
groupLen = len(newGroups)
pad = str(' ').rjust(string['level'], ' ')
idText = 'limb '+misc.nr4(string['number'])+' '+string['name'].ljust(10, ' ')
print(pad,idText)
# only if we made a group with something in it do we continue
if not groupLen:
print(' - No group!')
else:
# Loop through all the groups
for i, group in enumerate(newGroups):
# The step number to print out
stepText = misc.nr4(i+1)+' of '+misc.nr4(groupLen)
# We need a check matrix only if we're not on the trunk or body
if string['name'] == 'trunk' or string['name'] == 'body' or True:
try:
del(self.ob['formmatrix'])
except:
pass
# If not... then just try to get rid of it
else:
self.ob['formmatrix'] = formmatrix
# Body gets a set matrix (so it grows nice and straight)
if string['name'] == 'trunk':
growmatrix = mathutils.Matrix(((1.0,0.0,0.0),(0.0,-1.0,0.0),(0.0,0.0,1.0))).transposed()
# In all other cases the matrix can be dealt with by the grow addon
else:
growmatrix = growmatrices[i]
self.ob['growmatrix'] = growmatrix
# Select a group
select_faces.none()
select_faces.in_group(group)
# No need to continue if we have no selected faces
if not mesh_extras.contains_selected_item(self.me.faces):
print(pad,'skip ',stepText,'no selection',string['action']['name'])
else:
a = string['action']
if a['type'] == 'grow':
# Check for mirroring
right = mathutils.Vector((1.0,0.0,0.0))
check = mathutils.Vector(growmatrix[2])
# If we're aiming left we "invert" the rotation
if right.dot(check) < 0.0:
rot = mathutils.Vector((-a['rotation'][0],a['rotation'][1],-a['rotation'][2]))
else:
rot = a['rotation']
# Add relative intensity here (half the original + half the weight)
weight = baseWeight * self.getWeight(groupLen, a['scalin'])
trans = a['translation']
if a['type'] == 'grow' and trans == 0.0:
print(pad,'skip ',stepText,'too short',trans,'from',a['translation'])
else:
print(pad,'step ',stepText,a['name'])
bpy.ops.object.mode_set(mode='EDIT')
if a['type'] == 'bump':
bpy.ops.mesh.bump(
type=a['bumptype'],
scale=a['bumpscale'],
steps=True,
)
else:
bpy.ops.mesh.grow(
translation=trans,
rotation=rot,
rotation_falloff=a['rotation_falloff'],
scale=a['scale'],
scale_falloff=a['scale_falloff'],
retain=True,
steps=True,
debug=False,
)
bpy.ops.object.mode_set(mode='OBJECT')
select_faces.none()
select_faces.in_group(group)
self.applyGrowthColor(a)
if a['type'] == 'grow':
self.applyGrowthCrease(a)
# Remove new stuff from all but the current group
self.cleanGroup(group)
# Keep track of how much steps we've taken
self.dnaStep += 1
# If there's a sub
if len(string['strings']):
for sc, s in enumerate(string['strings']):
#print('going sub', string['name'], s['name'])
self.executeDNA(s, [group], weight, sc)
def createDNA(self):
# Make the color palette
if self.options['palettes']:
self.options['basecolor'] = self.choose('select', 'palette', 'base color')
colour.setBaseColor(self.options['basecolor'])
print("\n - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n")
# Make the trunk
print("\n - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n")
print("\n",(self.stringCount+1),"Making nr DNA string for the trunk")
# Start with all directions
self.options['local_directions'] = self.options['directions']
selection = self.getSelection('trunk')
action = self.makeAction(selection, 'trunk')
string = {'name': 'trunk', 'action':action, 'selection':selection, 'strings':[], 'level':1,'number':self.stringCount}
self.dna['strings'].append(string)
self.stringCount += 1
# Lets make three branches!
for x in range(3):
# Lets make a branch!
print("\n",(self.stringCount+1),x,"Making nr DNA string for branches")
selection = self.getSelection('branch')
action = self.makeAction(selection, 'branch')
string = {'name': 'branch', 'action':action, 'selection':selection, 'strings':[], 'level':2,'number':self.stringCount}
self.dna['strings'][0]['strings'].append(string)
self.stringCount += 1
for y in range(1):
# Lets make a twig!
print("\n",(self.stringCount+1),x,y,"Making nr DNA string for twigs")
selection = self.getSelection('branch')
action = self.makeAction(selection, 'branch')
string = {'name': 'branch', 'action':action, 'selection':selection, 'strings':[], 'level':3,'number':self.stringCount}
self.dna['strings'][0]['strings'][x]['strings'].append(string)
self.stringCount += 1
print("\n - - - DONE MAKING DNA - - - LETS GO GROW SOMETHING - - -\n")
# Take a dna's action and selection and mirror the vector
def mirrorDNA(self, action, selection, level):
a = action.copy()
s = selection.copy()
s['vector'] = mathutils.Vector(s['vector']).copy()
s['vector'][0] = -s['vector'][0]
self.cleanDirections(s['vector'])
str = {'name':'mirrored', 'action': action, 'selection': s, 'strings': [], 'level':level,'number':self.stringCount}
self.stringCount += 1
return str
# Make an action for the dna string
def makeAction(self, selection, style='shape'):
# TRUNK
if style == 'trunk':
action = {
'name':style,
'type': 'grow',
'translation': 3.0, #self.choose('float', 'translate', 'translation'),
'rotation': mathutils.Vector((0.0,0.0,0.0)), #self.makeRotationVector('x'),
'rotation_falloff': 'LIN', #self.choose('select', 'falloffs', 'rotation falloff'),
'scale': 0.5, #self.choose('float', 'scale', 'scale'),
'scale_falloff': 'LIN', #self.choose('select', 'falloffs', 'scale falloff'),
'vertexcolor': False, #self.choose('select','palette', 'vertex color'),
'jointcolor': False, #self.choose('select','palette','joint color'),
'colorstyle': 'soft', #self.choose('select','colorstyles','color style'),
'crease': 0.0, #self.choose('float', 'crease', 'crease'),
'scalin': 'preset',
'sub': False,
}
# BRANCH!
else:
action = {
'name':style,
'type': 'grow',
'translation': 2.5, #self.choose('float', 'translate', 'translation'),
'rotation': mathutils.Vector((0.0,0.0,0.0)), #self.makeRotationVector('x'),
'rotation_falloff': 'LIN', #self.choose('select', 'falloffs', 'rotation falloff'),
'scale': 0.5, #self.choose('float', 'scale', 'scale'),
'scale_falloff': 'LIN', #self.choose('select', 'falloffs', 'scale falloff'),
'vertexcolor': False, #self.choose('select','palette', 'vertex color'),
'jointcolor': False, #self.choose('select','palette','joint color'),
'colorstyle': 'soft', #self.choose('select','colorstyles','color style'),
'crease': 0.0, #self.choose('float', 'crease', 'crease'),
'scalin': 'preset',
'sub': False,
}
return action
# Apply a vertex colour to a vertex group
def applyGrowthColor(self, a):
# Just apply the vertex colour to all the verts if it applies... easy!
if self.options['palettes']:
vec = list(a['vertexcolor'])
selFaces = []
for f in self.ob.data.faces:
if f.select:
selFaces.append(f)
if a['colorstyle'] == 'soft':
for v in f.vertices:
self.applyColorToVert(v, vec)
else:
self.applyColorToFace(f.index, vec)
select_faces.outermost()
vec = list(a['jointcolor'])
selVerts = []
outFaces = []
for f in self.ob.data.faces:
if f.select:
if a['colorstyle'] == 'soft':
for v in f.vertices:
self.applyColorToVert(v, vec)
else:
selVerts.extend(f.vertices)
outFaces.append(f)
self.applyColorToFace(f.index, vec)
# Lets make some sharp edges
if a['type'] == 'bump' and a['colorstyle'] == 'hard':
# Check every edge
for e in self.ob.data.edges:
v0 = e.vertices[0]
v1 = e.vertices[1]
# If both verts in the edge are selected... this could be sharp
if v0 in selVerts and v1 in selVerts:
ond = 0
snd = 0
# See how many faces this edge is part of
for f in outFaces:
if v0 in f.vertices and v1 in f.vertices:
ond += 1
for f in selFaces:
if not f in outFaces:
if v0 in f.vertices and v1 in f.vertices:
snd += 1
# If the edge is only part of one seleced face it's on the outside
if ond == 1: # and snd == 1:
e.crease = 1.0
'''
sharp = 0
pole = 1
for ec in self.ob.data.edges:
if not ec == e:
ecVerts = ec.vertices
if v0 in ecVerts or v1 in ecVerts:
pole += 1
if ec.use_edge_sharp:
sharp += 1
if pole == 4 and sharp < 2:
e.use_edge_sharp = True
'''
# Set the selection of faces back to the original
for f in selFaces:
f.select = True
def applyGrowthCrease(self, a):
# LETS LOOK AT CREASES!
vec = a['crease']
# Now we want to find out how many steps we made
steps = self.ob['growsteps']
if steps:
# Loop through all the steps
for i in range(int(steps)):
select_faces.outermost(True)
# Find all the selected vertices
selFaces = mesh_extras.get_selected_faces()
selVerts = []
for f in selFaces:
selVerts.extend(f.vertices)
# Loop through all edges
for e in self.me.edges:
eVerts = e.vertices
# If an edge has only 1 selected vert... it's "long" and on the outside of the selection
intersection = [v for v in e.vertices if v in selVerts]
if len(intersection) == 1 and e.crease < 1.0:
e.crease = vec
# Apply a color to a face for a harsh transition
def applyColorToFace(self, fInd, vCol):
# Get the faces
face = bpy.context.active_object.data.faces[fInd]
vColFace = self.me.vertex_colors.active.data[fInd]
vColFace.color1 = vCol
vColFace.color2 = vCol
vColFace.color3 = vCol
if len(face.vertices) == 4:
vColFace.color4 = vCol
# Apply a vertex colour to a vert
def applyColorToVert(self, vInd, vCol):
# Get the faces
for f in bpy.context.active_object.data.faces:
if vInd in f.vertices:
vColFace = self.me.vertex_colors.active.data[f.index]
for r, v in enumerate(f.vertices):
if v == vInd:
if not r:
vColFace.color1 = vCol
elif r == 1:
vColFace.color2 = vCol
elif r == 2:
vColFace.color3 = vCol
elif r == 3:
vColFace.color4 = vCol
break
# Make a section type for the dna string
def getSelection(self, type='none'):
selection = {
'type': 'direction',
'area': 'faces',
'vector': mathutils.Vector(),
'divergence': math.radians(45),
'method': 'generated'
}
if type == 'trunk':
selection['vector'] = mathutils.Vector((0.0,0.0,1.0))
# If not trunk then it must be a branch
else:
selection['vector'] = self.choose('select', 'local_directions', 'selection direction')
selection['type'] = 'twig'
# Remove the opposite!
self.cleanDirections(selection['vector'])
selection['method'] = 'forced'
if selection['area'] == 'faces':
selection['limit'] = 1 #self.choose('int', 'limit', 'selection limit')
selection['formmatrix'] = ''
selection['growmatrices'] = []
return selection
# Make a rotation vector
def makeRotationVector(self, axis='all'):
# For the body trunk and tail we only rotate up and down
if axis == 'x':
return mathutils.Vector((self.choose('float', 'rotation', 'X rotation'),0.0,0.0))
vector = mathutils.Vector((
self.choose('float', 'rotation', 'X rotation'),
self.choose('float', 'rotation', 'Y rotation'),
self.choose('float', 'rotation', 'Z rotation')
))
return vector
# Remove the items in the current group from all others
def cleanGroup(self, group):
bpy.ops.object.mode_set(mode='EDIT')
self.ob.vertex_groups.active_index = group.index
# Make sure the entire group is selected
bpy.ops.mesh.select_all(action='DESELECT')
self.ob.vertex_groups.active_index = group.index
bpy.ops.object.vertex_group_select()
# Set editing to vert mode before selecting less
bpy.ops.wm.context_set_value(data_path='tool_settings.mesh_select_mode', value="(True, False, False)")
bpy.ops.mesh.select_less()
# Set editing back to face mode
bpy.ops.wm.context_set_value(data_path='tool_settings.mesh_select_mode', value="(False, False, True)")
for g in self.newGroups:
if g.index != group.index:
self.ob.vertex_groups.active_index = g.index
bpy.ops.object.vertex_group_remove_from(all=False)
bpy.ops.object.mode_set(mode='OBJECT')
# Make all the faces that are affected selected and return them as a list
def makeAffectedGroups(self, string, baseGroups, subCount):
print('subcnt', subCount)
selection = string['selection']
newGroups = []
formmatrix = mathutils.Matrix()
growmatrices = []
# Deselect all faces to start clean!
select_faces.none()
# Select everything in the base groups
for g in baseGroups:
select_faces.in_group(g,True)
#print('in_group',len(mesh_extras.get_selected_faces()))
# If nothing is selected there's nothing to do
if mesh_extras.contains_selected_item(self.me.faces):
if selection['type'] == 'twig':
# Lets find the middle...
selFaces = mesh_extras.get_selected_faces()
midPoint = mathutils.Vector();
for f1 in selFaces:
midPoint += f1.center
midPoint /= len(selFaces)
midDist = 0.0
nearest = 0
for fc, f1 in enumerate(selFaces):
dist = midPoint - f1.center
dist = dist.length
if not fc or dist < midDist:
nearest = f1
midDist = dist
select_faces.none()
nearest.select = True
print('found at distance',len(mesh_extras.get_selected_faces(self.me.faces)))
# If we have selected faces... we can add em to a new group
newGroups, formmatrix, growmatrices = self.addToNewGroups(string, newGroups, growmatrices)
# Select the faces at the tip in a certain direction
elif selection['type'] == 'joint' or selection['type'] == 'tip' :
select_faces.innermost()
if mesh_extras.contains_selected_item(self.me.faces):
if selection['type'] == 'joint':
select_faces.connected(True)
selCnt = len(mesh_extras.get_selected_faces())
nuCnt = selCnt
div = selection['divergence']
# If the nr of faces selected isn't diminished... we select less!
while selCnt and selCnt == nuCnt and div > 0.1:
select_faces.by_direction(selection['vector'],div)
div = div * 0.75
selFaces = mesh_extras.get_selected_faces()
nuCnt = len(selFaces)
					# Check for opposing normals... because they should not be there!
for f1 in selFaces:
if f1.select:
f1No = f1.normal
for f2 in selFaces:
if f2.select and not f1 is f2:
f2No = f2.normal
ang = f2No.angle(f1No)
if ang > math.radians(120):
f1.select = False
break
selFaces = mesh_extras.get_selected_faces()
nuCnt = len(selFaces)
if nuCnt == selCnt:
select_faces.none()
# If we have selected faces... we can add em to a new group
newGroups, formmatrix, growmatrices = self.addToNewGroups(string, newGroups, growmatrices)
# Select by pi (fake random)
elif selection['type'] == 'liberal':
select_faces.liberal(self.dnaString)
# If we have selected faces... we can add em to a new group
newGroups, formmatrix, growmatrices = self.addToNewGroups(string, newGroups, growmatrices)
# Select all loops in the group
elif selection['type'] == 'loops':
select_faces.connected()
self.deselectUnGrouped()
step = 0
# As long as something is selected, we can continue
while mesh_extras.contains_selected_item(self.ob.data.faces):
select_faces.connected()
self.deselectGrouped(baseGroups)
# Skip selection just in case
if not step % selection['frequency']:
# If we have selected faces... we can add em to a new group
newGroups, formmatrix, grw = self.addToNewGroups(string, newGroups, growmatrices)
growmatrices.extend(grw)
step += 1
print(step)
# Select by direction
elif selection['type'] == 'direction':
select_faces.by_direction(selection['vector'],selection['divergence'])
newGroups, formmatrix, growmatrices = self.addToNewGroups(string, newGroups, growmatrices)
# All!
else:
newGroups, formmatrix, growmatrices = self.addToNewGroups(string, newGroups, growmatrices)
return newGroups, formmatrix, growmatrices
# Deselect all the faces that are not in a group
def deselectUnGrouped(self):
# Get the faces (and go into object mode)
faces = mesh_extras.get_selected_faces()
if len(faces):
for f in faces:
if f.select:
inGroup = True
# See all the verts (all should be in the group!)
for v in f.vertices:
found = False
vert = self.ob.data.vertices[v]
vertGroups = vert.groups
for g in vert.groups:
if g.weight:
found = True
if not found:
inGroup = False
if not inGroup:
f.select = False
# Deselect all faces that are already grouped, but not in the baseGroups
def deselectGrouped(self, baseGroups):
# Get the faces (and go into object mode)
faces = mesh_extras.get_selected_faces()
if len(faces):
# First lets make sure the faces are in the current base groups
for g in baseGroups:
# Check all selected faces
for f in faces:
if f.select:
inGroup = True
# See all the verts (all should be in the group!)
for v in f.vertices:
found = False
vert = self.ob.data.vertices[v]
vertGroups = vert.groups
for vg in vert.groups:
if vg.group == g.index:
found = True
if not found:
inGroup = False
if not inGroup:
f.select = False
faces = mesh_extras.get_selected_faces()
if len(faces):
for g in self.newGroups:
if not g in baseGroups:
# Check all selected faces
for f in faces:
if f.select:
inGroup = True
# See all the verts (all should be in the group!)
for v in f.vertices:
found = False
vert = self.ob.data.vertices[v]
vertGroups = vert.groups
for vg in vert.groups:
if vg.group == g.index:
found = True
if not found:
inGroup = False
if inGroup:
f.select = False
# Adding the current selection to a new group
def addToNewGroups(self, string, newGroups, growmatrices=[]):
selection = string['selection']
self.doubleCheckSelection(selection)
faces = mesh_extras.get_selected_faces()
formmatrix = mathutils.Matrix()
growmatrices = []
if len(faces):
verts = []
inds = []
for f in faces:
for v in f.vertices:
if not v in inds:
inds.append(v)
verts.append(self.me.vertices[v])
# NOW WE GO MAKE THE GROUPS
if len(verts):
weights = self.makeWeights(verts)
formmatrix = mesh_extras.get_selection_matrix(faces)
# If we do this per area, we want the entire area to be part of one group
if selection['area'] == 'area':
growmatrices.append(formmatrix)
newGroup = self.ob.vertex_groups.new(string['name']+'.'+selection['type'])
newGroups.append(newGroup)
self.newGroups.append(newGroup)
for v in verts:
newGroup.add([v.index], 1.0, 'REPLACE')
			# If we have it per face, we need separate weights and groups
elif selection['area'] == 'faces':
if len(faces):
for i, f in enumerate(faces):
growmatrices.append(mesh_extras.get_selection_matrix([f]))
newGroup = self.ob.vertex_groups.new(string['name']+'.'+selection['type']+'.'+misc.nr4(i))
newGroups.append(newGroup)
self.newGroups.append(newGroup)
vertList = f.vertices
for i,v in enumerate(verts):
ind = v.index
if ind in vertList:
newGroup.add([v.index], weights[i], 'REPLACE')
return newGroups, formmatrix, growmatrices
# make the base group that we're working with
def makeBaseGroup(self):
newGroup = self.ob.vertex_groups.new('base')
self.ob.vertex_groups.active_index = newGroup.index
baseGroupList = [newGroup]
self.newGroups.append(newGroup)
vList = [v.index for v in self.ob.data.vertices]
newGroup.add(vList, 1.0, 'REPLACE')
return baseGroupList
# Just some nice checks to do with selections
def doubleCheckSelection(self, selection):
# Make sure there's never more than 12 faces we grow out of
if selection['area'] == 'faces':
select_faces.limit(selection['limit'], self.dnaString)
		# If we still have something selected, then we need to check for islands (only one continuous island should be selected)
if selection['type'] == 'direction' and selection['area'] == 'area' and mesh_extras.contains_selected_item(self.me.faces):
self.checkForIslands(selection['vector'])
# Make sure only one "island" is selected
def checkForIslands(self, vector):
faces = mesh_extras.get_selected_faces()
# Find the face furthest along the vector
max = 0.0
closestFace = 0
closestVerts = 0
for i,f in enumerate(faces):
dist = vector.dot(f.center)
if dist > max or not i:
max = dist
closestFace = f
closestVerts = f.vertices
# Find the faces connected to this one!
connectedFaces = [closestFace]
connectedVerts = list(closestVerts)
foundNew = True
# As long as we can find connected faces we continue
while foundNew:
foundNew = False
for f in faces:
addThis = False
# If we haven't done this one yet
if not f in connectedFaces:
intersection = [v for v in f.vertices if v in connectedVerts]
if len(intersection):
addThis = True
if addThis:
foundNew = True
connectedFaces.append(f)
connectedVerts.extend(f.vertices)
# Deselect disconnected faces
for f in faces:
if not f in connectedFaces:
f.select = False
# Make relative weights for the verts
def makeWeights(self, verts):
cen = mathutils.Vector()
for v in verts:
cen += v.co
cen *= (1.0/len(verts))
# Find the minimum and maximum distance from the centre
min = 0.0
max = 0.0
distances = []
for i, v in enumerate(verts):
dist = (v.co - cen).length
distances.append(dist)
if not i or dist < min:
min = dist
if not i or dist > max:
max = dist
max = max - min
if max > 0.0:
factor = (1.0 / max)
else:
factor = 1.0
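		# The weights are relative to the selection centre: verts at the minimum
		# distance end up with weight 1.0 and verts at the maximum distance with
		# 0.0 (when min and max coincide every weight comes out as 0.0).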
# Make the weights
weights = []
for i, v in enumerate(verts):
weight = (max - (distances[i] - min)) * factor
weights.append(weight)
return weights
# Get the weight of the current selection
def getWeight(self, groupLen, scalin):
weight = 1.0
# If we're applying the weight based on the edge, we find the shortest edge
if scalin == 'edge':
short = 0.0
check = 0
bpy.ops.object.mode_set(mode='OBJECT')
# Find the shortest edge
for e in self.ob.data.edges:
if e.select == True:
v0 = self.ob.data.vertices[e.vertices[0]]
v1 = self.ob.data.vertices[e.vertices[1]]
ed = v1.co - v0.co
leng = ed.length
if leng < short or not check:
short = leng
check = 1
weight *= short
		# If we're doing multiple groups, we scale by the average weight of the selected verts in the active group
if groupLen > 1:
bpy.ops.object.mode_set(mode='EDIT')
groupId = self.ob.vertex_groups.active_index
verts = mesh_extras.get_selected_vertices()
vLen = len(verts)
if vLen:
w = 0.0
for v in verts:
for g in v.groups:
if g.group == groupId:
w += g.weight
w *= (1.0/vLen)
weight *= w
return weight
# Remove a specific direction from the dict and rebuild it
def cleanDirections(self, direction):
directions = self.options['local_directions']
# We actually remove the negated direction (can't grow backwards!)
direction = mathutils.Vector((-direction[0],-direction[1],-direction[2]))
key = False
		# See if the direction is still in the dict at all, and find its key
for k in directions.keys():
angle = direction.angle(mathutils.Vector(directions[k]))
if angle == 0.0:
key = k
		# If the direction is not in the dict, there is nothing to remove
if key is False:
return
# Make a new fresh dict (a-z) with the remaining directions
newDirections = {}
letter = 97
for k in directions.keys():
if not k == key:
newDirections[chr(letter)] = directions[k]
letter+=1
self.options['local_directions'] = newDirections
return
# Get the palette!
def getPalette(self):
try:
self.options['palettes'] = bpy.context.scene['palettes']
palette = self.choose('select', 'palettes', 'palette')
print(palette['title'])
self.paletteAuthor = palette['author']
self.paletteTitle = palette['title']
self.paletteId = palette['id']
self.paletteHexes = palette['hexes']
letter = 97
self.options['palette'] = {}
for swatch in palette['swatches']:
print('swatch', float(swatch[0]),float(swatch[1]),float(swatch[2]))
self.options['palette'][chr(letter)] = [float(swatch[0]),float(swatch[1]),float(swatch[2])]
letter += 1
except:
self.options['palettes'] = False
print('no palette available')
# Go choose something
def choose(self, type, val, desk):
if val in self.options.keys():
if val == 'palette':
result = self.secondary.Choose(type,self.options[val],desk)
else:
result = self.primary.Choose(type,self.options[val],desk)
elif val in self.options['primary'].keys():
pl = self.primary.key[self.primary.pos]
p = self.primary.Choose(type,self.options['primary'][val])
sl = self.secondary.key[self.secondary.pos]
s = self.secondary.Choose(type,self.options['secondary'][val])
result = p+s
print(' ',pl,sl,desk.ljust(22, ' '),'=',round(p,2),'+',round(s,2),'=',round(result,2))
else:
print('ERROR Unable to choose',val,desk)
result = False
return result
# Start with some setup
def setup(self, context, dnaString, keepgroups):
print("\n\n->-> Starting Entorform <-<-\n")
print(' - DNA string',dnaString,"\n")
# Get the active object
self.ob = context.active_object
self.me = self.ob.data
self.dnaString = dnaString
# Split the dna string into two parts if possible
prt = dnaString.partition(' ')
if not prt[2]:
self.dnaParts = {
'primary': dnaString,
'secondary': dnaString
}
else:
sec = ''
for i, p in enumerate(prt):
if i > 1:
sec = sec + p
self.dnaParts = {
'primary': prt[0],
'secondary': sec
}
self.primary = liberty.liberty('string', self.dnaParts['secondary'])
self.secondary = liberty.liberty('string', self.dnaParts['primary'])
self.options = {}
self.options['basecolor'] = [0.0,0.0,0.0]
self.options['bool'] = {'a': True,'b': False}
self.options['primary'] = {
'translate': {'min': 2.0, 'max': 3.0},
'scale': {'min': 0.5, 'max': 0.6},
'crease': {'min': 0.4, 'max': 0.7},
'bumpscale': {'min': 0.4, 'max': 0.7},
'rotation': {'min': math.radians(-30.0), 'max': math.radians(30.0)},
'divergence': {'min': math.radians(-5),'max': math.radians(15)},
'limit': {'min': 1, 'max': 1},
}
self.options['secondary'] = {
'translate': {'min': -0.5, 'max': 1.5},
'scale': {'min': -0.1, 'max': 0.1},
'crease': {'min': -0.3, 'max': 0.3},
'bumpscale': {'min': -0.35, 'max': 0.3},
'rotation': {'min': math.radians(-30.0), 'max': math.radians(30.0)},
'divergence': {'min': math.radians(-15),'max': math.radians(15)},
'limit': {'min': 0, 'max': 0},
}
self.options['falloffs'] = {'a': 'LIN', 'b': 'INC', 'c': 'DEC', 'd': 'SWO', 'e': 'SPI', 'f': 'BUM', 'g': 'SWE'}
self.options['bumptypes'] = {'a': 'BUM', 'b': 'SPI', 'c': 'DIM', 'd': 'PIM'}
self.options['selectiontypes'] = {'a': 'direction', 'b': 'liberal', 'c': 'joint', 'd': 'all', 'e': 'checkered', 'f': 'loops'} # tip = disabled
self.options['selectioneyes'] = {'a': 'direction', 'b': 'liberal', 'c': 'joint', 'd': 'all', 'e': 'checkered'}
self.options['directions'] = {
'a': mathutils.Vector((1.0,0.0,0.0)), #top
'b': mathutils.Vector((-1.0,0.0,0.0)), #bottom
'c': mathutils.Vector((0.0,1.0,0.0)), #front
'd': mathutils.Vector((0.0,-1.0,0.0)), #rear
'e': mathutils.Vector((0.0,0.0,1.0)), #right
'f': mathutils.Vector((0.0,0.0,-1.0)), #left
}
self.options['areatypes'] = {'a': 'area','b': 'faces'}
self.options['frequencies'] = {'a': 1, 'b': 2}
self.options['colorstyles'] = {'a': 'hard','b': 'soft'}
self.getPalette()
# Set the editing to face mode only
#bpy.ops.wm.context_set_value(data_path='tool_settings.mesh_select_mode', value="(False, False, True)")
self.startTime = time.time()
self.dnaPos = 0
self.dnaStep = 1
self.dna = {'name':'base','strings': []}
self.palette = []
self.keepgroups = keepgroups
# Total number of strings
self.stringCount = 0
# Level of deepness
self.LOD = 2
# If the grow function made a matrix previously, we can remove it now
try:
del(self.ob['growmatrix'])
except:
pass
# Get the vertex colours
if not self.ob.data.vertex_colors.active:
self.ob.data.vertex_colors.new()
for f in self.ob.data.vertex_colors.active.data:
try:
f.color1 = f.color2 = f.color3 = f.color4 = (0.0,0.0,0.0)
except:
f.color1 = f.color2 = f.color3 = (0.0,0.0,0.0)
self.vCols = self.ob.data.vertex_colors.active.data
# Save the dna string in a property if we want!
self.ob['dnastring'] = dnaString
# Convert the string to a list
self.origDNA = dnaString
self.newGroups = []
# Change Selection mode to face selection
self.lastSelectioMode = bpy.context.tool_settings.mesh_select_mode[:]
if bpy.context.tool_settings.mesh_select_mode != (False, False, True):
bpy.context.tool_settings.mesh_select_mode = (False, False, True)
# Set some variables before finishing
def finish(self, context):
bpy.ops.object.mode_set(mode='OBJECT')
bpy.ops.object.shade_smooth()
#self.setFloor()
#self.setDefaultView()
#bpy.ops.wm.redraw_timer(type='DRAW', iterations=1)
# Temporarily rescale the object for camera view stuff
bpy.ops.object.origin_set(type='GEOMETRY_ORIGIN', center='MEDIAN')
bpy.ops.wm.redraw_timer(type='DRAW', iterations=1)
# Lets scale the object
dimensions = self.ob.dimensions
max = 0.0
for i, d in enumerate(dimensions):
if (not i) or d > max:
max = d
if max != 0.0:
ratio = 15 / max
self.ob.scale *= ratio
#bpy.ops.object.scale_apply()
bpy.ops.object.transform_apply(location=False, rotation=False, scale=True)
# Lets put the floor in the correct location
if 'floor' in bpy.data.objects:
for i, v in enumerate(self.ob.data.vertices):
loc = v.co[2]
if (not i) or loc < max:
max = loc
bpy.data.objects['floor'].location[2] = max
# Bonsai number
filePath = bpy.data.filepath.split('\\')
fileName = filePath[len(filePath)-1]
numbers = fileName.split('.')
for n in numbers:
if n.isdigit():
bpy.data.objects['text-form'].data.body = 'Bonsai '+n
# Dna string
if 'text-dna' in bpy.data.objects:
bpy.data.objects['text-dna'].data.body = self.origDNA
# Datetime
if 'text-date' in bpy.data.objects:
now = datetime.datetime.today()
dateString = str(now.day)+' '+misc.int_to_roman(now.month)+' '+str(now.year)+' '+str(now.hour)+':'+str(now.minute)+':'+str(now.second)
bpy.data.objects['text-date'].data.body = dateString
# execution time
if 'text-maketime' in bpy.data.objects:
bpy.data.objects['text-maketime'].data.body = str(round(time.time() - self.startTime))+'s'
if self.options['palettes']:
# Palette
if 'text-paletter' in bpy.data.objects:
bpy.data.objects['text-paletter'].data.body = self.paletteAuthor
bpy.data.objects['text-palettid'].data.body = self.paletteId
bpy.data.objects['text-palette'].data.body = self.paletteTitle
self.ob['paletter'] = self.paletteAuthor
self.ob['paletteId'] = self.paletteId
self.ob['palette'] = self.paletteTitle
#paletteQuery = "INSERT INTO ff_palettes(id, theme_id, name, creator, colour_1, colour_2, colour_3, colour_4, colour_5) VALUES (NULL,'"+self.paletteId+"','"+self.paletteTitle+"','"+self.paletteAuthor+"'"
#swatches
if 'swatches' in bpy.data.objects:
paletteOb = bpy.data.objects['swatches']
else:
paletteOb = None
for j, k in enumerate(self.options['palette'].keys()):
hex = self.paletteHexes[j]
#paletteQuery = paletteQuery+",'"+hex+"'"
swatch = self.options['palette'][k]
col = 'colour_'+str(j+1)
self.ob[col] = hex #colour.rgb_to_hex(swatch)
if paletteOb:
for i, f in enumerate(paletteOb.data.vertex_colors.active.data):
if i == j:
try:
f.color1 = f.color2 = f.color3 = f.color4 = swatch
except:
f.color1 = f.color2 = f.color3 = swatch
#paletteQuery = paletteQuery+")"
#self.ob['paletteQuery'] = paletteQuery
'''
INSERT INTO `admin_entoforms`.`ff_palettes` (`id`, `theme_id`, `name`, `creator`, `colour_1`, `colour_2`, `colour_3`, `colour_4`, `colour_5`) VALUES (NULL, '1373430', 'giblythe1', 'jakestolte', '3d3d3f', 'bf8c2f', 'bcbfbf', 'f2f2f2', 'f2dfba');
'''
# Geometry
if 'text-faces' in bpy.data.objects:
bpy.data.objects['text-faces'].data.body = str(len(self.ob.data.faces))
if 'text-edges' in bpy.data.objects:
bpy.data.objects['text-edges'].data.body = str(len(self.ob.data.edges))
if 'text-verts' in bpy.data.objects:
bpy.data.objects['text-verts'].data.body = str(len(self.ob.data.vertices))
# Frame number
fr = bpy.context.scene.frame_current
if 'text-frame' in bpy.data.objects:
bpy.data.objects['text-frame'].data.body = str(fr)
		# Wrap the frame number into the 0-359 range
fr = fr % 360
if 'text-angle' in bpy.data.objects:
bpy.data.objects['text-angle'].data.body = str(fr)
# Reset everything at the very end
def reset(self, context):
print("\n Cleanup\n")
if not self.keepgroups:
for g in self.newGroups:
self.ob.vertex_groups.active_index = g.index
bpy.ops.object.vertex_group_remove()
# Return selection mode to previous value
bpy.context.tool_settings.mesh_select_mode[:] = self.lastSelectioMode
print("->-> Finished Bonsai <-<-\n")
# Scale the selection (always relative to the normal)
# val = (0.5, 0.5, 0.5)
def scale(self, val):
bpy.ops.transform.resize(value=val, constraint_axis=(False, False, False), constraint_orientation='GLOBAL', mirror=False, proportional=bpy.context.tool_settings.proportional_edit, proportional_edit_falloff=bpy.context.tool_settings.proportional_edit_falloff, proportional_size=1, snap=bpy.context.tool_settings.use_snap, snap_target=bpy.context.tool_settings.snap_target, snap_point=(0, 0, 0), snap_align=False, snap_normal=(0, 0, 0), release_confirm=False)
# Mark this point in time and print if we want... see how long it takes
def mark(self,desc):
if self.debug:
now = time.time()
jump = now - self.markTime
self.markTime = now
print(desc.rjust(10, ' '),jump)
class Bonsai_init(bpy.types.Operator):
'''Build a Bonsai'''
bl_idname = 'object.bonsai'
bl_label = 'Bonsai'
bl_options = {'REGISTER', 'UNDO'}
d='Selina'
dnaString = StringProperty(name="DNA", description="DNA string to define your shape", default=d, maxlen=100)
subdivide = IntProperty(name='Subdivide', default=0, min=0, max=10, soft_min=0, soft_max=100)
keepgroups = BoolProperty(name='Keep groups', description='Do not remove the added vertex groups', default=True)
finish = BoolProperty(name='Finish', description='Do some final touches', default=False)
run = BoolProperty(name='Execute', description='Go and actually do this', default=True)
@classmethod
def poll(cls, context):
obj = context.active_object
return (obj and obj.type == 'MESH')
def execute(self, context):
BONSAI = Bonsai(context, self.dnaString, self.subdivide, self.keepgroups, self.finish, self.run)
return {'FINISHED'}
def menu_func(self, context):
self.layout.operator(Bonsai_init.bl_idname, text="Bonsai")
def register():
bpy.utils.register_module(__name__)
bpy.types.VIEW3D_MT_object.append(menu_func)
def unregister():
bpy.utils.unregister_module(__name__)
bpy.types.VIEW3D_MT_object.remove(menu_func)
if __name__ == "__main__":
register() | {
"content_hash": "2d641a5d3bdc8c9076a0486363f45178",
"timestamp": "",
"source": "github",
"line_count": 1481,
"max_line_length": 459,
"avg_line_length": 28.491559756921,
"alnum_prop": 0.5921177362783202,
"repo_name": "kellpossible/VoxelEditor",
"id": "f9bc5bc61c4d8edc573bbe34821bb4def14c7ee0",
"size": "43059",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ScriptResearch/entoforms-read-only/addons/object_bonsai.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "478983"
}
],
"symlink_target": ""
} |
"""Scenic trainer for zero-shot text-based visual classification evaluation.
Note this "trainer" doesn't actually train but just evaluates.
"""
from collections.abc import Mapping
import functools
from typing import Optional
from absl import logging
from clu import metric_writers
from flax import jax_utils
import flax.core
import jax
import jax.numpy as jnp
import ml_collections
import optax
from scenic.dataset_lib import dataset_utils
from scenic.model_lib.base_models import model_utils
from scenic.projects.lang4video import util
from scenic.projects.lang4video.model.image_text_model import ImageTextModel
from scenic.projects.lang4video.trainer.train_utils import axis_name_exists
from scenic.projects.lang4video.trainer.train_utils import compute_mask
from scenic.projects.lang4video.trainer.train_utils import get_cached_fn
from scenic.projects.lang4video.trainer.train_utils import get_epoch_steps
from scenic.projects.lang4video.trainer.train_utils import get_input_spec
from scenic.projects.lang4video.trainer.train_utils import init_encoder
from scenic.projects.lang4video.trainer.train_utils import InputSpec
from scenic.projects.lang4video.trainer.train_utils import is_video_input
from scenic.projects.lang4video.trainer.train_utils import log_eval_summary
from scenic.projects.lang4video.trainer.train_utils import NUM_DEVICES_AXIS_NAME
from scenic.projects.lang4video.trainer.train_utils import pad_array_to_be_divisible
from scenic.projects.lang4video.trainer.train_utils import partial_with_cache
from scenic.projects.lang4video.trainer.train_utils import split_in_batches
from scenic.train_lib import train_utils
from tqdm.auto import tqdm
# TODO(sacastro): support multiple clips.
# We pass the model and not directly the encoder because it's hashable.
def eval_step(
train_state: train_utils.TrainState,
visual: jnp.ndarray, # Shape: (N, F, H, W, C) if `is_video`, w/o F if not.
target: jnp.ndarray, # Shape: (N) as int or (N, K) as float
batch_mask: jnp.ndarray, # Shape: (N,)
*,
model: ImageTextModel,
encoded_classes: jnp.ndarray, # Shape: (K, E)
is_video: bool,
debug: bool = False,
) -> Mapping[str, tuple[float, int]]:
"""Runs a single step of evaluation."""
encoded_visual = model.flax_model.apply(
variables={
'params': train_state.params,
**train_state.model_state,
},
method=model.flax_model.encode_video
if is_video else model.flax_model.encode_image,
**{'video' if is_video else 'image': visual},
train=False,
debug=debug)
score = model.flax_model.compute_similarity(encoded_visual, encoded_classes)
if target.ndim == score.ndim - 1:
target = jax.nn.one_hot(target, len(encoded_classes))
assert target.ndim == score.ndim
metrics = {
'accuracy@1':
model_utils.weighted_top_one_correctly_classified(
score, target, weights=batch_mask),
'accuracy@5':
model_utils.weighted_topk_correctly_classified(
score, target, k=5, weights=batch_mask),
'loss':
optax.softmax_cross_entropy(score, target) * batch_mask,
}
actual_local_batch_size = batch_mask.sum()
for k, v in metrics.items():
metrics[k] = (v, actual_local_batch_size)
if axis_name_exists(NUM_DEVICES_AXIS_NAME):
actual_batch_size = jax.lax.psum(
actual_local_batch_size, axis_name=NUM_DEVICES_AXIS_NAME)
for k, v in metrics.items():
metrics[k] = (jax.lax.psum(v[0].sum(), axis_name=NUM_DEVICES_AXIS_NAME),
actual_batch_size)
return metrics
def compute_class_text_embeddings(
train_state: train_utils.TrainState,
class_texts: jnp.ndarray, # Shape: (C, T, L)
mask: Optional[jnp.ndarray] = None, # Shape: (C, T, L)
*,
model: ImageTextModel,
batch_size: int,
debug: bool = False,
) -> jnp.ndarray: # Shape: (C, E)
"""Computes the class text embeddings."""
num_classes = class_texts.shape[0]
class_texts = class_texts.reshape(-1, class_texts.shape[-1])
if mask is not None:
mask = mask.reshape(-1, mask.shape[-1])
def _compute_class_text_embeddings_batch(
args, # `(batch, mask)` with shape: (N, L)
) -> jnp.ndarray: # Shape: (N, E)
batch, mask_ = args
return model.flax_model.apply(
variables={
'params': train_state.params,
**train_state.model_state,
},
method=model.flax_model.encode_text,
text=batch,
mask=mask_,
train=False,
debug=debug)
# We batch to not run OOM.
encoded_text = split_in_batches(
_compute_class_text_embeddings_batch,
batch_size=batch_size)((class_texts, mask))
encoded_text = encoded_text.reshape(num_classes, -1,
encoded_text.shape[-1]).mean(axis=1)
if axis_name_exists(NUM_DEVICES_AXIS_NAME):
encoded_text = jax.lax.all_gather(encoded_text, NUM_DEVICES_AXIS_NAME)
encoded_text = encoded_text.reshape(-1, encoded_text.shape[-1])
return encoded_text
@get_cached_fn
def _create_model_and_train_state(
config: ml_collections.ConfigDict,
dataset: dataset_utils.Dataset,
model_cls: type[ImageTextModel],
input_spec: InputSpec,
is_video: bool,
rng: Optional[jnp.ndarray] = None,
) -> tuple[ImageTextModel, train_utils.TrainState]:
"""Creates the model and train state."""
model = model_cls(config, dataset.meta_data)
encoder = model.flax_model
params, model_state = init_encoder(
encoder=encoder,
input_spec=input_spec,
method=encoder.encode_video_and_text if is_video else None,
config=config,
rng=rng,
)
train_state = train_utils.TrainState(params=params, model_state=model_state)
return model, train_state
def evaluate(
*,
config: ml_collections.ConfigDict,
model_cls: type[ImageTextModel],
dataset: dataset_utils.Dataset,
rng: Optional[jnp.ndarray] = None,
workdir: Optional[str] = None, # pylint: disable=unused-argument
writer: metric_writers.MetricWriter,
) -> Mapping[str, float]:
"""Evaluates a model on zero-shot text-based video classification."""
input_spec = get_input_spec(
dataset_meta_data=dataset.meta_data,
dataset_configs=config.get('dataset_configs', {}),
train=False)
is_video = is_video_input(input_spec)
hashable_config = config.copy_and_resolve_references()
hashable_config = ml_collections.FrozenConfigDict(hashable_config)
# Note that different calls of `_replace` with the same contents will yield
# the same hash.
dataset = dataset._replace(meta_data=flax.core.freeze(dataset.meta_data))
model, train_state = _create_model_and_train_state(
config=hashable_config,
dataset=dataset,
model_cls=model_cls,
input_spec=input_spec,
is_video=is_video,
rng=rng)
if config.checkpoint:
train_state = train_utils.restore_checkpoint(workdir, train_state)[0]
train_state = jax_utils.replicate(train_state)
dataset_configs = config.get('dataset_configs', {})
tokenizer_config = dataset_configs.get('tokenizer', {})
tokenizer = util.create_tokenizer(tokenizer_config)
tokenizer.initialize()
class_names = dataset.meta_data.get(
'classes') or dataset_configs['class_names']
class_templates = config.get('class_templates', ['{}'])
classes_with_templates = []
for class_ in class_names:
for template in class_templates:
classes_with_templates.append(template.format(class_))
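  # E.g. class "dog" with template "a photo of a {}." becomes
  # "a photo of a dog."; the per-template text embeddings of each class are
  # averaged into a single class embedding further below.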
tokenized_classes = tokenizer.string_tensor_to_indices(
classes_with_templates,
prepend_bos=tokenizer_config.get('prepend_bos', False),
append_eos=tokenizer_config.get('append_eos', False),
max_num_tokens=dataset_configs.get('max_num_words', 32),
)
tokenized_classes = tokenized_classes._numpy() # pylint: disable=protected-access
tokenized_classes = tokenized_classes.reshape(-1, len(class_templates),
tokenized_classes.shape[-1])
mask = compute_mask(tokenized_classes, config)
  # We pmap here to avoid running OOM due to XLA optimizations. It's also best
  # to pmap everything here in case this runs in a dedicated eval job; if not,
  # the extra pmap is still cheap.
logging.info('Encoding the classes as text…')
tokenized_classes, batch_padding_size = pad_array_to_be_divisible(
tokenized_classes, jax.local_device_count())
mask, batch_padding_size = pad_array_to_be_divisible(mask,
jax.local_device_count())
tokenized_classes, mask = dataset_utils.shard((tokenized_classes, mask))
compute_class_text_embeddings_pmapped = jax.pmap(
partial_with_cache(
compute_class_text_embeddings,
model=model,
batch_size=config.get(
'class_batch_size',
config.get('eval_batch_size', config.batch_size)),
debug=hashable_config.get('debug_eval'),
),
axis_name=NUM_DEVICES_AXIS_NAME,
donate_argnums=(1, 2),
)
encoded_classes = jax_utils.unreplicate(
compute_class_text_embeddings_pmapped(train_state, tokenized_classes,
mask))
encoded_classes = encoded_classes[:len(encoded_classes) - batch_padding_size]
logging.info('Classes encoded.')
eval_step_pmapped = jax.pmap(
# This function would fail to cache because `encoded_classes` is a
# `DeviceArray`. Besides, this value would change for different params.
# So we can't cache it.
functools.partial(
eval_step,
model=model,
encoded_classes=encoded_classes,
is_video=is_video,
debug=config.get('debug_eval'),
),
axis_name=NUM_DEVICES_AXIS_NAME,
donate_argnums=(1, 2, 3),
)
total_steps = get_epoch_steps(config, dataset, split='eval')
eval_metrics_all = []
for step, batch in zip(
tqdm(range(total_steps), desc='Evaluating'), dataset.valid_iter):
with jax.profiler.StepTraceAnnotation('eval', step_num=step):
eval_metrics_batch = eval_step_pmapped(train_state, batch['inputs'],
batch['label'],
batch['batch_mask'])
eval_metrics_all.append(eval_metrics_batch)
return log_eval_summary(
writer=writer,
eval_metrics=train_utils.unreplicate_and_get(eval_metrics_all),
step=jax_utils.unreplicate(train_state.global_step),
prefix=config.get('writer_prefix', 'valid'))
| {
"content_hash": "d375ac9daa7c4f00c92da77e154d926c",
"timestamp": "",
"source": "github",
"line_count": 293,
"max_line_length": 84,
"avg_line_length": 35.948805460750854,
"alnum_prop": 0.6733124465964113,
"repo_name": "google-research/scenic",
"id": "97532146644126fc6d7c7053960ebb1daacb8e0f",
"size": "10535",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "scenic/projects/lang4video/trainer/zero_shot_classification_trainer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1717873"
},
{
"name": "Python",
"bytes": "3692184"
}
],
"symlink_target": ""
} |
"""Render a CSV file using d3.js."""
from __future__ import absolute_import, print_function
import csv
from flask import current_app, render_template
from ..proxies import current_previewer
from ..utils import detect_encoding
previewable_extensions = ['csv', 'dsv']
def validate_csv(file):
"""Return dialect information about given csv file."""
try:
# Detect encoding and dialect
with file.open() as fp:
encoding = detect_encoding(fp, default='utf-8')
sample = fp.read(
current_app.config.get('PREVIEWER_CSV_VALIDATION_BYTES', 1024))
allowed_delimiters = current_app.config.get(
'PREVIEWER_CSV_SNIFFER_ALLOWED_DELIMITERS', None)
delimiter = csv.Sniffer().sniff(
sample=sample.decode(encoding),
delimiters=allowed_delimiters).delimiter
is_valid = True
except Exception as e:
current_app.logger.debug(
'File {0} is not valid CSV: {1}'.format(file.uri, e))
encoding = ''
delimiter = ''
is_valid = False
return {
'delimiter': delimiter,
'encoding': encoding,
'is_valid': is_valid
}
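# Example of the returned mapping for a well-formed comma-separated file
# (illustrative values): {'delimiter': ',', 'encoding': 'utf-8', 'is_valid': True}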
def can_preview(file):
"""Determine if the given file can be previewed."""
if file.is_local() and file.has_extensions('.csv', '.dsv'):
return validate_csv(file)['is_valid']
return False
def preview(file):
"""Render the appropriate template with embed flag."""
file_info = validate_csv(file)
return render_template(
'invenio_previewer/csv_bar.html',
file=file,
delimiter=file_info['delimiter'],
encoding=file_info['encoding'],
js_bundles=current_previewer.js_bundles + ['d3_csv.js'],
css_bundles=current_previewer.css_bundles,
)
| {
"content_hash": "a00da8668ed58fc3bcb23fb25afc372e",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 79,
"avg_line_length": 30.5,
"alnum_prop": 0.6125683060109289,
"repo_name": "inveniosoftware/invenio-previewer",
"id": "80349d5b7de34359029f6aa39e2969c1cb2cfcf5",
"size": "2065",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "invenio_previewer/extensions/csv_dthreejs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "121581"
},
{
"name": "HTML",
"bytes": "55921"
},
{
"name": "JavaScript",
"bytes": "891575"
},
{
"name": "Python",
"bytes": "68165"
},
{
"name": "SCSS",
"bytes": "84018"
},
{
"name": "Shell",
"bytes": "594"
}
],
"symlink_target": ""
} |
import matplotlib.pyplot as plt
import argparse
import json
from pr_json_common import *
from json_dict_common import *
scalings = { 'constant' : (lambda x, y : 1.),
'lineard' : (lambda x, y : float(x) / y),
'lineari' : (lambda x, y : float(y) / x),
'quadraticd' : (lambda x, y : float(x**2) / y**2),
'quadratici' : (lambda x, y : float(y**2) / x**2)
}
def isDecreasing(name):
assert isinstance(name, str)
return name[-1] == 'd'
#### End of function isDecreasing
def get_label_name(name):
return name[:-1] if (name[-1] == 'i' or name[-1] == 'd') else name
#### End of function get_label_name
def read_time_data_from_files(fileList, threads=False):
"""
Reads the running time and process counts from the list of files passed in
and returns these as a dictionary of (processes : time)
Args:
fileList (list): List of filenames to read data from
threads (bool): Indicates whether threads, instead of processes,
should be read from the summary files
Returns:
A dictionary containing the processor count with the run time
"""
assert isinstance(fileList, list)
timeDict = {}
# Loop over the filenames
for filename in fileList:
filename = filename.strip()
try:
# Open the file for reading
with open(filename, "r") as infile:
# Read the json
jsonDict = json.load(infile)
runtime = get_runtime(jsonDict)
numprocs = get_num_threads(jsonDict) if threads else get_num_processes(jsonDict)
timeDict[numprocs] = runtime
except IOError:
print("File " + filename + " does not exist. Skipping.")
pass
return timeDict
#### End of function read_time_data_from_files
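# Sketch of the returned mapping (illustrative numbers):
# {2: 512.3, 4: 270.1, 8: 141.9}, i.e. process count -> wallclock seconds.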
def get_ideal_func(expected):
return scalings[expected]
#### End of function get_ideal_func
def get_ideal_line(initTime, coreCounts, expected):
"""
Gets data for an ideal scaling line in either the weak or strong case.
Args:
initTime (float): The initial time from which to draw an ideal scaling
coreCounts (list): List of counts of cores from which the ideal line
can be calculated
expected (str): Indicates what sort of scaling is expected for the
ideal line
"""
idealData = [0. for _ in coreCounts]
idealData[0] = initTime
scalingFunc = get_ideal_func(expected)
for i in range(1,len(coreCounts)):
idealData[i] = idealData[i-1] * scalingFunc(coreCounts[i-1], coreCounts[i])
return idealData
#### End of function get_ideal_line
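# For example (sketch): get_ideal_line(100.0, [1, 2, 4], 'lineard') returns
# [100.0, 50.0, 25.0], i.e. the ideal time halves whenever the core count doubles.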
def plot_time_data(timeData, number, handles=[], threads=False, labels=None,
expectedScaling=['lineard'], logy=True):
"""
Plots the data given in the dictionary of time data. The keys in here are
the number of processes that are used, and the values are the wallclock
time for the run
Args:
timeData (dict): A dictionary assumed to have a very specific format
number (int): Counter indicating which set of data to plot. This has an
effect on the style as well as the labelling of data
Returns:
Nothing
"""
assert isinstance(timeData, dict)
# Get the list of keys and sort them
sortedKeys = sorted(timeData.keys())
x = range(len(sortedKeys))
# Get the data to plot
yData = [timeData[key] for key in sortedKeys]
pltFunc = plt.semilogy if logy else plt.plot
#plt.autoscale(enable=True, axis='y', tight=True)
# Plot a set of expected lines
if number == 0:
lineStyles = ['k-', 'k--', 'k-.']
for cnt, expected in enumerate(expectedScaling):
label = get_label_name(expected)
idealFunc = max if isDecreasing(expected) else min
idealInit = idealFunc(yData)
idealHandle, = pltFunc(x, get_ideal_line(idealInit, sortedKeys, expected), lineStyles[cnt], label=label)
#idealHandle, = pltFunc(x, get_ideal_line(idealInit, sortedKeys, expectedScaling), 'k-', label="expected")
handles.append(idealHandle)
# We want a log plot of the results
linestyle = ['r-', 'b-', 'g-', 'c-', 'k-']
if labels:
label = labels[number]
else:
label = "actual"
lineHandle, = pltFunc(x, yData, linestyle[number], label=label, linewidth=2)
handles.append(lineHandle)
# Set the legend, axes label and ticks
plt.xticks(x, sortedKeys)
if number == 0:
if (threads):
plt.xlabel("Number of Threads")
else:
plt.xlabel("Number of Processes")
plt.ylabel("Run time (s)")
# The next line sets the limits on the y-axis manually, if required, for a
# certain plot
#plt.gca().set_ylim(bottom=1e3, top=4e4)
#### End of function plot_time_data
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Utility to plot the running" +
" time of a series of applications stored as JSON format" +
" Performance Report profiles. It is assumed that the series " +
"is of the same application, showing strong scaling")
# Add a file containing a list of files to read data from
parser.add_argument("infiles", help="JSON file to read a list of input files from",
type=argparse.FileType('r'), nargs="+")
# Add an argument to show if the strong scaling is for threads or processes
parser.add_argument("--threads", help="Indicates whether threads or processes" +
" should used in the scaling analysis", action="store_true",
default=False)
parser.add_argument("--labels", help="Gives the labels for the different" +
" datasets passed in. The number of labels given should be the same" +
" as the number of input files given", nargs="+", default=None)
parser.add_argument("--expected", help="Indicates which scaling is expected" +
" for the model. This should be one of ['constant', 'linear[i/d]'," +
" 'quadratic[i/d]']. The i or d suffix indicates increasing or " +
"decreasing scale", choices=sorted(scalings.keys()), default=["lineard"], nargs="+")
parser.add_argument("--nolog", help="Indicates that a log scale should not be used",
action="store_true", default=False)
args = parser.parse_args()
# Read the list of files
handles = []
for cnt, infile in enumerate(args.infiles):
fileList = infile.readlines()
# Get the summary data from the files
timeData = read_time_data_from_files(fileList, args.threads)
# Plot the summary data in a bar chart
plot_time_data(timeData, cnt, handles, args.threads, args.labels, args.expected,
not args.nolog)
plt.legend(handles=handles, loc=1, bbox_to_anchor=(1.1, 1.1))
plt.show()
| {
"content_hash": "ac432d641fb8727a6f18f87c3b64ae28",
"timestamp": "",
"source": "github",
"line_count": 180,
"max_line_length": 118,
"avg_line_length": 38.544444444444444,
"alnum_prop": 0.6274142404151052,
"repo_name": "arm-hpc/allinea_json_analysis",
"id": "7b0af33cecd90da883763cfcfe52c84d8a267617",
"size": "7545",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PR_JSON_Scripts/plot_scaling_overall_time.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "125668"
},
{
"name": "Shell",
"bytes": "1969"
}
],
"symlink_target": ""
} |
import pandas as pd
# TODO: move your MINT.com transactions.csv into this project directory
# / the directory where you are running jupyter notebook
PATH_TO_YOUR_TRANSACTIONS = "transactions.csv"
# pd.read_csv with a date index replaces the removed DataFrame.from_csv
df = pd.read_csv(PATH_TO_YOUR_TRANSACTIONS, index_col=0, parse_dates=True)
## Create a Dataframe view for only 2016 year transactions
# the leftmost column of the ingested csv file, by default is assumed as your 'index' of the DataFrame
# here we filter for all records w. dates less than Jan 1, 2017 and greater than Dec 31, 2015
# what is returned: a new view of the DataFrame only with records in this restricted date range
y16 = df[(df.index < '2017-01-01') & ( df.index > '2015-12-31' )]
# Using the Mint 'Rental Car & Taxi' category label
# it's possible to create a view for all 2016 rental/taxi expenditure
rentalcar = y16[y16.Category == "Rental Car & Taxi"]
## A way to sum-up train/metro/public-transit expenditures
# Substitute your public transit specific search strings e.g. "MBTA", "MTA", "BART", "SUBWAY"
#
# Side Note:
# If you eat a lot at Subway and if your public transit code contains SUBWAY,
# you may be counting sandwiches too in this view :-)
MBTA = y16[y16['Original Description'].str.contains("MBTA")]
## Example of how to quickly sum up the 'Amount' columns for several different views.
transport_sum = MBTA.Amount.sum() + rentalcar.Amount.sum()
## What if we want to figure out travel expenses for a business trip to San Francisco
sf_transport = y16[(y16.index > '2016-06-24') & (y16.index < '2016-07-05')]
sf_transport_sum = sf_transport.Amount.sum()
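## Bonus (a quick sketch): total 2016 spend per Mint category, largest first.
## 'Category' and 'Amount' are the standard column names in a Mint export.
category_totals = y16.groupby('Category').Amount.sum().sort_values(ascending=False)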
| {
"content_hash": "b6abdf47ee4b8a459ce9c47da10bf4b0",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 102,
"avg_line_length": 43.52777777777778,
"alnum_prop": 0.7370772176132737,
"repo_name": "wilsonqin/pandas-mint-taxes",
"id": "20f40ef376264c52842c0c96bae9f2d8780d58d4",
"size": "1774",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "review_transactions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "3396"
},
{
"name": "Python",
"bytes": "1774"
}
],
"symlink_target": ""
} |
"""
UserBot module
Copyright 2015-2016, Ismael R. Lugo G.
"""
import glob
import posixpath
import builder
reload(builder)
for name in glob.glob('mods/ubmod/ubmod/*.ini'):
builder.ubmodule(posixpath.basename(name))
| {
"content_hash": "910da1184b48fb937a6ed7693a712e71",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 48,
"avg_line_length": 17,
"alnum_prop": 0.7375565610859729,
"repo_name": "IsmaelRLG/UserBot",
"id": "edf85a177b7182bb1f74c2fe046242a31569527c",
"size": "245",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mods/ubmod/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "205252"
}
],
"symlink_target": ""
} |
import sys, inspect
import numpy as np
from copy import deepcopy
from collections.abc import Iterable
from ..mfbase import MFDataException, FlopyException
from .mfstructure import DatumType
from ...utils.datautil import PyListUtil, DatumUtil
import struct
def iterable(obj):
return isinstance(obj, Iterable)
def get_first_val(arr):
while isinstance(arr, list) or isinstance(arr, np.ndarray):
arr = arr[0]
return arr
# convert_data(data, type) : type
# converts data "data" to type "type" and returns the converted data
def convert_data(data, data_dimensions, data_type, data_item=None):
if data_type == DatumType.double_precision:
if data_item is not None and data_item.support_negative_index:
val = int(PyListUtil.clean_numeric(data))
if val == -1:
return -0.0
elif val == 1:
return 0.0
elif val < 0:
val += 1
else:
val -= 1
try:
return float(val)
except (ValueError, TypeError):
message = (
'Data "{}" with value "{}" can '
"not be converted to float"
".".format(data_dimensions.structure.name, data)
)
type_, value_, traceback_ = sys.exc_info()
raise MFDataException(
data_dimensions.structure.get_model(),
data_dimensions.structure.get_package(),
data_dimensions.structure.path,
"converting data",
data_dimensions.structure.name,
inspect.stack()[0][3],
type_,
value_,
traceback_,
message,
False,
)
else:
try:
if isinstance(data, str):
# fix any scientific formatting that python can't handle
data = data.replace("d", "e")
return float(data)
except (ValueError, TypeError):
try:
return float(PyListUtil.clean_numeric(data))
except (ValueError, TypeError):
message = (
'Data "{}" with value "{}" can '
"not be converted to float"
".".format(data_dimensions.structure.name, data)
)
type_, value_, traceback_ = sys.exc_info()
raise MFDataException(
data_dimensions.structure.get_model(),
data_dimensions.structure.get_package(),
data_dimensions.structure.path,
"converting data",
data_dimensions.structure.name,
inspect.stack()[0][3],
type_,
value_,
traceback_,
message,
False,
)
elif data_type == DatumType.integer:
if data_item is not None and data_item.numeric_index:
return int(PyListUtil.clean_numeric(data)) - 1
try:
return int(data)
except (ValueError, TypeError):
try:
return int(PyListUtil.clean_numeric(data))
except (ValueError, TypeError):
message = (
'Data "{}" with value "{}" can not be '
"converted to int"
".".format(data_dimensions.structure.name, data)
)
type_, value_, traceback_ = sys.exc_info()
raise MFDataException(
data_dimensions.structure.get_model(),
data_dimensions.structure.get_package(),
data_dimensions.structure.path,
"converting data",
data_dimensions.structure.name,
inspect.stack()[0][3],
type_,
value_,
traceback_,
message,
False,
)
elif data_type == DatumType.string and data is not None:
if data_item is None or not data_item.preserve_case:
# keep strings lower case
return data.lower()
return data
def to_string(
val,
data_type,
sim_data,
data_dim,
is_cellid=False,
possible_cellid=False,
data_item=None,
):
if data_type == DatumType.double_precision:
if data_item is not None and data_item.support_negative_index:
if val > 0:
return str(int(val + 1))
elif val == 0.0:
if (
struct.pack(">d", val)
== b"\x80\x00\x00\x00\x00\x00\x00\x00"
):
# value is negative zero
return str(int(val - 1))
else:
# value is positive zero
return str(int(val + 1))
else:
return str(int(val - 1))
else:
try:
abs_val = abs(val)
except TypeError:
return str(val)
if (
abs_val > sim_data._sci_note_upper_thres
or abs_val < sim_data._sci_note_lower_thres
) and abs_val != 0:
return sim_data.reg_format_str.format(val)
else:
return sim_data.sci_format_str.format(val)
elif is_cellid or (possible_cellid and isinstance(val, tuple)):
if DatumUtil.is_int(val):
return str(val + 1)
if len(val) > 0 and isinstance(val, str) and val.lower() == "none":
# handle case that cellid is 'none'
return val
if is_cellid and data_dim.get_model_dim(None).model_name is not None:
model_grid = data_dim.get_model_grid()
cellid_size = model_grid.get_num_spatial_coordinates()
if len(val) != cellid_size:
message = (
'Cellid "{}" contains {} integer(s). Expected a'
" cellid containing {} integer(s) for grid type"
" {}.".format(
val, len(val), cellid_size, str(model_grid.grid_type())
)
)
type_, value_, traceback_ = sys.exc_info()
raise MFDataException(
data_dim.structure.get_model(),
data_dim.structure.get_package(),
data_dim.structure.path,
"converting cellid to string",
data_dim.structure.name,
inspect.stack()[0][3],
type_,
value_,
traceback_,
message,
sim_data.debug,
)
string_val = []
if isinstance(val, str):
string_val.append(val)
else:
for item in val:
string_val.append(str(item + 1))
return " ".join(string_val)
elif data_type == DatumType.integer:
if data_item is not None and data_item.numeric_index:
            return str(int(val) + 1)
return str(int(val))
elif data_type == DatumType.string:
try:
arr_val = val.split()
except AttributeError:
return str(val)
if len(arr_val) > 1:
# quote any string with spaces
string_val = "'{}'".format(val)
if data_item is not None and data_item.ucase:
return string_val.upper()
else:
return string_val
if data_item is not None and data_item.ucase:
return str(val).upper()
else:
return str(val)
class MFComment(object):
"""
Represents a variable in a MF6 input file
Parameters
----------
comment : string or list
comment to be displayed in output file
path : string
tuple representing location in the output file
line_number : integer
line number to display comment in output file
Attributes
----------
comment : string or list
comment to be displayed in output file
path : string
tuple representing location in the output file
line_number : integer
line number to display comment in output file
Methods
-------
write : (file)
writes the comment to file
add_text(additional_text)
adds text to the comment
get_file_entry(eoln_suffix=True)
returns the comment text in the format to write to package files
is_empty(include_whitespace=True)
checks to see if comment is just an empty string ''. if
include_whitespace is set to false a string with only whitespace is
considered empty
is_comment(text, include_empty_line=False) : boolean
returns true if text is a comment. an empty line is considered a
comment if include_empty_line is true.
See Also
--------
Notes
-----
Examples
--------
"""
def __init__(self, comment, path, sim_data, line_number=0):
if not (
isinstance(comment, str)
or isinstance(comment, list)
or comment is None
):
raise FlopyException(
'Comment "{}" not valid. Comment must be '
"of type str of list.".format(comment)
)
self.text = comment
self.path = path
self.line_number = line_number
self.sim_data = sim_data
"""
Add text to the comment string.
Parameters
----------
additional_text: string
text to add
"""
def add_text(self, additional_text):
if additional_text:
if isinstance(self.text, list):
self.text.append(additional_text)
else:
self.text = "{} {}".format(self.text, additional_text)
"""
Get the comment text in the format to write to package files.
Parameters
----------
eoln_suffix: boolean
have comment text end with end of line character
Returns
-------
string : comment text
"""
def get_file_entry(self, eoln_suffix=True):
file_entry = ""
if self.text and self.sim_data.comments_on:
if not isinstance(self.text, str) and isinstance(self.text, list):
file_entry = self._recursive_get(self.text)
else:
if self.text.strip():
file_entry = self.text
if eoln_suffix:
file_entry = "{}\n".format(file_entry)
return file_entry
def _recursive_get(self, base_list):
file_entry = ""
if base_list and self.sim_data.comments_on:
for item in base_list:
if not isinstance(item, str) and isinstance(item, list):
file_entry = "{}{}".format(
file_entry, self._recursive_get(item)
)
else:
file_entry = "{} {}".format(file_entry, item)
return file_entry
"""
Write the comment text to a file.
Parameters
----------
fd : file
file to write to
eoln_suffix: boolean
have comment text end with end of line character
"""
def write(self, fd, eoln_suffix=True):
if self.text and self.sim_data.comments_on:
if not isinstance(self.text, str) and isinstance(self.text, list):
self._recursive_write(fd, self.text)
else:
if self.text.strip():
fd.write(self.text)
if eoln_suffix:
fd.write("\n")
"""
Check for comment text
Parameters
----------
include_whitespace : boolean
include whitespace as text
Returns
-------
boolean : True if comment text exists
"""
def is_empty(self, include_whitespace=True):
if include_whitespace:
            if self.text:
return True
return False
else:
if self.text.strip():
return True
return False
"""
Check text to see if it is valid comment text
Parameters
----------
text : string
potential comment text
include_empty_line : boolean
allow empty line to be valid
Returns
-------
boolean : True if text is valid comment text
"""
@staticmethod
def is_comment(text, include_empty_line=False):
if not text:
return include_empty_line
if text and isinstance(text, list):
# look for comment mark in first item of list
text_clean = text[0].strip()
else:
text_clean = text.strip()
if include_empty_line and not text_clean:
return True
if text_clean and (
text_clean[0] == "#"
or text_clean[0] == "!"
or text_clean[0] == "//"
):
return True
return False
# recursively writes a nested list to a file
def _recursive_write(self, fd, base_list):
if base_list:
for item in base_list:
if not isinstance(item, str) and isinstance(item, list):
self._recursive_write(fd, item)
else:
fd.write(" {}".format(item))
class TemplateGenerator(object):
"""
Abstract base class for building a data template for different data types.
This is a generic class that is initialized with a path that identifies
the data to be built.
Parameters
----------
path : string
tuple containing path of data is described in dfn files
(<model>,<package>,<block>,<data name>)
"""
def __init__(self, path):
self.path = path
def _get_data_dimensions(self, model):
from ..data import mfstructure
from ..coordinates import modeldimensions
# get structure info
sim_struct = mfstructure.MFStructure().sim_struct
package_struct = sim_struct.get_data_structure(self.path[0:-2])
# get dimension info
data_struct = sim_struct.get_data_structure(self.path)
package_dim = modeldimensions.PackageDimensions(
[model.dimensions], package_struct, self.path[0:-1]
)
return (
data_struct,
modeldimensions.DataDimensions(package_dim, data_struct),
)
def build_type_header(self, ds_type, data=None):
from ..data.mfdatastorage import DataStorageType
if ds_type == DataStorageType.internal_array:
if isinstance(self, ArrayTemplateGenerator):
return {"factor": 1.0, "iprn": 1, "data": data}
else:
return None
elif ds_type == DataStorageType.internal_constant:
return data
elif ds_type == DataStorageType.external_file:
return {"filename": "", "factor": 1.0, "iprn": 1}
return None
class ArrayTemplateGenerator(TemplateGenerator):
"""
Class that builds a data template for MFArrays. This is a generic class
that is initialized with a path that identifies the data to be built.
Parameters
----------
path : string
tuple containing path of data is described in dfn files
(<model>,<package>,<block>,<data name>)
Methods
-------
empty: (model: MFModel, layered: boolean, data_storage_type_list: boolean,
default_value: int/float) : variable
Builds a template for the data you need to specify for a specific data
type (ie. "hk") in a specific model. The data type and dimensions
is determined by "path" during initialization of this class and the
model is passed in to this method as the "model" parameter. If the
data is transient a dictionary containing a single stress period
will be returned. If "layered" is set to true, data will be returned
as a list ndarrays, one for each layer. data_storage_type_list is a
list of DataStorageType, one type for each layer. If "default_value"
is specified the data template will be populated with that value,
otherwise each ndarray in the data template will be populated with
np.empty (0 or 0.0 if the DataStorageType is a constant).
"""
def __init__(self, path):
super(ArrayTemplateGenerator, self).__init__(path)
def empty(
self,
model=None,
layered=False,
data_storage_type_list=None,
default_value=None,
):
from ..data import mfdatastorage, mfstructure
from ..data.mfdatastorage import DataStorageType, DataStructureType
# get the expected dimensions of the data
data_struct, data_dimensions = self._get_data_dimensions(model)
datum_type = data_struct.get_datum_type()
data_type = data_struct.get_datatype()
# build a temporary data storage object
data_storage = mfdatastorage.DataStorage(
model.simulation_data,
model,
data_dimensions,
None,
DataStorageType.internal_array,
DataStructureType.recarray,
data_path=self.path,
)
dimension_list = data_storage.get_data_dimensions(None)
# if layered data
if layered and dimension_list[0] > 1:
if (
data_storage_type_list is not None
and len(data_storage_type_list) != dimension_list[0]
):
comment = (
"data_storage_type_list specified with the "
"wrong size. Size {} but expected to be "
"the same as the number of layers, "
"{}.".format(
len(data_storage_type_list), dimension_list[0]
)
)
type_, value_, traceback_ = sys.exc_info()
raise MFDataException(
data_struct.get_model(),
data_struct.get_package(),
data_struct.path,
"generating array template",
data_struct.name,
inspect.stack()[0][3],
type_,
value_,
traceback_,
comment,
model.simulation_data.debug,
)
# build each layer
data_with_header = []
for layer in range(0, dimension_list[0]):
# determine storage type
if data_storage_type_list is None:
data_storage_type = DataStorageType.internal_array
else:
data_storage_type = data_storage_type_list[layer]
# build data type header
data_with_header.append(
self._build_layer(
datum_type,
data_storage_type,
default_value,
dimension_list,
)
)
else:
if (
data_storage_type_list is None
or data_storage_type_list[0] == DataStorageType.internal_array
):
data_storage_type = DataStorageType.internal_array
else:
data_storage_type = data_storage_type_list[0]
# build data type header
data_with_header = self._build_layer(
datum_type,
data_storage_type,
default_value,
dimension_list,
True,
)
# if transient/multiple list
if data_type == mfstructure.DataType.array_transient:
# Return as dictionary
return {0: data_with_header}
else:
return data_with_header
def _build_layer(
self,
data_type,
data_storage_type,
default_value,
dimension_list,
all_layers=False,
):
from ..data.mfdatastorage import DataStorageType
# build data
if data_storage_type == DataStorageType.internal_array:
if default_value is None:
if all_layers:
data = np.empty(dimension_list, data_type)
else:
data = np.empty(dimension_list[1:], data_type)
else:
if all_layers:
data = np.full(dimension_list, default_value, data_type)
else:
data = np.full(
dimension_list[1:], default_value, data_type
)
elif data_storage_type == DataStorageType.internal_constant:
if default_value is None:
if data_type == np.int32:
data = 0
else:
data = 0.0
else:
data = default_value
else:
data = None
# build data type header
return self.build_type_header(data_storage_type, data)
class ListTemplateGenerator(TemplateGenerator):
"""
Class that builds a data template for MFLists. This is a generic class
that is initialized with a path that identifies the data to be built.
Parameters
----------
path : string
tuple containing path of data is described in dfn files
(<model>,<package>,<block>,<data name>)
Methods
-------
empty: (maxbound: int, aux_vars: list, boundnames: boolean, nseg: int) :
dictionary
Builds a template for the data you need to specify for a specific data
type (ie. "stress_period_data") in a specific model. The data type is
determined by "path" during initialization of this class. If the data
is transient a dictionary containing a single stress period will be
returned. The number of entries in the recarray are determined by
the "maxbound" parameter. The "aux_vars" parameter is a list of aux
var names to be used in this data list. If boundnames is set to
true and boundname field will be included in the recarray. nseg is
only used on list data that contains segments. If timeseries is true,
a template that is compatible with time series data is returned.
"""
def __init__(self, path):
super(ListTemplateGenerator, self).__init__(path)
def _build_template_data(self, type_list):
template_data = []
for type in type_list:
if type[1] == int:
template_data.append(0)
elif type[1] == float:
template_data.append(np.nan)
else:
template_data.append(None)
return tuple(template_data)
def empty(
self,
model,
maxbound=None,
aux_vars=None,
boundnames=False,
nseg=None,
timeseries=False,
stress_periods=None,
):
from ..data import mfdatastorage, mfstructure
data_struct, data_dimensions = self._get_data_dimensions(model)
data_type = data_struct.get_datatype()
# build a temporary data storage object
data_storage = mfdatastorage.DataStorage(
model.simulation_data,
model,
data_dimensions,
None,
mfdatastorage.DataStorageType.internal_array,
mfdatastorage.DataStructureType.recarray,
)
# build type list
type_list = data_storage.build_type_list(nseg=nseg)
if aux_vars is not None:
if len(aux_vars) > 0 and (
isinstance(aux_vars[0], list) or isinstance(aux_vars[0], tuple)
):
aux_vars = aux_vars[0]
for aux_var in aux_vars:
type_list.append((aux_var, object))
if boundnames:
type_list.append(("boundname", object))
if timeseries:
# fix type list to make all types objects
for index, d_type in enumerate(type_list):
type_list[index] = (d_type[0], object)
# build recarray
template_data = self._build_template_data(type_list)
rec_array_data = []
if maxbound is not None:
for index in range(0, maxbound):
rec_array_data.append(template_data)
else:
rec_array_data.append(template_data)
rec_array = np.rec.array(rec_array_data, type_list)
# if transient/multiple list
if (
data_type == mfstructure.DataType.list_transient
or data_type == mfstructure.DataType.list_multiple
):
# Return as dictionary
if stress_periods is None:
return {0: rec_array}
else:
template = {}
for stress_period in stress_periods:
template[stress_period] = deepcopy(rec_array)
return template
else:
return rec_array
class MFDocString(object):
"""
Helps build a python class doc string
Parameters
----------
description : string
description of the class
Attributes
----------
indent: string
indent to use in doc string
description : string
description of the class
parameter_header : string
header for parameter section of doc string
parameters : list
list of docstrings for class parameters
Methods
-------
add_parameter : (param_descr : string, beginning_of_list : bool)
adds doc string for a parameter with description 'param_descr' to the
end of the list unless beginning_of_list is True
get_doc_string : () : string
builds and returns the docstring for the class
"""
def __init__(self, description):
self.indent = " "
self.description = description
self.parameter_header = "{}Parameters\n{}" "----------".format(
self.indent, self.indent
)
self.parameters = []
self.model_parameters = []
def add_parameter(
self, param_descr, beginning_of_list=False, model_parameter=False
):
if beginning_of_list:
self.parameters.insert(0, param_descr)
if model_parameter:
self.model_parameters.insert(0, param_descr)
else:
self.parameters.append(param_descr)
if model_parameter:
self.model_parameters.append(param_descr)
def get_doc_string(self, model_doc_string=False):
doc_string = '{}"""\n{}{}\n\n{}\n'.format(
self.indent, self.indent, self.description, self.parameter_header
)
if model_doc_string:
param_list = self.model_parameters
doc_string = (
"{} modelname : string\n name of the "
"model\n model_nam_file : string\n"
" relative path to the model name file from "
"model working folder\n version : string\n"
" version of modflow\n exe_name : string\n"
" model executable name\n"
" model_ws : string\n"
" model working folder path"
"\n".format(doc_string)
)
else:
param_list = self.parameters
for parameter in param_list:
doc_string += "{}\n".format(parameter)
if not model_doc_string:
doc_string += '\n{}"""'.format(self.indent)
return doc_string
| {
"content_hash": "8d9131dad7be029d5c0e8ad7a4267e2b",
"timestamp": "",
"source": "github",
"line_count": 822,
"max_line_length": 79,
"avg_line_length": 34.20559610705596,
"alnum_prop": 0.5255895010136217,
"repo_name": "aleaf/flopy",
"id": "530533c5d394c4f55e3d2f41c95d735fcdd468da",
"size": "28117",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "flopy/mf6/data/mfdatautil.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "67"
},
{
"name": "Python",
"bytes": "5469342"
},
{
"name": "Shell",
"bytes": "2562"
}
],
"symlink_target": ""
} |
"""SAC tests."""
import pickle
from absl.testing import absltest
from absl.testing import parameterized
from brax import envs
from brax.training.acme import running_statistics
from brax.training.agents.sac import networks as sac_networks
from brax.training.agents.sac import train as sac
import jax
class SACTest(parameterized.TestCase):
"""Tests for SAC module."""
def testTrain(self):
"""Test SAC with a simple env."""
_, _, metrics = sac.train(
envs.get_environment('fast'),
num_timesteps=2**15,
episode_length=128,
num_envs=64,
learning_rate=3e-4,
discounting=0.99,
batch_size=64,
normalize_observations=True,
reward_scaling=10,
grad_updates_per_step=64,
seed=0)
self.assertGreater(metrics['eval/episode_reward'], 140 * 0.995)
@parameterized.parameters(True, False)
def testNetworkEncoding(self, normalize_observations):
env = envs.get_environment('fast')
original_inference, params, _ = sac.train(
env,
num_timesteps=128,
episode_length=128,
num_envs=128,
normalize_observations=normalize_observations)
normalize_fn = lambda x, y: x
if normalize_observations:
normalize_fn = running_statistics.normalize
sac_network = sac_networks.make_sac_networks(env.observation_size,
env.action_size, normalize_fn)
inference = sac_networks.make_inference_fn(sac_network)
byte_encoding = pickle.dumps(params)
decoded_params = pickle.loads(byte_encoding)
# Compute one action.
state = env.reset(jax.random.PRNGKey(0))
original_action = original_inference(decoded_params)(
state.obs, jax.random.PRNGKey(0))[0]
action = inference(decoded_params)(state.obs, jax.random.PRNGKey(0))[0]
self.assertSequenceEqual(original_action, action)
env.step(state, action)
if __name__ == '__main__':
absltest.main()
| {
"content_hash": "a4ff0d970a4ea69067362752eb2161d3",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 79,
"avg_line_length": 31.725806451612904,
"alnum_prop": 0.6634468734112863,
"repo_name": "google/brax",
"id": "7ae0fdd7325890b7aff17b9ce263bfabe094a4e9",
"size": "2549",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "brax/training/agents/sac/train_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "27572"
},
{
"name": "Jupyter Notebook",
"bytes": "8554172"
},
{
"name": "Python",
"bytes": "1189091"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from vulyk.models.task_types import AbstractTaskType
from vulyk_tagging.models.tasks import TaggingAnswer, TaggingTask
class TaggingTaskType(AbstractTaskType):
"""
Tagging Task to work with Vulyk.
"""
answer_model = TaggingAnswer
task_model = TaggingTask
    # English: name = "Text tagging",
    # description = "Checking that the text was recognized correctly"
    name = 'Тэггинг текста'
    description = 'Проверка текста на правильное распознание'
template = 'base.html'
helptext_template = 'help.html'
type_name = 'tagging_task'
redundancy = 3
JS_ASSETS = ['static/scripts/keymaster.js',
'static/scripts/handlebars.js',
'static/scripts/bootstrap-select.js',
'static/scripts/typeahead.js',
'static/scripts/bootstrap-tagsinput.js',
'static/scripts/base.js']
CSS_ASSETS = ['static/styles/bootstrap-select.css',
'static/styles/bootstrap-tagsinput.css',
'static/styles/base.css',
'static/styles/autocomplete.css']
| {
"content_hash": "c620398ec2a33a303740028dc18a6867",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 65,
"avg_line_length": 30.558823529411764,
"alnum_prop": 0.631376323387873,
"repo_name": "hotsyk/vulyk-tagging",
"id": "1746dad5f278c79bfdc3686e547c9621785ab6e4",
"size": "1113",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vulyk_tagging/models/task_types.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2010"
},
{
"name": "HTML",
"bytes": "3729"
},
{
"name": "JavaScript",
"bytes": "182088"
},
{
"name": "Makefile",
"bytes": "1720"
},
{
"name": "Python",
"bytes": "8253"
}
],
"symlink_target": ""
} |
"""
SQLAlchemy-JSONAPI
Flask Adapter
Colton J. Provias
MIT License
"""
import datetime
import json
import uuid
from functools import wraps
from blinker import signal
from flask import make_response, request
from .errors import BaseError, MissingContentTypeError
from .serializer import JSONAPI
try:
from enum import Enum
except ImportError:
from enum34 import Enum
class Method(Enum):
""" HTTP Methods used by JSON API """
GET = 'GET'
POST = 'POST'
PATCH = 'PATCH'
DELETE = 'DELETE'
class Endpoint(Enum):
""" Four paths specified in JSON API """
COLLECTION = '/<api_type>'
RESOURCE = '/<api_type>/<obj_id>'
RELATED = '/<api_type>/<obj_id>/<relationship>'
RELATIONSHIP = '/<api_type>/<obj_id>/relationships/<relationship>'
class JSONAPIEncoder(json.JSONEncoder):
""" JSONEncoder Implementation that allows for UUID and datetime """
def default(self, value):
"""
Handle UUID, datetime, and callables.
:param value: Value to encode
"""
if isinstance(value, uuid.UUID):
return str(value)
elif isinstance(value, datetime.datetime):
return value.isoformat()
elif callable(value):
return str(value)
return json.JSONEncoder.default(self, value)
#: The views to generate
views = [
(Method.GET, Endpoint.COLLECTION), (Method.GET, Endpoint.RESOURCE),
(Method.GET, Endpoint.RELATED), (Method.GET, Endpoint.RELATIONSHIP),
(Method.POST, Endpoint.COLLECTION), (Method.POST, Endpoint.RELATIONSHIP),
(Method.PATCH, Endpoint.RESOURCE), (Method.PATCH, Endpoint.RELATIONSHIP),
(Method.DELETE, Endpoint.RESOURCE), (Method.DELETE, Endpoint.RELATIONSHIP)
]
def override(original, results):
"""
If a receiver to a signal returns a value, we override the original value
with the last returned value.
:param original: The original value
:param results: The results from the signal
"""
overrides = [v for fn, v in results if v is not None]
if len(overrides) == 0:
return original
return overrides[-1]
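# Behaviour sketch (illustrative): `results` has the (receiver, value) shape
# returned by blinker's Signal.send, and the last non-None value wins.
#
#     override('original', []) == 'original'
#     override('original', [(None, None), (None, 'patched')]) == 'patched'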
class FlaskJSONAPI(object):
""" Flask Adapter """
#: Fires before the serializer is called. Functions should implement the
#: following args: (sender, method, endpoint, data, req_args)
on_request = signal('jsonapi-on-request')
#: Fires before we return the response. Included args are:
#: (sender, method, endpoint, data, req_args, rendered_response)
on_response = signal('jsonapi-on-response')
#: Fires after a successful call to the serializer.
#: (sender, method, endpoint, data, req_args, response)
on_success = signal('jsonapi-on-success')
#: Fires when an error is encountered.
#: (sender, method, endpoint, data, req_args, error)
on_error = signal('jsonapi-on-error')
#: JSON Encoder to use
json_encoder = JSONAPIEncoder
def __init__(self,
app=None,
sqla=None,
namespace='api',
route_prefix='/api'):
"""
Initialize the adapter. If app isn't passed here, it should be passed
in init_app.
:param app: Flask application
:param sqla: Flask-SQLAlchemy instance
:param namespace: Prefixes all generated routes
:param route_prefix: The base path for the generated routes
"""
self.app = app
self.sqla = sqla
self._handler_chains = dict()
if app is not None:
self._setup_adapter(namespace, route_prefix)
def init_app(self, app, sqla, namespace='api', route_prefix='/api'):
"""
Initialize the adapter if it hasn't already been initialized.
:param app: Flask application
:param sqla: Flask-SQLAlchemy instance
:param namespace: Prefixes all generated routes
:param route_prefix: The base path for the generated routes
"""
self.app = app
self.sqla = sqla
self._setup_adapter(namespace, route_prefix)
def wrap_handler(self, api_types, methods, endpoints):
"""
Allow for a handler to be wrapped in a chain.
:param api_types: Types to wrap handlers for
:param methods: Methods to wrap handlers for
:param endpoints: Endpoints to wrap handlers for
"""
def wrapper(fn):
@wraps(fn)
def wrapped(*args, **kwargs):
return fn(*args, **kwargs)
for api_type in api_types:
for method in methods:
for endpoint in endpoints:
key = (api_type, method, endpoint)
self._handler_chains.setdefault(key, [])
self._handler_chains[key].append(wrapped)
return wrapped
return wrapper
def _call_next(self, handler_chain):
"""
Generates an express-like chain for handling requests.
:param handler_chain: The current chain of handlers
"""
def wrapped(*args, **kwargs):
if len(handler_chain) == 1:
return handler_chain[0](*args, **kwargs)
else:
return handler_chain[0](self._call_next(handler_chain[1:]),
*args, **kwargs)
return wrapped
def _setup_adapter(self, namespace, route_prefix):
"""
Initialize the serializer and loop through the views to generate them.
:param namespace: Prefix for generated endpoints
:param route_prefix: Prefix for route patterns
"""
self.serializer = JSONAPI(self.sqla.Model)
for view in views:
method, endpoint = view
pattern = route_prefix + endpoint.value
name = '{}_{}_{}'.format(namespace, method.name, endpoint.name)
view = self._generate_view(method, endpoint)
self.app.add_url_rule(pattern + '/',
name + '_slashed',
view,
methods=[method.name],
strict_slashes=False)
self.app.add_url_rule(pattern, name, view, methods=[method.name])
def _generate_view(self, method, endpoint):
"""
Generate a view for the specified method and endpoint.
:param method: HTTP Method
:param endpoint: Pattern
"""
def new_view(**kwargs):
if method == Method.GET:
data = request.args
else:
content_length = request.headers.get('content-length', None)
if content_length and int(content_length) > 0:
content_type = request.headers.get('content-type', None)
if content_type != 'application/vnd.api+json':
data = MissingContentTypeError().data
data = json.dumps(data, cls=JSONAPIEncoder)
response = make_response(data)
response.status_code = 409
response.content_type = 'application/vnd.api+json'
return response
data = request.get_json(force=True)
else:
data = None
event_kwargs = {
'method': method,
'endpoint': endpoint,
'data': data,
'req_args': kwargs
}
results = self.on_request.send(self, **event_kwargs)
data = override(data, results)
args = [self.sqla.session, data, kwargs['api_type']]
if 'obj_id' in kwargs.keys():
args.append(kwargs['obj_id'])
if 'relationship' in kwargs.keys():
args.append(kwargs['relationship'])
try:
attr = '{}_{}'.format(method.name, endpoint.name).lower()
handler = getattr(self.serializer, attr)
handler_chain = list(self._handler_chains.get((
kwargs['api_type'], method, endpoint), []))
handler_chain.append(handler)
chained_handler = self._call_next(handler_chain)
response = chained_handler(*args)
results = self.on_success.send(self,
response=response,
**event_kwargs)
response = override(response, results)
except BaseError as exc:
self.sqla.session.rollback()
results = self.on_error.send(self, error=exc, **event_kwargs)
response = override(exc, results)
rendered_response = make_response('')
if response.status_code != 204:
data = json.dumps(response.data, cls=self.json_encoder)
rendered_response = make_response(data)
rendered_response.status_code = response.status_code
rendered_response.content_type = 'application/vnd.api+json'
results = self.on_response.send(self,
response=rendered_response,
**event_kwargs)
return override(rendered_response, results)
return new_view
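# Minimal wiring sketch (illustrative; it assumes Flask-SQLAlchemy is
# installed and that models serializable by sqlalchemy_jsonapi exist under
# the API type 'users' -- both are assumptions, not part of this module).
if __name__ == "__main__":
    from flask import Flask
    from flask_sqlalchemy import SQLAlchemy

    demo_app = Flask(__name__)
    demo_app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite://'
    db = SQLAlchemy(demo_app)
    api = FlaskJSONAPI(demo_app, db)

    # Handlers can be chained per api_type/method/endpoint, express-style:
    # each handler receives the next handler in the chain as its first
    # argument and decides whether to call it.
    @api.wrap_handler(['users'], [Method.GET], [Endpoint.COLLECTION])
    def log_user_listing(next_handler, session, data, api_type):
        print('GET collection of %s' % api_type)
        return next_handler(session, data, api_type)

    demo_app.run()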
| {
"content_hash": "04d2227caa31bc5211f6c8c941849419",
"timestamp": "",
"source": "github",
"line_count": 269,
"max_line_length": 78,
"avg_line_length": 34.86617100371747,
"alnum_prop": 0.5641326367416569,
"repo_name": "emilecaron/sqlalchemy-jsonapi",
"id": "4b432840d28ff6d7841122198d5aabd4f3ad2a67",
"size": "9379",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sqlalchemy_jsonapi/flaskext.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "96241"
}
],
"symlink_target": ""
} |
""" Setup file """
from setuptools import setup, find_packages
import os
import re
HERE = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(HERE, 'README.rst')).read()
CHANGES = open(os.path.join(HERE, 'CHANGES.rst')).read()
# Remove custom RST extensions for pypi
CHANGES = re.sub(r'\(\s*:(issue|pr|sha):.*?\)', '', CHANGES)
CHANGES = re.sub(r':ref:`(.*?) <.*>`', r'\1', CHANGES)
REQUIREMENTS = [
'boto',
# We're doing enough subclassing and monkey patching to where we really do
# need to lock this in to a specific version.
'distlib==0.2.3',
'paste',
'passlib',
'pycrypto',
'pyramid',
'pyramid_beaker',
'pyramid_duh>=0.1.1',
'pyramid_jinja2',
'pyramid_rpc',
'pyramid_tm',
'rsa',
'six',
'transaction',
'zope.sqlalchemy',
]
TEST_REQUIREMENTS = [
'flywheel',
'mock',
'moto',
'nose',
'redis',
'requests',
'webtest',
]
if __name__ == "__main__":
setup(
name='pypicloud',
version='0.6.3-rk',
description='Private PyPI backed by S3',
long_description=README + '\n\n' + CHANGES,
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Development Status :: 4 - Beta',
'Framework :: Pyramid',
'Intended Audience :: System Administrators',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Topic :: Internet :: WWW/HTTP',
'Topic :: System :: Systems Administration',
],
license='MIT',
author='Steven Arcangeli',
author_email='[email protected]',
url='http://pypicloud.readthedocs.org/',
keywords='pypi s3 cheeseshop package',
platforms='any',
zip_safe=False,
include_package_data=True,
packages=find_packages(exclude=('tests',)),
entry_points={
'console_scripts': [
'pypicloud-gen-password = pypicloud.scripts:gen_password',
'pypicloud-make-config = pypicloud.scripts:make_config',
'ppc-gen-password = pypicloud.scripts:gen_password',
'ppc-make-config = pypicloud.scripts:make_config',
'ppc-migrate = pypicloud.scripts:migrate_packages',
'ppc-export = pypicloud.scripts:export_access',
'ppc-import = pypicloud.scripts:import_access',
],
'paste.app_factory': [
'main = pypicloud:main',
],
},
install_requires=REQUIREMENTS,
tests_require=REQUIREMENTS + TEST_REQUIREMENTS,
test_suite='tests',
extras_require={
'ldap': ['python-ldap >= 2.4.0'],
'server': ['waitress'],
'dynamo': ['flywheel >= 0.2.0'],
},
)
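# The optional dependency groups declared in extras_require above can be
# selected at install time, e.g. (illustrative):
#
#     pip install pypicloud[server]        # adds waitress
#     pip install pypicloud[ldap,dynamo]   # adds python-ldap and flywheel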
| {
"content_hash": "f6a56015061bc63a047db8f5102ce30e",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 78,
"avg_line_length": 31.26595744680851,
"alnum_prop": 0.547465124191902,
"repo_name": "rubikloud/pypicloud",
"id": "ef2c67a5f94ac00e81d69f26c9d2d2f965f749de",
"size": "2939",
"binary": false,
"copies": "1",
"ref": "refs/heads/rubikloud",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "397"
},
{
"name": "HTML",
"bytes": "24656"
},
{
"name": "JavaScript",
"bytes": "26246"
},
{
"name": "Python",
"bytes": "287319"
},
{
"name": "Shell",
"bytes": "2143"
}
],
"symlink_target": ""
} |
import numpy as np
from pySDC.implementations.problem_classes.HeatEquation_1D_FD_periodic import heat1d_periodic
from pySDC.implementations.datatype_classes.mesh import mesh
from pySDC.implementations.collocation_classes.gauss_radau_right import CollGaussRadau_Right
from pySDC.implementations.sweeper_classes.generic_implicit import generic_implicit
from pySDC.implementations.transfer_classes.TransferMesh import mesh_to_mesh
from pySDC.implementations.controller_classes.allinclusive_classic_nonMPI import allinclusive_classic_nonMPI
from pySDC.implementations.controller_classes.allinclusive_multigrid_nonMPI import allinclusive_multigrid_nonMPI
from pySDC.helpers.stats_helper import filter_stats, sort_stats
from playgrounds.HeatEquation.HookClass_error_output import error_output
def main():
"""
A simple test program to do PFASST runs for the heat equation
"""
# set up number of parallel time-steps to run PFASST with
num_proc = 16
# initialize level parameters
level_params = dict()
level_params['restol'] = 1E-08
level_params['dt'] = 1.0 / num_proc
level_params['nsweeps'] = [3, 1]
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['collocation_class'] = CollGaussRadau_Right
sweeper_params['num_nodes'] = [3]
sweeper_params['QI'] = ['LU2', 'LU'] # For the IMEX sweeper, the LU-trick can be activated for the implicit part
# initialize problem parameters
problem_params = dict()
problem_params['nu'] = 0.1 # diffusion coefficient
problem_params['freq'] = -1 # frequency for the test value
problem_params['nvars'] = [128, 64] # number of degrees of freedom for each level
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 50
# initialize space transfer parameters
space_transfer_params = dict()
space_transfer_params['rorder'] = 2
space_transfer_params['iorder'] = 2
space_transfer_params['periodic'] = True
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 30
# controller_params['hook_class'] = error_output
# fill description dictionary for easy step instantiation
description = dict()
description['problem_class'] = heat1d_periodic # pass problem class
description['problem_params'] = problem_params # pass problem parameters
description['dtype_u'] = mesh # pass data type for u
description['dtype_f'] = mesh # pass data type for f
description['sweeper_class'] = generic_implicit # pass sweeper (see part B)
description['sweeper_params'] = sweeper_params # pass sweeper parameters
description['level_params'] = level_params # pass level parameters
description['step_params'] = step_params # pass step parameters
description['space_transfer_class'] = mesh_to_mesh # pass spatial transfer class
    description['space_transfer_params'] = space_transfer_params # pass parameters for spatial transfer
# set time parameters
t0 = 0.0
Tend = 1.0
# instantiate controller
controller = allinclusive_multigrid_nonMPI(num_procs=num_proc, controller_params=controller_params,
description=description)
# get initial values on finest level
P = controller.MS[0].levels[0].prob
uinit = P.u_exact(t0)
# call main function to get things done...
uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)
# compute exact solution and compare
uex = P.u_exact(Tend)
err = abs(uex - uend)
# filter statistics by type (number of iterations)
filtered_stats = filter_stats(stats, type='niter')
# convert filtered statistics to list of iterations count, sorted by process
iter_counts = sort_stats(filtered_stats, sortby='time')
# compute and print statistics
for item in iter_counts:
out = 'Number of iterations for time %4.2f: %2i' % item
print(out)
niters = np.array([item[1] for item in iter_counts])
out = ' Mean number of iterations: %4.2f' % np.mean(niters)
print(out)
out = ' Range of values for number of iterations: %2i ' % np.ptp(niters)
print(out)
out = ' Position of max/min number of iterations: %2i -- %2i' % \
(int(np.argmax(niters)), int(np.argmin(niters)))
print(out)
out = ' Std and var for number of iterations: %4.2f -- %4.2f' % (float(np.std(niters)), float(np.var(niters)))
print(out)
print('CFL number: %4.2f' % (level_params['dt'] * problem_params['nu'] / (1.0 / problem_params['nvars'][0])**2))
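    # With the defaults above (dt = 1/16, nu = 0.1, nvars[0] = 128) the printed
    # value is 0.0625 * 0.1 / (1/128)**2 = 102.4, well above the explicit
    # forward-Euler stability limit of 0.5, so an implicit sweeper is
    # appropriate here.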
print('Error: %8.4e' % err)
if __name__ == "__main__":
main()
| {
"content_hash": "0981f6ffe258451252208c04d5a1dc91",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 117,
"avg_line_length": 40.31896551724138,
"alnum_prop": 0.6886893307675861,
"repo_name": "danielru/pySDC",
"id": "384ce2bb9ac1e563124dfd9c3e1a03a583eb1d43",
"size": "4677",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "playgrounds/HeatEquation/periodic_playground.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "866807"
}
],
"symlink_target": ""
} |
from restbasetest import *
from common.rest.storage_helper import VolumeHelper, VolumeSnapshotHelper
class TestVolumeSnapshotRequests(RESTBaseTest):
@classmethod
def setup_class(cls):
super(TestVolumeSnapshotRequests, cls).setup_class()
cls.vhelper = VolumeHelper(cls.utils)
cls.shelper = VolumeSnapshotHelper(cls.utils)
# remove snapshots and volumes to relieve storage
cls.utils.cleanup_objects(cls.shelper.delete_snapshot, 'snapshots')
cls.utils.cleanup_objects(cls.vhelper.delete_volume, 'volumes', name_key='displayName')
def teardown(self):
self.utils.cleanup_objects(self.shelper.delete_snapshot, 'snapshots')
self.utils.cleanup_objects(self.vhelper.delete_volume, 'volumes', name_key='displayName')
def test_list_of_snapshots(self):
snapshots = self.utils.get_list('snapshots')
ok_(type(snapshots) == list, "Unable to get list of snapshots.")
def test_create_show_delete_snapshot(self):
        # operations with volumes take a lot of storage and time, so they are implemented as one sequence.
# create volume
vol = self.vhelper.create_volume()
# create volume snapshot
created_snap = self.shelper.create_snapshot(parameters={'id': vol['id']})
# verify snapshot for correct volume was created
ok_(created_snap['volumeId'] == vol['id'], "Unable to create volume snapshot.")
# show snapshot
shown_snap = self.shelper.show_snapshot(created_snap['id'])
ok_(created_snap == shown_snap,
"'Show snapshot' failed. Expected: %s, Actual: %s." % (created_snap, shown_snap))
# delete snapshot
res = self.shelper.delete_snapshot(created_snap['id'])
ok_(res is True, "Unable to delete volume snapshot.")
# just for local debugging
if __name__ == "__main__":
t = TestVolumeSnapshotRequests()
t.setup_class()
t.test_list_of_snapshots()
t.test_create_show_delete_snapshot()
t.teardown()
| {
"content_hash": "9c548e4fb59107b328f8fd13f235a907",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 102,
"avg_line_length": 39.490196078431374,
"alnum_prop": 0.6708043694141013,
"repo_name": "paypal/aurora",
"id": "854bf10855f6abc83466d97f1b2797b86e2be538",
"size": "2014",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "qa/rest_tests/test_volumesnapshot_requests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1208418"
},
{
"name": "Groovy",
"bytes": "524327"
},
{
"name": "Java",
"bytes": "1221"
},
{
"name": "JavaScript",
"bytes": "3179536"
},
{
"name": "Python",
"bytes": "356151"
},
{
"name": "Shell",
"bytes": "14218"
}
],
"symlink_target": ""
} |
import test.test_support
from test.test_support import verbose
import random
import re
import sys
import threading
import thread
import time
import unittest
import weakref
from test import lock_tests
# A trivial mutable counter.
class Counter(object):
def __init__(self):
self.value = 0
def inc(self):
self.value += 1
def dec(self):
self.value -= 1
def get(self):
return self.value
class TestThread(threading.Thread):
def __init__(self, name, testcase, sema, mutex, nrunning):
threading.Thread.__init__(self, name=name)
self.testcase = testcase
self.sema = sema
self.mutex = mutex
self.nrunning = nrunning
def run(self):
delay = random.random() / 10000.0
if verbose:
print 'task %s will run for %.1f usec' % (
self.name, delay * 1e6)
with self.sema:
with self.mutex:
self.nrunning.inc()
if verbose:
print self.nrunning.get(), 'tasks are running'
self.testcase.assert_(self.nrunning.get() <= 3)
time.sleep(delay)
if verbose:
print 'task', self.name, 'done'
with self.mutex:
self.nrunning.dec()
self.testcase.assert_(self.nrunning.get() >= 0)
if verbose:
print '%s is finished. %d tasks are running' % (
self.name, self.nrunning.get())
class ThreadTests(unittest.TestCase):
# Create a bunch of threads, let each do some work, wait until all are
# done.
def test_various_ops(self):
# This takes about n/3 seconds to run (about n/3 clumps of tasks,
# times about 1 second per clump).
NUMTASKS = 10
# no more than 3 of the 10 can run at once
sema = threading.BoundedSemaphore(value=3)
mutex = threading.RLock()
numrunning = Counter()
threads = []
for i in range(NUMTASKS):
t = TestThread("<thread %d>"%i, self, sema, mutex, numrunning)
threads.append(t)
self.failUnlessEqual(t.ident, None)
self.assert_(re.match('<TestThread\(.*, initial\)>', repr(t)))
t.start()
if verbose:
print 'waiting for all tasks to complete'
for t in threads:
t.join(NUMTASKS)
self.assert_(not t.is_alive())
self.failIfEqual(t.ident, 0)
self.assertFalse(t.ident is None)
self.assert_(re.match('<TestThread\(.*, \w+ -?\d+\)>', repr(t)))
if verbose:
print 'all tasks done'
self.assertEqual(numrunning.get(), 0)
def test_ident_of_no_threading_threads(self):
# The ident still must work for the main thread and dummy threads.
self.assertFalse(threading.currentThread().ident is None)
def f():
ident.append(threading.currentThread().ident)
done.set()
done = threading.Event()
ident = []
thread.start_new_thread(f, ())
done.wait()
self.assertFalse(ident[0] is None)
# Kill the "immortal" _DummyThread
del threading._active[ident[0]]
# run with a small(ish) thread stack size (256kB)
def test_various_ops_small_stack(self):
if verbose:
print 'with 256kB thread stack size...'
try:
threading.stack_size(262144)
except thread.error:
if verbose:
print 'platform does not support changing thread stack size'
return
self.test_various_ops()
threading.stack_size(0)
# run with a large thread stack size (1MB)
def test_various_ops_large_stack(self):
if verbose:
print 'with 1MB thread stack size...'
try:
threading.stack_size(0x100000)
except thread.error:
if verbose:
print 'platform does not support changing thread stack size'
return
self.test_various_ops()
threading.stack_size(0)
def test_foreign_thread(self):
# Check that a "foreign" thread can use the threading module.
def f(mutex):
# Calling current_thread() forces an entry for the foreign
# thread to get made in the threading._active map.
threading.current_thread()
mutex.release()
mutex = threading.Lock()
mutex.acquire()
tid = thread.start_new_thread(f, (mutex,))
# Wait for the thread to finish.
mutex.acquire()
self.assert_(tid in threading._active)
self.assert_(isinstance(threading._active[tid],
threading._DummyThread))
del threading._active[tid]
# PyThreadState_SetAsyncExc() is a CPython-only gimmick, not (currently)
# exposed at the Python level. This test relies on ctypes to get at it.
def test_PyThreadState_SetAsyncExc(self):
try:
import ctypes
except ImportError:
if verbose:
print "test_PyThreadState_SetAsyncExc can't import ctypes"
return # can't do anything
set_async_exc = ctypes.pythonapi.PyThreadState_SetAsyncExc
class AsyncExc(Exception):
pass
exception = ctypes.py_object(AsyncExc)
# `worker_started` is set by the thread when it's inside a try/except
# block waiting to catch the asynchronously set AsyncExc exception.
# `worker_saw_exception` is set by the thread upon catching that
# exception.
worker_started = threading.Event()
worker_saw_exception = threading.Event()
class Worker(threading.Thread):
def run(self):
self.id = thread.get_ident()
self.finished = False
try:
while True:
worker_started.set()
time.sleep(0.1)
except AsyncExc:
self.finished = True
worker_saw_exception.set()
t = Worker()
t.daemon = True # so if this fails, we don't hang Python at shutdown
t.start()
if verbose:
print " started worker thread"
# Try a thread id that doesn't make sense.
if verbose:
print " trying nonsensical thread id"
result = set_async_exc(ctypes.c_long(-1), exception)
self.assertEqual(result, 0) # no thread states modified
# Now raise an exception in the worker thread.
if verbose:
print " waiting for worker thread to get started"
worker_started.wait()
if verbose:
print " verifying worker hasn't exited"
self.assert_(not t.finished)
if verbose:
print " attempting to raise asynch exception in worker"
result = set_async_exc(ctypes.c_long(t.id), exception)
self.assertEqual(result, 1) # one thread state modified
if verbose:
print " waiting for worker to say it caught the exception"
worker_saw_exception.wait(timeout=10)
self.assert_(t.finished)
if verbose:
print " all OK -- joining worker"
if t.finished:
t.join()
# else the thread is still running, and we have no way to kill it
def test_limbo_cleanup(self):
# Issue 7481: Failure to start thread should cleanup the limbo map.
def fail_new_thread(*args):
raise thread.error()
_start_new_thread = threading._start_new_thread
threading._start_new_thread = fail_new_thread
try:
t = threading.Thread(target=lambda: None)
self.assertRaises(thread.error, t.start)
self.assertFalse(
t in threading._limbo,
"Failed to cleanup _limbo map on failure of Thread.start().")
finally:
threading._start_new_thread = _start_new_thread
def test_finalize_runnning_thread(self):
# Issue 1402: the PyGILState_Ensure / _Release functions may be called
# very late on python exit: on deallocation of a running thread for
# example.
try:
import ctypes
except ImportError:
if verbose:
print("test_finalize_with_runnning_thread can't import ctypes")
return # can't do anything
import subprocess
rc = subprocess.call([sys.executable, "-c", """if 1:
import ctypes, sys, time, thread
# This lock is used as a simple event variable.
ready = thread.allocate_lock()
ready.acquire()
# Module globals are cleared before __del__ is run
# So we save the functions in class dict
class C:
ensure = ctypes.pythonapi.PyGILState_Ensure
release = ctypes.pythonapi.PyGILState_Release
def __del__(self):
state = self.ensure()
self.release(state)
def waitingThread():
x = C()
ready.release()
time.sleep(100)
thread.start_new_thread(waitingThread, ())
ready.acquire() # Be sure the other thread is waiting.
sys.exit(42)
"""])
self.assertEqual(rc, 42)
def test_finalize_with_trace(self):
# Issue1733757
# Avoid a deadlock when sys.settrace steps into threading._shutdown
import subprocess
rc = subprocess.call([sys.executable, "-c", """if 1:
import sys, threading
# A deadlock-killer, to prevent the
# testsuite to hang forever
def killer():
import os, time
time.sleep(2)
print 'program blocked; aborting'
os._exit(2)
t = threading.Thread(target=killer)
t.daemon = True
t.start()
# This is the trace function
def func(frame, event, arg):
threading.current_thread()
return func
sys.settrace(func)
"""])
self.failIf(rc == 2, "interpreted was blocked")
self.failUnless(rc == 0, "Unexpected error")
def test_join_nondaemon_on_shutdown(self):
# Issue 1722344
# Raising SystemExit skipped threading._shutdown
import subprocess
p = subprocess.Popen([sys.executable, "-c", """if 1:
import threading
from time import sleep
def child():
sleep(1)
# As a non-daemon thread we SHOULD wake up and nothing
# should be torn down yet
print "Woke up, sleep function is:", sleep
threading.Thread(target=child).start()
raise SystemExit
"""],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
self.assertEqual(stdout.strip(),
"Woke up, sleep function is: <built-in function sleep>")
stderr = re.sub(r"^\[\d+ refs\]", "", stderr, re.MULTILINE).strip()
self.assertEqual(stderr, "")
def test_enumerate_after_join(self):
# Try hard to trigger #1703448: a thread is still returned in
# threading.enumerate() after it has been join()ed.
enum = threading.enumerate
old_interval = sys.getcheckinterval()
try:
for i in xrange(1, 100):
# Try a couple times at each thread-switching interval
# to get more interleavings.
sys.setcheckinterval(i // 5)
t = threading.Thread(target=lambda: None)
t.start()
t.join()
l = enum()
self.assertFalse(t in l,
"#1703448 triggered after %d trials: %s" % (i, l))
finally:
sys.setcheckinterval(old_interval)
def test_no_refcycle_through_target(self):
class RunSelfFunction(object):
def __init__(self, should_raise):
# The links in this refcycle from Thread back to self
# should be cleaned up when the thread completes.
self.should_raise = should_raise
self.thread = threading.Thread(target=self._run,
args=(self,),
kwargs={'yet_another':self})
self.thread.start()
def _run(self, other_ref, yet_another):
if self.should_raise:
raise SystemExit
cyclic_object = RunSelfFunction(should_raise=False)
weak_cyclic_object = weakref.ref(cyclic_object)
cyclic_object.thread.join()
del cyclic_object
self.assertEquals(None, weak_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_cyclic_object())))
raising_cyclic_object = RunSelfFunction(should_raise=True)
weak_raising_cyclic_object = weakref.ref(raising_cyclic_object)
raising_cyclic_object.thread.join()
del raising_cyclic_object
self.assertEquals(None, weak_raising_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_raising_cyclic_object())))
class ThreadJoinOnShutdown(unittest.TestCase):
def _run_and_join(self, script):
script = """if 1:
import sys, os, time, threading
# a thread, which waits for the main program to terminate
def joiningfunc(mainthread):
mainthread.join()
print 'end of thread'
\n""" + script
import subprocess
p = subprocess.Popen([sys.executable, "-c", script], stdout=subprocess.PIPE)
rc = p.wait()
data = p.stdout.read().replace('\r', '')
self.assertEqual(data, "end of main\nend of thread\n")
self.failIf(rc == 2, "interpreter was blocked")
self.failUnless(rc == 0, "Unexpected error")
def test_1_join_on_shutdown(self):
# The usual case: on exit, wait for a non-daemon thread
script = """if 1:
import os
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
time.sleep(0.1)
print 'end of main'
"""
self._run_and_join(script)
def test_2_join_in_forked_process(self):
# Like the test above, but from a forked interpreter
import os
if not hasattr(os, 'fork'):
return
script = """if 1:
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
print 'end of main'
"""
self._run_and_join(script)
def test_3_join_in_forked_from_thread(self):
# Like the test above, but fork() was called from a worker thread
# In the forked process, the main Thread object must be marked as stopped.
import os
if not hasattr(os, 'fork'):
return
# Skip platforms with known problems forking from a worker thread.
# See http://bugs.python.org/issue3863.
if sys.platform in ('freebsd4', 'freebsd5', 'freebsd6', 'os2emx'):
print >>sys.stderr, ('Skipping test_3_join_in_forked_from_thread'
' due to known OS bugs on'), sys.platform
return
script = """if 1:
main_thread = threading.current_thread()
def worker():
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(main_thread,))
print 'end of main'
t.start()
t.join() # Should not block: main_thread is already stopped
w = threading.Thread(target=worker)
w.start()
"""
self._run_and_join(script)
class ThreadingExceptionTests(unittest.TestCase):
# A RuntimeError should be raised if Thread.start() is called
# multiple times.
def test_start_thread_again(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, thread.start)
def test_joining_current_thread(self):
current_thread = threading.current_thread()
self.assertRaises(RuntimeError, current_thread.join);
def test_joining_inactive_thread(self):
thread = threading.Thread()
self.assertRaises(RuntimeError, thread.join)
def test_daemonize_active_thread(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, setattr, thread, "daemon", True)
class LockTests(lock_tests.LockTests):
locktype = staticmethod(threading.Lock)
class RLockTests(lock_tests.RLockTests):
locktype = staticmethod(threading.RLock)
class EventTests(lock_tests.EventTests):
eventtype = staticmethod(threading.Event)
class ConditionAsRLockTests(lock_tests.RLockTests):
# An Condition uses an RLock by default and exports its API.
locktype = staticmethod(threading.Condition)
class ConditionTests(lock_tests.ConditionTests):
condtype = staticmethod(threading.Condition)
class SemaphoreTests(lock_tests.SemaphoreTests):
semtype = staticmethod(threading.Semaphore)
class BoundedSemaphoreTests(lock_tests.BoundedSemaphoreTests):
semtype = staticmethod(threading.BoundedSemaphore)
def test_main():
test.test_support.run_unittest(LockTests, RLockTests, EventTests,
ConditionAsRLockTests, ConditionTests,
SemaphoreTests, BoundedSemaphoreTests,
ThreadTests,
ThreadJoinOnShutdown,
ThreadingExceptionTests,
)
if __name__ == "__main__":
test_main()
| {
"content_hash": "eeb521f90d0120b8cb2a80addda6ee4b",
"timestamp": "",
"source": "github",
"line_count": 516,
"max_line_length": 84,
"avg_line_length": 35.998062015503876,
"alnum_prop": 0.5584387617765815,
"repo_name": "DecipherOne/Troglodyte",
"id": "054df7ba69b6a16adba7f8902aa6cb8c7d1923e7",
"size": "18620",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Trog Build Dependencies/Python26/Lib/test/test_threading.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "586396"
},
{
"name": "C++",
"bytes": "697696"
},
{
"name": "CSS",
"bytes": "837"
},
{
"name": "Python",
"bytes": "14516232"
},
{
"name": "Shell",
"bytes": "127"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import abel
from . import basex
from . import dasch
from . import daun
from . import direct
from . import hansenlaw
from . import linbasex
from . import onion_bordas
from . import rbasex
from . import tools
from timeit import default_timer as timer
import itertools
import sys
def _ensure_list(x):
"""
Convert the argument to a list (a scalar becomes a single-element list).
"""
return [x] if np.ndim(x) == 0 else list(x)
def _roundsf(x, n):
"""
Round to n significant digits
"""
return float('{:.{p}g}'.format(x, p=n))
class Timent(object):
"""
Helper class for measuring execution times.
The constructor only initializes the timing-procedure parameters.
Use the :py:meth:`.time` method to run it for particular functions.
Parameters
----------
skip : int
number of "warm-up" iterations to perform before the measurements.
Can be specified as a negative number, then ``abs(skip)``
"warm-up" iterations are performed, but if this took more than
**duration** seconds, they are accounted towards the measured
iterations.
repeat : int
minimal number of measured iterations to perform.
Must be positive.
duration : float
minimal duration (in seconds) of the measurements.
"""
def __init__(self, skip=0, repeat=1, duration=0.0):
self.skip = int(skip)
self.repeat = int(repeat)
self.duration = float(duration)
def time(self, func, *args, **kwargs):
"""
Repeatedly executes a function at least **repeat** times and for at
least **duration** seconds (see above), then returns the average time
per iteration.
The actual number of measured iterations can be retrieved from
:py:attr:`Timent.count`.
Parameters
----------
func : callable
function to execute
*args, **kwargs : any, optional
parameters to pass to **func**
Returns
-------
float
average function execution time
Notes
-----
The measurements overhead can be estimated by executing ::
Timent(...).time(lambda: None)
with a sufficiently large number of iterations (to avoid rounding
errors due to the finite timer precision).
In 2018, this overhead was on the order of 100 ns per iteration.
"""
# Execute "skip" iterations unconditionally
t0 = timer()
for i in range(abs(self.skip)):
func(*args, **kwargs)
t = timer()
# if they took longer than "duration" and should be included
if self.skip < 0 and t - t0 > self.duration:
# account for them in the "repeat" loop
start = -self.skip
if start > self.repeat:
self.repeat = start
else:
# otherwise -- reset the timer and proceed normally
start = 0
t0 = timer()
# Execute "repeat" iterations (maybe accounting for the "skipped")
for i in range(start, self.repeat):
func(*args, **kwargs)
# Continue iterations until "duration" time passes
for i in itertools.count(self.repeat):
t = timer()
if t - t0 > self.duration:
self.count = i # save the total number of measured iterations
break
func(*args, **kwargs)
# Return the average time per iteration
return (t - t0) / self.count
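# Usage sketch (illustrative): average the cost of one call with one warm-up
# iteration and at least 0.2 s of accumulated measurements.
#
#     avg = Timent(skip=-1, repeat=3, duration=0.2).time(np.linalg.inv,
#                                                        np.eye(100))
#     print('{:.3g} s per call'.format(avg))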
class AbelTiming(object):
"""
Benchmark performance of different Abel implementations
(basis generation, forward and inverse transforms, as applicable).
Parameters
----------
n : int or sequence of int
array size(s) for the benchmark (assuming 2D square arrays (*n*, *n*))
select : str or sequence of str
methods to benchmark. Use ``'all'`` (default) for all available or
choose any combination of individual methods::
select=['basex', 'direct_C', 'direct_Python', 'hansenlaw',
'linbasex', 'onion_bordas, 'onion_peeling', 'two_point',
'three_point']
repeat : int
repeat each benchmark at least this number of times to get the average
values
t_min : float
repeat each benchmark for at least this number of seconds to get the
average values
t_max : float
do not benchmark methods at array sizes when this is expected to take
longer than this number of seconds. Notice that benchmarks for the
smallest size from **n** are always run and that the estimations can be
off by a factor of 2 or so.
verbose : boolean
determines whether benchmark progress should be reported (to stderr)
Attributes
-------
n : list of int
array sizes from the parameter **n**, sorted in ascending order
bs, fabel, iabel : dict of list of float
benchmark results — dictionaries for
bs
basis-set generation
fabel
forward Abel transform
iabel
inverse Abel transform
with methods as keys and lists of timings in milliseconds as entries.
Timings correspond to array sizes in :py:attr:`AbelTiming.n`; for
skipped benchmarks (see **t_max**) they are ``np.nan``.
Notes
-----
The results can be output in a nice format by simply
``print(AbelTiming(...))``.
Keep in mind that most methods have :math:`O(n^2)` memory and
:math:`O(n^3)` time complexity, so going from *n* = 501 to *n* = 5001
would require about 100 times more memory and take about 1000 times longer.
"""
def __init__(self, n=[301, 501], select='all',
repeat=1, t_min=0.1, t_max=np.inf,
verbose=True):
self.n = sorted(_ensure_list(n))
select = _ensure_list(select)
self.repeat = repeat
self.t_max = t_max
self.verbose = verbose
# create the timing function
self._time = Timent(skip=-1, repeat=repeat, duration=t_min).time
# which methods need half and whole images
need_half = frozenset([
'basex',
'daun',
'direct_C',
'direct_Python',
'hansenlaw',
'onion_bordas',
'onion_peeling',
'two_point',
'three_point',
])
need_whole = frozenset([
'linbasex',
'rbasex',
])
# all available methods (= union of the above sets)
all_methods = need_half | need_whole
# remove direct_C, if not supported
if not direct.cython_ext:
all_methods = all_methods - frozenset(['direct_C'])
# Select methods
if 'all' in select:
methods = all_methods
else:
methods = set()
for method in select:
if method not in all_methods:
print('Warning: Unsupported method "{}" ignored!'.
format(method))
else:
methods.add(method)
if not methods:
raise ValueError('At least one valid method must be specified!')
# dictionary for the results
self.res = {'bs': {},
'forward': {},
'inverse': {}}
# same results as separate dictionaries (aliases to the above)
self.bs = self.res['bs']
self.fabel = self.res['forward']
self.iabel = self.res['inverse']
# inverse speed for time estimations
self._pace = {}
# Loop over all image sizes
for ni in self.n:
self.ni = int(ni)
# image height and half-width
self.h, self.w = self.ni, self.ni // 2 + 1
# We transform a rectangular image, since we are making the
# assumption that we are transforming just the "right side" of
# a square image.
# see: https://github.com/PyAbel/PyAbel/issues/207
self._vprint('n =', self.ni)
# The following code tries to catch the interruption signal
# (Ctrl+C) to abort as soon as possible but preserve the available
# results. Setting a negative time limit makes all remaining
# benchmarks to skip (calling them is still needed to fill the
# results with nans).
# create needed images (half and/or whole)
if (self.t_max >= 0): # (do not create while aborting)
try:
if methods & need_half:
self.half_image = np.random.randn(self.h, self.w)
if methods & need_whole:
self.whole_image = np.random.randn(self.h, self.h)
except (KeyboardInterrupt, MemoryError) as e:
self._vprint(repr(e) + ' during image creation!'
' Skipping the rest...')
self.t_max = -1.0
# (the images will not be used, so leaving them as is)
# call benchmark (see below) for each method at this image size
for method in methods:
self._vprint(' ', method)
try:
getattr(self, '_time_' + method)()
except (KeyboardInterrupt, MemoryError) as e:
self._vprint('\n' + repr(e) + '! Skipping the rest...')
self.t_max = -1.0
# rerun this interrupted benchmark to nan-fill its results
getattr(self, '_time_' + method)()
# discard images
self.half_image = None
self.whole_image = None
self._vprint('')
def _vprint(self, *args, **kwargs):
"""
Print to stderr, only if verbose=True.
"""
if self.verbose:
print(*args, file=sys.stderr, **kwargs)
sys.stderr.flush() # (Python 3 buffers stderr. Why?!)
def _append(self, kind, method, result):
"""
Store one result, ensuring that the results array exists.
"""
if method not in self.res[kind]:
self.res[kind][method] = []
self.res[kind][method].append(result)
def _benchmark(self,
kind, method,
func, *args, **kwargs):
"""
Run benchmark for the function with arguments and store the result.
"""
self._append(kind, method,
self._time(func, *args, **kwargs) * 1000) # [s] to [ms]
def _skip(*param):
"""
Decorator for benchmarking functions.
Adds a check whether the estimated execution time would exceed t_max.
If so, fills the results with np.nan, otherwise executes the
benchmarking code.
Parameters are tuples "(kind, method)". Either item can be a list, then
all combinations of kind(s) and method(s) are implied. Altogether the
set of these kind–method pairs must be the same as in the "normal"
execution results.
"""
# assemble all kind–method pairs
res_keys = []
for p in param:
res_keys += itertools.product(*map(_ensure_list, p))
def decorator(f):
method = f.__name__[6:] # (remove initial "_time_")
def decorated(self):
# get the estimated time (use 0 if cannot) and report it
t_est = self._pace.get(method, 0) * self.ni**3
self._vprint(' estimated ' +
('{:g} s'.format(_roundsf(t_est, 2)) if t_est
else '???'), end='')
# skip the benchmark if it would take too long
if t_est > self.t_max:
self._vprint(' -- skipped')
# fill the results with nan
for k, m in res_keys:
self._append(k, m, np.nan)
return
else: # otherwise run the benchmark
f(self)
# calculate the actual total time and report it
t = (sum(self.res[k][m][-1] for k, m in res_keys) *
self.repeat) / 1000 # [ms] -> [s]
self._vprint(', actually {:.3f} s'.format(t))
# save the pace for future estimations
self._pace[method] = t / self.ni**3
return decorated
return decorator
# Benchmarking functions for each method.
# Must be named "_time_method", where "method" is as in "select".
# Do not take or return anything, but use instance variables:
# parameters:
# self.ni, self.h, self.w -- image size, height, half-width,
# self.whole_image, self.half_image -- image (part) to transform
# results:
# self.res[kind][method] = [timings] -- appended for each image size,
# use np.nan for skipped points
# kind = 'bs' (basis), 'forward', 'inverse' -- as applicable
# method -- as above, but can also include variants (like in basex)
@_skip(('bs', 'basex'),
(['inverse', 'forward'], ['basex', 'basex(var.reg.)']))
def _time_basex(self):
# benchmark the basis generation (default parameters)
def gen_basis():
basex.cache_cleanup()
basex.get_bs_cached(self.w, basis_dir=None)
self._benchmark('bs', 'basex',
gen_basis)
# benchmark all transforms
for direction in ['inverse', 'forward']: # (default first)
# get the transform matrix (default parameters)
A = basex.get_bs_cached(self.w, basis_dir=None,
direction=direction)
# benchmark the transform itself
self._benchmark(direction, 'basex',
basex.basex_core_transform,
self.half_image, A)
# benchmark the transform with variable regularization
def basex_var():
A = basex.get_bs_cached(self.w, reg=1.0+np.random.random(),
basis_dir=None, direction=direction)
basex.basex_core_transform(self.half_image, A)
self._benchmark(direction, 'basex(var.reg.)',
basex_var)
# discard the unneeded transform matrix
basex.cache_cleanup(direction)
# discard all caches
basex.cache_cleanup()
@_skip(('bs', 'daun'),
(['inverse', 'forward'], 'daun'),
('inverse', 'daun(var.reg.)'))
def _time_daun(self):
# benchmark the basis generation (default parameters)
def gen_basis():
daun.cache_cleanup()
daun.get_bs_cached(self.w)
self._benchmark('bs', 'daun',
gen_basis)
# benchmark all transforms
for direction in ['inverse', 'forward']: # (default first)
# cache the transform matrix
daun.get_bs_cached(self.w, direction=direction)
# benchmark the transform itself
self._benchmark(direction, 'daun',
daun.daun_transform,
self.half_image, direction=direction,
verbose=False)
if direction == 'inverse':
# benchmark the transform with variable regularization
def daun_var():
daun.daun_transform(self.half_image,
reg=1.0 + np.random.random(),
verbose=False)
self._benchmark('inverse', 'daun(var.reg.)',
daun_var)
# discard the unneeded transform matrix
daun.cache_cleanup(direction)
# discard all caches
daun.cache_cleanup()
@_skip((['inverse', 'forward'], 'direct_C'))
def _time_direct_C(self):
for direction in ['inverse', 'forward']:
self._benchmark(direction, 'direct_C',
direct.direct_transform,
self.half_image, direction=direction, backend='C')
@_skip((['inverse', 'forward'], 'direct_Python'))
def _time_direct_Python(self):
for direction in ['inverse', 'forward']:
self._benchmark(direction, 'direct_Python',
direct.direct_transform,
self.half_image, direction=direction,
backend='python')
@_skip((['inverse', 'forward'], 'hansenlaw'))
def _time_hansenlaw(self):
for direction in ['inverse', 'forward']:
self._benchmark(direction, 'hansenlaw',
hansenlaw.hansenlaw_transform,
self.half_image, direction=direction)
@_skip((['bs', 'inverse'], 'linbasex'))
def _time_linbasex(self):
# benchmark the basis generation (default parameters)
def gen_basis():
linbasex.cache_cleanup()
linbasex.get_bs_cached(self.h, basis_dir=None)
self._benchmark('bs', 'linbasex',
gen_basis)
# get the basis (is already cached)
basis = linbasex.get_bs_cached(self.h, basis_dir=None)
# benchmark the transform
self._benchmark('inverse', 'linbasex',
linbasex._linbasex_transform_with_basis,
self.whole_image, basis)
# discard all caches
linbasex.cache_cleanup()
@_skip(('inverse', 'onion_bordas'))
def _time_onion_bordas(self):
self._benchmark('inverse', 'onion_bordas',
onion_bordas.onion_bordas_transform,
self.half_image)
# (Generic function for all Dasch methods; not called directly.)
def _time_dasch(self, method):
# benchmark the basis generation (default parameters)
def gen_basis():
dasch.cache_cleanup()
dasch.get_bs_cached(method, self.w, basis_dir=None)
self._benchmark('bs', method,
gen_basis)
# get the transform matrix (is already cached)
D = dasch.get_bs_cached(method, self.w, basis_dir=None)
# benchmark the transform
self._benchmark('inverse', method,
dasch.dasch_transform,
self.half_image, D)
# discard all caches
dasch.cache_cleanup()
@_skip((['bs', 'inverse'], 'onion_peeling'))
def _time_onion_peeling(self):
self._time_dasch('onion_peeling')
@_skip(('bs', 'rbasex'),
(['inverse', 'forward'], ['rbasex', 'rbasex(None)']))
def _time_rbasex(self):
# benchmark the basis generation (default parameters)
def gen_basis():
rbasex.cache_cleanup()
rbasex.get_bs_cached(self.w - 1) # Rmax = half-width - 1
self._benchmark('bs', 'rbasex',
gen_basis)
# benchmark all transforms
for direction in ['inverse', 'forward']: # (default first)
# warm-up run to cache "distributions" (basis is already cached)
rbasex.rbasex_transform(self.whole_image)
# benchmark the transform
self._benchmark(direction, 'rbasex',
rbasex.rbasex_transform,
self.whole_image)
# same without output image
self._benchmark(direction, 'rbasex(None)',
rbasex.rbasex_transform,
self.whole_image, out=None)
# discard the unneeded transform matrix
basex.cache_cleanup(direction)
# discard all caches
rbasex.cache_cleanup()
@_skip((['bs', 'inverse'], 'three_point'))
def _time_three_point(self):
self._time_dasch('three_point')
@_skip((['bs', 'inverse'], 'two_point'))
def _time_two_point(self):
self._time_dasch('two_point')
# (End of benchmarking functions.)
def __repr__(self):
import platform
out = ['PyAbel benchmark run on {}\n'.format(platform.processor()),
'time in milliseconds']
# field widths are chosen to accommodate up to:
# method = 15 characters
# ni = 99999 (would require at least 75 GB RAM)
# time = 9999999.9 ms (almost 3 hours)
# data columns are 9 characters wide and separated by 3 spaces
TITLE_FORMAT = '=== {} ==='
HEADER_ROW = 'Method ' + \
''.join([' {:>9}'.
format('n = {}'.format(ni)) for ni in self.n])
SEP_ROW = '-' * len(HEADER_ROW)
ROW_FORMAT = '{:15}' + ' {:9.1f}' * len(self.n)
def print_benchmark(name, res):
title = '{:=<{w}}'.format(TITLE_FORMAT.format(name),
w=len(SEP_ROW))
out = ['\n' + title + '\n']
out += [HEADER_ROW]
out += [SEP_ROW]
for name, row in sorted(res.items()):
out += [ROW_FORMAT.format(name, *row)]
return out
if self.bs:
out += print_benchmark('Basis generation', self.bs)
out += ['']
if self.fabel:
out += print_benchmark('Forward Abel transform', self.fabel)
out += ['']
if self.iabel:
out += print_benchmark('Inverse Abel transform', self.iabel)
return '\n'.join(out)
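# Usage sketch (illustrative), following the Notes above: benchmark a couple
# of methods at a single small size and print the formatted tables.
#
#     print(AbelTiming(n=301, select=['hansenlaw', 'two_point'], t_min=0.05))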
class DistributionsTiming(object):
"""
Benchmark performance of different VMI distributions implementations.
Parameters
----------
n : int or sequence of int
array size(s) for the benchmark (assuming full images to be 2D square
arrays (*n*, *n*))
shape : str
image shape:
``'Q'``:
one quadrant ((*n* + 1)/2, (*n* + 1)/2)
``'half'`` (default):
half image (*n*, (*n* + 1)/2), vertically centered
``'full'``:
full image (*n*, *n*), centered
rmax : str or sequence of str
``'MIN'`` (default) and/or ``'all'``, see **rmax** in
:class:`abel.tools.vmi.Distributions`
order : int
highest order in the angular distributions. Even number ≥ 0.
weight : str or sequence of str
weighting to test. Use ``'all'`` for all available or choose any
combination of individual types::
weight=['none', 'sin', 'array', 'sin+array']
method : str or sequence of str
methods to benchmark. Use ``'all'`` (default) for all available or
choose any combination of individual methods::
method=['nearest', 'linear', 'remap']
repeat : int
repeat each benchmark at least this number of times to get the average
values
t_min : float
repeat each benchmark for at least this number of seconds to get the
average values
Attributes
-------
n : list of int
array sizes from the parameter **n**
results : dict of dict of dict of list of tuple of float
benchmark results — multi-level dictionary, in which
``results[method][rmax][weight]`` is the list of timings in
milliseconds corresponding to array sizes in
:py:attr:`DistributionsTiming.n`. Each timing is a tuple (*t*:sub:`1`,
*t*:sub:`∞`) with *t*:sub:`1` corresponding to single-image
(non-cached) performance, and *t*:sub:`∞` corresponding to batch
(cached) performance.
Notes
-----
The results can be output in a nice format by simply
``print(DistributionsTiming(...))``.
"""
def __init__(self, n=[301, 501], shape='half', rmax='MIN', order=2,
weight=['none', 'sin', 'sin+array'], method='all',
repeat=1, t_min=0.1):
self.n = _ensure_list(n)
if shape == 'Q':
origin = 'll'
self.shape = 'One quadrant'
elif shape == 'half':
origin = 'cl'
self.shape = 'Half image'
elif shape == 'full':
origin = 'cc'
self.shape = 'Full image'
else:
raise ValueError('Incorrect shape "{}"'.format(shape))
self.rmaxs = rmaxs = _ensure_list(rmax)
self.order = order
weights = _ensure_list(weight)
if 'all' in weights:
weights = ['none', 'sin', 'array', 'sin+array']
self.weights = weights
methods = _ensure_list(method)
if 'all' in methods:
methods = ['nearest', 'linear', 'remap']
self.methods = methods
# create the timing function
time = Timent(skip=-1, repeat=repeat, duration=t_min).time
# dictionary for the results
self.results = {m: {r: {w: [] for w in weights}
for r in rmaxs}
for m in methods}
from abel.tools.vmi import Ibeta, Distributions
# make sure that everything is loaded
# (otherwise the 1st timing call is very slow)
Ibeta(np.array([[0]]))
# Loop over all image sizes
for ni in self.n:
ni = int(ni)
# create image and weight array
rows = (ni + 1) // 2 if shape == 'Q' else ni
cols = (ni + 1) // 2 if shape in ['Q', 'half'] else ni
IM = np.random.randn(rows, cols)
warray = np.random.randn(rows, cols)
# benchmark each combination of parameters
for method in methods:
for rmax in rmaxs:
for weight in weights:
if weight == 'none':
w = {'use_sin': False, 'weights': None}
elif weight == 'sin':
w = {'use_sin': True, 'weights': None}
elif weight == 'array':
w = {'use_sin': False, 'weights': warray}
elif weight == 'sin+array':
w = {'use_sin': True, 'weights': warray}
else:
raise ValueError('Incorrect weight "{}"'.
format(weight))
# single-image
t1 = time(Ibeta,
IM, origin, rmax, order, method=method, **w)
# cached
distr = Distributions(origin, rmax, order,
method=method, **w)
distr(IM) # trigger precalculations
def distrIMIbeta(IM):
return distr(IM).Ibeta()
tn = time(distrIMIbeta,
IM)
# save results
self.results[method][rmax][weight].append((t1 * 1000,
tn * 1000))
def __repr__(self):
import platform
out = ['PyAbel benchmark run on {}\n'.format(platform.processor()),
'order = {}, time in milliseconds'.format(self.order)]
# field widths are chosen to accommodate up to:
# rmax + weight = 3 leading spaces + 14 characters
# ni = 99999
# time = 9999999.9 ms (almost 3 hours)
# data columns are 9 characters wide and separated by 3 spaces
TITLE_FORMAT = '=== ' + self.shape + ', {} ==='
HEADER_ROW = 'Method ' + \
''.join([' {:>9}'.
format('n = {}'.format(ni)) for ni in self.n])
SEP_ROW = '-' * len(HEADER_ROW)
ROW_FORMAT = ' {}, {:9}' + ' {:9.1f}' * len(self.n)
def print_benchmark(mode):
title = '{:=<{w}}'.format(TITLE_FORMAT.format(mode),
w=len(SEP_ROW))
out = ['\n' + title + '\n']
out += [HEADER_ROW]
out += [SEP_ROW]
for method in self.methods:
out += [method]
resm = self.results[method]
for rmax in self.rmaxs:
resr = resm[rmax]
for weight in self.weights:
res = resr[weight]
t = list(zip(*res))[0 if mode == 'single' else 1]
out += [ROW_FORMAT.format(rmax, str(weight), *t)]
return out
out += print_benchmark('single')
out += ['']
out += print_benchmark('cached')
return '\n'.join(out)
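# Usage sketch: time two image sizes with all methods and print the formatted
# table described in the class docstring; absolute numbers are hardware-dependent.
#   timing = DistributionsTiming(n=[301, 501], shape='half', method='all')
#   print(timing)
#   t1, tn = timing.results['linear']['MIN']['none'][0]  # single vs. cached, ms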
def is_symmetric(arr, i_sym=True, j_sym=True):
"""
    Takes in an array of shape (n, m) and checks whether it is symmetric.
Parameters
----------
arr : 1D or 2D array
    i_sym : bool
        check symmetry with respect to the 1st axis
    j_sym : bool
        check symmetry with respect to the 2nd axis
Returns
-------
    a binary array with the symmetry condition for the corresponding quadrants.
    The global symmetry can be checked with ``.all()`` on the returned array.
Notes
-----
If both **i_sym** = ``True`` and **j_sym** = ``True``, the input array is
checked for polar symmetry.
See `issue #34 comment
<https://github.com/PyAbel/PyAbel/issues/34#issuecomment-160344809>`__
    for the definition of the center of the image.
"""
Q0, Q1, Q2, Q3 = tools.symmetry.get_image_quadrants(
arr, reorient=False)
if i_sym and not j_sym:
valid_flag = [np.allclose(np.fliplr(Q1), Q0),
np.allclose(np.fliplr(Q2), Q3)]
elif not i_sym and j_sym:
valid_flag = [np.allclose(np.flipud(Q1), Q2),
np.allclose(np.flipud(Q0), Q3)]
elif i_sym and j_sym:
valid_flag = [np.allclose(np.flipud(np.fliplr(Q1)), Q3),
np.allclose(np.flipud(np.fliplr(Q0)), Q2)]
else:
        raise ValueError('Checking for symmetry with both i_sym=False '
                         'and j_sym=False does not make sense!')
return np.array(valid_flag)
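# Usage sketch: a Gaussian centered on the image midpoint is symmetric about
# both axes, so every quadrant check passes (values are illustrative only).
#   x = np.linspace(-1, 1, 100)
#   im = np.exp(-(x[:, None]**2 + x[None, :]**2))
#   assert is_symmetric(im, i_sym=True, j_sym=True).all()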
def absolute_ratio_benchmark(analytical, recon, kind='inverse'):
"""
    Checks the absolute ratio between an analytical function and the result
    of an inverse Abel reconstruction.
Parameters
----------
analytical : one of the classes from analytical, initialized
recon : 1D ndarray
        a reconstruction (i.e. an inverse Abel transform)
        given by some PyAbel implementation
    kind : str
        ``'inverse'`` (default) compares **recon** against the original
        function, ``'forward'`` compares it against the forward Abel transform
"""
mask = analytical.mask_valid
if kind == 'inverse':
func = analytical.func
elif kind == 'forward':
func = analytical.abel
err = func[mask]/recon[mask]
return err
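# Usage sketch (hypothetical names; any analytical test object exposing .func,
# .abel and .mask_valid works):
#   ratio = absolute_ratio_benchmark(ref_analytical, reconstruction, kind='inverse')
#   assert np.allclose(ratio, 1, rtol=0.05)  # within ~5 % of the reference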
| {
"content_hash": "26e60b216826a76146a14c53d8875cdc",
"timestamp": "",
"source": "github",
"line_count": 839,
"max_line_length": 79,
"avg_line_length": 37.156138259833135,
"alnum_prop": 0.5255982549560532,
"repo_name": "stggh/PyAbel",
"id": "9d4145723630196e479a29e7693e441b7fedfc0d",
"size": "31230",
"binary": false,
"copies": "2",
"ref": "refs/heads/HansenLaw-fix",
"path": "abel/benchmark.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Cython",
"bytes": "3187"
},
{
"name": "Python",
"bytes": "486460"
}
],
"symlink_target": ""
} |
"""Tests for listener subprocesses."""
# pylint: disable=protected-access,no-self-use
from unittest import TestCase
import inputs
from tests.constants import mock, PYTHON
if PYTHON == 3:
mock._magics.add('__round__')
RAW = ""
# Mocking adds an argument, whether we need it or not.
# pylint: disable=unused-argument
class MockPoint(object):
"""A pretend AppKit point object."""
# pylint: disable=too-few-public-methods,invalid-name
# pylint: disable=useless-object-inheritance
x = 600
y = 400
class BaseListenerTestCase(TestCase):
"""Tests the BaseListener class."""
def test_init(self):
"""The listener has type_codes."""
pipe = mock.MagicMock()
listener = inputs.BaseListener(pipe)
self.assertEqual(len(listener.type_codes), 14)
def test_get_timeval(self):
"""Gives seconds and microseconds."""
pipe = mock.MagicMock()
listener = inputs.BaseListener(pipe)
seconds, microseconds = listener.get_timeval()
self.assertTrue(seconds > 0)
self.assertTrue(microseconds > 0)
def test_set_timeval(self):
"""Sets the cached timeval."""
pipe = mock.MagicMock()
listener = inputs.BaseListener(pipe)
# We start with no timeval
self.assertIsNone(listener.timeval)
# We update the timeval
listener.update_timeval()
seconds, microseconds = listener.get_timeval()
self.assertTrue(seconds > 0)
self.assertTrue(microseconds > 0)
def test_create_key_event_object(self):
"""It should create an evdev object."""
pipe = mock.MagicMock()
listener = inputs.BaseListener(pipe)
eventlist = listener.create_event_object("Key", 30, 1, (100, 0))
event_info = next(inputs.iter_unpack(eventlist))
self.assertEqual(event_info, (100, 0, 1, 30, 1))
def test_create_mouse_event_object(self):
"""It also should create an evdev object."""
pipe = mock.MagicMock()
listener = inputs.BaseListener(pipe)
eventlist = listener.create_event_object("Absolute", 0, 285, (100, 1))
event_info = next(inputs.iter_unpack(eventlist))
self.assertEqual(event_info, (100, 1, 3, 0, 285))
def test_create_banana_event_object(self):
"""It should raise an exception."""
pipe = mock.MagicMock()
listener = inputs.BaseListener(pipe)
with self.assertRaises(inputs.UnknownEventType):
listener.create_event_object("Banana", 0, 285, (100, 1))
def test_create_ev_wo_timeval(self):
"""It should create an evdev object."""
pipe = mock.MagicMock()
listener = inputs.BaseListener(pipe)
eventlist = listener.create_event_object("Key", 30, 1)
event_info = next(inputs.iter_unpack(eventlist))
self.assertTrue(event_info[0] > 0)
self.assertTrue(event_info[1] > 0)
self.assertEqual(event_info[2:], (1, 30, 1))
def test_write_to_pipe(self):
"""Subprocess sends data back to the class in the mainprocess."""
pipe = mock.MagicMock()
listener = inputs.BaseListener(pipe)
listener.write_to_pipe([b'Green Eggs', b' and ', b'Ham'])
send_bytes_call = pipe.method_calls[0]
method_name = send_bytes_call[0]
args = send_bytes_call[1]
self.assertEqual(method_name, 'send_bytes')
self.assertEqual(args[0], b'Green Eggs and Ham')
def test_emulate_wheel_x(self):
"""Returns an event list for the x mouse wheel turn."""
pipe = mock.MagicMock()
listener = inputs.BaseListener(pipe)
eventlist = listener.emulate_wheel(20, 'x', (100, 1))
event_info = next(inputs.iter_unpack(eventlist))
self.assertEqual(event_info, (100, 1, 2, 6, 20))
eventlist = listener.emulate_wheel(-20, 'x', (100, 1))
event_info = next(inputs.iter_unpack(eventlist))
self.assertEqual(event_info, (100, 1, 2, 6, -20))
def test_emulate_wheel_y(self):
"""Returns an event list for the y mouse wheel turn."""
pipe = mock.MagicMock()
listener = inputs.BaseListener(pipe)
eventlist = listener.emulate_wheel(20, 'y', (100, 1))
event_info = next(inputs.iter_unpack(eventlist))
self.assertEqual(event_info, (100, 1, 2, 8, 20))
eventlist = listener.emulate_wheel(-20, 'y', (100, 1))
event_info = next(inputs.iter_unpack(eventlist))
self.assertEqual(event_info, (100, 1, 2, 8, -20))
def test_emulate_wheel_z(self):
"""Returns an event list for the z mouse wheel turn."""
pipe = mock.MagicMock()
listener = inputs.BaseListener(pipe)
eventlist = listener.emulate_wheel(20, 'z', (100, 1))
event_info = next(inputs.iter_unpack(eventlist))
self.assertEqual(event_info, (100, 1, 2, 7, 20))
eventlist = listener.emulate_wheel(-20, 'z', (100, 1))
event_info = next(inputs.iter_unpack(eventlist))
self.assertEqual(event_info, (100, 1, 2, 7, -20))
def test_emulate_wheel_win(self):
"""Returns an event list for the mouse wheel turn on Windows."""
inputs.WIN = True
pipe = mock.MagicMock()
listener = inputs.BaseListener(pipe)
eventlist = listener.emulate_wheel(240, 'x', (100, 1))
event_info = next(inputs.iter_unpack(eventlist))
self.assertEqual(event_info, (100, 1, 2, 6, 2))
eventlist = listener.emulate_wheel(-240, 'x', (100, 1))
event_info = next(inputs.iter_unpack(eventlist))
self.assertEqual(event_info, (100, 1, 2, 6, -2))
inputs.WIN = False
def test_emulate_rel(self):
"""Returns an event list for relative mouse movement."""
pipe = mock.MagicMock()
listener = inputs.BaseListener(pipe)
eventlist = listener.emulate_rel(0, 1, (100, 1))
event_info = next(inputs.iter_unpack(eventlist))
self.assertEqual(event_info, (100, 1, 2, 0, 1))
eventlist = listener.emulate_rel(0, -5, (100, 1))
event_info = next(inputs.iter_unpack(eventlist))
self.assertEqual(event_info, (100, 1, 2, 0, -5))
eventlist = listener.emulate_rel(1, 44, (100, 1))
event_info = next(inputs.iter_unpack(eventlist))
self.assertEqual(event_info, (100, 1, 2, 1, 44))
eventlist = listener.emulate_rel(1, -10, (100, 1))
event_info = next(inputs.iter_unpack(eventlist))
self.assertEqual(event_info, (100, 1, 2, 1, -10))
def test_emulate_press_down(self):
"""Returns an event list for button."""
pipe = mock.MagicMock()
listener = inputs.BaseListener(pipe)
scan_list, button_list = listener.emulate_press(
272, 589825, 1, (100, 1))
scan_info = next(inputs.iter_unpack(scan_list))
button_info = next(inputs.iter_unpack(button_list))
self.assertEqual(scan_info, (100, 1, 4, 4, 589825))
self.assertEqual(button_info, (100, 1, 1, 272, 1))
def test_emulate_press_up(self):
"""Returns an event list for button."""
pipe = mock.MagicMock()
listener = inputs.BaseListener(pipe)
scan_list, button_list = listener.emulate_press(
272, 589825, 0, (100, 1))
scan_info = next(inputs.iter_unpack(scan_list))
button_info = next(inputs.iter_unpack(button_list))
self.assertEqual(scan_info, (100, 1, 4, 4, 589825))
self.assertEqual(button_info, (100, 1, 1, 272, 0))
def test_emulate_repeat(self):
"""Returns a repeat event, e.g. doubleclick, triple click."""
pipe = mock.MagicMock()
listener = inputs.BaseListener(pipe)
repeat_list = listener.emulate_repeat(1, (100, 1))
repeat_info = next(inputs.iter_unpack(repeat_list))
self.assertEqual(repeat_info, (100, 1, 20, 2, 1))
repeat_list = listener.emulate_repeat(2, (100, 1))
repeat_info = next(inputs.iter_unpack(repeat_list))
self.assertEqual(repeat_info, (100, 1, 20, 2, 2))
repeat_list = listener.emulate_repeat(3, (100, 1))
repeat_info = next(inputs.iter_unpack(repeat_list))
self.assertEqual(repeat_info, (100, 1, 20, 2, 3))
def test_sync_marker(self):
"""Returns a sync marker."""
pipe = mock.MagicMock()
listener = inputs.BaseListener(pipe)
sync_list = listener.sync_marker((100, 1))
sync_info = next(inputs.iter_unpack(sync_list))
self.assertEqual(sync_info, (100, 1, 0, 0, 0))
sync_list = listener.sync_marker((200, 2))
sync_info = next(inputs.iter_unpack(sync_list))
self.assertEqual(sync_info, (200, 2, 0, 0, 0))
def test_emulate_abs(self):
"""Returns absolute mouse event."""
pipe = mock.MagicMock()
listener = inputs.BaseListener(pipe)
x_list, y_list = listener.emulate_abs(1324, 246, (100, 1))
x_info = next(inputs.iter_unpack(x_list))
self.assertEqual(x_info, (100, 1, 3, 0, 1324))
y_info = next(inputs.iter_unpack(y_list))
self.assertEqual(y_info, (100, 1, 3, 1, 246))
class QuartzMouseBaseListenerTestCase(TestCase):
"""Test the Mac mouse support."""
def test_init(self):
"""The created object has properties."""
pipe = mock.MagicMock()
listener = inputs.QuartzMouseBaseListener(pipe)
self.assertTrue(listener.active)
self.assertEqual(
listener.codes[1],
('Key', 272, 1, 589825))
def test_abstract_methods(self):
"""Test that they raise an exception."""
pipe = mock.MagicMock()
listener = inputs.QuartzMouseBaseListener(pipe)
event = mock.MagicMock()
with self.assertRaises(NotImplementedError):
listener._get_mouse_button_number(event)
event.assert_not_called()
event = mock.MagicMock()
with self.assertRaises(NotImplementedError):
listener._get_click_state(event)
event.assert_not_called()
event = mock.MagicMock()
with self.assertRaises(NotImplementedError):
listener._get_scroll(event)
event.assert_not_called()
event = mock.MagicMock()
with self.assertRaises(NotImplementedError):
listener._get_absolute(event)
event.assert_not_called()
event = mock.MagicMock()
with self.assertRaises(NotImplementedError):
listener._get_relative(event)
event.assert_not_called()
@mock.patch.object(
inputs.QuartzMouseBaseListener,
'_get_mouse_button_number',
return_value=1)
@mock.patch.object(
inputs.QuartzMouseBaseListener,
'_get_click_state',
return_value=1)
def test_handle_button(self,
mock_get_mouse_button_number,
mock_get_click_state):
"""Convert quartz events to evdev events."""
pipe = mock.MagicMock()
listener = inputs.QuartzMouseBaseListener(pipe)
# We begin with no events
self.assertEqual(listener.events, [])
event = mock.MagicMock()
listener.handle_button(event, 3)
# _get_mouse_button_number was called
mock_get_mouse_button_number.assert_called_once()
# get_click_state was called
mock_get_click_state.assert_called_once()
# Now there are three events
self.assertEqual(len(listener.events), 3)
first_event = next(inputs.iter_unpack(
listener.events[0]))
self.assertEqual(first_event[2:], (4, 4, 589826))
second_event = next(inputs.iter_unpack(
listener.events[1]))
self.assertEqual(second_event[2:], (1, 273, 1))
third_event = next(inputs.iter_unpack(
listener.events[2]))
self.assertEqual(third_event[2:], (20, 2, 1))
@mock.patch.object(
inputs.QuartzMouseBaseListener,
'_get_mouse_button_number',
return_value=2)
@mock.patch.object(
inputs.QuartzMouseBaseListener,
'_get_click_state',
return_value=1)
def test_handle_middle_button(self,
mock_get_mouse_button_number,
mock_get_click_state):
"""Convert quartz events to evdev events."""
pipe = mock.MagicMock()
listener = inputs.QuartzMouseBaseListener(pipe)
# We begin with no events
self.assertEqual(listener.events, [])
event = mock.MagicMock()
listener.handle_button(event, 26)
# _get_mouse_button_number was called
mock_get_mouse_button_number.assert_called_once()
# get_click_state was called
mock_get_click_state.assert_called_once()
# Now there are three events
self.assertEqual(len(listener.events), 3)
first_event = next(inputs.iter_unpack(
listener.events[0]))
self.assertEqual(first_event[2:], (4, 4, 589827))
second_event = next(inputs.iter_unpack(
listener.events[1]))
self.assertEqual(second_event[2:], (1, 274, 0))
third_event = next(inputs.iter_unpack(
listener.events[2]))
self.assertEqual(third_event[2:], (20, 2, 1))
@mock.patch.object(
inputs.QuartzMouseBaseListener,
'_get_scroll',
return_value=(2, 2))
def test_handle_scrollwheel(self,
mock_get_scroll):
"""Scroll wheel produces events."""
pipe = mock.MagicMock()
listener = inputs.QuartzMouseBaseListener(pipe)
# We begin with no evdev events
self.assertEqual(listener.events, [])
# We (pretend that we) have a Quartz event
event = mock.MagicMock()
# Let's call the method that we care about
listener.handle_scrollwheel(event)
# Now let's see what happened
# get_scroll was called
mock_get_scroll.assert_called_once()
# Do we have events
self.assertEqual(len(listener.events), 2)
first_event = next(inputs.iter_unpack(
listener.events[0]))
self.assertEqual(first_event[2:], (2, 6, 2))
second_event = next(inputs.iter_unpack(
listener.events[1]))
self.assertEqual(second_event[2:], (2, 8, 2))
@mock.patch.object(
inputs.QuartzMouseBaseListener,
'_get_absolute',
return_value=(3.1, 2.1))
def test_handle_absolute(self, mock_get_absolute):
"""Absolute mouse movement produces events."""
pipe = mock.MagicMock()
listener = inputs.QuartzMouseBaseListener(pipe)
# We begin with no evdev events
self.assertEqual(listener.events, [])
# We have a Quartz event
event = mock.MagicMock()
# Let's call the method that we care about
listener.handle_absolute(event)
# Now let's see what happened
# get_absolute was called
mock_get_absolute.assert_called_once()
# Do we have events
self.assertEqual(len(listener.events), 2)
first_event = next(inputs.iter_unpack(
listener.events[0]))
self.assertEqual(first_event[2:], (3, 0, 3))
second_event = next(inputs.iter_unpack(
listener.events[1]))
self.assertEqual(second_event[2:], (3, 1, 2))
@mock.patch.object(
inputs.QuartzMouseBaseListener,
'_get_relative',
return_value=(600, 400))
def test_handle_relative(self, mock_get_relative):
"""Relative mouse movement produces events."""
pipe = mock.MagicMock()
listener = inputs.QuartzMouseBaseListener(pipe)
# We begin with no evdev events
self.assertEqual(listener.events, [])
# We have a Quartz event
event = mock.MagicMock()
# Let's call the method that we care about
listener.handle_relative(event)
# Now let's see what happened
# get_relative was called
mock_get_relative.assert_called_once()
# Do we have events
self.assertEqual(len(listener.events), 2)
first_event = next(inputs.iter_unpack(
listener.events[0]))
self.assertEqual(first_event[2:], (2, 0, 600))
second_event = next(inputs.iter_unpack(
listener.events[1]))
self.assertEqual(second_event[2:], (2, 1, 400))
@mock.patch.object(
inputs.QuartzMouseBaseListener,
'handle_relative')
@mock.patch.object(
inputs.QuartzMouseBaseListener,
'handle_absolute')
@mock.patch.object(
inputs.QuartzMouseBaseListener,
'handle_button')
def test_handle_input(self,
mock_handle_button,
mock_handle_absolute,
mock_handle_relative):
"""Input events from Quartz are handled with the correct method."""
pipe = mock.MagicMock()
listener = inputs.QuartzMouseBaseListener(pipe)
event = mock.MagicMock()
listener.handle_input(None, 1, event, None)
# So what happened?
# The functions were called
mock_handle_button.assert_called_once_with(event, 1)
mock_handle_absolute.assert_called_once_with(event)
mock_handle_relative.assert_called_once_with(event)
# The sync marker was added
self.assertEqual(len(listener.events), 1)
first_event = next(inputs.iter_unpack(
listener.events[0]))
self.assertEqual(first_event[2:], (0, 0, 0))
# Train
# Now we must handle the scroll wheel
@mock.patch.object(
inputs.QuartzMouseBaseListener,
'handle_relative')
@mock.patch.object(
inputs.QuartzMouseBaseListener,
'handle_absolute')
@mock.patch.object(
inputs.QuartzMouseBaseListener,
'handle_scrollwheel')
def test_handle_input_scroll(
self,
mock_handle_scrollwheel,
mock_handle_absolute,
mock_handle_relative):
"""Input events from Quartz are handled with the correct method."""
pipe = mock.MagicMock()
listener = inputs.QuartzMouseBaseListener(pipe)
event = mock.MagicMock()
listener.handle_input(None, 22, event, None)
# So what happened?
# The functions were called
mock_handle_scrollwheel.assert_called_once_with(event)
mock_handle_absolute.assert_called_once_with(event)
mock_handle_relative.assert_called_once_with(event)
# The sync marker was added
self.assertEqual(len(listener.events), 1)
first_event = next(inputs.iter_unpack(
listener.events[0]))
self.assertEqual(first_event[2:], (0, 0, 0))
class AppKitMouseBaseListenerTestCase(TestCase):
"""Test the Mac mouse support."""
def test_init(self):
"""The created object has properties."""
pipe = mock.MagicMock()
listener = inputs.AppKitMouseBaseListener(pipe)
self.assertEqual(listener.events, [])
self.assertEqual(listener.codes[1][0], 'Key')
def test_get_mouse_button_number(self):
"""Get mouse number calls buttonNumber method."""
pipe = mock.MagicMock()
listener = inputs.AppKitMouseBaseListener(pipe)
event = mock.MagicMock()
button_number = listener._get_mouse_button_number(event)
call = event.method_calls[0]
self.assertEqual(call[0], 'buttonNumber')
button_number.assert_not_called()
def test_get_absolute(self):
"""Get absolute calls locationInWindow method."""
pipe = mock.MagicMock()
listener = inputs.AppKitMouseBaseListener(pipe)
event = mock.MagicMock()
button_number = listener._get_absolute(event)
call = event.method_calls[0]
self.assertEqual(call[0], 'locationInWindow')
button_number.assert_not_called()
def test_get_deltas(self):
"""Get deltas calls delta methods."""
pipe = mock.MagicMock()
listener = inputs.AppKitMouseBaseListener(pipe)
event = mock.MagicMock()
button_number = listener._get_deltas(event)
self.assertEqual(len(button_number), 3)
# Check the three method names were called.
cartesian = ('X', 'Y', 'Z')
for index, call in enumerate(event.method_calls):
method_name = 'delta' + cartesian[index]
self.assertEqual(call[0], method_name)
def test_get_event_type(self):
"""Get event type called type()."""
pipe = mock.MagicMock()
listener = inputs.AppKitMouseBaseListener(pipe)
event = mock.MagicMock()
event_type = listener._get_event_type(event)
call = event.method_calls[0]
self.assertEqual(call[0], 'type')
event_type.assert_not_called()
@mock.patch.object(
inputs.AppKitMouseBaseListener,
'_get_mouse_button_number',
return_value=2)
def test_handle_button(self, mock_get_mouse_button_number):
"""Mouse click produces an event."""
pipe = mock.MagicMock()
listener = inputs.AppKitMouseBaseListener(pipe)
# Events begin empty
self.assertEqual(listener.events, [])
event = mock.MagicMock(return_value=1)
listener.handle_button(event, 25)
self.assertEqual(len(listener.events), 2)
first_event = next(inputs.iter_unpack(
listener.events[0]))
self.assertEqual(first_event[2:], (4, 4, 589827))
second_event = next(inputs.iter_unpack(
listener.events[1]))
self.assertEqual(second_event[2:], (1, 274, 1))
@mock.patch.object(
inputs.AppKitMouseBaseListener,
'_get_absolute',
return_value=MockPoint())
def test_handle_absolute(self,
mock_get_absolute):
"""Absolute mouse event is processed."""
pipe = mock.MagicMock()
listener = inputs.AppKitMouseBaseListener(pipe)
# Events begin empty
self.assertEqual(listener.events, [])
event = mock.MagicMock()
# Run the method under test
listener.handle_absolute(event)
# Check that we have events
self.assertEqual(len(listener.events), 2)
first_event = next(inputs.iter_unpack(
listener.events[0]))
self.assertEqual(first_event[2:], (3, 0, 600))
second_event = next(inputs.iter_unpack(
listener.events[1]))
self.assertEqual(second_event[2:], (3, 1, 400))
mock_get_absolute.assert_called_once_with(event)
@mock.patch.object(
inputs.AppKitMouseBaseListener,
'_get_deltas',
return_value=(5, 5, 5))
def test_handle_scrollwheel(self,
mock_get_deltas):
"""Scroll wheel event is processed."""
pipe = mock.MagicMock()
listener = inputs.AppKitMouseBaseListener(pipe)
# Events begin empty
self.assertEqual(listener.events, [])
event = mock.MagicMock()
# Run the method under test
listener.handle_scrollwheel(event)
# Check that we have events
self.assertEqual(len(listener.events), 3)
first_event = next(inputs.iter_unpack(
listener.events[0]))
self.assertEqual(first_event[2:], (2, 6, 5))
second_event = next(inputs.iter_unpack(
listener.events[1]))
self.assertEqual(second_event[2:], (2, 8, 5))
third_event = next(inputs.iter_unpack(
listener.events[2]))
self.assertEqual(third_event[2:], (2, 7, 5))
mock_get_deltas.assert_called_once_with(event)
@mock.patch.object(
inputs.AppKitMouseBaseListener,
'_get_deltas',
return_value=(5, 5, 5))
def test_handle_relative(self,
mock_get_deltas):
"""Relative position is processed."""
pipe = mock.MagicMock()
listener = inputs.AppKitMouseBaseListener(pipe)
# Events begin empty
self.assertEqual(listener.events, [])
event = mock.MagicMock()
# Run the method under test
listener.handle_relative(event)
# Check that we have events
self.assertEqual(len(listener.events), 3)
first_event = next(inputs.iter_unpack(
listener.events[0]))
self.assertEqual(first_event[2:], (2, 0, 5))
second_event = next(inputs.iter_unpack(
listener.events[1]))
self.assertEqual(second_event[2:], (2, 1, 5))
third_event = next(inputs.iter_unpack(
listener.events[2]))
self.assertEqual(third_event[2:], (2, 2, 5))
mock_get_deltas.assert_called_once_with(event)
@mock.patch.object(
inputs.AppKitMouseBaseListener,
'write_to_pipe')
@mock.patch.object(
inputs.AppKitMouseBaseListener,
'handle_button')
@mock.patch.object(
inputs.AppKitMouseBaseListener,
'handle_scrollwheel')
@mock.patch.object(
inputs.AppKitMouseBaseListener,
'_get_event_type',
return_value=22)
@mock.patch.object(
inputs.AppKitMouseBaseListener,
'handle_absolute')
def test_handle_input_with_scroll(self,
mock_handle_absolute,
mock_get_event_type,
mock_handle_scrollwheel,
mock_handle_button,
mock_write_to_pipe):
"""Mouse events are processed."""
# pylint: disable=too-many-arguments
pipe = mock.MagicMock()
listener = inputs.AppKitMouseBaseListener(pipe)
event = mock.MagicMock()
event_type = mock.MagicMock()
event.attach_mock(event_type, 'type')
# Run the method under test
listener.handle_input(event)
# Check that the methods were called
mock_get_event_type.assert_called_once_with(event)
mock_handle_button.assert_called_once_with(event, 22)
mock_handle_absolute.assert_called_once_with(event)
mock_write_to_pipe.assert_called_once()
@mock.patch.object(
inputs.AppKitMouseBaseListener,
'write_to_pipe')
@mock.patch.object(
inputs.AppKitMouseBaseListener,
'handle_button')
@mock.patch.object(
inputs.AppKitMouseBaseListener,
'_get_event_type',
return_value=1)
@mock.patch.object(
inputs.AppKitMouseBaseListener,
'handle_relative')
@mock.patch.object(
inputs.AppKitMouseBaseListener,
'handle_absolute')
def test_handle_input_relative(self,
mock_handle_absolute,
mock_handle_relative,
mock_handle_get_event_type,
mock_handle_button,
mock_write_to_pipe):
"""Mouse events are processed."""
# pylint: disable=too-many-arguments
pipe = mock.MagicMock()
listener = inputs.AppKitMouseBaseListener(pipe)
event = mock.MagicMock()
event_type = mock.MagicMock()
event.attach_mock(event_type, 'type')
# Run the method under test
listener.handle_input(event)
# Check that the methods were called
mock_handle_get_event_type.assert_called_once_with(event)
mock_handle_button.assert_called_once_with(event, 1)
mock_handle_relative.assert_called_once_with(event)
mock_handle_absolute.assert_called_once_with(event)
mock_write_to_pipe.assert_called_once()
class AppKitKeyboardListenerTestCase(TestCase):
"""Test the Mac keyboard support."""
def test_init(self):
"""The created object knows the keyboard codes."""
pipe = mock.MagicMock()
listener = inputs.AppKitKeyboardListener(pipe)
self.assertEqual(listener.events, [])
self.assertEqual(listener.codes[0], 30)
self.assertEqual(listener.codes[120], 60)
self.assertEqual(listener.codes[104], 90)
def test_get_event_key_code(self):
"""Get event type called keyCode()."""
pipe = mock.MagicMock()
listener = inputs.AppKitKeyboardListener(pipe)
event = mock.MagicMock()
event_type = listener._get_event_key_code(event)
call = event.method_calls[0]
self.assertEqual(call[0], 'keyCode')
event_type.assert_not_called()
def test_get_event_type(self):
"""Get event type called type()."""
pipe = mock.MagicMock()
listener = inputs.AppKitKeyboardListener(pipe)
event = mock.MagicMock()
event_type = listener._get_event_type(event)
call = event.method_calls[0]
self.assertEqual(call[0], 'type')
event_type.assert_not_called()
def test_get_flag_value(self):
"""Get event flags calls event.modifierFlags()."""
pipe = mock.MagicMock()
listener = inputs.AppKitKeyboardListener(pipe)
event = mock.MagicMock()
flag_value = listener._get_flag_value(event)
call = event.method_calls[0]
self.assertEqual(call[0], 'modifierFlags')
self.assertEqual(flag_value, 1)
# Tidy up the below method names after testing on the mac.
def test_get_flag_value_something(self):
"""Get event flags calls event.modifierFlags()."""
pipe = mock.MagicMock()
listener = inputs.AppKitKeyboardListener(pipe)
event = mock.MagicMock()
modifier_flags = mock.MagicMock(return_value=256)
event.attach_mock(modifier_flags, 'modifierFlags')
flag_value = listener._get_flag_value(event)
call = event.method_calls[0]
self.assertEqual(call[0], 'modifierFlags')
self.assertEqual(flag_value, 0)
def test_get_key_value_type_10(self):
"""Event type 10 should return 1."""
pipe = mock.MagicMock()
listener = inputs.AppKitKeyboardListener(pipe)
event = mock.MagicMock()
key_value = listener._get_key_value(event, 10)
self.assertEqual(key_value, 1)
def test_get_key_value_type_11(self):
"""Event type 11 should return 0."""
pipe = mock.MagicMock()
listener = inputs.AppKitKeyboardListener(pipe)
event = mock.MagicMock()
key_value = listener._get_key_value(event, 11)
self.assertEqual(key_value, 0)
def test_get_key_value_other_type(self):
"""Unknown event type should return -1."""
pipe = mock.MagicMock()
listener = inputs.AppKitKeyboardListener(pipe)
event = mock.MagicMock()
key_value = listener._get_key_value(event, 15)
self.assertEqual(key_value, -1)
def test_get_key_value_type_12(self):
"""Event type 12 should check the flag value."""
pipe = mock.MagicMock()
listener = inputs.AppKitKeyboardListener(pipe)
event = mock.MagicMock()
key_value = listener._get_key_value(event, 12)
self.assertEqual(key_value, 1)
@mock.patch.object(
inputs.AppKitKeyboardListener,
'write_to_pipe')
@mock.patch.object(
inputs.AppKitKeyboardListener,
'_get_flag_value',
return_value=0)
@mock.patch.object(
inputs.AppKitKeyboardListener,
'_get_event_type',
return_value=10)
@mock.patch.object(
inputs.AppKitKeyboardListener,
'_get_event_key_code',
return_value=4)
def test_handle_input(self,
mock_get_event_key_code,
mock_get_event_type,
mock_get_flags,
mock_write_to_pipe):
"""Mac Keyboard events are processed."""
pipe = mock.MagicMock()
listener = inputs.AppKitKeyboardListener(pipe)
event = mock.MagicMock()
listener.handle_input(event)
self.assertEqual(len(listener.events), 3)
event_info = inputs.iter_unpack(listener.events[1])
self.assertEqual(next(event_info)[2:], (1, 35, 1))
mock_get_event_key_code.assert_called_once_with(event)
mock_get_event_type.assert_called_once_with(event)
mock_write_to_pipe.assert_called_once_with(listener.events)
@mock.patch.object(
inputs.AppKitKeyboardListener,
'write_to_pipe')
@mock.patch.object(
inputs.AppKitKeyboardListener,
'_get_flag_value',
return_value=0)
@mock.patch.object(
inputs.AppKitKeyboardListener,
'_get_event_type',
return_value=10)
@mock.patch.object(
inputs.AppKitKeyboardListener,
'_get_event_key_code',
return_value=256)
def test_handle_input_unknown_code(self,
mock_get_event_key_code,
mock_get_event_type,
mock_get_flags,
mock_write_to_pipe):
"""Mac Keyboard events are processed."""
pipe = mock.MagicMock()
listener = inputs.AppKitKeyboardListener(pipe)
event = mock.MagicMock()
listener.handle_input(event)
self.assertEqual(len(listener.events), 3)
event_info = inputs.iter_unpack(listener.events[1])
self.assertEqual(next(event_info)[2:], (1, 0, 1))
mock_get_event_key_code.assert_called_once_with(event)
mock_get_event_type.assert_called_once_with(event)
mock_write_to_pipe.assert_called_once_with(listener.events)
| {
"content_hash": "78c7a3c2906b864461eaabbcafa8550b",
"timestamp": "",
"source": "github",
"line_count": 930,
"max_line_length": 78,
"avg_line_length": 36.240860215053765,
"alnum_prop": 0.6017089959648706,
"repo_name": "zeth/inputs",
"id": "9501d13915265f9de2bed50fbb9f497be2eb93d8",
"size": "33704",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_listeners.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "205942"
}
],
"symlink_target": ""
} |
import pytest
from ezdxf.entities.hatch import Hatch
from ezdxf.lldxf.tagwriter import TagCollector, basic_tags_from_text
from ezdxf.lldxf import const
HATCH = """0
HATCH
5
0
330
0
100
AcDbEntity
8
0
62
1
100
AcDbHatch
10
0.0
20
0.0
30
0.0
210
0.0
220
0.0
230
1.0
2
SOLID
70
1
71
0
91
0
75
1
76
1
98
1
10
0.0
20
0.0
"""
@pytest.fixture
def entity():
return Hatch.from_text(HATCH)
def test_if_registered():
from ezdxf.entities.factory import ENTITY_CLASSES
assert "HATCH" in ENTITY_CLASSES
def test_default_init():
entity = Hatch()
assert entity.dxftype() == "HATCH"
assert entity.dxf.handle is None
assert entity.dxf.owner is None
def test_default_new():
entity = Hatch.new(
handle="ABBA",
owner="0",
dxfattribs={
"color": 7,
},
)
assert entity.dxf.layer == "0"
assert entity.dxf.color == 7
def test_load_from_text(entity):
assert entity.dxf.layer == "0"
assert entity.dxf.color == 1, "default color is 1"
def test_write_dxf():
entity = Hatch.from_text(HATCH)
result = TagCollector.dxftags(entity)
expected = basic_tags_from_text(HATCH)
assert result == expected
def test_write_correct_polyline_path_tag_order(entity):
entity = Hatch.from_text(HATCH)
entity.paths.add_polyline_path(
[(0, 0), (1, 0), (1, 1)],
is_closed=True,
)
result = TagCollector.dxftags(entity)
tags = list(result.pop_tags([92, 72, 73]))
# 92 = path type 3: external polyline path
# 72 = has_bulge
# 73 = is_closed
# The group codes 72 and 73 are swapped in comparison to MPOLYGON
assert tags == [(92, 3), (72, 0), (73, 1)]
def test_hatch_boundary_state():
state = const.BoundaryPathState.from_flags(
const.BOUNDARY_PATH_EXTERNAL
+ const.BOUNDARY_PATH_DERIVED
+ const.BOUNDARY_PATH_TEXTBOX
+ const.BOUNDARY_PATH_OUTERMOST
)
assert state.external is True
assert state.derived is True
assert state.textbox is True
assert state.outermost is True
assert state.default is False
def test_hatch_boundary_default_state():
state = const.BoundaryPathState()
assert state.external is False
assert state.derived is False
assert state.textbox is False
assert state.outermost is False
assert state.default is True
| {
"content_hash": "8c4122da9a9e92c628c4887c58538c86",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 69,
"avg_line_length": 17.757575757575758,
"alnum_prop": 0.658703071672355,
"repo_name": "mozman/ezdxf",
"id": "3fa2938226741fefd4937e8c19fef364db41d0da",
"size": "2408",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_02_dxf_graphics/test_229a_hatch_dxf_interface.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "5745"
},
{
"name": "CSS",
"bytes": "3565"
},
{
"name": "Common Lisp",
"bytes": "727"
},
{
"name": "Cython",
"bytes": "111923"
},
{
"name": "HTML",
"bytes": "1417"
},
{
"name": "JavaScript",
"bytes": "11132"
},
{
"name": "Python",
"bytes": "6336553"
}
],
"symlink_target": ""
} |
from rest_framework import serializers
from .models import User
class AuthenticatedUserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('id', 'name', 'email')
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('id', 'name')
| {
"content_hash": "703e17f4e2e29a7788fd0f500b31d0e4",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 63,
"avg_line_length": 22.066666666666666,
"alnum_prop": 0.6706948640483383,
"repo_name": "FreeMusicNinja/api.freemusic.ninja",
"id": "b94bd247ec9052413ac31250cd89046a215e56eb",
"size": "331",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "users/serializers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "135608"
}
],
"symlink_target": ""
} |
"""
Sizun - Software Quality Inspection
MIT License
(C) 2015 David Rieger
"""
"""
Instantiates a parser according to the language
parameter passed to create()
"""
# JavaParser and PyParser are the concrete parser implementations; the import
# paths below are assumed and may need adjusting to the actual package layout.
from .javaparser import JavaParser
from .pyparser import PyParser
class ParserFactory():
    concrete_parser = {'JAVA': JavaParser, 'PY': PyParser}
def create(self, language):
return self.concrete_parser[language]()
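# Usage sketch: obtain a concrete parser by language key.
#   parser = ParserFactory().create('JAVA')   # -> JavaParser instance
#   parser = ParserFactory().create('PY')     # -> PyParser instance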
| {
"content_hash": "aa67966230cc921a0cc580b92ee33353",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 58,
"avg_line_length": 18,
"alnum_prop": 0.691358024691358,
"repo_name": "FrontSide/Sizun",
"id": "9562fcd4d17325ccdec4babbd3a56f4fefcce945",
"size": "325",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sizun/controllers/parser/parser_factory.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "45"
},
{
"name": "CSS",
"bytes": "3241"
},
{
"name": "HTML",
"bytes": "3672"
},
{
"name": "JavaScript",
"bytes": "11081"
},
{
"name": "Python",
"bytes": "50583"
},
{
"name": "Shell",
"bytes": "5245"
}
],
"symlink_target": ""
} |
import sys
import json
import shlex
import select
import threading
import contextlib
import subprocess
from try_leela import names
def close_(fd):
try:
fd.close()
except:
pass
class Session(object):
def __init__(self, exe, tree):
self.exe = exe
self.tree = tree
self.clean = True
def execute(self, *stmt):
while (not self.clean):
self.message()
env = {"rnd_name.0": "%s::%s" % (names.rnd_name(), names.rnd_name()),
"rnd_name.1": "%s::%s" % (names.rnd_name(), names.rnd_name()),
"rnd_name.2": "%s::%s" % (names.rnd_name(), names.rnd_name()),
"rnd_name.3": "%s::%s" % (names.rnd_name(), names.rnd_name())}
stmt = [line % env for line in stmt]
self.exe.stdin.write("%s\n" % json.dumps(["using (%s) %s;" % (self.tree, "\n".join(stmt))]))
self.exe.stdin.flush()
self.clean = False
def execute_fetch(self, *stmt):
self.execute(*stmt)
rows = []
for row in self.messages():
if (row is None):
break
rows.append(row)
return(rows)
def execute_fmap(self, f, *stmt):
return(f(self.execute_fetch(*stmt)))
def message(self):
return(self.messages().next())
def messages(self):
while (True):
data = self.exe.stdout.readline()
if (data == ""):
break
try:
data = json.loads(data)[0]
except:
raise(RuntimeError("bad message: %s" % data))
self.clean = data == None
yield(data)
class Driver(object):
def __init__(self, opts):
self.user = opts.username
self.secret = opts.secret
self.logfile = opts.logfile
self.program = opts.program
self.timeout = opts.timeout
self.endpoint = opts.endpoint
self.logfile = opts.logfile
@contextlib.contextmanager
def openlog(self):
if (self.logfile == "-"):
yield(sys.stderr)
else:
fh = open(self.logfile, "a")
try:
yield(fh)
finally:
fh.close()
@contextlib.contextmanager
def session(self, tree):
with self.openlog() as fh:
exe = subprocess.Popen(shlex.split("timeout %d %s" % (self.timeout / 1000, self.program)),
stdin = subprocess.PIPE,
stdout = subprocess.PIPE,
stderr = fh,
close_fds = True)
try:
exe.stdin.write("%s\n" % json.dumps({"secret": self.secret,
"timeout": self.timeout,
"endpoint": self.endpoint,
"username": self.user}))
exe.stdin.flush()
yield(Session(exe, tree))
finally:
close_(exe.stdin)
close_(exe.stdout)
exe.wait()
| {
"content_hash": "0ed97ef1275afb49fff90cf7d9335f42",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 102,
"avg_line_length": 31.333333333333332,
"alnum_prop": 0.4646433041301627,
"repo_name": "locaweb/leela",
"id": "0b4783c1837ff418ae5602612f91abf00b818487",
"size": "3221",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "try/src/try_leela/program.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "1447140"
},
{
"name": "C",
"bytes": "305651"
},
{
"name": "C++",
"bytes": "6897"
},
{
"name": "CMake",
"bytes": "5725"
},
{
"name": "Clojure",
"bytes": "163012"
},
{
"name": "Emacs Lisp",
"bytes": "881"
},
{
"name": "Haskell",
"bytes": "225227"
},
{
"name": "Makefile",
"bytes": "14221"
},
{
"name": "Python",
"bytes": "25067"
},
{
"name": "Ruby",
"bytes": "25777"
},
{
"name": "Shell",
"bytes": "42044"
},
{
"name": "Stata",
"bytes": "8297"
}
],
"symlink_target": ""
} |
STEPSIZE = 8
TABSIZE = 8
EXPANDTABS = 0
import os
import re
import sys
next = {}
next['if'] = next['elif'] = 'elif', 'else', 'end'
next['while'] = next['for'] = 'else', 'end'
next['try'] = 'except', 'finally'
next['except'] = 'except', 'else', 'end'
next['else'] = next['finally'] = next['def'] = next['class'] = 'end'
next['end'] = ()
start = 'if', 'while', 'for', 'try', 'def', 'class'
class PythonIndenter:
def __init__(self, fpi = sys.stdin, fpo = sys.stdout,
indentsize = STEPSIZE, tabsize = TABSIZE, expandtabs = EXPANDTABS):
self.fpi = fpi
self.fpo = fpo
self.indentsize = indentsize
self.tabsize = tabsize
self.lineno = 0
self.expandtabs = expandtabs
self._write = fpo.write
self.kwprog = re.compile(
r'^\s*(?P<kw>[a-z]+)'
r'(\s+(?P<id>[a-zA-Z_]\w*))?'
r'[^\w]')
self.endprog = re.compile(
r'^\s*#?\s*end\s+(?P<kw>[a-z]+)'
r'(\s+(?P<id>[a-zA-Z_]\w*))?'
r'[^\w]')
self.wsprog = re.compile(r'^[ \t]*')
# end def __init__
def write(self, line):
if self.expandtabs:
self._write(line.expandtabs(self.tabsize))
else:
self._write(line)
# end if
# end def write
def readline(self):
line = self.fpi.readline()
if line: self.lineno = self.lineno + 1
# end if
return line
# end def readline
def error(self, fmt, *args):
if args: fmt = fmt % args
# end if
sys.stderr.write('Error at line %d: %s\n' % (self.lineno, fmt))
self.write('### %s ###\n' % fmt)
# end def error
def getline(self):
line = self.readline()
while line[-2:] == '\\\n':
line2 = self.readline()
if not line2: break
# end if
line = line + line2
# end while
return line
# end def getline
def putline(self, line, indent = None):
if indent is None:
self.write(line)
return
# end if
tabs, spaces = divmod(indent*self.indentsize, self.tabsize)
i = 0
m = self.wsprog.match(line)
if m: i = m.end()
# end if
self.write('\t'*tabs + ' '*spaces + line[i:])
# end def putline
def reformat(self):
stack = []
while 1:
line = self.getline()
if not line: break # EOF
# end if
m = self.endprog.match(line)
if m:
kw = 'end'
kw2 = m.group('kw')
if not stack:
self.error('unexpected end')
elif stack[-1][0] != kw2:
self.error('unmatched end')
# end if
del stack[-1:]
self.putline(line, len(stack))
continue
# end if
m = self.kwprog.match(line)
if m:
kw = m.group('kw')
if kw in start:
self.putline(line, len(stack))
stack.append((kw, kw))
continue
# end if
if next.has_key(kw) and stack:
self.putline(line, len(stack)-1)
kwa, kwb = stack[-1]
stack[-1] = kwa, kw
continue
# end if
# end if
self.putline(line, len(stack))
# end while
if stack:
self.error('unterminated keywords')
for kwa, kwb in stack:
self.write('\t%s\n' % kwa)
# end for
# end if
# end def reformat
def delete(self):
begin_counter = 0
end_counter = 0
while 1:
line = self.getline()
if not line: break # EOF
# end if
m = self.endprog.match(line)
if m:
end_counter = end_counter + 1
continue
# end if
m = self.kwprog.match(line)
if m:
kw = m.group('kw')
if kw in start:
begin_counter = begin_counter + 1
# end if
# end if
self.putline(line)
# end while
if begin_counter - end_counter < 0:
sys.stderr.write('Warning: input contained more end tags than expected\n')
elif begin_counter - end_counter > 0:
sys.stderr.write('Warning: input contained less end tags than expected\n')
# end if
# end def delete
def complete(self):
self.indentsize = 1
stack = []
todo = []
current, firstkw, lastkw, topid = 0, '', '', ''
while 1:
line = self.getline()
i = 0
m = self.wsprog.match(line)
if m: i = m.end()
# end if
m = self.endprog.match(line)
if m:
thiskw = 'end'
endkw = m.group('kw')
thisid = m.group('id')
else:
m = self.kwprog.match(line)
if m:
thiskw = m.group('kw')
if not next.has_key(thiskw):
thiskw = ''
# end if
if thiskw in ('def', 'class'):
thisid = m.group('id')
else:
thisid = ''
# end if
elif line[i:i+1] in ('\n', '#'):
todo.append(line)
continue
else:
thiskw = ''
# end if
# end if
indent = len(line[:i].expandtabs(self.tabsize))
while indent < current:
if firstkw:
if topid:
s = '# end %s %s\n' % (
firstkw, topid)
else:
s = '# end %s\n' % firstkw
# end if
self.putline(s, current)
firstkw = lastkw = ''
# end if
current, firstkw, lastkw, topid = stack[-1]
del stack[-1]
# end while
if indent == current and firstkw:
if thiskw == 'end':
if endkw != firstkw:
self.error('mismatched end')
# end if
firstkw = lastkw = ''
elif not thiskw or thiskw in start:
if topid:
s = '# end %s %s\n' % (
firstkw, topid)
else:
s = '# end %s\n' % firstkw
# end if
self.putline(s, current)
firstkw = lastkw = topid = ''
# end if
# end if
if indent > current:
stack.append((current, firstkw, lastkw, topid))
if thiskw and thiskw not in start:
# error
thiskw = ''
# end if
current, firstkw, lastkw, topid = \
indent, thiskw, thiskw, thisid
# end if
if thiskw:
if thiskw in start:
firstkw = lastkw = thiskw
topid = thisid
else:
lastkw = thiskw
# end if
# end if
for l in todo: self.write(l)
# end for
todo = []
if not line: break
# end if
self.write(line)
# end while
# end def complete
# end class PythonIndenter
# Simplified user interface
# - xxx_filter(input, output): read and write file objects
# - xxx_string(s): take and return string object
# - xxx_file(filename): process file in place, return true iff changed
def complete_filter(input = sys.stdin, output = sys.stdout,
stepsize = STEPSIZE, tabsize = TABSIZE, expandtabs = EXPANDTABS):
pi = PythonIndenter(input, output, stepsize, tabsize, expandtabs)
pi.complete()
# end def complete_filter
def delete_filter(input= sys.stdin, output = sys.stdout,
stepsize = STEPSIZE, tabsize = TABSIZE, expandtabs = EXPANDTABS):
pi = PythonIndenter(input, output, stepsize, tabsize, expandtabs)
pi.delete()
# end def delete_filter
def reformat_filter(input = sys.stdin, output = sys.stdout,
stepsize = STEPSIZE, tabsize = TABSIZE, expandtabs = EXPANDTABS):
pi = PythonIndenter(input, output, stepsize, tabsize, expandtabs)
pi.reformat()
# end def reformat_filter
class StringReader:
def __init__(self, buf):
self.buf = buf
self.pos = 0
self.len = len(self.buf)
# end def __init__
def read(self, n = 0):
if n <= 0:
n = self.len - self.pos
else:
n = min(n, self.len - self.pos)
# end if
r = self.buf[self.pos : self.pos + n]
self.pos = self.pos + n
return r
# end def read
def readline(self):
i = self.buf.find('\n', self.pos)
return self.read(i + 1 - self.pos)
# end def readline
def readlines(self):
lines = []
line = self.readline()
while line:
lines.append(line)
line = self.readline()
# end while
return lines
# end def readlines
# seek/tell etc. are left as an exercise for the reader
# end class StringReader
class StringWriter:
def __init__(self):
self.buf = ''
# end def __init__
def write(self, s):
self.buf = self.buf + s
# end def write
def getvalue(self):
return self.buf
# end def getvalue
# end class StringWriter
def complete_string(source, stepsize = STEPSIZE, tabsize = TABSIZE, expandtabs = EXPANDTABS):
input = StringReader(source)
output = StringWriter()
pi = PythonIndenter(input, output, stepsize, tabsize, expandtabs)
pi.complete()
return output.getvalue()
# end def complete_string
def delete_string(source, stepsize = STEPSIZE, tabsize = TABSIZE, expandtabs = EXPANDTABS):
input = StringReader(source)
output = StringWriter()
pi = PythonIndenter(input, output, stepsize, tabsize, expandtabs)
pi.delete()
return output.getvalue()
# end def delete_string
def reformat_string(source, stepsize = STEPSIZE, tabsize = TABSIZE, expandtabs = EXPANDTABS):
input = StringReader(source)
output = StringWriter()
pi = PythonIndenter(input, output, stepsize, tabsize, expandtabs)
pi.reformat()
return output.getvalue()
# end def reformat_string
def complete_file(filename, stepsize = STEPSIZE, tabsize = TABSIZE, expandtabs = EXPANDTABS):
source = open(filename, 'r').read()
result = complete_string(source, stepsize, tabsize, expandtabs)
if source == result: return 0
# end if
import os
try: os.rename(filename, filename + '~')
except os.error: pass
# end try
f = open(filename, 'w')
f.write(result)
f.close()
return 1
# end def complete_file
def delete_file(filename, stepsize = STEPSIZE, tabsize = TABSIZE, expandtabs = EXPANDTABS):
source = open(filename, 'r').read()
result = delete_string(source, stepsize, tabsize, expandtabs)
if source == result: return 0
# end if
import os
try: os.rename(filename, filename + '~')
except os.error: pass
# end try
f = open(filename, 'w')
f.write(result)
f.close()
return 1
# end def delete_file
def reformat_file(filename, stepsize = STEPSIZE, tabsize = TABSIZE, expandtabs = EXPANDTABS):
source = open(filename, 'r').read()
result = reformat_string(source, stepsize, tabsize, expandtabs)
if source == result: return 0
# end if
import os
try: os.rename(filename, filename + '~')
except os.error: pass
# end try
f = open(filename, 'w')
f.write(result)
f.close()
return 1
# end def reformat_file
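# Usage sketch for the string helpers (illustrative only):
#   annotated = complete_string("if x:\n\ty = 1\n")  # appends "# end if"
#   stripped = delete_string(annotated)              # removes the directive again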
# Test program when called as a script
usage = """
usage: pindent (-c|-d|-r) [-s stepsize] [-t tabsize] [-e] [file] ...
-c : complete a correctly indented program (add #end directives)
-d : delete #end directives
-r : reformat a completed program (use #end directives)
-s stepsize: indentation step (default %(STEPSIZE)d)
-t tabsize : the worth in spaces of a tab (default %(TABSIZE)d)
-e         : expand TABs into spaces (default OFF)
[file] ... : files are changed in place, with backups in file~
If no files are specified or a single - is given,
the program acts as a filter (reads stdin, writes stdout).
""" % vars()
def error_both(op1, op2):
sys.stderr.write('Error: You can not specify both '+op1+' and -'+op2[0]+' at the same time\n')
sys.stderr.write(usage)
sys.exit(2)
# end def error_both
def test():
import getopt
try:
opts, args = getopt.getopt(sys.argv[1:], 'cdrs:t:e')
except getopt.error, msg:
sys.stderr.write('Error: %s\n' % msg)
sys.stderr.write(usage)
sys.exit(2)
# end try
action = None
stepsize = STEPSIZE
tabsize = TABSIZE
expandtabs = EXPANDTABS
for o, a in opts:
if o == '-c':
if action: error_both(o, action)
# end if
action = 'complete'
elif o == '-d':
if action: error_both(o, action)
# end if
action = 'delete'
elif o == '-r':
if action: error_both(o, action)
# end if
action = 'reformat'
elif o == '-s':
stepsize = int(a)
elif o == '-t':
tabsize = int(a)
elif o == '-e':
expandtabs = 1
# end if
# end for
if not action:
sys.stderr.write(
'You must specify -c(omplete), -d(elete) or -r(eformat)\n')
sys.stderr.write(usage)
sys.exit(2)
# end if
if not args or args == ['-']:
action = eval(action + '_filter')
action(sys.stdin, sys.stdout, stepsize, tabsize, expandtabs)
else:
action = eval(action + '_file')
for filename in args:
action(filename, stepsize, tabsize, expandtabs)
# end for
# end if
# end def test
if __name__ == '__main__':
test()
# end if
| {
"content_hash": "cdba8f7ac9fe1e19808f95a7a085a2af",
"timestamp": "",
"source": "github",
"line_count": 463,
"max_line_length": 98,
"avg_line_length": 31.401727861771057,
"alnum_prop": 0.4971456083637114,
"repo_name": "OS2World/APP-INTERNET-torpak_2",
"id": "39cdcc8b469dcaf0bb06208d0edd06a279f93319",
"size": "17868",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Tools/scripts/pindent.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
from .compat import callable, cmp, reduce, \
threading, py3k, py33, py2k, jython, pypy, cpython, win32, \
pickle, dottedgetter, parse_qsl, namedtuple, next, WeakSet, reraise, \
raise_from_cause, text_type, string_types, int_types, binary_type, \
quote_plus, with_metaclass, print_, itertools_filterfalse, u, ue, b,\
unquote_plus, b64decode, b64encode, byte_buffer, itertools_filter,\
StringIO
from ._collections import KeyedTuple, ImmutableContainer, immutabledict, \
Properties, OrderedProperties, ImmutableProperties, OrderedDict, \
OrderedSet, IdentitySet, OrderedIdentitySet, column_set, \
column_dict, ordered_column_set, populate_column_dict, unique_list, \
UniqueAppender, PopulateDict, EMPTY_SET, to_list, to_set, \
to_column_set, update_copy, flatten_iterator, \
LRUCache, ScopedRegistry, ThreadLocalRegistry, WeakSequence
from .langhelpers import iterate_attributes, class_hierarchy, \
portable_instancemethod, unbound_method_to_callable, \
getargspec_init, format_argspec_init, format_argspec_plus, \
get_func_kwargs, get_cls_kwargs, decorator, as_interface, \
memoized_property, memoized_instancemethod, md5_hex, \
group_expirable_memoized_property, importlater, decode_slice, \
monkeypatch_proxied_specials, asbool, bool_or_str, coerce_kw_type,\
duck_type_collection, assert_arg_type, symbol, dictlike_iteritems,\
classproperty, set_creation_order, warn_exception, warn, NoneType,\
constructor_copy, methods_equivalent, chop_traceback, asint,\
generic_repr, counter, PluginLoader, hybridmethod, safe_reraise
from .deprecations import warn_deprecated, warn_pending_deprecation, \
deprecated, pending_deprecation
# things that used to be not always available,
# but are now as of current support Python versions
from collections import defaultdict
from functools import partial
from functools import update_wrapper
from contextlib import contextmanager
| {
"content_hash": "f69f90df4196cb83b5aff22c13136b05",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 74,
"avg_line_length": 53,
"alnum_prop": 0.7577766445690974,
"repo_name": "sauloal/PiCastPy",
"id": "739caefe034d799f184059afaed0f7552d7317ec",
"size": "2196",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sqlalchemy/util/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "39552"
},
{
"name": "CSS",
"bytes": "327822"
},
{
"name": "JavaScript",
"bytes": "125590"
},
{
"name": "PHP",
"bytes": "25856"
},
{
"name": "Python",
"bytes": "5131708"
}
],
"symlink_target": ""
} |
from django.db import models
from django.utils import timezone
class Post(models.Model):
author = models.ForeignKey('auth.User')
title = models.CharField(max_length=200)
text = models.TextField()
created_date = models.DateTimeField(default=timezone.now)
published_date = models.DateTimeField(blank=True, null=True)
def publish(self):
self.published_date = timezone.now()
self.save()
def __str__(self):
return self.title
# Create your models here.
| {
"content_hash": "0bf93a1c48bee2df7258a58555ca2663",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 64,
"avg_line_length": 29.555555555555557,
"alnum_prop": 0.650375939849624,
"repo_name": "raytung/DjangoGirls",
"id": "5c4502ce8c46a38865dc4b44bf3a762c6d011fdf",
"size": "532",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blog/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "771"
},
{
"name": "HTML",
"bytes": "1860"
},
{
"name": "Python",
"bytes": "6675"
}
],
"symlink_target": ""
} |
__all__ = ['msetting', 'get_machine', 'set_outerpath', 'outer_path']
class msetting:
def __init__(self,
engine = 'sqlite3', #'postgresql_psycopg2',
host = '',
port = '', # 5432 for postgresql (usually)
tempdir = '.',
name = '',
user = '',
password = '',
cache = '',
app_path = ''):
self.engine = engine
self.host = host
self.port = str(port)
self.name = name
self.user = user
self.password = password
self.cache = cache
self.tempdir = tempdir
self.machine = None
self.app_path = app_path
def get_machine(machines, default_setting):
import platform
node = platform.node()
node = node.split('.')[0]
sett = machines.get(node, default_setting)
sett.machine = node
return sett
def set_outerpath(p):
global outer_path
outer_path = p
return outer_path
outer_path = None | {
"content_hash": "aeb244b76239424d298610a1da0f3390",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 71,
"avg_line_length": 26.976744186046513,
"alnum_prop": 0.44913793103448274,
"repo_name": "lsbardel/flow",
"id": "e389ba3a09c9a1c0dc4464f981bba23958dafd89",
"size": "1160",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flow/utils/globalsettings.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "54225"
},
{
"name": "Perl",
"bytes": "2377"
},
{
"name": "Python",
"bytes": "639731"
},
{
"name": "Shell",
"bytes": "4249"
}
],
"symlink_target": ""
} |
from __future__ import annotations
from collections import defaultdict
import itertools
from typing import (
Hashable,
Iterable,
)
import numpy as np
from pandas._libs.sparse import IntIndex
from pandas._typing import NpDtype
from pandas.core.dtypes.common import (
is_integer_dtype,
is_list_like,
is_object_dtype,
)
from pandas.core.arrays import SparseArray
from pandas.core.arrays.categorical import factorize_from_iterable
from pandas.core.frame import DataFrame
from pandas.core.indexes.api import (
Index,
default_index,
)
from pandas.core.series import Series
def get_dummies(
data,
prefix=None,
prefix_sep: str | Iterable[str] | dict[str, str] = "_",
dummy_na: bool = False,
columns=None,
sparse: bool = False,
drop_first: bool = False,
dtype: NpDtype | None = None,
) -> DataFrame:
"""
Convert categorical variable into dummy/indicator variables.
Each variable is converted in as many 0/1 variables as there are different
values. Columns in the output are each named after a value; if the input is
a DataFrame, the name of the original variable is prepended to the value.
Parameters
----------
data : array-like, Series, or DataFrame
Data of which to get dummy indicators.
prefix : str, list of str, or dict of str, default None
String to append DataFrame column names.
Pass a list with length equal to the number of columns
when calling get_dummies on a DataFrame. Alternatively, `prefix`
can be a dictionary mapping column names to prefixes.
prefix_sep : str, default '_'
If appending prefix, separator/delimiter to use. Or pass a
list or dictionary as with `prefix`.
dummy_na : bool, default False
Add a column to indicate NaNs, if False NaNs are ignored.
columns : list-like, default None
Column names in the DataFrame to be encoded.
If `columns` is None then all the columns with
`object`, `string`, or `category` dtype will be converted.
sparse : bool, default False
Whether the dummy-encoded columns should be backed by
a :class:`SparseArray` (True) or a regular NumPy array (False).
drop_first : bool, default False
Whether to get k-1 dummies out of k categorical levels by removing the
first level.
dtype : dtype, default bool
Data type for new columns. Only a single dtype is allowed.
Returns
-------
DataFrame
Dummy-coded data. If `data` contains other columns than the
dummy-coded one(s), these will be prepended, unaltered, to the result.
See Also
--------
Series.str.get_dummies : Convert Series of strings to dummy codes.
:func:`~pandas.from_dummies` : Convert dummy codes to categorical ``DataFrame``.
Notes
-----
Reference :ref:`the user guide <reshaping.dummies>` for more examples.
Examples
--------
>>> s = pd.Series(list('abca'))
>>> pd.get_dummies(s)
a b c
0 True False False
1 False True False
2 False False True
3 True False False
>>> s1 = ['a', 'b', np.nan]
>>> pd.get_dummies(s1)
a b
0 True False
1 False True
2 False False
>>> pd.get_dummies(s1, dummy_na=True)
a b NaN
0 True False False
1 False True False
2 False False True
>>> df = pd.DataFrame({'A': ['a', 'b', 'a'], 'B': ['b', 'a', 'c'],
... 'C': [1, 2, 3]})
>>> pd.get_dummies(df, prefix=['col1', 'col2'])
C col1_a col1_b col2_a col2_b col2_c
0 1 True False False True False
1 2 False True True False False
2 3 True False False False True
>>> pd.get_dummies(pd.Series(list('abcaa')))
a b c
0 True False False
1 False True False
2 False False True
3 True False False
4 True False False
>>> pd.get_dummies(pd.Series(list('abcaa')), drop_first=True)
b c
0 False False
1 True False
2 False True
3 False False
4 False False
>>> pd.get_dummies(pd.Series(list('abc')), dtype=float)
a b c
0 1.0 0.0 0.0
1 0.0 1.0 0.0
2 0.0 0.0 1.0
"""
from pandas.core.reshape.concat import concat
dtypes_to_encode = ["object", "string", "category"]
if isinstance(data, DataFrame):
# determine columns being encoded
if columns is None:
data_to_encode = data.select_dtypes(include=dtypes_to_encode)
elif not is_list_like(columns):
raise TypeError("Input must be a list-like for parameter `columns`")
else:
data_to_encode = data[columns]
# validate prefixes and separator to avoid silently dropping cols
def check_len(item, name):
if is_list_like(item):
if not len(item) == data_to_encode.shape[1]:
len_msg = (
f"Length of '{name}' ({len(item)}) did not match the "
"length of the columns being encoded "
f"({data_to_encode.shape[1]})."
)
raise ValueError(len_msg)
check_len(prefix, "prefix")
check_len(prefix_sep, "prefix_sep")
if isinstance(prefix, str):
prefix = itertools.cycle([prefix])
if isinstance(prefix, dict):
prefix = [prefix[col] for col in data_to_encode.columns]
if prefix is None:
prefix = data_to_encode.columns
# validate separators
if isinstance(prefix_sep, str):
prefix_sep = itertools.cycle([prefix_sep])
elif isinstance(prefix_sep, dict):
prefix_sep = [prefix_sep[col] for col in data_to_encode.columns]
with_dummies: list[DataFrame]
if data_to_encode.shape == data.shape:
# Encoding the entire df, do not prepend any dropped columns
with_dummies = []
elif columns is not None:
# Encoding only cols specified in columns. Get all cols not in
# columns to prepend to result.
with_dummies = [data.drop(columns, axis=1)]
else:
# Encoding only object and category dtype columns. Get remaining
# columns to prepend to result.
with_dummies = [data.select_dtypes(exclude=dtypes_to_encode)]
for (col, pre, sep) in zip(data_to_encode.items(), prefix, prefix_sep):
# col is (column_name, column), use just column data here
dummy = _get_dummies_1d(
col[1],
prefix=pre,
prefix_sep=sep,
dummy_na=dummy_na,
sparse=sparse,
drop_first=drop_first,
dtype=dtype,
)
with_dummies.append(dummy)
result = concat(with_dummies, axis=1)
else:
result = _get_dummies_1d(
data,
prefix,
prefix_sep,
dummy_na,
sparse=sparse,
drop_first=drop_first,
dtype=dtype,
)
return result
def _get_dummies_1d(
data,
prefix,
prefix_sep: str | Iterable[str] | dict[str, str] = "_",
dummy_na: bool = False,
sparse: bool = False,
drop_first: bool = False,
dtype: NpDtype | None = None,
) -> DataFrame:
from pandas.core.reshape.concat import concat
# Series avoids inconsistent NaN handling
codes, levels = factorize_from_iterable(Series(data))
if dtype is None:
dtype = np.dtype(bool)
dtype = np.dtype(dtype)
if is_object_dtype(dtype):
raise ValueError("dtype=object is not a valid dtype for get_dummies")
def get_empty_frame(data) -> DataFrame:
index: Index | np.ndarray
if isinstance(data, Series):
index = data.index
else:
index = default_index(len(data))
return DataFrame(index=index)
# if all NaN
if not dummy_na and len(levels) == 0:
return get_empty_frame(data)
codes = codes.copy()
if dummy_na:
codes[codes == -1] = len(levels)
levels = levels.insert(len(levels), np.nan)
# if dummy_na, we just fake a nan level. drop_first will drop it again
if drop_first and len(levels) == 1:
return get_empty_frame(data)
number_of_cols = len(levels)
if prefix is None:
dummy_cols = levels
else:
dummy_cols = Index([f"{prefix}{prefix_sep}{level}" for level in levels])
index: Index | None
if isinstance(data, Series):
index = data.index
else:
index = None
if sparse:
fill_value: bool | float
if is_integer_dtype(dtype):
fill_value = 0
elif dtype == np.dtype(bool):
fill_value = False
else:
fill_value = 0.0
sparse_series = []
N = len(data)
sp_indices: list[list] = [[] for _ in range(len(dummy_cols))]
mask = codes != -1
codes = codes[mask]
n_idx = np.arange(N)[mask]
for ndx, code in zip(n_idx, codes):
sp_indices[code].append(ndx)
if drop_first:
# remove first categorical level to avoid perfect collinearity
# GH12042
sp_indices = sp_indices[1:]
dummy_cols = dummy_cols[1:]
for col, ixs in zip(dummy_cols, sp_indices):
sarr = SparseArray(
np.ones(len(ixs), dtype=dtype),
sparse_index=IntIndex(N, ixs),
fill_value=fill_value,
dtype=dtype,
)
sparse_series.append(Series(data=sarr, index=index, name=col))
return concat(sparse_series, axis=1, copy=False)
else:
# take on axis=1 + transpose to ensure ndarray layout is column-major
dummy_mat = np.eye(number_of_cols, dtype=dtype).take(codes, axis=1).T
if not dummy_na:
# reset NaN GH4446
dummy_mat[codes == -1] = 0
if drop_first:
# remove first GH12042
dummy_mat = dummy_mat[:, 1:]
dummy_cols = dummy_cols[1:]
return DataFrame(dummy_mat, index=index, columns=dummy_cols)
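# A small worked example of the dense branch above: np.eye(k) provides one
# indicator column per level, .take(codes, axis=1) repeats those columns in
# code order, and the transpose gives one row per observation. With 3 levels
# and codes [0, 2, 1, 0]:
#
# >>> import numpy as np
# >>> codes = np.array([0, 2, 1, 0])
# >>> np.eye(3, dtype=bool).take(codes, axis=1).T
# array([[ True, False, False],
#        [False, False,  True],
#        [False,  True, False],
#        [ True, False, False]])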
def from_dummies(
data: DataFrame,
sep: None | str = None,
default_category: None | Hashable | dict[str, Hashable] = None,
) -> DataFrame:
"""
Create a categorical ``DataFrame`` from a ``DataFrame`` of dummy variables.
Inverts the operation performed by :func:`~pandas.get_dummies`.
.. versionadded:: 1.5.0
Parameters
----------
data : DataFrame
Data which contains dummy-coded variables in form of integer columns of
1's and 0's.
sep : str, default None
Separator used in the column names of the dummy categories; it is the
character indicating the separation of the categorical names from the prefixes.
For example, if your column names are 'prefix_A' and 'prefix_B',
you can strip the underscore by specifying sep='_'.
default_category : None, Hashable or dict of Hashables, default None
The default category is the implied category when a value has none of the
listed categories specified with a one, i.e. if all dummies in a row are
zero. Can be a single value for all variables or a dict directly mapping
the default categories to a prefix of a variable.
Returns
-------
DataFrame
Categorical data decoded from the dummy input-data.
Raises
------
ValueError
* When the input ``DataFrame`` ``data`` contains NA values.
* When the input ``DataFrame`` ``data`` contains column names with separators
that do not match the separator specified with ``sep``.
* When a ``dict`` passed to ``default_category`` does not include an implied
category for each prefix.
* When a value in ``data`` has more than one category assigned to it.
* When ``default_category=None`` and a value in ``data`` has no category
assigned to it.
TypeError
* When the input ``data`` is not of type ``DataFrame``.
* When the input ``DataFrame`` ``data`` contains non-dummy data.
* When the passed ``sep`` is of a wrong data type.
* When the passed ``default_category`` is of a wrong data type.
See Also
--------
:func:`~pandas.get_dummies` : Convert ``Series`` or ``DataFrame`` to dummy codes.
:class:`~pandas.Categorical` : Represent a categorical variable in classic R / S-plus fashion.
Notes
-----
The columns of the passed dummy data should only include 1's and 0's,
or boolean values.
Examples
--------
>>> df = pd.DataFrame({"a": [1, 0, 0, 1], "b": [0, 1, 0, 0],
... "c": [0, 0, 1, 0]})
>>> df
a b c
0 1 0 0
1 0 1 0
2 0 0 1
3 1 0 0
>>> pd.from_dummies(df)
0 a
1 b
2 c
3 a
>>> df = pd.DataFrame({"col1_a": [1, 0, 1], "col1_b": [0, 1, 0],
... "col2_a": [0, 1, 0], "col2_b": [1, 0, 0],
... "col2_c": [0, 0, 1]})
>>> df
col1_a col1_b col2_a col2_b col2_c
0 1 0 0 1 0
1 0 1 1 0 0
2 1 0 0 0 1
>>> pd.from_dummies(df, sep="_")
col1 col2
0 a b
1 b a
2 a c
>>> df = pd.DataFrame({"col1_a": [1, 0, 0], "col1_b": [0, 1, 0],
... "col2_a": [0, 1, 0], "col2_b": [1, 0, 0],
... "col2_c": [0, 0, 0]})
>>> df
col1_a col1_b col2_a col2_b col2_c
0 1 0 0 1 0
1 0 1 1 0 0
2 0 0 0 0 0
>>> pd.from_dummies(df, sep="_", default_category={"col1": "d", "col2": "e"})
col1 col2
0 a b
1 b a
2 d e
"""
from pandas.core.reshape.concat import concat
if not isinstance(data, DataFrame):
raise TypeError(
"Expected 'data' to be a 'DataFrame'; "
f"Received 'data' of type: {type(data).__name__}"
)
if data.isna().any().any():
raise ValueError(
"Dummy DataFrame contains NA value in column: "
f"'{data.isna().any().idxmax()}'"
)
# index data with a list of all columns that are dummies
try:
data_to_decode = data.astype("boolean", copy=False)
except TypeError:
raise TypeError("Passed DataFrame contains non-dummy data")
# collect prefixes and get lists to slice data for each prefix
variables_slice = defaultdict(list)
if sep is None:
variables_slice[""] = list(data.columns)
elif isinstance(sep, str):
for col in data_to_decode.columns:
prefix = col.split(sep)[0]
if len(prefix) == len(col):
raise ValueError(f"Separator not specified for column: {col}")
variables_slice[prefix].append(col)
else:
raise TypeError(
"Expected 'sep' to be of type 'str' or 'None'; "
f"Received 'sep' of type: {type(sep).__name__}"
)
if default_category is not None:
if isinstance(default_category, dict):
if not len(default_category) == len(variables_slice):
len_msg = (
f"Length of 'default_category' ({len(default_category)}) "
f"did not match the length of the columns being encoded "
f"({len(variables_slice)})"
)
raise ValueError(len_msg)
elif isinstance(default_category, Hashable):
default_category = dict(
zip(variables_slice, [default_category] * len(variables_slice))
)
else:
raise TypeError(
"Expected 'default_category' to be of type "
"'None', 'Hashable', or 'dict'; "
"Received 'default_category' of type: "
f"{type(default_category).__name__}"
)
cat_data = {}
for prefix, prefix_slice in variables_slice.items():
if sep is None:
cats = prefix_slice.copy()
else:
cats = [col[len(prefix + sep) :] for col in prefix_slice]
assigned = data_to_decode.loc[:, prefix_slice].sum(axis=1)
if any(assigned > 1):
raise ValueError(
"Dummy DataFrame contains multi-assignment(s); "
f"First instance in row: {assigned.idxmax()}"
)
if any(assigned == 0):
if isinstance(default_category, dict):
cats.append(default_category[prefix])
else:
raise ValueError(
"Dummy DataFrame contains unassigned value(s); "
f"First instance in row: {assigned.idxmin()}"
)
data_slice = concat(
(data_to_decode.loc[:, prefix_slice], assigned == 0), axis=1
)
else:
data_slice = data_to_decode.loc[:, prefix_slice]
cats_array = np.array(cats, dtype="object")
# get indices of True entries along axis=1
cat_data[prefix] = cats_array[data_slice.to_numpy().nonzero()[1]]
return DataFrame(cat_data)
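# A brief round-trip sketch (values illustrative): dummies produced by
# get_dummies with a prefix separator can be decoded back with from_dummies
# using the same separator.
#
# >>> import pandas as pd
# >>> dummies = pd.get_dummies(pd.DataFrame({"col": ["a", "b", "a"]}))
# >>> pd.from_dummies(dummies, sep="_")
#   col
# 0   a
# 1   b
# 2   a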
| {
"content_hash": "fb9b2cfe7739501dafaef4b5006623ab",
"timestamp": "",
"source": "github",
"line_count": 529,
"max_line_length": 87,
"avg_line_length": 33.24574669187145,
"alnum_prop": 0.554045601865014,
"repo_name": "pandas-dev/pandas",
"id": "7e45e587ca84a45bda899356b8d0efc979331a00",
"size": "17587",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "pandas/core/reshape/encoding.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "512"
},
{
"name": "C",
"bytes": "366145"
},
{
"name": "CSS",
"bytes": "1800"
},
{
"name": "Cython",
"bytes": "1186787"
},
{
"name": "Dockerfile",
"bytes": "1411"
},
{
"name": "HTML",
"bytes": "456531"
},
{
"name": "Python",
"bytes": "18778786"
},
{
"name": "Shell",
"bytes": "10369"
},
{
"name": "Smarty",
"bytes": "8486"
},
{
"name": "XSLT",
"bytes": "1196"
}
],
"symlink_target": ""
} |
import numpy as np
from cs231n import optim
class Solver(object):
"""
A Solver encapsulates all the logic necessary for training classification
models. The Solver performs stochastic gradient descent using different
update rules defined in optim.py.
The solver accepts both training and validation data and labels so it can
periodically check classification accuracy on both training and validation
data to watch out for overfitting.
To train a model, you will first construct a Solver instance, passing the
model, dataset, and various options (learning rate, batch size, etc.) to the
constructor. You will then call the train() method to run the optimization
procedure and train the model.
After the train() method returns, model.params will contain the parameters
that performed best on the validation set over the course of training.
In addition, the instance variable solver.loss_history will contain a list
of all losses encountered during training and the instance variables
solver.train_acc_history and solver.val_acc_history will be lists containing
the accuracies of the model on the training and validation set at each epoch.
Example usage might look something like this:
data = {
'X_train': # training data
'y_train': # training labels
'X_val': # validation data
'y_val': # validation labels
}
model = MyAwesomeModel(hidden_size=100, reg=10)
solver = Solver(model, data,
update_rule='sgd',
optim_config={
'learning_rate': 1e-3,
},
lr_decay=0.95,
num_epochs=10, batch_size=100,
print_every=100)
solver.train()
A Solver works on a model object that must conform to the following API:
- model.params must be a dictionary mapping string parameter names to numpy
arrays containing parameter values.
- model.loss(X, y) must be a function that computes training-time loss and
gradients, and test-time classification scores, with the following inputs
and outputs:
Inputs:
- X: Array giving a minibatch of input data of shape (N, d_1, ..., d_k)
- y: Array of labels, of shape (N,) giving labels for X where y[i] is the
label for X[i].
Returns:
If y is None, run a test-time forward pass and return:
- scores: Array of shape (N, C) giving classification scores for X where
scores[i, c] gives the score of class c for X[i].
If y is not None, run a training time forward and backward pass and return
a tuple of:
- loss: Scalar giving the loss
- grads: Dictionary with the same keys as self.params mapping parameter
names to gradients of the loss with respect to those parameters.
"""
def __init__(self, model, data, **kwargs):
"""
Construct a new Solver instance.
Required arguments:
- model: A model object conforming to the API described above
- data: A dictionary of training and validation data with the following:
'X_train': Array of shape (N_train, d_1, ..., d_k) giving training images
'X_val': Array of shape (N_val, d_1, ..., d_k) giving validation images
'y_train': Array of shape (N_train,) giving labels for training images
'y_val': Array of shape (N_val,) giving labels for validation images
Optional arguments:
- update_rule: A string giving the name of an update rule in optim.py.
Default is 'sgd'.
- optim_config: A dictionary containing hyperparameters that will be
passed to the chosen update rule. Each update rule requires different
hyperparameters (see optim.py) but all update rules require a
'learning_rate' parameter so that should always be present.
- lr_decay: A scalar for learning rate decay; after each epoch the learning
rate is multiplied by this value.
- batch_size: Size of minibatches used to compute loss and gradient during
training.
- num_epochs: The number of epochs to run for during training.
- print_every: Integer; training losses will be printed every print_every
iterations.
- verbose: Boolean; if set to false then no output will be printed during
training.
"""
self.model = model
self.X_train = data['X_train']
self.y_train = data['y_train']
self.X_val = data['X_val']
self.y_val = data['y_val']
# Unpack keyword arguments
self.update_rule = kwargs.pop('update_rule', 'sgd')
self.optim_config = kwargs.pop('optim_config', {})
self.lr_decay = kwargs.pop('lr_decay', 1.0)
self.batch_size = kwargs.pop('batch_size', 100)
self.num_epochs = kwargs.pop('num_epochs', 10)
self.print_every = kwargs.pop('print_every', 10)
self.verbose = kwargs.pop('verbose', True)
# Throw an error if there are extra keyword arguments
if len(kwargs) > 0:
extra = ', '.join('"%s"' % k for k in kwargs.keys())
raise ValueError('Unrecognized arguments %s' % extra)
# Make sure the update rule exists, then replace the string
# name with the actual function
if not hasattr(optim, self.update_rule):
raise ValueError('Invalid update_rule "%s"' % self.update_rule)
self.update_rule = getattr(optim, self.update_rule)
self._reset()
def _reset(self):
"""
Set up some book-keeping variables for optimization. Don't call this
manually.
"""
# Set up some variables for book-keeping
self.epoch = 0
self.best_val_acc = 0
self.best_params = {}
self.loss_history = []
self.train_acc_history = []
self.val_acc_history = []
# Make a deep copy of the optim_config for each parameter
self.optim_configs = {}
for p in self.model.params:
d = {k: v for k, v in self.optim_config.iteritems()}
self.optim_configs[p] = d
def _step(self):
"""
Make a single gradient update. This is called by train() and should not
be called manually.
"""
# Make a minibatch of training data
num_train = self.X_train.shape[0]
batch_mask = np.random.choice(num_train, self.batch_size)
X_batch = self.X_train[batch_mask]
y_batch = self.y_train[batch_mask]
# Compute loss and gradient
loss, grads = self.model.loss(X_batch, y_batch)
self.loss_history.append(loss)
# Perform a parameter update
for p, w in self.model.params.iteritems():
dw = grads[p]
config = self.optim_configs[p]
next_w, next_config = self.update_rule(w, dw, config)
self.model.params[p] = next_w
self.optim_configs[p] = next_config
def check_accuracy(self, X, y, num_samples=None, batch_size=100):
"""
Check accuracy of the model on the provided data.
Inputs:
- X: Array of data, of shape (N, d_1, ..., d_k)
- y: Array of labels, of shape (N,)
- num_samples: If not None, subsample the data and only test the model
on num_samples datapoints.
- batch_size: Split X and y into batches of this size to avoid using too
much memory.
Returns:
- acc: Scalar giving the fraction of instances that were correctly
classified by the model.
"""
# Maybe subsample the data
N = X.shape[0]
if num_samples is not None and N > num_samples:
mask = np.random.choice(N, num_samples)
N = num_samples
X = X[mask]
y = y[mask]
# Compute predictions in batches
num_batches = N / batch_size
if N % batch_size != 0:
num_batches += 1
y_pred = []
for i in xrange(num_batches):
start = i * batch_size
end = (i + 1) * batch_size
scores = self.model.loss(X[start:end])
y_pred.append(np.argmax(scores, axis=1))
y_pred = np.hstack(y_pred)
acc = np.mean(y_pred == y)
return acc
def train(self):
"""
Run optimization to train the model.
"""
num_train = self.X_train.shape[0]
iterations_per_epoch = max(num_train / self.batch_size, 1)
num_iterations = self.num_epochs * iterations_per_epoch
for t in xrange(num_iterations):
self._step()
# Maybe print training loss
if self.verbose and t % self.print_every == 0:
print '(Iteration %d / %d) loss: %f' % (
t + 1, num_iterations, self.loss_history[-1])
# At the end of every epoch, increment the epoch counter and decay the
# learning rate.
epoch_end = (t + 1) % iterations_per_epoch == 0
if epoch_end:
self.epoch += 1
for k in self.optim_configs:
self.optim_configs[k]['learning_rate'] *= self.lr_decay
# Check train and val accuracy on the first iteration, the last
# iteration, and at the end of each epoch.
first_it = (t == 0)
last_it = (t == num_iterations - 1)
if first_it or last_it or epoch_end:
train_acc = self.check_accuracy(self.X_train, self.y_train,
num_samples=1000)
val_acc = self.check_accuracy(self.X_val, self.y_val)
self.train_acc_history.append(train_acc)
self.val_acc_history.append(val_acc)
if self.verbose:
print '(Epoch %d / %d) train acc: %f; val_acc: %f' % (
self.epoch, self.num_epochs, train_acc, val_acc)
# Keep track of the best model
if val_acc > self.best_val_acc:
self.best_val_acc = val_acc
self.best_params = {}
for k, v in self.model.params.iteritems():
self.best_params[k] = v.copy()
# At the end of training swap the best params into the model
self.model.params = self.best_params
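# A rough sketch of the update-rule contract that _step relies on: every rule
# in optim.py is called as update_rule(w, dw, config) and returns the updated
# weights together with the (possibly modified) config dict. Plain SGD would
# look approximately like this:
#
# def sgd(w, dw, config=None):
#     if config is None:
#         config = {}
#     config.setdefault('learning_rate', 1e-2)
#     next_w = w - config['learning_rate'] * dw
#     return next_w, config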
| {
"content_hash": "994a28c9388f39f4ca1ead11884a9632",
"timestamp": "",
"source": "github",
"line_count": 254,
"max_line_length": 83,
"avg_line_length": 41.10236220472441,
"alnum_prop": 0.5912835249042145,
"repo_name": "machinelearningnanodegree/stanford-cs231",
"id": "911524673819f809939cca052fab55c7f6d95883",
"size": "10440",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "solutions/kvn219/assignment2/cs231n/solver.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "13235975"
},
{
"name": "Python",
"bytes": "966466"
},
{
"name": "Shell",
"bytes": "9479"
}
],
"symlink_target": ""
} |
import logging
import json
from flask import Blueprint, request, jsonify, current_app
from sqlalchemy.exc import IntegrityError
from .core import db
from .services import (
UserGroupService, PermissionService, UserGroupMemberService,
UserPermissionService, UserGroupPermissionService,
VerificationService,
)
bp = Blueprint('flask_perm_api', __name__)
def ok(data=None, count=1):
response = jsonify(code=0, message='success', data=data)
if count:
response.headers['X-Total-Count'] = count
return response
return response
def bad_request(message='bad request', **data):
return jsonify(code=1, message=message, data=data), 400
def not_found(message='not found', **data):
return jsonify(code=1, message=message, data=data), 404
def forbidden(message='forbidden', **data):
return jsonify(code=1, message=message, data=data), 403
def check_auth(username, password):
return current_app.config['PERM_ADMIN_USERNAME'] == username and \
current_app.config['PERM_ADMIN_PASSWORD'] == password
def current_perm():
return current_app.extensions['perm']
def log_action(data, **kwargs):
data = dict(data)
data.update(kwargs)
current_perm().log_admin_action(data)
@bp.before_request
def before_request():
if not current_perm().has_perm_admin_logined():
return forbidden()
@bp.errorhandler(IntegrityError)
def detect_integrity_error(e):
return bad_request('conflict')
@bp.route('/permissions', methods=['POST'])
def add_permission():
data = request.get_json()
if 'title' not in data:
return bad_request('missing title field')
if not data['title']:
return bad_request('title is blank')
title = data['title']
code = data.get('code')
permission = PermissionService.create(title, code)
permission = PermissionService.rest(permission)
log_action(permission, action='add', model='permission')
return ok(permission)
def _get_filter_by():
filter_by = request.args.get('_filters')
if filter_by:
try:
filter_by = json.loads(filter_by)
except ValueError:
pass
return filter_by
@bp.route('/permissions')
def get_permissions():
offset = request.args.get('offset', type=int, default=0)
limit = request.args.get('limit', type=int, default=20)
sort_field = request.args.get('_sortField', 'created_at').lower()
sort_dir = request.args.get('_sortDir', 'DESC').lower()
filter_by = _get_filter_by()
permissions = PermissionService.filter_permissions(
filter_by, offset, limit, sort_field, sort_dir)
count = PermissionService.count_filter_permission(filter_by, offset, limit)
permissions = map(PermissionService.rest, permissions)
return ok(permissions, count)
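# A sketch of how the list endpoints above are queried; the filter value is
# illustrative and `_filters` is a JSON-encoded object, matching _get_filter_by
# (paths shown without any url_prefix the blueprint may be mounted under):
#
#   GET /permissions?offset=0&limit=20&_sortField=title&_sortDir=ASC
#       &_filters={"title": "can_publish"}
#
# Responses carry the payload under "data" and the total row count in the
# X-Total-Count header, as built by ok().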
@bp.route('/permissions/<int:permission_id>')
def get_permission(permission_id):
permission = PermissionService.get(permission_id)
if not permission:
return not_found()
permission = PermissionService.rest(permission)
return ok(permission)
@bp.route('/permissions/<int:permission_id>', methods=['PUT'])
def update_permission(permission_id):
permission = PermissionService.get(permission_id)
if not permission:
return not_found()
if request.get_json().get('title'):
PermissionService.rename(permission_id, request.get_json().get('title'))
if request.get_json().get('code'):
PermissionService.set_code(permission_id, request.get_json().get('code'))
permission = PermissionService.rest(PermissionService.get(permission_id))
log_action(permission, action='update', model='permission')
return ok(permission)
@bp.route('/permissions/<int:permission_id>', methods=['DELETE'])
def delete_permission(permission_id):
permission = PermissionService.get(permission_id)
if not permission:
return not_found()
log_action(PermissionService.rest(permission), action='delete', model='permission')
UserPermissionService.delete_by_permission(permission_id)
UserGroupPermissionService.delete_by_permission(permission_id)
PermissionService.delete(permission_id)
return ok()
@bp.route('/user_permissions')
def get_user_permissions():
offset = request.args.get('offset', type=int, default=0)
limit = request.args.get('limit', type=int, default=20)
sort_field = request.args.get('_sortField', 'created_at').lower()
sort_dir = request.args.get('_sortDir', 'DESC').lower()
filter_by = _get_filter_by()
user_permissions = UserPermissionService.filter_user_permissions(
filter_by, offset, limit, sort_field, sort_dir)
count = UserPermissionService.count_filter_user_permission(filter_by, offset, limit)
user_permissions = map(UserPermissionService.rest, user_permissions)
return ok(user_permissions, count)
@bp.route('/user_permissions', methods=['POST'])
def add_user_permission():
data = request.get_json()
try:
permission_id = data['permission_id']
user_id = data['user_id']
except KeyError:
return bad_request()
permission = PermissionService.get(permission_id)
if not permission:
return not_found()
user_permission = UserPermissionService.create(user_id, permission_id)
user_permission = UserPermissionService.rest(user_permission)
log_action(user_permission, action='add', model='user_permission')
return ok(user_permission)
@bp.route('/user_permissions/<int:user_permission_id>', methods=['DELETE'])
def revoke_user_permission(user_permission_id):
user_permission = UserPermissionService.get(user_permission_id)
if not user_permission:
return not_found()
log_action(UserPermissionService.rest(user_permission), action='delete', model='user_permission')
UserPermissionService.delete(user_permission_id)
return ok()
@bp.route('/user_group_permissions')
def get_user_group_permissions():
offset = request.args.get('offset', type=int, default=0)
limit = request.args.get('limit', type=int, default=20)
sort_field = request.args.get('_sortField', 'created_at').lower()
sort_dir = request.args.get('_sortDir', 'DESC').lower()
filter_by = _get_filter_by()
user_group_permissions = UserGroupPermissionService.filter_user_group_permissions(
filter_by, offset, limit, sort_field, sort_dir)
count = UserGroupPermissionService.count_filter_user_group_permissions(
filter_by, offset, limit)
user_group_permissions = map(UserGroupPermissionService.rest, user_group_permissions)
return ok(user_group_permissions, count)
@bp.route('/user_group_permissions', methods=['POST'])
def add_user_group_permission():
data = request.get_json()
try:
permission_id = data['permission_id']
user_group_id = data['user_group_id']
except KeyError:
return bad_request()
permission = PermissionService.get(permission_id)
if not permission:
return not_found()
user_group_permission = UserGroupPermissionService.create(user_group_id, permission_id)
user_group_permission = UserGroupPermissionService.rest(user_group_permission)
log_action(user_group_permission, action='add', model='user_permission')
return ok(user_group_permission)
@bp.route('/user_group_permissions/<int:user_group_permission_id>', methods=['DELETE'])
def revoke_user_group_permission(user_group_permission_id):
user_group_permission = UserGroupPermissionService.get(user_group_permission_id)
if not user_group_permission:
return not_found()
log_action(UserGroupPermissionService.rest(user_group_permission),
action='delete', model='user_group_permission')
UserGroupPermissionService.delete(user_group_permission_id)
return ok()
@bp.route('/user_groups', methods=['POST'])
def add_user_group():
try:
data = request.get_json()
title = data['title']
code = data['code']
except KeyError:
return bad_request()
user_group = UserGroupService.create(title, code)
user_group = UserGroupService.rest(user_group)
log_action(user_group, action='add', model='user_group')
return ok(user_group)
@bp.route('/user_groups')
def get_user_groups():
offset = request.args.get('offset', type=int, default=0)
limit = request.args.get('limit', type=int, default=20)
sort_field = request.args.get('_sortField', 'created_at').lower()
sort_dir = request.args.get('_sortDir', 'DESC').lower()
filter_by = _get_filter_by()
user_groups = UserGroupService.filter_user_groups(
filter_by, offset, limit, sort_field, sort_dir)
count = UserGroupService.count_filter_user_group(filter_by, offset, limit)
user_groups = map(UserGroupService.rest, user_groups)
return ok(user_groups, count)
@bp.route('/user_groups/<int:user_group_id>')
def get_user_group(user_group_id):
user_group = UserGroupService.get(user_group_id)
if not user_group:
return not_found()
user_group = UserGroupService.rest(user_group)
return ok(user_group)
@bp.route('/user_groups/<int:user_group_id>', methods=['PUT'])
def update_user_group(user_group_id):
user_group = UserGroupService.get(user_group_id)
if not user_group:
return not_found()
data = request.get_json()
if 'title' in data and data['title']:
UserGroupService.rename(user_group_id, data['title'])
if 'code' in data and data['code']:
UserGroupService.update_code(user_group_id, data['code'])
user_group = UserGroupService.rest(UserGroupService.get(user_group_id))
log_action(user_group, action='update', model='user_group')
return ok(user_group)
@bp.route('/user_groups/<int:user_group_id>', methods=['DELETE'])
def delete_user_group(user_group_id):
user_group = UserGroupService.get(user_group_id)
if not user_group:
return not_found()
log_action(UserGroupService.rest(user_group), action='delete', model='user_group')
UserGroupPermissionService.delete_by_user_group(user_group_id)
UserGroupService.delete(user_group_id)
return ok()
@bp.route('/user_group_members')
def get_user_group_members():
offset = request.args.get('offset', type=int, default=0)
limit = request.args.get('limit', type=int, default=20)
sort_field = request.args.get('_sortField', 'created_at').lower()
sort_dir = request.args.get('_sortDir', 'DESC').lower()
filter_by = _get_filter_by()
members = UserGroupMemberService.filter_user_group_members(
filter_by, offset, limit, sort_field, sort_dir)
count = UserGroupMemberService.count_filter_user_group_members(filter_by, offset, limit)
members = map(UserGroupMemberService.rest, members)
return ok(members, count)
@bp.route('/user_group_members', methods=['POST'])
def add_user_group_member():
data = request.get_json()
try:
user_id = data['user_id']
user_group_id = data['user_group_id']
except KeyError:
return bad_request()
user_group = UserGroupService.get(user_group_id)
if not user_group:
return not_found()
member = UserGroupMemberService.create(user_id, user_group_id)
member = UserGroupMemberService.rest(member)
log_action(member, action='add', model='user_group_member')
return ok(member)
@bp.route('/user_group_members/<int:user_group_member_id>', methods=['DELETE'])
def delete_user_from_user_group(user_group_member_id):
user_group_member = UserGroupMemberService.get(user_group_member_id)
if not user_group_member:
return not_found()
log_action(UserGroupMemberService.rest(user_group_member), action='delete', model='user_group_member')
UserGroupMemberService.delete(user_group_member_id)
return ok()
def jsonify_user(user):
return dict(id=user.id, nickname=user.nickname)
@bp.route('/users')
def get_users():
offset = request.args.get('offset', type=int, default=0)
limit = request.args.get('limit', type=int, default=20)
sort_field = request.args.get('_sortField', 'created_at').lower()
sort_dir = request.args.get('_sortDir', 'DESC').lower()
filter_by = _get_filter_by()
users = current_perm().load_users(filter_by, sort_field, sort_dir, offset, limit)
users = map(jsonify_user, users)
return ok(users)
@bp.route('/users/<int:user_id>')
def get_user(user_id):
user = current_perm().load_user(user_id)
if not user:
return not_found()
return ok(jsonify_user(user))
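# A minimal sketch of what mounting this blueprint looks like; the url_prefix
# is illustrative, and in practice the flask_perm extension object is expected
# to do the wiring (before_request relies on current_app.extensions['perm']):
#
#   app.config['PERM_ADMIN_USERNAME'] = 'admin'
#   app.config['PERM_ADMIN_PASSWORD'] = 'change-me'
#   app.register_blueprint(bp, url_prefix='/perm-api')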
| {
"content_hash": "15c0d4ad28c816bf38cb6085901b848c",
"timestamp": "",
"source": "github",
"line_count": 323,
"max_line_length": 106,
"avg_line_length": 38.53250773993808,
"alnum_prop": 0.6933954684235899,
"repo_name": "soasme/flask-perm",
"id": "4beaf95bcc690e289a8edceb56332b0f03dc26f1",
"size": "12471",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flask_perm/api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1448"
},
{
"name": "JavaScript",
"bytes": "5557"
},
{
"name": "Makefile",
"bytes": "378"
},
{
"name": "Python",
"bytes": "60761"
}
],
"symlink_target": ""
} |
"""
Tool to find the USB device that a board is available at.
This is a complicated process as the FX2 is software configurable and hence
could end up under many different VID:PID names based on what firmware is
currently loaded onto it.
"""
import logging
import os
import os.path
import sys
import time
import subprocess
import re
from collections import namedtuple
from . import lsusb as usbapi
from . import files
def assert_in(needle, haystack):
assert needle in haystack, "%r not in %r" % (needle, haystack)
__filepath__ = os.path.dirname(__file__)
FIRMWARE_DIR = os.path.abspath(os.path.realpath(
os.path.join(__filepath__, '..', 'firmware')))
assert os.path.exists(FIRMWARE_DIR)
def firmware_path(filepath):
locations = ['']
locations.append(os.getcwd())
locations.append(FIRMWARE_DIR)
for loc in locations:
fullname = os.path.join(loc, filepath)
fullname = os.path.abspath(os.path.realpath(fullname))
if os.path.exists(fullname):
return fullname
assert False, "{} not found in {}".format(filepath, locations)
def poll_until(condition, timeout_sec, dt=0.1):
start_time = time.time()
satisfied = condition()
while not satisfied and (time.time() - start_time) < timeout_sec:
satisfied = condition()
time.sleep(dt)
return satisfied
BOARD_TYPES = [
'opsis',
'atlys',
]
BOARD_NAMES = {
'atlys': "Digilent Atlys",
'opsis': "Numato Opsis",
}
BOARD_STATES = [
'unconfigured',
'jtag',
'serial',
'eeprom',
'operational',
]
BOARD_FPGA = {
'atlys': "6slx45csg324",
'opsis': "6slx45tfgg484",
}
BOARD_FLASH_MAP = {
# https://github.com/timvideos/HDMI2USB-litex-firmware/blob/master/targets/atlys/base.py#L205-L215
'atlys': {'gateware': 0x0, 'bios': 0x00200000, 'firmware': 0x00208000},
# https://github.com/timvideos/HDMI2USB-litex-firmware/blob/master/targets/opsis/base.py#L256-L266
'opsis': {'gateware': 0x0, 'bios': 0x00200000, 'firmware': 0x00208000},
# https://github.com/timvideos/HDMI2USB-litex-firmware/blob/master/targets/mimasv2/base.py#L208-L220
'mimasv2': {'gateware': 0x0, 'bios': 0x00080000, 'firmware': 0x00088000},
}
USBJTAG_MAPPING = {
'hw_nexys': 'atlys',
'hw_opsis': 'opsis',
}
USBJTAG_RMAPPING = {v: k for k, v in USBJTAG_MAPPING.items()}
OPENOCD_MAPPING = {
'atlys': "board/digilent_atlys.cfg",
'opsis': "board/numato_opsis.cfg",
}
OPENOCD_FLASHPROXY = {
'opsis': firmware_path('spartan6/opsis/bscan_spi_xc6slx45t.bit'),
'atlys': firmware_path('spartan6/atlys/bscan_spi_xc6slx45.bit'),
}
FX2_MODE_MAPPING = {
'jtag': 'ixo-usb-jtag.hex',
'serial': 'usb-uart.ihx',
'eeprom': 'eeprom.ihx',
}
BoardBase = namedtuple("Board", ["dev", "type", "state"])
class Board(BoardBase):
def tty(self):
return self.dev.tty()
def detach_board_drivers(board, verbose=False):
if board.dev.inuse():
if verbose:
sys.stderr.write("Detaching drivers from board.\n")
board.dev.detach()
def load_fx2(board, mode=None, filename=None, verbose=False):
if mode is not None:
assert filename is None
filename = firmware_path(
'fx2/{}/{}'.format(board.type, FX2_MODE_MAPPING[mode]))
detach_board_drivers(board, verbose=verbose)
filepath = firmware_path(filename)
assert os.path.exists(filepath), filepath
sys.stderr.write("Using FX2 firmware %s\n" % filename)
cmdline = "fxload -t fx2lp".split()
cmdline += ["-D", str(board.dev.path)]
cmdline += ["-I", filepath]
if verbose:
cmdline += ["-v", ]
if verbose:
sys.stderr.write("Running %r\n" % " ".join(cmdline))
env = os.environ.copy()
env['PATH'] = env['PATH'] + ':/usr/sbin:/sbin'
try:
output = subprocess.check_output(
cmdline, stderr=subprocess.STDOUT, env=env)
if verbose > 2:
sys.stderr.write(output.decode('utf-8'))
except subprocess.CalledProcessError as e:
if b"can't modify CPUCS: Protocol error\n" not in e.output:
print(e.output)
raise
def load_fx2_dfu_bootloader(board, verbose=False, filename='boot-dfu.ihex'):
"""
Loads bootloader firmware onto given board and updates the board to point
to correct device. The device is identified using previous SysFs path of
the device, which should be guaranteed not to change.
"""
# use current sysfs path to later identify the bootloader after enumeration
dev_syspath = sorted(board.dev.syspaths)[0]
# because the sysfs path does not dissappear after loading new firmware,
# we also have to make sure that the device path (/dev/bus/usb/xxx/xxx)
# is different to ensure that we are dealing with a new device
previous_dev_path = board.dev.path
def is_bootloader(dev):
is_new_dev = dev.path != previous_dev_path
same_syspath = dev_syspath in dev.syspaths
return is_new_dev and same_syspath
def find_bootloader():
devices = filter(is_bootloader, usbapi.find_usb_devices())
return list(devices)
load_fx2(board, filename=filename, verbose=verbose)
# wait for the new device to enumerate
devices_found = poll_until(condition=find_bootloader, timeout_sec=3)
assert len(devices_found) > 0, 'Bootloader not found'
assert len(devices_found) == 1, 'More than one bootloader found'
board = Board(dev=devices_found[0], type=board.type, state='dfu-boot')
return board
def flash_fx2(board, filename, verbose=False):
assert filename.endswith('.dfu'), 'Firmware file must be in DFU format.'
detach_board_drivers(board, verbose=verbose)
filepath = firmware_path(filename)
assert os.path.exists(filepath), filepath
sys.stderr.write("Using FX2 firmware %s\n" % filename)
cmdline = ["dfu-util", "-D", filepath]
if verbose:
cmdline += ["-v", ]
if verbose:
sys.stderr.write("Running %r\n" % " ".join(cmdline))
env = os.environ.copy()
env['PATH'] = env['PATH'] + ':/usr/sbin:/sbin'
output = subprocess.run(cmdline, stderr=subprocess.STDOUT, env=env)
class OpenOCDError(subprocess.CalledProcessError):
def __init__(
self, msg, fatal_errors, retry_errors, returncode, cmd, output):
subprocess.CalledProcessError.__init__(
self, returncode, cmd, output)
fatal = ""
if fatal_errors:
fatal = "\n".join(
["\nFound fatal errors: "] + [" - " + f for f in fatal_errors]
)
fatal += "\n"
retry = ""
if retry_errors:
retry = "\n".join(
["\nFound retry errors: "] + [" - " + f for f in retry_errors]
)
self.message = """\
OpenOCD run failure: {msg}.
{fatal}{retry}
OpenOCD command line resulted in {returncode}
-----
{cmd}
-----
OpenOCD output:
-----
{output}
-----
""".format(msg=msg, retry=retry, fatal=fatal, returncode=returncode, cmd=cmd,
output=output)
def __str__(self):
return self.message
class OpenOCDRetryError(OpenOCDError):
pass
def _openocd_script(board, script, verbose=False):
assert board.state == "jtag", board
assert not board.dev.inuse()
assert board.type in OPENOCD_MAPPING
if verbose > 1:
sys.stderr.write(
"Using OpenOCD script:\n{}\n".format(";\n".join(script)))
cmdline = ["openocd"]
cmdline += ["-f", OPENOCD_MAPPING[board.type]]
cmdline += ["-c", "; ".join(script)]
if verbose > 1:
cmdline += ["--debug={}".format(verbose - 2)]
if verbose:
sys.stderr.write("Running %r\n" % cmdline)
p = subprocess.Popen(
cmdline,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
if not verbose:
output, _ = p.communicate()
output = output.decode('utf-8')
else:
output = []
while True:
output.append(p.stdout.readline().decode('utf-8'))
sys.stdout.write(output[-1])
if p.poll() is not None:
break
output = "".join(output)
# Look for common errors in the OpenOCD output
retry_strings = [
# DNA Failed to read correctly if this error is seen.
"DNA = [01]+ \\(0x18181818.*\\)",
# JTAG Errors
"Info : TAP xc6s.tap does not have IDCODE",
"Warn : Bypassing JTAG setup events due to errors",
"Error: Trying to use configured scan chain anyway...",
]
retry_error_msgs = set()
for msg in retry_strings:
found = re.search(msg, output)
if not found:
continue
retry_error_msgs.add(found.group(0))
# Look for common errors in the OpenOCD output
fatal_strings = [
# FIXME: Put something here.
]
fatal_error_msgs = set()
for msg in fatal_strings:
found = re.search(msg, output)
if not found:
continue
fatal_error_msgs.add(found.group(0))
if p.returncode == 0 and not retry_error_msgs.union(fatal_error_msgs):
return
if fatal_error_msgs:
msg = "Fatal error!"
openocd_error = OpenOCDError
else:
msg = "Error which means we should retry..."
openocd_error = OpenOCDRetryError
raise openocd_error(
msg,
fatal_error_msgs,
retry_error_msgs,
p.returncode,
cmdline,
output,
)
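# A sketch of how callers might react to the two error classes above: retry a
# bounded number of times on OpenOCDRetryError (transient JTAG/DNA problems)
# and give up immediately on any other OpenOCDError. The retry count is
# illustrative.
#
# def _openocd_script_with_retries(board, script, verbose=False, tries=3):
#     for attempt in range(tries):
#         try:
#             return _openocd_script(board, script, verbose=verbose)
#         except OpenOCDRetryError:
#             if attempt == tries - 1:
#                 raise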
def _openocd_flash(board, filepath, location, verbose=False):
assert board.type in OPENOCD_FLASHPROXY
proxypath = os.path.abspath(OPENOCD_FLASHPROXY[board.type])
assert os.path.exists(proxypath), proxypath
script = ["init"]
script += ["xc6s_print_dna xc6s.tap"]
script += ["jtagspi_init 0 {}".format(proxypath)]
if verbose > 1:
script += ["flash banks"]
script += ["flash list"]
if verbose > 2:
script += ["flash info 0"]
# script += ["flash read_bank 0 backup.bit 0 0x01000000"]
script += [
"jtagspi_program {} 0x{:x}".format(filepath, location),
"exit"
]
try:
return _openocd_script(board, script, verbose=verbose)
finally:
print("After flashing, the board will need to be power cycled.")
def reset_gateware(board, verbose=False):
script = ["init"]
script += ["xc6s_print_dna xc6s.tap"]
script += ["reset halt"]
script += ["exit"]
return _openocd_script(board, script, verbose=verbose)
def load_gateware(board, filename, verbose=False):
filepath = firmware_path(filename)
assert os.path.exists(filepath), filepath
assert filename.endswith(".bit"), "Loading requires a .bit file"
xfile = files.XilinxBitFile(filepath)
assert xfile.part == BOARD_FPGA[board.type], (
"Bit file must be for {} (not {})".format(
BOARD_FPGA[board.type], xfile.part))
script = ["init"]
script += ["xc6s_print_dna xc6s.tap"]
script += ["pld load 0 {}".format(filepath)]
script += ["reset halt"]
script += ["exit"]
return _openocd_script(board, script, verbose=verbose)
def flash_gateware(board, filename, verbose=False):
filepath = firmware_path(filename)
assert os.path.exists(filepath), filepath
assert filename.endswith(".bin"), "Flashing requires a Xilinx .bin file"
xfile = files.XilinxBinFile(filepath)
_openocd_flash(
board,
filepath,
BOARD_FLASH_MAP[board.type]['gateware'],
verbose=verbose)
def flash_bios(board, filename, verbose=False):
filepath = firmware_path(filename)
assert os.path.exists(filepath), filepath
assert filename.endswith(".bin"), "Flashing requires a .bin file"
# FIXME: Bios files have the CRC at the end, should check that here.
_openocd_flash(
board,
filepath,
BOARD_FLASH_MAP[board.type]['bios'],
verbose=verbose)
def flash_firmware(board, filename, verbose=False):
assert board.state == "jtag", board
assert not board.dev.inuse()
assert board.type in OPENOCD_MAPPING
if filename is not None:
filepath = firmware_path(filename)
assert os.path.exists(filepath), filepath
assert filename.endswith(".fbi"), "Flashing requires a .fbi file"
fbifile = files.FlashBootImageFile(filepath)
else:
filepath = firmware_path("zero.bin")
_openocd_flash(
board,
filepath,
BOARD_FLASH_MAP[board.type]['firmware'],
verbose=verbose)
flash_image = flash_gateware
def find_boards(prefer_hardware_serial=True, verbose=False):
all_boards = []
exart_uarts = []
for device in usbapi.find_usb_devices():
if False:
pass
# https://github.com/timvideos/HDMI2USB/wiki/USB-IDs
# Digilent Atlys
# --------------------------
# Digilent Atlys board with stock "Adept" firmware
# Bus 003 Device 019: ID 1443:0007 Digilent Development board JTAG
if device.vid == 0x1443 and device.pid == 0x0007:
all_boards.append(
Board(dev=device, type="atlys", state="unconfigured"))
# Digilent Atlys board unconfigured mode with Openmoko ID
# Bus 003 Device 019: ID 1d50:60b5
elif device.vid == 0x1d50 and device.pid == 0x60b5:
all_boards.append(
Board(dev=device, type="atlys", state="unconfigured"))
# Digilent Atlys board JTAG/firmware upgrade mode with Openmoko ID.
# Device ID 0x10 indicates test JTAG mode, 0x11 indicates test Serial,
# 0x12 indicates test Audio and 0x13 indicates test UVC.
# Bus 003 Device 019: ID 1d50:60b6
elif device.vid == 0x1d50 and device.pid == 0x60b6:
if device.did == '0001':
all_boards.append(
Board(dev=device, type="atlys", state="jtag"))
elif device.did == '0010':
all_boards.append(
Board(dev=device, type="atlys", state="test-jtag"))
elif device.did == '0011':
all_boards.append(
Board(dev=device, type="atlys", state="test-serial"))
elif device.did == '0012':
all_boards.append(
Board(dev=device, type="atlys", state="test-audio"))
elif device.did == '0013':
all_boards.append(
Board(dev=device, type="atlys", state="test-uvc"))
else:
all_boards.append(
Board(dev=device, type="atlys", state="test-???"))
# Digilent Atlys board in operational mode with Openmoko ID.
# Bus 003 Device 019: ID 1d50:60b7
elif device.vid == 0x1d50 and device.pid == 0x60b7:
all_boards.append(
Board(dev=device, type="atlys", state="operational"))
elif device.vid == 0x04e2 and device.pid == 0x1410:
exart_uarts.append(device)
# Numato Opsis
# --------------------------
# The Numato Opsis will boot in the following mode when the EEPROM is
# not set up correctly.
# http://opsis.hdmi2usb.tv/getting-started/usb-ids.html#failsafe-mode
# Bus 003 Device 091: ID 04b4:8613 Cypress Semiconductor Corp.
# CY7C68013 EZ-USB FX2 USB 2.0 Development Kit
elif device.vid == 0x04b4 and device.pid == 0x8613:
all_boards.append(
Board(dev=device, type="opsis", state="unconfigured"))
# The preproduction Numato Opsis shipped to Champions will boot into
# this mode by default.
# The production Numato Opsis will fallback to booting in the following
# mode when the FPGA doesn't have EEPROM emulation working.
# http://opsis.hdmi2usb.tv/getting-started/usb-ids.html#unconfigured-mode
# Bus 003 Device 091: ID 2a19:5440 Numato Opsis (Unconfigured Mode)
elif device.vid == 0x2A19 and device.pid == 0x5440:
all_boards.append(
Board(dev=device, type="opsis", state="unconfigured"))
# The production Numato Opsis will boot in this mode when SW1 is held
# during boot, or when held for 5 seconds with correctly configured
# FPGA gateware.
# http://opsis.hdmi2usb.tv/getting-started/usb-ids.html#usb-jtag-and-usb-uart-mode
# Bus 003 Device 091: ID 2a19:5441 Numato Opsis (JTAG and USB Mode)
elif device.vid == 0x2A19 and device.pid == 0x5441:
if device.did == '0001':
all_boards.append(
Board(dev=device, type="opsis", state="jtag"))
elif device.did == '0002':
all_boards.append(
Board(dev=device, type="opsis", state="eeprom"))
elif device.did == '0003':
all_boards.append(
Board(dev=device, type="opsis", state="serial"))
elif device.did == '0011':
all_boards.append(
Board(dev=device, type="opsis", state="test-serial"))
elif device.did == '0012':
all_boards.append(
Board(dev=device, type="opsis", state="test-audio"))
elif device.did == '0013':
all_boards.append(
Board(dev=device, type="opsis", state="test-uvc"))
else:
assert False, "Unknown mode: %s" % device.did
# The production Numato Opsis will boot in this mode by default.
# http://opsis.hdmi2usb.tv/getting-started/usb-ids.html#hdmi2usb.tv-mode
# Bus 003 Device 091: ID 2a19:5441 Numato Opsis (HDMI2USB.tv mode)
elif device.vid == 0x2A19 and device.pid == 0x5442:
all_boards.append(
Board(dev=device, type="opsis", state="operational"))
# ixo-usb-jtag
# --------------------------
# Boards loaded with the ixo-usb-jtag firmware from mithro's repo
# https://github.com/mithro/ixo-usb-jtag
# Bus 003 Device 090: ID 16c0:06ad Van Ooijen Technische Informatica
elif device.vid == 0x16c0 and device.pid == 0x06ad:
if device.did in ('0001', '0004'):
if device.serialno not in USBJTAG_MAPPING:
logging.warn("Unknown usb-jtag device! %r (%s)",
device.serialno, device)
continue
all_boards.append(Board(
dev=device, type=USBJTAG_MAPPING[device.serialno],
state="jtag"))
elif device.did == 'ff00':
all_boards.append(
Board(dev=device, type='opsis', state="jtag"))
else:
logging.warn(
"Unknown usb-jtag device version! %r (%s)",
device.did,
device)
continue
# FIXME: This is a horrible hack!?@
# Patch the Atlys board so the exar_uart is associated with it.
atlys_boards = [b for b in all_boards if b.type == "atlys"]
if exart_uarts and atlys_boards:
if verbose:
sys.stderr.write(
" Found exart-uarts at %s associating with Atlys at %s\n" %
(exart_uarts, atlys_boards))
assert len(exart_uarts) == len(atlys_boards), repr(
(exart_uarts, atlys_boards))
assert len(atlys_boards) == 1
def extra_tty(
uart=exart_uarts[0],
board=atlys_boards[0],
prefer=prefer_hardware_serial):
if prefer:
return uart.tty() + board.dev.tty()
else:
return board.dev.tty() + uart.tty()
atlys_boards[0].tty = extra_tty
return all_boards
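# A short usage sketch for find_boards; the output depends on what hardware
# is attached:
#
#   for board in find_boards(verbose=True):
#       print("%s board in %s state at %s" % (
#           BOARD_NAMES[board.type], board.state, board.dev.path))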
| {
"content_hash": "596b2b270534ac552f5d4582af1bf413",
"timestamp": "",
"source": "github",
"line_count": 599,
"max_line_length": 104,
"avg_line_length": 32.97662771285476,
"alnum_prop": 0.5969726117551765,
"repo_name": "timvideos/HDMI2USB-mode-switch",
"id": "a65434f6693d6744559c8edc8a48b28f967fb1c1",
"size": "19810",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hdmi2usb/modeswitch/boards.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1808"
},
{
"name": "Makefile",
"bytes": "3452"
},
{
"name": "Python",
"bytes": "150202"
},
{
"name": "Shell",
"bytes": "1204"
}
],
"symlink_target": ""
} |
import os
import time
import sys
import uuid
from threading import Thread
import yaml
from PyQt4.QtGui import *
from PyQt4.QtCore import *
import libtorrent as lt
from pytvdbapi import api as TVDB
from tpb import TPB, ORDERS as TPB_ORDERS
from ui.mainWindow import Ui_MainWindow
from libs.progress import Progress
class ZTorrent:
def __init__(self, handler):
self.handler = handler
self.data = {}
def update(self, **kwargs):
for key, value in kwargs.items():
self.data[key] = value
def get(self, key):
try:
return self.data[key]
except KeyError:
return None
class DownloadManager:
lt_states_str = ['queued', 'checking', 'downloading metadata',
'downloading', 'finished', 'seeding',
'allocating', 'checking fast-resume']
def __init__(self, instance):
self.instance = instance
self.session = lt.session()
self.session.listen_on(6881, 6891)
self.session.add_dht_router("router.utorrent.com", 6881)
self.session.start_dht()
self.torrents = {}
def add_magnet(self, magnet, completion_callback=None, play_callback=None):
uid = uuid.uuid4().hex
self.torrents[uid] = ZTorrent(
lt.add_magnet_uri(self.session, magnet, {'save_path': self.instance.settings.get('save_path')}))
self.torrents[uid].handler.set_sequential_download(True)
download_speed_limit = self.instance.settings.get('download_speed_limit')
if download_speed_limit is not None and download_speed_limit > 0:
self.torrents[uid].handler.set_download_limit(download_speed_limit * 1000)
Thread(target=self.download_worker, args=(self.torrents[uid], completion_callback, play_callback,)).start()
return self.torrents[uid]
def get_torrents(self):
return self.session.get_torrents()
def download_worker(self, torrent, completion_callback=None, play_callback=None):
while not torrent.handler.has_metadata():
time.sleep(.1)
info = torrent.handler.get_torrent_info()
eta = Progress(info.total_size())
launch_at = self.instance.settings.get('launch_video_percent')
launched = False
while not torrent.handler.is_seed():
eta.increment()
status = torrent.handler.status()
torrent.update(
progress=status.progress * 100,
download_rate=status.download_rate / 1000,
upload_rate=status.upload_rate / 1000,
num_peers=status.num_peers,
state=self.lt_states_str[status.state],
eta=str(eta.time_remaining())
)
if (not launched and launch_at > 0 and status.progress >= launch_at / 100
and status.state in [3, 4, 5]):
if play_callback:
play_callback(torrent)
launched = True
time.sleep(1)
torrent.update(progress=100)
if completion_callback:
completion_callback(torrent)
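# A rough usage sketch, assuming `instance` is the ZTShows object defined
# below (it supplies the settings the manager reads: save_path,
# download_speed_limit, launch_video_percent). The magnet link is illustrative.
#
#   manager = DownloadManager(instance)
#   torrent = manager.add_magnet('magnet:?xt=urn:btih:...',
#                                completion_callback=lambda t: print('done'),
#                                play_callback=instance.open_player)
#   torrent.get('progress')  # polled fields filled in by download_worker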
class Settings:
class SettingsException(Exception):
pass
def __init__(self, path=None):
if path is None:
print('[WARNING] Settings: path is not set, the configuration will not be saved')
self.path = path
self.store = None
def load(self):
self.store = yaml.safe_load(open(self.path))
def save(self):
yaml.dump(self.store, open(self.path, 'w'), default_flow_style=False)
def get(self, key):
if self.store is None:
self.store = {}
try:
return self.store[key]
except KeyError:
return None
def set(self, key, value):
if self.store is None:
self.store = {}
self.store[key] = value
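# A sketch of the config.yml this Settings class is pointed at by ZTShows
# below; the keys mirror the ones read elsewhere in this file and the values
# are only examples:
#
#   save_path: /tmp/ztshows/
#   player_path: vlc {video} {subtitles}
#   launch_video_percent: 5
#   download_speed_limit: 0
#   tpb_base_url: https://thepiratebay.se
#   tvdb_api_key: 81DD35DB106172E7
#   subtitle_language: en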
class ZTShows:
def __init__(self):
self.settings = Settings('config.yml')
self.download_manager = DownloadManager(self)
self.api_tpb = None
self.api_tvdb = None
def load(self):
self.settings.load()
tpb_base_url = self.settings.get('tpb_base_url')
self.api_tpb = TPB('https://thepiratebay.se' if not tpb_base_url else tpb_base_url)
tvdb_api_key = self.settings.get('tvdb_api_key')
self.api_tvdb = TVDB.TVDB('81DD35DB106172E7' if not tvdb_api_key else tvdb_api_key)
def unload(self):
self.settings.save()
def search(self, query, callback):
def work(w_callback):
w_callback(self.api_tvdb.search(query, 'en'))
Thread(target=work, args=(callback,)).start()
print(self.download_manager.get_torrents())
def search_episode(self, episode, callback):
def work(w_callback):
results = self.api_tpb.search(query).order(TPB_ORDERS.SEEDERS.DES)
w_callback(results)
query = '{} s{:02d}e{:02d}'.format(episode.season.show.SeriesName,
episode.season.season_number,
episode.EpisodeNumber)
Thread(target=work, args=(callback,)).start()
def download(self, torrent, completion_callback=None, play_callback=None):
self.download_manager.add_magnet(torrent.magnet_link, completion_callback, play_callback)
def open_player(self, torrent):
def work(path):
os.system(path)
player_path = self.settings.get('player_path').format(
video='"' + self.settings.get('save_path') + torrent.handler.name() + '"', subtitles='')
t = Thread(target=work, args=(player_path,))
t.daemon = True
t.start()
class CustomProgressBar(QProgressBar):
def __init__(self, parent=None):
super(CustomProgressBar, self).__init__(parent)
self.restore()
def restore(self):
self.setTextVisible(False)
self.setValue(0)
self.setRange(0, 1)
def indeterminate(self):
self.setTextVisible(False)
self.setValue(0)
self.setRange(0, 0)
def start(self, max_value):
self.setTextVisible(True)
self.setValue(0)
self.setRange(0, max_value)
def progress(self, current_value):
self.setTextVisible(True)
self.setValue(current_value)
class MainWindow(QMainWindow, Ui_MainWindow):
def __init__(self, instance):
super(MainWindow, self).__init__()
self.instance = instance
self.instance.load()
self.progress = None
self.setupUi(self)
self.init_ui()
def init_ui(self):
self.init_menu()
self.init_tabs()
self.setCentralWidget(self.tabWidget)
self.setMinimumSize(600, 400)
self.init_status_bar()
self.setWindowTitle('ZTShows - Be simple')
self.show()
def init_status_bar(self):
self.progress = CustomProgressBar()
self.statusBar().showMessage('Ready')
self.statusBar().addPermanentWidget(self.progress)
def init_menu(self):
def exit_now():
self.instance.unload()
qApp.quit()
self.actionExit.triggered.connect(exit_now)
self.actionSoftExit.triggered.connect(exit_now)
def init_tabs(self):
self.search_series_tab_init()
self.settings_tab_init()
def search_series_tab_init(self):
def populate_shows(shows):
self.resultList.clear()
for show in shows:
show_row = QTreeWidgetItem(self.resultList)
show_row.setText(0, show.SeriesName)
for season in show:
season_row = QTreeWidgetItem(show_row)
if season.season_number == 0:
season_row.setText(0, 'Special')
else:
season_row.setText(0, 'Season {:02d}'.format(season.season_number))
season_row.setData(0, Qt.UserRole + 1, season)
show_row.addChild(season_row)
for episode in season:
episode_row = QTreeWidgetItem(season_row)
episode_row.setText(0, '{:02d} - {}'.format(episode.EpisodeNumber, episode.EpisodeName))
episode_row.setData(0, Qt.UserRole + 1, episode)
season_row.addChild(episode_row)
self.resultList.addTopLevelItem(show_row)
if len(shows) == 1:
self.resultList.expandItem(self.resultList.topLevelItem(0))
def populate_torrents(item, torrents):
for torrent in torrents:
row = QTreeWidgetItem(item)
row.setText(0, '{} : {}'.format(torrent.title, torrent.seeders))
row.setData(0, Qt.UserRole + 1, torrent)
item.addChild(row)
def launch_download(item, completion_callback=None, play_callback=None):
torrent = item.data(0, Qt.UserRole + 1)
if not hasattr(torrent, 'title'):
return
            print('[INFO] Starting download')
self.instance.download(torrent, completion_callback, play_callback)
def get_shows_results():
def done(shows):
self.progress.restore()
populate_shows(shows)
self.progress.indeterminate()
self.instance.search(self.searchField.text(), done)
def get_torrents_results(item):
def done(torrents):
self.progress.restore()
populate_torrents(item, torrents)
self.resultList.expandItem(item)
episode = item.data(0, Qt.UserRole + 1)
if not hasattr(episode, 'season'):
return
self.progress.indeterminate()
self.instance.search_episode(episode, done)
def open_menu(position):
def trigger_play():
item = self.resultList.selectedItems()[0]
launch_download(item, None, self.instance.open_player)
def trigger_download():
item = self.resultList.selectedItems()[0]
launch_download(item, None, None)
def trigger_load():
item = self.resultList.selectedItems()[0]
get_torrents_results(item)
indexes = self.resultList.selectedIndexes()
level = 0
if len(indexes) > 0:
index = indexes[0]
while index.parent().isValid():
index = index.parent()
level += 1
menu = QMenu()
if level == 2:
load_action = QAction("Load torrents", self)
load_action.triggered.connect(trigger_load)
menu.addAction(load_action)
elif level == 3:
play_action = QAction("Play", self)
play_action.triggered.connect(trigger_play)
menu.addAction(play_action)
download_action = QAction("Download", self)
download_action.triggered.connect(trigger_download)
menu.addAction(download_action)
menu.exec_(self.resultList.viewport().mapToGlobal(position))
self.searchField.returnPressed.connect(get_shows_results)
QTimer.singleShot(0, self.searchField.setFocus)
self.searchButton.clicked.connect(get_shows_results)
self.resultList.header().close()
self.resultList.itemClicked.connect(get_torrents_results)
self.resultList.itemDoubleClicked.connect(launch_download)
self.resultList.setContextMenuPolicy(Qt.CustomContextMenu)
self.resultList.customContextMenuRequested.connect(open_menu)
def settings_tab_init(self):
def text_item(node, key, data_type):
def update(u_value):
self.instance.settings.set(key, data_type(u_value))
value = self.instance.settings.get(key)
if value is not None:
node.setText(str(value))
node.textChanged.connect(update)
def numeric_item(node, key):
def update(u_value):
self.instance.settings.set(key, float(u_value))
value = self.instance.settings.get(key)
if value is not None:
node.setValue(float(value))
node.valueChanged.connect(update)
def check_item(node, key):
def update(u_value):
u_value = True if u_value == Qt.Checked else False
self.instance.settings.set(key, u_value)
value = self.instance.settings.get(key)
if value is not None and value is True:
node.setCheckState(Qt.Checked)
node.stateChanged.connect(update)
def combo_item(node, key):
def update(u_value):
self.instance.settings.set(key, str(u_value))
value = self.instance.settings.get(key)
if value is not None:
idx = node.findText(value)
if idx > -1:
node.setCurrentIndex(idx)
node.currentIndexChanged.connect(update)
text_item(self.launchCommandValue, "player_path", str)
text_item(self.savePathValue, "save_path", str)
combo_item(self.subtitlesLanguageValue, "subtitle_language")
text_item(self.tpbUrlValue, "tpb_base_url", str)
text_item(self.tvdbApiKeyValue, "tvdb_api_key", str)
numeric_item(self.speedLimitValue, "download_speed_limit")
numeric_item(self.launchAfterValue, "launch_video_percent")
if __name__ == '__main__':
app = QApplication(sys.argv)
ztshows = ZTShows()
window = MainWindow(ztshows)
window.show()
sys.exit(app.exec_())
| {
"content_hash": "ed9439a4be5ccd30f7bdb5a29d2a1506",
"timestamp": "",
"source": "github",
"line_count": 387,
"max_line_length": 115,
"avg_line_length": 35.74935400516796,
"alnum_prop": 0.5825804119985544,
"repo_name": "lecler-i/ZTShows",
"id": "16b9fba6c8dee6e9fc27a2f2a70d41ee0d505338",
"size": "13859",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ztshows.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "45090"
}
],
"symlink_target": ""
} |
import pandas as pd
import numpy as np
import cv2
import sys
import os
from keras.models import Sequential
from keras.callbacks import Callback, ModelCheckpoint
from keras.layers import (Flatten, Dense, Convolution2D, MaxPool2D,
BatchNormalization, Dropout, Activation, Cropping2D, Lambda)
from keras.optimizers import Adam
from keras.regularizers import l2
from keras.preprocessing.image import ImageDataGenerator
from keras.backend import tf as ktf
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from scipy.misc import imread
import scipy
import matplotlib
import matplotlib.pyplot as plt
import argparse
import json
import random
import warnings
matplotlib.style.use('ggplot')
########################### Utilities #########################################
def print_progress_bar(iteration, total, prefix='', suffix='', decimals=1, bar_length=100):
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
bar_length - Optional : character length of bar (Int)
"""
str_format = "{0:." + str(decimals) + "f}"
percents = str_format.format(100 * (iteration / float(total)))
filled_length = int(round(bar_length * iteration / float(total)))
bar = '█' * filled_length + '-' * (bar_length - filled_length)
sys.stdout.write('\r%s |%s| %s%s %s' % (prefix, bar, percents, '%', suffix)),
if iteration == total:
sys.stdout.write('\n')
sys.stdout.flush()
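# Illustrative usage sketch (not part of the original script): a hypothetical loop
# showing how print_progress_bar is typically driven; labels and sizes are placeholders.
def _progress_bar_demo(total=50):
    for i in range(1, total + 1):
        print_progress_bar(i, total, prefix='Loading', suffix='complete', bar_length=40)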
###
############################# VISUALIZATION ####################################
def show_data_distribution(df):
binwidth = 0.025
# histogram before image augmentation
plt.hist(df.steering_angle, bins=np.arange(min(df.steering_angle), max(df.steering_angle) + binwidth, binwidth))
plt.title('Number of images per steering angle')
plt.xlabel('Steering Angle')
plt.ylabel('# Frames')
plt.show()
############################### NETWORK ########################################
def nvidia_end_to_end(shape, l2_regularization_scale):
print("Training Nvidia End To End of input shape %s" % str(shape))
height = shape[0]
crop_factor = 0.2 # Top 40% to be removed
crop_size = (int)(crop_factor * height)
model = Sequential()
model.add(Cropping2D(cropping=((crop_size, 0), (0, 0)), input_shape=shape))
model.add(Lambda(lambda x: (x / 255.0) - 0.5))
model.add(BatchNormalization(axis=1, input_shape=shape))
model.add(Convolution2D(16, (3, 3), padding='valid', strides=(2, 2), activation='elu',
kernel_regularizer=l2(l2_regularization_scale),
bias_regularizer=l2(l2_regularization_scale)))
model.add(Convolution2D(24, (3, 3), padding='valid', strides=(1, 2), activation='elu',
kernel_regularizer=l2(l2_regularization_scale),
bias_regularizer=l2(l2_regularization_scale)))
model.add(Convolution2D(36, (3, 3), padding='valid', activation='elu',
kernel_regularizer=l2(l2_regularization_scale),
bias_regularizer=l2(l2_regularization_scale)))
model.add(Convolution2D(48, (2, 2), padding='valid', activation='elu',
kernel_regularizer=l2(l2_regularization_scale),
bias_regularizer=l2(l2_regularization_scale)))
model.add(Convolution2D(48, (2, 2), padding='valid', activation='elu',
kernel_regularizer=l2(l2_regularization_scale),
bias_regularizer=l2(l2_regularization_scale)))
model.add(Flatten())
model.add(Dense(512,
kernel_regularizer=l2(l2_regularization_scale),
bias_regularizer=l2(l2_regularization_scale)))
model.add(Dropout(.5))
model.add(Activation('elu'))
model.add(Dense(10,
kernel_regularizer=l2(l2_regularization_scale),
bias_regularizer=l2(l2_regularization_scale)))
model.add(Activation('elu'))
model.add(Dense(1,
kernel_regularizer=l2(l2_regularization_scale),
bias_regularizer=l2(l2_regularization_scale)))
model.summary()
adam = Adam(lr=0.0001)
model.compile(loss='mse', optimizer=adam)
return model
################################# Dataset Manipulation Functions ##############################
def flip_image(img):
fimg = np.fliplr(img)
return fimg
def read_image(filename):
img = imread(filename).astype(np.float32)
img = scipy.misc.imresize(img, 50)
return img
def change_brightness(img):
change_pct = int(random.uniform(0, 100))
mask = (255 - img) < change_pct
img = np.where((255 - img) < change_pct, 255, img + change_pct)
return img
def read_csv(filename, cols):
print("Reading Training file: %s" % filename)
return pd.read_csv(filename, names=cols)
def drop_zero_value_steering_angle_rows(df, drop_to):
"""
df: The dataframe to drop rows from
col_name: The column to check from for steering_angle
drop_to: How many rows to drop to
"""
# print("Total rows: %s" % len(df))
# indices = df[df[col_name] == 0.0].index
# total_existing = indices.shape[0]
# print("Total Zero Value rows: %s" % total_existing)
# print("Dropping %s rows from df" % (total_existing - drop_to))
# remove_indices = np.random.choice(indices, size=total_existing - drop_to)
# new_df = df.drop(remove_indices)
# indices = new_df[new_df[col_name] == 0.0].index
# print("Remaining zero value %s" % len(indices))
#
# print("Total rows: %s" % len(new_df))
# print("Dropped %s rows" % (total_existing - drop_to))
# assert(len(df) - len(new_df) == (total_existing - drop_to))
# return new_df
df_with_zero = df[df.steering_angle == 0]
df_without_zero = df[df.steering_angle != 0]
df_with_zero = df_with_zero.sample(n=drop_to)
new_df = pd.concat([df_with_zero, df_without_zero])
return new_df
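# Illustrative sketch (not part of the original module): exercising the helper above
# on a toy dataframe; the column values and drop_to size are placeholder assumptions.
def _drop_zero_angle_demo():
    toy = pd.DataFrame({'steering_angle': [0.0, 0.0, 0.0, 0.0, 0.1, -0.2]})
    # Expect 2 zero-angle rows retained plus the 2 non-zero rows.
    return drop_zero_value_steering_angle_rows(toy, drop_to=2)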
def align_steering_angles_data(df):
"""
Given a dataframe drop the 0 value steering angles to bring it at par
"""
new_df = drop_zero_value_steering_angle_rows(df, 600)
return new_df
############################# Data Reading Routines #################################
def read_training_data(track):
cols = ['center_image', 'left_image', 'right_image', 'steering_angle', 'throttle', 'brake', 'speed']
data_dirs = [entry.path for entry in os.scandir('data') if entry.is_dir()]
dfs = []
for ddir in data_dirs:
# Ignore the recovery tracks since they will be loaded later
if "recovery" not in ddir:
if track in ddir:
dfs.append(read_csv(ddir + '/driving_log.csv', cols))
elif track == "both":
dfs.append(read_csv(ddir + '/driving_log.csv', cols))
df = pd.concat(dfs)
return df
def read_sample_training(df):
"""
df: Original DF from our training data which is to be augmented
"""
cols = ['center_image', 'left_image', 'right_image', 'steering_angle', 'throttle', 'brake', 'speed']
sample_df = read_csv('sample_training_data/driving_log.csv', cols)
df = pd.concat([df, sample_df])
return df
def preprocess(img):
return img
def augment_image(img, technique):
if technique == "flip":
return flip_image(img)
elif technique == "brightness":
return change_brightness(img)
    raise ValueError("No valid technique passed for image augmentation: %s" % technique)
def load_data(df):
all_samples = []
measurements = []
shape = None
total_images = len(df)
index = 0
for i, row in df.iterrows():
print_progress_bar(index, total_images)
index += 1
center_image = preprocess(read_image(row[0]))
all_samples.append(center_image)
measurements.append(float(row[3]))
left_image = preprocess(read_image(row[1]))
all_samples.append(left_image)
measurements.append(float(row[3]) + (0.25))
right_image = preprocess(read_image(row[2]))
all_samples.append(right_image)
measurements.append(float(row[3]) - (0.25))
shape = center_image.shape
# Add an image for the flipped version of the center image
flipped_center_image = flip_image(center_image)
all_samples.append(flipped_center_image)
measurements.append(-float(row[3]))
return np.array(all_samples), np.array(measurements), shape
# def setup_probabilistic_distribution(df):
# binwidth = 0.025
# num_bins = int((max(df.steering_angle) - min(df.steering_angle)) / binwidth)
# # histogram before image augmentation
# counts, bins = np.histogram(df['steering_angle'])
# total = len(df.index)
def rearrange_and_augment_dataframe(df, shuffle_data):
"""
Rearrange the dataframe to linearize the steering angle images and also add
a column to indicate whether augmentation is required or not and what kind of
augmentation is required.
"""
center_df = pd.DataFrame()
left_df = pd.DataFrame()
right_df = pd.DataFrame()
flipped_center = pd.DataFrame()
center_df['image'] = df['center_image']
flipped_center['image'] = df['center_image']
left_df['image'] = df['left_image']
right_df['image'] = df['right_image']
center_df['steering_angle'] = df['steering_angle']
left_df['steering_angle'] = df['steering_angle'] + 0.25
right_df['steering_angle'] = df['steering_angle'] - 0.25
flipped_center['steering_angle'] = -1.0 * df['steering_angle']
# Set the dataframe columns for augmentation to false for some
center_df['augmentation'] = False
left_df['augmentation'] = False
right_df['augmentation'] = False
flipped_center['augmentation'] = True
# Set the augmentation techniques we need
center_df['techniques'] = ""
left_df['techniques'] = ""
right_df['techniques'] = ""
flipped_center['techniques'] = "flip"
# Change the brightness for images with different steering angles and add them
brightness_df = center_df.loc[(center_df.steering_angle < -0.025) | (center_df.steering_angle > 0.025)]
BRIGHTNESS_AUG_FACTOR = 20
brightness_df = brightness_df.append([brightness_df]*BRIGHTNESS_AUG_FACTOR, ignore_index=True)
brightness_df.steering_angle = brightness_df.steering_angle + (np.random.uniform(-1, 1)/30.0)
brightness_df.augmentation = True
brightness_df.techniques = "brightness"
new_df = pd.concat([center_df, left_df, right_df, flipped_center, brightness_df])
if shuffle_data:
        new_df = shuffle(new_df)
return new_df
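# Illustrative sketch (not part of the original module): a one-row driving log with
# hypothetical file names run through the rearrangement above. The result has one row
# per (image, steering_angle, augmentation, techniques) combination, so a generator
# can decide per row whether to flip or re-brighten on the fly.
def _rearrange_demo():
    toy = pd.DataFrame({'center_image': ['c.jpg'], 'left_image': ['l.jpg'],
                        'right_image': ['r.jpg'], 'steering_angle': [0.1]})
    return rearrange_and_augment_dataframe(toy, shuffle_data=False)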
def read_recovery_track_data():
# Read the recovery track data for track 2
cols = ['center_image', 'left_image', 'right_image', 'steering_angle', 'throttle', 'brake', 'speed']
df = read_csv('data/track2_recovery/driving_log.csv', cols)
recovery_df = rearrange_and_augment_dataframe(df, shuffle_data=True)
return recovery_df
def save_experiment(name, network_used, epochs, model, hist):
# Based on the experiment name, save the history and the model for future use
experiments_folder = "experiments/"
history_filename = experiments_folder + name + ".json"
fp = open(history_filename, 'w')
json.dump(hist.history, fp)
print(hist.history)
fp.close()
model_filename = experiments_folder + name + "_" + str(epochs) + "_epochs_" + network_used + '.h5'
model.save(model_filename)
print("Wrote History file: %s" % history_filename)
print("Wrote Model file: %s" % model_filename)
NETWORKS = {
"nvidia": nvidia_end_to_end,
}
################################# GENERATORS ###################################
def new_generator(samples, batch_size=32):
num_samples = len(samples)
while 1:
        samples = shuffle(samples)
for offset in range(0, num_samples, batch_size):
batch_samples = samples[offset: offset + batch_size]
images = []
angles = []
for i, batch_sample in batch_samples.iterrows():
img = read_image(batch_sample.image)
steering_angle = float(batch_sample.steering_angle)
augment = batch_sample.augmentation
techniques = batch_sample.techniques
if augment:
# Techniques should be setup like this for multiple ones
# flip,brightness
techniques = techniques.split(",")
for technique in techniques:
img = augment_image(img, technique)
images.append(img)
angles.append(steering_angle)
X = np.array(images)
y = np.array(angles)
yield X, y
def generator(samples, batch_size=32):
num_samples = len(samples)
while 1:
        samples = shuffle(samples)
for offset in range(0, num_samples, batch_size):
batch_samples = samples[offset: offset + batch_size]
images = []
angles = []
for i, batch_sample in batch_samples.iterrows():
center_image = read_image(batch_sample[0])
center_angle = float(batch_sample[3])
images.append(center_image)
angles.append(center_angle)
left_image = read_image(batch_sample[1])
left_angle = float(batch_sample[3] + 0.25)
images.append(left_image)
angles.append(left_angle)
                right_image = read_image(batch_sample[2])
right_angle = float(batch_sample[3] - 0.25)
images.append(right_image)
angles.append(right_angle)
X = np.array(images)
y = np.array(angles)
yield shuffle(X, y)
def training_generator(samples, batch_size=32):
num_samples = len(samples)
images = []
angles = []
# Drop all the rows and just keep 10
# drop_indices = np.random.choice(samples.index, size=len(samples.index) - 100, replace=False)
# samples = samples.drop(drop_indices)
# First create the proper training data.
print("Creating Initial Training Data...")
for i, batch_sample in samples.iterrows():
center_image = read_image(batch_sample[0])
center_angle = float(batch_sample[3])
images.append(center_image)
angles.append(center_angle)
left_image = read_image(batch_sample[1])
left_angle = float(batch_sample[3] + 0.25)
images.append(left_image)
angles.append(left_angle)
        right_image = read_image(batch_sample[2])
right_angle = float(batch_sample[3] - 0.25)
images.append(right_image)
angles.append(right_angle)
# Also flip the center image and change the steering angle.
flipped_center_image = flip_image(center_image)
images.append(flipped_center_image)
angles.append(-center_angle)
images = np.array(images)
angles = np.array(angles)
print("Feeding to Keras Generator...")
datagen = ImageDataGenerator(
featurewise_center=False,
featurewise_std_normalization=False,
rotation_range=10,
width_shift_range=0.2,
height_shift_range=0.2,
zca_whitening=False,
channel_shift_range=0.2,
zoom_range=0.2)
# datagen.fit(images)
while 1:
X, y = shuffle(images, angles)
for X_train, y_train in datagen.flow(X, y, batch_size=batch_size):
yield shuffle(X_train, y_train)
################################# MAIN METHODS #################################
def args_definition():
parser = argparse.ArgumentParser()
parser.add_argument("--epochs", help="Number of Epochs to train the network for"
,type=int, default=20)
parser.add_argument("--network", help="Specify which Neural Network to execute"
,choices=list(NETWORKS.keys()) + ["all"], default="simple_network")
parser.add_argument("--track", help="Specify which track data to use",
choices=["track1", "track2", "both"], default="both")
parser.add_argument("--use_sample_training", help="Use the sample training data",
action='store_true')
parser.add_argument("--experiment", help="Give the run an experiment name", type=str)
parser.add_argument("--show_data_distribution", help="Show the data distribution for the training data",
action='store_true')
args = parser.parse_args()
return args
def main():
global NETWORKS
args = args_definition()
df = read_training_data(args.track)
if args.use_sample_training:
df = read_sample_training(df)
frames, steering_angles, shape = load_data(df)
    l2_regularization = 1e-7
    model = NETWORKS[args.network](shape, l2_regularization)
hist = model.fit(frames,
steering_angles,
validation_split=0.2,
shuffle=True,
epochs=args.epochs)
model_name = args.network + '.h5'
model.save(model_name)
if args.experiment != "":
        save_experiment(args.experiment, args.network, args.epochs, model, hist)
from keras import backend as K
K.clear_session()
class EarlyStoppingByLossVal(Callback):
def __init__(self, monitor='val_loss', value=0.00001, verbose=0):
super(Callback, self).__init__()
self.monitor = monitor
self.value = value
self.verbose = verbose
def on_epoch_end(self, epoch, logs={}):
current = logs.get(self.monitor)
        if current is None:
            warnings.warn("Early stopping requires %s available!" % self.monitor, RuntimeWarning)
            return
if current < self.value:
if self.verbose > 0:
print("Epoch %05d: early stopping THR" % epoch)
self.model.stop_training = True
def main_generator():
global NETWORKS
args = args_definition()
df = read_training_data(args.track)
if args.use_sample_training:
df = read_sample_training(df)
df = rearrange_and_augment_dataframe(df, shuffle_data=True)
if args.track == "track2" or args.track == "both":
recovery_df = read_recovery_track_data()
df = pd.concat([df, recovery_df])
# df = align_steering_angles_data(df)
if args.show_data_distribution:
show_data_distribution(df)
return
BATCH_SIZE = 512
train_samples, validation_samples = train_test_split(df, test_size=0.2)
print("Total Training Samples: %s" % len(train_samples.index))
print("Total Validation Samples: %s" % len(validation_samples.index))
train_generator = new_generator(train_samples, batch_size=BATCH_SIZE)
validation_generator = new_generator(validation_samples, batch_size=BATCH_SIZE)
shape = (80, 160, 3)
l2_regularization = 1e-7
model = NETWORKS[args.network](shape, l2_regularization)
callbacks = [
EarlyStoppingByLossVal(monitor='val_loss', value=0.00001, verbose=1),
ModelCheckpoint('latest_run/' + args.experiment + "_" + args.network + "_{epoch:02d}-{val_loss:.2f}.h5", monitor='val_loss', save_best_only=True, verbose=1),
]
hist = model.fit_generator(train_generator,
steps_per_epoch=len(df) // BATCH_SIZE + 1,
validation_data=validation_generator,
epochs=args.epochs,
validation_steps=10,
callbacks=callbacks)
model_name = args.network + '.h5'
model.save(model_name)
if args.experiment != "":
save_experiment(args.experiment, args.network, args.epochs, model, hist)
from keras import backend as K
K.clear_session()
if __name__ == "__main__":
# main()
main_generator()
| {
"content_hash": "f38ba26ad44fa22bd83d732a23cc240e",
"timestamp": "",
"source": "github",
"line_count": 556,
"max_line_length": 165,
"avg_line_length": 35.89568345323741,
"alnum_prop": 0.6119851688545946,
"repo_name": "ssarangi/self_driving_cars",
"id": "34f2bd06dcaeeca76c827c30ef2928e4d545c833",
"size": "19960",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "behavior_cloning/model.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "13968138"
},
{
"name": "Jupyter Notebook",
"bytes": "37513598"
},
{
"name": "Python",
"bytes": "186610"
}
],
"symlink_target": ""
} |
class Problem(object):
def __init__(self, filename, line, message, linter,
is_error=True, has_line_change=False):
self.filename = filename
self.line = line
self.message = message
self.linter = linter
self.is_error = is_error
self.has_line_change = has_line_change
def __hash__(self):
return hash(self.__str__())
def __repr__(self):
return '<Problem {}:{} {}>'.format(
self.filename,
self.line,
self.message
)
def __str__(self):
return '{}:{} {}'.format(self.filename, self.line, self.message)
class Problems(object):
"""Lint problems"""
REPORT_CHANGES_RANGE = 3
def __init__(self):
self._items = set()
self._changes = None
def add(self, problem):
self._items.add(problem)
def set_changes(self, changes):
self._changes = changes
def limit_to_changes(self):
changes = self._changes
if not changes:
return
def should_keep(item):
keep = False
for patched_file in changes:
if patched_file.path != item.filename:
continue
for hunk in patched_file:
if not hunk.is_valid():
continue
for line in hunk.target_lines():
if abs(item.line - line.target_line_no) <= self.REPORT_CHANGES_RANGE:
if item.line == line.target_line_no:
item.has_line_change = line.is_added
if line.is_context:
item.line = line.source_line_no
keep = True
if keep:
break
if keep:
break
if keep:
break
return keep
self._items = [item for item in self._items if should_keep(item)]
def __len__(self):
return len(self._items)
def __iter__(self):
for item in self._items:
yield item
def __getitem__(self, key):
return self._items[key]
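# Illustrative usage (not part of the original module). The object passed to
# set_changes() is assumed to behave like a unidiff.PatchSet: iterable patched
# files exposing .path, hunks, and target_lines().
def _problems_demo(patch_set):
    problems = Problems()
    problems.add(Problem('app.py', 12, 'undefined name', 'flake8'))
    problems.set_changes(patch_set)
    problems.limit_to_changes()
    return list(problems)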
| {
"content_hash": "9a0d435c6003d520e54555b5f6b9347c",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 93,
"avg_line_length": 28.848101265822784,
"alnum_prop": 0.4646774901272488,
"repo_name": "bosondata/badwolf",
"id": "5295572949f01339d3cbdd65eb3b11e88e2c295d",
"size": "2305",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "badwolf/lint/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "6010"
},
{
"name": "HTML",
"bytes": "11698"
},
{
"name": "Python",
"bytes": "201074"
},
{
"name": "Shell",
"bytes": "313"
}
],
"symlink_target": ""
} |
import sys
import os
import sphinx_rtd_theme
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon'
]
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = False
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import gems
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'gems'
copyright = u'2016, Blake Printy'
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = gems.__version__
# The full version, including alpha/beta/rc tags.
release = gems.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'gemsdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'gems.tex',
u'gems Documentation',
u'Blake Printy', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'gems',
u'gems Documentation',
[u'Blake Printy'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'gems',
u'gems Documentation',
u'Blake Printy',
'gems',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| {
"content_hash": "8ba43d389adfa13c00b56a3bcf6cc60c",
"timestamp": "",
"source": "github",
"line_count": 271,
"max_line_length": 76,
"avg_line_length": 29.85239852398524,
"alnum_prop": 0.703584672435105,
"repo_name": "bprinty/gems",
"id": "6f7837cf689ab510c9ea4456228f8f4bc03000e9",
"size": "8529",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "2089"
},
{
"name": "Python",
"bytes": "48488"
}
],
"symlink_target": ""
} |
from firehose.model import (Analysis, Generator, Metadata,
DebianBinary, DebianSource)
from ethel.commands import PLUGINS, load_module
from ethel.client import get_proxy, checkout
from contextlib import contextmanager
from ethel.utils import tdir, cd, run_command
from ethel.config import load
import logging
import time
config = load()
proxy = get_proxy()
class IDidNothingError(Exception):
pass
def listize(entry):
items = [x.strip() for x in entry.split(",")]
return [None if x == "null" else x for x in items]
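# Illustrative behaviour (not part of the original module):
#   listize("track1, track2, null")  ->  ['track1', 'track2', None]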
@contextmanager
def workon(suites, arches, capabilities):
job = proxy.get_next_job(suites, arches, capabilities)
if job is None:
yield
else:
logging.info("Acquired job %s (%s) for %s/%s", job['_id'], job['type'], job['suite'], job['arch'])
try:
yield job
except:
logging.warn("Forfeiting the job because of internal exception")
proxy.forfeit_job(job['_id'])
raise
else:
logging.info("Successfully closing the job")
proxy.close_job(job['_id'])
def generate_sut_from_source(package):
name = package['source']
local = None
version = package['version']
if "-" in version:
version, local = version.rsplit("-", 1)
return DebianSource(name, version, local)
def generate_sut_from_binary(package):
source = proxy.get_source_package(package['source'])
arch = package['arch']
name = source['source']
local = None
version = source['version']
if "-" in version:
version, local = version.rsplit("-", 1)
return DebianBinary(name, version, local, arch)
def create_firehose(package, version_getter):
logging.info("Initializing empty firehose report")
sut = {
"sources": generate_sut_from_source,
"binaries": generate_sut_from_binary
}[package['_type']](package)
gname_, gversion = version_getter()
gname = "ethel/%s" % gname_
return Analysis(metadata=Metadata(
generator=Generator(name=gname, version=gversion),
sut=sut, file_=None, stats=None), results=[])
def iterate():
suites = listize(config['suites'])
arches = listize(config['arches'])
with workon(suites, arches, list(PLUGINS.keys())) as job:
if job is None:
raise IDidNothingError("No more jobs")
package_id = job['package']
type_ = job['package_type']
logging.debug("Fetching the %s package, id=%s", type_, package_id)
package = None
if type_ == 'binary':
package = proxy.get_binary_package(package_id)
elif type_ == 'source':
package = proxy.get_source_package(package_id)
else:
            raise IDidNothingError("Unknown package type: %s" % type_)
handler, version_getter = load_module(job['type'])
firehose = create_firehose(package, version_getter)
with tdir() as fd:
with cd(fd):
with checkout(package) as target:
firehose, log, err = handler(target, package,
job, firehose)
type_ = {"sources": "source",
"binaries": "binary"}[package['_type']]
logging.info("Job worker returned, filing reports")
report = proxy.submit_report(firehose.to_json(),
job['_id'], err)
logging.info("Sending the XML firehose report to the pool")
open('firehose.xml', 'w').write(firehose.to_xml_bytes())
remote_firehose_path = proxy.get_firehose_write_location(report)
cmd = config['copy'].format(src='firehose.xml',
dest=remote_firehose_path)
out, err, ret = run_command(cmd)
logging.info("Sending the logs to the pool")
remote_path = proxy.get_log_write_location(report)
open('ethel-log', 'wb').write(log.encode('utf-8'))
cmd = config['copy'].format(src='ethel-log',
dest=remote_path)
out, err, ret = run_command(cmd)
if ret != 0:
print(out)
                    raise Exception("Copy command failed with exit code %s." % ret)
def main():
logging.basicConfig(format='%(asctime)s - %(levelname)8s - [ethel] %(message)s', level=logging.DEBUG)
logging.info("Booting ethel daemon")
while True:
logging.debug("Checking for new jobs")
try:
iterate()
except IDidNothingError:
logging.debug("Nothing to do for now, sleeping 30s")
time.sleep(30)
| {
"content_hash": "2418eb75070e13a08103ca9255e24c96",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 106,
"avg_line_length": 34.11347517730496,
"alnum_prop": 0.5594594594594594,
"repo_name": "paultag/ethel",
"id": "0ebe2fe0d961ef53592d79981535f22db730f8b4",
"size": "4810",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ethel/daemon.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "37976"
},
{
"name": "Shell",
"bytes": "1725"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import kpcc_backroom_handshakes.custom_fields
class Migration(migrations.Migration):
dependencies = [
('election_registrar', '0003_resultsource_ready_to_build'),
('newscast', '0006_remove_topic_measure'),
]
operations = [
migrations.CreateModel(
name='DataNugget',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('scope', models.CharField(blank=True, max_length=255, null=True, verbose_name='State or the County the nugget came from')),
('nugget_text', models.TextField(blank=True, null=True, verbose_name='Latest Update')),
('nugget_date', models.DateField(auto_now_add=True, verbose_name='Date of this Information')),
('nugget_source', models.CharField(blank=True, max_length=255, null=True, verbose_name='Name of the Sources')),
('nugget_link', models.URLField(blank=True, max_length=1024, null=True, verbose_name='URL To Source')),
('nugget_tags', kpcc_backroom_handshakes.custom_fields.ListField(blank=True, null=True, verbose_name='Cities and Counties')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Date Created')),
('modified', models.DateTimeField(auto_now=True, verbose_name='Date Modified')),
('election', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='election_registrar.Election')),
],
),
]
| {
"content_hash": "6bcc8034fe6135e995f5706fbf572fed",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 141,
"avg_line_length": 53.87096774193548,
"alnum_prop": 0.6431137724550898,
"repo_name": "SCPR/kpcc_backroom_handshakes",
"id": "268c31d3b43219a3264af7fd8fa75adc558a7e2e",
"size": "1740",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "newscast/migrations/0007_datanugget.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "90581"
},
{
"name": "HTML",
"bytes": "97407"
},
{
"name": "JavaScript",
"bytes": "100814"
},
{
"name": "Python",
"bytes": "432359"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from agms.exception.unexpected_exception import UnexpectedException
class Response():
"""
A class representing AGMS Response objects.
"""
def __init__(self, response, op):
self._response = response
self._op = op
self._mapping = None
def to_array(self):
return self._map_response(self._response)
def _map_response(self, arr):
if self._mapping:
response = self.__do_map(arr)
return response
else:
raise UnexpectedException('Response mapping not defined for this API.')
def __do_map(self, arr):
response = {}
mapping = self._mapping
if mapping:
# We only map the end of the array containing data
# If this element is an array, then we map its individual sub-arrays
# Otherwise, we map
for key, value in arr.items():
if isinstance(value, dict):
                    # A nested dict is mapped recursively and merged into the flat
                    # result (a dict has no append(), which the original call assumed).
                    response.update(self.__do_map(value))
else:
if key not in mapping.keys():
raise UnexpectedException('Unmapped field ' + key + ' in response')
else:
response[mapping[key]] = value
return response | {
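# Illustrative sketch (not part of the original module): a concrete response is
# expected to set self._mapping to a dict of gateway field names -> local names.
# The field names below are hypothetical, not the real AGMS schema.
class _ExampleResponse(Response):
    def __init__(self, response, op):
        Response.__init__(self, response, op)
        self._mapping = {'STATUS_CODE': 'status_code',
                         'STATUS_MSG': 'status_message'}
# e.g. _ExampleResponse({'STATUS_CODE': '1', 'STATUS_MSG': 'Approved'}, 'transaction').to_array()
#      -> {'status_code': '1', 'status_message': 'Approved'}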
"content_hash": "0a47c2b9ec6e466c2f399bd3218ca102",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 91,
"avg_line_length": 32.1219512195122,
"alnum_prop": 0.5504935459377372,
"repo_name": "agmscode/agms_python",
"id": "67b1a0542bbcf187a05056dd7127a12a9c6090c9",
"size": "1317",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "agms/response/response.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "273109"
}
],
"symlink_target": ""
} |
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "msd.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| {
"content_hash": "7d3bc165c74ba10f59367ea2b19886eb",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 67,
"avg_line_length": 24.88888888888889,
"alnum_prop": 0.7053571428571429,
"repo_name": "bigbugbb/MSD",
"id": "6104c38bf68569afda38815fdd2829557ff54b54",
"size": "246",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MSD-django/manage.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "102290"
},
{
"name": "Makefile",
"bytes": "24870704"
},
{
"name": "Python",
"bytes": "17753"
},
{
"name": "Shell",
"bytes": "2345"
}
],
"symlink_target": ""
} |
import subprocess
from airflow.hooks.base_hook import BaseHook
from airflow.exceptions import AirflowException
class SparkSqlHook(BaseHook):
"""
This hook is a wrapper around the spark-sql binary. It requires that the
"spark-sql" binary is in the PATH.
:param sql: The SQL query to execute
:type sql: str
:param conf: arbitrary Spark configuration property
:type conf: str (format: PROP=VALUE)
:param conn_id: connection_id string
:type conn_id: str
:param total_executor_cores: (Standalone & Mesos only) Total cores for all executors
(Default: all the available cores on the worker)
:type total_executor_cores: int
:param executor_cores: (Standalone & YARN only) Number of cores per
executor (Default: 2)
:type executor_cores: int
:param executor_memory: Memory per executor (e.g. 1000M, 2G) (Default: 1G)
:type executor_memory: str
:param keytab: Full path to the file that contains the keytab
:type keytab: str
:param master: spark://host:port, mesos://host:port, yarn, or local
:type master: str
:param name: Name of the job.
:type name: str
:param num_executors: Number of executors to launch
:type num_executors: int
:param verbose: Whether to pass the verbose flag to spark-sql
:type verbose: bool
:param yarn_queue: The YARN queue to submit to (Default: "default")
:type yarn_queue: str
"""
def __init__(self,
sql,
conf=None,
conn_id='spark_sql_default',
total_executor_cores=None,
executor_cores=None,
executor_memory=None,
keytab=None,
principal=None,
master='yarn',
name='default-name',
num_executors=None,
verbose=True,
yarn_queue='default'
):
self._sql = sql
self._conf = conf
self._conn = self.get_connection(conn_id)
self._total_executor_cores = total_executor_cores
self._executor_cores = executor_cores
self._executor_memory = executor_memory
self._keytab = keytab
self._principal = principal
self._master = master
self._name = name
self._num_executors = num_executors
self._verbose = verbose
self._yarn_queue = yarn_queue
self._sp = None
def get_conn(self):
pass
def _prepare_command(self, cmd):
"""
Construct the spark-sql command to execute. Verbose output is enabled
as default.
:param cmd: command to append to the spark-sql command
:type cmd: str or list[str]
:return: full command to be executed
"""
connection_cmd = ["spark-sql"]
if self._conf:
for conf_el in self._conf.split(","):
connection_cmd += ["--conf", conf_el]
if self._total_executor_cores:
connection_cmd += ["--total-executor-cores", str(self._total_executor_cores)]
if self._executor_cores:
connection_cmd += ["--executor-cores", str(self._executor_cores)]
if self._executor_memory:
connection_cmd += ["--executor-memory", self._executor_memory]
if self._keytab:
connection_cmd += ["--keytab", self._keytab]
if self._principal:
connection_cmd += ["--principal", self._principal]
if self._num_executors:
connection_cmd += ["--num-executors", str(self._num_executors)]
if self._sql:
sql = self._sql.strip()
if sql.endswith(".sql") or sql.endswith(".hql"):
connection_cmd += ["-f", sql]
else:
connection_cmd += ["-e", sql]
if self._master:
connection_cmd += ["--master", self._master]
if self._name:
connection_cmd += ["--name", self._name]
if self._verbose:
connection_cmd += ["--verbose"]
if self._yarn_queue:
connection_cmd += ["--queue", self._yarn_queue]
if isinstance(cmd, str):
connection_cmd += cmd.split()
elif isinstance(cmd, list):
connection_cmd += cmd
else:
raise AirflowException("Invalid additional command: {}".format(cmd))
self.log.debug("Spark-Sql cmd: %s", connection_cmd)
return connection_cmd
def run_query(self, cmd="", **kwargs):
"""
Remote Popen (actually execute the Spark-sql query)
:param cmd: command to append to the spark-sql command
:type cmd: str or list[str]
:param kwargs: extra arguments to Popen (see subprocess.Popen)
:type kwargs: dict
"""
spark_sql_cmd = self._prepare_command(cmd)
self._sp = subprocess.Popen(spark_sql_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
**kwargs)
for line in iter(self._sp.stdout.readline, ''):
self.log.info(line)
returncode = self._sp.wait()
if returncode:
raise AirflowException(
"Cannot execute {} on {}. Process exit code: {}.".format(
cmd, self._conn.host, returncode
)
)
def kill(self):
if self._sp and self._sp.poll() is None:
self.log.info("Killing the Spark-Sql job")
self._sp.kill()
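# Illustrative usage (not part of the original module): the query, master and job
# name below are placeholders, and Airflow must be able to resolve the default
# 'spark_sql_default' connection for get_connection() to succeed.
def _spark_sql_hook_demo():
    hook = SparkSqlHook(sql="SELECT 1", master="local", name="spark-sql-demo")
    hook.run_query()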
| {
"content_hash": "7670f9a3e97d25c616a31e93ee3973af",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 89,
"avg_line_length": 36.30718954248366,
"alnum_prop": 0.5598559855985599,
"repo_name": "owlabs/incubator-airflow",
"id": "b12f355aaa47bad275b479fced8fbaa7a6e8b756",
"size": "6368",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "airflow/contrib/hooks/spark_sql_hook.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "57045"
},
{
"name": "HTML",
"bytes": "147187"
},
{
"name": "JavaScript",
"bytes": "1370838"
},
{
"name": "Mako",
"bytes": "1037"
},
{
"name": "Python",
"bytes": "1647566"
},
{
"name": "Shell",
"bytes": "18823"
}
],
"symlink_target": ""
} |
"""
File used for testing of API calls on tweepy before extending them.
http://docs.tweepy.org/en/v3.5.0/code_snippet.html
Goals for testing:
Write test data to JSON files in var.
tweepy offers ._json method
Get 200 tweets for a user.
Get all locations.
Get 50 trends a for a location.
Get another user
Get another location
Get my own tweets
Do bar graph.
Do tag cloud.
Store tweets in DB with mix of columns I want and JSON column
for full object.
"""
import json
import tweepy
from lib.twitter_api import authentication as twitterAuth
api = twitterAuth.getAPIConnection()
def _writeJSON(data, filename):
print("Write")
with open(filename, "w") as writer:
json.dump(data, writer, indent=4)
return True
def _readJSON(filename):
print("Read")
with open(filename, "r") as reader:
data = json.load(reader)
return data
def getUserTweets(screen_name):
"""
Do API query to get timeline for a Twitter user.
"""
global api
timeline = api.user_timeline(screen_name=screen_name)
filename = "var/tweet_{0}.json".format("test")
# Convert from class to JSON.
outData = [tweet._json for tweet in timeline]
print(filename)
_writeJSON(outData, filename)
_readJSON(filename)
# print(json.dumps(st._json, indent=4))
# print()
    # dir(st) => notable attributes:
    #   '_json' (class-to-JSON conversion)
    #   other fields, e.g. tweet.id:
    #   'author', 'contributors', 'coordinates', 'created_at', 'destroy', 'entities',
    #   'favorite', 'favorite_count', 'favorited', 'geo', 'id', 'id_str',
    #   'in_reply_to_screen_name', 'in_reply_to_status_id', 'in_reply_to_status_id_str',
    #   'in_reply_to_user_id', 'in_reply_to_user_id_str', 'is_quote_status', 'lang',
    #   'parse', 'parse_list', 'place', 'retweet', 'retweet_count', 'retweeted',
    #   'retweets', 'source', 'source_url', 'text', 'truncated', 'user'
def getAvailable():
"""
Do API query for available trends.
"""
global api
places = api.trends_available()
filename = "var/places.json"
print(filename)
_writeJSON(places, filename)
_readJSON(filename)
def getTrend(woeid):
"""
Do API query for trends of a place.
"""
global api
trends = api.trends_place(woeid)
filename = "var/trend_{0}.json".format("test")
print(filename)
_writeJSON(trends, filename)
_readJSON(filename)
if __name__ == "__main__":
if True:
screen_name = "RealDonaldTrump"
getUserTweets(screen_name)
if False:
places = getAvailable()
if False:
# USA
woeid = 23424977
getTrend(woeid)
| {
"content_hash": "77c6c2eb2a18e961f73eab2562ba5e4f",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 447,
"avg_line_length": 23.5929203539823,
"alnum_prop": 0.6305326331582896,
"repo_name": "MichaelCurrin/twitterverse",
"id": "c573bd6687f93c0d4ba384413a59677a01d1eed5",
"size": "2666",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/lib/wip/test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1345"
},
{
"name": "PLpgSQL",
"bytes": "523"
},
{
"name": "Python",
"bytes": "292539"
},
{
"name": "Shell",
"bytes": "4301"
},
{
"name": "TSQL",
"bytes": "15142"
}
],
"symlink_target": ""
} |
import sys
import os
import tarfile
import argparse
from os.path import dirname, join, abspath
from shutil import copy, copytree, rmtree
from subprocess import check_call
from contextlib import closing
parser = argparse.ArgumentParser(
description='Build dynamic cluster RPM',
)
parser.add_argument('buildid',
help='The build id to use i.e. the bit after the salt version in the package name',
)
args = parser.parse_args()
src = abspath(join(dirname(__file__), '..'))
sys.path.append(src)
from dynamiccluster.__version__ import version
rpmbuild = join(os.environ['HOME'], 'rpmbuild')
if os.path.exists(join(rpmbuild, 'SOURCES')):
rmtree(join(rpmbuild, 'SOURCES'))
copy(join(src, 'pkg/rpm.spec'), join(rpmbuild, 'SPECS'))
os.makedirs(join(rpmbuild, 'SOURCES'))
copy(join(src, 'scripts/initd-script'), join(rpmbuild, 'SOURCES'))
copy(join(src, 'config/dynamiccluster.yaml'), join(rpmbuild, 'SOURCES'))
#copytree(src, join(rpmbuild, 'SOURCES'))
# for f in os.listdir(src):
# if f in ['bin', 'pkg', 'tests', 'README.md', 'test.sh']:
# continue
# print('copying %s...'%f)
# if os.path.isdir(join(src, f)):
# copytree(join(src, f), join(rpmbuild, 'SOURCES'))
# else:
# copy(join(src, f), join(rpmbuild, 'SOURCES'))
def srcfilter(ti):
if '/.git' in ti.name:
return None
return ti
with closing(tarfile.open(join(rpmbuild, 'SOURCES/dynamiccluster-%s.tar.gz' % version), 'w|gz')) as tf:
    # Apply the filter above so .git contents are excluded from the tarball.
    tf.add(src, arcname='dynamiccluster-%s' % version, filter=srcfilter)
cmd = ['rpmbuild', '-bb',
'--define=version %s' % version,
'--define=buildid %s' % args.buildid,
'rpm.spec']
print('Executing: %s' % ' '.join('"%s"' % c for c in cmd))
check_call(cmd, cwd=join(rpmbuild, 'SPECS')) | {
"content_hash": "3a0b98e412eb85da5d77ecb028d4f248",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 103,
"avg_line_length": 31.553571428571427,
"alnum_prop": 0.6502546689303905,
"repo_name": "eResearchSA/dynamiccluster",
"id": "beb39f873114b3e99edd5283b079f8fc6eadfdbc",
"size": "1787",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pkg/build.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2263"
},
{
"name": "HTML",
"bytes": "8565"
},
{
"name": "JavaScript",
"bytes": "46166"
},
{
"name": "Python",
"bytes": "324461"
},
{
"name": "Shell",
"bytes": "5006"
}
],
"symlink_target": ""
} |
from eventlet.green import zmq
context = zmq.Context(1)
buffer = []
for i in range(10000):
buffer.append('Hello, world: %s\n' % str(i))
# Socket to talk to server
print("Connecting to hello world server ...")
global socket
socket = context.socket(zmq.PUSH)
socket.connect("tcp://localhost:5555")
def send(sock, buf):
    # Do 1 request, waiting each time for a response
for request in range(1):
print("Sending request %s ..." % request)
tracker = sock.send_multipart(buf, copy=False, track=True)
while(True):
print("Is socket completed? %s." % str(tracker.done))
if tracker.done:
return '\nDONE!!!!!!!!!!!!\n'
print(send(socket, buffer))
| {
"content_hash": "954c24bb3714b448648a9cbf5eefdeb9",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 66,
"avg_line_length": 28.68,
"alnum_prop": 0.6248256624825662,
"repo_name": "li-ma/homework",
"id": "68493f83cd971b04b68f06ca4960cbcae3d34366",
"size": "853",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zeromq/test-zmq-client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "20091"
},
{
"name": "Python",
"bytes": "130929"
},
{
"name": "Shell",
"bytes": "6056"
}
],
"symlink_target": ""
} |
# -*- coding: utf-8 -*-
EXPERIMENTAL_STUFF = True
MAXNFILES = 1000
if EXPERIMENTAL_STUFF:
if is_mobile:
response.view = response.view.replace('default/', 'default.mobile/')
response.menu = []
import re
from gluon.admin import *
from gluon.fileutils import abspath, read_file, write_file
from gluon.utils import web2py_uuid
from gluon.tools import Config
from gluon.compileapp import find_exposed_functions
from glob import glob
import shutil
import platform
try:
import git
if git.__version__ < '0.3.1':
raise ImportError("Your version of git is %s. Upgrade to 0.3.1 or better." % git.__version__)
have_git = True
except ImportError, e:
have_git = False
GIT_MISSING = 'Requires gitpython module, but not installed or incompatible version: %s' % e
from gluon.languages import (read_possible_languages, read_dict, write_dict,
read_plural_dict, write_plural_dict)
if DEMO_MODE and request.function in ['change_password', 'pack',
'pack_custom','pack_plugin', 'upgrade_web2py', 'uninstall',
'cleanup', 'compile_app', 'remove_compiled_app', 'delete',
'delete_plugin', 'create_file', 'upload_file', 'update_languages',
'reload_routes', 'git_push', 'git_pull', 'install_plugin']:
session.flash = T('disabled in demo mode')
redirect(URL('site'))
if is_gae and request.function in ('edit', 'edit_language',
'edit_plurals', 'update_languages', 'create_file', 'install_plugin'):
session.flash = T('disabled in GAE mode')
redirect(URL('site'))
if not is_manager() and request.function in ['change_password', 'upgrade_web2py']:
session.flash = T('disabled in multi user mode')
redirect(URL('site'))
if FILTER_APPS and request.args(0) and not request.args(0) in FILTER_APPS:
session.flash = T('disabled in demo mode')
redirect(URL('site'))
if not session.token:
session.token = web2py_uuid()
def count_lines(data):
return len([line for line in data.split('\n') if line.strip() and not line.startswith('#')])
def log_progress(app, mode='EDIT', filename=None, progress=0):
progress_file = os.path.join(apath(app, r=request), 'progress.log')
now = str(request.now)[:19]
if not os.path.exists(progress_file):
safe_open(progress_file, 'w').write('[%s] START\n' % now)
if filename:
safe_open(progress_file, 'a').write(
'[%s] %s %s: %s\n' % (now, mode, filename, progress))
def safe_open(a, b):
if (DEMO_MODE or is_gae) and ('w' in b or 'a' in b):
class tmp:
def write(self, data):
pass
def close(self):
pass
return tmp()
return open(a, b)
def safe_read(a, b='r'):
safe_file = safe_open(a, b)
try:
return safe_file.read()
finally:
safe_file.close()
def safe_write(a, value, b='w'):
safe_file = safe_open(a, b)
try:
safe_file.write(value)
finally:
safe_file.close()
def get_app(name=None):
app = name or request.args(0)
if (app and os.path.exists(apath(app, r=request)) and
(not MULTI_USER_MODE or is_manager() or
db(db.app.name == app)(db.app.owner == auth.user.id).count())):
return app
session.flash = T('App does not exist or you are not authorized')
redirect(URL('site'))
def index():
""" Index handler """
send = request.vars.send
if DEMO_MODE:
session.authorized = True
session.last_time = t0
if not send:
send = URL('site')
if session.authorized:
redirect(send)
elif request.vars.password:
if verify_password(request.vars.password[:1024]):
session.authorized = True
login_record(True)
if CHECK_VERSION:
session.check_version = True
else:
session.check_version = False
session.last_time = t0
if isinstance(send, list): # ## why does this happen?
send = str(send[0])
redirect(send)
else:
times_denied = login_record(False)
if times_denied >= allowed_number_of_attempts:
response.flash = \
T('admin disabled because too many invalid login attempts')
elif times_denied == allowed_number_of_attempts - 1:
response.flash = \
T('You have one more login attempt before you are locked out')
else:
response.flash = T('invalid password.')
return dict(send=send)
def check_version():
""" Checks if web2py is up to date """
session.forget()
session._unlock(response)
new_version, version = check_new_version(request.env.web2py_version,
WEB2PY_VERSION_URL)
if new_version == -1:
return A(T('Unable to check for upgrades'), _href=WEB2PY_URL)
elif new_version != True:
return A(T('web2py is up to date'), _href=WEB2PY_URL)
elif platform.system().lower() in ('windows', 'win32', 'win64') and os.path.exists("web2py.exe"):
return SPAN('You should upgrade to %s' % version.split('(')[0])
else:
return sp_button(URL('upgrade_web2py'), T('upgrade now to %s') % version.split('(')[0])
def logout():
""" Logout handler """
session.authorized = None
if MULTI_USER_MODE:
redirect(URL('user/logout'))
redirect(URL('index'))
def change_password():
if session.pam_user:
session.flash = T(
'PAM authenticated user, cannot change password here')
redirect(URL('site'))
form = SQLFORM.factory(Field('current_admin_password', 'password'),
Field('new_admin_password',
'password', requires=IS_STRONG()),
Field('new_admin_password_again', 'password'),
_class="span4 well")
if form.accepts(request.vars):
if not verify_password(request.vars.current_admin_password):
form.errors.current_admin_password = T('invalid password')
elif form.vars.new_admin_password != form.vars.new_admin_password_again:
form.errors.new_admin_password_again = T('no match')
else:
path = abspath('parameters_%s.py' % request.env.server_port)
safe_write(path, 'password="%s"' % CRYPT()(
request.vars.new_admin_password)[0])
session.flash = T('password changed')
redirect(URL('site'))
return dict(form=form)
def site():
""" Site handler """
myversion = request.env.web2py_version
# Shortcut to make the elif statements more legible
file_or_appurl = 'file' in request.vars or 'appurl' in request.vars
class IS_VALID_APPNAME(object):
def __call__(self, value):
if not re.compile('^\w+$').match(value):
return (value, T('Invalid application name'))
if not request.vars.overwrite and \
os.path.exists(os.path.join(apath(r=request), value)):
return (value, T('Application exists already'))
return (value, None)
is_appname = IS_VALID_APPNAME()
form_create = SQLFORM.factory(Field('name', requires=is_appname),
table_name='appcreate')
form_update = SQLFORM.factory(Field('name', requires=is_appname),
Field('file', 'upload', uploadfield=False),
Field('url'),
Field('overwrite', 'boolean'),
table_name='appupdate')
form_create.process()
form_update.process()
if DEMO_MODE:
pass
elif form_create.accepted:
# create a new application
appname = cleanpath(form_create.vars.name)
created, error = app_create(appname, request, info=True)
if created:
if MULTI_USER_MODE:
db.app.insert(name=appname, owner=auth.user.id)
log_progress(appname)
session.flash = T('new application "%s" created', appname)
redirect(URL('design', args=appname))
else:
session.flash = \
DIV(T('unable to create application "%s"', appname),
PRE(error))
redirect(URL(r=request))
elif form_update.accepted:
if (form_update.vars.url or '').endswith('.git'):
if not have_git:
session.flash = GIT_MISSING
redirect(URL(r=request))
target = os.path.join(apath(r=request), form_update.vars.name)
try:
new_repo = git.Repo.clone_from(form_update.vars.url, target)
session.flash = T('new application "%s" imported',
form_update.vars.name)
except git.GitCommandError, err:
session.flash = T('Invalid git repository specified.')
redirect(URL(r=request))
elif form_update.vars.url:
# fetch an application via URL or file upload
try:
f = urllib.urlopen(form_update.vars.url)
if f.code == 404:
raise Exception("404 file not found")
except Exception, e:
session.flash = \
DIV(T('Unable to download app because:'), PRE(str(e)))
redirect(URL(r=request))
fname = form_update.vars.url
elif form_update.accepted and form_update.vars.file:
fname = request.vars.file.filename
f = request.vars.file.file
else:
session.flash = 'No file uploaded and no URL specified'
redirect(URL(r=request))
if f:
appname = cleanpath(form_update.vars.name)
installed = app_install(appname, f,
request, fname,
overwrite=form_update.vars.overwrite)
if f and installed:
msg = 'application %(appname)s installed with md5sum: %(digest)s'
if MULTI_USER_MODE:
db.app.insert(name=appname, owner=auth.user.id)
log_progress(appname)
session.flash = T(msg, dict(appname=appname,
digest=md5_hash(installed)))
elif f and form_update.vars.overwrite:
msg = 'unable to install application "%(appname)s"'
session.flash = T(msg, dict(appname=form_update.vars.name))
else:
msg = 'unable to install application "%(appname)s"'
session.flash = T(msg, dict(appname=form_update.vars.name))
redirect(URL(r=request))
regex = re.compile('^\w+$')
if is_manager():
apps = [f for f in os.listdir(apath(r=request)) if regex.match(f)]
else:
apps = [f.name for f in db(db.app.owner == auth.user_id).select()]
if FILTER_APPS:
apps = [f for f in apps if f in FILTER_APPS]
apps = sorted(apps, lambda a, b: cmp(a.upper(), b.upper()))
myplatform = platform.python_version()
return dict(app=None, apps=apps, myversion=myversion, myplatform=myplatform,
form_create=form_create, form_update=form_update)
def report_progress(app):
import datetime
progress_file = os.path.join(apath(app, r=request), 'progress.log')
regex = re.compile('\[(.*?)\][^\:]+\:\s+(\-?\d+)')
if not os.path.exists(progress_file):
return []
matches = regex.findall(open(progress_file, 'r').read())
events, counter = [], 0
for m in matches:
if not m:
continue
days = -(request.now - datetime.datetime.strptime(m[0],
'%Y-%m-%d %H:%M:%S')).days
counter += int(m[1])
events.append([days, counter])
return events
def pack():
app = get_app()
try:
if len(request.args) == 1:
fname = 'web2py.app.%s.w2p' % app
filename = app_pack(app, request, raise_ex=True)
else:
fname = 'web2py.app.%s.compiled.w2p' % app
filename = app_pack_compiled(app, request, raise_ex=True)
except Exception, e:
filename = None
if filename:
response.headers['Content-Type'] = 'application/w2p'
disposition = 'attachment; filename=%s' % fname
response.headers['Content-Disposition'] = disposition
return safe_read(filename, 'rb')
else:
session.flash = T('internal error: %s', e)
redirect(URL('site'))
def pack_plugin():
app = get_app()
if len(request.args) == 2:
fname = 'web2py.plugin.%s.w2p' % request.args[1]
filename = plugin_pack(app, request.args[1], request)
if filename:
response.headers['Content-Type'] = 'application/w2p'
disposition = 'attachment; filename=%s' % fname
response.headers['Content-Disposition'] = disposition
return safe_read(filename, 'rb')
else:
session.flash = T('internal error')
redirect(URL('plugin', args=request.args))
def pack_custom():
app = get_app()
base = apath(app, r=request)
if request.post_vars.file:
files = request.post_vars.file
files = [files] if not isinstance(files,list) else files
fname = 'web2py.app.%s.w2p' % app
try:
filename = app_pack(app, request, raise_ex=True, filenames=files)
except Exception, e:
filename = None
if filename:
response.headers['Content-Type'] = 'application/w2p'
disposition = 'attachment; filename=%s' % fname
response.headers['Content-Disposition'] = disposition
return safe_read(filename, 'rb')
else:
session.flash = T('internal error: %s', e)
redirect(URL(args=request.args))
def ignore(fs):
return [f for f in fs if not (
f[:1] in '#' or f.endswith('~') or f.endswith('.bak'))]
files = {}
for (r,d,f) in os.walk(base):
files[r] = {'folders':ignore(d),'files':ignore(f)}
return locals()
def upgrade_web2py():
dialog = FORM.confirm(T('Upgrade'),
{T('Cancel'): URL('site')})
if dialog.accepted:
(success, error) = upgrade(request)
if success:
session.flash = T('web2py upgraded; please restart it')
else:
session.flash = T('unable to upgrade because "%s"', error)
redirect(URL('site'))
return dict(dialog=dialog)
def uninstall():
app = get_app()
dialog = FORM.confirm(T('Uninstall'),
{T('Cancel'): URL('site')})
dialog['_id'] = 'confirm_form'
dialog['_class'] = 'well'
for component in dialog.components:
component['_class'] = 'btn'
if dialog.accepted:
if MULTI_USER_MODE:
if is_manager() and db(db.app.name == app).delete():
pass
elif db(db.app.name == app)(db.app.owner == auth.user.id).delete():
pass
else:
session.flash = T('no permission to uninstall "%s"', app)
redirect(URL('site'))
try:
filename = app_pack(app, request, raise_ex=True)
except:
session.flash = T('unable to uninstall "%s"', app)
else:
if app_uninstall(app, request):
session.flash = T('application "%s" uninstalled', app)
else:
session.flash = T('unable to uninstall "%s"', app)
redirect(URL('site'))
return dict(app=app, dialog=dialog)
def cleanup():
app = get_app()
clean = app_cleanup(app, request)
if not clean:
session.flash = T("some files could not be removed")
else:
session.flash = T('cache, errors and sessions cleaned')
redirect(URL('site'))
def compile_app():
app = get_app()
c = app_compile(app, request)
if not c:
session.flash = T('application compiled')
else:
session.flash = DIV(T('Cannot compile: there are errors in your app:'),
CODE(c))
redirect(URL('site'))
def remove_compiled_app():
""" Remove the compiled application """
app = get_app()
remove_compiled_application(apath(app, r=request))
session.flash = T('compiled application removed')
redirect(URL('site'))
def delete():
""" Object delete handler """
app = get_app()
filename = '/'.join(request.args)
sender = request.vars.sender
if isinstance(sender, list): # ## fix a problem with Vista
sender = sender[0]
dialog = FORM.confirm(T('Delete'),
{T('Cancel'): URL(sender, anchor=request.vars.id)})
if dialog.accepted:
try:
full_path = apath(filename, r=request)
lineno = count_lines(open(full_path, 'r').read())
os.unlink(full_path)
log_progress(app, 'DELETE', filename, progress=-lineno)
session.flash = T('file "%(filename)s" deleted',
dict(filename=filename))
except Exception:
session.flash = T('unable to delete file "%(filename)s"',
dict(filename=filename))
redirect(URL(sender, anchor=request.vars.id2))
return dict(dialog=dialog, filename=filename)
def enable():
app = get_app()
filename = os.path.join(apath(app, r=request), 'DISABLED')
if is_gae:
return SPAN(T('Not supported'), _style='color:yellow')
elif os.path.exists(filename):
os.unlink(filename)
return SPAN(T('Disable'), _style='color:green')
else:
safe_open(filename, 'wb').write('disabled: True\ntime-disabled: %s' % request.now)
return SPAN(T('Enable'), _style='color:red')
def peek():
""" Visualize object code """
app = get_app(request.vars.app)
filename = '/'.join(request.args)
if request.vars.app:
path = abspath(filename)
else:
path = apath(filename, r=request)
try:
data = safe_read(path).replace('\r', '')
except IOError:
session.flash = T('file does not exist')
redirect(URL('site'))
extension = filename[filename.rfind('.') + 1:].lower()
return dict(app=app,
filename=filename,
data=data,
extension=extension)
def test():
""" Execute controller tests """
app = get_app()
if len(request.args) > 1:
file = request.args[1]
else:
file = '.*\.py'
controllers = listdir(
apath('%s/controllers/' % app, r=request), file + '$')
return dict(app=app, controllers=controllers)
def keepalive():
return ''
def search():
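    """ Ajax keyword search across the application's .py and .html files """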
keywords = request.vars.keywords or ''
app = get_app()
def match(filename, keywords):
filename = os.path.join(apath(app, r=request), filename)
if keywords in read_file(filename, 'rb'):
return True
return False
path = apath(request.args[0], r=request)
files1 = glob(os.path.join(path, '*/*.py'))
files2 = glob(os.path.join(path, '*/*.html'))
files3 = glob(os.path.join(path, '*/*/*.html'))
files = [x[len(path) + 1:].replace(
'\\', '/') for x in files1 + files2 + files3 if match(x, keywords)]
return response.json(dict(files=files, message=T.M('Searching: **%s** %%{file}', len(files))))
def edit():
""" File edit handler """
# Load json only if it is ajax edited...
app = get_app(request.vars.app)
app_path = apath(app, r=request)
preferences={'theme':'web2py', 'editor': 'default', 'closetag': 'true', 'codefolding': 'false', 'tabwidth':'4', 'indentwithtabs':'false', 'linenumbers':'true', 'highlightline':'true'}
config = Config(os.path.join(request.folder, 'settings.cfg'),
section='editor', default_values={})
preferences.update(config.read())
if not(request.ajax) and not(is_mobile):
# return the scaffolding, the rest will be through ajax requests
response.title = T('Editing %s') % app
return response.render ('default/edit.html', dict(app=app, editor_settings=preferences))
    # show settings tab and save preferences
if 'settings' in request.vars:
if request.post_vars: #save new preferences
post_vars = request.post_vars.items()
            # Since unchecked checkboxes are not serialized, set them to false by hand so the correct preference is stored in the settings
            post_vars += [(opt, 'false') for opt in preferences if opt not in request.post_vars]
if config.save(post_vars):
response.headers["web2py-component-flash"] = T('Preferences saved correctly')
else:
response.headers["web2py-component-flash"] = T('Preferences saved on session only')
response.headers["web2py-component-command"] = "update_editor(%s);$('a[href=#editor_settings] button.close').click();" % response.json(config.read())
return
else:
details = {'realfilename':'settings', 'filename':'settings', 'id':'editor_settings', 'force': False}
details['plain_html'] = response.render('default/editor_settings.html', {'editor_settings':preferences})
return response.json(details)
""" File edit handler """
# Load json only if it is ajax edited...
app = get_app(request.vars.app)
filename = '/'.join(request.args)
realfilename = request.args[-1]
if request.vars.app:
path = abspath(filename)
else:
path = apath(filename, r=request)
# Try to discover the file type
if filename[-3:] == '.py':
filetype = 'python'
elif filename[-5:] == '.html':
filetype = 'html'
elif filename[-5:] == '.load':
filetype = 'html'
elif filename[-4:] == '.css':
filetype = 'css'
elif filename[-3:] == '.js':
filetype = 'javascript'
else:
filetype = 'html'
# ## check if file is not there
if ('revert' in request.vars) and os.path.exists(path + '.bak'):
try:
data = safe_read(path + '.bak')
data1 = safe_read(path)
except IOError:
session.flash = T('Invalid action')
if 'from_ajax' in request.vars:
return response.json({'error': str(T('Invalid action'))})
else:
redirect(URL('site'))
safe_write(path, data)
file_hash = md5_hash(data)
saved_on = time.ctime(os.stat(path)[stat.ST_MTIME])
safe_write(path + '.bak', data1)
response.flash = T('file "%s" of %s restored', (filename, saved_on))
else:
try:
data = safe_read(path)
except IOError:
session.flash = T('Invalid action')
if 'from_ajax' in request.vars:
return response.json({'error': str(T('Invalid action'))})
else:
redirect(URL('site'))
lineno_old = count_lines(data)
file_hash = md5_hash(data)
saved_on = time.ctime(os.stat(path)[stat.ST_MTIME])
if request.vars.file_hash and request.vars.file_hash != file_hash:
session.flash = T('file changed on disk')
data = request.vars.data.replace('\r\n', '\n').strip() + '\n'
safe_write(path + '.1', data)
if 'from_ajax' in request.vars:
return response.json({'error': str(T('file changed on disk')),
'redirect': URL('resolve',
args=request.args)})
else:
redirect(URL('resolve', args=request.args))
elif request.vars.data:
safe_write(path + '.bak', data)
data = request.vars.data.replace('\r\n', '\n').strip() + '\n'
safe_write(path, data)
lineno_new = count_lines(data)
log_progress(
app, 'EDIT', filename, progress=lineno_new - lineno_old)
file_hash = md5_hash(data)
saved_on = time.ctime(os.stat(path)[stat.ST_MTIME])
response.flash = T('file saved on %s', saved_on)
data_or_revert = (request.vars.data or request.vars.revert)
# Check compile errors
highlight = None
if filetype == 'python' and request.vars.data:
import _ast
try:
code = request.vars.data.rstrip().replace('\r\n', '\n') + '\n'
compile(code, path, "exec", _ast.PyCF_ONLY_AST)
except Exception, e:
# offset calculation is only used for textarea (start/stop)
start = sum([len(line) + 1 for l, line
in enumerate(request.vars.data.split("\n"))
if l < e.lineno - 1])
if e.text and e.offset:
offset = e.offset - (len(e.text) - len(
e.text.splitlines()[-1]))
else:
offset = 0
highlight = {'start': start, 'end': start +
offset + 1, 'lineno': e.lineno, 'offset': offset}
try:
ex_name = e.__class__.__name__
except:
ex_name = 'unknown exception!'
response.flash = DIV(T('failed to compile file because:'), BR(),
B(ex_name), ' ' + T('at line %s', e.lineno),
offset and ' ' +
T('at char %s', offset) or '',
PRE(str(e)))
if data_or_revert and request.args[1] == 'modules':
        # Let's try to reload the modules
try:
mopath = '.'.join(request.args[2:])[:-3]
exec 'import applications.%s.modules.%s' % (
request.args[0], mopath)
reload(sys.modules['applications.%s.modules.%s'
% (request.args[0], mopath)])
except Exception, e:
response.flash = DIV(
T('failed to reload module because:'), PRE(str(e)))
edit_controller = None
editviewlinks = None
view_link = None
if filetype == 'html' and len(request.args) >= 3:
cfilename = os.path.join(request.args[0], 'controllers',
request.args[2] + '.py')
if os.path.exists(apath(cfilename, r=request)):
edit_controller = URL('edit', args=[cfilename.replace(os.sep, "/")])
view = request.args[3].replace('.html', '')
view_link = URL(request.args[0], request.args[2], view)
elif filetype == 'python' and request.args[1] == 'controllers':
## it's a controller file.
## Create links to all of the associated view files.
app = get_app()
viewname = os.path.splitext(request.args[2])[0]
viewpath = os.path.join(app, 'views', viewname)
aviewpath = apath(viewpath, r=request)
viewlist = []
if os.path.exists(aviewpath):
if os.path.isdir(aviewpath):
viewlist = glob(os.path.join(aviewpath, '*.html'))
elif os.path.exists(aviewpath + '.html'):
viewlist.append(aviewpath + '.html')
if len(viewlist):
editviewlinks = []
for v in viewlist:
vf = os.path.split(v)[-1]
vargs = "/".join([viewpath.replace(os.sep, "/"), vf])
editviewlinks.append(A(vf.split(".")[0],
_class="editor_filelink",
_href=URL('edit', args=[vargs])))
if len(request.args) > 2 and request.args[1] == 'controllers':
controller = (request.args[2])[:-3]
functions = find_exposed_functions(data)
else:
(controller, functions) = (None, None)
if 'from_ajax' in request.vars:
return response.json({'file_hash': file_hash, 'saved_on': saved_on, 'functions': functions, 'controller': controller, 'application': request.args[0], 'highlight': highlight})
else:
file_details = dict(app=request.args[0],
lineno=request.vars.lineno or 1,
editor_settings=preferences,
filename=filename,
realfilename=realfilename,
filetype=filetype,
data=data,
edit_controller=edit_controller,
file_hash=file_hash,
saved_on=saved_on,
controller=controller,
functions=functions,
view_link=view_link,
editviewlinks=editviewlinks,
id=IS_SLUG()(filename)[0],
force= True if (request.vars.restore or
request.vars.revert) else False)
plain_html = response.render('default/edit_js.html', file_details)
file_details['plain_html'] = plain_html
if is_mobile:
return response.render('default.mobile/edit.html',
file_details, editor_settings=preferences)
else:
return response.json(file_details)
def todolist():
    """ Return all TODO comments found in the requested app """
app = request.vars.app or ''
app_path = apath('%(app)s' % {'app':app}, r=request)
dirs=['models', 'controllers', 'modules', 'private' ]
def listfiles(app, dir, regexp='.*\.py$'):
files = sorted( listdir(apath('%(app)s/%(dir)s/' % {'app':app, 'dir':dir}, r=request), regexp))
files = [x.replace(os.path.sep, '/') for x in files if not x.endswith('.bak')]
return files
pattern = '#\s*(todo)+\s+(.*)'
regex = re.compile(pattern, re.IGNORECASE)
output = []
for d in dirs:
for f in listfiles(app, d):
matches = []
filename= apath(os.path.join(app, d, f), r=request)
with open(filename, 'r') as f_s:
src = f_s.read()
for m in regex.finditer(src):
start = m.start()
lineno = src.count('\n', 0, start) + 1
matches.append({'text':m.group(0), 'lineno':lineno})
if len(matches) != 0:
output.append({'filename':f,'matches':matches, 'dir':d})
return {'todo':output, 'app': app}
def editor_sessions():
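    """ Save and list named editor sessions (groups of open files) stored in settings.cfg """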
config = Config(os.path.join(request.folder, 'settings.cfg'),
section='editor_sessions', default_values={})
preferences = config.read()
if request.vars.session_name and request.vars.files:
session_name = request.vars.session_name
files = request.vars.files
preferences.update({session_name:','.join(files)})
if config.save(preferences.items()):
response.headers["web2py-component-flash"] = T('Session saved correctly')
else:
response.headers["web2py-component-flash"] = T('Session saved on session only')
return response.render('default/editor_sessions.html', {'editor_sessions':preferences})
def resolve():
    """ Resolve an edit conflict by merging a file with its saved .1 copy """
filename = '/'.join(request.args)
# ## check if file is not there
path = apath(filename, r=request)
a = safe_read(path).split('\n')
try:
b = safe_read(path + '.1').split('\n')
except IOError:
session.flash = 'Other file, no longer there'
redirect(URL('edit', args=request.args))
d = difflib.ndiff(a, b)
    def leading(line):
        """ Return the leading whitespace of a diff line so indentation survives in the HTML display """
z = ''
for (k, c) in enumerate(line):
if c == ' ':
z += ' '
            elif c == '\t':
z += ' '
elif k == 0 and c == '?':
pass
else:
break
return XML(z)
def getclass(item):
""" Determine item class """
if item[0] == ' ':
return 'normal'
if item[0] == '+':
return 'plus'
if item[0] == '-':
return 'minus'
if request.vars:
c = '\n'.join([item[2:].rstrip() for (i, item) in enumerate(d) if item[0]
== ' ' or 'line%i' % i in request.vars])
safe_write(path, c)
session.flash = 'files merged'
redirect(URL('edit', args=request.args))
else:
# Making the short circuit compatible with <= python2.4
gen_data = lambda index, item: not item[:1] in ['+', '-'] and "" \
or INPUT(_type='checkbox',
_name='line%i' % index,
value=item[0] == '+')
diff = TABLE(*[TR(TD(gen_data(i, item)),
TD(item[0]),
TD(leading(item[2:]),
TT(item[2:].rstrip())),
_class=getclass(item))
for (i, item) in enumerate(d) if item[0] != '?'])
return dict(diff=diff, filename=filename)
def edit_language():
""" Edit language file """
app = get_app()
filename = '/'.join(request.args)
response.title = request.args[-1]
strings = read_dict(apath(filename, r=request))
if '__corrupted__' in strings:
form = SPAN(strings['__corrupted__'], _class='error')
return dict(filename=filename, form=form)
keys = sorted(strings.keys(), lambda x, y: cmp(
unicode(x, 'utf-8').lower(), unicode(y, 'utf-8').lower()))
rows = []
rows.append(H2(T('Original/Translation')))
for key in keys:
name = md5_hash(key)
s = strings[key]
(prefix, sep, key) = key.partition('\x01')
if sep:
prefix = SPAN(prefix + ': ', _class='tm_ftag')
k = key
else:
(k, prefix) = (prefix, '')
_class = 'untranslated' if k == s else 'translated'
if len(s) <= 40:
elem = INPUT(_type='text', _name=name, value=s,
_size=70, _class=_class)
else:
elem = TEXTAREA(_name=name, value=s, _cols=70,
_rows=5, _class=_class)
# Making the short circuit compatible with <= python2.4
k = (s != k) and k or B(k)
new_row = DIV(LABEL(prefix, k, _style="font-weight:normal;"),
CAT(elem, '\n', TAG.BUTTON(
T('delete'),
_onclick='return delkey("%s")' % name,
_class='btn')), _id=name, _class='span6 well well-small')
rows.append(DIV(new_row,_class="row-fluid"))
rows.append(DIV(INPUT(_type='submit', _value=T('update'), _class="btn btn-primary"), _class='controls'))
form = FORM(*rows)
if form.accepts(request.vars, keepvalues=True):
strs = dict()
for key in keys:
name = md5_hash(key)
if form.vars[name] == chr(127):
continue
strs[key] = form.vars[name]
write_dict(apath(filename, r=request), strs)
session.flash = T('file saved on %(time)s', dict(time=time.ctime()))
redirect(URL(r=request, args=request.args))
return dict(app=request.args[0], filename=filename, form=form)
def edit_plurals():
""" Edit plurals file """
app = get_app()
filename = '/'.join(request.args)
plurals = read_plural_dict(
apath(filename, r=request)) # plural forms dictionary
nplurals = int(request.vars.nplurals) - 1 # plural forms quantity
xnplurals = xrange(nplurals)
if '__corrupted__' in plurals:
# show error message and exit
form = SPAN(plurals['__corrupted__'], _class='error')
return dict(filename=filename, form=form)
keys = sorted(plurals.keys(), lambda x, y: cmp(
unicode(x, 'utf-8').lower(), unicode(y, 'utf-8').lower()))
tab_rows = []
for key in keys:
name = md5_hash(key)
forms = plurals[key]
if len(forms) < nplurals:
forms.extend(None for i in xrange(nplurals - len(forms)))
tab_col1 = DIV(CAT(LABEL(T("Singular Form")), B(key,
_class='fake-input')))
tab_inputs = [SPAN(LABEL(T("Plural Form #%s", n + 1)), INPUT(_type='text', _name=name + '_' + str(n), value=forms[n], _size=20), _class='span6') for n in xnplurals]
tab_col2 = DIV(CAT(*tab_inputs))
tab_col3 = DIV(CAT(LABEL(XML(' ')), TAG.BUTTON(T('delete'), _onclick='return delkey("%s")' % name, _class='btn'), _class='span6'))
tab_row = DIV(DIV(tab_col1, '\n', tab_col2, '\n', tab_col3, _class='well well-small'), _id=name, _class='row-fluid tab_row')
tab_rows.append(tab_row)
tab_rows.append(DIV(TAG['button'](T('update'), _type='submit',
_class='btn btn-primary'),
_class='controls'))
tab_container = DIV(*tab_rows, **dict(_class="row-fluid"))
form = FORM(tab_container)
if form.accepts(request.vars, keepvalues=True):
new_plurals = dict()
for key in keys:
name = md5_hash(key)
if form.vars[name + '_0'] == chr(127):
continue
new_plurals[key] = [form.vars[name + '_' + str(n)]
for n in xnplurals]
write_plural_dict(apath(filename, r=request), new_plurals)
session.flash = T('file saved on %(time)s', dict(time=time.ctime()))
redirect(URL(r=request, args=request.args, vars=dict(
nplurals=request.vars.nplurals)))
return dict(app=request.args[0], filename=filename, form=form)
def about():
""" Read about info """
app = get_app()
# ## check if file is not there
about = safe_read(apath('%s/ABOUT' % app, r=request))
license = safe_read(apath('%s/LICENSE' % app, r=request))
return dict(app=app, about=MARKMIN(about), license=MARKMIN(license), progress=report_progress(app))
def design():
""" Application design handler """
app = get_app()
if not response.flash and app == request.application:
msg = T('ATTENTION: you cannot edit the running application!')
response.flash = msg
if request.vars and not request.vars.token == session.token:
redirect(URL('logout'))
if request.vars.pluginfile is not None and not isinstance(request.vars.pluginfile, str):
filename = os.path.basename(request.vars.pluginfile.filename)
if plugin_install(app, request.vars.pluginfile.file,
request, filename):
session.flash = T('new plugin installed')
redirect(URL('design', args=app))
else:
session.flash = \
T('unable to create application "%s"', request.vars.filename)
redirect(URL(r=request))
elif isinstance(request.vars.pluginfile, str):
session.flash = T('plugin not specified')
redirect(URL(r=request))
# If we have only pyc files it means that
# we cannot design
if os.path.exists(apath('%s/compiled' % app, r=request)):
session.flash = \
T('application is compiled and cannot be designed')
redirect(URL('site'))
# Get all models
models = listdir(apath('%s/models/' % app, r=request), '.*\.py$')
models = [x.replace('\\', '/') for x in models]
defines = {}
for m in models:
data = safe_read(apath('%s/models/%s' % (app, m), r=request))
defines[m] = regex_tables.findall(data)
defines[m].sort()
# Get all controllers
controllers = sorted(
listdir(apath('%s/controllers/' % app, r=request), '.*\.py$'))
controllers = [x.replace('\\', '/') for x in controllers]
functions = {}
for c in controllers:
data = safe_read(apath('%s/controllers/%s' % (app, c), r=request))
items = find_exposed_functions(data)
functions[c] = items
# Get all views
views = sorted(
listdir(apath('%s/views/' % app, r=request), '[\w/\-]+(\.\w+)+$'))
views = [x.replace('\\', '/') for x in views if not x.endswith('.bak')]
extend = {}
include = {}
for c in views:
data = safe_read(apath('%s/views/%s' % (app, c), r=request))
items = regex_extend.findall(data)
if items:
extend[c] = items[0][1]
items = regex_include.findall(data)
include[c] = [i[1] for i in items]
# Get all modules
modules = listdir(apath('%s/modules/' % app, r=request), '.*\.py$')
    modules = [x.replace('\\', '/') for x in modules]
modules.sort()
# Get all private files
privates = listdir(apath('%s/private/' % app, r=request), '[^\.#].*')
privates = [x.replace('\\', '/') for x in privates]
privates.sort()
# Get all static files
statics = listdir(apath('%s/static/' % app, r=request), '[^\.#].*',
maxnum = MAXNFILES)
statics = [x.replace(os.path.sep, '/') for x in statics]
statics.sort()
# Get all languages
langpath = os.path.join(apath(app, r=request),'languages')
languages = dict([(lang, info) for lang, info
in read_possible_languages(langpath).iteritems()
if info[2] != 0]) # info[2] is langfile_mtime:
                      # get only existing files
#Get crontab
cronfolder = apath('%s/cron' % app, r=request)
crontab = apath('%s/cron/crontab' % app, r=request)
if not is_gae:
if not os.path.exists(cronfolder):
os.mkdir(cronfolder)
if not os.path.exists(crontab):
safe_write(crontab, '#crontab')
plugins = []
def filter_plugins(items, plugins):
plugins += [item[7:].split('/')[0].split(
'.')[0] for item in items if item.startswith('plugin_')]
plugins[:] = list(set(plugins))
plugins.sort()
return [item for item in items if not item.startswith('plugin_')]
return dict(app=app,
models=filter_plugins(models, plugins),
defines=defines,
controllers=filter_plugins(controllers, plugins),
functions=functions,
views=filter_plugins(views, plugins),
modules=filter_plugins(modules, plugins),
extend=extend,
include=include,
privates=filter_plugins(privates, plugins),
statics=filter_plugins(statics, plugins),
languages=languages,
crontab=crontab,
plugins=plugins)
def delete_plugin():
""" Object delete handler """
app = request.args(0)
plugin = request.args(1)
plugin_name = 'plugin_' + plugin
dialog = FORM.confirm(
T('Delete'),
{T('Cancel'): URL('design', args=app)})
if dialog.accepted:
try:
for folder in ['models', 'views', 'controllers', 'static', 'modules', 'private']:
path = os.path.join(apath(app, r=request), folder)
for item in os.listdir(path):
if item.rsplit('.', 1)[0] == plugin_name:
filename = os.path.join(path, item)
if os.path.isdir(filename):
shutil.rmtree(filename)
else:
os.unlink(filename)
session.flash = T('plugin "%(plugin)s" deleted',
dict(plugin=plugin))
except Exception:
session.flash = T('unable to delete file plugin "%(plugin)s"',
dict(plugin=plugin))
redirect(URL('design', args=request.args(0), anchor=request.vars.id2))
return dict(dialog=dialog, plugin=plugin)
def plugin():
""" Application design handler """
app = get_app()
plugin = request.args(1)
if not response.flash and app == request.application:
msg = T('ATTENTION: you cannot edit the running application!')
response.flash = msg
# If we have only pyc files it means that
# we cannot design
if os.path.exists(apath('%s/compiled' % app, r=request)):
session.flash = \
T('application is compiled and cannot be designed')
redirect(URL('site'))
# Get all models
models = listdir(apath('%s/models/' % app, r=request), '.*\.py$')
models = [x.replace('\\', '/') for x in models]
defines = {}
for m in models:
data = safe_read(apath('%s/models/%s' % (app, m), r=request))
defines[m] = regex_tables.findall(data)
defines[m].sort()
# Get all controllers
controllers = sorted(
listdir(apath('%s/controllers/' % app, r=request), '.*\.py$'))
controllers = [x.replace('\\', '/') for x in controllers]
functions = {}
for c in controllers:
data = safe_read(apath('%s/controllers/%s' % (app, c), r=request))
items = find_exposed_functions(data)
functions[c] = items
# Get all views
views = sorted(
listdir(apath('%s/views/' % app, r=request), '[\w/\-]+\.\w+$'))
views = [x.replace('\\', '/') for x in views]
extend = {}
include = {}
for c in views:
data = safe_read(apath('%s/views/%s' % (app, c), r=request))
items = regex_extend.findall(data)
if items:
extend[c] = items[0][1]
items = regex_include.findall(data)
include[c] = [i[1] for i in items]
# Get all modules
modules = listdir(apath('%s/modules/' % app, r=request), '.*\.py$')
    modules = [x.replace('\\', '/') for x in modules]
modules.sort()
# Get all private files
privates = listdir(apath('%s/private/' % app, r=request), '[^\.#].*')
privates = [x.replace('\\', '/') for x in privates]
privates.sort()
# Get all static files
statics = listdir(apath('%s/static/' % app, r=request), '[^\.#].*',
maxnum = MAXNFILES)
statics = [x.replace(os.path.sep, '/') for x in statics]
statics.sort()
# Get all languages
languages = sorted([lang + '.py' for lang, info in
T.get_possible_languages_info().iteritems()
if info[2] != 0]) # info[2] is langfile_mtime:
                        # get only existing files
#Get crontab
crontab = apath('%s/cron/crontab' % app, r=request)
if not os.path.exists(crontab):
safe_write(crontab, '#crontab')
def filter_plugins(items):
regex = re.compile('^plugin_' + plugin + '(/.*|\..*)?$')
return [item for item in items if item and regex.match(item)]
return dict(app=app,
models=filter_plugins(models),
defines=defines,
controllers=filter_plugins(controllers),
functions=functions,
views=filter_plugins(views),
modules=filter_plugins(modules),
extend=extend,
include=include,
privates=filter_plugins(privates),
statics=filter_plugins(statics),
languages=languages,
crontab=crontab)
def create_file():
""" Create files handler """
if request.vars and not request.vars.token == session.token:
redirect(URL('logout'))
try:
anchor = '#' + request.vars.id if request.vars.id else ''
if request.vars.app:
app = get_app(request.vars.app)
path = abspath(request.vars.location)
else:
if request.vars.dir:
request.vars.location += request.vars.dir + '/'
app = get_app(name=request.vars.location.split('/')[0])
path = apath(request.vars.location, r=request)
filename = re.sub('[^\w./-]+', '_', request.vars.filename)
if path[-7:] == '/rules/':
# Handle plural rules files
if len(filename) == 0:
raise SyntaxError
if not filename[-3:] == '.py':
filename += '.py'
lang = re.match('^plural_rules-(.*)\.py$', filename).group(1)
langinfo = read_possible_languages(apath(app, r=request))[lang]
text = dedent("""
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Plural-Forms for %(lang)s (%(langname)s)
nplurals=2 # for example, English language has 2 forms:
# 1 singular and 1 plural
# Determine plural_id for number *n* as sequence of positive
# integers: 0,1,...
# NOTE! For singular form ALWAYS return plural_id = 0
get_plural_id = lambda n: int(n != 1)
# Construct and return plural form of *word* using
# *plural_id* (which ALWAYS>0). This function will be executed
# for words (or phrases) not found in plural_dict dictionary.
# By default this function simply returns word in singular:
construct_plural_form = lambda word, plural_id: word
""")[1:] % dict(lang=langinfo[0], langname=langinfo[1])
elif path[-11:] == '/languages/':
# Handle language files
if len(filename) == 0:
raise SyntaxError
if not filename[-3:] == '.py':
filename += '.py'
path = os.path.join(apath(app, r=request), 'languages', filename)
if not os.path.exists(path):
safe_write(path, '')
# create language xx[-yy].py file:
findT(apath(app, r=request), filename[:-3])
session.flash = T('language file "%(filename)s" created/updated',
dict(filename=filename))
redirect(request.vars.sender + anchor)
elif path[-8:] == '/models/':
# Handle python models
if not filename[-3:] == '.py':
filename += '.py'
if len(filename) == 3:
raise SyntaxError
text = '# -*- coding: utf-8 -*-\n'
elif path[-13:] == '/controllers/':
# Handle python controllers
if not filename[-3:] == '.py':
filename += '.py'
if len(filename) == 3:
raise SyntaxError
text = '# -*- coding: utf-8 -*-\n# %s\ndef index(): return dict(message="hello from %s")'
text = text % (T('try something like'), filename)
elif path[-7:] == '/views/':
if request.vars.plugin and not filename.startswith('plugin_%s/' % request.vars.plugin):
filename = 'plugin_%s/%s' % (request.vars.plugin, filename)
# Handle template (html) views
if filename.find('.') < 0:
filename += '.html'
extension = filename.split('.')[-1].lower()
if len(filename) == 5:
raise SyntaxError
msg = T(
'This is the %(filename)s template', dict(filename=filename))
if extension == 'html':
text = dedent("""
{{extend 'layout.html'}}
<h1>%s</h1>
{{=BEAUTIFY(response._vars)}}""" % msg)[1:]
else:
generic = os.path.join(path, 'generic.' + extension)
if os.path.exists(generic):
text = read_file(generic)
else:
text = ''
elif path[-9:] == '/modules/':
if request.vars.plugin and not filename.startswith('plugin_%s/' % request.vars.plugin):
filename = 'plugin_%s/%s' % (request.vars.plugin, filename)
# Handle python module files
if not filename[-3:] == '.py':
filename += '.py'
if len(filename) == 3:
raise SyntaxError
text = dedent("""
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from gluon import *\n""")[1:]
elif (path[-8:] == '/static/') or (path[-9:] == '/private/'):
if (request.vars.plugin and
not filename.startswith('plugin_%s/' % request.vars.plugin)):
filename = 'plugin_%s/%s' % (request.vars.plugin, filename)
text = ''
else:
redirect(request.vars.sender + anchor)
full_filename = os.path.join(path, filename)
dirpath = os.path.dirname(full_filename)
if not os.path.exists(dirpath):
os.makedirs(dirpath)
if os.path.exists(full_filename):
raise SyntaxError
safe_write(full_filename, text)
log_progress(app, 'CREATE', filename)
if request.vars.dir:
result = T('file "%(filename)s" created',
dict(filename=full_filename[len(path):]))
else:
session.flash = T('file "%(filename)s" created',
dict(filename=full_filename[len(path):]))
vars = {}
if request.vars.id:
vars['id'] = request.vars.id
if request.vars.app:
vars['app'] = request.vars.app
redirect(URL('edit',
args=[os.path.join(request.vars.location, filename)], vars=vars))
except Exception, e:
if not isinstance(e, HTTP):
session.flash = T('cannot create file')
if request.vars.dir:
response.flash = result
response.headers['web2py-component-content'] = 'append'
response.headers['web2py-component-command'] = """
$.web2py.invalidate('#files_menu');
load_file('%s');
$.web2py.enableElement($('#form form').find($.web2py.formInputClickSelector));
""" % URL('edit', args=[app,request.vars.dir,filename])
return ''
else:
redirect(request.vars.sender + anchor)
def listfiles(app, dir, regexp='.*\.py$'):
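    """ List files under an application subfolder matching regexp, excluding .bak copies """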
files = sorted(
listdir(apath('%(app)s/%(dir)s/' % {'app':app, 'dir':dir}, r=request), regexp))
files = [x.replace('\\', '/') for x in files if not x.endswith('.bak')]
return files
def editfile(path,file,vars={}, app = None):
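    """ Build a link to open a file in the editor """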
args=(path,file) if 'app' in vars else (app,path,file)
url = URL('edit', args=args, vars=vars)
return A(file, _class='editor_filelink', _href=url, _style='word-wrap: nowrap;')
def files_menu():
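    """ Build the collapsible files menu (models, controllers, views, ...) for the editor """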
app = request.vars.app or 'welcome'
dirs=[{'name':'models', 'reg':'.*\.py$'},
{'name':'controllers', 'reg':'.*\.py$'},
{'name':'views', 'reg':'[\w/\-]+(\.\w+)+$'},
{'name':'modules', 'reg':'.*\.py$'},
{'name':'static', 'reg': '[^\.#].*'},
{'name':'private', 'reg':'.*\.py$'}]
result_files = []
for dir in dirs:
result_files.append(TAG[''](LI(dir['name'], _class="nav-header component", _onclick="collapse('" + dir['name'] + "_files');"),
LI(UL(*[LI(editfile(dir['name'], f, dict(id=dir['name'] + f.replace('.','__')), app), _style="overflow:hidden", _id=dir['name']+"__"+f.replace('.','__'))
for f in listfiles(app, dir['name'], regexp=dir['reg'])],
_class="nav nav-list small-font"),
_id=dir['name'] + '_files', _style="display: none;")))
return dict(result_files = result_files)
def upload_file():
""" File uploading handler """
if request.vars and not request.vars.token == session.token:
redirect(URL('logout'))
try:
filename = None
app = get_app(name=request.vars.location.split('/')[0])
path = apath(request.vars.location, r=request)
if request.vars.filename:
filename = re.sub('[^\w\./]+', '_', request.vars.filename)
else:
filename = os.path.split(request.vars.file.filename)[-1]
if path[-8:] == '/models/' and not filename[-3:] == '.py':
filename += '.py'
if path[-9:] == '/modules/' and not filename[-3:] == '.py':
filename += '.py'
if path[-13:] == '/controllers/' and not filename[-3:] == '.py':
filename += '.py'
if path[-7:] == '/views/' and not filename[-5:] == '.html':
filename += '.html'
if path[-11:] == '/languages/' and not filename[-3:] == '.py':
filename += '.py'
filename = os.path.join(path, filename)
dirpath = os.path.dirname(filename)
if not os.path.exists(dirpath):
os.makedirs(dirpath)
data = request.vars.file.file.read()
lineno = count_lines(data)
safe_write(filename, data, 'wb')
log_progress(app, 'UPLOAD', filename, lineno)
session.flash = T('file "%(filename)s" uploaded',
dict(filename=filename[len(path):]))
except Exception:
if filename:
d = dict(filename=filename[len(path):])
else:
            d = dict(filename='unknown')
session.flash = T('cannot upload file "%(filename)s"', d)
redirect(request.vars.sender)
def errors():
""" Error handler """
import operator
import os
import pickle
import hashlib
app = get_app()
if is_gae:
method = 'dbold' if ('old' in
(request.args(1) or '')) else 'dbnew'
else:
method = request.args(1) or 'new'
db_ready = {}
db_ready['status'] = get_ticket_storage(app)
db_ready['errmessage'] = T(
"No ticket_storage.txt found under /private folder")
db_ready['errlink'] = "http://web2py.com/books/default/chapter/29/13#Collecting-tickets"
if method == 'new':
errors_path = apath('%s/errors' % app, r=request)
delete_hashes = []
for item in request.vars:
if item[:7] == 'delete_':
delete_hashes.append(item[7:])
hash2error = dict()
for fn in listdir(errors_path, '^[a-fA-F0-9.\-]+$'):
fullpath = os.path.join(errors_path, fn)
if not os.path.isfile(fullpath):
continue
try:
fullpath_file = open(fullpath, 'r')
try:
error = pickle.load(fullpath_file)
finally:
fullpath_file.close()
except IOError:
continue
except EOFError:
continue
hash = hashlib.md5(error['traceback']).hexdigest()
if hash in delete_hashes:
os.unlink(fullpath)
else:
try:
hash2error[hash]['count'] += 1
except KeyError:
error_lines = error['traceback'].split("\n")
last_line = error_lines[-2] if len(error_lines)>1 else 'unknown'
error_causer = os.path.split(error['layer'])[1]
hash2error[hash] = dict(count=1, pickel=error,
causer=error_causer,
last_line=last_line,
hash=hash, ticket=fn)
decorated = [(x['count'], x) for x in hash2error.values()]
decorated.sort(key=operator.itemgetter(0), reverse=True)
return dict(errors=[x[1] for x in decorated], app=app, method=method, db_ready=db_ready)
elif method == 'dbnew':
errors_path = apath('%s/errors' % app, r=request)
tk_db, tk_table = get_ticket_storage(app)
delete_hashes = []
for item in request.vars:
if item[:7] == 'delete_':
delete_hashes.append(item[7:])
hash2error = dict()
for fn in tk_db(tk_table.id > 0).select():
try:
error = pickle.loads(fn.ticket_data)
hash = hashlib.md5(error['traceback']).hexdigest()
if hash in delete_hashes:
tk_db(tk_table.id == fn.id).delete()
tk_db.commit()
else:
try:
hash2error[hash]['count'] += 1
except KeyError:
error_lines = error['traceback'].split("\n")
last_line = error_lines[-2]
error_causer = os.path.split(error['layer'])[1]
hash2error[hash] = dict(count=1,
pickel=error, causer=error_causer,
last_line=last_line, hash=hash,
ticket=fn.ticket_id)
except AttributeError, e:
tk_db(tk_table.id == fn.id).delete()
tk_db.commit()
decorated = [(x['count'], x) for x in hash2error.values()]
decorated.sort(key=operator.itemgetter(0), reverse=True)
return dict(errors=[x[1] for x in decorated], app=app,
method=method, db_ready=db_ready)
elif method == 'dbold':
tk_db, tk_table = get_ticket_storage(app)
for item in request.vars:
if item[:7] == 'delete_':
tk_db(tk_table.ticket_id == item[7:]).delete()
tk_db.commit()
tickets_ = tk_db(tk_table.id > 0).select(tk_table.ticket_id,
tk_table.created_datetime,
orderby=~tk_table.created_datetime)
tickets = [row.ticket_id for row in tickets_]
times = dict([(row.ticket_id, row.created_datetime) for
row in tickets_])
return dict(app=app, tickets=tickets, method=method,
times=times, db_ready=db_ready)
else:
for item in request.vars:
# delete_all rows doesn't contain any ticket
# Remove anything else as requested
            if item[:7] == 'delete_' and (not item == "delete_all"):
os.unlink(apath('%s/errors/%s' % (app, item[7:]), r=request))
func = lambda p: os.stat(apath('%s/errors/%s' %
(app, p), r=request)).st_mtime
tickets = sorted(
listdir(apath('%s/errors/' % app, r=request), '^\w.*'),
key=func,
reverse=True)
return dict(app=app, tickets=tickets, method=method, db_ready=db_ready)
def get_ticket_storage(app):
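    """ Return (db, table) used for database ticket storage, or False if not configured """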
private_folder = apath('%s/private' % app, r=request)
ticket_file = os.path.join(private_folder, 'ticket_storage.txt')
if os.path.exists(ticket_file):
db_string = open(ticket_file).read()
db_string = db_string.strip().replace('\r', '').replace('\n', '')
elif is_gae:
# use Datastore as fallback if there is no ticket_file
db_string = "google:datastore"
else:
return False
tickets_table = 'web2py_ticket'
tablename = tickets_table + '_' + app
db_path = apath('%s/databases' % app, r=request)
ticketsdb = DAL(db_string, folder=db_path, auto_import=True)
if not ticketsdb.get(tablename):
table = ticketsdb.define_table(
tablename,
Field('ticket_id', length=100),
Field('ticket_data', 'text'),
Field('created_datetime', 'datetime'),
)
return ticketsdb, ticketsdb.get(tablename)
def make_link(path):
""" Create a link from a path """
tryFile = path.replace('\\', '/')
if os.path.isabs(tryFile) and os.path.isfile(tryFile):
(folder, filename) = os.path.split(tryFile)
(base, ext) = os.path.splitext(filename)
app = get_app()
editable = {'controllers': '.py', 'models': '.py', 'views': '.html'}
for key in editable.keys():
check_extension = folder.endswith("%s/%s" % (app, key))
if ext.lower() == editable[key] and check_extension:
return A('"' + tryFile + '"',
_href=URL(r=request,
f='edit/%s/%s/%s' % (app, key, filename))).xml()
return ''
def make_links(traceback):
""" Make links using the given traceback """
lwords = traceback.split('"')
# Making the short circuit compatible with <= python2.4
result = (len(lwords) != 0) and lwords[0] or ''
i = 1
while i < len(lwords):
link = make_link(lwords[i])
if link == '':
result += '"' + lwords[i]
else:
result += link
if i + 1 < len(lwords):
result += lwords[i + 1]
i = i + 1
i = i + 1
return result
class TRACEBACK(object):
""" Generate the traceback """
def __init__(self, text):
""" TRACEBACK constructor """
self.s = make_links(CODE(text).xml())
def xml(self):
""" Returns the xml """
return self.s
def ticket():
""" Ticket handler """
if len(request.args) != 2:
session.flash = T('invalid ticket')
redirect(URL('site'))
app = get_app()
myversion = request.env.web2py_version
ticket = request.args[1]
e = RestrictedError()
e.load(request, app, ticket)
return dict(app=app,
ticket=ticket,
output=e.output,
traceback=(e.traceback and TRACEBACK(e.traceback)),
snapshot=e.snapshot,
code=e.code,
layer=e.layer,
myversion=myversion)
def ticketdb():
""" Ticket handler """
if len(request.args) != 2:
session.flash = T('invalid ticket')
redirect(URL('site'))
app = get_app()
myversion = request.env.web2py_version
ticket = request.args[1]
e = RestrictedError()
request.tickets_db = get_ticket_storage(app)[0]
e.load(request, app, ticket)
response.view = 'default/ticket.html'
return dict(app=app,
ticket=ticket,
output=e.output,
traceback=(e.traceback and TRACEBACK(e.traceback)),
snapshot=e.snapshot,
code=e.code,
layer=e.layer,
myversion=myversion)
def error():
""" Generate a ticket (for testing) """
raise RuntimeError('admin ticket generator at your service')
def update_languages():
""" Update available languages """
app = get_app()
update_all_languages(apath(app, r=request))
session.flash = T('Language files (static strings) updated')
redirect(URL('design', args=app, anchor='languages'))
def user():
if MULTI_USER_MODE:
if not db(db.auth_user).count():
auth.settings.registration_requires_approval = False
return dict(form=auth())
else:
return dict(form=T("Disabled"))
def reload_routes():
""" Reload routes.py """
import gluon.rewrite
gluon.rewrite.load()
redirect(URL('site'))
def manage_students():
if not (MULTI_USER_MODE and is_manager()):
session.flash = T('Not Authorized')
redirect(URL('site'))
db.auth_user.registration_key.writable = True
grid = SQLFORM.grid(db.auth_user)
return locals()
def bulk_register():
if not (MULTI_USER_MODE and is_manager()):
session.flash = T('Not Authorized')
redirect(URL('site'))
form = SQLFORM.factory(Field('emails', 'text'))
if form.process().accepted:
emails = [x.strip() for x in form.vars.emails.split('\n') if x.strip()]
n = 0
for email in emails:
if not db.auth_user(email=email):
n += db.auth_user.insert(email=email) and 1 or 0
session.flash = T('%s students registered', n)
redirect(URL('site'))
return locals()
### Begin experimental stuff need fixes:
# 1) should run in its own process - cannot os.chdir
# 2) should not prompt user at console
# 3) should give option to force commit and not require manual merge
def git_pull():
""" Git Pull handler """
app = get_app()
if not have_git:
session.flash = GIT_MISSING
redirect(URL('site'))
dialog = FORM.confirm(T('Pull'),
{T('Cancel'): URL('site')})
if dialog.accepted:
try:
repo = git.Repo(os.path.join(apath(r=request), app))
origin = repo.remotes.origin
origin.fetch()
origin.pull()
session.flash = T("Application updated via git pull")
redirect(URL('site'))
except git.CheckoutError:
session.flash = T("Pull failed, certain files could not be checked out. Check logs for details.")
redirect(URL('site'))
except git.UnmergedEntriesError:
session.flash = T("Pull is not possible because you have unmerged files. Fix them up in the work tree, and then try again.")
redirect(URL('site'))
except git.GitCommandError:
session.flash = T(
"Pull failed, git exited abnormally. See logs for details.")
redirect(URL('site'))
except AssertionError:
session.flash = T("Pull is not possible because you have unmerged files. Fix them up in the work tree, and then try again.")
redirect(URL('site'))
elif 'cancel' in request.vars:
redirect(URL('site'))
return dict(app=app, dialog=dialog)
def git_push():
""" Git Push handler """
app = get_app()
if not have_git:
session.flash = GIT_MISSING
redirect(URL('site'))
form = SQLFORM.factory(Field('changelog', requires=IS_NOT_EMPTY()))
form.element('input[type=submit]')['_value'] = T('Push')
form.add_button(T('Cancel'), URL('site'))
form.process()
if form.accepted:
try:
repo = git.Repo(os.path.join(apath(r=request), app))
index = repo.index
index.add([apath(r=request) + app + '/*'])
new_commit = index.commit(form.vars.changelog)
origin = repo.remotes.origin
origin.push()
session.flash = T(
"Git repo updated with latest application changes.")
redirect(URL('site'))
except git.UnmergedEntriesError:
session.flash = T("Push failed, there are unmerged entries in the cache. Resolve merge issues manually and try again.")
redirect(URL('site'))
return dict(app=app, form=form)
def plugins():
app = request.args(0)
from serializers import loads_json
if not session.plugins:
try:
rawlist = urllib.urlopen("http://www.web2pyslices.com/" +
"public/api.json/action/list/content/Package?package" +
"_type=plugin&search_index=false").read()
session.plugins = loads_json(rawlist)
except:
response.flash = T('Unable to download the list of plugins')
session.plugins = []
return dict(plugins=session.plugins["results"], app=request.args(0))
def install_plugin():
app = request.args(0)
source = request.vars.source
plugin = request.vars.plugin
if not (source and app):
raise HTTP(500, T("Invalid request"))
form = SQLFORM.factory()
result = None
if form.process().accepted:
# get w2p plugin
if "web2py.plugin." in source:
filename = "web2py.plugin.%s.w2p" % \
source.split("web2py.plugin.")[-1].split(".w2p")[0]
else:
filename = "web2py.plugin.%s.w2p" % cleanpath(plugin)
if plugin_install(app, urllib.urlopen(source),
request, filename):
session.flash = T('New plugin installed: %s', filename)
else:
session.flash = \
T('unable to install plugin "%s"', filename)
redirect(URL(f="plugins", args=[app,]))
return dict(form=form, app=app, plugin=plugin, source=source)
| {
"content_hash": "03ff7525ed971fbb9aa9c911b72a7730",
"timestamp": "",
"source": "github",
"line_count": 1944,
"max_line_length": 187,
"avg_line_length": 36.88786008230453,
"alnum_prop": 0.5398270812996793,
"repo_name": "Titosoft/ferry-boat",
"id": "547d6c947950cd4c9e93f2c4c16483d8ed376ce0",
"size": "71710",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "web2py/applications/admin/controllers/default.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "230159"
},
{
"name": "JavaScript",
"bytes": "305848"
},
{
"name": "Perl",
"bytes": "1688"
},
{
"name": "Python",
"bytes": "6385104"
},
{
"name": "Shell",
"bytes": "90609"
}
],
"symlink_target": ""
} |
# Copyright (c) 2009-2014, Mario Vilas
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice,this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Wrapper for psapi.dll in ctypes.
"""
__revision__ = "$Id$"
from winappdbg.win32.defines import *
#==============================================================================
# This is used later on to calculate the list of exported symbols.
_all = None
_all = set(vars().keys())
#==============================================================================
#--- PSAPI structures and constants -------------------------------------------
LIST_MODULES_DEFAULT = 0x00
LIST_MODULES_32BIT = 0x01
LIST_MODULES_64BIT = 0x02
LIST_MODULES_ALL = 0x03
# typedef struct _MODULEINFO {
# LPVOID lpBaseOfDll;
# DWORD SizeOfImage;
# LPVOID EntryPoint;
# } MODULEINFO, *LPMODULEINFO;
class MODULEINFO(Structure):
_fields_ = [
("lpBaseOfDll", LPVOID), # remote pointer
("SizeOfImage", DWORD),
("EntryPoint", LPVOID), # remote pointer
]
LPMODULEINFO = POINTER(MODULEINFO)
#--- psapi.dll ----------------------------------------------------------------
# BOOL WINAPI EnumDeviceDrivers(
# __out LPVOID *lpImageBase,
# __in DWORD cb,
# __out LPDWORD lpcbNeeded
# );
def EnumDeviceDrivers():
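    """ Return the load addresses (remote pointers) of all currently loaded device drivers """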
_EnumDeviceDrivers = windll.psapi.EnumDeviceDrivers
_EnumDeviceDrivers.argtypes = [LPVOID, DWORD, LPDWORD]
_EnumDeviceDrivers.restype = bool
_EnumDeviceDrivers.errcheck = RaiseIfZero
size = 0x1000
lpcbNeeded = DWORD(size)
unit = sizeof(LPVOID)
while 1:
lpImageBase = (LPVOID * (size // unit))()
_EnumDeviceDrivers(byref(lpImageBase), lpcbNeeded, byref(lpcbNeeded))
needed = lpcbNeeded.value
if needed <= size:
break
size = needed
return [ lpImageBase[index] for index in compat.xrange(0, (needed // unit)) ]
# BOOL WINAPI EnumProcesses(
# __out DWORD *pProcessIds,
# __in DWORD cb,
# __out DWORD *pBytesReturned
# );
def EnumProcesses():
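    """ Return the list of IDs of all processes currently running """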
_EnumProcesses = windll.psapi.EnumProcesses
_EnumProcesses.argtypes = [LPVOID, DWORD, LPDWORD]
_EnumProcesses.restype = bool
_EnumProcesses.errcheck = RaiseIfZero
size = 0x1000
cbBytesReturned = DWORD()
unit = sizeof(DWORD)
while 1:
ProcessIds = (DWORD * (size // unit))()
cbBytesReturned.value = size
_EnumProcesses(byref(ProcessIds), cbBytesReturned, byref(cbBytesReturned))
returned = cbBytesReturned.value
if returned < size:
break
size = size + 0x1000
ProcessIdList = list()
for ProcessId in ProcessIds:
if ProcessId is None:
break
ProcessIdList.append(ProcessId)
return ProcessIdList
# BOOL WINAPI EnumProcessModules(
# __in HANDLE hProcess,
# __out HMODULE *lphModule,
# __in DWORD cb,
# __out LPDWORD lpcbNeeded
# );
def EnumProcessModules(hProcess):
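    """ Return the handles of the modules loaded in the given process """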
_EnumProcessModules = windll.psapi.EnumProcessModules
_EnumProcessModules.argtypes = [HANDLE, LPVOID, DWORD, LPDWORD]
_EnumProcessModules.restype = bool
_EnumProcessModules.errcheck = RaiseIfZero
size = 0x1000
lpcbNeeded = DWORD(size)
unit = sizeof(HMODULE)
while 1:
lphModule = (HMODULE * (size // unit))()
_EnumProcessModules(hProcess, byref(lphModule), lpcbNeeded, byref(lpcbNeeded))
needed = lpcbNeeded.value
if needed <= size:
break
size = needed
return [ lphModule[index] for index in compat.xrange(0, int(needed // unit)) ]
# BOOL WINAPI EnumProcessModulesEx(
# __in HANDLE hProcess,
# __out HMODULE *lphModule,
# __in DWORD cb,
# __out LPDWORD lpcbNeeded,
# __in DWORD dwFilterFlag
# );
def EnumProcessModulesEx(hProcess, dwFilterFlag = LIST_MODULES_DEFAULT):
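    """ Return the handles of the modules loaded in the given process, filtered by the LIST_MODULES_* flag """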
_EnumProcessModulesEx = windll.psapi.EnumProcessModulesEx
_EnumProcessModulesEx.argtypes = [HANDLE, LPVOID, DWORD, LPDWORD, DWORD]
_EnumProcessModulesEx.restype = bool
_EnumProcessModulesEx.errcheck = RaiseIfZero
size = 0x1000
lpcbNeeded = DWORD(size)
unit = sizeof(HMODULE)
while 1:
lphModule = (HMODULE * (size // unit))()
_EnumProcessModulesEx(hProcess, byref(lphModule), lpcbNeeded, byref(lpcbNeeded), dwFilterFlag)
needed = lpcbNeeded.value
if needed <= size:
break
size = needed
return [ lphModule[index] for index in compat.xrange(0, (needed // unit)) ]
# DWORD WINAPI GetDeviceDriverBaseName(
# __in LPVOID ImageBase,
# __out LPTSTR lpBaseName,
# __in DWORD nSize
# );
def GetDeviceDriverBaseNameA(ImageBase):
_GetDeviceDriverBaseNameA = windll.psapi.GetDeviceDriverBaseNameA
_GetDeviceDriverBaseNameA.argtypes = [LPVOID, LPSTR, DWORD]
_GetDeviceDriverBaseNameA.restype = DWORD
nSize = MAX_PATH
while 1:
lpBaseName = ctypes.create_string_buffer("", nSize)
nCopied = _GetDeviceDriverBaseNameA(ImageBase, lpBaseName, nSize)
if nCopied == 0:
raise ctypes.WinError()
if nCopied < (nSize - 1):
break
nSize = nSize + MAX_PATH
return lpBaseName.value
def GetDeviceDriverBaseNameW(ImageBase):
_GetDeviceDriverBaseNameW = windll.psapi.GetDeviceDriverBaseNameW
_GetDeviceDriverBaseNameW.argtypes = [LPVOID, LPWSTR, DWORD]
_GetDeviceDriverBaseNameW.restype = DWORD
nSize = MAX_PATH
while 1:
lpBaseName = ctypes.create_unicode_buffer(u"", nSize)
nCopied = _GetDeviceDriverBaseNameW(ImageBase, lpBaseName, nSize)
if nCopied == 0:
raise ctypes.WinError()
if nCopied < (nSize - 1):
break
nSize = nSize + MAX_PATH
return lpBaseName.value
GetDeviceDriverBaseName = GuessStringType(GetDeviceDriverBaseNameA, GetDeviceDriverBaseNameW)
# DWORD WINAPI GetDeviceDriverFileName(
# __in LPVOID ImageBase,
# __out LPTSTR lpFilename,
# __in DWORD nSize
# );
def GetDeviceDriverFileNameA(ImageBase):
_GetDeviceDriverFileNameA = windll.psapi.GetDeviceDriverFileNameA
_GetDeviceDriverFileNameA.argtypes = [LPVOID, LPSTR, DWORD]
_GetDeviceDriverFileNameA.restype = DWORD
nSize = MAX_PATH
while 1:
lpFilename = ctypes.create_string_buffer("", nSize)
nCopied = ctypes.windll.psapi.GetDeviceDriverFileNameA(ImageBase, lpFilename, nSize)
if nCopied == 0:
raise ctypes.WinError()
if nCopied < (nSize - 1):
break
nSize = nSize + MAX_PATH
return lpFilename.value
def GetDeviceDriverFileNameW(ImageBase):
_GetDeviceDriverFileNameW = windll.psapi.GetDeviceDriverFileNameW
_GetDeviceDriverFileNameW.argtypes = [LPVOID, LPWSTR, DWORD]
_GetDeviceDriverFileNameW.restype = DWORD
nSize = MAX_PATH
while 1:
lpFilename = ctypes.create_unicode_buffer(u"", nSize)
nCopied = ctypes.windll.psapi.GetDeviceDriverFileNameW(ImageBase, lpFilename, nSize)
if nCopied == 0:
raise ctypes.WinError()
if nCopied < (nSize - 1):
break
nSize = nSize + MAX_PATH
return lpFilename.value
GetDeviceDriverFileName = GuessStringType(GetDeviceDriverFileNameA, GetDeviceDriverFileNameW)
# DWORD WINAPI GetMappedFileName(
# __in HANDLE hProcess,
# __in LPVOID lpv,
# __out LPTSTR lpFilename,
# __in DWORD nSize
# );
def GetMappedFileNameA(hProcess, lpv):
_GetMappedFileNameA = ctypes.windll.psapi.GetMappedFileNameA
_GetMappedFileNameA.argtypes = [HANDLE, LPVOID, LPSTR, DWORD]
_GetMappedFileNameA.restype = DWORD
nSize = MAX_PATH
while 1:
lpFilename = ctypes.create_string_buffer("", nSize)
nCopied = _GetMappedFileNameA(hProcess, lpv, lpFilename, nSize)
if nCopied == 0:
raise ctypes.WinError()
if nCopied < (nSize - 1):
break
nSize = nSize + MAX_PATH
return lpFilename.value
def GetMappedFileNameW(hProcess, lpv):
_GetMappedFileNameW = ctypes.windll.psapi.GetMappedFileNameW
_GetMappedFileNameW.argtypes = [HANDLE, LPVOID, LPWSTR, DWORD]
_GetMappedFileNameW.restype = DWORD
nSize = MAX_PATH
while 1:
lpFilename = ctypes.create_unicode_buffer(u"", nSize)
nCopied = _GetMappedFileNameW(hProcess, lpv, lpFilename, nSize)
if nCopied == 0:
raise ctypes.WinError()
if nCopied < (nSize - 1):
break
nSize = nSize + MAX_PATH
return lpFilename.value
GetMappedFileName = GuessStringType(GetMappedFileNameA, GetMappedFileNameW)
# DWORD WINAPI GetModuleFileNameEx(
# __in HANDLE hProcess,
# __in_opt HMODULE hModule,
# __out LPTSTR lpFilename,
# __in DWORD nSize
# );
def GetModuleFileNameExA(hProcess, hModule = None):
_GetModuleFileNameExA = ctypes.windll.psapi.GetModuleFileNameExA
_GetModuleFileNameExA.argtypes = [HANDLE, HMODULE, LPSTR, DWORD]
_GetModuleFileNameExA.restype = DWORD
nSize = MAX_PATH
while 1:
lpFilename = ctypes.create_string_buffer("", nSize)
nCopied = _GetModuleFileNameExA(hProcess, hModule, lpFilename, nSize)
if nCopied == 0:
raise ctypes.WinError()
if nCopied < (nSize - 1):
break
nSize = nSize + MAX_PATH
return lpFilename.value
def GetModuleFileNameExW(hProcess, hModule = None):
_GetModuleFileNameExW = ctypes.windll.psapi.GetModuleFileNameExW
_GetModuleFileNameExW.argtypes = [HANDLE, HMODULE, LPWSTR, DWORD]
_GetModuleFileNameExW.restype = DWORD
nSize = MAX_PATH
while 1:
lpFilename = ctypes.create_unicode_buffer(u"", nSize)
nCopied = _GetModuleFileNameExW(hProcess, hModule, lpFilename, nSize)
if nCopied == 0:
raise ctypes.WinError()
if nCopied < (nSize - 1):
break
nSize = nSize + MAX_PATH
return lpFilename.value
GetModuleFileNameEx = GuessStringType(GetModuleFileNameExA, GetModuleFileNameExW)
# BOOL WINAPI GetModuleInformation(
# __in HANDLE hProcess,
# __in HMODULE hModule,
# __out LPMODULEINFO lpmodinfo,
# __in DWORD cb
# );
def GetModuleInformation(hProcess, hModule, lpmodinfo = None):
_GetModuleInformation = windll.psapi.GetModuleInformation
_GetModuleInformation.argtypes = [HANDLE, HMODULE, LPMODULEINFO, DWORD]
_GetModuleInformation.restype = bool
_GetModuleInformation.errcheck = RaiseIfZero
if lpmodinfo is None:
lpmodinfo = MODULEINFO()
_GetModuleInformation(hProcess, hModule, byref(lpmodinfo), sizeof(lpmodinfo))
return lpmodinfo
# DWORD WINAPI GetProcessImageFileName(
# __in HANDLE hProcess,
# __out LPTSTR lpImageFileName,
# __in DWORD nSize
# );
def GetProcessImageFileNameA(hProcess):
_GetProcessImageFileNameA = windll.psapi.GetProcessImageFileNameA
_GetProcessImageFileNameA.argtypes = [HANDLE, LPSTR, DWORD]
_GetProcessImageFileNameA.restype = DWORD
nSize = MAX_PATH
while 1:
lpFilename = ctypes.create_string_buffer("", nSize)
nCopied = _GetProcessImageFileNameA(hProcess, lpFilename, nSize)
if nCopied == 0:
raise ctypes.WinError()
if nCopied < (nSize - 1):
break
nSize = nSize + MAX_PATH
return lpFilename.value
def GetProcessImageFileNameW(hProcess):
_GetProcessImageFileNameW = windll.psapi.GetProcessImageFileNameW
_GetProcessImageFileNameW.argtypes = [HANDLE, LPWSTR, DWORD]
_GetProcessImageFileNameW.restype = DWORD
nSize = MAX_PATH
while 1:
lpFilename = ctypes.create_unicode_buffer(u"", nSize)
nCopied = _GetProcessImageFileNameW(hProcess, lpFilename, nSize)
if nCopied == 0:
raise ctypes.WinError()
if nCopied < (nSize - 1):
break
nSize = nSize + MAX_PATH
return lpFilename.value
GetProcessImageFileName = GuessStringType(GetProcessImageFileNameA, GetProcessImageFileNameW)
#==============================================================================
# This calculates the list of exported symbols.
_all = set(vars().keys()).difference(_all)
__all__ = [_x for _x in _all if not _x.startswith('_')]
__all__.sort()
#==============================================================================
| {
"content_hash": "787123b86da5b52118f993fda7ac0185",
"timestamp": "",
"source": "github",
"line_count": 385,
"max_line_length": 102,
"avg_line_length": 36.625974025974024,
"alnum_prop": 0.6408765335791787,
"repo_name": "glenngillen/dotfiles",
"id": "34b791c77647807ecf0a60f7270bcdd0559d7f8d",
"size": "14149",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": ".vscode/extensions/ms-python.python-2022.2.1924087327/pythonFiles/lib/python/debugpy/_vendored/pydevd/pydevd_attach_to_process/winappdbg/win32/psapi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Ruby",
"bytes": "3634"
},
{
"name": "Shell",
"bytes": "4225"
},
{
"name": "Vim script",
"bytes": "16306"
}
],
"symlink_target": ""
} |
import sys
import decorator
import fnmatch
import os
import glob
@decorator.target("android_resource")
def gen_android_res(name, in_deps, is_res, is_assets):
export_deps = []
deps = []
name = name + "_res"
print "name = '%s'," % name
print "package = 'com.tencent.mm',"
if is_res:
print "res = 'res',"
if is_assets:
print "assets = 'assets',"
print "visibility = [ 'PUBLIC' ],"
gen_deps(in_deps)
target = ":%s" % name
export_deps.append(target)
#deps.append(target)
return export_deps, deps
@decorator.target("android_library")
def gen_android_lib(name, sdk_target, aidls, deps, export_deps):
name = name + "_proj"
print "name = '%s'," % name
if not sdk_target.startswith("Google"):
sdk_target = "Google Inc.:Google APIs:%d" % int(sdk_target.split('-')[1])
print "android_target = '%s'," % sdk_target
##print srcs target
if name.startswith("libsupport"):
print "srcs = glob(['src/**/*.java', 'java/**/*.java', 'eclair/**/*.java','eclair-mr1/**/*.java', 'froyo/**/*.java', 'gingerbread/**/*.java','honeycomb/**/*.java', 'honeycomb_mr2/**/*.java', 'ics/**/*.java', 'ics-mr1/**/*.java', 'jellybean/**/*.java', 'jellybean-mr1/**/*.java', 'jellybean-mr2/**/*.java']) + ",
else:
print "srcs = glob(['src/**/*.java', 'gen/**/*.java']) + "
print "["
_print_aidl_genfile(aidls)
print "],"
print "visibility = [ 'PUBLIC' ],"
print "manifest = 'AndroidManifest.xml',"
gen_deps(deps)
gen_exported_deps(export_deps)
@decorator.target("prebuilt_native_library")
def gen_native_lib(name):
native_name = name + "_native"
print "name = '%s'," % native_name
print "native_libs = 'libs',"
return ":%s" % native_name
@decorator.target("gen_aidl")
def gen_aidl(name, aidl, proj):
name = path_get_basename(name)
print "name = '%s'," % name
print "aidl = '%s'," % aidl
print "import_path = '%s/src/'," % proj
return ":%s" % name
@decorator.target("prebuilt_jar")
def gen_jar(name, relative_path):
print "name = '%s'," % name
print "binary_jar = '%s'," % relative_path
print "visibility = [ 'PUBLIC' ],"
return ":%s" % name
@decorator.var_with_comma("deps")
def gen_deps(deps):
for dep in deps:
print "'%s'," % dep
@decorator.var_with_comma("exported_deps")
def gen_exported_deps(exported_deps):
for dep in exported_deps:
print "'%s'," % dep
def gen_res(path, name, proj_deps):
is_res, is_assets = check_res_stat(path)
exported_deps, deps = format_res_deps(path, proj_deps)
_exported_deps = []
_deps = []
if is_assets or is_res:
_exported_deps, _deps = gen_android_res(name, deps, is_res, is_assets)
return _exported_deps, _deps
def check_res_stat(path):
    res_dir, assets_dir = os.path.join(path, "res"), os.path.join(path, "assets")
    return (os.path.isdir(res_dir) and len(_find_all_files_with_suffix(res_dir, "*.*")) > 0,
            os.path.isdir(assets_dir) and len(_find_all_files_with_suffix(assets_dir, "*.*")) > 0)
def check_res_existed(path):
is_res, is_assets = check_res_stat(path)
return is_res or is_assets
def gen_jars(path):
export_deps = []
deps = []
jars = _find_all_files_with_suffix(path, "*.jar")
for relative_path in jars:
        ##extract filename without suffix (-4 means .jar's length)
name = relative_path.split("/")[-1][:-4]
target = gen_jar(name, relative_path)
export_deps.append(target)
#deps.append(target)
return export_deps, deps
def gen_aidls(aidls, proj):
export_deps = []
deps = []
for aidl in aidls:
name = os.path.split(aidl)[1]
target = gen_aidl(name, aidl, proj)
deps.append(target)
return export_deps, deps
def gen_native_libs(path, name):
export_desp = []
deps = []
lib_path = os.path.join(path, "libs")
if os.path.isdir(lib_path) and len(_find_all_files_with_suffix(path, "*.so")) > 0:
target = gen_native_lib(name)
deps.append(target)
#don't export native lib, buck will copy all native .so files
return export_desp, deps
def _print_aidl_genfile(aidls):
for aidl in aidls:
#remove .aild
aidl = aidl[:-5]
print "genfile( '%s.java' )," % aidl
def _find_all_aidls(relative_path):
aidls = _find_all_files_with_suffix(relative_path, "*.aidl")
no_aidls = ["src/com/tencent/mm/cache/MCacheItem.aidl",
"src/com/tencent/tmassistantsdk/downloadclient/TMAssistantDownloadTaskInfo.aidl"]
for no_aidl in no_aidls:
if no_aidl in aidls:
aidls.remove(no_aidl)
return aidls
def _find_all_files_with_suffix(relative_path, suffix):
matches = []
for root, dirnames, filenames in os.walk(relative_path):
for filename in fnmatch.filter(filenames, suffix):
matches.append(os.path.relpath(os.path.join(root,filename), relative_path))
return matches
def parse_deps(path):
"""
    Parse the project properties file and
    return (sdk_target, is_library_flag, deps).
"""
proj_fd = path + "/project.properties"
sdk_target = None
lib_flag = None
deps = []
with open(proj_fd) as fd:
for line in fd.readlines():
if (line.startswith("target=")):
sdk_target = line[len("target="):]
sdk_target = sdk_target.strip("\r\n")
if (line.startswith("android.library=")):
lib_flag = line[len("android.library="):]
lib_flag = lib_flag.strip("\r\n")
if (line.startswith("android.library.reference.")):
dep = line.split('=')[1].strip("\r\n")
if (dep.startswith("../")):
dep = dep[3:]
deps.append(dep)
return sdk_target, lib_flag, deps
def format_proj_deps(root, folders):
deps = []
export_deps = []
for proj in folders:
target = "//%s:%s_proj" % (proj, proj)
#deps.append(target)
export_deps.append(target)
return export_deps, deps
def format_res_deps(root, folders):
deps = []
export_deps = []
for proj in folders:
target = "//%s:%s_proj" % (proj, proj)
deps.append(target)
#export_deps.append(target)
return export_deps, deps
def path_get_parent(path):
return os.path.abspath(os.path.join(path, os.pardir))
def path_get_basename(path):
return os.path.splitext(os.path.basename(path))[0]
if __name__ == "__main__":
if len(sys.argv) > 1:
root = sys.argv[1]
else:
root = os.getcwd()
root = os.path.realpath(root)
path, proj_name = os.path.split(root)
    ##dep_libs holds just the folders of the dependency modules
sdk_target, is_lib, dep_libs = parse_deps(root)
if is_lib != "true":
        ##only generate a library project's BUCK
errinfo = "ONLY GEN LIB PROJECT's BUCK_%s" % proj_name
raise Exception(errinfo)
##gen aidls
aidls = _find_all_aidls(root)
aidl_exported_deps, aidl_deps = gen_aidls(aidls, proj_name)
##gen native libs
native_expoted_deps , native_deps = gen_native_libs(root, proj_name)
##gen jars
jar_exported_deps, jar_deps = gen_jars(root)
proj_exported_deps, proj_deps = format_proj_deps(root, dep_libs)
##gen res
res_exported_deps, res_deps = gen_res(root, proj_name, dep_libs)
##gen lib project
all_deps = []
all_deps.extend(proj_deps)
all_deps.extend(res_deps)
all_deps.extend(aidl_deps)
all_deps.extend(native_deps)
all_deps.extend(jar_deps)
all_exported_deps = []
all_exported_deps.extend(proj_exported_deps)
all_exported_deps.extend(res_exported_deps)
all_exported_deps.extend(aidl_exported_deps)
all_exported_deps.extend(native_expoted_deps)
all_exported_deps.extend(jar_exported_deps)
gen_android_lib(proj_name, sdk_target, aidls, all_deps, all_exported_deps)
| {
"content_hash": "de1f1d053d893f47232e6e91186f4d52",
"timestamp": "",
"source": "github",
"line_count": 248,
"max_line_length": 319,
"avg_line_length": 32.07258064516129,
"alnum_prop": 0.5993210963037465,
"repo_name": "simpleton/eclipse2buck",
"id": "124395fc801d6294ee70d48e014640ec2be4c029",
"size": "7954",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "legacy/buck_generator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "39699"
}
],
"symlink_target": ""
} |
import datetime
from django.template import defaultfilters
class Column(object):
"""
This class represents a table column. It is responsible for holding
metadata, and rending table cells. Like Django model/fields, columns
are usually created within the table class which they are bound to.
"""
creation_counter = 0
def __init__(self, name=None, value=None, link=None, sortable=True, css_class=None, sort_key_fn=None, titleized=True, safe=False, header_class=""):
self._name = name
self._value = value
self._link = link
self._css_class = css_class
self._header_class = header_class
self.sortable = sortable
self.sort_key_fn = sort_key_fn
self._safe = safe
self._titleized = titleized
        # like django fields, keep track of the order in which columns are
# created, so they can be sorted later. (unfortunately, python
# attrs are passed to __new__ as an unsorted dict, so we must
# keep track of this manually to avoid random column order.)
self.creation_counter = Column.creation_counter
Column.creation_counter += 1
self.bound_to = None
def __lt__(self, other):
"""Allow columns to be sorted by order of creation."""
return self.creation_counter < other.creation_counter
def __unicode__(self):
return self.name
def bind_to(self, table, name):
"""
Bind this column to a table, and assign it a name. This method
can only be called once per instance, because a Column cannot be
bound to multiple tables. (The sort order would be ambiguous.)
"""
if self.bound_to is not None:
raise AttributeError(
"Column is already bound to '%s' as '%s'" %\
self.bound_to)
self.bound_to = (table, name)
@property
def is_bound(self):
"""Return true if this column is bound to a table."""
return (self.bound_to is not None)
@property
def name(self):
"""Return the column name, whether explicit or implicit."""
return self._name or self.bound_to[1]
@property
def titleized(self):
"""Return whether the column title should be titleized."""
return self._titleized
@property
def safe(self):
"""
Return whether cell should be rendered directly as HTML.
"""
return self._safe
def value(self, cell):
"""
Extract the value of ``cell``, ready to be rendered.
If this Column was instantiated with a ``value`` attribute, it
is called here to provide the value. (For example, to provide a
calculated value.) Otherwise, ``cell.value`` is returned.
"""
if self._value is not None:
return self._value(cell)
else:
return cell.value
def render(self, cell):
"""
Render ``cell``, ready for display. The default behavior is to
simply cast its value to unicode, but this may be overridden by
child classes to do something more useful.
"""
return unicode(self.value(cell))
@property
def has_link(self):
"""Return True if this column contains links."""
return self._link is not None
def link(self, cell):
"""
Return the URL which ``cell`` should link to, or None if this
column does not contain links.
If this Column was instantiated with a ``link`` attribute, it is
called here (with a single parameter of ``cell``), to provide
the value. Otherwise, None is returned.
"""
if self.has_link:
return self._link(cell)
return None
@property
def has_css_class(self):
"""Return True if a CSS class is defined for this column."""
return self._css_class is not None
@property
    def has_default_css_class(self):
        """Return True if a default (string) CSS class is defined for this column."""
return self._css_class is not None and isinstance(self._css_class, basestring)
@property
def default_css_class(self):
if isinstance(self._css_class, basestring):
return self._css_class
return None
def css_class(self, cell):
"""Return the CSS class for this column."""
if isinstance(self._css_class, basestring):
return self._css_class
else:
return self._css_class(cell)
def header_class(self):
return self._header_class
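# Illustrative sketch only (not part of the original module): columns are
# declared as class attributes of a table, mirroring Django model fields.
# The ``Table`` base class and the ``cell.row`` attribute used below are
# assumptions about the surrounding djtables API, not shown in this file.
#
#   class PersonTable(Table):
#       name    = Column()
#       website = Column(link=lambda cell: cell.row.website, sortable=False)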
class DateColumn(Column):
"""
This class provides a simple way to render a Date field, using the
Django 'date' template filter. The ``format`` argument specifies the
    string in `Django date format`_, **not** `Python date format`_.
    If ``format`` is None the ``DATE_FORMAT`` setting is used.
    .. _Django date format: http://docs.djangoproject.com/en/dev/ref/templates/builtins/#ttag-now
    .. _Python date format: http://docs.python.org/library/datetime.html#strftime-strptime-behavior
"""
def __init__(self, format=None, *args, **kwargs):
super(DateColumn, self).__init__(*args, **kwargs)
self._format = format
def render(self, cell):
return defaultfilters.date(
self.value(cell),
self._format)
class WrappedColumn(object):
"""
This class wraps a Column instance, and binds it to a Table instance
to provide useful properties to the template. This represents a per-
render instance of the column, containing its volatile state, such
as sort order. (More properties, such as visibility, filtering, and
grouping may come later.)
All of the attributes (and methods) of the wrapped Column can be
accessed via this class, with help from some __getattr__ magic.
"""
def __init__(self, table, column):
self.table = table
self.column = column
@property
def sort_url(self):
"""
Return the URL to sort the linked table by this column. If the
table is already sorted by this column, the order is reversed.
        Since there is no canonical URL for a table, the current URL (via
the HttpRequest linked to the Table instance) is reused, and any
unrelated parameters will be included in the output.
"""
prefix = (self.sort_direction == "asc") and "-" or ""
return self.table.get_url(order_by=prefix + self.name)
@property
def is_sorted(self):
return self.sort_direction is not None
@property
def sort_direction(self):
"""
        Return the direction in which the linked table is sorted by
        this column ("asc" or "desc"), or None if this column is unsorted.
"""
if self.table._meta.order_by == self.name:
return "asc"
elif self.table._meta.order_by == ("-" + self.name):
return "desc"
else:
return None
def __unicode__(self):
return unicode(self.column)
def __getattr__(self, name):
return getattr(self.column, name)
| {
"content_hash": "d3072fa42d43a970aa924ad87d5e9c42",
"timestamp": "",
"source": "github",
"line_count": 225,
"max_line_length": 151,
"avg_line_length": 31.844444444444445,
"alnum_prop": 0.6120027913468249,
"repo_name": "dimagi/djtables",
"id": "1275ee2cb80805a4fd419a7829c7b29a16df9914",
"size": "7209",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/djtables/column.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "36266"
}
],
"symlink_target": ""
} |
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Template.date_added'
db.delete_column('scribe_template', 'date_added')
# Adding field 'Template.date_edited'
db.add_column('scribe_template', 'date_edited',
self.gf('django.db.models.fields.DateField')(auto_now=True, default=datetime.datetime(2014, 1, 30, 0, 0), blank=True),
keep_default=False)
# Deleting field 'Header.date_added'
db.delete_column('scribe_header', 'date_added')
# Adding field 'Header.date_edited'
db.add_column('scribe_header', 'date_edited',
self.gf('django.db.models.fields.DateField')(auto_now=True, default=datetime.datetime(2014, 1, 30, 0, 0), blank=True),
keep_default=False)
# Adding field 'Email.date_edited'
db.add_column('scribe_email', 'date_edited',
self.gf('django.db.models.fields.DateField')(auto_now=True, default=datetime.datetime(2014, 1, 30, 0, 0), blank=True),
keep_default=False)
def backwards(self, orm):
# User chose to not deal with backwards NULL issues for 'Template.date_added'
raise RuntimeError("Cannot reverse this migration. 'Template.date_added' and its values cannot be restored.")
        # The following code is provided here to aid in writing a correct migration
        # Adding field 'Template.date_added'
db.add_column('scribe_template', 'date_added',
self.gf('django.db.models.fields.DateField')(auto_now_add=True, blank=True),
keep_default=False)
# Deleting field 'Template.date_edited'
db.delete_column('scribe_template', 'date_edited')
# User chose to not deal with backwards NULL issues for 'Header.date_added'
raise RuntimeError("Cannot reverse this migration. 'Header.date_added' and its values cannot be restored.")
        # The following code is provided here to aid in writing a correct migration
        # Adding field 'Header.date_added'
db.add_column('scribe_header', 'date_added',
self.gf('django.db.models.fields.DateField')(auto_now_add=True, blank=True),
keep_default=False)
# Deleting field 'Header.date_edited'
db.delete_column('scribe_header', 'date_edited')
# Deleting field 'Email.date_edited'
db.delete_column('scribe_email', 'date_edited')
models = {
'scribe.email': {
'Meta': {'object_name': 'Email'},
'content': ('tinymce.models.HTMLField', [], {}),
'date_edited': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'header': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['scribe.Header']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['scribe.Template']"})
},
'scribe.header': {
'Meta': {'object_name': 'Header'},
'date_edited': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'scribe.template': {
'Meta': {'object_name': 'Template'},
'date_edited': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'template': ('django.db.models.fields.TextField', [], {})
}
}
complete_apps = ['scribe'] | {
"content_hash": "289feb496ac68aa53953ce209ec1f244",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 140,
"avg_line_length": 49.04597701149425,
"alnum_prop": 0.5847199437543942,
"repo_name": "DArtagan/thoth",
"id": "ac109b093ce1f545a9f48b6e69a98360baf0593f",
"size": "4291",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scribe/migrations/0005_auto__del_field_template_date_added__add_field_template_date_edited__d.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5178"
},
{
"name": "JavaScript",
"bytes": "73"
},
{
"name": "Python",
"bytes": "63815"
}
],
"symlink_target": ""
} |
import pymel.core as pm
import logging
log = logging.getLogger("ui")
class BaseTemplate(pm.ui.AETemplate):
def addControl(self, control, label=None, **kwargs):
pm.ui.AETemplate.addControl(self, control, label=label, **kwargs)
def beginLayout(self, name, collapse=True):
pm.ui.AETemplate.beginLayout(self, name, collapse=collapse)
class AECoronaFrontBackTemplate(BaseTemplate):
def __init__(self, nodeName):
BaseTemplate.__init__(self,nodeName)
log.debug("AECoronaLightTemplate")
self.thisNode = None
self.node = pm.PyNode(self.nodeName)
pm.mel.AEswatchDisplay(nodeName)
self.beginScrollLayout()
self.buildBody(nodeName)
allAttributes = self.node.listAttr()
allowedAttributes = ["frontMaterial", "backMaterial", "opacity"]
for att in allAttributes:
att = att.split(".")[-1]
if not att in allowedAttributes:
self.suppress(att)
self.addExtraControls("ExtraControls")
self.endScrollLayout()
def buildBody(self, nodeName):
self.thisNode = pm.PyNode(nodeName)
self.beginLayout("Emission" ,collapse=0)
self.beginNoOptimize()
self.addControl("frontMaterial", label="Front Material")
self.addControl("backMaterial", label="Back Material")
self.addSeparator()
self.addControl("iesProfile", label="IES Profile")
self.addControl("emissionSharpnessFake", label="Sharp Patterns")
#self.addControl("emissionDisableSampling", label="Disable Sampling")
#self.addControl("emissionSharpnessFakePoint", label="Sharpness Fake Point")
self.endNoOptimize()
self.endLayout()
#self.beginLayout("Hardware Texturing" ,collapse=0)
#pm.mel.eval('AEhardwareTextureTemplate "%s"' % self.nodeName + r'("diffuse emissionColor ")')
#self.endLayout()
| {
"content_hash": "1e46646ac2e3762f227f5e814063501b",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 102,
"avg_line_length": 39.12,
"alnum_prop": 0.6467280163599182,
"repo_name": "haggi/OpenMaya",
"id": "48228fdbb237cf9ef7fd466c112998fb76dffc90",
"size": "1956",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/mayaToCorona/mtco_devmodule/scripts/Corona/AETemplate/AECoronaFrontBackTemplate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AMPL",
"bytes": "5333"
},
{
"name": "Batchfile",
"bytes": "587"
},
{
"name": "C",
"bytes": "246300"
},
{
"name": "C++",
"bytes": "4178594"
},
{
"name": "Mathematica",
"bytes": "12660820"
},
{
"name": "Objective-C",
"bytes": "316"
},
{
"name": "Python",
"bytes": "1583249"
}
],
"symlink_target": ""
} |
import os
from perf_insights import corpus_driver
from perf_insights.mre import file_handle
def _GetFilesIn(basedir):
data_files = []
for dirpath, dirnames, filenames in os.walk(basedir, followlinks=True):
new_dirnames = [d for d in dirnames if not d.startswith('.')]
del dirnames[:]
dirnames += new_dirnames
for f in filenames:
if f.startswith('.'):
continue
if f == 'README.md':
continue
full_f = os.path.join(dirpath, f)
rel_f = os.path.relpath(full_f, basedir)
data_files.append(rel_f)
data_files.sort()
return data_files
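# For example, a corpus directory containing 'foo/a.json', '.git/config' and
# 'README.md' yields ['foo/a.json']: hidden files/directories and README.md
# are skipped, and the relative paths are returned sorted.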
def _GetTagsForRelPath(relpath):
# Tags.
sub_dir = os.path.dirname(relpath)
if len(sub_dir) == 0:
return []
parts = sub_dir.split(os.sep)
return [p for p in parts if len(p) > 0]
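# For example, a file stored at 'android/memory/trace_1.json' (relative to the
# corpus directory) gets the tags ['android', 'memory'], while a file at the
# top level gets no tags.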
def _GetMetadataForFilename(base_directory, filename):
relpath = os.path.relpath(filename, base_directory)
tags = _GetTagsForRelPath(relpath)
metadata = {'tags': tags}
# TODO(nduca): Add modification time to metadata.
return metadata
def _DefaultUrlResover(abspath):
return 'file:///%s' % abspath
class LocalDirectoryCorpusDriver(corpus_driver.CorpusDriver):
def __init__(self, trace_directory, url_resolver=_DefaultUrlResover):
self.directory = trace_directory
self.url_resolver = url_resolver
@staticmethod
def CheckAndCreateInitArguments(parser, args):
trace_dir = os.path.abspath(os.path.expanduser(args.trace_directory))
if not os.path.exists(trace_dir):
parser.error('Trace directory does not exist')
return None
return {'trace_directory': trace_dir}
@staticmethod
def AddArguments(parser):
parser.add_argument(
'--trace_directory',
help='Local directory containing traces to process.')
def GetTraceHandlesMatchingQuery(self, query):
trace_handles = []
files = _GetFilesIn(self.directory)
for rel_filename in files:
filename = os.path.join(self.directory, rel_filename)
metadata = _GetMetadataForFilename(self.directory, filename)
if not query.Eval(metadata, len(trace_handles)):
continue
# Make URL relative to server root.
url = self.url_resolver(filename)
if url is None:
url = _DefaultUrlResover(filename)
th = file_handle.URLFileHandle(url, 'file://' + filename)
trace_handles.append(th)
return trace_handles
| {
"content_hash": "46ce51636605b839d042661e93def1fa",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 73,
"avg_line_length": 26.831460674157302,
"alnum_prop": 0.6792294807370184,
"repo_name": "SummerLW/Perf-Insight-Report",
"id": "21400e38ddeec7dabe9f0f85989c0d989a8a2a99",
"size": "2554",
"binary": false,
"copies": "1",
"ref": "refs/heads/test",
"path": "perf_insights/perf_insights/local_directory_corpus_driver.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "3598"
},
{
"name": "C++",
"bytes": "6411"
},
{
"name": "CSS",
"bytes": "14952"
},
{
"name": "HTML",
"bytes": "27508823"
},
{
"name": "JavaScript",
"bytes": "75587"
},
{
"name": "Python",
"bytes": "4638631"
},
{
"name": "Shell",
"bytes": "2124"
}
],
"symlink_target": ""
} |
import aiocoap.resource as resource
import aiocoap
import asyncio
import http.client
from SERVCONFIG import SERVER_HOST,SERVER_PORT
COMMAND_ALIVE = 'alive'
COMMAND_BUTTON = 'button'
GLOBAL_HOST = SERVER_HOST
GLOBAL_PORT = SERVER_PORT
def send_http_request(host, pport, kit, cmd, payload):
print('Building request:', host, pport, kit, cmd, payload)
conn = http.client.HTTPConnection(host,port=pport)
conn.request('PUT', '/%s/%s/handle_msg/%s' % (kit, cmd, 'll'), payload)
conn.getresponse()
conn.close()
print('Request finished')
class LedResource(resource.Resource):
def __init__(self,kit):
super(LedResource, self).__init__()
self.kit = kit
def render_put(self, req):
print("Got payload: %s" % req.payload)
send_http_request(GLOBAL_HOST, GLOBAL_PORT, self.kit, COMMAND_BUTTON, req.payload.decode('ascii'))
return aiocoap.Message(code=aiocoap.CHANGED,payload='')
class LastSeenResource(resource.Resource):
def __init__(self,kit):
super(LastSeenResource, self).__init__()
self.kit = kit
def render_put(self,req):
print("Keepalive: %s" % req.payload)
send_http_request(GLOBAL_HOST, GLOBAL_PORT, self.kit, COMMAND_ALIVE, req.remote[0])
"""
        The keepalive is forwarded as an HTTP PUT to the Flask server at
        /<kit>/<command>/handle_msg/...
"""
return aiocoap.Message(code=aiocoap.CHANGED,payload='')
class BlockResource(resource.Resource):
def __init__(self):
super(BlockResource,self).__init__()
def render(self, request):
print('We got it:',request.payload)
return aiocoap.Message(code=aiocoap.CHANGED,payload='')
def main():
root = resource.Site()
for kit in range(1,21):
root.add_resource((str(kit).zfill(2), 'button'), LedResource(str(kit).zfill(2)))
root.add_resource((str(kit).zfill(2), 'i_am_alive'), LastSeenResource(str(kit).zfill(2)))
root.add_resource(('led',), BlockResource())
asyncio.async(aiocoap.Context.create_server_context(root))
asyncio.get_event_loop().run_forever()
if __name__ == '__main__':
main() | {
"content_hash": "84a6f6b74079bafd3bc62a589a752f2f",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 106,
"avg_line_length": 31.313432835820894,
"alnum_prop": 0.6501429933269781,
"repo_name": "hackerspace-ntnu/coap-iot",
"id": "65c08c1115abe37f1e689e5ed8185df00beeb47a",
"size": "2098",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/coap-server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "658"
},
{
"name": "HTML",
"bytes": "7177"
},
{
"name": "Python",
"bytes": "9613"
}
],
"symlink_target": ""
} |
from numpy.testing import assert_array_equal
from astropy.modeling import powerlaws
from astropy.io.misc.asdf.tags.transform.basic import TransformType
from . import _parameter_to_value
__all__ = ['PowerLaw1DType', 'BrokenPowerLaw1DType',
'SmoothlyBrokenPowerLaw1DType', 'ExponentialCutoffPowerLaw1DType',
'LogParabola1DType']
class PowerLaw1DType(TransformType):
name = 'transform/power_law1d'
version = '1.0.0'
types = ['astropy.modeling.powerlaws.PowerLaw1D']
@classmethod
def from_tree_transform(cls, node, ctx):
return powerlaws.PowerLaw1D(amplitude=node['amplitude'],
x_0=node['x_0'],
alpha=node['alpha'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'x_0': _parameter_to_value(model.x_0),
'alpha': _parameter_to_value(model.alpha)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, powerlaws.PowerLaw1D) and
isinstance(b, powerlaws.PowerLaw1D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.x_0, b.x_0)
assert_array_equal(a.alpha, b.alpha)
class BrokenPowerLaw1DType(TransformType):
name = 'transform/broken_power_law1d'
version = '1.0.0'
types = ['astropy.modeling.powerlaws.BrokenPowerLaw1D']
@classmethod
def from_tree_transform(cls, node, ctx):
return powerlaws.BrokenPowerLaw1D(amplitude=node['amplitude'],
x_break=node['x_break'],
alpha_1=node['alpha_1'],
alpha_2=node['alpha_2'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'x_break': _parameter_to_value(model.x_break),
'alpha_1': _parameter_to_value(model.alpha_1),
'alpha_2': _parameter_to_value(model.alpha_2)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, powerlaws.BrokenPowerLaw1D) and
isinstance(b, powerlaws.BrokenPowerLaw1D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.x_break, b.x_break)
assert_array_equal(a.alpha_1, b.alpha_1)
assert_array_equal(a.alpha_2, b.alpha_2)
class SmoothlyBrokenPowerLaw1DType(TransformType):
name = 'transform/smoothly_broken_power_law1d'
version = '1.0.0'
types = ['astropy.modeling.powerlaws.SmoothlyBrokenPowerLaw1D']
@classmethod
def from_tree_transform(cls, node, ctx):
return powerlaws.SmoothlyBrokenPowerLaw1D(amplitude=node['amplitude'],
x_break=node['x_break'],
alpha_1=node['alpha_1'],
alpha_2=node['alpha_2'],
delta=node['delta'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'x_break': _parameter_to_value(model.x_break),
'alpha_1': _parameter_to_value(model.alpha_1),
'alpha_2': _parameter_to_value(model.alpha_2),
'delta': _parameter_to_value(model.delta)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, powerlaws.SmoothlyBrokenPowerLaw1D) and
isinstance(b, powerlaws.SmoothlyBrokenPowerLaw1D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.x_break, b.x_break)
assert_array_equal(a.alpha_1, b.alpha_1)
assert_array_equal(a.alpha_2, b.alpha_2)
assert_array_equal(a.delta, b.delta)
class ExponentialCutoffPowerLaw1DType(TransformType):
name = 'transform/exponential_cutoff_power_law1d'
version = '1.0.0'
types = ['astropy.modeling.powerlaws.ExponentialCutoffPowerLaw1D']
@classmethod
def from_tree_transform(cls, node, ctx):
return powerlaws.ExponentialCutoffPowerLaw1D(amplitude=node['amplitude'],
x_0=node['x_0'],
alpha=node['alpha'],
x_cutoff=node['x_cutoff'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'x_0': _parameter_to_value(model.x_0),
'alpha': _parameter_to_value(model.alpha),
'x_cutoff': _parameter_to_value(model.x_cutoff)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, powerlaws.ExponentialCutoffPowerLaw1D) and
isinstance(b, powerlaws.ExponentialCutoffPowerLaw1D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.x_0, b.x_0)
assert_array_equal(a.alpha, b.alpha)
assert_array_equal(a.x_cutoff, b.x_cutoff)
class LogParabola1DType(TransformType):
name = 'transform/log_parabola1d'
version = '1.0.0'
types = ['astropy.modeling.powerlaws.LogParabola1D']
@classmethod
def from_tree_transform(cls, node, ctx):
return powerlaws.LogParabola1D(amplitude=node['amplitude'],
x_0=node['x_0'],
alpha=node['alpha'],
beta=node['beta'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'x_0': _parameter_to_value(model.x_0),
'alpha': _parameter_to_value(model.alpha),
'beta': _parameter_to_value(model.beta)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, powerlaws.LogParabola1D) and
isinstance(b, powerlaws.LogParabola1D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.x_0, b.x_0)
assert_array_equal(a.alpha, b.alpha)
assert_array_equal(a.beta, b.beta)
| {
"content_hash": "4dd5825b41e40729aebea29141a4b3c8",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 81,
"avg_line_length": 40.84705882352941,
"alnum_prop": 0.5849654377880185,
"repo_name": "saimn/astropy",
"id": "0c3a5ab9f55c58c329bd8114c7498a61dd8ef6e3",
"size": "7032",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "astropy/io/misc/asdf/tags/transform/powerlaws.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "11034753"
},
{
"name": "C++",
"bytes": "47001"
},
{
"name": "Cython",
"bytes": "78631"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Lex",
"bytes": "183333"
},
{
"name": "M4",
"bytes": "18757"
},
{
"name": "Makefile",
"bytes": "52457"
},
{
"name": "Python",
"bytes": "12214998"
},
{
"name": "Shell",
"bytes": "17024"
},
{
"name": "TeX",
"bytes": "853"
}
],
"symlink_target": ""
} |
"""Represent an air purifier."""
from __future__ import annotations
from collections.abc import Callable
from typing import Any, cast
from pytradfri.command import Command
from homeassistant.components.fan import (
SUPPORT_PRESET_MODE,
SUPPORT_SET_SPEED,
FanEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .base_class import TradfriBaseDevice
from .const import ATTR_AUTO, CONF_GATEWAY_ID, DEVICES, DOMAIN, KEY_API
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Load Tradfri switches based on a config entry."""
gateway_id = config_entry.data[CONF_GATEWAY_ID]
tradfri_data = hass.data[DOMAIN][config_entry.entry_id]
api = tradfri_data[KEY_API]
devices = tradfri_data[DEVICES]
async_add_entities(
TradfriAirPurifierFan(dev, api, gateway_id)
for dev in devices
if dev.has_air_purifier_control
)
def _from_percentage(percentage: int) -> int:
"""Convert percent to a value that the Tradfri API understands."""
if percentage < 20:
# The device cannot be set to speed 5 (10%), so we should turn off the device
# for any value below 20
return 0
nearest_10: int = round(percentage / 10) * 10 # Round to nearest multiple of 10
return round(nearest_10 / 100 * 50)
def _from_fan_speed(fan_speed: int) -> int:
"""Convert the Tradfri API fan speed to a percentage value."""
nearest_10: int = round(fan_speed / 10) * 10 # Round to nearest multiple of 10
return round(nearest_10 / 50 * 100)
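# Worked examples of the two conversions above (illustrative only, not part of
# the original integration; the values follow directly from the rounding rules):
#   _from_percentage(15) -> 0    (anything below 20% turns the device off)
#   _from_percentage(37) -> 20   (37% -> nearest 40% -> 40 / 100 * 50)
#   _from_fan_speed(18)  -> 40   (18 -> nearest 20 -> 20 / 50 * 100)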
class TradfriAirPurifierFan(TradfriBaseDevice, FanEntity):
"""The platform class required by Home Assistant."""
def __init__(
self,
device: Command,
api: Callable[[Command | list[Command]], Any],
gateway_id: str,
) -> None:
"""Initialize a switch."""
super().__init__(device, api, gateway_id)
self._attr_unique_id = f"{gateway_id}-{device.id}"
@property
def supported_features(self) -> int:
"""Flag supported features."""
return SUPPORT_PRESET_MODE + SUPPORT_SET_SPEED
@property
def speed_count(self) -> int:
"""
Return the number of speeds the fan supports.
These are the steps:
0 = Off
10 = Min
15
20
25
30
35
40
45
50 = Max
"""
return 10
@property
def is_on(self) -> bool:
"""Return true if switch is on."""
if not self._device_data:
return False
return cast(bool, self._device_data.mode)
@property
def preset_modes(self) -> list[str] | None:
"""Return a list of available preset modes."""
return [ATTR_AUTO]
@property
def percentage(self) -> int | None:
"""Return the current speed percentage."""
if not self._device_data:
return None
if self._device_data.fan_speed:
return _from_fan_speed(self._device_data.fan_speed)
return None
@property
def preset_mode(self) -> str | None:
"""Return the current preset mode."""
if not self._device_data:
return None
if self._device_data.mode == ATTR_AUTO:
return ATTR_AUTO
return None
async def async_set_preset_mode(self, preset_mode: str) -> None:
"""Set the preset mode of the fan."""
if not self._device_control:
return
if not preset_mode == ATTR_AUTO:
raise ValueError("Preset must be 'Auto'.")
await self._api(self._device_control.set_mode(1))
async def async_turn_on(
self,
speed: str | None = None,
percentage: int | None = None,
preset_mode: str | None = None,
**kwargs: Any,
) -> None:
"""Turn on the fan."""
if not self._device_control:
return
if percentage is not None:
await self._api(self._device_control.set_mode(_from_percentage(percentage)))
return
if preset_mode:
await self.async_set_preset_mode(preset_mode)
async def async_set_percentage(self, percentage: int) -> None:
"""Set the speed percentage of the fan."""
if not self._device_control:
return
await self._api(self._device_control.set_mode(_from_percentage(percentage)))
async def async_turn_off(self, **kwargs: Any) -> None:
"""Turn off the fan."""
if not self._device_control:
return
await self._api(self._device_control.set_mode(0))
def _refresh(self, device: Command, write_ha: bool = True) -> None:
"""Refresh the purifier data."""
# Caching of air purifier control and purifier object
self._device_control = device.air_purifier_control
self._device_data = device.air_purifier_control.air_purifiers[0]
super()._refresh(device, write_ha=write_ha)
| {
"content_hash": "bb5436375d4f57c5c8fdbac60a4125d9",
"timestamp": "",
"source": "github",
"line_count": 174,
"max_line_length": 88,
"avg_line_length": 29.649425287356323,
"alnum_prop": 0.6105834464043419,
"repo_name": "home-assistant/home-assistant",
"id": "845d5e6d9c3b5f61ff9b4052aff026def4ff6354",
"size": "5159",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/tradfri/fan.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "20557383"
},
{
"name": "Shell",
"bytes": "6671"
}
],
"symlink_target": ""
} |
"""
Usage:
Press Ctrl-C on the command line or send a signal to the process to stop the
bot.
"""
import random as rd
import logging, spotipy
from spotipy.oauth2 import SpotifyClientCredentials
from telegram.ext import (Updater, CommandHandler, MessageHandler, Filters, Job)
# API Authorization for Spotify
client_credentials_manager = SpotifyClientCredentials(client_id='d859b7310236443a85af5b2c4dd8f169', client_secret='853138e3fc6b42c3857b3fc03e6ea48d')
spotify = spotipy.Spotify(client_credentials_manager=client_credentials_manager)
# Enable logging
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
logger = logging.getLogger(__name__)
# Define a few command handlers. These usually take the two arguments bot and
# update. Error handlers also receive the raised TelegramError object in error.
def start(bot, update):
update.message.reply_text('Olá! Eu sou o BotFWD e existo unicamente para'
' causar o desconforto nas pessoas!')
bot.send_message(chat_id=update.message.chat_id,
text="Meus comandos:\n"
"Iniciar o bot: /start\n"
"Mensagem aleatória: /random ou /fwd\n"
"Gerador de lero lero: /lero ou /lerolero\n"
"Gerador de palavra: /word ou palavra\n"
"Aplicação de palavra aleatória em frase: /aplicacaonumafrase\n"
"Nome de filme com substituição: /filme\n"
"Música aleatória do spotify: /musica\n"
"Ajuda: /help")
def help(bot, update):
update.message.reply_text('Sem ajuda malandro!')
bot.send_message(chat_id=update.message.chat_id,
text="Quer ajudar a desenvolver o bot?\n"
"https://goo.gl/x3jDri")
def random(bot, update):
# update.message.reply_text('Em manutenção :(')
messageID = rd.randint(6, 175)
try:
bot.forwardMessage(update.message.chat_id, '@botFwdTeste', messageID)
print("Success => message_id %d" % messageID)
except:
print("Error => message_id %d does not exist" % messageID)
random(bot, update)
def debug(bot, update):
bot.send_message(chat_id=update.message.chat_id,
text="Toschi, para de tentar fazer merda de novo.")
# print("Chat id %d" % update.message.chat_id)
# print("Message id %d" % update.message.message_id)
# for x in range(0, 175):
# try:
# # bot.forwardMessage(update.message.chat_id, '@ofwdnovo', messageID)
# bot.forwardMessage(update.message.chat_id, '@botFwdTeste', x)
# print("Success => message_id %d" % x)
# except:
# print("Error => message_id %d does not exist" % x)
# continue
def lero(bot, update):
t0 = [ 'Caros amigos, ',
'Por outro lado, ',
'Assim mesmo, ',
'No entanto, não podemos esquecer que ',
'Do mesmo modo, ',
'A prática cotidiana prova que ',
'Nunca é demais lembrar o peso e o significado destes problemas, uma vez que ',
'As experiências acumuladas demonstram que ',
'Acima de tudo, é fundamental ressaltar que ',
'O incentivo ao avanço tecnológico, assim como ',
'Não obstante, ',
'Todas estas questões, devidamente ponderadas, levantam dúvidas sobre se ',
'Pensando mais a longo prazo, ',
'O que temos que ter sempre em mente é que ',
'Ainda assim, existem dúvidas a respeito de como ',
'Gostaria de enfatizar que ',
'Todavia, ',
'A nível organizacional, ',
'O empenho em analisar ',
'Percebemos, cada vez mais, que ',
'No mundo atual, ',
'É importante questionar o quanto ',
'Neste sentido, ',
'Evidentemente, ',
'Por conseguinte, ',
'É claro que ',
'Podemos já vislumbrar o modo pelo qual ',
'Desta maneira, ',
'O cuidado em identificar pontos críticos n',
'A certificação de metodologias que nos auxiliam a lidar com ' ]
t1 = [ 'a execução dos pontos do programa ',
'a complexidade dos estudos efetuados ',
'a contínua expansão de nossa atividade ',
'a estrutura atual da organização ',
'o novo modelo estrutural aqui preconizado ',
'o desenvolvimento contínuo de distintas formas de atuação ',
'a constante divulgação das informações ',
'a consolidação das estruturas ',
'a consulta aos diversos militantes ',
'o início da atividade geral de formação de atitudes ',
'o desafiador cenário globalizado ',
'a mobilidade dos capitais internacionais ',
'o fenômeno da Internet ',
'a hegemonia do ambiente político ',
'a expansão dos mercados mundiais ',
'o aumento do diálogo entre os diferentes setores produtivos ',
'a crescente influência da mídia ',
'a necessidade de renovação processual ',
'a competitividade nas transações comerciais ',
'o surgimento do comércio virtual ',
'a revolução dos costumes ',
'o acompanhamento das preferências de consumo ',
'o comprometimento entre as equipes ',
'a determinação clara de objetivos ',
'a adoção de políticas descentralizadoras ',
'a valorização de fatores subjetivos ',
'a percepção das dificuldades ',
'o entendimento das metas propostas ',
'o consenso sobre a necessidade de qualificação ',
'o julgamento imparcial das eventualidades ' ]
t2 = [ 'nos obriga à análise ',
'cumpre um papel essencial na formulação ',
'exige a precisão e a definição ',
'auxilia a preparação e a composição ',
'garante a contribuição de um grupo importante na determinação ',
'assume importantes posições no estabelecimento ',
'facilita a criação ',
'obstaculiza a apreciação da importância ',
'oferece uma interessante oportunidade para verificação ',
'acarreta um processo de reformulação e modernização ',
'pode nos levar a considerar a reestruturação ',
'representa uma abertura para a melhoria ',
'ainda não demonstrou convincentemente que vai participar na mudança ',
'talvez venha a ressaltar a relatividade ',
'prepara-nos para enfrentar situações atípicas decorrentes ',
'maximiza as possibilidades por conta ',
'desafia a capacidade de equalização ',
'agrega valor ao estabelecimento ',
'é uma das consequências ',
'promove a alavancagem ',
'não pode mais se dissociar ',
'possibilita uma melhor visão global ',
'estimula a padronização ',
'aponta para a melhoria ',
'faz parte de um processo de gerenciamento ',
'causa impacto indireto na reavaliação ',
'apresenta tendências no sentido de aprovar a manutenção ',
'estende o alcance e a importância ',
'deve passar por modificações independentemente ',
'afeta positivamente a correta previsão ' ]
t3 = [ 'das condições financeiras e administrativas exigidas.',
'das diretrizes de desenvolvimento para o futuro.',
'do sistema de participação geral.',
'das posturas dos órgãos dirigentes com relação às suas atribuições.',
'das novas proposições.',
'das direções preferenciais no sentido do progresso.',
'do sistema de formação de quadros que corresponde às necessidades.',
'das condições inegavelmente apropriadas.',
'dos índices pretendidos.',
'das formas de ação.',
'dos paradigmas corporativos.',
'dos relacionamentos verticais entre as hierarquias.',
'do processo de comunicação como um todo.',
'dos métodos utilizados na avaliação de resultados.',
'de todos os recursos funcionais envolvidos.',
'dos níveis de motivação departamental.',
'da gestão inovadora da qual fazemos parte.',
'dos modos de operação convencionais.',
'de alternativas às soluções ortodoxas.',
'dos procedimentos normalmente adotados.',
'dos conhecimentos estratégicos para atingir a excelência.',
'do fluxo de informações.',
'do levantamento das variáveis envolvidas.',
'das diversas correntes de pensamento.',
'do impacto na agilidade decisória.',
'das regras de conduta normativas.',
'do orçamento setorial.',
'do retorno esperado a longo prazo.',
'do investimento em reciclagem técnica.',
'do remanejamento dos quadros funcionais.' ]
leroLero = rd.choice(t0) + rd.choice(t1) + rd.choice(t2) + rd.choice(t3)
bot.send_message(chat_id=update.message.chat_id, text=leroLero)
def wordGenerate(bot, update):
consonants = 'bcdfghjlmnpqrstvxz'
vowels = 'aeiou'
syllables = rd.randint(2, 10)
result = ''
for i in range(syllables):
consonant = rd.choice(consonants)
this_vowels = vowels
if consonant == 'q':
consonant += 'u'
this_vowels = 'aeio'
result += consonant + rd.choice(this_vowels)
if i > 0 and result[-2] in 'bp' and rd.randint(0, 5) == 0:
result = result[:-2] + 'm' + result[-2:]
if i > 0 and result[-2] in 'cglrst' and rd.randint(0, 5) == 0:
result = result[:-2] + 'n' + result[-2:]
return result
def word(bot, update):
result = wordGenerate(bot, update)
bot.send_message(chat_id=update.message.chat_id, text=result)
def aplicacaonumafrase(bot, update):
word = wordGenerate(bot, update)
with open('aplicacaonumafrase.txt') as f:
frases = [l.rstrip('\n') for l in f]
result = rd.choice(frases).format(word=word)
bot.send_message(chat_id=update.message.chat_id, text=result)
def filme(bot, update):
palavrasM = ['Cu', 'Pinto', 'Ânus', 'Pipi', 'Temer', 'Caralho']
palavrasF = ['Rola', 'Vagina', 'Dilma', 'Jeba', ]
with open('filmeM.txt') as f:
frasesM = [l.rstrip('\n') for l in f]
with open('filmeF.txt') as f:
frasesF = [l.rstrip('\n') for l in f]
rPalavras = rd.randint(0, len(palavrasM) + len(palavrasF) - 1)
if rPalavras < len(palavrasM): # M
result = rd.choice(frasesM).format(word=palavrasM[rPalavras])
result = result.replace('Ânuss', 'Ânus') # caso de borda
else: # F
rPalavras -= len(palavrasM)
result = rd.choice(frasesF).format(word=palavrasF[rPalavras])
result = result.lower()
bot.send_message(chat_id=update.message.chat_id, text=result)
def musica(bot, update):
chars = 'abcdefghijklmnopqrstuvwxyz0123456789'
# Some chars have lower values for the default max offset
max_offset_special = {'h': 68587, 'q': 21550, 'w': 65601, 'z': 60495, '0': 57375}
# Randomly get one of the chars
rand_query = chars[rd.randint(0, 35)]
# Verify special cases in the max offset
if rand_query in max_offset_special:
max_offset = max_offset_special.get(rand_query)
else:
max_offset = 100000
# Gets the result depending of the previously defined variables
# 'results' contains a lot of information about the track selected, such as artist, track name, album, etc
results = spotify.search(q = rand_query, limit=1, offset = rd.randint(0, int(max_offset)))
# Get only the track id from the 'results'
for i, t in enumerate(results['tracks']['items']):
url = 'https://open.spotify.com/track/' + t['id']
bot.send_message(chat_id=update.message.chat_id, text=url)
def error(bot, update, error):
logger.warn('Update "%s" caused error "%s"' % (update, error))
def main():
# Create the EventHandler and pass it your bot's token.
updater = Updater("390975324:AAG57sa1pBQ9Swk7ry-I4FJijWOc1XZYM5s")
# Get the dispatcher to register handlers
dp = updater.dispatcher
# on different commands - answer in Telegram
dp.add_handler(CommandHandler("start", start))
dp.add_handler(CommandHandler("help", help))
dp.add_handler(CommandHandler("random", random))
dp.add_handler(CommandHandler("fwd", random))
dp.add_handler(CommandHandler("lero", lero))
dp.add_handler(CommandHandler("lerolero", lero))
dp.add_handler(CommandHandler("word", word))
dp.add_handler(CommandHandler("palavra", word))
dp.add_handler(CommandHandler("aplicacaonumafrase", aplicacaonumafrase))
dp.add_handler(CommandHandler("frase", aplicacaonumafrase))
dp.add_handler(CommandHandler("filme", filme))
dp.add_handler(CommandHandler("musica", musica))
dp.add_handler(CommandHandler("debug", debug))
# log all errors
dp.add_error_handler(error)
# Start the Bot
updater.start_polling()
print('==== BOT started ====')
# Run the bot until you press Ctrl-C or the process receives SIGINT,
# SIGTERM or SIGABRT. This should be used most of the time, since
# start_polling() is non-blocking and will stop the bot gracefully.
updater.idle()
if __name__ == '__main__':
main()
| {
"content_hash": "06bc00c8d45f1d07aa908edbaf972e92",
"timestamp": "",
"source": "github",
"line_count": 316,
"max_line_length": 149,
"avg_line_length": 43.379746835443036,
"alnum_prop": 0.6153341114677561,
"repo_name": "rogerscristo/BotFWD",
"id": "c7feb4a34eec7d611261fc87e4a8cf907fe5a2bc",
"size": "13999",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13999"
}
],
"symlink_target": ""
} |
from __future__ import annotations
import tokenize
from dataclasses import dataclass
from io import BytesIO
from pants.engine.internals.parser import ParseError
@dataclass(frozen=True)
class FixBUILDFileRequest:
path: str
content: bytes
@property
def lines(self) -> list[str]:
return self.content.decode("utf-8").splitlines(keepends=True)
def tokenize(self) -> list[tokenize.TokenInfo]:
try:
return list(tokenize.tokenize(BytesIO(self.content).readline))
except tokenize.TokenError as e:
raise ParseError(f"Failed to parse {self.path}: {e}")
@dataclass(frozen=True)
class FixedBUILDFile:
path: str
content: bytes
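# Illustrative usage of the dataclasses above (inputs are assumptions, not part
# of the original module):
#   request = FixBUILDFileRequest(path="BUILD", content=b"python_sources()\n")
#   tokens = request.tokenize()   # list of tokenize.TokenInfo, or ParseError on bad syntax
#   fixed = FixedBUILDFile(path=request.path, content=request.content)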
| {
"content_hash": "06abd97cb7e8e52f3b2bbfa3b91aa1fd",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 74,
"avg_line_length": 24.03448275862069,
"alnum_prop": 0.6987087517934003,
"repo_name": "pantsbuild/pants",
"id": "b06f43733b9b2ebac6fecb6fed20f78ef7e82b90",
"size": "829",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/python/pants/backend/build_files/fix/deprecations/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "688"
},
{
"name": "Dockerfile",
"bytes": "1132"
},
{
"name": "Go",
"bytes": "67315"
},
{
"name": "Java",
"bytes": "97190"
},
{
"name": "Kotlin",
"bytes": "6433"
},
{
"name": "Mustache",
"bytes": "3771"
},
{
"name": "Python",
"bytes": "7582858"
},
{
"name": "Rust",
"bytes": "1657282"
},
{
"name": "Scala",
"bytes": "21950"
},
{
"name": "Shell",
"bytes": "31400"
},
{
"name": "Starlark",
"bytes": "76892"
}
],
"symlink_target": ""
} |
from bempy import block
from bempy.django import uses
from bempy.blocks import b
@block()
def guideline(*sections):
def process_content_item(content):
        # content can be either a single item or a tuple of
        # two items, where the first is a description of the
        # example and the second is a block
if isinstance(content, tuple):
return content
else:
return None, content
def process_content(content):
if not isinstance(content, tuple):
content = (content,)
return map(process_content_item, content)
def process_section(section):
title = section[0]
content = section[1:]
return dict(title=title,
content=process_content(content))
return dict(sections=map(process_section, sections))
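# Illustrative call (section/content shapes inferred from the helpers above;
# the b(...) block calls are placeholders, not real blocks from this repo):
#   guideline(
#       ("Buttons",
#        b('button'),                                  # plain example
#        ("A disabled button", b('button-disabled'))), # (description, block) pair
#   )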
| {
"content_hash": "e65555fa7f8ce07beb49c60a83d99321",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 57,
"avg_line_length": 29.571428571428573,
"alnum_prop": 0.6328502415458938,
"repo_name": "svetlyak40wt/bempy",
"id": "0a73556146de85deb4e6f4878a6d35efd1a2bc98",
"size": "828",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example/blocks/guideline/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "3482"
},
{
"name": "HTML",
"bytes": "3179"
},
{
"name": "JavaScript",
"bytes": "458"
},
{
"name": "Python",
"bytes": "33118"
}
],
"symlink_target": ""
} |
# 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, ...
# By converting each letter in a word to a number corresponding to its
# alphabetical position and adding these values we form a word value. For
# example, the word value for SKY is 19 + 11 + 25 = 55 = t10. If the word
# value is a triangle number then we shall call the word a triangle word.
# Using words.txt (right click and 'Save Link/Target As...'), a 16K text file
# containing nearly two-thousand common English words, how many are triangle
# words?
import helpers.file as fileutils
# arbitrary value for the highest reachable triangle number
MAX = 1000
def euler():
# set of the triangle numbers until an arbitrary maximum number
triangles = set()
# generate triangle numbers
n = 1
highest_triangle = 0
while highest_triangle < MAX:
highest_triangle = n * (n + 1) // 2
triangles.add(highest_triangle)
n += 1
# read the words and put them into a list of strings
words = fileutils.flattened_list_from_file('data/042.txt',
separator = ',', convert_to = str)
# strip the quote-sign from the strings, leaving only the word
words = [word.replace('"', '') for word in words]
# accumulator for the final answer, the number of triangle words
triangle_word_count = 0
# count the number of triangle words
for word in words:
if word_to_int(word) in triangles:
triangle_word_count += 1
# return it
return triangle_word_count
def word_to_int(word):
"""Returns the sum of the 'letter value' of each letter in the word.
('a' = 1, 'b' = 2, 'c' = 3, ...)"""
return sum(ord(letter) - ord('a') + 1 for letter in word.lower())
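# Illustrative check (mirrors the SKY example in the problem statement above):
#   word_to_int("SKY") == 19 + 11 + 25 == 55, and 55 == 10 * 11 // 2,
#   so "SKY" counts as a triangle word.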
| {
"content_hash": "2593e31737088c0ee58a9ea6d3399d85",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 77,
"avg_line_length": 36.91304347826087,
"alnum_prop": 0.6566548881036514,
"repo_name": "6112/project-euler",
"id": "d4af2375a257e296c59f1453cf2dbc8106f02668",
"size": "1863",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "problems/042.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "161858"
}
],
"symlink_target": ""
} |
"""
Copyright 2013, Ahmet Emre Aladağ, AGMLAB
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
from PyQt4.QtCore import SIGNAL, QVariant, QAbstractListModel, QModelIndex, Qt
from PyQt4.QtGui import QApplication, QMainWindow, QGridLayout, QWidget, \
QListView, QPushButton, QHBoxLayout, QVBoxLayout, QComboBox, QLabel
from nutch2.runner.hadoop import HadoopJobMonitor
class JobListModel(QAbstractListModel):
def __init__(self, job_monitor, parent=None, *args):
""" datain: a list where each item is a row
"""
QAbstractListModel.__init__(self, parent, *args)
self.job_monitor = job_monitor
def rowCount(self, parent=QModelIndex()):
return len(self.job_monitor.job_names)
def data(self, index, role):
if index.isValid() and role == Qt.DisplayRole:
return QVariant(self.job_monitor.job_names[index.row()])
else:
return QVariant()
def get_job_object(self, name):
return self.job_monitor.get_job_with_name(str(name))
class MainWindow(QMainWindow):
def __init__(self):
QMainWindow.__init__(self)
self.setupGUI()
self.job_monitor = HadoopJobMonitor()
def setupGUI(self):
layout = QGridLayout()
self.widget = QWidget()
self.widget.setLayout(layout)
self.setCentralWidget(self.widget)
self.setWindowTitle("Nutch Job Service")
# create active job list
active_label = QLabel("Active Hadoop Jobs")
layout.addWidget(active_label, 0, 0)
self.lv = QListView()
layout.addWidget(self.lv, 1, 0, 3, 1)
# Create buttons
self.stop_button = QPushButton("Stop")
self.refresh_button = QPushButton("Refresh")
button_layout = QVBoxLayout()
button_layout.addWidget(self.stop_button)
button_layout.addWidget(self.refresh_button)
layout.addLayout(button_layout, 1, 1)
# Start Button
self.new_job_layout = QHBoxLayout()
self.new_job_combo = QComboBox()
self.new_job_combo.addItems(["Inject", "Generate", "Fetch", "Parse", "Solr"])
self.new_job_button = QPushButton("Start New Job")
self.new_job_layout.addWidget(self.new_job_combo)
self.new_job_layout.addWidget(self.new_job_button)
layout.addLayout(self.new_job_layout, 5, 0)
#self.statusBar()
self.connectSlots()
def load_data(self):
"""
Loads data from the hadoop job list.
"""
self.showStatusMessage("Fetching Hadoop job list...")
print "Loading data..."
self.job_monitor.fetch_hadoop_job_info()
self.job_list_model = JobListModel(self.job_monitor, self)
self.lv.setModel(self.job_list_model)
self.updateStatusBar()
    def start_new_job(self):
"""
Starts a new job according to the selected item in combo box.
"""
current_text = self.new_job_combo.currentText()
print "Starting %s" % current_text
def stop_job(self):
current_item = self.lv.currentIndex().data(0).toString()
job_object = self.job_list_model.get_job_object(current_item)
job_object.stop()
def connectSlots(self):
"""
Connects signals to slots.
"""
self.connect(self.refresh_button, SIGNAL('clicked()'), self.load_data)
self.connect(self.stop_button, SIGNAL('clicked()'), self.stop_job)
        self.connect(self.new_job_button, SIGNAL('clicked()'), self.start_new_job)
def showStatusMessage(self, message):
self.statusBar().showMessage(message)
def updateStatusBar(self):
"""
Updates status bar according to the number of active hadoop jobs.
"""
self.statusBar().showMessage("%s jobs are active." % self.job_monitor.num_jobs)
def main():
app = QApplication(sys.argv)
mainWindow = MainWindow()
mainWindow.show()
sys.exit(app.exec_())
if __name__ == "__main__":
main() | {
"content_hash": "292e3f92ec5cee22531d1ff5edcd5aaf",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 87,
"avg_line_length": 32.9485294117647,
"alnum_prop": 0.6476232983708994,
"repo_name": "AGMLab/python-nutch2",
"id": "5f73f1913dcb863b161cd9bda5866c3dd2fd393e",
"size": "4529",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nutch2/gui/MainWindow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "15875"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('recipe_app', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Favorite',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('fav_recipe', models.ForeignKey(default=False, on_delete=django.db.models.deletion.CASCADE, to='recipe_app.Recipe')),
('fav_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| {
"content_hash": "f63aeb6d579e8780eb23a466b79b94df",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 134,
"avg_line_length": 34.625,
"alnum_prop": 0.6401925391095066,
"repo_name": "mpollachek/Recipe_website_project",
"id": "07769db6117b3c2037a79d69aad3dc4beda05763",
"size": "904",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recipes/recipe_app/migrations/0002_favorite.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "3054"
},
{
"name": "HTML",
"bytes": "118572"
},
{
"name": "JavaScript",
"bytes": "251294"
},
{
"name": "Python",
"bytes": "37569"
}
],
"symlink_target": ""
} |
from sqlalchemy import create_engine
from sqlalchemy.pool import NullPool
from kolibri.core.content.utils.sqlalchemybridge import get_default_db_string
from kolibri.core.content.utils.sqlalchemybridge import SharingPool
def django_connection_engine():
if get_default_db_string().startswith("sqlite"):
return create_engine(
get_default_db_string(), poolclass=SharingPool, convert_unicode=True
)
return create_engine(
get_default_db_string(), poolclass=NullPool, convert_unicode=True
)
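# Illustrative use (assumes Django settings are already configured for Kolibri):
#   engine = django_connection_engine()
#   with engine.connect() as connection:
#       ...  # run SQLAlchemy queries against the default Kolibri database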
| {
"content_hash": "47f665c77b8487746cbfc122ead1b85a",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 80,
"avg_line_length": 35.666666666666664,
"alnum_prop": 0.7420560747663552,
"repo_name": "mrpau/kolibri",
"id": "976c149a63b70f286901b2de9d66ce4143e802b7",
"size": "535",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "kolibri/core/content/test/sqlalchemytesting.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "601"
},
{
"name": "CSS",
"bytes": "1716299"
},
{
"name": "Dockerfile",
"bytes": "7303"
},
{
"name": "Gherkin",
"bytes": "278074"
},
{
"name": "HTML",
"bytes": "26440"
},
{
"name": "JavaScript",
"bytes": "1537923"
},
{
"name": "Makefile",
"bytes": "13308"
},
{
"name": "Python",
"bytes": "2298911"
},
{
"name": "Shell",
"bytes": "11777"
},
{
"name": "Vue",
"bytes": "1558714"
}
],
"symlink_target": ""
} |
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'button04.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet1 = workbook.add_worksheet()
worksheet2 = workbook.add_worksheet()
worksheet1.insert_button('C2', {})
worksheet2.insert_button('E5', {})
workbook.close()
self.assertExcelEqual()
| {
"content_hash": "6556cccc4f07b290273a421385cf0d18",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 68,
"avg_line_length": 25.88888888888889,
"alnum_prop": 0.628755364806867,
"repo_name": "jkyeung/XlsxWriter",
"id": "66b0b9ea081a3312bbf2c6d71546a62de5440e67",
"size": "1105",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xlsxwriter/test/comparison/test_button04.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5113"
},
{
"name": "CSS",
"bytes": "16544"
},
{
"name": "HTML",
"bytes": "13100"
},
{
"name": "Makefile",
"bytes": "7819"
},
{
"name": "Perl",
"bytes": "3504"
},
{
"name": "Python",
"bytes": "2430294"
},
{
"name": "Shell",
"bytes": "6064"
}
],
"symlink_target": ""
} |
from time import sleep
import RPi.GPIO as GPIO
import picamera
import picamera.array
import glob
#set up camera
camera = picamera.PiCamera()
camera.hflip = True
camera.brightness = 50
camera.resolution = (500,500)
camera.framerate = 1
camera.iso = 200
camera.zoom = (0,0,1,1)
sleep(1.5)
camera.shutter_speed = camera.exposure_speed
camera.exposure_mode = 'off'
g = camera.awb_gains
camera.awb_mode = 'off'
camera.awb_gains = g
#label LEDs
red1 = 18
yellow1 = 23
green1 = 24
red2 = 25
yellow2 = 8
green2 = 7
output_list = [red1, yellow1, green1, red2, yellow2, green2]
#setup GPIO
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.cleanup()
for i in output_list:
GPIO.setup(i, GPIO.OUT)
# set all outputs to 'low'
for i in output_list:
    GPIO.output(i, GPIO.LOW)
#capture sequential images of LEDs
filename='11-6-img_01'
for i in range(len(output_list)):
GPIO.output(output_list[i], GPIO.HIGH)
sleep(1)
camera.capture('%s%s%02d%s' %(filename, '#', i, '.png'))
GPIO.output(output_list[i], GPIO.LOW)
#capture image with all lights on
for i in range(len(output_list)):
GPIO.output(output_list[i], GPIO.HIGH)
sleep(1)
camera.capture('%s%s%s%s' %(filename, '#', str(len(output_list)), '.png'))
#capture image with all lights off
for i in range(len(output_list)):
GPIO.output(output_list[i], GPIO.LOW)
camera.capture('%s%s%s%s' %(filename, '#', str(len(output_list)+1), '.png'))
print "Acquisition finished"
| {
"content_hash": "e5ef2a29ce789cf5b9f8f30dbe780f41",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 76,
"avg_line_length": 20.98507462686567,
"alnum_prop": 0.6884779516358464,
"repo_name": "dilloncyh/photometric_stereo",
"id": "df9f7e8d88bb88ce759e709b42277cc041dde162",
"size": "1406",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "RBP_capture.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15518"
}
],
"symlink_target": ""
} |
from pandarus.maps import Map, DuplicateFieldID
from rtree import Rtree
import fiona
import os
import pandarus
import pytest
dirpath = os.path.abspath(os.path.join(os.path.dirname(__file__), "data"))
grid = os.path.join(dirpath, "grid.geojson")
duplicates = os.path.join(dirpath, "duplicates.geojson")
raster = os.path.join(dirpath, "test_raster_cfs.tif")
countries = os.path.join(dirpath, "test_countries.gpkg")
def test_init():
m = Map(grid, 'name')
assert m.filepath == grid
assert m.file
def test_raster_error(monkeypatch):
with pytest.raises(AssertionError):
m = Map(raster, None)
def test_metadata(monkeypatch):
m = Map(grid, 'name')
assert m.metadata == {}
def fake_open(filepath, **others):
return others
monkeypatch.setattr(
pandarus.maps,
'check_type',
lambda x: 'vector'
)
monkeypatch.setattr(
pandarus.maps.fiona,
'open',
fake_open
)
m = Map(grid, 'name', foo='bar')
assert m.metadata == {'foo': 'bar'}
assert m.file == {'foo': 'bar'}
def test_get_fieldnames_dictionary():
m = Map(grid, 'name')
expected = {0: 'grid cell 0', 1: 'grid cell 1',
2: 'grid cell 2', 3: 'grid cell 3'}
assert m.get_fieldnames_dictionary("name") == expected
def test_get_fieldnames_dictionary_errors():
m = Map(grid, 'name')
assert m.get_fieldnames_dictionary()
assert m.get_fieldnames_dictionary(None)
assert m.get_fieldnames_dictionary("")
with pytest.raises(AssertionError):
m.get_fieldnames_dictionary("bar")
dupes = Map(duplicates, 'name')
with pytest.raises(DuplicateFieldID):
dupes.get_fieldnames_dictionary()
def test_properties():
m = Map(grid, 'name')
assert m.geometry == 'Polygon'
assert m.hash
assert m.crs == '+init=epsg:4326'
def test_magic_methods():
m = Map(grid, 'name')
for i, x in enumerate(m):
pass
assert i == 3
expected = {
'geometry': {
'type': 'Polygon',
'coordinates': [[(1.0, 0.0), (1.0, 1.0), (2.0, 1.0), (2.0, 0.0), (1.0, 0.0)]]
},
'properties': {'name': 'grid cell 2'},
'id': '2',
'type': 'Feature'
}
assert m[2] == expected
assert len(m) == 4
def test_getitem():
print("Supported Fiona drivers:")
print(fiona.supported_drivers)
m = Map(grid, 'name')
expected = {
'geometry': {
'type': 'Polygon',
'coordinates': [[(1.0, 0.0), (1.0, 1.0), (2.0, 1.0), (2.0, 0.0), (1.0, 0.0)]]
},
'properties': {'name': 'grid cell 2'},
'id': '2',
'type': 'Feature'
}
assert m[2] == expected
assert hasattr(m, "_index_map")
@pytest.mark.skipif('TRAVIS' in os.environ,
reason="No GPKG driver in Travis")
def test_getitem_geopackage():
print("Supported Fiona drivers:")
print(fiona.supported_drivers)
m = Map(countries, 'name')
assert m[0]
assert m[0]['id'] == '1'
assert hasattr(m, "_index_map")
def test_rtree():
m = Map(grid, 'name')
r = m.create_rtree_index()
assert r == m.rtree_index
assert isinstance(r, Rtree)
| {
"content_hash": "680d82b6b92f26ea9ee230bc78ddccfd",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 89,
"avg_line_length": 24.519083969465647,
"alnum_prop": 0.5762764632627646,
"repo_name": "cmutel/pandarus",
"id": "5509d4951067a0b96f550b17ac4895407189ab42",
"size": "3212",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_maps.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "130638"
},
{
"name": "TeX",
"bytes": "707"
}
],
"symlink_target": ""
} |
from datetime import date, timedelta
from typing import Any
from django.core.management.base import BaseCommand, CommandParser
from django.utils.timezone import now as tz_now
from django.utils.translation import gettext_lazy as _lazy
from request_profiler.models import ProfilingRecord
from request_profiler.settings import LOG_TRUNCATION_DAYS
class Command(BaseCommand):
help = "Truncate the profiler log after a specified number days."
def add_arguments(self, parser: CommandParser) -> None:
super().add_arguments(parser)
parser.add_argument(
"-d",
"--days",
dest="days",
type=int,
default=LOG_TRUNCATION_DAYS,
help=_lazy(
"Number of days after which to truncate logs. "
"Defaults to REQUEST_PROFILER_LOG_TRUNCATION_DAYS."
),
)
parser.add_argument(
"--commit",
action="store_true",
help=_lazy(
"Use --commit to commit the deletion. Without this the "
" command is a 'dry-run'."
),
)
def handle(self, *args: Any, **options: Any) -> None:
self.stdout.write(
f"request_profiler: truncating request_profile logs at {tz_now()}"
)
if (days := options["days"]) == 0:
self.stdout.write(
"request_profiler: aborting truncation as truncation limit is set to 0"
)
return
cutoff = date.today() - timedelta(days=days)
self.stdout.write(f"request_profiler: truncation cutoff: {cutoff}")
logs = ProfilingRecord.objects.filter(start_ts__date__lt=cutoff)
self.stdout.write(f"request_profiler: found {logs.count()} records to delete.")
if not options["commit"]:
self.stderr.write(
"request_profiler: aborting truncation as --commit option is not set."
)
return
count, _ = logs.delete()
self.stdout.write(f"request_profiler: deleted {count} log records.")
self.stdout.write(f"request_profiler: truncation completed at {tz_now()}")
| {
"content_hash": "f02a83a9737d91f4e2f61b236fd10f40",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 87,
"avg_line_length": 37.46551724137931,
"alnum_prop": 0.5954901058444547,
"repo_name": "yunojuno/django-request-profiler",
"id": "0d020b07ee2ade26547d8c23b3f5352df2b724c7",
"size": "2173",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "request_profiler/management/commands/truncate_request_profiler_logs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "113"
},
{
"name": "Python",
"bytes": "64530"
}
],
"symlink_target": ""
} |
import asyncio
import bisect
import collections
import itertools
import os
import sys
from progressbar import ProgressBar, AnimatedMarker, Timer, Bar, Percentage, Widget
import pgpy
from pgpy.packet import Packet
from pgpy.types import Exportable
ascfiles = [ os.path.abspath(os.path.expanduser(f)) for f in sys.argv[1:] if os.path.exists(os.path.abspath(os.path.expanduser(f))) ]
if len(ascfiles) == 0:
sys.stderr.write("Please specify one or more ASCII-armored files to load\n")
sys.exit(-1)
for a in [ os.path.abspath(os.path.expanduser(a)) for a in sys.argv[1:] if a not in ascfiles ]:
sys.stderr.write("Error: {} does not exist\n".write())
class Mebibyte(int):
iec = {1: 'B',
1024: 'KiB',
1024**2: 'MiB',
1024**3: 'GiB',
1024**4: 'TiB',
1024**5: 'PiB',
1024**6: 'EiB',
1024**7: 'ZiB',
1024**8: 'YiB'}
iecl = [1, 1024, 1024**2, 1024**3, 1024**4, 1024**5, 1024**6, 1024**7, 1024**8]
# custom format class for human readable IEC byte formatting
def __format__(self, spec):
# automatically format based on size
iiec = max(0, min(bisect.bisect_right(self.iecl, int(self)), len(self.iecl)))
ieck = self.iecl[iiec - 1]
return '{:,.2f} {:s}'.format(int(self) / ieck, self.iec[ieck])
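# Illustrative formatting with the class above (values chosen for the example):
#   '{}'.format(Mebibyte(512))         -> '512.00 B'
#   '{}'.format(Mebibyte(5 * 1024**2)) -> '5.00 MiB'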
@asyncio.coroutine
def _dospinner(pbar):
for i in pbar(itertools.cycle(range(100))):
try:
yield from asyncio.shield(asyncio.sleep(0.005))
except asyncio.CancelledError:
print("")
break
@asyncio.coroutine
def _load_pubring(ascfile, future):
with open(ascfile, 'r') as ppr:
a = yield from asyncio.get_event_loop().run_in_executor(None, ppr.read)
future.set_result(a)
@asyncio.coroutine
def _unarmor(a, future):
b = yield from asyncio.get_event_loop().run_in_executor(None, pgpy.types.Exportable.ascii_unarmor, a)
future.set_result(b)
_b = bytearray()
loop = asyncio.get_event_loop()
for ascfile in ascfiles:
ascfile = os.path.abspath(ascfile)
if not os.path.isfile(ascfile):
sys.stderr.write('Error: {} does not exist'.format(ascfile))
continue
load_bar = ProgressBar(widgets=["Reading {} ({}): ".format(ascfile, Mebibyte(os.path.getsize(ascfile))), AnimatedMarker()])
unarmor_bar = ProgressBar(widgets=["Unarmoring data: ", AnimatedMarker()])
a = asyncio.Future()
b = asyncio.Future()
lbp = asyncio.Task(_dospinner(load_bar))
asyncio.Task(_load_pubring(ascfile, a))
loop.run_until_complete(a)
_a = a.result()
lbp.cancel()
uap = asyncio.Task(_dospinner(unarmor_bar))
asyncio.Task(_unarmor(_a, b))
loop.run_until_complete(b)
_b += b.result()['body']
uap.cancel()
loop.stop()
print("\n")
packets = []
_mv = len(_b)
class BetterCounter(Widget):
def __init__(self, pktlist, iec=False, format='{:,}'):
self.list = pktlist
self.iec = iec
self.format = format
def update(self, pbar):
if self.iec:
return self.format.format(Mebibyte(len(self.list)))
return self.format.format(len(self.list))
pb3w = [BetterCounter(packets, False, '{:,} pkts'), '|', BetterCounter(_b, True, '{:,} rem.'), '|', Timer("%s"), '|', Percentage(), Bar()]
pbar3 = ProgressBar(maxval=_mv, widgets=pb3w).start()
while len(_b) > 0:
olen = len(_b)
pkt = Packet(_b)
# if len(packets) == 10132:
# a=0
# try:
# pkt = Packet(_b)
#
# except:
# print("\n\tSomething went wrong!")
# print("\tBad packet followed packet #{:,d}".format(len(packets)))
# print("\tLast packet was: {:s} (tag {:d}) ({:,d} bytes)".format(packets[-1].__class__.__name__, packets[-1].header.tag, packets[-1].header.length))
# print("\t{:,d} bytes left unparsed".format(len(_b)))
# print("\tFailed packet consumed {:,d} bytes".format(olen - len(_b)))
# raise
#
# if (olen - len(_b)) != len(pkt.header) + pkt.header.length:
# print("Incorrect number of bytes consumed. Got: {:,}. Expected: {:,}".format((olen - len(_b)), (len(pkt.header) + pkt.header.length)))
# print("Bad packet was: {cls:s}, {id:d}, {ver:s}".format(cls=pkt.__class__.__name__, id=pkt.header.typeid, ver=str(pkt.header.version) if hasattr(pkt.header, 'version') else ''))
# print("loaded: " + str(len(packets)))
packets.append(pkt)
pbar3.update(_mv - len(_b))
pbar3.finish()
print("\n\n")
print('Parsed Packet Stats\n')
pcnts = collections.Counter(['{cls:s} v{v:d}'.format(cls=c.__class__.__name__, v=c.version) if hasattr(c, 'version') else c.__class__.__name__
for c in packets if not isinstance(c, pgpy.packet.Opaque)] +
['Opaque [{:02d}]{:s}'.format(c.header.tag, '[v{:d}]'.format(c.header.version) if hasattr(c.header, 'version') else '') for c in packets if isinstance(c, pgpy.packet.Opaque)])
ml = max(5, max([len(s) for s in pcnts.keys()]))
mcl = max(5, max([len("{:,}".format(c)) for c in pcnts.values()]))
print('Class{0: <{pad1}} Count\n' \
'====={0:=<{pad1}} ====={0:=<{pad2}}'.format('', pad1=(ml - 5), pad2=(mcl - 5)))
for pc, cnt in sorted(pcnts.items(), key=lambda x: x[1], reverse=True):
print('{cls:{pad1}} {count: <{pad2},}'.format(pad1=ml, pad2=mcl, cls=pc, count=cnt))
print("")
| {
"content_hash": "905271c10b427c28ab7d609039c7c1d8",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 203,
"avg_line_length": 34.050632911392405,
"alnum_prop": 0.5962825278810409,
"repo_name": "SecurityInnovation/PGPy",
"id": "1d9cf59f331d0ffb933df88be9ae91d610c573b6",
"size": "5402",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test_load_asc_bench.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "479169"
},
{
"name": "Ruby",
"bytes": "87"
},
{
"name": "Shell",
"bytes": "1505"
}
],
"symlink_target": ""
} |
class Player(object):
def __init__(self, nome, pontos, carta1, carta2, carta3, ident):
self.nome = nome
self.pontos = pontos
self.carta1 = carta1
self.carta2 = carta2
self.carta3 = carta3
self.ident = ident
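# Illustrative instantiation (field values are placeholders):
#   p1 = Player(nome="Jogador 1", pontos=0, carta1=None, carta2=None, carta3=None, ident=0)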
| {
"content_hash": "d050f40550dffd946342bf491bef8de7",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 65,
"avg_line_length": 27.875,
"alnum_prop": 0.6771300448430493,
"repo_name": "gabrielpapke/truco-py",
"id": "258f6b124f9f1c109fa8b3353f1f52498517183a",
"size": "223",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "player.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8232"
}
],
"symlink_target": ""
} |
"""
$Id$
$URL$
Copyright (c) 2010 foption
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
@since Sep 8, 2011
@author Mario Steinhoff
"""
| {
"content_hash": "3a9927f4f2a3a745303f582e9954a3bb",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 77,
"avg_line_length": 37.266666666666666,
"alnum_prop": 0.7915921288014311,
"repo_name": "msteinhoff/foption-bot",
"id": "1f310377bb0fb15ca1259f65e04363d9790c8c9a",
"size": "1142",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/test/feature/core/persistence.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "400174"
}
],
"symlink_target": ""
} |
from .parallel import Parallel
from .build import build_parallel
| {
"content_hash": "f6ba3a6f3e2e0db2cda33f28aab4e2f0",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 33,
"avg_line_length": 32.5,
"alnum_prop": 0.8307692307692308,
"repo_name": "alphatwirl/alphatwirl",
"id": "9c5c114741a5a2d1a4340c160a1b74303397e2aa",
"size": "65",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "alphatwirl/parallel/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "3603"
},
{
"name": "Python",
"bytes": "775977"
},
{
"name": "R",
"bytes": "1222"
},
{
"name": "Shell",
"bytes": "28"
}
],
"symlink_target": ""
} |
import string, re, os
import sys
# Execute git log with the desired command line options.
command="git log --summary --stat --no-merges --date=short "
if len(sys.argv) < 2:
print "Please specify a tag."
sys.exit(1)
command += sys.argv[1] + ".."
fin = os.popen(command, 'r')
# Create a ChangeLog file in the current directory.
fout = open('ChangeLog', 'w')
# Set up the loop variables in order to locate the blocks we want
authorFound = False
dateFound = False
messageFound = False
filesFound = False
message = ""
messageNL = False
files = ""
prevAuthorLine = ""
commitID = ""
# The main part of the loop
for line in fin:
# The commit line marks the start of a new commit object.
if string.find(line, 'commit') >= 0:
# Start all over again...
authorFound = False
dateFound = False
messageFound = False
messageNL = False
message = ""
filesFound = False
files = ""
foo = line.split( None )
commitID = foo[1][:8]
continue
# Match the author line and extract the part we want
elif re.match('Author:', line) >=0:
authorList = re.split(': ', line, 1)
author = authorList[1]
author = author[0:len(author)-1]
author = ""
authorFound = True
# Match the date line
elif re.match('Date:', line) >= 0:
dateList = re.split(': ', line, 1)
date = dateList[1]
date = date[0:len(date)-1]
dateFound = True
# The svn-id lines are ignored
elif re.match(' git-svn-id:', line) >= 0:
continue
# The sign off line is ignored too
elif re.search('Signed-off-by', line) >= 0:
continue
# Extract the actual commit message for this commit
elif authorFound & dateFound & messageFound == False:
# Find the commit message if we can
if len(line) == 1:
if messageNL:
messageFound = True
else:
messageNL = True
elif len(line) == 4:
messageFound = True
else:
if len(message) == 0:
message = message + line.strip()
else:
message = message + " " + line.strip()
# If this line is hit all of the files have been stored for this commit
elif any(x in line for x in ['files changed', 'file changed']):
filesFound = True
continue
# Collect the files for this commit. FIXME: Still need to add +/- to files
elif authorFound & dateFound & messageFound:
fileList = re.split(' \| ', line, 2)
if len(fileList) > 1:
if len(files) > 0:
files = files + ", " + fileList[0].strip()
else:
files = fileList[0].strip()
# All of the parts of the commit have been found - write out the entry
if authorFound & dateFound & messageFound & filesFound:
# First the author line, only outputted if it is the first for that
# author on this day
authorLine = date + " " + author
if len(prevAuthorLine) == 0:
fout.write(authorLine + "\n")
elif authorLine == prevAuthorLine:
pass
else:
fout.write("\n" + authorLine + "\n")
pruned_files = files.split(",")
files = ""
for i in range( len(pruned_files) )[:3]:
files+= pruned_files[i]
if len(pruned_files) > 3:
files+="..."
# Assemble the actual commit message line(s) and limit the line length
# to 80 characters.
commitLine = "* " + commitID + " " + files + ": " + message
i = 0
commit = ""
while i < len(commitLine):
if len(commitLine) < i + 78:
commit = commit + "\n " + commitLine[i:len(commitLine)]
break
index = commitLine.rfind(' ', i, i+78)
if index > i:
commit = commit + "\n " + commitLine[i:index]
i = index+1
else:
commit = commit + "\n " + commitLine[i:78]
i = i+79
# Write out the commit line
fout.write(commit + "\n")
#Now reset all the variables ready for a new commit block.
authorFound = False
dateFound = False
messageFound = False
messageNL = False
message = ""
filesFound = False
files = ""
prevAuthorLine = authorLine
# Close the input and output lines now that we are finished.
fin.close()
fout.close()
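# Illustrative usage (run from inside a git checkout):
#   python gitlog2changelog.py some-tag
# which runs "git log --summary --stat --no-merges --date=short some-tag.."
# and writes a ChangeLog file in the current directory.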
| {
"content_hash": "25141731659efef42a05bbec34be60f6",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 78,
"avg_line_length": 32.97080291970803,
"alnum_prop": 0.5514722160726145,
"repo_name": "arm-hpc/papi",
"id": "ae131e43900b53045cbd2e11a1ffac8692b49a83",
"size": "4666",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gitlog2changelog.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "609"
},
{
"name": "C",
"bytes": "15553077"
},
{
"name": "C++",
"bytes": "28718"
},
{
"name": "Cuda",
"bytes": "21780"
},
{
"name": "FORTRAN",
"bytes": "120620"
},
{
"name": "Groff",
"bytes": "429223"
},
{
"name": "HTML",
"bytes": "53267"
},
{
"name": "Makefile",
"bytes": "177012"
},
{
"name": "Matlab",
"bytes": "17193"
},
{
"name": "Objective-C",
"bytes": "847314"
},
{
"name": "Perl",
"bytes": "11571"
},
{
"name": "Prolog",
"bytes": "3949"
},
{
"name": "Python",
"bytes": "31097"
},
{
"name": "Shell",
"bytes": "26724"
}
],
"symlink_target": ""
} |
"""Test suite for XenAPI."""
import ast
import base64
import contextlib
import functools
import os
import re
from oslo.config import cfg
from nova.compute import api as compute_api
from nova.compute import instance_types
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import vm_states
from nova import context
from nova import db
from nova import exception
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova import test
from nova.tests.db import fakes as db_fakes
from nova.tests import fake_network
from nova.tests import fake_utils
import nova.tests.image.fake as fake_image
from nova.tests import matchers
from nova.tests.xenapi import stubs
from nova.virt import fake
from nova.virt.xenapi import agent
from nova.virt.xenapi import driver as xenapi_conn
from nova.virt.xenapi import fake as xenapi_fake
from nova.virt.xenapi import host
from nova.virt.xenapi.imageupload import glance
from nova.virt.xenapi import pool
from nova.virt.xenapi import pool_states
from nova.virt.xenapi import vm_utils
from nova.virt.xenapi import vmops
from nova.virt.xenapi import volume_utils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('compute_manager', 'nova.service')
CONF.import_opt('network_manager', 'nova.service')
CONF.import_opt('compute_driver', 'nova.virt.driver')
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('default_availability_zone', 'nova.availability_zones')
IMAGE_MACHINE = '1'
IMAGE_KERNEL = '2'
IMAGE_RAMDISK = '3'
IMAGE_RAW = '4'
IMAGE_VHD = '5'
IMAGE_ISO = '6'
IMAGE_FIXTURES = {
IMAGE_MACHINE: {
'image_meta': {'name': 'fakemachine', 'size': 0,
'disk_format': 'ami',
'container_format': 'ami'},
},
IMAGE_KERNEL: {
'image_meta': {'name': 'fakekernel', 'size': 0,
'disk_format': 'aki',
'container_format': 'aki'},
},
IMAGE_RAMDISK: {
'image_meta': {'name': 'fakeramdisk', 'size': 0,
'disk_format': 'ari',
'container_format': 'ari'},
},
IMAGE_RAW: {
'image_meta': {'name': 'fakeraw', 'size': 0,
'disk_format': 'raw',
'container_format': 'bare'},
},
IMAGE_VHD: {
'image_meta': {'name': 'fakevhd', 'size': 0,
'disk_format': 'vhd',
'container_format': 'ovf'},
},
IMAGE_ISO: {
'image_meta': {'name': 'fakeiso', 'size': 0,
'disk_format': 'iso',
'container_format': 'bare'},
},
}
def set_image_fixtures():
image_service = fake_image.FakeImageService()
image_service.images.clear()
for image_id, image_meta in IMAGE_FIXTURES.items():
image_meta = image_meta['image_meta']
image_meta['id'] = image_id
image_service.create(None, image_meta)
def get_fake_device_info():
# FIXME: 'sr_uuid', 'introduce_sr_keys', sr_type and vdi_uuid
# can be removed from the dict when LP bug #1087308 is fixed
fake_vdi_ref = xenapi_fake.create_vdi('fake-vdi', None)
fake_vdi_uuid = xenapi_fake.get_record('VDI', fake_vdi_ref)['uuid']
fake = {'block_device_mapping':
[{'connection_info': {'driver_volume_type': 'iscsi',
'data': {'sr_uuid': 'falseSR',
'introduce_sr_keys': ['sr_type'],
'sr_type': 'iscsi',
'vdi_uuid': fake_vdi_uuid,
'target_discovered': False,
'target_iqn': 'foo_iqn:foo_volid',
'target_portal': 'localhost:3260',
'volume_id': 'foo_volid',
'target_lun': 1,
'auth_password': 'my-p@55w0rd',
'auth_username': 'johndoe',
'auth_method': u'CHAP'}, },
'mount_device': 'vda',
'delete_on_termination': False}, ],
'root_device_name': '/dev/sda',
'ephemerals': [],
'swap': None, }
return fake
def stub_vm_utils_with_vdi_attached_here(function, should_return=True):
"""
vm_utils.with_vdi_attached_here needs to be stubbed out because it
calls down to the filesystem to attach a vdi. This provides a
decorator to handle that.
"""
@functools.wraps(function)
def decorated_function(self, *args, **kwargs):
@contextlib.contextmanager
def fake_vdi_attached_here(*args, **kwargs):
fake_dev = 'fakedev'
yield fake_dev
def fake_image_download(*args, **kwargs):
pass
def fake_is_vdi_pv(*args, **kwargs):
return should_return
orig_vdi_attached_here = vm_utils.vdi_attached_here
orig_image_download = fake_image._FakeImageService.download
orig_is_vdi_pv = vm_utils._is_vdi_pv
try:
vm_utils.vdi_attached_here = fake_vdi_attached_here
fake_image._FakeImageService.download = fake_image_download
vm_utils._is_vdi_pv = fake_is_vdi_pv
return function(self, *args, **kwargs)
finally:
vm_utils._is_vdi_pv = orig_is_vdi_pv
fake_image._FakeImageService.download = orig_image_download
vm_utils.vdi_attached_here = orig_vdi_attached_here
return decorated_function
def create_instance_with_system_metadata(context, instance_values):
instance_type = db.instance_type_get(context,
instance_values['instance_type_id'])
sys_meta = instance_types.save_instance_type_info({},
instance_type)
instance_values['system_metadata'] = sys_meta
return db.instance_create(context, instance_values)
class XenAPIVolumeTestCase(stubs.XenAPITestBase):
"""Unit tests for Volume operations."""
def setUp(self):
super(XenAPIVolumeTestCase, self).setUp()
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
self.flags(disable_process_locking=True,
firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver',
xenapi_connection_url='test_url',
xenapi_connection_password='test_pass')
db_fakes.stub_out_db_instance_api(self.stubs)
self.instance_values = {'id': 1,
'project_id': self.user_id,
'user_id': 'fake',
'image_ref': 1,
'kernel_id': 2,
'ramdisk_id': 3,
'root_gb': 20,
'instance_type_id': '3', # m1.large
'os_type': 'linux',
'architecture': 'x86-64'}
def _create_volume(self, size=0):
"""Create a volume object."""
vol = {}
vol['size'] = size
vol['user_id'] = 'fake'
vol['project_id'] = 'fake'
vol['host'] = 'localhost'
vol['availability_zone'] = CONF.default_availability_zone
vol['status'] = "creating"
vol['attach_status'] = "detached"
return db.volume_create(self.context, vol)
@staticmethod
def _make_connection_data():
return {
'volume_id': 1,
'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001',
'target_portal': '127.0.0.1:3260,fake',
'target_lun': None,
'auth_method': 'CHAP',
'auth_username': 'username',
'auth_password': 'password',
}
@classmethod
def _make_connection_info(cls):
return {
'driver_volume_type': 'iscsi',
'data': cls._make_connection_data()
}
def test_mountpoint_to_number(self):
cases = {
'sda': 0,
'sdp': 15,
'hda': 0,
'hdp': 15,
'vda': 0,
'xvda': 0,
'0': 0,
'10': 10,
'vdq': -1,
'sdq': -1,
'hdq': -1,
'xvdq': -1,
}
for (input, expected) in cases.iteritems():
actual = volume_utils.mountpoint_to_number(input)
self.assertEqual(actual, expected,
'%s yielded %s, not %s' % (input, actual, expected))
def test_parse_volume_info_parsing_auth_details(self):
result = volume_utils.parse_volume_info(
self._make_connection_data())
self.assertEquals('username', result['chapuser'])
self.assertEquals('password', result['chappassword'])
def test_get_device_number_raise_exception_on_wrong_mountpoint(self):
self.assertRaises(
volume_utils.StorageError,
volume_utils.get_device_number,
'dev/sd')
def test_attach_volume(self):
# This shows how to test Ops classes' methods.
stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
instance = db.instance_create(self.context, self.instance_values)
vm = xenapi_fake.create_vm(instance['name'], 'Running')
result = conn.attach_volume(self._make_connection_info(),
instance, '/dev/sdc')
# check that the VM has a VBD attached to it
# Get XenAPI record for VBD
vbds = xenapi_fake.get_all('VBD')
vbd = xenapi_fake.get_record('VBD', vbds[0])
vm_ref = vbd['VM']
self.assertEqual(vm_ref, vm)
def test_attach_volume_raise_exception(self):
# This shows how to test when exceptions are raised.
stubs.stubout_session(self.stubs,
stubs.FakeSessionForVolumeFailedTests)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
instance = db.instance_create(self.context, self.instance_values)
xenapi_fake.create_vm(instance['name'], 'Running')
self.assertRaises(exception.VolumeDriverNotFound,
conn.attach_volume,
{'driver_volume_type': 'nonexist'},
instance,
'/dev/sdc')
class XenAPIVMTestCase(stubs.XenAPITestBase):
"""Unit tests for VM operations."""
def setUp(self):
super(XenAPIVMTestCase, self).setUp()
self.network = importutils.import_object(CONF.network_manager)
self.flags(disable_process_locking=True,
instance_name_template='%d',
firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver',
xenapi_connection_url='test_url',
xenapi_connection_password='test_pass',)
xenapi_fake.create_local_srs()
xenapi_fake.create_local_pifs()
db_fakes.stub_out_db_instance_api(self.stubs)
xenapi_fake.create_network('fake', CONF.flat_network_bridge)
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
stubs.stubout_get_this_vm_uuid(self.stubs)
stubs.stubout_is_vdi_pv(self.stubs)
stubs.stub_out_vm_methods(self.stubs)
fake_utils.stub_out_utils_execute(self.stubs)
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
fake_image.stub_out_image_service(self.stubs)
set_image_fixtures()
stubs.stubout_image_service_download(self.stubs)
stubs.stubout_stream_disk(self.stubs)
def fake_inject_instance_metadata(self, instance, vm):
pass
self.stubs.Set(vmops.VMOps, 'inject_instance_metadata',
fake_inject_instance_metadata)
def fake_safe_copy_vdi(session, sr_ref, instance, vdi_to_copy_ref):
name_label = "fakenamelabel"
disk_type = "fakedisktype"
virtual_size = 777
return vm_utils.create_vdi(
session, sr_ref, instance, name_label, disk_type,
virtual_size)
self.stubs.Set(vm_utils, '_safe_copy_vdi', fake_safe_copy_vdi)
def tearDown(self):
fake_image.FakeImageService_reset()
super(XenAPIVMTestCase, self).tearDown()
def test_init_host(self):
session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass',
fake.FakeVirtAPI())
vm = vm_utils._get_this_vm_ref(session)
# Local root disk
vdi0 = xenapi_fake.create_vdi('compute', None)
vbd0 = xenapi_fake.create_vbd(vm, vdi0)
# Instance VDI
vdi1 = xenapi_fake.create_vdi('instance-aaaa', None,
other_config={'nova_instance_uuid': 'aaaa'})
vbd1 = xenapi_fake.create_vbd(vm, vdi1)
# Only looks like instance VDI
vdi2 = xenapi_fake.create_vdi('instance-bbbb', None)
vbd2 = xenapi_fake.create_vbd(vm, vdi2)
self.conn.init_host(None)
self.assertEquals(set(xenapi_fake.get_all('VBD')), set([vbd0, vbd2]))
def test_list_instances_0(self):
instances = self.conn.list_instances()
self.assertEquals(instances, [])
def test_list_instance_uuids_0(self):
instance_uuids = self.conn.list_instance_uuids()
self.assertEquals(instance_uuids, [])
def test_list_instance_uuids(self):
uuids = []
for x in xrange(1, 4):
instance = self._create_instance(x)
uuids.append(instance['uuid'])
instance_uuids = self.conn.list_instance_uuids()
self.assertEqual(len(uuids), len(instance_uuids))
self.assertEqual(set(uuids), set(instance_uuids))
def test_get_rrd_server(self):
self.flags(xenapi_connection_url='myscheme://myaddress/')
server_info = vm_utils._get_rrd_server()
self.assertEqual(server_info[0], 'myscheme')
self.assertEqual(server_info[1], 'myaddress')
def test_get_diagnostics(self):
def fake_get_rrd(host, vm_uuid):
path = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(path, 'xenapi/vm_rrd.xml')) as f:
return re.sub(r'\s', '', f.read())
self.stubs.Set(vm_utils, '_get_rrd', fake_get_rrd)
fake_diagnostics = {
'vbd_xvdb_write': '0.0',
'memory_target': '4294967296.0000',
'memory_internal_free': '1415564.0000',
'memory': '4294967296.0000',
'vbd_xvda_write': '0.0',
'cpu0': '0.0042',
'vif_0_tx': '287.4134',
'vbd_xvda_read': '0.0',
'vif_0_rx': '1816.0144',
'vif_2_rx': '0.0',
'vif_2_tx': '0.0',
'vbd_xvdb_read': '0.0',
'last_update': '1328795567',
}
instance = self._create_instance()
expected = self.conn.get_diagnostics(instance)
self.assertThat(fake_diagnostics, matchers.DictMatches(expected))
def test_instance_snapshot_fails_with_no_primary_vdi(self):
def create_bad_vbd(session, vm_ref, vdi_ref, userdevice,
vbd_type='disk', read_only=False, bootable=False,
osvol=False):
vbd_rec = {'VM': vm_ref,
'VDI': vdi_ref,
'userdevice': 'fake',
'currently_attached': False}
vbd_ref = xenapi_fake._create_object('VBD', vbd_rec)
xenapi_fake.after_VBD_create(vbd_ref, vbd_rec)
return vbd_ref
self.stubs.Set(vm_utils, 'create_vbd', create_bad_vbd)
stubs.stubout_instance_snapshot(self.stubs)
# Stubbing out firewall driver as previous stub sets alters
# xml rpc result parsing
stubs.stubout_firewall_driver(self.stubs, self.conn)
instance = self._create_instance()
image_id = "my_snapshot_id"
self.assertRaises(exception.NovaException, self.conn.snapshot,
self.context, instance, image_id,
lambda *args, **kwargs: None)
def test_instance_snapshot(self):
expected_calls = [
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_PENDING_UPLOAD}},
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_UPLOADING,
'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
image_id = "my_snapshot_id"
stubs.stubout_instance_snapshot(self.stubs)
stubs.stubout_is_snapshot(self.stubs)
# Stubbing out firewall driver as previous stub sets alters
# xml rpc result parsing
stubs.stubout_firewall_driver(self.stubs, self.conn)
instance = self._create_instance()
self.fake_upload_called = False
def fake_image_upload(_self, ctx, session, inst, vdi_uuids,
img_id):
self.fake_upload_called = True
self.assertEqual(ctx, self.context)
self.assertEqual(inst, instance)
self.assertTrue(isinstance(vdi_uuids, list))
self.assertEqual(img_id, image_id)
self.stubs.Set(glance.GlanceStore, 'upload_image',
fake_image_upload)
self.conn.snapshot(self.context, instance, image_id,
func_call_matcher.call)
# Ensure VM was torn down
vm_labels = []
for vm_ref in xenapi_fake.get_all('VM'):
vm_rec = xenapi_fake.get_record('VM', vm_ref)
if not vm_rec["is_control_domain"]:
vm_labels.append(vm_rec["name_label"])
self.assertEquals(vm_labels, [instance['name']])
# Ensure VBDs were torn down
vbd_labels = []
for vbd_ref in xenapi_fake.get_all('VBD'):
vbd_rec = xenapi_fake.get_record('VBD', vbd_ref)
vbd_labels.append(vbd_rec["vm_name_label"])
self.assertEquals(vbd_labels, [instance['name']])
# Ensure task states changed in correct order
self.assertIsNone(func_call_matcher.match())
# Ensure VDIs were torn down
for vdi_ref in xenapi_fake.get_all('VDI'):
vdi_rec = xenapi_fake.get_record('VDI', vdi_ref)
name_label = vdi_rec["name_label"]
self.assert_(not name_label.endswith('snapshot'))
self.assertTrue(self.fake_upload_called)
def create_vm_record(self, conn, os_type, name):
instances = conn.list_instances()
self.assertEquals(instances, [name])
# Get Nova record for VM
vm_info = conn.get_info({'name': name})
# Get XenAPI record for VM
vms = [rec for ref, rec
in xenapi_fake.get_all_records('VM').iteritems()
if not rec['is_control_domain']]
vm = vms[0]
self.vm_info = vm_info
self.vm = vm
def check_vm_record(self, conn, check_injection=False):
# Check that m1.large above turned into the right thing.
instance_type = db.instance_type_get_by_name(conn, 'm1.large')
mem_kib = long(instance_type['memory_mb']) << 10
mem_bytes = str(mem_kib << 10)
vcpus = instance_type['vcpus']
self.assertEquals(self.vm_info['max_mem'], mem_kib)
self.assertEquals(self.vm_info['mem'], mem_kib)
self.assertEquals(self.vm['memory_static_max'], mem_bytes)
self.assertEquals(self.vm['memory_dynamic_max'], mem_bytes)
self.assertEquals(self.vm['memory_dynamic_min'], mem_bytes)
self.assertEquals(self.vm['VCPUs_max'], str(vcpus))
self.assertEquals(self.vm['VCPUs_at_startup'], str(vcpus))
# Check that the VM is running according to Nova
self.assertEquals(self.vm_info['state'], power_state.RUNNING)
# Check that the VM is running according to XenAPI.
self.assertEquals(self.vm['power_state'], 'Running')
if check_injection:
xenstore_data = self.vm['xenstore_data']
self.assertEquals(xenstore_data['vm-data/hostname'], 'test')
key = 'vm-data/networking/DEADBEEF0001'
xenstore_value = xenstore_data[key]
tcpip_data = ast.literal_eval(xenstore_value)
self.assertEquals(tcpip_data,
{'broadcast': '192.168.1.255',
'dns': ['192.168.1.4', '192.168.1.3'],
'gateway': '192.168.1.1',
'gateway_v6': 'fe80::def',
'ip6s': [{'enabled': '1',
'ip': '2001:db8:0:1::1',
'netmask': 64,
'gateway': 'fe80::def'}],
'ips': [{'enabled': '1',
'ip': '192.168.1.100',
'netmask': '255.255.255.0',
'gateway': '192.168.1.1'},
{'enabled': '1',
'ip': '192.168.1.101',
'netmask': '255.255.255.0',
'gateway': '192.168.1.1'}],
'label': 'test1',
'mac': 'DE:AD:BE:EF:00:01'})
def check_vm_params_for_windows(self):
self.assertEquals(self.vm['platform']['nx'], 'true')
self.assertEquals(self.vm['HVM_boot_params'], {'order': 'dc'})
self.assertEquals(self.vm['HVM_boot_policy'], 'BIOS order')
# check that these are not set
self.assertEquals(self.vm['PV_args'], '')
self.assertEquals(self.vm['PV_bootloader'], '')
self.assertEquals(self.vm['PV_kernel'], '')
self.assertEquals(self.vm['PV_ramdisk'], '')
def check_vm_params_for_linux(self):
self.assertEquals(self.vm['platform']['nx'], 'false')
self.assertEquals(self.vm['PV_args'], '')
self.assertEquals(self.vm['PV_bootloader'], 'pygrub')
# check that these are not set
self.assertEquals(self.vm['PV_kernel'], '')
self.assertEquals(self.vm['PV_ramdisk'], '')
self.assertEquals(self.vm['HVM_boot_params'], {})
self.assertEquals(self.vm['HVM_boot_policy'], '')
def check_vm_params_for_linux_with_external_kernel(self):
self.assertEquals(self.vm['platform']['nx'], 'false')
self.assertEquals(self.vm['PV_args'], 'root=/dev/xvda1')
self.assertNotEquals(self.vm['PV_kernel'], '')
self.assertNotEquals(self.vm['PV_ramdisk'], '')
# check that these are not set
self.assertEquals(self.vm['HVM_boot_params'], {})
self.assertEquals(self.vm['HVM_boot_policy'], '')
def _list_vdis(self):
url = CONF.xenapi_connection_url
username = CONF.xenapi_connection_username
password = CONF.xenapi_connection_password
session = xenapi_conn.XenAPISession(url, username, password,
fake.FakeVirtAPI())
return session.call_xenapi('VDI.get_all')
def _list_vms(self):
url = CONF.xenapi_connection_url
username = CONF.xenapi_connection_username
password = CONF.xenapi_connection_password
session = xenapi_conn.XenAPISession(url, username, password,
fake.FakeVirtAPI())
return session.call_xenapi('VM.get_all')
def _check_vdis(self, start_list, end_list):
for vdi_ref in end_list:
if vdi_ref not in start_list:
vdi_rec = xenapi_fake.get_record('VDI', vdi_ref)
# If the cache is turned on then the base disk will be
# there even after the cleanup
if 'other_config' in vdi_rec:
if 'image-id' not in vdi_rec['other_config']:
self.fail('Found unexpected VDI:%s' % vdi_ref)
else:
self.fail('Found unexpected VDI:%s' % vdi_ref)
def _test_spawn(self, image_ref, kernel_id, ramdisk_id,
instance_type_id="3", os_type="linux",
hostname="test", architecture="x86-64", instance_id=1,
injected_files=None, check_injection=False,
create_record=True, empty_dns=False,
image_meta={'id': IMAGE_VHD,
'disk_format': 'vhd'},
block_device_info=None,
key_data=None):
if injected_files is None:
injected_files = []
# Fake out inject_instance_metadata
def fake_inject_instance_metadata(self, instance, vm):
pass
self.stubs.Set(vmops.VMOps, 'inject_instance_metadata',
fake_inject_instance_metadata)
if create_record:
instance_values = {'id': instance_id,
'project_id': self.project_id,
'user_id': self.user_id,
'image_ref': image_ref,
'kernel_id': kernel_id,
'ramdisk_id': ramdisk_id,
'root_gb': 20,
'instance_type_id': instance_type_id,
'os_type': os_type,
'hostname': hostname,
'key_data': key_data,
'architecture': architecture}
instance = create_instance_with_system_metadata(self.context,
instance_values)
else:
instance = db.instance_get(self.context, instance_id)
network_info = fake_network.fake_get_instance_nw_info(self.stubs,
spectacular=True)
if empty_dns:
# NOTE(tr3buchet): this is a terrible way to do this...
network_info[0]['network']['subnets'][0]['dns'] = []
self.conn.spawn(self.context, instance, image_meta, injected_files,
'herp', network_info, block_device_info)
self.create_vm_record(self.conn, os_type, instance['name'])
self.check_vm_record(self.conn, check_injection)
self.assertTrue(instance['os_type'])
self.assertTrue(instance['architecture'])
def test_spawn_empty_dns(self):
# Test spawning with an empty dns list.
self._test_spawn(IMAGE_VHD, None, None,
os_type="linux", architecture="x86-64",
empty_dns=True)
self.check_vm_params_for_linux()
def test_spawn_not_enough_memory(self):
self.assertRaises(exception.InsufficientFreeMemory,
self._test_spawn,
1, 2, 3, "4") # m1.xlarge
def test_spawn_fail_cleanup_1(self):
"""Simulates an error while downloading an image.
Verifies that the VM and VDIs created are properly cleaned up.
"""
vdi_recs_start = self._list_vdis()
start_vms = self._list_vms()
stubs.stubout_fetch_disk_image(self.stubs, raise_failure=True)
self.assertRaises(xenapi_fake.Failure,
self._test_spawn, 1, 2, 3)
# No additional VDI should be found.
vdi_recs_end = self._list_vdis()
end_vms = self._list_vms()
self._check_vdis(vdi_recs_start, vdi_recs_end)
# No additional VMs should be found.
self.assertEqual(start_vms, end_vms)
def test_spawn_fail_cleanup_2(self):
"""Simulates an error while creating VM record.
Verifies that the VM and VDIs created are properly cleaned up.
"""
vdi_recs_start = self._list_vdis()
start_vms = self._list_vms()
stubs.stubout_create_vm(self.stubs)
self.assertRaises(xenapi_fake.Failure,
self._test_spawn, 1, 2, 3)
# No additional VDI should be found.
vdi_recs_end = self._list_vdis()
end_vms = self._list_vms()
self._check_vdis(vdi_recs_start, vdi_recs_end)
# No additional VMs should be found.
self.assertEqual(start_vms, end_vms)
def test_spawn_fail_cleanup_3(self):
"""Simulates an error while attaching disks.
Verifies that the VM and VDIs created are properly cleaned up.
"""
stubs.stubout_attach_disks(self.stubs)
vdi_recs_start = self._list_vdis()
start_vms = self._list_vms()
self.assertRaises(xenapi_fake.Failure,
self._test_spawn, 1, 2, 3)
# No additional VDI should be found.
vdi_recs_end = self._list_vdis()
end_vms = self._list_vms()
self._check_vdis(vdi_recs_start, vdi_recs_end)
# No additional VMs should be found.
self.assertEqual(start_vms, end_vms)
@stub_vm_utils_with_vdi_attached_here
def test_spawn_raw_glance(self):
self._test_spawn(IMAGE_RAW, None, None)
self.check_vm_params_for_linux()
def test_spawn_vhd_glance_linux(self):
self._test_spawn(IMAGE_VHD, None, None,
os_type="linux", architecture="x86-64")
self.check_vm_params_for_linux()
def test_spawn_vhd_glance_windows(self):
self._test_spawn(IMAGE_VHD, None, None,
os_type="windows", architecture="i386")
self.check_vm_params_for_windows()
def test_spawn_iso_glance(self):
self._test_spawn(IMAGE_ISO, None, None,
os_type="windows", architecture="i386")
self.check_vm_params_for_windows()
def test_spawn_glance(self):
stubs.stubout_fetch_disk_image(self.stubs)
self._test_spawn(IMAGE_MACHINE,
IMAGE_KERNEL,
IMAGE_RAMDISK)
self.check_vm_params_for_linux_with_external_kernel()
def test_spawn_boot_from_volume_no_image_meta(self):
dev_info = get_fake_device_info()
self._test_spawn(None, None, None,
image_meta={}, block_device_info=dev_info)
def test_spawn_boot_from_volume_with_image_meta(self):
dev_info = get_fake_device_info()
self._test_spawn(None, None, None,
block_device_info=dev_info)
def test_spawn_netinject_file(self):
self.flags(flat_injected=True)
db_fakes.stub_out_db_instance_api(self.stubs, injected=True)
self._tee_executed = False
def _tee_handler(cmd, **kwargs):
input = kwargs.get('process_input', None)
self.assertNotEqual(input, None)
config = [line.strip() for line in input.split("\n")]
# Find the start of eth0 configuration and check it
index = config.index('auto eth0')
self.assertEquals(config[index + 1:index + 8], [
'iface eth0 inet static',
'address 192.168.1.100',
'netmask 255.255.255.0',
'broadcast 192.168.1.255',
'gateway 192.168.1.1',
'dns-nameservers 192.168.1.3 192.168.1.4',
''])
self._tee_executed = True
return '', ''
def _readlink_handler(cmd_parts, **kwargs):
return os.path.realpath(cmd_parts[2]), ''
fake_utils.fake_execute_set_repliers([
# Capture the tee .../etc/network/interfaces command
(r'tee.*interfaces', _tee_handler),
(r'readlink -nm.*', _readlink_handler),
])
self._test_spawn(IMAGE_MACHINE,
IMAGE_KERNEL,
IMAGE_RAMDISK,
check_injection=True)
self.assertTrue(self._tee_executed)
def test_spawn_netinject_xenstore(self):
db_fakes.stub_out_db_instance_api(self.stubs, injected=True)
self._tee_executed = False
def _mount_handler(cmd, *ignore_args, **ignore_kwargs):
# When mounting, create real files under the mountpoint to simulate
# files in the mounted filesystem
# The mount point is the last item in the command list
self._tmpdir = cmd[len(cmd) - 1]
LOG.debug(_('Creating files in %s to simulate guest agent'),
self._tmpdir)
os.makedirs(os.path.join(self._tmpdir, 'usr', 'sbin'))
# Touch the file using open
open(os.path.join(self._tmpdir, 'usr', 'sbin',
'xe-update-networking'), 'w').close()
return '', ''
def _umount_handler(cmd, *ignore_args, **ignore_kwargs):
# Umount would normally make files in the mounted filesystem
# disappear, so do that here
LOG.debug(_('Removing simulated guest agent files in %s'),
self._tmpdir)
os.remove(os.path.join(self._tmpdir, 'usr', 'sbin',
'xe-update-networking'))
os.rmdir(os.path.join(self._tmpdir, 'usr', 'sbin'))
os.rmdir(os.path.join(self._tmpdir, 'usr'))
return '', ''
def _tee_handler(cmd, *ignore_args, **ignore_kwargs):
self._tee_executed = True
return '', ''
fake_utils.fake_execute_set_repliers([
(r'mount', _mount_handler),
(r'umount', _umount_handler),
(r'tee.*interfaces', _tee_handler)])
self._test_spawn(1, 2, 3, check_injection=True)
# tee must not run in this case, where an injection-capable
# guest agent is detected
self.assertFalse(self._tee_executed)
def test_spawn_vlanmanager(self):
self.flags(network_manager='nova.network.manager.VlanManager',
vlan_interface='fake0')
def dummy(*args, **kwargs):
pass
self.stubs.Set(vmops.VMOps, '_create_vifs', dummy)
# Reset network table
xenapi_fake.reset_table('network')
# Instance id = 2 will use vlan network (see db/fakes.py)
ctxt = self.context.elevated()
instance = self._create_instance(2, False)
networks = self.network.db.network_get_all(ctxt)
for network in networks:
self.network.set_network_host(ctxt, network)
self.network.allocate_for_instance(ctxt,
instance_id=2,
instance_uuid='00000000-0000-0000-0000-000000000002',
host=CONF.host,
vpn=None,
rxtx_factor=3,
project_id=self.project_id,
macs=None)
self._test_spawn(IMAGE_MACHINE,
IMAGE_KERNEL,
IMAGE_RAMDISK,
instance_id=2,
create_record=False)
# TODO(salvatore-orlando): a complete test here would require
# a check for making sure the bridge for the VM's VIF is
# consistent with bridge specified in nova db
def test_spawn_with_network_qos(self):
self._create_instance()
for vif_ref in xenapi_fake.get_all('VIF'):
vif_rec = xenapi_fake.get_record('VIF', vif_ref)
self.assertEquals(vif_rec['qos_algorithm_type'], 'ratelimit')
self.assertEquals(vif_rec['qos_algorithm_params']['kbps'],
str(3 * 10 * 1024))
def test_spawn_ssh_key_injection(self):
# Test spawning with key_data on an instance. Should use
# agent file injection.
actual_injected_files = []
def fake_inject_file(self, method, args):
path = base64.b64decode(args['b64_path'])
contents = base64.b64decode(args['b64_contents'])
actual_injected_files.append((path, contents))
return jsonutils.dumps({'returncode': '0', 'message': 'success'})
def noop(*args, **kwargs):
pass
self.stubs.Set(stubs.FakeSessionForVMTests,
'_plugin_agent_inject_file', fake_inject_file)
self.stubs.Set(agent.XenAPIBasedAgent,
'set_admin_password', noop)
expected_data = ('\n# The following ssh key was injected by '
'Nova\nfake_keydata\n')
injected_files = [('/root/.ssh/authorized_keys', expected_data)]
self._test_spawn(IMAGE_VHD, None, None,
os_type="linux", architecture="x86-64",
key_data='fake_keydata')
self.assertEquals(actual_injected_files, injected_files)
def test_spawn_injected_files(self):
# Test spawning with injected_files.
actual_injected_files = []
def fake_inject_file(self, method, args):
path = base64.b64decode(args['b64_path'])
contents = base64.b64decode(args['b64_contents'])
actual_injected_files.append((path, contents))
return jsonutils.dumps({'returncode': '0', 'message': 'success'})
self.stubs.Set(stubs.FakeSessionForVMTests,
'_plugin_agent_inject_file', fake_inject_file)
injected_files = [('/tmp/foo', 'foobar')]
self._test_spawn(IMAGE_VHD, None, None,
os_type="linux", architecture="x86-64",
injected_files=injected_files)
self.check_vm_params_for_linux()
self.assertEquals(actual_injected_files, injected_files)
def test_rescue(self):
instance = self._create_instance()
session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass',
fake.FakeVirtAPI())
vm_ref = vm_utils.lookup(session, instance['name'])
swap_vdi_ref = xenapi_fake.create_vdi('swap', None)
root_vdi_ref = xenapi_fake.create_vdi('root', None)
xenapi_fake.create_vbd(vm_ref, swap_vdi_ref, userdevice=1)
xenapi_fake.create_vbd(vm_ref, root_vdi_ref, userdevice=0)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
image_meta = {'id': IMAGE_VHD,
'disk_format': 'vhd'}
conn.rescue(self.context, instance, [], image_meta, '')
vm = xenapi_fake.get_record('VM', vm_ref)
rescue_name = "%s-rescue" % vm["name_label"]
rescue_ref = vm_utils.lookup(session, rescue_name)
rescue_vm = xenapi_fake.get_record('VM', rescue_ref)
vdi_uuids = []
for vbd_uuid in rescue_vm["VBDs"]:
vdi_uuids.append(xenapi_fake.get_record('VBD', vbd_uuid)["VDI"])
self.assertTrue("swap" not in vdi_uuids)
def test_unrescue(self):
instance = self._create_instance()
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
# Unrescue expects the original instance to be powered off
conn.power_off(instance)
rescue_vm = xenapi_fake.create_vm(instance['name'] + '-rescue',
'Running')
conn.unrescue(instance, None)
def test_unrescue_not_in_rescue(self):
instance = self._create_instance()
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
# Ensure that it will not unrescue a non-rescued instance.
self.assertRaises(exception.InstanceNotInRescueMode, conn.unrescue,
instance, None)
def test_finish_revert_migration(self):
instance = self._create_instance()
class VMOpsMock():
def __init__(self):
self.finish_revert_migration_called = False
def finish_revert_migration(self, instance, block_info):
self.finish_revert_migration_called = True
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
conn._vmops = VMOpsMock()
conn.finish_revert_migration(instance, None)
self.assertTrue(conn._vmops.finish_revert_migration_called)
def test_reboot_hard(self):
instance = self._create_instance()
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
conn.reboot(self.context, instance, None, "HARD")
def test_reboot_soft(self):
instance = self._create_instance()
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
conn.reboot(self.context, instance, None, "SOFT")
def test_reboot_halted(self):
session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass',
fake.FakeVirtAPI())
instance = self._create_instance(spawn=False)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
xenapi_fake.create_vm(instance['name'], 'Halted')
conn.reboot(self.context, instance, None, "SOFT")
vm_ref = vm_utils.lookup(session, instance['name'])
vm = xenapi_fake.get_record('VM', vm_ref)
self.assertEquals(vm['power_state'], 'Running')
def test_reboot_unknown_state(self):
instance = self._create_instance(spawn=False)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
xenapi_fake.create_vm(instance['name'], 'Unknown')
self.assertRaises(xenapi_fake.Failure, conn.reboot, self.context,
instance, None, "SOFT")
def _test_maintenance_mode(self, find_host, find_aggregate):
real_call_xenapi = self.conn._session.call_xenapi
instance = self._create_instance(spawn=True)
api_calls = {}
# Record all the xenapi calls, and return a fake list of hosts
# for the host.get_all call
def fake_call_xenapi(method, *args):
api_calls[method] = args
if method == 'host.get_all':
return ['foo', 'bar', 'baz']
return real_call_xenapi(method, *args)
self.stubs.Set(self.conn._session, 'call_xenapi', fake_call_xenapi)
def fake_aggregate_get(context, host, key):
if find_aggregate:
return [{'fake': 'aggregate'}]
else:
return []
self.stubs.Set(self.conn.virtapi, 'aggregate_get_by_host',
fake_aggregate_get)
def fake_host_find(context, session, src, dst):
if find_host:
return 'bar'
else:
raise exception.NoValidHost("I saw this one coming...")
self.stubs.Set(host, '_host_find', fake_host_find)
result = self.conn.host_maintenance_mode('bar', 'on_maintenance')
self.assertEqual(result, 'on_maintenance')
# We expect the VM.pool_migrate call to have been called to
# migrate our instance to the 'bar' host
expected = (instance['uuid'], 'bar', {})
self.assertTrue(api_calls.get('VM.pool_migrate'), expected)
instance = db.instance_get_by_uuid(self.context, instance['uuid'])
self.assertTrue(instance['vm_state'], vm_states.ACTIVE)
self.assertTrue(instance['task_state'], task_states.MIGRATING)
def test_maintenance_mode(self):
self._test_maintenance_mode(True, True)
def test_maintenance_mode_no_host(self):
self.assertRaises(exception.NoValidHost,
self._test_maintenance_mode, False, True)
def test_maintenance_mode_no_aggregate(self):
self.assertRaises(exception.NotFound,
self._test_maintenance_mode, True, False)
def test_session_virtapi(self):
was = {'called': False}
def fake_aggregate_get_by_host(self, *args, **kwargs):
was['called'] = True
raise Exception()
self.stubs.Set(self.conn._session._virtapi, "aggregate_get_by_host",
fake_aggregate_get_by_host)
self.stubs.Set(self.conn._session, "is_slave", True)
try:
self.conn._session._get_host_uuid()
except Exception:
pass
self.assertTrue(was['called'])
def test_per_instance_usage_running(self):
instance = self._create_instance(spawn=True)
instance_type = instance_types.get_instance_type(3)
expected = {instance['uuid']: {'memory_mb': instance_type['memory_mb'],
'uuid': instance['uuid']}}
actual = self.conn.get_per_instance_usage()
self.assertEqual(expected, actual)
# Paused instances still consume resources:
self.conn.pause(instance)
actual = self.conn.get_per_instance_usage()
self.assertEqual(expected, actual)
def test_per_instance_usage_suspended(self):
# Suspended instances do not consume memory:
instance = self._create_instance(spawn=True)
self.conn.suspend(instance)
actual = self.conn.get_per_instance_usage()
self.assertEqual({}, actual)
def test_per_instance_usage_halted(self):
instance = self._create_instance(spawn=True)
self.conn.power_off(instance)
actual = self.conn.get_per_instance_usage()
self.assertEqual({}, actual)
def _create_instance(self, instance_id=1, spawn=True):
"""Creates and spawns a test instance."""
instance_values = {
'id': instance_id,
'uuid': '00000000-0000-0000-0000-00000000000%d' % instance_id,
'display_name': 'host-%d' % instance_id,
'project_id': self.project_id,
'user_id': self.user_id,
'image_ref': 1,
'kernel_id': 2,
'ramdisk_id': 3,
'root_gb': 20,
'instance_type_id': '3', # m1.large
'os_type': 'linux',
'vm_mode': 'hvm',
'architecture': 'x86-64'}
instance = create_instance_with_system_metadata(self.context,
instance_values)
network_info = fake_network.fake_get_instance_nw_info(self.stubs,
spectacular=True)
image_meta = {'id': IMAGE_VHD,
'disk_format': 'vhd'}
if spawn:
self.conn.spawn(self.context, instance, image_meta, [], 'herp',
network_info)
return instance
class XenAPIDiffieHellmanTestCase(test.TestCase):
"""Unit tests for Diffie-Hellman code."""
def setUp(self):
super(XenAPIDiffieHellmanTestCase, self).setUp()
self.alice = agent.SimpleDH()
self.bob = agent.SimpleDH()
def test_shared(self):
alice_pub = self.alice.get_public()
bob_pub = self.bob.get_public()
alice_shared = self.alice.compute_shared(bob_pub)
bob_shared = self.bob.compute_shared(alice_pub)
self.assertEquals(alice_shared, bob_shared)
def _test_encryption(self, message):
enc = self.alice.encrypt(message)
self.assertFalse(enc.endswith('\n'))
dec = self.bob.decrypt(enc)
self.assertEquals(dec, message)
def test_encrypt_simple_message(self):
self._test_encryption('This is a simple message.')
def test_encrypt_message_with_newlines_at_end(self):
self._test_encryption('This message has a newline at the end.\n')
def test_encrypt_many_newlines_at_end(self):
self._test_encryption('Message with lotsa newlines.\n\n\n')
def test_encrypt_newlines_inside_message(self):
self._test_encryption('Message\nwith\ninterior\nnewlines.')
def test_encrypt_with_leading_newlines(self):
self._test_encryption('\n\nMessage with leading newlines.')
def test_encrypt_really_long_message(self):
self._test_encryption(''.join(['abcd' for i in xrange(1024)]))
class XenAPIMigrateInstance(stubs.XenAPITestBase):
"""Unit test for verifying migration-related actions."""
def setUp(self):
super(XenAPIMigrateInstance, self).setUp()
self.flags(xenapi_connection_url='test_url',
xenapi_connection_password='test_pass',
firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
db_fakes.stub_out_db_instance_api(self.stubs)
xenapi_fake.create_network('fake', CONF.flat_network_bridge)
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
self.instance_values = {'id': 1,
'project_id': self.project_id,
'user_id': self.user_id,
'image_ref': 1,
'kernel_id': None,
'ramdisk_id': None,
'root_gb': 5,
'instance_type_id': '3', # m1.large
'os_type': 'linux',
'architecture': 'x86-64'}
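# Fake migration record consumed by the finish_migration and
# finish_revert_migration tests below.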
migration_values = {
'source_compute': 'nova-compute',
'dest_compute': 'nova-compute',
'dest_host': '10.127.5.114',
'status': 'post-migrating',
'instance_uuid': '15f23e6a-cc6e-4d22-b651-d9bdaac316f7',
'old_instance_type_id': 5,
'new_instance_type_id': 1
}
self.migration = db.migration_create(
context.get_admin_context(), migration_values)
fake_utils.stub_out_utils_execute(self.stubs)
stubs.stub_out_migration_methods(self.stubs)
stubs.stubout_get_this_vm_uuid(self.stubs)
def fake_inject_instance_metadata(self, instance, vm):
pass
self.stubs.Set(vmops.VMOps, 'inject_instance_metadata',
fake_inject_instance_metadata)
def test_resize_xenserver_6(self):
instance = db.instance_create(self.context, self.instance_values)
called = {'resize': False}
def fake_vdi_resize(*args, **kwargs):
called['resize'] = True
self.stubs.Set(stubs.FakeSessionForVMTests,
"VDI_resize", fake_vdi_resize)
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests,
product_version=(6, 0, 0),
product_brand='XenServer')
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
vdi_ref = xenapi_fake.create_vdi('hurr', 'fake')
vdi_uuid = xenapi_fake.get_record('VDI', vdi_ref)['uuid']
conn._vmops._resize_instance(instance,
{'uuid': vdi_uuid, 'ref': vdi_ref})
self.assertEqual(called['resize'], True)
def test_resize_xcp(self):
instance = db.instance_create(self.context, self.instance_values)
called = {'resize': False}
def fake_vdi_resize(*args, **kwargs):
called['resize'] = True
self.stubs.Set(stubs.FakeSessionForVMTests,
"VDI_resize", fake_vdi_resize)
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests,
product_version=(1, 4, 99),
product_brand='XCP')
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
vdi_ref = xenapi_fake.create_vdi('hurr', 'fake')
vdi_uuid = xenapi_fake.get_record('VDI', vdi_ref)['uuid']
conn._vmops._resize_instance(instance,
{'uuid': vdi_uuid, 'ref': vdi_ref})
self.assertEqual(called['resize'], True)
def test_migrate_disk_and_power_off(self):
instance = db.instance_create(self.context, self.instance_values)
xenapi_fake.create_vm(instance['name'], 'Running')
instance_type = db.instance_type_get_by_name(self.context, 'm1.large')
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
conn.migrate_disk_and_power_off(self.context, instance,
'127.0.0.1', instance_type, None)
def test_migrate_disk_and_power_off_passes_exceptions(self):
instance = db.instance_create(self.context, self.instance_values)
xenapi_fake.create_vm(instance['name'], 'Running')
instance_type = db.instance_type_get_by_name(self.context, 'm1.large')
def fake_raise(*args, **kwargs):
raise exception.MigrationError(reason='test failure')
self.stubs.Set(vmops.VMOps, "_migrate_vhd", fake_raise)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.MigrationError,
conn.migrate_disk_and_power_off,
self.context, instance,
'127.0.0.1', instance_type, None)
def test_revert_migrate(self):
instance = create_instance_with_system_metadata(self.context,
self.instance_values)
self.called = False
self.fake_vm_start_called = False
self.fake_finish_revert_migration_called = False
def fake_vm_start(*args, **kwargs):
self.fake_vm_start_called = True
def fake_vdi_resize(*args, **kwargs):
self.called = True
def fake_finish_revert_migration(*args, **kwargs):
self.fake_finish_revert_migration_called = True
self.stubs.Set(stubs.FakeSessionForVMTests,
"VDI_resize_online", fake_vdi_resize)
self.stubs.Set(vmops.VMOps, '_start', fake_vm_start)
self.stubs.Set(vmops.VMOps, 'finish_revert_migration',
fake_finish_revert_migration)
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests,
product_version=(4, 0, 0),
product_brand='XenServer')
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
network_info = fake_network.fake_get_instance_nw_info(self.stubs,
spectacular=True)
image_meta = {'id': instance['image_ref'], 'disk_format': 'vhd'}
base = xenapi_fake.create_vdi('hurr', 'fake')
base_uuid = xenapi_fake.get_record('VDI', base)['uuid']
cow = xenapi_fake.create_vdi('durr', 'fake')
cow_uuid = xenapi_fake.get_record('VDI', cow)['uuid']
conn.finish_migration(self.context, self.migration, instance,
dict(base_copy=base_uuid, cow=cow_uuid),
network_info, image_meta, resize_instance=True)
self.assertEqual(self.called, True)
self.assertEqual(self.fake_vm_start_called, True)
conn.finish_revert_migration(instance, network_info)
self.assertEqual(self.fake_finish_revert_migration_called, True)
def test_finish_migrate(self):
instance = create_instance_with_system_metadata(self.context,
self.instance_values)
self.called = False
self.fake_vm_start_called = False
def fake_vm_start(*args, **kwargs):
self.fake_vm_start_called = True
def fake_vdi_resize(*args, **kwargs):
self.called = True
self.stubs.Set(vmops.VMOps, '_start', fake_vm_start)
self.stubs.Set(stubs.FakeSessionForVMTests,
"VDI_resize_online", fake_vdi_resize)
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests,
product_version=(4, 0, 0),
product_brand='XenServer')
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
network_info = fake_network.fake_get_instance_nw_info(self.stubs,
spectacular=True)
image_meta = {'id': instance['image_ref'], 'disk_format': 'vhd'}
conn.finish_migration(self.context, self.migration, instance,
dict(base_copy='hurr', cow='durr'),
network_info, image_meta, resize_instance=True)
self.assertEqual(self.called, True)
self.assertEqual(self.fake_vm_start_called, True)
def test_finish_migrate_no_local_storage(self):
tiny_type = instance_types.get_instance_type_by_name('m1.tiny')
tiny_type_id = tiny_type['id']
self.instance_values.update({'instance_type_id': tiny_type_id,
'root_gb': 0})
instance = create_instance_with_system_metadata(self.context,
self.instance_values)
def fake_vdi_resize(*args, **kwargs):
raise Exception("This shouldn't be called")
self.stubs.Set(stubs.FakeSessionForVMTests,
"VDI_resize_online", fake_vdi_resize)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
network_info = fake_network.fake_get_instance_nw_info(self.stubs,
spectacular=True)
image_meta = {'id': instance['image_ref'], 'disk_format': 'vhd'}
conn.finish_migration(self.context, self.migration, instance,
dict(base_copy='hurr', cow='durr'),
network_info, image_meta, resize_instance=True)
def test_finish_migrate_no_resize_vdi(self):
instance = create_instance_with_system_metadata(self.context,
self.instance_values)
def fake_vdi_resize(*args, **kwargs):
raise Exception("This shouldn't be called")
self.stubs.Set(stubs.FakeSessionForVMTests,
"VDI_resize_online", fake_vdi_resize)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
network_info = fake_network.fake_get_instance_nw_info(self.stubs,
spectacular=True)
# Resize instance would be determined by the compute call
image_meta = {'id': instance['image_ref'], 'disk_format': 'vhd'}
conn.finish_migration(self.context, self.migration, instance,
dict(base_copy='hurr', cow='durr'),
network_info, image_meta, resize_instance=False)
def test_migrate_no_auto_disk_config_no_resize_down(self):
# Resize down should fail when auto_disk_config is not set.
instance_values = self.instance_values
instance_values['root_gb'] = 40
instance_values['auto_disk_config'] = False
instance = db.instance_create(self.context, instance_values)
xenapi_fake.create_vm(instance['name'], 'Running')
instance_type = db.instance_type_get_by_name(self.context, 'm1.small')
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.ResizeError,
conn.migrate_disk_and_power_off,
self.context, instance,
'127.0.0.1', instance_type, None)
class XenAPIImageTypeTestCase(test.TestCase):
"""Test ImageType class."""
def test_to_string(self):
# Can convert from type id to type string.
self.assertEquals(
vm_utils.ImageType.to_string(vm_utils.ImageType.KERNEL),
vm_utils.ImageType.KERNEL_STR)
def _assert_role(self, expected_role, image_type_id):
self.assertEquals(
expected_role,
vm_utils.ImageType.get_role(image_type_id))
def test_get_image_role_kernel(self):
self._assert_role('kernel', vm_utils.ImageType.KERNEL)
def test_get_image_role_ramdisk(self):
self._assert_role('ramdisk', vm_utils.ImageType.RAMDISK)
def test_get_image_role_disk(self):
self._assert_role('root', vm_utils.ImageType.DISK)
def test_get_image_role_disk_raw(self):
self._assert_role('root', vm_utils.ImageType.DISK_RAW)
def test_get_image_role_disk_vhd(self):
self._assert_role('root', vm_utils.ImageType.DISK_VHD)
class XenAPIDetermineDiskImageTestCase(test.TestCase):
"""Unit tests for code that detects the ImageType."""
def assert_disk_type(self, image_meta, expected_disk_type):
actual = vm_utils.determine_disk_image_type(image_meta)
self.assertEqual(expected_disk_type, actual)
def test_machine(self):
image_meta = {'id': 'a', 'disk_format': 'ami'}
self.assert_disk_type(image_meta, vm_utils.ImageType.DISK)
def test_raw(self):
image_meta = {'id': 'a', 'disk_format': 'raw'}
self.assert_disk_type(image_meta, vm_utils.ImageType.DISK_RAW)
def test_vhd(self):
image_meta = {'id': 'a', 'disk_format': 'vhd'}
self.assert_disk_type(image_meta, vm_utils.ImageType.DISK_VHD)
def test_none(self):
image_meta = None
self.assert_disk_type(image_meta, None)
class XenAPIDetermineIsPVTestCase(test.TestCase):
"""Unit tests for code that detects the PV status based on ImageType."""
def assert_pv_status(self, disk_image_type, os_type, expected_pv_status):
session = None
vdi_ref = None
actual = vm_utils.determine_is_pv(session, vdi_ref,
disk_image_type, os_type)
self.assertEqual(expected_pv_status, actual)
def test_windows_vhd(self):
self.assert_pv_status(vm_utils.ImageType.DISK_VHD, 'windows', False)
def test_linux_vhd(self):
self.assert_pv_status(vm_utils.ImageType.DISK_VHD, 'linux', True)
@stub_vm_utils_with_vdi_attached_here
def test_raw(self):
self.assert_pv_status(vm_utils.ImageType.DISK_RAW, 'linux', True)
def test_disk(self):
self.assert_pv_status(vm_utils.ImageType.DISK, None, True)
def test_iso(self):
self.assert_pv_status(vm_utils.ImageType.DISK_ISO, None, False)
@stub_vm_utils_with_vdi_attached_here
def test_none(self):
self.assert_pv_status(None, None, True)
class CompareVersionTestCase(test.TestCase):
def test_less_than(self):
# Test that cmp_version compares a as less than b.
self.assertTrue(vmops.cmp_version('1.2.3.4', '1.2.3.5') < 0)
def test_greater_than(self):
# Test that cmp_version compares a as greater than b.
self.assertTrue(vmops.cmp_version('1.2.3.5', '1.2.3.4') > 0)
def test_equal(self):
# Test that cmp_version compares a as equal to b.
self.assertTrue(vmops.cmp_version('1.2.3.4', '1.2.3.4') == 0)
def test_non_lexical(self):
# Test that cmp_version compares non-lexically.
self.assertTrue(vmops.cmp_version('1.2.3.10', '1.2.3.4') > 0)
def test_length(self):
# Test that cmp_version compares by length as last resort.
self.assertTrue(vmops.cmp_version('1.2.3', '1.2.3.4') < 0)
class XenAPIHostTestCase(stubs.XenAPITestBase):
"""Tests HostState, which holds metrics from XenServer that get
reported back to the Schedulers."""
def setUp(self):
super(XenAPIHostTestCase, self).setUp()
self.flags(xenapi_connection_url='test_url',
xenapi_connection_password='test_pass')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
xenapi_fake.create_local_srs()
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
def test_host_state(self):
stats = self.conn.get_host_stats()
self.assertEquals(stats['disk_total'], 40000)
self.assertEquals(stats['disk_used'], 20000)
self.assertEquals(stats['host_memory_total'], 10)
self.assertEquals(stats['host_memory_overhead'], 20)
self.assertEquals(stats['host_memory_free'], 30)
self.assertEquals(stats['host_memory_free_computed'], 40)
self.assertEquals(stats['hypervisor_hostname'], 'fake-xenhost')
def test_host_state_missing_sr(self):
def fake_safe_find_sr(session):
raise exception.StorageRepositoryNotFound('not there')
self.stubs.Set(vm_utils, 'safe_find_sr', fake_safe_find_sr)
self.assertRaises(exception.StorageRepositoryNotFound,
self.conn.get_host_stats)
def _test_host_action(self, method, action, expected=None):
result = method('host', action)
if not expected:
expected = action
self.assertEqual(result, expected)
def test_host_reboot(self):
self._test_host_action(self.conn.host_power_action, 'reboot')
def test_host_shutdown(self):
self._test_host_action(self.conn.host_power_action, 'shutdown')
def test_host_startup(self):
self.assertRaises(NotImplementedError,
self.conn.host_power_action, 'host', 'startup')
def test_host_maintenance_on(self):
self._test_host_action(self.conn.host_maintenance_mode,
True, 'on_maintenance')
def test_host_maintenance_off(self):
self._test_host_action(self.conn.host_maintenance_mode,
False, 'off_maintenance')
def test_set_enable_host_enable(self):
self._test_host_action(self.conn.set_host_enabled, True, 'enabled')
def test_set_enable_host_disable(self):
self._test_host_action(self.conn.set_host_enabled, False, 'disabled')
def test_get_host_uptime(self):
result = self.conn.get_host_uptime('host')
self.assertEqual(result, 'fake uptime')
def test_supported_instances_is_included_in_host_state(self):
stats = self.conn.get_host_stats()
self.assertTrue('supported_instances' in stats)
def test_supported_instances_is_calculated_by_to_supported_instances(self):
def to_supported_instances(somedata):
self.assertEquals(None, somedata)
return "SOMERETURNVALUE"
self.stubs.Set(host, 'to_supported_instances', to_supported_instances)
stats = self.conn.get_host_stats()
self.assertEquals("SOMERETURNVALUE", stats['supported_instances'])
class ToSupportedInstancesTestCase(test.TestCase):
def test_default_return_value(self):
self.assertEquals([],
host.to_supported_instances(None))
def test_return_value(self):
self.assertEquals([('x86_64', 'xapi', 'xen')],
host.to_supported_instances([u'xen-3.0-x86_64']))
def test_invalid_values_do_not_break(self):
self.assertEquals([('x86_64', 'xapi', 'xen')],
host.to_supported_instances([u'xen-3.0-x86_64', 'spam']))
def test_multiple_values(self):
self.assertEquals(
[
('x86_64', 'xapi', 'xen'),
('x86_32', 'xapi', 'hvm')
],
host.to_supported_instances([u'xen-3.0-x86_64', 'hvm-3.0-x86_32'])
)
class XenAPIAutoDiskConfigTestCase(stubs.XenAPITestBase):
def setUp(self):
super(XenAPIAutoDiskConfigTestCase, self).setUp()
self.flags(xenapi_connection_url='test_url',
xenapi_connection_password='test_pass',
firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.user_id = 'fake'
self.project_id = 'fake'
self.instance_values = {'id': 1,
'project_id': self.project_id,
'user_id': self.user_id,
'image_ref': 1,
'kernel_id': 2,
'ramdisk_id': 3,
'root_gb': 20,
'instance_type_id': '3', # m1.large
'os_type': 'linux',
'architecture': 'x86-64'}
self.context = context.RequestContext(self.user_id, self.project_id)
def fake_create_vbd(session, vm_ref, vdi_ref, userdevice,
vbd_type='disk', read_only=False, bootable=True,
osvol=False):
pass
self.stubs.Set(vm_utils, 'create_vbd', fake_create_vbd)
def assertIsPartitionCalled(self, called):
marker = {"partition_called": False}
def fake_resize_part_and_fs(dev, start, old, new):
marker["partition_called"] = True
self.stubs.Set(vm_utils, "_resize_part_and_fs",
fake_resize_part_and_fs)
ctx = context.RequestContext(self.user_id, self.project_id)
session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass',
fake.FakeVirtAPI())
disk_image_type = vm_utils.ImageType.DISK_VHD
instance = create_instance_with_system_metadata(self.context,
self.instance_values)
vm_ref = xenapi_fake.create_vm(instance['name'], 'Halted')
vdi_ref = xenapi_fake.create_vdi(instance['name'], 'fake')
vdi_uuid = session.call_xenapi('VDI.get_record', vdi_ref)['uuid']
vdis = {'root': {'uuid': vdi_uuid, 'ref': vdi_ref}}
self.conn._vmops._attach_disks(instance, vm_ref, instance['name'],
vdis, disk_image_type)
self.assertEqual(marker["partition_called"], called)
def test_instance_not_auto_disk_config(self):
"""Should not partition unless instance is marked as
auto_disk_config.
"""
self.instance_values['auto_disk_config'] = False
self.assertIsPartitionCalled(False)
@stub_vm_utils_with_vdi_attached_here
def test_instance_auto_disk_config_doesnt_pass_fail_safes(self):
# Should not partition unless fail safes pass.
self.instance_values['auto_disk_config'] = True
def fake_get_partitions(dev):
return [(1, 0, 100, 'ext4'), (2, 100, 200, 'ext4')]
self.stubs.Set(vm_utils, "_get_partitions",
fake_get_partitions)
self.assertIsPartitionCalled(False)
@stub_vm_utils_with_vdi_attached_here
def test_instance_auto_disk_config_passes_fail_safes(self):
"""Should partition if instance is marked as auto_disk_config=True and
virt-layer specific fail-safe checks pass.
"""
self.instance_values['auto_disk_config'] = True
def fake_get_partitions(dev):
return [(1, 0, 100, 'ext4')]
self.stubs.Set(vm_utils, "_get_partitions",
fake_get_partitions)
self.assertIsPartitionCalled(True)
class XenAPIGenerateLocal(stubs.XenAPITestBase):
"""Test generating of local disks, like swap and ephemeral."""
def setUp(self):
super(XenAPIGenerateLocal, self).setUp()
self.flags(xenapi_connection_url='test_url',
xenapi_connection_password='test_pass',
firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
db_fakes.stub_out_db_instance_api(self.stubs)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.user_id = 'fake'
self.project_id = 'fake'
self.instance_values = {'id': 1,
'project_id': self.project_id,
'user_id': self.user_id,
'image_ref': 1,
'kernel_id': 2,
'ramdisk_id': 3,
'root_gb': 20,
'instance_type_id': '3', # m1.large
'os_type': 'linux',
'architecture': 'x86-64'}
self.context = context.RequestContext(self.user_id, self.project_id)
def fake_create_vbd(session, vm_ref, vdi_ref, userdevice,
vbd_type='disk', read_only=False, bootable=True,
osvol=False):
pass
self.stubs.Set(vm_utils, 'create_vbd', fake_create_vbd)
def assertCalled(self, instance):
ctx = context.RequestContext(self.user_id, self.project_id)
session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass',
fake.FakeVirtAPI())
disk_image_type = vm_utils.ImageType.DISK_VHD
vm_ref = xenapi_fake.create_vm(instance['name'], 'Halted')
vdi_ref = xenapi_fake.create_vdi(instance['name'], 'fake')
vdi_uuid = session.call_xenapi('VDI.get_record', vdi_ref)['uuid']
vdis = {'root': {'uuid': vdi_uuid, 'ref': vdi_ref}}
self.called = False
self.conn._vmops._attach_disks(instance, vm_ref, instance['name'],
vdis, disk_image_type)
self.assertTrue(self.called)
def test_generate_swap(self):
# Test swap disk generation.
instance_values = dict(self.instance_values, instance_type_id=5)
instance = create_instance_with_system_metadata(self.context,
instance_values)
def fake_generate_swap(*args, **kwargs):
self.called = True
self.stubs.Set(vm_utils, 'generate_swap', fake_generate_swap)
self.assertCalled(instance)
def test_generate_ephemeral(self):
# Test ephemeral disk generation.
instance_values = dict(self.instance_values, instance_type_id=4)
instance = create_instance_with_system_metadata(self.context,
instance_values)
def fake_generate_ephemeral(*args):
self.called = True
self.stubs.Set(vm_utils, 'generate_ephemeral', fake_generate_ephemeral)
self.assertCalled(instance)
class XenAPIBWCountersTestCase(stubs.XenAPITestBase):
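# Fake VM records keyed by opaque ref: '_vifmap' maps each VIF device
# number to a MAC address and 'domid' is matched against the keys
# returned by the stubbed fetch_bandwidth calls below.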
FAKE_VMS = {'test1:ref': dict(name_label='test1',
other_config=dict(nova_uuid='hash'),
domid='12',
_vifmap={'0': "a:b:c:d...",
'1': "e:f:12:q..."}),
'test2:ref': dict(name_label='test2',
other_config=dict(nova_uuid='hash'),
domid='42',
_vifmap={'0': "a:3:c:d...",
'1': "e:f:42:q..."}),
}
def setUp(self):
super(XenAPIBWCountersTestCase, self).setUp()
self.stubs.Set(vm_utils, 'list_vms',
XenAPIBWCountersTestCase._fake_list_vms)
self.flags(xenapi_connection_url='test_url',
xenapi_connection_password='test_pass',
firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
def _fake_get_vif_device_map(vm_rec):
return vm_rec['_vifmap']
self.stubs.Set(self.conn._vmops, "_get_vif_device_map",
_fake_get_vif_device_map)
@classmethod
def _fake_list_vms(cls, session):
return cls.FAKE_VMS.iteritems()
@classmethod
def _fake_fetch_bandwidth_mt(cls, session):
return {}
@classmethod
def _fake_fetch_bandwidth(cls, session):
return {'42':
{'0': {'bw_in': 21024, 'bw_out': 22048},
'1': {'bw_in': 231337, 'bw_out': 221212121}},
'12':
{'0': {'bw_in': 1024, 'bw_out': 2048},
'1': {'bw_in': 31337, 'bw_out': 21212121}},
}
def test_get_all_bw_counters(self):
instances = [dict(name='test1', uuid='1-2-3'),
dict(name='test2', uuid='4-5-6')]
self.stubs.Set(vm_utils, 'fetch_bandwidth',
XenAPIBWCountersTestCase._fake_fetch_bandwidth)
result = self.conn.get_all_bw_counters(instances)
self.assertEqual(len(result), 4)
self.assertIn(dict(uuid='1-2-3',
mac_address="a:b:c:d...",
bw_in=1024,
bw_out=2048), result)
self.assertIn(dict(uuid='1-2-3',
mac_address="e:f:12:q...",
bw_in=31337,
bw_out=21212121), result)
self.assertIn(dict(uuid='4-5-6',
mac_address="a:3:c:d...",
bw_in=21024,
bw_out=22048), result)
self.assertIn(dict(uuid='4-5-6',
mac_address="e:f:42:q...",
bw_in=231337,
bw_out=221212121), result)
def test_get_all_bw_counters_in_failure_case(self):
"""Test that get_all_bw_conters returns an empty list when
no data returned from Xenserver. c.f. bug #910045.
"""
instances = [dict(name='instance-0001', uuid='1-2-3-4-5')]
self.stubs.Set(vm_utils, 'fetch_bandwidth',
XenAPIBWCountersTestCase._fake_fetch_bandwidth_mt)
result = self.conn.get_all_bw_counters(instances)
self.assertEqual(result, [])
# TODO(salvatore-orlando): this class and
# nova.tests.test_libvirt.IPTablesFirewallDriverTestCase share a lot of code.
# Consider abstracting common code in a base class for firewall driver testing.
class XenAPIDom0IptablesFirewallTestCase(stubs.XenAPITestBase):
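# Canned iptables-save/ip6tables-save output used as the starting
# firewall state; _validate_security_group() later checks that these
# pre-existing rules survive alongside the driver-generated ones.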
_in_rules = [
'# Generated by iptables-save v1.4.10 on Sat Feb 19 00:03:19 2011',
'*nat',
':PREROUTING ACCEPT [1170:189210]',
':INPUT ACCEPT [844:71028]',
':OUTPUT ACCEPT [5149:405186]',
':POSTROUTING ACCEPT [5063:386098]',
'# Completed on Mon Dec 6 11:54:13 2010',
'# Generated by iptables-save v1.4.4 on Mon Dec 6 11:54:13 2010',
'*mangle',
':INPUT ACCEPT [969615:281627771]',
':FORWARD ACCEPT [0:0]',
':OUTPUT ACCEPT [915599:63811649]',
':nova-block-ipv4 - [0:0]',
'[0:0] -A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ',
'[0:0] -A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED'
',ESTABLISHED -j ACCEPT ',
'[0:0] -A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ',
'[0:0] -A FORWARD -i virbr0 -o virbr0 -j ACCEPT ',
'[0:0] -A FORWARD -o virbr0 -j REJECT '
'--reject-with icmp-port-unreachable ',
'[0:0] -A FORWARD -i virbr0 -j REJECT '
'--reject-with icmp-port-unreachable ',
'COMMIT',
'# Completed on Mon Dec 6 11:54:13 2010',
'# Generated by iptables-save v1.4.4 on Mon Dec 6 11:54:13 2010',
'*filter',
':INPUT ACCEPT [969615:281627771]',
':FORWARD ACCEPT [0:0]',
':OUTPUT ACCEPT [915599:63811649]',
':nova-block-ipv4 - [0:0]',
'[0:0] -A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ',
'[0:0] -A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED'
',ESTABLISHED -j ACCEPT ',
'[0:0] -A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ',
'[0:0] -A FORWARD -i virbr0 -o virbr0 -j ACCEPT ',
'[0:0] -A FORWARD -o virbr0 -j REJECT '
'--reject-with icmp-port-unreachable ',
'[0:0] -A FORWARD -i virbr0 -j REJECT '
'--reject-with icmp-port-unreachable ',
'COMMIT',
'# Completed on Mon Dec 6 11:54:13 2010',
]
_in6_filter_rules = [
'# Generated by ip6tables-save v1.4.4 on Tue Jan 18 23:47:56 2011',
'*filter',
':INPUT ACCEPT [349155:75810423]',
':FORWARD ACCEPT [0:0]',
':OUTPUT ACCEPT [349256:75777230]',
'COMMIT',
'# Completed on Tue Jan 18 23:47:56 2011',
]
def setUp(self):
super(XenAPIDom0IptablesFirewallTestCase, self).setUp()
self.flags(xenapi_connection_url='test_url',
xenapi_connection_password='test_pass',
instance_name_template='%d',
firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
xenapi_fake.create_local_srs()
xenapi_fake.create_local_pifs()
self.user_id = 'mappin'
self.project_id = 'fake'
stubs.stubout_session(self.stubs, stubs.FakeSessionForFirewallTests,
test_case=self)
self.context = context.RequestContext(self.user_id, self.project_id)
self.network = importutils.import_object(CONF.network_manager)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.fw = self.conn._vmops.firewall_driver
def _create_instance_ref(self):
return db.instance_create(self.context,
{'user_id': self.user_id,
'project_id': self.project_id,
'instance_type_id': 1})
def _create_test_security_group(self):
admin_ctxt = context.get_admin_context()
secgroup = db.security_group_create(admin_ctxt,
{'user_id': self.user_id,
'project_id': self.project_id,
'name': 'testgroup',
'description': 'test group'})
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'protocol': 'icmp',
'from_port': -1,
'to_port': -1,
'cidr': '192.168.11.0/24'})
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'protocol': 'icmp',
'from_port': 8,
'to_port': -1,
'cidr': '192.168.11.0/24'})
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'protocol': 'tcp',
'from_port': 80,
'to_port': 81,
'cidr': '192.168.10.0/24'})
return secgroup
def _validate_security_group(self):
in_rules = filter(lambda l: not l.startswith('#'),
self._in_rules)
for rule in in_rules:
if 'nova' not in rule:
self.assertTrue(rule in self._out_rules,
'Rule went missing: %s' % rule)
instance_chain = None
for rule in self._out_rules:
# This is pretty crude, but it'll do for now
# last two octets change
if re.search('-d 192.168.[0-9]{1,3}.[0-9]{1,3} -j', rule):
instance_chain = rule.split(' ')[-1]
break
self.assertTrue(instance_chain, "The instance chain wasn't added")
security_group_chain = None
for rule in self._out_rules:
# This is pretty crude, but it'll do for now
if '-A %s -j' % instance_chain in rule:
security_group_chain = rule.split(' ')[-1]
break
self.assertTrue(security_group_chain,
"The security group chain wasn't added")
regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p icmp'
' -s 192.168.11.0/24')
self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
"ICMP acceptance rule wasn't added")
regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p icmp -m icmp'
' --icmp-type 8 -s 192.168.11.0/24')
self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
"ICMP Echo Request acceptance rule wasn't added")
regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p tcp --dport 80:81'
' -s 192.168.10.0/24')
self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
"TCP port 80/81 acceptance rule wasn't added")
def test_static_filters(self):
instance_ref = self._create_instance_ref()
src_instance_ref = self._create_instance_ref()
admin_ctxt = context.get_admin_context()
secgroup = self._create_test_security_group()
src_secgroup = db.security_group_create(admin_ctxt,
{'user_id': self.user_id,
'project_id': self.project_id,
'name': 'testsourcegroup',
'description': 'src group'})
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'protocol': 'tcp',
'from_port': 80,
'to_port': 81,
'group_id': src_secgroup['id']})
db.instance_add_security_group(admin_ctxt, instance_ref['uuid'],
secgroup['id'])
db.instance_add_security_group(admin_ctxt, src_instance_ref['uuid'],
src_secgroup['id'])
instance_ref = db.instance_get(admin_ctxt, instance_ref['id'])
src_instance_ref = db.instance_get(admin_ctxt, src_instance_ref['id'])
network_model = fake_network.fake_get_instance_nw_info(self.stubs,
1, spectacular=True)
from nova.compute import utils as compute_utils
self.stubs.Set(compute_utils, 'get_nw_info_for_instance',
lambda instance: network_model)
network_info = network_model.legacy()
self.fw.prepare_instance_filter(instance_ref, network_info)
self.fw.apply_instance_filter(instance_ref, network_info)
self._validate_security_group()
# Extra test for TCP acceptance rules
for ip in network_model.fixed_ips():
if ip['version'] != 4:
continue
regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p tcp'
' --dport 80:81 -s %s' % ip['address'])
self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
"TCP port 80/81 acceptance rule wasn't added")
db.instance_destroy(admin_ctxt, instance_ref['uuid'])
def test_filters_for_instance_with_ip_v6(self):
self.flags(use_ipv6=True)
network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1)
rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info)
self.assertEquals(len(rulesv4), 2)
self.assertEquals(len(rulesv6), 1)
def test_filters_for_instance_without_ip_v6(self):
self.flags(use_ipv6=False)
network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1)
rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info)
self.assertEquals(len(rulesv4), 2)
self.assertEquals(len(rulesv6), 0)
def test_multinic_iptables(self):
ipv4_rules_per_addr = 1
ipv4_addr_per_network = 2
ipv6_rules_per_addr = 1
ipv6_addr_per_network = 1
networks_count = 5
instance_ref = self._create_instance_ref()
_get_instance_nw_info = fake_network.fake_get_instance_nw_info
network_info = _get_instance_nw_info(self.stubs,
networks_count,
ipv4_addr_per_network)
ipv4_len = len(self.fw.iptables.ipv4['filter'].rules)
ipv6_len = len(self.fw.iptables.ipv6['filter'].rules)
inst_ipv4, inst_ipv6 = self.fw.instance_rules(instance_ref,
network_info)
self.fw.prepare_instance_filter(instance_ref, network_info)
ipv4 = self.fw.iptables.ipv4['filter'].rules
ipv6 = self.fw.iptables.ipv6['filter'].rules
ipv4_network_rules = len(ipv4) - len(inst_ipv4) - ipv4_len
ipv6_network_rules = len(ipv6) - len(inst_ipv6) - ipv6_len
# Extra rules are for the DHCP request
rules = (ipv4_rules_per_addr * ipv4_addr_per_network *
networks_count) + 2
self.assertEquals(ipv4_network_rules, rules)
self.assertEquals(ipv6_network_rules,
ipv6_rules_per_addr * ipv6_addr_per_network * networks_count)
def test_do_refresh_security_group_rules(self):
admin_ctxt = context.get_admin_context()
instance_ref = self._create_instance_ref()
network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1, 1)
secgroup = self._create_test_security_group()
db.instance_add_security_group(admin_ctxt, instance_ref['uuid'],
secgroup['id'])
self.fw.prepare_instance_filter(instance_ref, network_info)
self.fw.instances[instance_ref['id']] = instance_ref
self._validate_security_group()
# add a rule to the security group
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'protocol': 'udp',
'from_port': 200,
'to_port': 299,
'cidr': '192.168.99.0/24'})
# validate the extra rule
self.fw.refresh_security_group_rules(secgroup)
regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p udp --dport 200:299'
' -s 192.168.99.0/24')
self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
"Rules were not updated properly."
"The rule for UDP acceptance is missing")
def test_provider_firewall_rules(self):
# setup basic instance data
instance_ref = self._create_instance_ref()
# FRAGILE: as in libvirt tests
# peeks at how the firewall names chains
chain_name = 'inst-%s' % instance_ref['id']
network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1, 1)
self.fw.prepare_instance_filter(instance_ref, network_info)
self.assertTrue('provider' in self.fw.iptables.ipv4['filter'].chains)
rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
if rule.chain == 'provider']
self.assertEqual(0, len(rules))
admin_ctxt = context.get_admin_context()
# add a rule and send the update message, check for 1 rule
provider_fw0 = db.provider_fw_rule_create(admin_ctxt,
{'protocol': 'tcp',
'cidr': '10.99.99.99/32',
'from_port': 1,
'to_port': 65535})
self.fw.refresh_provider_fw_rules()
rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
if rule.chain == 'provider']
self.assertEqual(1, len(rules))
# Add another, refresh, and make sure number of rules goes to two
provider_fw1 = db.provider_fw_rule_create(admin_ctxt,
{'protocol': 'udp',
'cidr': '10.99.99.99/32',
'from_port': 1,
'to_port': 65535})
self.fw.refresh_provider_fw_rules()
rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
if rule.chain == 'provider']
self.assertEqual(2, len(rules))
# create the instance filter and make sure it has a jump rule
self.fw.prepare_instance_filter(instance_ref, network_info)
self.fw.apply_instance_filter(instance_ref, network_info)
inst_rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
if rule.chain == chain_name]
jump_rules = [rule for rule in inst_rules if '-j' in rule.rule]
provjump_rules = []
# IptablesTable doesn't make rules unique internally
for rule in jump_rules:
if 'provider' in rule.rule and rule not in provjump_rules:
provjump_rules.append(rule)
self.assertEqual(1, len(provjump_rules))
# remove a rule from the db, cast to compute to refresh rule
db.provider_fw_rule_destroy(admin_ctxt, provider_fw1['id'])
self.fw.refresh_provider_fw_rules()
rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
if rule.chain == 'provider']
self.assertEqual(1, len(rules))
class XenAPISRSelectionTestCase(stubs.XenAPITestBase):
"""Unit tests for testing we find the right SR."""
def test_safe_find_sr_raise_exception(self):
# Ensure StorageRepositoryNotFound is raised when the filter is wrong.
self.flags(sr_matching_filter='yadayadayada')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass',
fake.FakeVirtAPI())
self.assertRaises(exception.StorageRepositoryNotFound,
vm_utils.safe_find_sr, session)
def test_safe_find_sr_local_storage(self):
# Ensure the default local-storage is found.
self.flags(sr_matching_filter='other-config:i18n-key=local-storage')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass',
fake.FakeVirtAPI())
host_ref = xenapi_fake.get_all('host')[0]
local_sr = xenapi_fake.create_sr(
name_label='Fake Storage',
type='lvm',
other_config={'i18n-original-value-name_label':
'Local storage',
'i18n-key': 'local-storage'},
host_ref=host_ref)
expected = vm_utils.safe_find_sr(session)
self.assertEqual(local_sr, expected)
def test_safe_find_sr_by_other_criteria(self):
# Ensure the SR is found when using a different filter.
self.flags(sr_matching_filter='other-config:my_fake_sr=true')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass',
fake.FakeVirtAPI())
host_ref = xenapi_fake.get_all('host')[0]
local_sr = xenapi_fake.create_sr(name_label='Fake Storage',
type='lvm',
other_config={'my_fake_sr': 'true'},
host_ref=host_ref)
expected = vm_utils.safe_find_sr(session)
self.assertEqual(local_sr, expected)
def test_safe_find_sr_default(self):
# Ensure the default SR is found regardless of other-config.
self.flags(sr_matching_filter='default-sr:true')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass',
fake.FakeVirtAPI())
pool_ref = xenapi_fake.create_pool('')
expected = vm_utils.safe_find_sr(session)
self.assertEqual(session.call_xenapi('pool.get_default_SR', pool_ref),
expected)
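# Creates nova-compute service records for every host in each
# availability zone given and returns the mapping for the tests to use.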
def _create_service_entries(context, values={'avail_zone1': ['fake_host1',
'fake_host2'],
'avail_zone2': ['fake_host3'], }):
for avail_zone, hosts in values.iteritems():
for host in hosts:
db.service_create(context,
{'host': host,
'binary': 'nova-compute',
'topic': 'compute',
'report_count': 0})
return values
class XenAPIAggregateTestCase(stubs.XenAPITestBase):
"""Unit tests for aggregate operations."""
def setUp(self):
super(XenAPIAggregateTestCase, self).setUp()
self.flags(xenapi_connection_url='http://test_url',
xenapi_connection_username='test_user',
xenapi_connection_password='test_pass',
instance_name_template='%d',
firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver',
host='host',
compute_driver='xenapi.XenAPIDriver',
default_availability_zone='avail_zone1')
self.flags(use_local=True, group='conductor')
host_ref = xenapi_fake.get_all('host')[0]
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.context = context.get_admin_context()
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.compute = importutils.import_object(CONF.compute_manager)
self.api = compute_api.AggregateAPI()
values = {'name': 'test_aggr',
'metadata': {'availability_zone': 'test_zone',
pool_states.POOL_FLAG: 'XenAPI'}}
self.aggr = db.aggregate_create(self.context, values)
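# Metadata describing an active XenAPI pool whose master is this host;
# reused by the add/remove-from-aggregate tests.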
self.fake_metadata = {pool_states.POOL_FLAG: 'XenAPI',
'master_compute': 'host',
'availability_zone': 'fake_zone',
pool_states.KEY: pool_states.ACTIVE,
'host': xenapi_fake.get_record('host',
host_ref)['uuid']}
def test_pool_add_to_aggregate_called_by_driver(self):
calls = []
def pool_add_to_aggregate(context, aggregate, host, slave_info=None):
self.assertEquals("CONTEXT", context)
self.assertEquals("AGGREGATE", aggregate)
self.assertEquals("HOST", host)
self.assertEquals("SLAVEINFO", slave_info)
calls.append(pool_add_to_aggregate)
self.stubs.Set(self.conn._pool,
"add_to_aggregate",
pool_add_to_aggregate)
self.conn.add_to_aggregate("CONTEXT", "AGGREGATE", "HOST",
slave_info="SLAVEINFO")
self.assertTrue(pool_add_to_aggregate in calls)
def test_pool_remove_from_aggregate_called_by_driver(self):
calls = []
def pool_remove_from_aggregate(context, aggregate, host,
slave_info=None):
self.assertEquals("CONTEXT", context)
self.assertEquals("AGGREGATE", aggregate)
self.assertEquals("HOST", host)
self.assertEquals("SLAVEINFO", slave_info)
calls.append(pool_remove_from_aggregate)
self.stubs.Set(self.conn._pool,
"remove_from_aggregate",
pool_remove_from_aggregate)
self.conn.remove_from_aggregate("CONTEXT", "AGGREGATE", "HOST",
slave_info="SLAVEINFO")
self.assertTrue(pool_remove_from_aggregate in calls)
def test_add_to_aggregate_for_first_host_sets_metadata(self):
def fake_init_pool(id, name):
fake_init_pool.called = True
self.stubs.Set(self.conn._pool, "_init_pool", fake_init_pool)
aggregate = self._aggregate_setup()
self.conn._pool.add_to_aggregate(self.context, aggregate, "host")
result = db.aggregate_get(self.context, aggregate['id'])
self.assertTrue(fake_init_pool.called)
self.assertThat(self.fake_metadata,
matchers.DictMatches(result['metadetails']))
def test_join_slave(self):
# Ensure join_slave gets called when the request reaches the master.
def fake_join_slave(id, compute_uuid, host, url, user, password):
fake_join_slave.called = True
self.stubs.Set(self.conn._pool, "_join_slave", fake_join_slave)
aggregate = self._aggregate_setup(hosts=['host', 'host2'],
metadata=self.fake_metadata)
self.conn._pool.add_to_aggregate(self.context, aggregate, "host2",
dict(compute_uuid='fake_uuid',
url='fake_url',
user='fake_user',
passwd='fake_pass',
xenhost_uuid='fake_uuid'))
self.assertTrue(fake_join_slave.called)
def test_add_to_aggregate_first_host(self):
def fake_pool_set_name_label(self, session, pool_ref, name):
fake_pool_set_name_label.called = True
self.stubs.Set(xenapi_fake.SessionBase, "pool_set_name_label",
fake_pool_set_name_label)
self.conn._session.call_xenapi("pool.create", {"name": "asdf"})
values = {"name": 'fake_aggregate',
'metadata': {'availability_zone': 'fake_zone'}}
result = db.aggregate_create(self.context, values)
metadata = {'availability_zone': 'fake_zone',
pool_states.POOL_FLAG: "XenAPI",
pool_states.KEY: pool_states.CREATED}
db.aggregate_metadata_add(self.context, result['id'], metadata)
db.aggregate_host_add(self.context, result['id'], "host")
aggregate = db.aggregate_get(self.context, result['id'])
self.assertEqual(["host"], aggregate['hosts'])
self.assertEqual(metadata, aggregate['metadetails'])
self.conn._pool.add_to_aggregate(self.context, aggregate, "host")
self.assertTrue(fake_pool_set_name_label.called)
def test_remove_from_aggregate_called(self):
def fake_remove_from_aggregate(context, aggregate, host):
fake_remove_from_aggregate.called = True
self.stubs.Set(self.conn._pool,
"remove_from_aggregate",
fake_remove_from_aggregate)
self.conn.remove_from_aggregate(None, None, None)
self.assertTrue(fake_remove_from_aggregate.called)
def test_remove_from_empty_aggregate(self):
result = self._aggregate_setup()
self.assertRaises(exception.InvalidAggregateAction,
self.conn._pool.remove_from_aggregate,
self.context, result, "test_host")
def test_remove_slave(self):
# Ensure eject slave gets called.
def fake_eject_slave(id, compute_uuid, host_uuid):
fake_eject_slave.called = True
self.stubs.Set(self.conn._pool, "_eject_slave", fake_eject_slave)
self.fake_metadata['host2'] = 'fake_host2_uuid'
aggregate = self._aggregate_setup(hosts=['host', 'host2'],
metadata=self.fake_metadata, aggr_state=pool_states.ACTIVE)
self.conn._pool.remove_from_aggregate(self.context, aggregate, "host2")
self.assertTrue(fake_eject_slave.called)
def test_remove_master_solo(self):
# Ensure metadata is cleared after removal.
def fake_clear_pool(id):
fake_clear_pool.called = True
self.stubs.Set(self.conn._pool, "_clear_pool", fake_clear_pool)
aggregate = self._aggregate_setup(metadata=self.fake_metadata)
self.conn._pool.remove_from_aggregate(self.context, aggregate, "host")
result = db.aggregate_get(self.context, aggregate['id'])
self.assertTrue(fake_clear_pool.called)
self.assertThat({'availability_zone': 'fake_zone',
pool_states.POOL_FLAG: 'XenAPI',
pool_states.KEY: pool_states.ACTIVE},
matchers.DictMatches(result['metadetails']))
def test_remove_master_non_empty_pool(self):
# Ensure InvalidAggregateAction is raised when removing the master
# from a pool that still has other hosts.
aggregate = self._aggregate_setup(hosts=['host', 'host2'],
metadata=self.fake_metadata)
self.assertRaises(exception.InvalidAggregateAction,
self.conn._pool.remove_from_aggregate,
self.context, aggregate, "host")
def _aggregate_setup(self, aggr_name='fake_aggregate',
aggr_zone='fake_zone',
aggr_state=pool_states.CREATED,
hosts=['host'], metadata=None):
values = {"name": aggr_name}
result = db.aggregate_create(self.context, values,
metadata={'availability_zone': aggr_zone})
pool_flag = {pool_states.POOL_FLAG: "XenAPI",
pool_states.KEY: aggr_state}
db.aggregate_metadata_add(self.context, result['id'], pool_flag)
for host in hosts:
db.aggregate_host_add(self.context, result['id'], host)
if metadata:
db.aggregate_metadata_add(self.context, result['id'], metadata)
return db.aggregate_get(self.context, result['id'])
def test_add_host_to_aggregate_invalid_changing_status(self):
"""Ensure InvalidAggregateAction is raised when adding host while
aggregate is not ready."""
aggregate = self._aggregate_setup(aggr_state=pool_states.CHANGING)
self.assertRaises(exception.InvalidAggregateAction,
self.conn.add_to_aggregate, self.context,
aggregate, 'host')
def test_add_host_to_aggregate_invalid_dismissed_status(self):
"""Ensure InvalidAggregateAction is raised when aggregate is
deleted."""
aggregate = self._aggregate_setup(aggr_state=pool_states.DISMISSED)
self.assertRaises(exception.InvalidAggregateAction,
self.conn.add_to_aggregate, self.context,
aggregate, 'fake_host')
def test_add_host_to_aggregate_invalid_error_status(self):
"""Ensure InvalidAggregateAction is raised when aggregate is
in error."""
aggregate = self._aggregate_setup(aggr_state=pool_states.ERROR)
self.assertRaises(exception.InvalidAggregateAction,
self.conn.add_to_aggregate, self.context,
aggregate, 'fake_host')
def test_remove_host_from_aggregate_error(self):
# Ensure we can remove a host from an aggregate even if in error.
values = _create_service_entries(self.context)
fake_zone = values.keys()[0]
aggr = self.api.create_aggregate(self.context,
'fake_aggregate', fake_zone)
# let's mock the fact that the aggregate is ready!
metadata = {pool_states.POOL_FLAG: "XenAPI",
pool_states.KEY: pool_states.ACTIVE}
db.aggregate_metadata_add(self.context, aggr['id'], metadata)
for host in values[fake_zone]:
aggr = self.api.add_host_to_aggregate(self.context,
aggr['id'], host)
# let's mock the fact that the aggregate is in error!
status = {'operational_state': pool_states.ERROR}
expected = self.api.remove_host_from_aggregate(self.context,
aggr['id'],
values[fake_zone][0])
self.assertEqual(len(aggr['hosts']) - 1, len(expected['hosts']))
self.assertEqual(expected['metadata'][pool_states.KEY],
pool_states.ACTIVE)
def test_remove_host_from_aggregate_invalid_dismissed_status(self):
"""Ensure InvalidAggregateAction is raised when aggregate is
deleted."""
aggregate = self._aggregate_setup(aggr_state=pool_states.DISMISSED)
self.assertRaises(exception.InvalidAggregateAction,
self.conn.remove_from_aggregate, self.context,
aggregate, 'fake_host')
def test_remove_host_from_aggregate_invalid_changing_status(self):
"""Ensure InvalidAggregateAction is raised when aggregate is
changing."""
aggregate = self._aggregate_setup(aggr_state=pool_states.CHANGING)
self.assertRaises(exception.InvalidAggregateAction,
self.conn.remove_from_aggregate, self.context,
aggregate, 'fake_host')
def test_add_aggregate_host_raise_err(self):
# Ensure the undo operation works correctly on add.
def fake_driver_add_to_aggregate(context, aggregate, host, **_ignore):
raise exception.AggregateError(
aggregate_id='', action='', reason='')
self.stubs.Set(self.compute.driver, "add_to_aggregate",
fake_driver_add_to_aggregate)
metadata = {pool_states.POOL_FLAG: "XenAPI",
pool_states.KEY: pool_states.ACTIVE}
db.aggregate_metadata_add(self.context, self.aggr['id'], metadata)
db.aggregate_host_add(self.context, self.aggr['id'], 'fake_host')
self.assertRaises(exception.AggregateError,
self.compute.add_aggregate_host,
self.context, "fake_host",
aggregate=jsonutils.to_primitive(self.aggr))
        expected = db.aggregate_get(self.context, self.aggr['id'])
        self.assertEqual(expected['metadetails'][pool_states.KEY],
                          pool_states.ERROR)
        self.assertEqual(expected['hosts'], [])
class MockComputeAPI(object):
def __init__(self):
self._mock_calls = []
def add_aggregate_host(self, ctxt, aggregate,
host_param, host, slave_info):
self._mock_calls.append((
self.add_aggregate_host, ctxt, aggregate,
host_param, host, slave_info))
def remove_aggregate_host(self, ctxt, aggregate_id, host_param,
host, slave_info):
self._mock_calls.append((
self.remove_aggregate_host, ctxt, aggregate_id,
host_param, host, slave_info))
class StubDependencies(object):
"""Stub dependencies for ResourcePool."""
def __init__(self):
self.compute_rpcapi = MockComputeAPI()
def _is_hv_pool(self, *_ignore):
return True
def _get_metadata(self, *_ignore):
return {
pool_states.KEY: {},
'master_compute': 'master'
}
def _create_slave_info(self, *ignore):
return "SLAVE_INFO"
class ResourcePoolWithStubs(StubDependencies, pool.ResourcePool):
"""A ResourcePool, use stub dependencies."""
class HypervisorPoolTestCase(test.TestCase):
fake_aggregate = {
'id': 98,
'hosts': [],
'metadetails': {
'master_compute': 'master',
pool_states.POOL_FLAG: {},
pool_states.KEY: {}
}
}
def test_slave_asks_master_to_add_slave_to_pool(self):
slave = ResourcePoolWithStubs()
slave.add_to_aggregate("CONTEXT", self.fake_aggregate, "slave")
self.assertIn(
(slave.compute_rpcapi.add_aggregate_host,
"CONTEXT", jsonutils.to_primitive(self.fake_aggregate),
"slave", "master", "SLAVE_INFO"),
slave.compute_rpcapi._mock_calls)
def test_slave_asks_master_to_remove_slave_from_pool(self):
slave = ResourcePoolWithStubs()
slave.remove_from_aggregate("CONTEXT", self.fake_aggregate, "slave")
self.assertIn(
(slave.compute_rpcapi.remove_aggregate_host,
"CONTEXT", 98, "slave", "master", "SLAVE_INFO"),
slave.compute_rpcapi._mock_calls)
class SwapXapiHostTestCase(test.TestCase):
def test_swapping(self):
self.assertEquals(
"http://otherserver:8765/somepath",
pool.swap_xapi_host(
"http://someserver:8765/somepath", 'otherserver'))
def test_no_port(self):
self.assertEquals(
"http://otherserver/somepath",
pool.swap_xapi_host(
"http://someserver/somepath", 'otherserver'))
def test_no_path(self):
self.assertEquals(
"http://otherserver",
pool.swap_xapi_host(
"http://someserver", 'otherserver'))
class XenAPILiveMigrateTestCase(stubs.XenAPITestBase):
"""Unit tests for live_migration."""
def setUp(self):
super(XenAPILiveMigrateTestCase, self).setUp()
self.flags(xenapi_connection_url='test_url',
xenapi_connection_password='test_pass',
firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver',
host='host')
db_fakes.stub_out_db_instance_api(self.stubs)
self.context = context.get_admin_context()
xenapi_fake.create_local_pifs()
def test_live_migration_calls_vmops(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
def fake_live_migrate(context, instance_ref, dest, post_method,
recover_method, block_migration, migrate_data):
fake_live_migrate.called = True
self.stubs.Set(self.conn._vmops, "live_migrate", fake_live_migrate)
self.conn.live_migration(None, None, None, None, None)
self.assertTrue(fake_live_migrate.called)
def test_pre_live_migration(self):
# ensure method is present
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.conn.pre_live_migration(None, None, None, None)
def test_post_live_migration_at_destination(self):
# ensure method is present
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.conn.post_live_migration_at_destination(None, None, None, None)
def test_check_can_live_migrate_destination_with_block_migration(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(vm_utils, "safe_find_sr", lambda _x: "asdf")
expected = {'block_migration': True,
'migrate_data': {
'migrate_send_data': "fake_migrate_data",
'destination_sr_ref': 'asdf'
}
}
result = self.conn.check_can_live_migrate_destination(self.context,
{'host': 'host'},
{}, {},
True, False)
self.assertEqual(expected, result)
def test_check_can_live_migrate_destination_block_migration_fails(self):
stubs.stubout_session(self.stubs,
stubs.FakeSessionForFailedMigrateTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.MigrationError,
self.conn.check_can_live_migrate_destination,
self.context, {'host': 'host'},
{}, {},
True, False)
def test_check_can_live_migrate_source_with_block_migrate(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
def fake_generate_vdi_map(destination_sr_ref, _vm_ref):
pass
self.stubs.Set(self.conn._vmops, "_generate_vdi_map",
fake_generate_vdi_map)
def fake_get_vm_opaque_ref(instance):
return "fake_vm"
self.stubs.Set(self.conn._vmops, "_get_vm_opaque_ref",
fake_get_vm_opaque_ref)
dest_check_data = {'block_migration': True,
'migrate_data': {
'destination_sr_ref': None,
'migrate_send_data': None
}}
result = self.conn.check_can_live_migrate_source(self.context,
{'host': 'host'},
dest_check_data)
self.assertEqual(dest_check_data, result)
def test_check_can_live_migrate_source_with_block_migrate_fails(self):
stubs.stubout_session(self.stubs,
stubs.FakeSessionForFailedMigrateTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
def fake_generate_vdi_map(destination_sr_ref, _vm_ref):
pass
self.stubs.Set(self.conn._vmops, "_generate_vdi_map",
fake_generate_vdi_map)
def fake_get_vm_opaque_ref(instance):
return "fake_vm"
self.stubs.Set(self.conn._vmops, "_get_vm_opaque_ref",
fake_get_vm_opaque_ref)
dest_check_data = {'block_migration': True,
'migrate_data': {
'destination_sr_ref': None,
'migrate_send_data': None
}}
self.assertRaises(exception.MigrationError,
self.conn.check_can_live_migrate_source,
self.context,
{'host': 'host'},
dest_check_data)
def test_check_can_live_migrate_works(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
class fake_aggregate:
def __init__(self):
self.metadetails = {"host": "test_host_uuid"}
def fake_aggregate_get_by_host(context, host, key=None):
self.assertEqual(CONF.host, host)
return [fake_aggregate()]
self.stubs.Set(db, "aggregate_get_by_host",
fake_aggregate_get_by_host)
self.conn.check_can_live_migrate_destination(self.context,
{'host': 'host'}, False, False)
def test_check_can_live_migrate_fails(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
class fake_aggregate:
def __init__(self):
self.metadetails = {"dest_other": "test_host_uuid"}
def fake_aggregate_get_by_host(context, host, key=None):
self.assertEqual(CONF.host, host)
return [fake_aggregate()]
self.stubs.Set(db, "aggregate_get_by_host",
fake_aggregate_get_by_host)
self.assertRaises(exception.MigrationError,
self.conn.check_can_live_migrate_destination,
self.context, {'host': 'host'}, None, None)
def test_live_migration(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
def fake_get_vm_opaque_ref(instance):
return "fake_vm"
self.stubs.Set(self.conn._vmops, "_get_vm_opaque_ref",
fake_get_vm_opaque_ref)
def fake_get_host_opaque_ref(context, destination_hostname):
return "fake_host"
self.stubs.Set(self.conn._vmops, "_get_host_opaque_ref",
fake_get_host_opaque_ref)
def post_method(context, instance, destination_hostname,
block_migration):
post_method.called = True
self.conn.live_migration(self.conn, None, None, post_method, None)
self.assertTrue(post_method.called, "post_method.called")
def test_live_migration_on_failure(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
def fake_get_vm_opaque_ref(instance):
return "fake_vm"
self.stubs.Set(self.conn._vmops, "_get_vm_opaque_ref",
fake_get_vm_opaque_ref)
def fake_get_host_opaque_ref(context, destination_hostname):
return "fake_host"
self.stubs.Set(self.conn._vmops, "_get_host_opaque_ref",
fake_get_host_opaque_ref)
def fake_call_xenapi(*args):
raise NotImplementedError()
self.stubs.Set(self.conn._vmops._session, "call_xenapi",
fake_call_xenapi)
def recover_method(context, instance, destination_hostname,
block_migration):
recover_method.called = True
self.assertRaises(NotImplementedError, self.conn.live_migration,
self.conn, None, None, None, recover_method)
self.assertTrue(recover_method.called, "recover_method.called")
def test_live_migration_calls_post_migration(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
def fake_generate_vdi_map(destination_sr_ref, _vm_ref):
pass
self.stubs.Set(self.conn._vmops, "_generate_vdi_map",
fake_generate_vdi_map)
def fake_get_vm_opaque_ref(instance):
return "fake_vm"
self.stubs.Set(self.conn._vmops, "_get_vm_opaque_ref",
fake_get_vm_opaque_ref)
def post_method(context, instance, destination_hostname,
block_migration):
post_method.called = True
# pass block_migration = True and migrate data
migrate_data = {"destination_sr_ref": "foo",
"migrate_send_data": "bar"}
self.conn.live_migration(self.conn, None, None, post_method, None,
True, migrate_data)
self.assertTrue(post_method.called, "post_method.called")
def test_live_migration_with_block_migration_raises_invalid_param(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
def fake_get_vm_opaque_ref(instance):
return "fake_vm"
self.stubs.Set(self.conn._vmops, "_get_vm_opaque_ref",
fake_get_vm_opaque_ref)
def recover_method(context, instance, destination_hostname,
block_migration):
recover_method.called = True
# pass block_migration = True and no migrate data
self.assertRaises(exception.InvalidParameterValue,
self.conn.live_migration, self.conn,
None, None, None, recover_method, True, None)
self.assertTrue(recover_method.called, "recover_method.called")
def test_live_migration_with_block_migration_fails_migrate_send(self):
stubs.stubout_session(self.stubs,
stubs.FakeSessionForFailedMigrateTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
def fake_get_vm_opaque_ref(instance):
return "fake_vm"
self.stubs.Set(self.conn._vmops, "_get_vm_opaque_ref",
fake_get_vm_opaque_ref)
def fake_generate_vdi_map(destination_sr_ref, _vm_ref):
pass
self.stubs.Set(self.conn._vmops, "_generate_vdi_map",
fake_generate_vdi_map)
def recover_method(context, instance, destination_hostname,
block_migration):
recover_method.called = True
# pass block_migration = True and migrate data
migrate_data = dict(destination_sr_ref='foo', migrate_send_data='bar')
self.assertRaises(exception.MigrationError,
self.conn.live_migration, self.conn,
None, None, None, recover_method, True, migrate_data)
self.assertTrue(recover_method.called, "recover_method.called")
def test_live_migrate_block_migration_xapi_call_parameters(self):
fake_vdi_map = object()
class Session(xenapi_fake.SessionBase):
def VM_migrate_send(self_, session, vmref, migrate_data, islive,
vdi_map, vif_map, options):
self.assertEquals('SOMEDATA', migrate_data)
self.assertEquals(fake_vdi_map, vdi_map)
stubs.stubout_session(self.stubs, Session)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
def fake_get_vm_opaque_ref(instance):
return "fake_vm"
self.stubs.Set(conn._vmops, "_get_vm_opaque_ref",
fake_get_vm_opaque_ref)
def fake_generate_vdi_map(destination_sr_ref, _vm_ref):
return fake_vdi_map
self.stubs.Set(conn._vmops, "_generate_vdi_map",
fake_generate_vdi_map)
def dummy_callback(*args, **kwargs):
pass
conn.live_migration(
self.context, instance_ref=dict(name='ignore'), dest=None,
post_method=dummy_callback, recover_method=dummy_callback,
block_migration="SOMEDATA",
migrate_data=dict(migrate_send_data='SOMEDATA',
destination_sr_ref="TARGET_SR_OPAQUE_REF"))
def test_generate_vdi_map(self):
stubs.stubout_session(self.stubs, xenapi_fake.SessionBase)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
vm_ref = "fake_vm_ref"
def fake_find_sr(_session):
self.assertEquals(conn._session, _session)
return "source_sr_ref"
self.stubs.Set(vm_utils, "safe_find_sr", fake_find_sr)
def fake_get_instance_vdis_for_sr(_session, _vm_ref, _sr_ref):
self.assertEquals(conn._session, _session)
self.assertEquals(vm_ref, _vm_ref)
self.assertEquals("source_sr_ref", _sr_ref)
return ["vdi0", "vdi1"]
self.stubs.Set(vm_utils, "get_instance_vdis_for_sr",
fake_get_instance_vdis_for_sr)
result = conn._vmops._generate_vdi_map("dest_sr_ref", vm_ref)
self.assertEquals({"vdi0": "dest_sr_ref",
"vdi1": "dest_sr_ref"}, result)
class XenAPIInjectMetadataTestCase(stubs.XenAPITestBase):
def setUp(self):
super(XenAPIInjectMetadataTestCase, self).setUp()
self.flags(xenapi_connection_url='test_url',
xenapi_connection_password='test_pass',
firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.xenstore = dict(persist={}, ephem={})
def fake_get_vm_opaque_ref(inst, instance):
self.assertEqual(instance, 'instance')
return 'vm_ref'
def fake_add_to_param_xenstore(inst, vm_ref, key, val):
self.assertEqual(vm_ref, 'vm_ref')
self.xenstore['persist'][key] = val
def fake_remove_from_param_xenstore(inst, vm_ref, key):
self.assertEqual(vm_ref, 'vm_ref')
if key in self.xenstore['persist']:
del self.xenstore['persist'][key]
def fake_write_to_xenstore(inst, instance, path, value, vm_ref=None):
self.assertEqual(instance, 'instance')
self.assertEqual(vm_ref, 'vm_ref')
self.xenstore['ephem'][path] = jsonutils.dumps(value)
def fake_delete_from_xenstore(inst, instance, path, vm_ref=None):
self.assertEqual(instance, 'instance')
self.assertEqual(vm_ref, 'vm_ref')
if path in self.xenstore['ephem']:
del self.xenstore['ephem'][path]
self.stubs.Set(vmops.VMOps, '_get_vm_opaque_ref',
fake_get_vm_opaque_ref)
self.stubs.Set(vmops.VMOps, '_add_to_param_xenstore',
fake_add_to_param_xenstore)
self.stubs.Set(vmops.VMOps, '_remove_from_param_xenstore',
fake_remove_from_param_xenstore)
self.stubs.Set(vmops.VMOps, '_write_to_xenstore',
fake_write_to_xenstore)
self.stubs.Set(vmops.VMOps, '_delete_from_xenstore',
fake_delete_from_xenstore)
def test_inject_instance_metadata(self):
# Add some system_metadata to ensure it doesn't get added
# to xenstore
instance = dict(metadata=[{'key': 'a', 'value': 1},
{'key': 'b', 'value': 2},
{'key': 'c', 'value': 3},
# Check xenstore key sanitizing
{'key': 'hi.there', 'value': 4},
{'key': 'hi!t.e/e', 'value': 5}],
# Check xenstore key sanitizing
system_metadata=[{'key': 'sys_a', 'value': 1},
{'key': 'sys_b', 'value': 2},
{'key': 'sys_c', 'value': 3}])
self.conn._vmops.inject_instance_metadata(instance, 'vm_ref')
self.assertEqual(self.xenstore, {
'persist': {
'vm-data/user-metadata/a': '1',
'vm-data/user-metadata/b': '2',
'vm-data/user-metadata/c': '3',
'vm-data/user-metadata/hi_there': '4',
'vm-data/user-metadata/hi_t_e_e': '5',
},
'ephem': {},
})
def test_change_instance_metadata_add(self):
# Test XenStore key sanitizing here, too.
diff = {'test.key': ['+', 4]}
self.xenstore = {
'persist': {
'vm-data/user-metadata/a': '1',
'vm-data/user-metadata/b': '2',
'vm-data/user-metadata/c': '3',
},
'ephem': {
'vm-data/user-metadata/a': '1',
'vm-data/user-metadata/b': '2',
'vm-data/user-metadata/c': '3',
},
}
self.conn._vmops.change_instance_metadata('instance', diff)
self.assertEqual(self.xenstore, {
'persist': {
'vm-data/user-metadata/a': '1',
'vm-data/user-metadata/b': '2',
'vm-data/user-metadata/c': '3',
'vm-data/user-metadata/test_key': '4',
},
'ephem': {
'vm-data/user-metadata/a': '1',
'vm-data/user-metadata/b': '2',
'vm-data/user-metadata/c': '3',
'vm-data/user-metadata/test_key': '4',
},
})
def test_change_instance_metadata_update(self):
diff = dict(b=['+', 4])
self.xenstore = {
'persist': {
'vm-data/user-metadata/a': '1',
'vm-data/user-metadata/b': '2',
'vm-data/user-metadata/c': '3',
},
'ephem': {
'vm-data/user-metadata/a': '1',
'vm-data/user-metadata/b': '2',
'vm-data/user-metadata/c': '3',
},
}
self.conn._vmops.change_instance_metadata('instance', diff)
self.assertEqual(self.xenstore, {
'persist': {
'vm-data/user-metadata/a': '1',
'vm-data/user-metadata/b': '4',
'vm-data/user-metadata/c': '3',
},
'ephem': {
'vm-data/user-metadata/a': '1',
'vm-data/user-metadata/b': '4',
'vm-data/user-metadata/c': '3',
},
})
def test_change_instance_metadata_delete(self):
diff = dict(b=['-'])
self.xenstore = {
'persist': {
'vm-data/user-metadata/a': '1',
'vm-data/user-metadata/b': '2',
'vm-data/user-metadata/c': '3',
},
'ephem': {
'vm-data/user-metadata/a': '1',
'vm-data/user-metadata/b': '2',
'vm-data/user-metadata/c': '3',
},
}
self.conn._vmops.change_instance_metadata('instance', diff)
self.assertEqual(self.xenstore, {
'persist': {
'vm-data/user-metadata/a': '1',
'vm-data/user-metadata/c': '3',
},
'ephem': {
'vm-data/user-metadata/a': '1',
'vm-data/user-metadata/c': '3',
},
})
class VMOpsTestCase(test.TestCase):
def _get_mock_session(self, product_brand, product_version):
class Mock(object):
pass
mock_session = Mock()
mock_session.product_brand = product_brand
mock_session.product_version = product_version
return mock_session
def test_check_resize_func_name_defaults_to_VDI_resize(self):
session = self._get_mock_session(None, None)
ops = vmops.VMOps(session, fake.FakeVirtAPI())
self.assertEquals(
'VDI.resize',
ops.check_resize_func_name())
def _test_finish_revert_migration_after_crash(self, backup_made, new_made):
instance = {'name': 'foo',
'task_state': task_states.RESIZE_MIGRATING}
session = self._get_mock_session(None, None)
ops = vmops.VMOps(session, fake.FakeVirtAPI())
self.mox.StubOutWithMock(vm_utils, 'lookup')
self.mox.StubOutWithMock(ops, '_destroy')
self.mox.StubOutWithMock(vm_utils, 'set_vm_name_label')
self.mox.StubOutWithMock(ops, '_attach_mapped_block_devices')
self.mox.StubOutWithMock(ops, '_start')
vm_utils.lookup(session, 'foo-orig').AndReturn(
backup_made and 'foo' or None)
vm_utils.lookup(session, 'foo').AndReturn(
(not backup_made or new_made) and 'foo' or None)
if backup_made:
if new_made:
ops._destroy(instance, 'foo')
vm_utils.set_vm_name_label(session, 'foo', 'foo')
ops._attach_mapped_block_devices(instance, [])
ops._start(instance, 'foo')
self.mox.ReplayAll()
ops.finish_revert_migration(instance, [])
def test_finish_revert_migration_after_crash(self):
self._test_finish_revert_migration_after_crash(True, True)
def test_finish_revert_migration_after_crash_before_new(self):
self._test_finish_revert_migration_after_crash(True, False)
def test_finish_revert_migration_after_crash_before_backup(self):
self._test_finish_revert_migration_after_crash(False, False)
class XenAPISessionTestCase(test.TestCase):
def _get_mock_xapisession(self, software_version):
class XcpXapiSession(xenapi_conn.XenAPISession):
def __init__(_ignore):
"Skip the superclass's dirty init"
def _get_software_version(_ignore):
return software_version
return XcpXapiSession()
def test_get_product_version_product_brand_does_not_fail(self):
session = self._get_mock_xapisession({
'build_number': '0',
'date': '2012-08-03',
'hostname': 'komainu',
'linux': '3.2.0-27-generic',
'network_backend': 'bridge',
'platform_name': 'XCP_Kronos',
'platform_version': '1.6.0',
'xapi': '1.3',
'xen': '4.1.2',
'xencenter_max': '1.10',
'xencenter_min': '1.10'
})
self.assertEquals(
(None, None),
session._get_product_version_and_brand()
)
def test_get_product_version_product_brand_xs_6(self):
session = self._get_mock_xapisession({
'product_brand': 'XenServer',
'product_version': '6.0.50'
})
self.assertEquals(
((6, 0, 50), 'XenServer'),
session._get_product_version_and_brand()
)
| {
"content_hash": "7e08bf4b746e398ffc8c9b989d4ed60d",
"timestamp": "",
"source": "github",
"line_count": 3238,
"max_line_length": 79,
"avg_line_length": 42.69456454601606,
"alnum_prop": 0.554739773590365,
"repo_name": "yrobla/nova",
"id": "8141e7527dbae1ac0a4fea135820b046a4e41670",
"size": "138910",
"binary": false,
"copies": "1",
"ref": "refs/heads/debian/unstable",
"path": "nova/tests/test_xenapi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "16002"
},
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "9162801"
},
{
"name": "Shell",
"bytes": "17067"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import biplist
import os.path
application = defines.get('app', 'sireum-v3/distros/idea/mac/Sireum.app')
appname = os.path.basename(application)
def icon_from_app(app_path):
plist_path = os.path.join(app_path, 'Contents', 'Info.plist')
plist = biplist.readPlist(plist_path)
icon_name = plist['CFBundleIconFile']
icon_root, icon_ext = os.path.splitext(icon_name)
if not icon_ext:
icon_ext = '.icns'
icon_name = icon_root + icon_ext
return os.path.join(app_path, 'Contents', 'Resources', icon_name)
# .. Basics ....................................................................
format = defines.get('format', 'UDZO')
size = defines.get('size', '8000M')
# Files to include
files = [application]
# Symlinks to create
symlinks = {'Applications': '/Applications'}
# Volume icon
#
# You can either define icon, in which case that icon file will be copied to the
# image, *or* you can define badge_icon, in which case the icon file you specify
# will be used to badge the system's Removable Disk icon
#
# icon = '/path/to/icon.icns'
badge_icon = icon_from_app(application)
# Where to put the icons
icon_locations = {
appname: (0, 90),
'Applications': (256, 90)
}
# .. Window configuration ......................................................
# Background
#
# This is a STRING containing any of the following:
#
# #3344ff - web-style RGB color
# #34f - web-style RGB color, short form (#34f == #3344ff)
# rgb(1,0,0) - RGB color, each value is between 0 and 1
# hsl(120,1,.5) - HSL (hue saturation lightness) color
# hwb(300,0,0) - HWB (hue whiteness blackness) color
# cmyk(0,1,0,0) - CMYK color
# goldenrod - X11/SVG named color
# builtin-arrow - A simple built-in background with a blue arrow
# /foo/bar/baz.png - The path to an image file
#
# The hue component in hsl() and hwb() may include a unit; it defaults to
# degrees ('deg'), but also supports radians ('rad') and gradians ('grad'
# or 'gon').
#
# Other color components may be expressed either in the range 0 to 1, or
# as percentages (e.g. 60% is equivalent to 0.6).
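# For example (illustrative alternatives only; the setting below defaults to an image path):
#   background = 'builtin-arrow'
#   background = '#3344ff'
#   background = 'hsl(120,1,.5)'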
background = defines.get('background', '../resources/distro/images/dmg-background.png')
show_status_bar = False
show_tab_view = False
show_toolbar = False
show_pathbar = False
show_sidebar = False
sidebar_width = 180
# Window position in ((x, y), (w, h)) format
window_rect = ((0, 100000), (500, 220))
# Select the default view; must be one of
#
# 'icon-view'
# 'list-view'
# 'column-view'
# 'coverflow'
#
default_view = 'icon-view'
# General view configuration
show_icon_preview = False
# Set these to True to force inclusion of icon/list view settings (otherwise
# we only include settings for the default view)
include_icon_view_settings = 'auto'
include_list_view_settings = 'auto'
# .. Icon view configuration ...................................................
arrange_by = None
grid_offset = (0, 0)
grid_spacing = 100
scroll_position = (0, 0)
label_pos = 'bottom' # or 'right'
text_size = 16
icon_size = 64
# .. List view configuration ...................................................
# Column names are as follows:
#
# name
# date-modified
# date-created
# date-added
# date-last-opened
# size
# kind
# label
# version
# comments
#
list_icon_size = 16
list_text_size = 12
list_scroll_position = (0, 0)
list_sort_by = 'name'
list_use_relative_dates = True
list_calculate_all_sizes = False
list_columns = ('name', 'date-modified', 'size', 'kind', 'date-added')
list_column_widths = {
'name': 300,
'date-modified': 181,
'date-created': 181,
'date-added': 181,
'date-last-opened': 181,
'size': 97,
'kind': 115,
'label': 100,
'version': 75,
'comments': 300,
}
list_column_sort_directions = {
'name': 'ascending',
'date-modified': 'descending',
'date-created': 'descending',
'date-added': 'descending',
'date-last-opened': 'descending',
'size': 'descending',
'kind': 'ascending',
'label': 'ascending',
'version': 'ascending',
'comments': 'ascending',
}
| {
"content_hash": "0c1c4fedd3658ecf4f86737ae98c742a",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 87,
"avg_line_length": 27.123376623376622,
"alnum_prop": 0.6183864017237252,
"repo_name": "sireum/v3",
"id": "ff140881987f0e9261ddf18b51e060d9b47c91db",
"size": "4201",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "resources/distro/dmgbuild-settings.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "ANTLR",
"bytes": "3305"
},
{
"name": "Batchfile",
"bytes": "4676"
},
{
"name": "Dockerfile",
"bytes": "2200"
},
{
"name": "HTML",
"bytes": "3328"
},
{
"name": "Java",
"bytes": "61133"
},
{
"name": "Python",
"bytes": "4201"
},
{
"name": "Scala",
"bytes": "736161"
},
{
"name": "Shell",
"bytes": "25171"
}
],
"symlink_target": ""
} |
from settings import *
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'tests.sqlite', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
'DENORM_BACKEND': 'denorm.db.sqlite3',
}
}
TESTS = True
| {
"content_hash": "c81c7eea4f4d1e8acc31232edc401eed",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 122,
"avg_line_length": 42.375,
"alnum_prop": 0.5103244837758112,
"repo_name": "Floppy/kepler-explorer",
"id": "e42b2ccc7f2bb64e28ad08d5380d4ac00e0d24d4",
"size": "678",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kepler/test_settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "38406"
},
{
"name": "Python",
"bytes": "32149"
},
{
"name": "Ruby",
"bytes": "710"
}
],
"symlink_target": ""
} |
"""This module tests the cros build command."""
from __future__ import print_function
from chromite.cli import command
from chromite.cli import command_unittest
from chromite.cli.cros import cros_build
from chromite.lib import chroot_util
from chromite.lib import cros_build_lib
from chromite.lib import cros_test_lib
from chromite.lib import parallel_unittest
from chromite.lib import partial_mock
from chromite.lib import workon_helper
class MockBuildCommand(command_unittest.MockCommand):
"""Mock out the build command."""
TARGET = 'chromite.cli.cros.cros_build.BuildCommand'
TARGET_CLASS = cros_build.BuildCommand
def __init__(self, *args, **kwargs):
super(MockBuildCommand, self).__init__(*args, **kwargs)
self.chroot_update_called = 0
def OnChrootUpdate(self, *_args, **_kwargs):
self.chroot_update_called += 1
def Run(self, inst):
self.PatchObject(chroot_util, 'UpdateChroot',
side_effect=self.OnChrootUpdate)
self.PatchObject(chroot_util, 'Emerge')
with parallel_unittest.ParallelMock():
command_unittest.MockCommand.Run(self, inst)
class FakeWorkonHelper(object):
"""Fake workon_helper.WorkonHelper."""
def __init__(self, *_args, **_kwargs):
self.start_called = 0
self.use_workon_only = None
def ListAtoms(self, *_args, **_kwargs):
pass
def StartWorkingOnPackages(self, *_args, **kwargs):
self.start_called += 1
self.use_workon_only = kwargs.get('use_workon_only')
class BuildCommandTest(cros_test_lib.MockTempDirTestCase):
"""Test class for our BuildCommand class."""
def testBrilloBuildOperationCalled(self):
"""Test that BrilloBuildOperation is used when appropriate."""
    cmd = ['--board=randomname', 'power_manager']
self.PatchObject(workon_helper, 'WorkonHelper')
self.PatchObject(command, 'UseProgressBar', return_value=True)
with MockBuildCommand(cmd) as build:
operation_run = self.PatchObject(cros_build.BrilloBuildOperation, 'Run')
build.inst.Run()
self.assertTrue(operation_run.called)
def testBrilloBuildOperationNotCalled(self):
"""Test that BrilloBuildOperation is not used when it shouldn't be."""
    cmd = ['--board=randomname', 'power_manager']
self.PatchObject(workon_helper, 'WorkonHelper')
self.PatchObject(command, 'UseProgressBar', return_value=False)
with MockBuildCommand(cmd) as build:
operation_run = self.PatchObject(cros_build.BrilloBuildOperation, 'Run')
build.inst.Run()
self.assertFalse(operation_run.called)
def testSuccess(self):
"""Test that successful commands work."""
cmds = [['--host', 'power_manager'],
['--board=randomname', 'power_manager'],
['--board=randomname', '--debug', 'power_manager'],
['--board=randomname', '--no-deps', 'power_manager'],
['--board=randomname', '--no-chroot-update', 'power_manager'],
['--board=randomname', '--no-enable-only-latest', 'power_manager']]
for cmd in cmds:
update_chroot = not ('--no-deps' in cmd or '--no-chroot-update' in cmd)
enable_only_latest = '--no-enable-only-latest' not in cmd
fake_workon_helper = FakeWorkonHelper()
self.PatchObject(workon_helper, 'WorkonHelper',
return_value=fake_workon_helper)
with MockBuildCommand(cmd) as build:
build.inst.Run()
self.assertEquals(1 if update_chroot else 0, build.chroot_update_called)
self.assertEquals(1 if enable_only_latest else 0,
fake_workon_helper.start_called)
self.assertEquals(True if enable_only_latest else None,
fake_workon_helper.use_workon_only)
def testFailedDeps(self):
"""Test that failures are detected correctly."""
# pylint: disable=protected-access
args = ['--board=randomname', 'power_manager']
self.PatchObject(workon_helper, 'WorkonHelper',
return_value=FakeWorkonHelper())
with MockBuildCommand(args) as build:
cmd = partial_mock.In('--backtrack=0')
build.rc_mock.AddCmdResult(cmd=cmd, returncode=1, error='error\n')
ex = self.assertRaises2(cros_build_lib.RunCommandError, build.inst.Run)
self.assertTrue(cros_build.BuildCommand._BAD_DEPEND_MSG in ex.msg)
| {
"content_hash": "bdb0e2142253b3e724f2b139d404e267",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 80,
"avg_line_length": 40.424528301886795,
"alnum_prop": 0.6802800466744457,
"repo_name": "guorendong/iridium-browser-ubuntu",
"id": "12a468c6c80f86c625eabc7de16e77c948a86499",
"size": "4455",
"binary": false,
"copies": "1",
"ref": "refs/heads/ubuntu/precise",
"path": "third_party/chromite/cli/cros/cros_build_unittest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "8402"
},
{
"name": "Assembly",
"bytes": "256197"
},
{
"name": "Batchfile",
"bytes": "34966"
},
{
"name": "C",
"bytes": "15445429"
},
{
"name": "C++",
"bytes": "276628399"
},
{
"name": "CMake",
"bytes": "27829"
},
{
"name": "CSS",
"bytes": "867238"
},
{
"name": "Emacs Lisp",
"bytes": "3348"
},
{
"name": "Go",
"bytes": "13628"
},
{
"name": "Groff",
"bytes": "7777"
},
{
"name": "HTML",
"bytes": "20250399"
},
{
"name": "Java",
"bytes": "9950308"
},
{
"name": "JavaScript",
"bytes": "13873772"
},
{
"name": "LLVM",
"bytes": "1169"
},
{
"name": "Logos",
"bytes": "6893"
},
{
"name": "Lua",
"bytes": "16189"
},
{
"name": "Makefile",
"bytes": "179129"
},
{
"name": "Objective-C",
"bytes": "1871766"
},
{
"name": "Objective-C++",
"bytes": "9674498"
},
{
"name": "PHP",
"bytes": "42038"
},
{
"name": "PLpgSQL",
"bytes": "163248"
},
{
"name": "Perl",
"bytes": "63937"
},
{
"name": "Protocol Buffer",
"bytes": "474121"
},
{
"name": "Python",
"bytes": "11646662"
},
{
"name": "Ragel in Ruby Host",
"bytes": "104923"
},
{
"name": "Scheme",
"bytes": "10604"
},
{
"name": "Shell",
"bytes": "1151673"
},
{
"name": "Standard ML",
"bytes": "5034"
},
{
"name": "VimL",
"bytes": "4075"
},
{
"name": "nesC",
"bytes": "18347"
}
],
"symlink_target": ""
} |
import os
import sys
sys.path.insert(0, os.path.abspath("../"))
print("***** CWD {}".format(os.getcwd()))
# -- Project information -----------------------------------------------------
project = "Least Squares Anomaly Detection"
copyright = "2019, John Quinn"
author = "John Quinn"
# The full version, including alpha/beta/rc tags
release = "1.4"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["sphinx.ext.autodoc"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
master_doc = "index"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
| {
"content_hash": "72dacc97a1d1af01ab3246e57bf9762d",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 78,
"avg_line_length": 32.8,
"alnum_prop": 0.6490514905149052,
"repo_name": "lsanomaly/lsanomaly",
"id": "31eda5a38d4f822675e91991901cecce5e57cf08",
"size": "2029",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "254947"
},
{
"name": "Makefile",
"bytes": "254"
},
{
"name": "Python",
"bytes": "60058"
},
{
"name": "TeX",
"bytes": "1771"
}
],
"symlink_target": ""
} |
import copy
import datetime
import json
from unittest import mock
import sqlalchemy as sa
from mistral.api.controllers.v2 import resources
from mistral.db.v2 import api as db_api
from mistral.db.v2.sqlalchemy import models as db
from mistral import exceptions as exc
from mistral.tests.unit.api import base
from oslo_utils import uuidutils
DATETIME_FORMAT = '%Y-%m-%d %H:%M:%S.%f'
VARIABLES = {
'host': 'localhost',
'db': 'test',
'timeout': 600,
'verbose': True,
'__actions': {
'std.sql': {
'conn': 'mysql://admin:secret@<% env().host %>/<% env().db %>'
}
}
}
ENVIRONMENT_FOR_CREATE = {
'name': 'test',
'description': 'my test settings',
'variables': VARIABLES,
}
ENVIRONMENT_FOR_UPDATE = {
'name': 'test',
'description': 'my test settings',
'variables': VARIABLES,
'scope': 'private'
}
ENVIRONMENT_FOR_UPDATE_NO_SCOPE = {
'name': 'test',
'description': 'my test settings',
'variables': VARIABLES
}
ENVIRONMENT = {
'id': uuidutils.generate_uuid(),
'name': 'test',
'description': 'my test settings',
'variables': VARIABLES,
'scope': 'private',
'project_id': '<default-project>',
'created_at': str(datetime.datetime.utcnow()),
'updated_at': str(datetime.datetime.utcnow())
}
ENVIRONMENT_WITH_ILLEGAL_FIELD = {
'id': uuidutils.generate_uuid(),
'name': 'test',
'description': 'my test settings',
'extra_field': 'I can add whatever I want here',
'variables': VARIABLES,
'scope': 'private',
}
ENVIRONMENT_DB = db.Environment(
id=ENVIRONMENT['id'],
name=ENVIRONMENT['name'],
description=ENVIRONMENT['description'],
variables=copy.deepcopy(VARIABLES),
scope=ENVIRONMENT['scope'],
project_id=ENVIRONMENT['project_id'],
created_at=datetime.datetime.strptime(ENVIRONMENT['created_at'],
DATETIME_FORMAT),
updated_at=datetime.datetime.strptime(ENVIRONMENT['updated_at'],
DATETIME_FORMAT)
)
ENVIRONMENT_DB_WITH_PROJECT_ID = ENVIRONMENT_DB.get_clone()
ENVIRONMENT_DB_WITH_PROJECT_ID.project_id = '<default-project>'
ENVIRONMENT_DB_DICT = {k: v for k, v in ENVIRONMENT_DB.items()}
UPDATED_VARIABLES = copy.deepcopy(VARIABLES)
UPDATED_VARIABLES['host'] = '127.0.0.1'
FOR_UPDATED_ENVIRONMENT = copy.deepcopy(ENVIRONMENT_FOR_UPDATE)
FOR_UPDATED_ENVIRONMENT['variables'] = json.dumps(UPDATED_VARIABLES)
UPDATED_ENVIRONMENT = copy.deepcopy(ENVIRONMENT)
UPDATED_ENVIRONMENT['variables'] = json.dumps(UPDATED_VARIABLES)
UPDATED_ENVIRONMENT_DB = db.Environment(**ENVIRONMENT_DB_DICT)
UPDATED_ENVIRONMENT_DB.variables = copy.deepcopy(UPDATED_VARIABLES)
MOCK_ENVIRONMENT = mock.MagicMock(return_value=ENVIRONMENT_DB)
MOCK_ENVIRONMENTS = mock.MagicMock(return_value=[ENVIRONMENT_DB])
MOCK_UPDATED_ENVIRONMENT = mock.MagicMock(return_value=UPDATED_ENVIRONMENT_DB)
MOCK_EMPTY = mock.MagicMock(return_value=[])
MOCK_NOT_FOUND = mock.MagicMock(side_effect=exc.DBEntityNotFoundError())
MOCK_DUPLICATE = mock.MagicMock(side_effect=exc.DBDuplicateEntryError())
MOCK_DELETE = mock.MagicMock(return_value=None)
def _convert_vars_to_dict(env_dict):
"""Converts 'variables' in the given environment dict into dictionary."""
if ('variables' in env_dict and
isinstance(env_dict.get('variables'), str)):
env_dict['variables'] = json.loads(env_dict['variables'])
return env_dict
def _convert_vars_to_json(env_dict):
"""Converts 'variables' in the given environment dict into string."""
if ('variables' in env_dict and
isinstance(env_dict.get('variables'), dict)):
env_dict['variables'] = json.dumps(env_dict['variables'])
return env_dict
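# Illustrative note (not part of the original tests): both helpers mutate and
# return the same dict, e.g.
#   _convert_vars_to_json({'variables': {'host': 'localhost'}})
#       -> {'variables': '{"host": "localhost"}'}
#   _convert_vars_to_dict({'variables': '{"host": "localhost"}'})
#       -> {'variables': {'host': 'localhost'}}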
class TestEnvironmentController(base.APITest):
def _assert_dict_equal(self, expected, actual):
self.assertIsInstance(expected, dict)
self.assertIsInstance(actual, dict)
_convert_vars_to_dict(expected)
_convert_vars_to_dict(actual)
self.assertDictEqual(expected, actual)
def test_resource(self):
resource = resources.Environment(**copy.deepcopy(ENVIRONMENT))
self._assert_dict_equal(
copy.deepcopy(ENVIRONMENT),
resource.to_dict()
)
@mock.patch.object(db_api, 'get_environments', MOCK_ENVIRONMENTS)
def test_get_all(self):
resp = self.app.get('/v2/environments')
self.assertEqual(200, resp.status_int)
self.assertEqual(1, len(resp.json['environments']))
@mock.patch.object(db_api, 'get_environments')
def test_get_all_operational_error(self, mocked_get_all):
mocked_get_all.side_effect = [
# Emulating DB OperationalError
sa.exc.OperationalError('Mock', 'mock', 'mock'),
[ENVIRONMENT_DB] # Successful run
]
resp = self.app.get('/v2/environments')
self.assertEqual(200, resp.status_int)
self.assertEqual(1, len(resp.json['environments']))
self._assert_dict_equal(ENVIRONMENT, resp.json['environments'][0])
def test_get_all_empty(self):
resp = self.app.get('/v2/environments')
self.assertEqual(200, resp.status_int)
self.assertEqual(0, len(resp.json['environments']))
@mock.patch.object(db_api, 'get_environment', MOCK_ENVIRONMENT)
def test_get(self):
resp = self.app.get('/v2/environments/123')
self.assertEqual(200, resp.status_int)
self._assert_dict_equal(ENVIRONMENT, resp.json)
@mock.patch.object(db_api, 'get_environment')
def test_get_operational_error(self, mocked_get):
mocked_get.side_effect = [
# Emulating DB OperationalError
sa.exc.OperationalError('Mock', 'mock', 'mock'),
ENVIRONMENT_DB # Successful run
]
resp = self.app.get('/v2/environments/123')
self.assertEqual(200, resp.status_int)
self._assert_dict_equal(ENVIRONMENT, resp.json)
@mock.patch.object(db_api, 'get_environment',
return_value=ENVIRONMENT_DB_WITH_PROJECT_ID)
def test_get_within_project_id(self, mock_get):
resp = self.app.get('/v2/environments/123')
self.assertEqual(200, resp.status_int)
self.assertEqual('<default-project>', resp.json['project_id'])
@mock.patch.object(db_api, "get_environment", MOCK_NOT_FOUND)
def test_get_not_found(self):
resp = self.app.get('/v2/environments/123', expect_errors=True)
self.assertEqual(404, resp.status_int)
@mock.patch.object(db_api, 'create_environment', MOCK_ENVIRONMENT)
def test_post(self):
resp = self.app.post_json(
'/v2/environments',
_convert_vars_to_json(copy.deepcopy(ENVIRONMENT_FOR_CREATE))
)
self.assertEqual(201, resp.status_int)
self._assert_dict_equal(copy.deepcopy(ENVIRONMENT), resp.json)
@mock.patch.object(db_api, 'create_environment', MOCK_ENVIRONMENT)
def test_post_with_illegal_field(self):
resp = self.app.post_json(
'/v2/environments',
_convert_vars_to_json(
copy.deepcopy(ENVIRONMENT_WITH_ILLEGAL_FIELD)),
expect_errors=True
)
self.assertEqual(400, resp.status_int)
@mock.patch.object(db_api, 'create_environment', MOCK_DUPLICATE)
def test_post_dup(self):
resp = self.app.post_json(
'/v2/environments',
_convert_vars_to_json(copy.deepcopy(ENVIRONMENT_FOR_CREATE)),
expect_errors=True
)
self.assertEqual(409, resp.status_int)
@mock.patch.object(db_api, 'create_environment', MOCK_ENVIRONMENT)
def test_post_default_scope(self):
env = _convert_vars_to_json(copy.deepcopy(ENVIRONMENT_FOR_CREATE))
resp = self.app.post_json('/v2/environments', env)
self.assertEqual(201, resp.status_int)
self._assert_dict_equal(copy.deepcopy(ENVIRONMENT), resp.json)
@mock.patch.object(db_api, 'update_environment', MOCK_UPDATED_ENVIRONMENT)
def test_put(self):
resp = self.app.put_json(
'/v2/environments',
copy.deepcopy(FOR_UPDATED_ENVIRONMENT)
)
self.assertEqual(200, resp.status_int)
self._assert_dict_equal(UPDATED_ENVIRONMENT, resp.json)
@mock.patch.object(db_api, 'update_environment', MOCK_UPDATED_ENVIRONMENT)
def test_put_default_scope(self):
env = copy.deepcopy(ENVIRONMENT_FOR_UPDATE_NO_SCOPE)
        env['variables'] = json.dumps(env['variables'])
resp = self.app.put_json('/v2/environments', env)
self.assertEqual(200, resp.status_int)
self._assert_dict_equal(copy.deepcopy(UPDATED_ENVIRONMENT), resp.json)
@mock.patch.object(db_api, 'update_environment', MOCK_NOT_FOUND)
def test_put_not_found(self):
env = copy.deepcopy(FOR_UPDATED_ENVIRONMENT)
resp = self.app.put_json(
'/v2/environments',
env,
expect_errors=True
)
self.assertEqual(404, resp.status_int)
@mock.patch.object(db_api, 'delete_environment', MOCK_DELETE)
def test_delete(self):
resp = self.app.delete('/v2/environments/123')
self.assertEqual(204, resp.status_int)
@mock.patch.object(db_api, 'delete_environment', MOCK_NOT_FOUND)
def test_delete_not_found(self):
resp = self.app.delete('/v2/environments/123', expect_errors=True)
self.assertEqual(404, resp.status_int)
| {
"content_hash": "fb22c03f0b577240066e6b74ebbd522e",
"timestamp": "",
"source": "github",
"line_count": 289,
"max_line_length": 78,
"avg_line_length": 32.82006920415225,
"alnum_prop": 0.6484976278334212,
"repo_name": "openstack/mistral",
"id": "425082065f3f23e78c84a1c178330f6e7ad6e1a9",
"size": "10095",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mistral/tests/unit/api/v2/test_environment.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2091"
},
{
"name": "Mako",
"bytes": "951"
},
{
"name": "Python",
"bytes": "2617595"
},
{
"name": "Shell",
"bytes": "26731"
}
],
"symlink_target": ""
} |
from preggy import expect
from tornado.testing import gen_test
from tests.fixtures.watermark_fixtures import (
POSITIONS,
RATIOS,
SOURCE_IMAGE_SIZES,
WATERMARK_IMAGE_SIZES,
)
from thumbor.config import Config
from thumbor.context import Context
from thumbor.filters import watermark
from thumbor.importer import Importer
from thumbor.testing import FilterTestCase
class WatermarkFilterTestCase(FilterTestCase):
@gen_test
async def test_watermark_filter_centered(self):
image = await self.get_filtered(
"source.jpg",
"thumbor.filters.watermark",
"watermark(watermark.png,center,center,60)",
)
expected = self.get_fixture("watermarkCenter.jpg")
ssim = self.get_ssim(image, expected)
expect(ssim).to_be_greater_than(0.98)
@gen_test
async def test_watermark_filter_centered_x(self):
image = await self.get_filtered(
"source.jpg",
"thumbor.filters.watermark",
"watermark(watermark.png,center,40,20)",
)
expected = self.get_fixture("watermarkCenterX.jpg")
ssim = self.get_ssim(image, expected)
expect(ssim).to_be_greater_than(0.98)
@gen_test
async def test_watermark_filter_centered_y(self):
image = await self.get_filtered(
"source.jpg",
"thumbor.filters.watermark",
"watermark(watermark.png,80,center,50)",
)
expected = self.get_fixture("watermarkCenterY.jpg")
ssim = self.get_ssim(image, expected)
expect(ssim).to_be_greater_than(0.98)
@gen_test
async def test_watermark_filter_repeated(self):
image = await self.get_filtered(
"source.jpg",
"thumbor.filters.watermark",
"watermark(watermark.png,repeat,repeat,70)",
)
expected = self.get_fixture("watermarkRepeat.jpg")
ssim = self.get_ssim(image, expected)
expect(ssim).to_be_greater_than(0.98)
@gen_test
async def test_watermark_filter_repeated_x(self):
image = await self.get_filtered(
"source.jpg",
"thumbor.filters.watermark",
"watermark(watermark.png,repeat,center,70)",
)
expected = self.get_fixture("watermarkRepeatX.jpg")
ssim = self.get_ssim(image, expected)
expect(ssim).to_be_greater_than(0.98)
@gen_test
async def test_watermark_filter_repeated_y(self):
image = await self.get_filtered(
"source.jpg",
"thumbor.filters.watermark",
"watermark(watermark.png,30,repeat,30)",
)
expected = self.get_fixture("watermarkRepeatY.jpg")
ssim = self.get_ssim(image, expected)
expect(ssim).to_be_greater_than(0.98)
@gen_test
async def test_watermark_filter_detect_extension_simple(self):
image = await self.get_filtered(
"source.jpg",
"thumbor.filters.watermark",
"watermark(watermark,30,-50,60)",
)
expected = self.get_fixture("watermarkSimple.jpg")
ssim = self.get_ssim(image, expected)
expect(ssim).to_be_greater_than(0.98)
@gen_test
async def test_watermark_filter_simple(self):
image = await self.get_filtered(
"source.jpg",
"thumbor.filters.watermark",
"watermark(watermark.png,30,-50,60)",
)
expected = self.get_fixture("watermarkSimple.jpg")
ssim = self.get_ssim(image, expected)
expect(ssim).to_be_greater_than(0.98)
@gen_test
async def test_watermark_filter_calculated(self):
image = await self.get_filtered(
"source.jpg",
"thumbor.filters.watermark",
"watermark(watermark.png,4p,-30p,60)",
)
expected = await self.get_filtered(
"source.jpg",
"thumbor.filters.watermark",
"watermark(watermark.png,32,-160,60)",
)
ssim = self.get_ssim(image, expected)
expect(ssim).to_be_greater_than(0.98)
@gen_test
async def test_watermark_filter_calculated_center(self):
image = await self.get_filtered(
"source.jpg",
"thumbor.filters.watermark",
"watermark(watermark.png,4p,center,60)",
)
expected = await self.get_filtered(
"source.jpg",
"thumbor.filters.watermark",
"watermark(watermark.png,32,center,60)",
)
ssim = self.get_ssim(image, expected)
expect(ssim).to_be_greater_than(0.98)
@gen_test
async def test_watermark_filter_calculated_repeat(self):
image = await self.get_filtered(
"source.jpg",
"thumbor.filters.watermark",
"watermark(watermark.png,repeat,30p,60)",
)
expected = await self.get_filtered(
"source.jpg",
"thumbor.filters.watermark",
"watermark(watermark.png,repeat,160,60)",
)
ssim = self.get_ssim(image, expected)
expect(ssim).to_be_greater_than(0.98)
@gen_test
async def test_watermark_filter_calculated_position(self):
watermark.Filter.pre_compile()
filter_instance = watermark.Filter("http://dummy,0,0,0", self.context)
for length, pos, expected in POSITIONS:
test = {
"length": length,
"pos": pos,
}
expect(
filter_instance.detect_and_get_ratio_position(pos, length)
).to_be_equal_with_additional_info(expected, **test)
@gen_test
async def test_watermark_filter_simple_big(self):
image = await self.get_filtered(
"source.jpg",
"thumbor.filters.watermark",
"watermark(watermarkBig.png,-10,-100,50)",
)
expected = self.get_fixture("watermarkSimpleBig.jpg")
ssim = self.get_ssim(image, expected)
expect(ssim).to_be_greater_than(0.98)
@gen_test
async def test_watermark_filter_simple_50p_width(self):
image = await self.get_filtered(
"source.jpg",
"thumbor.filters.watermark",
"watermark(watermark.png,30,-50,20,50)",
)
expected = self.get_fixture("watermarkResize50pWidth.jpg")
ssim = self.get_ssim(image, expected)
expect(ssim).to_be_greater_than(0.98)
@gen_test
async def test_watermark_filter_simple_70p_height(self):
image = await self.get_filtered(
"source.jpg",
"thumbor.filters.watermark",
"watermark(watermark.png,30,-50,20,none,70)",
)
expected = self.get_fixture("watermarkResize70pHeight.jpg")
ssim = self.get_ssim(image, expected)
expect(ssim).to_be_greater_than(0.98)
@gen_test
async def test_watermark_filter_simple_60p_80p(self):
image = await self.get_filtered(
"source.jpg",
"thumbor.filters.watermark",
"watermark(watermark.png,-30,-200,20,60,80)",
)
expected = self.get_fixture("watermarkResize60p80p.jpg")
ssim = self.get_ssim(image, expected)
expect(ssim).to_be_greater_than(0.98)
@gen_test
async def test_watermark_filter_calculated_resizing(self):
watermark.Filter.pre_compile()
filter_instance = watermark.Filter("http://dummy,0,0,0", self.context)
for source_image_width, source_image_height in SOURCE_IMAGE_SIZES:
for (
watermark_source_image_width,
watermark_source_image_height,
) in WATERMARK_IMAGE_SIZES:
for w_ratio, h_ratio in RATIOS:
max_width = (
source_image_width * (float(w_ratio) / 100)
if w_ratio
else float("inf")
)
max_height = (
source_image_height * (float(h_ratio) / 100)
if h_ratio
else float("inf")
)
w_ratio = float(w_ratio) / 100.0 if w_ratio else False
h_ratio = float(h_ratio) / 100.0 if h_ratio else False
ratio = (
float(watermark_source_image_width)
/ watermark_source_image_height
)
(
watermark_image_width,
watermark_image_height,
) = filter_instance.calc_watermark_size(
(source_image_width, source_image_height),
(
watermark_source_image_width,
watermark_source_image_height,
),
w_ratio,
h_ratio,
)
watermark_image = (
float(watermark_image_width) / watermark_image_height
)
test = {
"source_image_width": source_image_width,
"source_image_height": source_image_height,
"watermark_source_image_width": watermark_source_image_width,
"watermark_source_image_height": watermark_source_image_height,
"watermark_image_width": watermark_image_width,
"watermark_image_height": watermark_image_height,
"w_ratio": w_ratio,
"h_ratio": h_ratio,
}
test["topic_name"] = "watermark_image_width"
expect(watermark_image_width).to_fit_into(
max_width, **test
)
test["topic_name"] = "watermark_image_height"
expect(watermark_image_height).to_fit_into(
max_height, **test
)
test["topic_name"] = "fill out"
expect(
(
watermark_image_width == max_width
or watermark_image_height == max_height
)
).to_be_true_with_additional_info(**test)
test["topic_name"] = "image ratio"
expect(watermark_image).to_almost_equal(ratio, 2, **test)
@gen_test
async def test_watermark_validate_allowed_source(self):
config = Config(
ALLOWED_SOURCES=[
"s.glbimg.com",
],
LOADER="thumbor.loaders.http_loader",
)
importer = Importer(config)
importer.import_modules()
context = Context(config=config, importer=importer)
filter_instance = watermark.Filter("", context)
expect(
filter_instance.validate("https://s2.glbimg.com/logo.jpg")
).to_be_false()
expect(
filter_instance.validate("https://s.glbimg.com/logo.jpg")
).to_be_true()
| {
"content_hash": "f77ab8aa7b86663929ea9a374456bab9",
"timestamp": "",
"source": "github",
"line_count": 305,
"max_line_length": 87,
"avg_line_length": 36.540983606557376,
"alnum_prop": 0.5423956931359354,
"repo_name": "thumbor/thumbor",
"id": "49fe8c06200650656430c8f2dc2bbcbe6f0eaa05",
"size": "11397",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/filters/test_watermark.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "59023"
},
{
"name": "Dockerfile",
"bytes": "1631"
},
{
"name": "JavaScript",
"bytes": "2514"
},
{
"name": "Makefile",
"bytes": "11947"
},
{
"name": "Python",
"bytes": "716804"
},
{
"name": "Shell",
"bytes": "331"
}
],
"symlink_target": ""
} |
'''
Created on Sep 30, 2016
@author: dj
'''
class ContextManager(object):
def __init__(self):
self.entered = False
print("ContextManager.init.entered =", self.entered)
def __enter__(self):
self.entered = True
print("ContextManager.enter.entered =", self.entered)
return self
def __exit__(self, exception_type, exception, traceback):
self.entered = False
print("ContextManager.exit.entered =", self.entered)
print("ContextManager.exit.exception_type =", exception_type)
print("ContextManager.exit.exception =", exception)
print("ContextManager.exit.traceback =", traceback)
class ExceptionHandler(object):
def __init__(self, error_message=None):
        '''Store a custom error message to print when an exception is suppressed.'''
self.error_message = error_message
print("ExceptionHandler.enter.error_message =", self.error_message)
def __enter__(self):
print("ExceptionHandler.enter.")
return self
def __exit__(self, exception_type, exception, traceback):
print("ExceptionHandler.exit.")
print("ExceptionHandler.exit.exception_type =", exception_type)
print("ExceptionHandler.exit.exception =", exception)
print("ExceptionHandler.exit.traceback =", traceback)
# If any exception.
if exception_type:
print("ExceptionHandler.exit.error_message =", self.error_message)
            # If there is no "return" statement, __exit__ returns None (falsy),
            # so the exception propagates.
            # If "return False", the exception also propagates.
            # If "return True", the exception is suppressed.
            # A different, self-defined exception could also be raised here instead.
return True
class ValueErrorExceptionHandler(object):
def __enter__(self):
print("ValueErrorExceptionHandler.enter.")
return self
def __exit__(self, exception_type, exception, traceback):
print("ValueErrorExceptionHandler.exit.")
print(
"ValueErrorExceptionHandler.exit.exception_type =", exception_type)
print("ValueErrorExceptionHandler.exit.exception =", exception)
print("ValueErrorExceptionHandler.exit.traceback =", traceback)
# If no exception.
if not exception_type:
return True
# Only suppress certain exception.
if issubclass(exception_type, ValueError):
print("ValueErrorExceptionHandler.exit.exception, less pass.")
return True
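# Additional sketch (not part of the original lesson): the same "suppress and report"
# behaviour can be written with contextlib.contextmanager; the function name and its
# error_message argument are illustrative.
from contextlib import contextmanager
@contextmanager
def exception_handler(error_message=None):
    try:
        yield
    except Exception as exception:
        # Swallowing the exception here mirrors ExceptionHandler.__exit__ returning True.
        print("exception_handler.error_message =", error_message)
        print("exception_handler.exception =", exception)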
def main():
print("-" * 40)
cm = ContextManager()
print("cm.entered =", cm.entered)
print("-" * 40)
with cm:
print("cm.entered =", cm.entered)
print("-" * 40)
with ExceptionHandler():
x = 5 + 5
print("-" * 40)
with ExceptionHandler("New message."):
x = 5 / 0
print("Never printed due to exception.")
print("-" * 40)
with ExceptionHandler("Another new message."):
try:
x = 5 / 0
except:
print("Exception is capture here, not in __exit__.")
print("-" * 40)
with ValueErrorExceptionHandler():
raise ValueError("Wrong value.")
print("-" * 40)
try:
with ValueErrorExceptionHandler():
raise TypeError("Wrong type.")
except Exception as e:
print("Exception =", e)
print("-" * 40)
if __name__ == '__main__':
main()
| {
"content_hash": "4442f417c41c2fa7ed612f0e2b17dcc8",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 79,
"avg_line_length": 27.14516129032258,
"alnum_prop": 0.6084373143196673,
"repo_name": "djsilenceboy/LearnTest",
"id": "daf9c7801f7406554c0b2fe31241b517ea31f601",
"size": "3366",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python_Test/PySample1/com/djs/learn/class/TestContextManagerClass.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "27588"
},
{
"name": "C",
"bytes": "201487"
},
{
"name": "C++",
"bytes": "9459"
},
{
"name": "CSS",
"bytes": "6049"
},
{
"name": "Dockerfile",
"bytes": "5976"
},
{
"name": "HTML",
"bytes": "89155"
},
{
"name": "Java",
"bytes": "3304194"
},
{
"name": "JavaScript",
"bytes": "73886"
},
{
"name": "Jinja",
"bytes": "1150"
},
{
"name": "PHP",
"bytes": "21180"
},
{
"name": "PLSQL",
"bytes": "2080"
},
{
"name": "PowerShell",
"bytes": "19723"
},
{
"name": "Python",
"bytes": "551890"
},
{
"name": "Ruby",
"bytes": "16460"
},
{
"name": "Shell",
"bytes": "142970"
},
{
"name": "TypeScript",
"bytes": "6986"
},
{
"name": "XSLT",
"bytes": "1860"
}
],
"symlink_target": ""
} |
"""Autograd for NDArray."""
from __future__ import absolute_import
from __future__ import division
import ctypes
from .base import _LIB, check_call, string_types
from .base import mx_uint, NDArrayHandle, c_array
from .ndarray import NDArray
from .symbol import _GRAD_REQ_MAP
def set_recording(is_recording):
"""Set status to recording/not recording. When recording, graph will be constructed
for gradient computation.
Parameters
----------
is_recording: bool
Returns
-------
previous state before this set.
"""
prev = ctypes.c_int()
check_call(_LIB.MXAutogradSetIsRecording(
ctypes.c_int(is_recording), ctypes.byref(prev)))
return bool(prev.value)
def set_training(is_train):
"""Set status to training/not training. This affects ctx.is_train in operator
running context. For example, Dropout will drop inputs randomly when
is_train=True while simply passing through if is_train=False.
Parameters
----------
is_train: bool
Returns
-------
previous state before this set.
"""
prev = ctypes.c_int()
check_call(_LIB.MXAutogradSetIsTraining(
ctypes.c_int(is_train), ctypes.byref(prev)))
return bool(prev.value)
class RecordingStateScope(object):
"""Scope for managing training state.
Example::
with RecordingStateScope(True, True):
y = model(x)
backward([y])
"""
def __init__(self, enter_state, is_train):
self._enter_state = enter_state
self._enter_is_train = is_train
self._prev = None
self._prev_is_train = None
def __enter__(self):
self._prev = set_recording(self._enter_state)
self._prev_is_train = set_training(self._enter_is_train)
def __exit__(self, ptype, value, trace):
if self._prev != self._enter_state:
set_recording(self._prev)
if self._prev_is_train != self._enter_is_train:
set_training(self._prev_is_train)
def record(is_train=True):
"""Returns a training scope context to be used in 'with' statement
and captures training code.
.. note:: When forwarding with is_train=False, the corresponding backward
should also use is_train=False, otherwise gradient is undefined.
Example::
with autograd.record():
y = model(x)
backward([y])
metric.update(...)
optim.step(...)
Parameters
----------
is_train: bool, default True
Whether to do forward for training or inference.
"""
return RecordingStateScope(True, is_train)
def pause(is_train=False):
"""Returns a testing scope context to be used in 'with' statement
and captures testing code.
Example::
with autograd.record():
y = model(x)
backward([y])
with autograd.pause():
# testing, IO, gradient updates...
Parameters
----------
is_train: bool, default False
Whether to do forward for training or inference.
"""
return RecordingStateScope(False, is_train)
def mark_variables(variables, gradients, grad_reqs='write'):
"""Mark NDArrays as variables to compute gradient for autograd.
Parameters
----------
variables: NDArray or list of NDArray
gradients: NDArray or list of NDArray
grad_reqs: str or list of str
"""
if isinstance(variables, NDArray):
assert isinstance(gradients, NDArray)
variables = [variables]
gradients = [gradients]
variable_handles = []
gradient_handles = []
for var, gradvar in zip(variables, gradients):
variable_handles.append(var.handle)
gradient_handles.append(gradvar.handle)
if isinstance(grad_reqs, string_types):
grad_reqs = [_GRAD_REQ_MAP[grad_reqs]]*len(variables)
else:
grad_reqs = [_GRAD_REQ_MAP[i] for i in grad_reqs]
check_call(_LIB.MXAutogradMarkVariables(
len(variable_handles),
c_array(NDArrayHandle, variable_handles),
c_array(mx_uint, grad_reqs),
c_array(NDArrayHandle, gradient_handles)))
def backward(heads, head_grads=None, retain_graph=False, is_train=True):
"""Compute the gradients of heads w.r.t previously marked variables.
Parameters
----------
heads: NDArray or list of NDArray
Output NDArray(s)
head_grads: NDArray or list of NDArray or None
Gradients with respect to heads.
is_train: bool, optional
Whether to do backward for training or inference.
"""
if isinstance(heads, NDArray):
assert head_grads is None or isinstance(head_grads, NDArray)
heads = [heads]
head_grads = [head_grads] if head_grads is not None else None
output_handles = []
for arr in heads:
output_handles.append(arr.handle)
if head_grads is None:
check_call(_LIB.MXAutogradBackwardEx(
len(output_handles),
c_array(NDArrayHandle, output_handles),
ctypes.c_void_p(0),
ctypes.c_int(retain_graph),
ctypes.c_int(is_train)))
return
ograd_handles = []
for arr in head_grads:
if arr is not None:
ograd_handles.append(arr.handle)
else:
ograd_handles.append(NDArrayHandle(0))
assert len(ograd_handles) == len(output_handles), \
"heads and head_grads must have the same length"
check_call(_LIB.MXAutogradBackwardEx(
len(output_handles),
c_array(NDArrayHandle, output_handles),
c_array(NDArrayHandle, ograd_handles),
ctypes.c_int(retain_graph),
ctypes.c_int(is_train)))
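# Hedged usage sketch (not part of this module); assumes `import mxnet as mx` and
# illustrative shapes/values:
#
#   x = mx.nd.ones((2, 2))
#   dx = mx.nd.zeros_like(x)
#   mark_variables([x], [dx])
#   with record():
#       y = x * x
#   backward([y])
#   # dx now holds dy/dx, i.e. 2 * x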
| {
"content_hash": "b55a220b5ab2bf375b07e475f7c9800b",
"timestamp": "",
"source": "github",
"line_count": 189,
"max_line_length": 87,
"avg_line_length": 29.941798941798943,
"alnum_prop": 0.6260823467043647,
"repo_name": "hesseltuinhof/mxnet",
"id": "2f33052e663e534d83749096a5b3f10cef07dac7",
"size": "5675",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/mxnet/autograd.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "10619"
},
{
"name": "C",
"bytes": "87061"
},
{
"name": "C++",
"bytes": "3237483"
},
{
"name": "CMake",
"bytes": "48546"
},
{
"name": "Cuda",
"bytes": "567360"
},
{
"name": "Groovy",
"bytes": "217"
},
{
"name": "Java",
"bytes": "16368"
},
{
"name": "Jupyter Notebook",
"bytes": "1229390"
},
{
"name": "Makefile",
"bytes": "40158"
},
{
"name": "Matlab",
"bytes": "30187"
},
{
"name": "Perl",
"bytes": "611248"
},
{
"name": "Perl 6",
"bytes": "21993"
},
{
"name": "Protocol Buffer",
"bytes": "77256"
},
{
"name": "Python",
"bytes": "3257431"
},
{
"name": "R",
"bytes": "317736"
},
{
"name": "Scala",
"bytes": "883096"
},
{
"name": "Shell",
"bytes": "118634"
}
],
"symlink_target": ""
} |
from __future__ import print_function, division, absolute_import
# This file defines the AngleFormatterLocator class which is a class that
# provides both a method for a formatter and one for a locator, for a given
# label spacing. The advantage of keeping the two connected is that we need to
# make sure that the formatter can correctly represent the spacing requested and
# vice versa. For example, a format of dd:mm cannot work with a tick spacing
# that is not a multiple of one arcminute.
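# Illustrative sketch (added, not from the original source; the values are assumptions):
#
#   fl = AngleFormatterLocator(spacing=0.5 * u.arcmin)
#   fl.format = 'dd:mm'   # base spacing for this format is 1 arcmin
#   fl.spacing            # -> 1 arcmin, after a warning that 0.5 arcmin was too small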
import re
import warnings
import numpy as np
from matplotlib import rcParams
from ...extern import six
from ... import units as u
from ...coordinates import Angle
DMS_RE = re.compile('^dd(:mm(:ss(.(s)+)?)?)?$')
HMS_RE = re.compile('^hh(:mm(:ss(.(s)+)?)?)?$')
DDEC_RE = re.compile('^d(.(d)+)?$')
DMIN_RE = re.compile('^m(.(m)+)?$')
DSEC_RE = re.compile('^s(.(s)+)?$')
SCAL_RE = re.compile('^x(.(x)+)?$')
class BaseFormatterLocator(object):
"""
A joint formatter/locator
"""
def __init__(self, values=None, number=None, spacing=None, format=None):
if (values, number, spacing).count(None) < 2:
raise ValueError("At most one of values/number/spacing can be specifed")
if values is not None:
self.values = values
elif number is not None:
self.number = number
elif spacing is not None:
self.spacing = spacing
else:
self.number = 5
self.format = format
@property
def values(self):
return self._values
@values.setter
def values(self, values):
if not isinstance(values, u.Quantity) or (not values.ndim == 1):
raise TypeError("values should be an astropy.units.Quantity array")
self._number = None
self._spacing = None
self._values = values
@property
def number(self):
return self._number
@number.setter
def number(self, number):
self._number = number
self._spacing = None
self._values = None
@property
def spacing(self):
return self._spacing
@spacing.setter
def spacing(self, spacing):
self._number = None
self._spacing = spacing
self._values = None
def minor_locator(self, spacing, frequency, value_min, value_max):
if self.values is not None:
return [] * self._unit
minor_spacing = spacing.value / frequency
values = self._locate_values(value_min, value_max, minor_spacing)
index = np.where((values % frequency) == 0)
index = index[0][0]
values = np.delete(values, np.s_[index::frequency])
return values * minor_spacing * self._unit
@staticmethod
def _locate_values(value_min, value_max, spacing):
imin = np.ceil(value_min / spacing)
imax = np.floor(value_max / spacing)
values = np.arange(imin, imax + 1, dtype=int)
return values
class AngleFormatterLocator(BaseFormatterLocator):
"""
A joint formatter/locator
"""
def __init__(self, values=None, number=None, spacing=None, format=None):
self._unit = u.degree
self._sep = None
super(AngleFormatterLocator, self).__init__(values=values,
number=number,
spacing=spacing,
format=format)
@property
def spacing(self):
return self._spacing
@spacing.setter
def spacing(self, spacing):
if spacing is not None and (not isinstance(spacing, u.Quantity) or
spacing.unit.physical_type != 'angle'):
raise TypeError("spacing should be an astropy.units.Quantity "
"instance with units of angle")
self._number = None
self._spacing = spacing
self._values = None
@property
def sep(self):
return self._sep
@sep.setter
def sep(self, separator):
self._sep = separator
@property
def format(self):
return self._format
@format.setter
def format(self, value):
self._format = value
if value is None:
return
if DMS_RE.match(value) is not None:
self._decimal = False
self._unit = u.degree
if '.' in value:
self._precision = len(value) - value.index('.') - 1
self._fields = 3
else:
self._precision = 0
self._fields = value.count(':') + 1
elif HMS_RE.match(value) is not None:
self._decimal = False
self._unit = u.hourangle
if '.' in value:
self._precision = len(value) - value.index('.') - 1
self._fields = 3
else:
self._precision = 0
self._fields = value.count(':') + 1
elif DDEC_RE.match(value) is not None:
self._decimal = True
self._unit = u.degree
self._fields = 1
if '.' in value:
self._precision = len(value) - value.index('.') - 1
else:
self._precision = 0
elif DMIN_RE.match(value) is not None:
self._decimal = True
self._unit = u.arcmin
self._fields = 1
if '.' in value:
self._precision = len(value) - value.index('.') - 1
else:
self._precision = 0
elif DSEC_RE.match(value) is not None:
self._decimal = True
self._unit = u.arcsec
self._fields = 1
if '.' in value:
self._precision = len(value) - value.index('.') - 1
else:
self._precision = 0
else:
raise ValueError("Invalid format: {0}".format(value))
if self.spacing is not None and self.spacing < self.base_spacing:
warnings.warn("Spacing is too small - resetting spacing to match format")
self.spacing = self.base_spacing
if self.spacing is not None:
ratio = (self.spacing / self.base_spacing).decompose().value
remainder = ratio - np.round(ratio)
if abs(remainder) > 1.e-10:
warnings.warn("Spacing is not a multiple of base spacing - resetting spacing to match format")
self.spacing = self.base_spacing * max(1, round(ratio))
@property
def base_spacing(self):
if self._decimal:
spacing = self._unit / (10. ** self._precision)
else:
if self._fields == 1:
spacing = 1. * u.degree
elif self._fields == 2:
spacing = 1. * u.arcmin
elif self._fields == 3:
if self._precision == 0:
spacing = 1. * u.arcsec
else:
spacing = u.arcsec / (10. ** self._precision)
if self._unit is u.hourangle:
spacing *= 15
return spacing
def locator(self, value_min, value_max):
if self.values is not None:
# values were manually specified
return self.values, 1.1 * u.arcsec
else:
# In the special case where value_min is the same as value_max, we
# don't locate any ticks. This can occur for example when taking a
# slice for a cube (along the dimension sliced).
if value_min == value_max:
return [] * u.deg, 0 * u.arcsec
if self.spacing is not None:
# spacing was manually specified
spacing_deg = self.spacing.to_value(u.degree)
elif self.number is not None:
# number of ticks was specified, work out optimal spacing
# first compute the exact spacing
dv = abs(float(value_max - value_min)) / self.number * u.degree
if self.format is not None and dv < self.base_spacing:
# if the spacing is less than the minimum spacing allowed by the format, simply
# use the format precision instead.
spacing_deg = self.base_spacing.to_value(u.degree)
else:
# otherwise we clip to the nearest 'sensible' spacing
if self._unit is u.degree:
from .utils import select_step_degree
spacing_deg = select_step_degree(dv).to_value(u.degree)
else:
from .utils import select_step_hour
spacing_deg = select_step_hour(dv).to_value(u.degree)
# We now find the interval values as multiples of the spacing and
# generate the tick positions from this.
values = self._locate_values(value_min, value_max, spacing_deg)
return values * spacing_deg * u.degree, spacing_deg * u.degree
def formatter(self, values, spacing):
if not isinstance(values, u.Quantity) and values is not None:
raise TypeError("values should be a Quantities array")
if len(values) > 0:
if self.format is None:
spacing = spacing.to_value(u.arcsec)
if spacing > 3600:
fields = 1
precision = 0
elif spacing > 60:
fields = 2
precision = 0
elif spacing > 1:
fields = 3
precision = 0
else:
fields = 3
precision = -int(np.floor(np.log10(spacing)))
decimal = False
unit = u.degree
else:
fields = self._fields
precision = self._precision
decimal = self._decimal
unit = self._unit
if decimal:
sep = None
elif self._sep is not None:
sep = self._sep
else:
if unit == u.degree:
if rcParams['text.usetex']:
deg = r'$^\circ$'
else:
deg = six.u('\xb0')
sep = (deg, "'", '"')
else:
sep = ('h', 'm', 's')
angles = Angle(values)
string = angles.to_string(unit=unit,
precision=precision,
decimal=decimal,
fields=fields,
sep=sep).tolist()
return string
else:
return []
class ScalarFormatterLocator(BaseFormatterLocator):
"""
A joint formatter/locator
"""
def __init__(self, values=None, number=None, spacing=None, format=None, unit=None):
if unit is not None:
self._unit = unit
self._format_unit = unit
elif spacing is not None:
self._unit = spacing.unit
self._format_unit = spacing.unit
elif values is not None:
self._unit = values.unit
self._format_unit = values.unit
super(ScalarFormatterLocator, self).__init__(values=values,
number=number,
spacing=spacing,
format=format)
@property
def format_unit(self):
return self._format_unit
@format_unit.setter
def format_unit(self, unit):
if not issubclass(unit.__class__, u.UnitBase):
raise TypeError("unit should be an astropy UnitBase subclass")
self._format_unit = unit
@property
def spacing(self):
return self._spacing
@spacing.setter
def spacing(self, spacing):
if spacing is not None and not isinstance(spacing, u.Quantity):
raise TypeError("spacing should be an astropy.units.Quantity instance")
self._number = None
self._spacing = spacing
self._values = None
@property
def format(self):
return self._format
@format.setter
def format(self, value):
self._format = value
if value is None:
return
if SCAL_RE.match(value) is not None:
if '.' in value:
self._precision = len(value) - value.index('.') - 1
else:
self._precision = 0
if self.spacing is not None and self.spacing < self.base_spacing:
warnings.warn("Spacing is too small - resetting spacing to match format")
self.spacing = self.base_spacing
if self.spacing is not None:
ratio = (self.spacing / self.base_spacing).decompose().value
remainder = ratio - np.round(ratio)
if abs(remainder) > 1.e-10:
warnings.warn("Spacing is not a multiple of base spacing - resetting spacing to match format")
self.spacing = self.base_spacing * max(1, round(ratio))
elif not value.startswith('%'):
raise ValueError("Invalid format: {0}".format(value))
@property
def base_spacing(self):
return self._unit / (10. ** self._precision)
def locator(self, value_min, value_max):
if self.values is not None:
# values were manually specified
return self.values, 1.1 * self._unit
else:
# In the special case where value_min is the same as value_max, we
# don't locate any ticks. This can occur for example when taking a
# slice for a cube (along the dimension sliced).
if value_min == value_max:
return [] * self._unit, 0 * self._unit
if self.spacing is not None:
# spacing was manually specified
spacing = self.spacing.to_value(self._unit)
elif self.number is not None:
# number of ticks was specified, work out optimal spacing
# first compute the exact spacing
dv = abs(float(value_max - value_min)) / self.number
if self.format is not None and (not self.format.startswith('%')) and dv < self.base_spacing.value:
# if the spacing is less than the minimum spacing allowed by the format, simply
# use the format precision instead.
spacing = self.base_spacing.to_value(self._unit)
else:
from .utils import select_step_scalar
spacing = select_step_scalar(dv)
# We now find the interval values as multiples of the spacing and
# generate the tick positions from this
values = self._locate_values(value_min, value_max, spacing)
return values * spacing * self._unit, spacing * self._unit
def formatter(self, values, spacing):
if len(values) > 0:
if self.format is None:
if spacing.value < 1.:
precision = -int(np.floor(np.log10(spacing.value)))
else:
precision = 0
elif self.format.startswith('%'):
return [(self.format % x.value) for x in values]
else:
precision = self._precision
return [("{0:." + str(precision) + "f}").format(x.to_value(self._format_unit)) for x in values]
else:
return []
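# Hedged illustration (not part of the original source); the unit and format are
# assumptions, and the module's relative imports mean this only runs inside astropy:
#
#   sfl = ScalarFormatterLocator(unit=u.km, format='x.xx')
#   sfl.formatter([1.2345, 2.5] * u.km, 0.01 * u.km)
#   # -> ['1.23', '2.50']  (two decimals, as implied by the 'x.xx' format)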
| {
"content_hash": "bd2875fc8102dc31e08f08e317c2a02b",
"timestamp": "",
"source": "github",
"line_count": 464,
"max_line_length": 114,
"avg_line_length": 33.877155172413794,
"alnum_prop": 0.5241427571728482,
"repo_name": "AustereCuriosity/astropy",
"id": "31cbb8a552648e6bfc08236004c0d83c46db0caa",
"size": "15784",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "astropy/visualization/wcsaxes/formatter_locator.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "366877"
},
{
"name": "C++",
"bytes": "1825"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Jupyter Notebook",
"bytes": "62553"
},
{
"name": "Python",
"bytes": "8239657"
},
{
"name": "Shell",
"bytes": "593"
},
{
"name": "TeX",
"bytes": "778"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from .models import Temperature, Door, Lamp, Light, Window, Humidity, Alarm
from django.contrib.sessions.models import Session
admin.site.register(Temperature)
admin.site.register(Door)
admin.site.register(Lamp)
admin.site.register(Light)
admin.site.register(Window)
admin.site.register(Humidity)
admin.site.register(Alarm)
admin.site.register(Session)
| {
"content_hash": "7341f019b0dc8ddc2e15204e346d7dd8",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 75,
"avg_line_length": 26,
"alnum_prop": 0.8128205128205128,
"repo_name": "BornToDebug/homeStruction",
"id": "9800fc5e4705738815b855371cd45019c06678c7",
"size": "390",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "website/project/admin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Arduino",
"bytes": "208528"
},
{
"name": "C++",
"bytes": "9401"
},
{
"name": "CSS",
"bytes": "409567"
},
{
"name": "HTML",
"bytes": "159997"
},
{
"name": "Java",
"bytes": "87873"
},
{
"name": "JavaScript",
"bytes": "429867"
},
{
"name": "Makefile",
"bytes": "14554"
},
{
"name": "Nginx",
"bytes": "2787"
},
{
"name": "Python",
"bytes": "77670"
},
{
"name": "Ruby",
"bytes": "752"
},
{
"name": "Shell",
"bytes": "66830"
}
],
"symlink_target": ""
} |
from argparse import ArgumentParser
import os
import sys
from cytomine import Cytomine
from cell_counting.cytomine_software import InstallSoftware
__author__ = "Rubens Ulysse <[email protected]>"
__copyright__ = "Copyright 2010-2017 University of Liège, Belgium, http://www.cytomine.be/"
def install_ET_ObjectCounter_Predictor(cytomine, software_router, software_path, software_working_path):
if software_path is not None:
software_path = os.path.join(software_path, "object_counter_predictor/ET/add_and_run_job.py")
if software_working_path is not None:
software_working_path = os.path.join(software_working_path, "object_counter")
software = InstallSoftware("ObjectCounter_ET_Predictor", "pyxitSuggestedTermJobService", "Default",
software_router, software_path, software_working_path)
software.add_parameter("cytomine_id_software", int, 0, required=True, set_by_server=True)
software.add_parameter("cytomine_id_project", int, 0, required=True, set_by_server=True)
software.add_parameter("model_id_job", "Domain", "", required=True, set_by_server=False,
uri="/api/job.json?project=$currentProject$", uri_print_attr="softwareName",
uri_sort_attr="softwareName")
software.add_parameter("cytomine_object_term", "Domain", "", required=True,
uri="/api/project/$currentProject$/term.json", uri_print_attr="name", uri_sort_attr="name")
software.add_parameter("image", "ListDomain", "", required=False, set_by_server=False,
uri="/api/project/$currentProject$/imageinstance.json", uri_print_attr="instanceFilename",
uri_sort_attr="instanceFilename")
software.add_parameter("annotation", "Number", "", required=False, set_by_server=False)
software.add_parameter("post_threshold", float, 0.5, required=True, set_by_server=False)
software.add_parameter("post_sigma", float, 0.0, required=True, set_by_server=False)
software.add_parameter("post_min_dist", int, 5, required=True, set_by_server=False)
software.add_parameter("n_jobs", int, default_value=1, required=True)
software.add_parameter("verbose", int, default_value=3, required=False)
cytomine_software = software.install_software(cytomine)
print("New software ID is {}".format(cytomine_software.id))
if __name__ == "__main__":
parser = ArgumentParser(prog="Software installer")
parser.add_argument('--cytomine_host', type=str)
parser.add_argument('--cytomine_public_key', type=str)
parser.add_argument('--cytomine_private_key', type=str)
parser.add_argument('--software_router', action="store_true")
parser.add_argument('--software_path', type=str)
parser.add_argument('--software_working_path', type=str)
params, other = parser.parse_known_args(sys.argv[1:])
# Connection to Cytomine Core
conn = Cytomine(
params.cytomine_host,
params.cytomine_public_key,
params.cytomine_private_key,
base_path='/api/',
working_path='/tmp',
verbose=True
)
install_ET_ObjectCounter_Predictor(conn, params.software_router, params.software_path,
params.software_working_path) | {
"content_hash": "38a85459731487645dce887ec6a0bac3",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 118,
"avg_line_length": 47.85507246376812,
"alnum_prop": 0.6735311932162326,
"repo_name": "urubens/CellCounting",
"id": "823cae1d821dd011f18ec3a837848e7e68a00f39",
"size": "3955",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "softwares/ET_predictor/add_software.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "198444"
},
{
"name": "Shell",
"bytes": "65838"
}
],
"symlink_target": ""
} |
import sys, os
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, create_engine, MetaData
from sqlalchemy import Table, ForeignKey
from sqlalchemy.orm import sessionmaker, relationship
from sqlalchemy.dialects.postgresql import *
from sqlalchemy.sql.expression import update
from psycopg2.extensions import register_adapter, AsIs
import numpy
import datetime as dt
import logging
import pandas as pd
import re
rootpath = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname( __file__ ))))
end = os.path.sep
if getattr(sys, 'frozen', False):
# we are running in a bundle
transaction_path = os.path.join(os.path.dirname(sys.executable), 'db_transactions')
logpath = os.path.join(os.path.dirname(sys.executable), 'logs')
else:
# we are running in a normal Python environment
transaction_path = os.path.join(rootpath, 'db_transactions')
logpath = os.path.join(rootpath, 'logs')
if not os.path.exists(transaction_path):
os.makedirs(transaction_path)
if not os.path.exists(logpath):
os.makedirs(logpath)
# Setup logging for program
date = (str(dt.datetime.now()).split()[0]).replace("-", "_")
logging.basicConfig(filename= os.path.join(transaction_path, 'database_log_{}.log'.format(date)))
logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO)
# Adapter for numpy datatypes
def adapt_numpy_int64(numpy_int64):
''' Enable postgres to recognize numpy's int64 data type'''
return AsIs(numpy_int64)
register_adapter(numpy.int64, adapt_numpy_int64)
# Creating database engine
engine = create_engine(
'postgresql+psycopg2://--/--',
echo=False)
conn = engine.connect()
# Mapping metadata
metadata = MetaData(bind=engine)
# Creating base
base = declarative_base()
# creating classes for tables to query things
class lter_table(base):
__table__ = Table('lter_table', metadata, autoload=True)
class study_site_table(base):
__table__ = Table('study_site_table', metadata, autoload=True)
class project_table(base):
__table__ = Table('project_table', metadata, autoload=True)
site_in_proj = relationship(
'site_in_project_table', cascade="delete, delete-orphan")
class site_in_project_table(base):
__table__ = Table('site_in_project_table', metadata, autoload=True)
taxa = relationship(
'taxa_table', cascade="delete, delete-orphan")
class taxa_table(base):
__table__ = Table('taxa_table', metadata, autoload=True)
count = relationship(
'count_table', cascade="delete, delete-orphan")
density = relationship(
'density_table', cascade="delete, delete-orphan")
biomass = relationship(
'biomass_table', cascade="delete, delete-orphan")
percent_cover = relationship(
'percent_cover_table', cascade="delete, delete-orphan")
individual = relationship(
'individual_table', cascade="delete, delete-orphan")
class taxa_accepted_table(base):
__table__ = Table('taxa_accepted_table', metadata, autoload=True)
class count_table(base):
__table__ = Table('count_table', metadata, autoload=True)
class biomass_table(base):
__table__ = Table('biomass_table', metadata, autoload=True)
class density_table(base):
__table__ = Table('density_table', metadata, autoload=True)
class percent_cover_table(base):
__table__ = Table('percent_cover_table', metadata, autoload=True)
class individual_table(base):
__table__ = Table('individual_table', metadata, autoload=True)
# Session maker to perform transactions
Session = sessionmaker(bind=engine, autoflush=False)
# Helper Functions
def find_types(tbl, name):
    '''Return a dict mapping column names to their database types for a table.'''
    dictname = {}
    for i, item in enumerate(tbl.__table__.c):
        # Note: the `name` argument is overwritten here with each column's name.
        name = (str(item).split('.')[1])
dictname[name] = str(
tbl.__table__.c[name].type)
return dictname
# Getting datatypes from database to perform checks prior to uploading
study_site_types = find_types(study_site_table, 'study_site')
project_types = find_types(project_table, 'project')
taxa_types = find_types(taxa_table, 'taxa')
taxa_accepted_types = find_types(taxa_accepted_table, 'taxa_accepted')
count_types = find_types(count_table, 'count')
biomass_types = find_types(biomass_table, 'biomass')
density_types = find_types(density_table, 'density')
percent_cover_types = find_types(percent_cover_table, 'percent_cover')
individual_types = find_types(individual_table, 'individual')
def convert_types(dataframe, types):
'''
Method to convert data types in dataframe to match
column types in database
'''
for i in dataframe.columns:
if types[i] in [
'VARCHAR', 'TEXT', 'VARCHAR(50)', 'VARCHAR(200)',
'spatial_replication_level_1', 'spatial_replication_level_2',
'spatial_replication_level_3', 'spatial_replication_level_4',
'spatial_replication_level_5']:
try:
dataframe.loc[:, i] = dataframe.loc[:, i].apply(str).values
print('In CONVERT: ', i ,dataframe.loc[:,i].dtypes)
except Exception as e:
print('string conversion did not work:', i, str(e))
dataframe.loc[:, i] = dataframe.loc[:, i].astype(object).values
if types[i] in ['NUMERIC', 'numeric', 'INTEGER', 'integer']:
try:
dataframe.loc[:, i] = pd.to_numeric(dataframe.loc[:, i].values, errors='coerce')
except Exception as e:
print('numeric conversion did not work:', i, str(e))
if re.search('observation', i) is not None or re.search('_extent$', i) is not None or re.search('unique_reps', i) is not None:
dataframe.loc[:, i] = pd.to_numeric(dataframe.loc[:, i].values, errors='coerce')
def replace_numeric_null_with_string(dataframe):
''' Function to take values such as -99999 and convert them
to NA's '''
for i in dataframe.columns:
try:
dataframe[i].replace(
{
'-999999': 'NA',
'-99999': 'NA',
'-9999': 'NA',
'-999': 'NA',
'-888': 'NA',
'-8888': 'NA',
'-88888': 'NA',
'-888888': 'NA'
}, inplace=True)
except:
print(i + ' did not convert')
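# Hedged example (illustrative data, not part of the original module): sentinel codes
# such as '-99999' become 'NA'.
#
#   df = pd.DataFrame({'value': ['-99999', '12', '-888']})
#   replace_numeric_null_with_string(df)
#   # df['value'] -> ['NA', '12', 'NA']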
| {
"content_hash": "c7df72a0472aa5ccd15e973f4262334e",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 134,
"avg_line_length": 38.987730061349694,
"alnum_prop": 0.6508261211644375,
"repo_name": "bibsian/database-development",
"id": "183157d6637581e36ab2b7dd47b6d93e937946e3",
"size": "6377",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "poplerGUI/logiclayer/datalayer/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1241917"
}
],
"symlink_target": ""
} |
"""GridFS is a specification for storing large objects in Mongo.
The :mod:`gridfs` package is an implementation of GridFS on top of
:mod:`pymongo`, exposing a file-like interface.
.. mongodoc:: gridfs
"""
from gridfs.errors import (NoFile,
UnsupportedAPI)
from gridfs.grid_file import (GridIn,
GridOut)
from pymongo import (ASCENDING,
DESCENDING)
from pymongo.database import Database
class GridFS(object):
"""An instance of GridFS on top of a single Database.
"""
def __init__(self, database, collection="fs"):
"""Create a new instance of :class:`GridFS`.
Raises :class:`TypeError` if `database` is not an instance of
:class:`~pymongo.database.Database`.
:Parameters:
- `database`: database to use
- `collection` (optional): root collection to use
.. versionadded:: 1.6
The `collection` parameter.
.. mongodoc:: gridfs
"""
if not isinstance(database, Database):
raise TypeError("database must be an instance of Database")
self.__database = database
self.__collection = database[collection]
self.__files = self.__collection.files
self.__chunks = self.__collection.chunks
connection = database.connection
if not hasattr(connection, 'is_primary') or connection.is_primary:
self.__chunks.ensure_index([("files_id", ASCENDING),
("n", ASCENDING)],
unique=True)
def new_file(self, **kwargs):
"""Create a new file in GridFS.
Returns a new :class:`~gridfs.grid_file.GridIn` instance to
which data can be written. Any keyword arguments will be
passed through to :meth:`~gridfs.grid_file.GridIn`.
If the ``"_id"`` of the file is manually specified, it must
not already exist in GridFS. Otherwise
:class:`~gridfs.errors.FileExists` is raised.
:Parameters:
- `**kwargs` (optional): keyword arguments for file creation
.. versionadded:: 1.6
"""
return GridIn(self.__collection, **kwargs)
def put(self, data, **kwargs):
"""Put data in GridFS as a new file.
Equivalent to doing::
try:
f = new_file(**kwargs)
f.write(data)
finally
f.close()
`data` can be either an instance of :class:`str` (:class:`bytes`
in python 3) or a file-like object providing a :meth:`read` method.
If an `encoding` keyword argument is passed, `data` can also be a
:class:`unicode` (:class:`str` in python 3) instance, which will
be encoded as `encoding` before being written. Any keyword arguments
will be passed through to the created file - see
:meth:`~gridfs.grid_file.GridIn` for possible arguments. Returns the
``"_id"`` of the created file.
If the ``"_id"`` of the file is manually specified, it must
not already exist in GridFS. Otherwise
:class:`~gridfs.errors.FileExists` is raised.
:Parameters:
- `data`: data to be written as a file.
- `**kwargs` (optional): keyword arguments for file creation
.. versionadded:: 1.9
The ability to write :class:`unicode`, if an `encoding` has
been specified as a keyword argument.
.. versionadded:: 1.6
"""
grid_file = GridIn(self.__collection, **kwargs)
try:
grid_file.write(data)
finally:
grid_file.close()
return grid_file._id
def get(self, file_id):
"""Get a file from GridFS by ``"_id"``.
Returns an instance of :class:`~gridfs.grid_file.GridOut`,
which provides a file-like interface for reading.
:Parameters:
- `file_id`: ``"_id"`` of the file to get
.. versionadded:: 1.6
"""
return GridOut(self.__collection, file_id)
def get_version(self, filename=None, version=-1, **kwargs):
"""Get a file from GridFS by ``"filename"`` or metadata fields.
Returns a version of the file in GridFS whose filename matches
`filename` and whose metadata fields match the supplied keyword
arguments, as an instance of :class:`~gridfs.grid_file.GridOut`.
Version numbering is a convenience atop the GridFS API provided
by MongoDB. If more than one file matches the query (either by
`filename` alone, by metadata fields, or by a combination of
both), then version ``-1`` will be the most recently uploaded
matching file, ``-2`` the second most recently
uploaded, etc. Version ``0`` will be the first version
uploaded, ``1`` the second version, etc. So if three versions
have been uploaded, then version ``0`` is the same as version
``-3``, version ``1`` is the same as version ``-2``, and
version ``2`` is the same as version ``-1``.
Raises :class:`~gridfs.errors.NoFile` if no such version of
that file exists.
An index on ``{filename: 1, uploadDate: -1}`` will
automatically be created when this method is called the first
time.
:Parameters:
- `filename`: ``"filename"`` of the file to get, or `None`
- `version` (optional): version of the file to get (defaults
to -1, the most recent version uploaded)
- `**kwargs` (optional): find files by custom metadata.
.. versionchanged:: 1.11
`filename` defaults to None;
.. versionadded:: 1.11
Accept keyword arguments to find files by custom metadata.
.. versionadded:: 1.9
"""
connection = self.__database.connection
if not hasattr(connection, 'is_primary') or connection.is_primary:
self.__files.ensure_index([("filename", ASCENDING),
("uploadDate", DESCENDING)])
query = kwargs
if filename is not None:
query["filename"] = filename
cursor = self.__files.find(query)
if version < 0:
skip = abs(version) - 1
cursor.limit(-1).skip(skip).sort("uploadDate", DESCENDING)
else:
cursor.limit(-1).skip(version).sort("uploadDate", ASCENDING)
try:
grid_file = cursor.next()
return GridOut(self.__collection, file_document=grid_file)
except StopIteration:
raise NoFile("no version %d for filename %r" % (version, filename))
def get_last_version(self, filename=None, **kwargs):
"""Get the most recent version of a file in GridFS by ``"filename"``
or metadata fields.
Equivalent to calling :meth:`get_version` with the default
`version` (``-1``).
:Parameters:
- `filename`: ``"filename"`` of the file to get, or `None`
- `**kwargs` (optional): find files by custom metadata.
.. versionchanged:: 1.11
`filename` defaults to None;
.. versionadded:: 1.11
Accept keyword arguments to find files by custom metadata. See
:meth:`get_version`.
.. versionadded:: 1.6
"""
return self.get_version(filename=filename, **kwargs)
# TODO add optional safe mode for chunk removal?
def delete(self, file_id):
"""Delete a file from GridFS by ``"_id"``.
Removes all data belonging to the file with ``"_id"``:
`file_id`.
.. warning:: Any processes/threads reading from the file while
this method is executing will likely see an invalid/corrupt
file. Care should be taken to avoid concurrent reads to a file
while it is being deleted.
.. note:: Deletes of non-existent files are considered successful
since the end result is the same: no file with that _id remains.
:Parameters:
- `file_id`: ``"_id"`` of the file to delete
.. versionadded:: 1.6
"""
self.__files.remove({"_id": file_id}, safe=True)
self.__chunks.remove({"files_id": file_id})
def list(self):
"""List the names of all files stored in this instance of
:class:`GridFS`.
.. versionchanged:: 1.6
Removed the `collection` argument.
"""
return self.__files.distinct("filename")
def exists(self, document_or_id=None, **kwargs):
"""Check if a file exists in this instance of :class:`GridFS`.
The file to check for can be specified by the value of it's
``_id`` key, or by passing in a query document. A query
document can be passed in as dictionary, or by using keyword
arguments. Thus, the following three calls are equivalent:
>>> fs.exists(file_id)
>>> fs.exists({"_id": file_id})
>>> fs.exists(_id=file_id)
As are the following two calls:
>>> fs.exists({"filename": "mike.txt"})
>>> fs.exists(filename="mike.txt")
And the following two:
>>> fs.exists({"foo": {"$gt": 12}})
>>> fs.exists(foo={"$gt": 12})
Returns ``True`` if a matching file exists, ``False``
otherwise. Calls to :meth:`exists` will not automatically
create appropriate indexes; application developers should be
sure to create indexes if needed and as appropriate.
:Parameters:
- `document_or_id` (optional): query document, or _id of the
document to check for
- `**kwargs` (optional): keyword arguments are used as a
query document, if they're present.
.. versionadded:: 1.8
"""
if kwargs:
return self.__files.find_one(kwargs, ["_id"]) is not None
return self.__files.find_one(document_or_id, ["_id"]) is not None
def open(self, *args, **kwargs):
"""No longer supported.
.. versionchanged:: 1.6
The open method is no longer supported.
"""
raise UnsupportedAPI("The open method is no longer supported.")
def remove(self, *args, **kwargs):
"""No longer supported.
.. versionchanged:: 1.6
The remove method is no longer supported.
"""
raise UnsupportedAPI("The remove method is no longer supported. "
"Please use the delete method instead.")
| {
"content_hash": "c84fecfb26652da4f9cb750a89ce261e",
"timestamp": "",
"source": "github",
"line_count": 285,
"max_line_length": 79,
"avg_line_length": 36.87017543859649,
"alnum_prop": 0.5847925390178911,
"repo_name": "mburakergenc/Malware-Detection-using-Machine-Learning",
"id": "9b92e89272432c0fec45c5b82e14cb8b6dac3a5c",
"size": "11085",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cuckoo/pymongo-2.3/gridfs/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "120655"
},
{
"name": "CSS",
"bytes": "57002"
},
{
"name": "HTML",
"bytes": "14690306"
},
{
"name": "JavaScript",
"bytes": "134909"
},
{
"name": "Jupyter Notebook",
"bytes": "167644"
},
{
"name": "Makefile",
"bytes": "4676"
},
{
"name": "Mako",
"bytes": "1078"
},
{
"name": "Python",
"bytes": "1576528"
},
{
"name": "Shell",
"bytes": "34027"
},
{
"name": "Visual Basic",
"bytes": "1101"
}
],
"symlink_target": ""
} |
"""
Gerador de senhas aleatória fáceis de serem lembradas pelos usuários
"""
from random import randint
import easygui
def gerarSenha():
"""
gerarSenha() -> senha: str
Gera uma senha composta por um substantivo seguido de um adjetivo, ambos concordando em genêro, seguido também
de um numero de dois a três digitos.
"""
#gera aleatóriamente um indice de substantivo
indexSubstantivo = randint(0,40)
if (indexSubstantivo < 20):
#caso o substantivo seja masculino, gera aleatóriamente um indice de adjetivo
#em uma faixa em que ele será masculino ou neutro
indexAdjetivo = randint(0,39)
else:
#caso o substantivo seja feminino, gera aleatóriamente um indice de adjetivo
#em uma faixa em que ele será neutro ou feminino
indexAdjetivo = randint(20,59)
#gera aleatóriamente um número de dois ou três digitos
numero = str(randint(0,999)).zfill(2)
#abre o arquivo de substantivos e busca substantivo do indice desejado
with open('ListaSubstantivos.txt','r') as arqSubstantivo:
substantivo = arqSubstantivo.readlines()[indexSubstantivo].strip()
#abre o arquivo de adjetivos e busca adjetivo do indice desejado
with open('ListaAdjetivos.txt','r') as arqAdjetivo:
adjetivo = arqAdjetivo.readlines()[indexAdjetivo].strip()
#retorna a senha gerada
return substantivo + adjetivo + numero
def main():
### sem interface gráfica ###
"""
#lê o número de senhas a serem geradas
n = raw_input('Digite quantas senhas deseja gerar: ')
while (not n.isdigit()):
n = raw_input('Valor invalido. Digite novamente: ')
#gera o número de senhas digitado pelo usuário
for i in range(int(n)):
print(gerarSenha())
"""
### com interface gráfica ###
#lê o número de senhas a serem geradas
n = easygui.integerbox(msg = 'Digite quantas senhas deseja gerar:', title = 'Gerador de senhas aleatórias', default = '', lowerbound = 1, upperbound = 10000)
#gera o número de senhas digitado pelo usuário
if n:
senha = "\n".join(gerarSenha() for _ in range(n))
easygui.textbox(msg = 'Senhas geradas:', title = 'Gerador de senhas aleatórias', text = senha)
#encerra o programa
return 0
if __name__ == '__main__':
main()
| {
"content_hash": "a2c1082a538c80a4c56b811c8c14ac7a",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 159,
"avg_line_length": 31.485714285714284,
"alnum_prop": 0.7091651542649727,
"repo_name": "thiagopnobre/senha_aleatoria_facil",
"id": "01afc4eda7661d212c94c9616e52e9fb93f6f515",
"size": "2277",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SenhaAleatoriaFacil.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2277"
}
],
"symlink_target": ""
} |
from snimpy.manager import Manager as M
from snimpy.manager import load
from lablog.interfaces.snmp import SNMP
from lablog import messages
from datetime import datetime, timedelta
class UPS(SNMP):
exchange = messages.Exchanges.energy
measurement_key = "energy.ups"
run_delta = timedelta(seconds=30)
def data(self, data=None):
for i in self.mibs.split(","): load(i)
self.manager = M(self.ip, self.community, self.version)
m = self.manager
d = {}
d['model'] = str(self.manager.upsIdentModel).strip()
d['manufacturer'] = str(m.upsIdentManufacturer).strip()
d['values'] = {}
d['values']['battery_voltage'] = int(m.upsBatteryVoltage)
d['values']['battery_current'] = int(m.upsBatteryCurrent)
d['values']['input_frequency'] = []
for l in m.upsInputFrequency:
d['values']['input_frequency'].append(int(m.upsInputFrequency[l]))
d['values']['input_voltage'] = []
for l in m.upsInputVoltage:
d['values']['input_voltage'].append(int(m.upsInputVoltage[l]))
d['values']['output_current'] = []
for l in m.upsOutputCurrent:
d['values']['output_current'].append(int(m.upsOutputCurrent[l]))
d['values']['output_power'] = []
for l in m.upsOutputPower:
d['values']['output_power'].append(int(m.upsOutputPower[l]))
return d
def point(self, data, key, val, line=None):
t = datetime.utcnow()
d = dict(
measurement="{}.{}".format(self.measurement_key, key),
time=t,
tags=dict(
model=data['model'],
manufacturer=data['manufacturer'],
interface=str(self._id),
),
fields=dict(
value=val
),
)
if line: d['tags']['line'] = line
return d
def parse_data(self, data):
points = []
for k,v in data['values'].iteritems():
if isinstance(v, list):
for line, i in enumerate(v):
points.append(self.point(data, k, i, line))
else:
points.append(self.point(data, k, v))
return points
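# Hedged illustration (not part of the interface): a single point produced by
# parse_data() has the shape below; the concrete values are assumptions.
#
#   {
#       "measurement": "energy.ups.battery_voltage",
#       "time": <utc datetime>,
#       "tags": {"model": "...", "manufacturer": "...", "interface": "<interface id>"},
#       "fields": {"value": 13},
#   }
#   # list-valued readings (e.g. output_power) additionally carry a "line" tag.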
| {
"content_hash": "c1c10d1a60209b4fdff5218ec68c3b59",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 78,
"avg_line_length": 33.52238805970149,
"alnum_prop": 0.5525378450578807,
"repo_name": "NationalAssociationOfRealtors/LabLog",
"id": "5b7919c35a689990f3ca9b510c5d2787729bde21",
"size": "2246",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lablog/interfaces/ups.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1643"
},
{
"name": "HCL",
"bytes": "2483"
},
{
"name": "HTML",
"bytes": "42307"
},
{
"name": "JavaScript",
"bytes": "1"
},
{
"name": "Nginx",
"bytes": "2175"
},
{
"name": "Python",
"bytes": "114780"
},
{
"name": "Shell",
"bytes": "228"
}
],
"symlink_target": ""
} |
import pytest
import numpy as np
from numpy.testing import assert_allclose
from keras.utils.test_utils import layer_test
from keras_contrib.layers import recurrent
from keras.layers import embeddings
from keras.models import Sequential
from keras import regularizers
from keras.utils.test_utils import keras_test
from keras import backend as K
from keras_contrib import backend as KC
nb_samples, timesteps, embedding_dim, output_dim = 2, 5, 4, 3
embedding_num = 12
def rnn_test(f):
"""
All the recurrent layers share the same interface,
so we can run through them with a single function.
"""
f = keras_test(f)
# Example : return pytest.mark.parametrize("layer_class", [recurrent.JZ1, recurrent.NTM])(f)
return pytest.mark.parametrize('layer_class', [])(f)
@rnn_test
def test_return_sequences(layer_class):
layer_test(layer_class,
kwargs={'output_dim': output_dim,
'return_sequences': True},
input_shape=(nb_samples, timesteps, embedding_dim))
@rnn_test
def test_dynamic_behavior(layer_class):
layer = layer_class(output_dim, input_dim=embedding_dim)
model = Sequential()
model.add(layer)
model.compile('sgd', 'mse')
x = np.random.random((nb_samples, timesteps, embedding_dim))
y = np.random.random((nb_samples, output_dim))
model.train_on_batch(x, y)
@rnn_test
def test_dropout(layer_class):
layer_test(layer_class,
kwargs={'output_dim': output_dim,
'dropout_U': 0.1,
'dropout_W': 0.1},
input_shape=(nb_samples, timesteps, embedding_dim))
@rnn_test
def test_implementation_mode(layer_class):
for mode in ['cpu', 'mem', 'gpu']:
layer_test(layer_class,
kwargs={'output_dim': output_dim,
'consume_less': mode},
input_shape=(nb_samples, timesteps, embedding_dim))
@rnn_test
def test_statefulness(layer_class):
model = Sequential()
model.add(embeddings.Embedding(embedding_num, embedding_dim,
mask_zero=True,
input_length=timesteps,
batch_input_shape=(nb_samples, timesteps)))
layer = layer_class(output_dim, return_sequences=False,
stateful=True,
weights=None)
model.add(layer)
model.compile(optimizer='sgd', loss='mse')
out1 = model.predict(np.ones((nb_samples, timesteps)))
assert(out1.shape == (nb_samples, output_dim))
# train once so that the states change
model.train_on_batch(np.ones((nb_samples, timesteps)),
np.ones((nb_samples, output_dim)))
out2 = model.predict(np.ones((nb_samples, timesteps)))
# if the state is not reset, output should be different
assert(out1.max() != out2.max())
# check that output changes after states are reset
# (even though the model itself didn't change)
layer.reset_states()
out3 = model.predict(np.ones((nb_samples, timesteps)))
assert(out2.max() != out3.max())
# check that container-level reset_states() works
model.reset_states()
out4 = model.predict(np.ones((nb_samples, timesteps)))
assert_allclose(out3, out4, atol=1e-5)
# check that the call to `predict` updated the states
out5 = model.predict(np.ones((nb_samples, timesteps)))
assert(out4.max() != out5.max())
# Check masking
layer.reset_states()
left_padded_input = np.ones((nb_samples, timesteps))
left_padded_input[0, :1] = 0
left_padded_input[1, :2] = 0
out6 = model.predict(left_padded_input)
layer.reset_states()
right_padded_input = np.ones((nb_samples, timesteps))
right_padded_input[0, -1:] = 0
right_padded_input[1, -2:] = 0
out7 = model.predict(right_padded_input)
assert_allclose(out7, out6, atol=1e-5)
@rnn_test
def test_regularizer(layer_class):
layer = layer_class(output_dim, return_sequences=False, weights=None,
batch_input_shape=(nb_samples, timesteps, embedding_dim),
W_regularizer=regularizers.WeightRegularizer(l1=0.01),
U_regularizer=regularizers.WeightRegularizer(l1=0.01),
b_regularizer='l2')
shape = (nb_samples, timesteps, embedding_dim)
layer.build(shape)
output = layer(K.variable(np.ones(shape)))
K.eval(output)
if layer_class == recurrent.SimpleRNN:
assert len(layer.losses) == 3
if layer_class == recurrent.GRU:
assert len(layer.losses) == 9
if layer_class == recurrent.LSTM:
assert len(layer.losses) == 12
@rnn_test
def test_from_config(layer_class):
for stateful in (False, True):
l1 = layer_class(output_dim=1, stateful=stateful)
l2 = layer_class.from_config(l1.get_config())
assert l1.get_config() == l2.get_config()
if __name__ == '__main__':
pytest.main([__file__])
| {
"content_hash": "be96acd364858f3dd7759d9acdaffb98",
"timestamp": "",
"source": "github",
"line_count": 150,
"max_line_length": 96,
"avg_line_length": 33.38,
"alnum_prop": 0.6231276213301378,
"repo_name": "stygstra/keras-contrib",
"id": "9601983ab8707e51ce48a6328dbdd1767e09679e",
"size": "5007",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/keras_contrib/layers/test_recurrent.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "293022"
}
],
"symlink_target": ""
} |
def mock_get_workflow_execution_history(*args, **kwargs):
override_data = kwargs.pop('override_data', {})
response = {
"events": [
{
'eventId': 1,
'eventType': 'WorkflowExecutionStarted',
'workflowExecutionStartedEventAttributes': {
'taskList': {
'name': 'test'
},
'parentInitiatedEventId': 0,
'taskStartToCloseTimeout': '300',
'childPolicy': 'TERMINATE',
'executionStartToCloseTimeout': '6000',
'workflowType': {
'version': '0.1',
'name': 'test-crawl-fsm1'
},
},
'eventTimestamp': 1365177769.585,
},
{
'eventId': 2,
'eventType': 'DecisionTaskScheduled',
'decisionTaskScheduledEventAttributes': {
'startToCloseTimeout': '300',
'taskList': {
'name': 'test'
}
},
'eventTimestamp': 1365177769.585
}
]
}
response.update(override_data)
return response
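# Hedged usage sketch (not from the original module): extra keys merge over the
# canned response; the 'nextPageToken' key below is illustrative.
if __name__ == '__main__':
    history = mock_get_workflow_execution_history(
        override_data={'nextPageToken': 'page-2'})
    assert history['nextPageToken'] == 'page-2'
    assert len(history['events']) == 2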
| {
"content_hash": "3909169d6831b7ab8ba71a93d4cabb53",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 60,
"avg_line_length": 32.75,
"alnum_prop": 0.40610687022900765,
"repo_name": "botify-labs/python-simple-workflow",
"id": "3b860595942913951d140734bf900e4484b14d58",
"size": "1335",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/mocks/event.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "260109"
},
{
"name": "Shell",
"bytes": "34"
}
],
"symlink_target": ""
} |