| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
| stringlengths 3–1.05M | stringlengths 5–104 | stringlengths 4–251 | stringclasses 1 value | stringclasses 15 values | int64 3–1.05M |
#!/usr/bin/env python
import json
import optparse
import os
import subprocess
import sys
import tempfile
CHUNK_SIZE = 2**20
DEFAULT_DATA_TABLE_NAME = "bowtie_indexes"
def get_id_name( params, dbkey, fasta_description=None):
# TODO: ensure sequence_id is unique and does not already appear in location file
sequence_id = params['param_dict']['sequence_id']
if not sequence_id:
sequence_id = dbkey
sequence_name = params['param_dict']['sequence_name']
if not sequence_name:
sequence_name = fasta_description
if not sequence_name:
sequence_name = dbkey
return sequence_id, sequence_name
def build_bowtie_index( data_manager_dict, fasta_filename, params, target_directory, dbkey, sequence_id, sequence_name, data_table_name=DEFAULT_DATA_TABLE_NAME, color_space=False ):
# TODO: allow multiple FASTA input files
fasta_base_name = os.path.split( fasta_filename )[-1]
sym_linked_fasta_filename = os.path.join( target_directory, fasta_base_name )
os.symlink( fasta_filename, sym_linked_fasta_filename )
args = [ 'bowtie-build' ]
if color_space:
args.append( '-C' )
    args.append( sym_linked_fasta_filename )
    args.append( fasta_base_name )
tmp_stderr = tempfile.NamedTemporaryFile( prefix="tmp-data-manager-bowtie-index-builder-stderr" )
proc = subprocess.Popen( args=args, shell=False, cwd=target_directory, stderr=tmp_stderr.fileno() )
return_code = proc.wait()
if return_code:
tmp_stderr.flush()
tmp_stderr.seek(0)
print >> sys.stderr, "Error building index:"
while True:
chunk = tmp_stderr.read( CHUNK_SIZE )
if not chunk:
break
sys.stderr.write( chunk )
sys.exit( return_code )
tmp_stderr.close()
data_table_entry = dict( value=sequence_id, dbkey=dbkey, name=sequence_name, path=fasta_base_name )
_add_data_table_entry( data_manager_dict, data_table_name, data_table_entry )
def _add_data_table_entry( data_manager_dict, data_table_name, data_table_entry ):
data_manager_dict['data_tables'] = data_manager_dict.get( 'data_tables', {} )
data_manager_dict['data_tables'][ data_table_name ] = data_manager_dict['data_tables'].get( data_table_name, [] )
data_manager_dict['data_tables'][ data_table_name ].append( data_table_entry )
return data_manager_dict
def main():
# Parse Command Line
parser = optparse.OptionParser()
parser.add_option( '-f', '--fasta_filename', dest='fasta_filename', action='store', type="string", default=None, help='fasta_filename' )
parser.add_option( '-d', '--fasta_dbkey', dest='fasta_dbkey', action='store', type="string", default=None, help='fasta_dbkey' )
parser.add_option( '-t', '--fasta_description', dest='fasta_description', action='store', type="string", default=None, help='fasta_description' )
parser.add_option( '-n', '--data_table_name', dest='data_table_name', action='store', type="string", default=None, help='data_table_name' )
parser.add_option( '-c', '--color_space', dest='color_space', action='store_true', default=False, help='color_space' )
(options, args) = parser.parse_args()
filename = args[0]
params = json.loads( open( filename ).read() )
target_directory = params[ 'output_data' ][0]['extra_files_path']
os.mkdir( target_directory )
data_manager_dict = {}
dbkey = options.fasta_dbkey
if dbkey in [ None, '', '?' ]:
raise Exception( '"%s" is not a valid dbkey. You must specify a valid dbkey.' % ( dbkey ) )
sequence_id, sequence_name = get_id_name( params, dbkey=dbkey, fasta_description=options.fasta_description )
# build the index
build_bowtie_index( data_manager_dict, options.fasta_filename, params, target_directory, dbkey, sequence_id, sequence_name, data_table_name=options.data_table_name or DEFAULT_DATA_TABLE_NAME, color_space=options.color_space )
# save info to json file
open( filename, 'wb' ).write( json.dumps( data_manager_dict ) )
if __name__ == "__main__":
main()
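# Example invocation (illustrative only; option names come from the optparse
# setup above, and the trailing positional argument is the JSON params file
# supplied by the data manager framework):
#   python bowtie_index_builder.py --fasta_filename genome.fa \
#       --fasta_dbkey hg19 --data_table_name bowtie_indexes params.json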
| nturaga/tools-iuc | data_managers/data_manager_bowtie_index_builder/data_manager/bowtie_index_builder.py | Python | mit | 4,124 |
"""
WSGI config for bjjweb project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "bjjweb.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| coffenbacher/bjj | bjjweb/wsgi.py | Python | agpl-3.0 | 387 |
class Solution(object):
    def maxProduct(self, nums):
        """
        :type nums: List[int]
        :rtype: int
        """
        max_value = -200000000
for i in range(1, len(nums) + 1):
for j in range(len(nums) - i + 1):
try_value = self.get_value(nums[j:j + i])
if max_value < try_value:
max_value = try_value
return max_value
def get_value(self, nums):
value = 1
for i in nums:
value *= i
return value
# Time consuming.
# Best solution
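# Explanatory note (added, not part of the original solution): a negative
# number can turn the smallest (most negative) running product into the
# largest one, so it is enough to track both the maximum and the minimum
# product ending at the current element. For [2, 3, -2, 4] the (big, small)
# pairs evolve as (2, 2), (6, 3), (-2, -12), (4, -48), and the answer is the
# largest "big" seen, i.e. 6.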
class Solution1(object):
def maxProduct(self, nums):
maximum = big = small = nums[0]
for n in nums[1:]:
print n, n * big, n * small
big, small = max(n, n * big, n * small), min(n, n * big, n * small)
maximum = max(maximum, big)
return maximum
s = Solution()
s1 = Solution1()
print s.maxProduct([-4, -3])
print s1.maxProduct([2, 3, -2, 4])
print s.maxProduct([-2])
print s.maxProduct(
[1, -5, 6, -5, 2, -4, -5, 0, 3, 2, -4, 0, -5, -3, -1, -4, -1, 4, 1, -1, -3, -1, 1, 3, -4, -6, -2, 5, 1, -5, 0, -1,
-5, 0, 1, 2, 6, 1, 2, -6, 5, 5, 0, 1, 0, 1, 1, -1, -1, 3, 1, 0, 4, -3, 0, 4, -4, -1, 6, 5, 5, 6, -6, 1, 1, 3, 4, 3,
-1, -3, 0, -5, -4, 1, 5, -2, 3, -1, 2, 1, 1, 6, 0, 5, -5, 6, -6, 3, 0, 4, -1, 3, 6, 0, -2, 0, -1, 6, 4, 1, -5, 1,
0, 1, -1, -1, 3, 5, 5, 4, 2, 5, 0, -1, 5, 2, 2, -3, -1, -1, 0, -6, -2, -5, 1, -2, 2, 0, 0, 2, -3, -2, -4, 1, 1, -4,
-3, -1, 0, 0, 1, -3, -2, 3, -4, 5, 2, -1, 4, 1, 5, 6, 0, 1, 1, -2, -1, 0, -1, -5, 5, 6, 6, -1, -1, 0, -4, 2, 1, 3,
-5, 6, -5, -1, -1, -3, -1, -4, -2, -1, -1, 1, -3, -4, 0, 1, -3, 4, 3, 2, -2, 6, -3, -6, -6, -2, -5, 1, 2, 0, -1, 0,
0, -2, 3, -4, 2, 4, 3, -1, 3, 1, 0, 2, 1, -1, 0, 5, -1, -3, -6, -5, 0, 6, 6, -6, -5, 4, -2, -1, 0, 4, 6, -3, 1, -1,
0, 1, -5, 5, -3, -3, -3, -1, -1, 4, 0, -2, -4, 3, 5, 5, -1, -1, -5, -2, -4, -4, 6, 0, -3, -1, -5, -3, -1, 6, 1, -5,
-1, 0, 1, -4, -5, 0, 0, 0, -3, -5, -1, -4, -1, 5, 5, -4, 4, -1, 6, -1, 1, -1, 2, -2, -3, 0, 1, 0, 0, -3, 0, 2, 5,
-6, -3, -3, 3, -4, -2, -6, -1, 1, 4, 4, 0, -6, -5, -6, -3, 5, -3, 1, -4, 6, -2, 0, -4, -1, 0, -1, 0, 6, -6, 0, 5,
0, 1, -3, 6, 1, -1, 1, 0, -1, 1, -1, -6, -3, 4, -1, -4, 6, 4, -1, -3, 2, -6, 5, 0, 4, -2, 1, 0, 4, -2, 2, 0, 0, 5,
5, -3, 4, 3, -5, 2, 2, 6, -1, -2, 1, -3, 1, -1, 6, -4, 0, 0, 0, 2, -5, -4, 2, 6, -3, -6, -1, -6, 0, 0, 2, -1, 6,
-4, -5, -1, 0, -3, -3, -1, 0, -4, 3, 1, 5, 0, 2, 5, 0, 4, -5, -1, 3, 1, -1, -1, 1, 1, -2, 3, 5, 4, 6, 2, 6, -6, 5,
2, -3, 0, -1, -1, 3, 1, 1, 1, -2, -5, 3, -1, 3, 0, -1, 3, 1, 1, -2, 6, 3, -6, 5, -5, -5, 0, -2, -3, -3, -4, 6, -1,
-6, 6, -3, -5, 1, -1, 0, 0, 1, 4, -5, 0, 1, -2, 6, 1, -3, -5, 0, 4, -2, 1, -5, -4, 0, 0, -1, -2, 0, 2, -2, 5, 6])
| kingno21/practice | problems/que007/solution.py | Python | mit | 2,830 |
# -*- coding: utf-8 -*-
import pytest
from repocket.rules import compile_rules, Rule
from repocket.main import PocketItem
def test_single_rule():
item1 = PocketItem(1, 'http://google.com', [], 'Google')
item2 = PocketItem(2, 'http://github.com', [], 'Github')
rule = Rule('.*google\.com', ['google'])
assert rule.suggest_tags(item1) == set(['google'])
assert rule.suggest_tags(item2) == set()
def test_tag_creation():
items = [
PocketItem(1, 'http://google.com', [], 'Google'),
PocketItem(2, 'http://github.com/lensvol/repocket', [], 'Specific github'),
PocketItem(3, 'http://github.com/', [], 'Github'),
]
rule = Rule('.*github\.com/([a-z0-9]+)/.*', ['github', '{0}'])
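    # Note (added for clarity): '{0}' in the tag list is a template filled
    # from the first regex capture group, which is why the repocket URL above
    # is expected to yield the extra tag 'lensvol'.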
results = [rule.suggest_tags(item) for item in items]
assert results == [set(), set(['github', 'lensvol']), set()]
| lensvol/repocket | tests/test_rules.py | Python | mit | 857 |
#!/usr/bin/python2.7
# coding:utf-8
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for full_text_search.py"""
import datetime
import unittest
from google.appengine.ext import db
from google.appengine.ext import testbed
from google.appengine.api import search
import sys
import logging
import delete
import full_text_search
import model
TEST_DATETIME = datetime.datetime(2010, 1, 1, 0, 0, 0)
class FullTextSearchTests(unittest.TestCase):
def setUp(self):
self.tb = testbed.Testbed()
self.tb.activate()
self.tb.init_search_stub()
self.p1 = model.Person.create_original_with_record_id(
'haiti',
'haiti/0505',
given_name='Iori',
family_name='Minase',
full_name='Iori Minase',
alternate_names='Iorin',
entry_date=TEST_DATETIME
)
self.p2 = model.Person.create_original_with_record_id(
'haiti',
'haiti/0325',
given_name='Yayoi',
family_name='Takatsuki',
full_name='Yayoi Takatsuki',
alternate_names='Yayotan',
entry_date=TEST_DATETIME
)
self.p3 = model.Person.create_original_with_record_id(
'haiti',
'haiti/1202',
given_name='Yayoi',
full_name='Yayoi san',
alternate_names='Nigochan',
entry_date=TEST_DATETIME
)
self.p4 = model.Person.create_original_with_record_id(
'haiti',
'haiti/1123',
given_name='Miki',
family_name='Hoshii',
full_name='Miki Hoshii',
entry_date=TEST_DATETIME
)
self.p5 = model.Person.create_original_with_record_id(
'haiti',
'haiti/0522',
given_name='Ami',
family_name='Futami',
full_name='Ami Futami',
entry_date=TEST_DATETIME
)
self.p6 = model.Person.create_original_with_record_id(
'haiti',
'haiti/0225',
given_name='Chihaya',
family_name='Kisaragi',
full_name='Chihaya Kisaragi',
home_street='Kunaideme72',
home_city='Arao',
home_state='Kumamoto',
home_postal_code='864-0003',
home_neighborhood='Araokeibajou',
home_country='Japan',
entry_date=TEST_DATETIME
)
self.p7 = model.Person.create_original_with_record_id(
'haiti',
'haiti/1010',
given_name='Hibiki',
family_name='Ganaha',
full_name='Hibiki Ganaha',
entry_date=TEST_DATETIME
)
self.p8 = model.Person.create_original_with_record_id(
'haiti',
'haiti/0719',
given_name=u'あずさ',
family_name=u'三浦',
home_city=u'横浜',
entry_date=TEST_DATETIME
)
self.p9 = model.Person.create_original_with_record_id(
'haiti',
'haiti/0623',
given_name=u'рицуко',
family_name=u'акидуки',
home_city=u'тоттори',
entry_date=TEST_DATETIME
)
self.p10 = model.Person.create_original_with_record_id(
'haiti',
'haiti:0810',
given_name='Rin',
family_name='Shibuya',
full_name='Rin Shibuya',
home_city='shinjuku',
entry_date=TEST_DATETIME
)
self.p11 = model.Person.create_original_with_record_id(
'haiti',
'haiti:0203',
given_name='Rin',
family_name='Tosaka',
full_name='Rin Tosaka',
home_city='Shibuya',
entry_date=TEST_DATETIME
)
self.p12 = model.Person.create_original_with_record_id(
'haiti',
'haiti/1224',
given_name=u'雪歩',
family_name=u'萩原',
entry_date=TEST_DATETIME)
self.p13 = model.Person.create_original_with_record_id(
'haiti',
'haiti/0523',
given_name=u'Zhen Mei',
family_name=u'Shuang Hai',
entry_date=TEST_DATETIME)
self.p14 = model.Person.create_original_with_record_id(
'haiti',
'haiti/0829',
given_name=u'真',
family_name=u'菊地',
entry_date=TEST_DATETIME)
self.p15 = model.Person.create_original_with_record_id(
'haiti',
'haiti/1829',
given_name=u'眞',
family_name=u'菊地',
entry_date=TEST_DATETIME)
def tearDown(self):
db.delete(model.Person.all())
self.tb.deactivate()
def test_search_by_name_only(self):
db.put(self.p1)
db.put(self.p2)
db.put(self.p3)
db.put(self.p4)
db.put(self.p5)
db.put(self.p6)
db.put(self.p7)
db.put(self.p8)
db.put(self.p9)
db.put(self.p10)
db.put(self.p11)
db.put(self.p12)
db.put(self.p13)
db.put(self.p14)
db.put(self.p15)
full_text_search.add_record_to_index(self.p1)
full_text_search.add_record_to_index(self.p2)
full_text_search.add_record_to_index(self.p3)
full_text_search.add_record_to_index(self.p4)
full_text_search.add_record_to_index(self.p5)
full_text_search.add_record_to_index(self.p6)
full_text_search.add_record_to_index(self.p7)
full_text_search.add_record_to_index(self.p8)
full_text_search.add_record_to_index(self.p9)
full_text_search.add_record_to_index(self.p10)
full_text_search.add_record_to_index(self.p11)
full_text_search.add_record_to_index(self.p12)
full_text_search.add_record_to_index(self.p13)
full_text_search.add_record_to_index(self.p14)
full_text_search.add_record_to_index(self.p15)
# Search by alternate name
results = full_text_search.search('haiti', 'Iorin', 5)
assert set([r.record_id for r in results]) == \
set(['haiti/0505'])
# Search by family name
results = full_text_search.search('haiti', 'Minase', 5)
assert set([r.record_id for r in results]) == \
set(['haiti/0505'])
# Search by given name
results = full_text_search.search('haiti', 'Iori', 5)
assert set([r.record_id for r in results]) == \
set(['haiti/0505'])
# Search by given name + family name
results = full_text_search.search('haiti', 'Minase Iori', 5)
assert set([r.record_id for r in results]) == \
set(['haiti/0505'])
# Search by full name
        results = full_text_search.search('haiti', 'Iori Minase', 5)
assert set([r.record_id for r in results]) == \
set(['haiti/0505'])
# Search by name & location
results = full_text_search.search('haiti', 'Chihaya Arao', 5)
assert set([r.record_id for r in results]) == \
set(['haiti/0225'])
# Search Cyrillic record by name & location
results = full_text_search.search('haiti', 'Ritsuko Tottori', 5)
assert set([r.record_id for r in results]) == \
set(['haiti/0623'])
# Search by home_street only
results = full_text_search.search('haiti', 'Kunaideme72', 5)
assert not results
# Search by home_city only
results = full_text_search.search('haiti', 'Arao', 5)
assert not results
# Search by home_state only
results = full_text_search.search('haiti', 'Kumamoto', 5)
assert not results
# Search by home_postal_code only
results = full_text_search.search('haiti', '864-0003', 5)
assert not results
# Search by home_neighborhood only
results = full_text_search.search('haiti', 'Araokeibajou', 5)
assert not results
# Search by home_country only
results = full_text_search.search('haiti', 'Japan', 5)
assert not results
# Search in a different repository
results = full_text_search.search('japan', 'Iori', 5)
assert not results
# Check no results
results = full_text_search.search('haiti', 'Producer san', 5)
assert not results
# Search with no query text
results = full_text_search.search('haiti', '', 5)
assert not results
# Search deleted record
delete.delete_person(self, self.p5)
results = full_text_search.search('haiti', 'Ami', 5)
assert not results
        # Check rank order (name match ranks higher than location match)
results = full_text_search.search('haiti', 'Rin Shibuya', 5)
assert [r.record_id for r in results] == \
['haiti:0810', 'haiti:0203']
# Search romaji record by kanji name
results = full_text_search.search('haiti', u'千早', 5)
assert set([r.record_id for r in results]) == \
set(['haiti/0225'])
# Search romaji record by kanji name and location
results = full_text_search.search('haiti', u'千早 荒尾', 5)
assert set([r.record_id for r in results]) == \
set(['haiti/0225'])
# Check rank order
# (same kanji higher than different kanji with the same reading)
results = full_text_search.search('haiti', u'菊地 真', 5)
assert [r.record_id for r in results] == \
['haiti/0829', 'haiti/1829']
results = full_text_search.search('haiti', u'菊地 眞', 5)
assert [r.record_id for r in results] == \
['haiti/1829', 'haiti/0829']
# Search kanji record by multi reading
results = full_text_search.search('haiti', u'hagiwara', 5)
assert set([r.record_id for r in results]) == \
set(['haiti/1224'])
results = full_text_search.search('haiti', u'ogiwara', 5)
assert set([r.record_id for r in results]) == \
set(['haiti/1224'])
# Search romaji record by hiragana name and location
results = full_text_search.search('haiti', u'ちはや あらお', 5)
assert set([r.record_id for r in results]) == \
set(['haiti/0225'])
# Search by full name without space
results = full_text_search.search('haiti', 'HibikiGanaha', 5)
assert set([r.record_id for r in results]) == \
set(['haiti/1010'])
# Search kanji record by full name without space
results = full_text_search.search('haiti', u'AzusaMiura', 5)
assert set([r.record_id for r in results]) == \
set(['haiti/0719'])
# Search Cyrillic record by full name without space
results = full_text_search.search('haiti', u'RitsukoAkiduki', 5)
assert set([r.record_id for r in results]) == \
set(['haiti/0623'])
# Search Chinese record by kanji
results = full_text_search.search('haiti', u'真美', 5)
assert set([r.record_id for r in results]) == \
set(['haiti/0523'])
def test_delete_record_from_index(self):
db.put(self.p4)
full_text_search.add_record_to_index(self.p4)
full_text_search.delete_record_from_index(self.p4)
results = full_text_search.search('haiti', 'Miki', 5)
assert not results
| clobrano/personfinder | tests/test_full_text_search.py | Python | apache-2.0 | 12,016 |
import sys
import subprocess
import textwrap
import decimal
from . import constants
from . import utils
from . import argparse_utils
def zpool_command(args):
context = vars(args)
effective_image_count = constants.ZPOOL_TYPES[args.type](args.count)
context['image_size'] = args.size / effective_image_count
context['physical_size'] = context['image_size'] * args.count
context['effective_size'] = context['image_size'] * effective_image_count
context['prefix'] %= context
context['postfix'] %= context
context['i'] = 0
context['name'] = constants.IMAGE_NAME % context
context['extra_args'] = ''
print textwrap.fill(constants.ZPOOL_CREATE_MESSAGE % context)
devices = []
for i in range(args.count):
context['i'] = i
context['name'] = constants.IMAGE_NAME % context
try:
if args.overwrite:
arg = '-ov'
else:
arg = ''
utils.execute(context, constants.ZPOOL_CREATE_IMAGE_COMMAND, arg)
except subprocess.CalledProcessError:
print 'Unable to create a new image'
sys.exit(1)
try:
context['name'] += '.sparseimage'
device = utils.execute(context,
constants.ZPOOL_ATTACH_IMAGE_COMMAND)
if device:
devices.append(device.strip())
except subprocess.CalledProcessError:
print 'Unable to attach image'
sys.exit(1)
if devices:
context['devices'] = ' '.join(devices)
context['mountpoint'] %= context
utils.execute(context, constants.ZPOOL_CREATE_COMMAND)
def get_parser(subparsers):
zpool = subparsers.add_parser('zpool', help='zpool creation')
zpool.add_argument(
'-c', '--count', default=3,
type=lambda s: argparse_utils.greater_than(s, int, 1),
help='The amount of images to use (default: %(default)s)')
zpool.add_argument(
'-s', '--size', default=10,
type=lambda s: argparse_utils.greater_than(s, decimal.Decimal, 0),
help='The usable size of the zpool in GiB (default: %(default)sGiB)')
zpool.add_argument(
'-t', '--type', choices=constants.ZPOOL_TYPES, default='raidz',
help='The zpool type to use (default: %(default)s)')
zpool.add_argument(
'-n', '--no-op', '--dry-run', action='store_true',
        help='Show what will be done but do not execute')
zpool.add_argument(
'-m', '--mountpoint', default='~/%(pool_name)s',
        help='Where should the disk be mounted (default: %(default)s)')
zpool.add_argument(
'-o', '--overwrite', action='store_true',
help='Overwrite old images if they exist')
zpool.add_argument('pool_name', help='The name of the pool to create')
zpool.add_argument(
'-p', '--prefix', default='%(pool_name)s_',
help='File name prefix for the images (default: %(default)s)')
zpool.add_argument(
'--postfix', default='',
help='File name postfix for the images (default: %(default)s)')
zpool.set_defaults(func=zpool_command)
| WoLpH/zfs-utils-osx | zfs_utils_osx/zpool.py | Python | bsd-3-clause | 3,142 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
'''
Message Queue Receiver
@author: Bin Zhang
@email: [email protected]
'''
import datetime
import json
import urllib
import urllib2
import paho.mqtt.client as mqtt
from config import *
# Called when the broker responds to our connection request.
def on_connect(client, userdata, rc):
print("Connected with result code " + str(rc))
client.subscribe("gateway")
# Called when a message has been received on a topic that the client subscribes to
def on_message(client, userdata, msg):
"""callback function"""
print(msg.topic + " " + str(msg.payload))
begin_datetime = str(datetime.datetime.now().isoformat())
print '[begin] ' + begin_datetime
message = json.loads(msg.payload)
print message
post_data = str(msg.payload)
global influxdb_url
global influxdb_port
global influxdb_dbname
global influxdb_username
global influxdb_password
influxdb_url = urllib.quote(influxdb_url)
influxdb_port = urllib.quote(str(influxdb_port))
influxdb_dbname = urllib.quote(influxdb_dbname)
influxdb_username = urllib.quote(influxdb_username)
influxdb_password = urllib.quote(influxdb_password)
request_url = "http://%s:%s/db/%s/series?u=%s&p=%s" % (influxdb_url, influxdb_port, influxdb_dbname, influxdb_username, influxdb_password)
print request_url
request = urllib2.Request(request_url, post_data)
request.add_header("Content-Type", "application/json")
response = urllib2.urlopen(request, timeout=10)
#assert response.code == 200
print response.code
#response_dict = json.loads(response.read())
#print response_dict
client = mqtt.Client()
client.username_pw_set(broker_username, broker_password)
client.on_connect = on_connect
client.on_message = on_message
client.connect(broker_url, broker_port, 60)
print 'Awaiting...'
client.loop_forever()
| 87boy/mqpp | paho/receiver.py | Python | gpl-2.0 | 1,915 |
import cherrystrap
import orielpy
from cherrystrap import logger, formatter
from orielpy import common
# parse_qsl moved to urlparse module in v2.6
try:
from urllib.parse import parse_qsl #@UnusedImport
except:
from cgi import parse_qsl #@Reimport
import oauth2 as oauth
import twitter
class TwitterNotifier:
consumer_key = "ZUJt6TLfdoDx5MBZLCOFKQ"
consumer_secret = "9gS5c4AAdhk6YSkL5F4E67Xclyao6GRDnXQKWCAw"
REQUEST_TOKEN_URL = 'https://api.twitter.com/oauth/request_token'
ACCESS_TOKEN_URL = 'https://api.twitter.com/oauth/access_token'
AUTHORIZATION_URL = 'https://api.twitter.com/oauth/authorize'
SIGNIN_URL = 'https://api.twitter.com/oauth/authenticate'
def notify_health(self, output):
self._notifyTwitter('OrielPy: '+common.notifyStrings[common.NOTIFY_PREPEND]+output)
def test_notify(self):
return self._notifyTwitter("This is a test notification from OrielPy / " + formatter.now(), force=True)
def _get_authorization(self):
signature_method_hmac_sha1 = oauth.SignatureMethod_HMAC_SHA1() #@UnusedVariable
oauth_consumer = oauth.Consumer(key=self.consumer_key, secret=self.consumer_secret)
oauth_client = oauth.Client(oauth_consumer)
logger.info('Requesting temp token from Twitter')
resp, content = oauth_client.request(self.REQUEST_TOKEN_URL, 'GET')
if resp['status'] != '200':
logger.info('Invalid respond from Twitter requesting temp token: %s' % resp['status'])
else:
request_token = dict(parse_qsl(content.decode('UTF-8')))
orielpy.TWITTER_TOKEN = request_token.get('oauth_token')
orielpy.TWITTER_SECRET = request_token.get('oauth_token_secret')
return self.AUTHORIZATION_URL+"?oauth_token="+ request_token.get('oauth_token')
def _get_credentials(self, key):
request_token = {}
request_token['oauth_token'] = orielpy.TWITTER_TOKEN
request_token['oauth_token_secret'] = orielpy.TWITTER_SECRET
request_token['oauth_callback_confirmed'] = 'true'
token = oauth.Token(request_token.get('oauth_token'), request_token.get('oauth_token_secret'))
token.set_verifier(key)
logger.info('Generating and signing request for an access token using key '+key)
signature_method_hmac_sha1 = oauth.SignatureMethod_HMAC_SHA1() #@UnusedVariable
oauth_consumer = oauth.Consumer(key=self.consumer_key, secret=self.consumer_secret)
logger.info('oauth_consumer: '+str(oauth_consumer))
oauth_client = oauth.Client(oauth_consumer, token)
logger.info('oauth_client: '+str(oauth_client))
resp, content = oauth_client.request(self.ACCESS_TOKEN_URL, method='POST', body='oauth_verifier=%s' % key)
logger.info('resp, content: '+str(resp)+','+str(content))
access_token = dict(parse_qsl(content.decode('UTF-8')))
logger.info('access_token: '+str(access_token))
logger.info('resp[status] = '+str(resp['status']))
if resp['status'] != '200':
logger.error('The request for a token with did not succeed: '+str(resp['status']))
return False
else:
logger.info('Your Twitter Access Token key: %s' % access_token.get('oauth_token'))
logger.info('Access Token secret: %s' % access_token.get('oauth_token_secret'))
orielpy.TWITTER_TOKEN = access_token.get('oauth_token')
orielpy.TWITTER_SECRET = access_token.get('oauth_token_secret')
return True
def _send_tweet(self, message=None):
username=self.consumer_key
password=self.consumer_secret
access_token_key=orielpy.TWITTER_TOKEN
access_token_secret=orielpy.TWITTER_SECRET
logger.info(u"Sending tweet: "+message)
api = twitter.Api(username, password, access_token_key, access_token_secret)
try:
api.PostUpdate(message)
except Exception as e:
logger.error(u"Error Sending Tweet: %s" %e)
return False
return True
def _notifyTwitter(self, message='', force=False):
prefix = orielpy.TWITTER_PREFIX
if not orielpy.TWITTER_ENABLED and not force:
return False
return self._send_tweet(prefix+": "+message)
notifier = TwitterNotifier
| theguardian/OrielPy | orielpy/notifiers/tweet.py | Python | gpl-2.0 | 4,510 |
#
# Copyright (c) 2016 Juniper Networks, Inc. All rights reserved.
#
"""
This helper script is used to dynamically update configs to named.
o For backward compatibility we read the named configurable
params from contrail-dns.conf and build contrail-named-base.conf
o Alternatively the user can create/update contrail-named-base.conf
for configuring named params
o contrail-named.conf will be generated by dnsd which will
contain views/zones stanzas
o contrail-named-base.conf will be merged with contrail-named.conf
by the script and config applied to named
"""
import sys
import os
import subprocess
import ConfigParser
def parse_contrail_dns_conf():
named_defaults = {
'named_config_file': 'contrail-named.conf',
'named_config_directory': '/etc/contrail/dns',
'named_log_file': '/var/log/contrail/contrail-named.log',
'rndc_config_file': 'contrail-rndc.conf',
'rndc_secret': 'xvysmOR8lnUQRBcunkC6vg==',
'named_max_cache_size': '32M',
'named_max_retransmissions': '12',
'named_retransmission_interval': '1000',
}
    # remove preceding spaces from contrail-dns.conf
# and save it in contrail-dns-temp.conf
subprocess.call(["sed -e 's/^[ \t]*//g' < /etc/contrail/contrail-dns.conf \
> /etc/contrail/dns/contrail-dns-temp.conf"], shell=True)
    # remove comments starting with ';' or '#'
subprocess.call(["sed -i 's/[;#].*$//g' /etc/contrail/dns/contrail-dns-temp.conf"], shell=True)
# parse contrail-dns.conf
dns_config = ConfigParser.SafeConfigParser()
dns_config.read('/etc/contrail/dns/contrail-dns-temp.conf')
# remove the temp file
os.remove("/etc/contrail/dns/contrail-dns-temp.conf")
# update defaults
named_defaults.update(dict(dns_config.items("DEFAULT")))
# create contrail-named-base.conf
file_named_base_conf=open('/etc/contrail/dns/contrail-named-base.conf', 'w+')
# build contrail-named-base.conf
# build options {} stanza
file_named_base_conf.write('options {\n')
file_named_base_conf.write(' directory "'+ named_defaults['named_config_directory'] + '";\n')
file_named_base_conf.write(' managed-keys-directory "'+ named_defaults['named_config_directory'] + '";\n')
file_named_base_conf.write(' empty-zones-enable no;\n')
file_named_base_conf.write(' pid-file "/etc/contrail/dns/contrail-named.pid";\n')
file_named_base_conf.write(' session-keyfile "/etc/contrail/dns/session.key";\n')
file_named_base_conf.write(' listen-on port 53 { any; };\n')
file_named_base_conf.write(' allow-query { any; };\n')
file_named_base_conf.write(' allow-recursion { any; };\n')
file_named_base_conf.write(' allow-query-cache { any; };\n')
file_named_base_conf.write(' max-cache-size '+ named_defaults['named_max_cache_size'] + ';\n')
file_named_base_conf.write('};\n\n')
# build rndc-key {} stanza
file_named_base_conf.write('key "rndc-key" {\n')
file_named_base_conf.write(' algorithm hmac-md5;\n')
file_named_base_conf.write(' secret "' + named_defaults['rndc_secret'] + '";\n')
file_named_base_conf.write('};\n\n')
#build controls {} stanza
file_named_base_conf.write('controls {\n')
file_named_base_conf.write(' inet 127.0.0.1 port 8094 \n')
file_named_base_conf.write(' allow { 127.0.0.1; } keys { "rndc-key"; };\n')
file_named_base_conf.write('};\n\n')
#build logging {} stanza
file_named_base_conf.write('logging {\n')
file_named_base_conf.write(' channel debug_log {\n')
file_named_base_conf.write(' file "'+ named_defaults['named_log_file'] + '" versions 5 size 5m;\n')
file_named_base_conf.write(' severity debug;\n')
file_named_base_conf.write(' print-time yes;\n')
file_named_base_conf.write(' print-severity yes;\n')
file_named_base_conf.write(' print-category yes;\n')
file_named_base_conf.write(' };\n')
file_named_base_conf.write(' category default {\n')
file_named_base_conf.write(' debug_log;\n')
file_named_base_conf.write(' };\n')
file_named_base_conf.write(' category queries {\n')
file_named_base_conf.write(' debug_log;\n')
file_named_base_conf.write(' };\n')
file_named_base_conf.write('};\n\n')
file_named_base_conf.close()
# end parse_contrail_dns_conf
def main():
if not os.path.exists('/etc/contrail/dns/contrail-named-base.conf'):
# parse contrail-dns.conf and build contrail-named-base.conf
parse_contrail_dns_conf()
# open contrail-named-base.conf and read the base configs
file1 = open('/etc/contrail/dns/contrail-named-base.conf', 'r')
file1_lines = file1.readlines()
file1.close()
# open contrail-named.conf and remove configurable stanzas
# options{} key{} controls{} logging {}
count = 0
file2 = open('/etc/contrail/dns/contrail-named.conf', 'r')
lines = file2.readlines()
for i, line in enumerate(lines[:]):
if line.startswith('view'):
break
else:
count = count + 1
file2.close()
# delete all lines before the view stanza {}
del lines[0:count]
# open contrail-named.conf
file3 = open('/etc/contrail/dns/contrail-named.conf', 'w')
file3.truncate()
file3.write("/* Build from contrail-named-base.conf */\n")
file3.writelines(file1_lines)
file3.write("/* Build from contrail-named.conf */\n")
file3.writelines(lines)
file3.close()
# apply config
os.system('/usr/bin/contrail-rndc -c /etc/contrail/dns/contrail-rndc.conf reconfig')
#end main
if __name__ == "__main__":
main()
| eonpatapon/contrail-controller | src/dns/applynamedconfig.py | Python | apache-2.0 | 5,723 |
import logging
import typing
from typing import List, Iterable, Optional
from binascii import hexlify
from torba.client.basescript import BaseInputScript, BaseOutputScript
from torba.client.baseaccount import BaseAccount
from torba.client.constants import COIN, NULL_HASH32
from torba.client.bcd_data_stream import BCDataStream
from torba.client.hash import sha256, TXRef, TXRefImmutable
from torba.client.util import ReadOnlyList
from torba.client.errors import InsufficientFundsError
if typing.TYPE_CHECKING:
from torba.client import baseledger
log = logging.getLogger()
class TXRefMutable(TXRef):
__slots__ = ('tx',)
def __init__(self, tx: 'BaseTransaction') -> None:
super().__init__()
self.tx = tx
@property
def id(self):
if self._id is None:
self._id = hexlify(self.hash[::-1]).decode()
return self._id
@property
def hash(self):
if self._hash is None:
self._hash = sha256(sha256(self.tx.raw))
return self._hash
@property
def height(self):
return self.tx.height
def reset(self):
self._id = None
self._hash = None
class TXORef:
__slots__ = 'tx_ref', 'position'
def __init__(self, tx_ref: TXRef, position: int) -> None:
self.tx_ref = tx_ref
self.position = position
@property
def id(self):
return '{}:{}'.format(self.tx_ref.id, self.position)
@property
def hash(self):
return self.tx_ref.hash + BCDataStream.uint32.pack(self.position)
@property
def is_null(self):
return self.tx_ref.is_null
@property
def txo(self) -> Optional['BaseOutput']:
return None
class TXORefResolvable(TXORef):
__slots__ = ('_txo',)
def __init__(self, txo: 'BaseOutput') -> None:
assert txo.tx_ref is not None
assert txo.position is not None
super().__init__(txo.tx_ref, txo.position)
self._txo = txo
@property
def txo(self):
return self._txo
class InputOutput:
__slots__ = 'tx_ref', 'position'
def __init__(self, tx_ref: TXRef = None, position: int = None) -> None:
self.tx_ref = tx_ref
self.position = position
@property
def size(self) -> int:
""" Size of this input / output in bytes. """
stream = BCDataStream()
self.serialize_to(stream)
return len(stream.get_bytes())
def get_fee(self, ledger):
return self.size * ledger.fee_per_byte
def serialize_to(self, stream, alternate_script=None):
raise NotImplementedError
class BaseInput(InputOutput):
script_class = BaseInputScript
NULL_SIGNATURE = b'\x00'*72
NULL_PUBLIC_KEY = b'\x00'*33
__slots__ = 'txo_ref', 'sequence', 'coinbase', 'script'
def __init__(self, txo_ref: TXORef, script: BaseInputScript, sequence: int = 0xFFFFFFFF,
tx_ref: TXRef = None, position: int = None) -> None:
super().__init__(tx_ref, position)
self.txo_ref = txo_ref
self.sequence = sequence
self.coinbase = script if txo_ref.is_null else None
self.script = script if not txo_ref.is_null else None
@property
def is_coinbase(self):
return self.coinbase is not None
@classmethod
def spend(cls, txo: 'BaseOutput') -> 'BaseInput':
""" Create an input to spend the output."""
assert txo.script.is_pay_pubkey_hash, 'Attempting to spend unsupported output.'
script = cls.script_class.redeem_pubkey_hash(cls.NULL_SIGNATURE, cls.NULL_PUBLIC_KEY)
return cls(txo.ref, script)
@property
def amount(self) -> int:
""" Amount this input adds to the transaction. """
if self.txo_ref.txo is None:
raise ValueError('Cannot resolve output to get amount.')
return self.txo_ref.txo.amount
@property
def is_my_account(self) -> Optional[bool]:
""" True if the output this input spends is yours. """
if self.txo_ref.txo is None:
return False
return self.txo_ref.txo.is_my_account
@classmethod
def deserialize_from(cls, stream):
tx_ref = TXRefImmutable.from_hash(stream.read(32), -1)
position = stream.read_uint32()
script = stream.read_string()
sequence = stream.read_uint32()
return cls(
TXORef(tx_ref, position),
cls.script_class(script) if not tx_ref.is_null else script,
sequence
)
def serialize_to(self, stream, alternate_script=None):
stream.write(self.txo_ref.tx_ref.hash)
stream.write_uint32(self.txo_ref.position)
if alternate_script is not None:
stream.write_string(alternate_script)
else:
if self.is_coinbase:
stream.write_string(self.coinbase)
else:
stream.write_string(self.script.source)
stream.write_uint32(self.sequence)
class BaseOutputEffectiveAmountEstimator:
__slots__ = 'txo', 'txi', 'fee', 'effective_amount'
def __init__(self, ledger: 'baseledger.BaseLedger', txo: 'BaseOutput') -> None:
self.txo = txo
self.txi = ledger.transaction_class.input_class.spend(txo)
self.fee: int = self.txi.get_fee(ledger)
self.effective_amount: int = txo.amount - self.fee
def __lt__(self, other):
return self.effective_amount < other.effective_amount
class BaseOutput(InputOutput):
script_class = BaseOutputScript
estimator_class = BaseOutputEffectiveAmountEstimator
__slots__ = 'amount', 'script', 'is_change', 'is_my_account'
def __init__(self, amount: int, script: BaseOutputScript,
tx_ref: TXRef = None, position: int = None,
is_change: Optional[bool] = None, is_my_account: Optional[bool] = None
) -> None:
super().__init__(tx_ref, position)
self.amount = amount
self.script = script
self.is_change = is_change
self.is_my_account = is_my_account
def update_annotations(self, annotated):
if annotated is None:
self.is_change = False
self.is_my_account = False
else:
self.is_change = annotated.is_change
self.is_my_account = annotated.is_my_account
@property
def ref(self):
return TXORefResolvable(self)
@property
def id(self):
return self.ref.id
@property
def pubkey_hash(self):
return self.script.values['pubkey_hash']
def get_address(self, ledger):
return ledger.hash160_to_address(self.pubkey_hash)
def get_estimator(self, ledger):
return self.estimator_class(ledger, self)
@classmethod
def pay_pubkey_hash(cls, amount, pubkey_hash):
return cls(amount, cls.script_class.pay_pubkey_hash(pubkey_hash))
@classmethod
def deserialize_from(cls, stream):
return cls(
amount=stream.read_uint64(),
script=cls.script_class(stream.read_string())
)
def serialize_to(self, stream, alternate_script=None):
stream.write_uint64(self.amount)
stream.write_string(self.script.source)
class BaseTransaction:
input_class = BaseInput
output_class = BaseOutput
def __init__(self, raw=None, version: int = 1, locktime: int = 0, is_verified: bool = False,
height: int = -2, position: int = -1) -> None:
self._raw = raw
self.ref = TXRefMutable(self)
self.version = version
self.locktime = locktime
self._inputs: List[BaseInput] = []
self._outputs: List[BaseOutput] = []
self.is_verified = is_verified
# Height Progression
# -2: not broadcast
# -1: in mempool but has unconfirmed inputs
# 0: in mempool and all inputs confirmed
# +num: confirmed in a specific block (height)
self.height = height
self.position = position
if raw is not None:
self._deserialize()
@property
def is_broadcast(self):
return self.height > -2
@property
def is_mempool(self):
return self.height in (-1, 0)
@property
def is_confirmed(self):
return self.height > 0
@property
def id(self):
return self.ref.id
@property
def hash(self):
return self.ref.hash
@property
def raw(self):
if self._raw is None:
self._raw = self._serialize()
return self._raw
def _reset(self):
self._raw = None
self.ref.reset()
@property
def inputs(self) -> ReadOnlyList[BaseInput]:
return ReadOnlyList(self._inputs)
@property
def outputs(self) -> ReadOnlyList[BaseOutput]:
return ReadOnlyList(self._outputs)
def _add(self, new_ios: Iterable[InputOutput], existing_ios: List) -> 'BaseTransaction':
for txio in new_ios:
txio.tx_ref = self.ref
txio.position = len(existing_ios)
existing_ios.append(txio)
self._reset()
return self
def add_inputs(self, inputs: Iterable[BaseInput]) -> 'BaseTransaction':
return self._add(inputs, self._inputs)
def add_outputs(self, outputs: Iterable[BaseOutput]) -> 'BaseTransaction':
return self._add(outputs, self._outputs)
@property
def size(self) -> int:
""" Size in bytes of the entire transaction. """
return len(self.raw)
@property
def base_size(self) -> int:
""" Size of transaction without inputs or outputs in bytes. """
return (
self.size
- sum(txi.size for txi in self._inputs)
- sum(txo.size for txo in self._outputs)
)
@property
def input_sum(self):
return sum(i.amount for i in self.inputs if i.txo_ref.txo is not None)
@property
def output_sum(self):
return sum(o.amount for o in self.outputs)
@property
def net_account_balance(self) -> int:
balance = 0
for txi in self.inputs:
if txi.txo_ref.txo is None:
continue
if txi.is_my_account is None:
raise ValueError(
"Cannot access net_account_balance if inputs/outputs do not "
"have is_my_account set properly."
)
if txi.is_my_account:
balance -= txi.amount
for txo in self.outputs:
if txo.is_my_account is None:
raise ValueError(
"Cannot access net_account_balance if inputs/outputs do not "
"have is_my_account set properly."
)
if txo.is_my_account:
balance += txo.amount
return balance
@property
def fee(self) -> int:
return self.input_sum - self.output_sum
def get_base_fee(self, ledger) -> int:
""" Fee for base tx excluding inputs and outputs. """
return self.base_size * ledger.fee_per_byte
def get_effective_input_sum(self, ledger) -> int:
""" Sum of input values *minus* the cost involved to spend them. """
return sum(txi.amount - txi.get_fee(ledger) for txi in self._inputs)
def get_total_output_sum(self, ledger) -> int:
""" Sum of output values *plus* the cost involved to spend them. """
return sum(txo.amount + txo.get_fee(ledger) for txo in self._outputs)
def _serialize(self, with_inputs: bool = True) -> bytes:
stream = BCDataStream()
stream.write_uint32(self.version)
if with_inputs:
stream.write_compact_size(len(self._inputs))
for txin in self._inputs:
txin.serialize_to(stream)
stream.write_compact_size(len(self._outputs))
for txout in self._outputs:
txout.serialize_to(stream)
stream.write_uint32(self.locktime)
return stream.get_bytes()
def _serialize_for_signature(self, signing_input: int) -> bytes:
stream = BCDataStream()
stream.write_uint32(self.version)
stream.write_compact_size(len(self._inputs))
for i, txin in enumerate(self._inputs):
if signing_input == i:
assert txin.txo_ref.txo is not None
txin.serialize_to(stream, txin.txo_ref.txo.script.source)
else:
txin.serialize_to(stream, b'')
stream.write_compact_size(len(self._outputs))
for txout in self._outputs:
txout.serialize_to(stream)
stream.write_uint32(self.locktime)
stream.write_uint32(self.signature_hash_type(1)) # signature hash type: SIGHASH_ALL
return stream.get_bytes()
def _deserialize(self):
if self._raw is not None:
stream = BCDataStream(self._raw)
self.version = stream.read_uint32()
input_count = stream.read_compact_size()
self.add_inputs([
self.input_class.deserialize_from(stream) for _ in range(input_count)
])
output_count = stream.read_compact_size()
self.add_outputs([
self.output_class.deserialize_from(stream) for _ in range(output_count)
])
self.locktime = stream.read_uint32()
@classmethod
def ensure_all_have_same_ledger(cls, funding_accounts: Iterable[BaseAccount],
change_account: BaseAccount = None) -> 'baseledger.BaseLedger':
ledger = None
for account in funding_accounts:
if ledger is None:
ledger = account.ledger
if ledger != account.ledger:
raise ValueError(
'All funding accounts used to create a transaction must be on the same ledger.'
)
if change_account is not None and change_account.ledger != ledger:
raise ValueError('Change account must use same ledger as funding accounts.')
if ledger is None:
raise ValueError('No ledger found.')
return ledger
@classmethod
async def create(cls, inputs: Iterable[BaseInput], outputs: Iterable[BaseOutput],
funding_accounts: Iterable[BaseAccount], change_account: BaseAccount,
sign: bool = True):
""" Find optimal set of inputs when only outputs are provided; add change
outputs if only inputs are provided or if inputs are greater than outputs. """
tx = cls() \
.add_inputs(inputs) \
.add_outputs(outputs)
ledger = cls.ensure_all_have_same_ledger(funding_accounts, change_account)
# value of the outputs plus associated fees
cost = (
tx.get_base_fee(ledger) +
tx.get_total_output_sum(ledger)
)
# value of the inputs less the cost to spend those inputs
payment = tx.get_effective_input_sum(ledger)
try:
for _ in range(5):
if payment < cost:
deficit = cost - payment
spendables = await ledger.get_spendable_utxos(deficit, funding_accounts)
if not spendables:
raise InsufficientFundsError('Not enough funds to cover this transaction.')
payment += sum(s.effective_amount for s in spendables)
tx.add_inputs(s.txi for s in spendables)
cost_of_change = (
tx.get_base_fee(ledger) +
cls.output_class.pay_pubkey_hash(COIN, NULL_HASH32).get_fee(ledger)
)
if payment > cost:
change = payment - cost
if change > cost_of_change:
change_address = await change_account.change.get_or_create_usable_address()
change_hash160 = change_account.ledger.address_to_hash160(change_address)
change_amount = change - cost_of_change
change_output = cls.output_class.pay_pubkey_hash(change_amount, change_hash160)
change_output.is_change = True
                        tx.add_outputs([change_output])
if tx._outputs:
break
else:
# this condition and the outer range(5) loop cover an edge case
# whereby a single input is just enough to cover the fee and
# has some change left over, but the change left over is less
# than the cost_of_change: thus the input is completely
# consumed and no output is added, which is an invalid tx.
# to be able to spend this input we must increase the cost
# of the TX and run through the balance algorithm a second time
# adding an extra input and change output, making tx valid.
# we do this 5 times in case the other UTXOs added are also
# less than the fee, after 5 attempts we give up and go home
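                    # Worked example with made-up numbers: if the single
                    # selected UTXO contributes 1000 units after its own spend
                    # fee, cost is 950 and cost_of_change is 200, then
                    # change = 50 < 200, so no change output is created and
                    # (with no other outputs) the tx would be invalid; raising
                    # cost below forces another UTXO to be picked up on the
                    # next pass so a valid change output can be added.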
cost += cost_of_change + 1
if sign:
await tx.sign(funding_accounts)
except Exception as e:
log.exception('Failed to create transaction:')
await ledger.release_tx(tx)
raise e
return tx
@staticmethod
def signature_hash_type(hash_type):
return hash_type
async def sign(self, funding_accounts: Iterable[BaseAccount]):
ledger = self.ensure_all_have_same_ledger(funding_accounts)
for i, txi in enumerate(self._inputs):
assert txi.script is not None
assert txi.txo_ref.txo is not None
txo_script = txi.txo_ref.txo.script
if txo_script.is_pay_pubkey_hash:
address = ledger.hash160_to_address(txo_script.values['pubkey_hash'])
private_key = await ledger.get_private_key_for_address(address)
tx = self._serialize_for_signature(i)
txi.script.values['signature'] = \
private_key.sign(tx) + bytes((self.signature_hash_type(1),))
txi.script.values['pubkey'] = private_key.public_key.pubkey_bytes
txi.script.generate()
else:
raise NotImplementedError("Don't know how to spend this output.")
self._reset()
| lbryio/lbry | torba/torba/client/basetransaction.py | Python | mit | 18,514 |
import io
import os.path
import nose.tools as nt
from IPython.utils import openpy
mydir = os.path.dirname(__file__)
nonascii_path = os.path.join(mydir, "../../core/tests/nonascii.py")
def test_detect_encoding():
with open(nonascii_path, "rb") as f:
enc, lines = openpy.detect_encoding(f.readline)
nt.assert_equal(enc, "iso-8859-5")
def test_read_file():
with io.open(nonascii_path, encoding="iso-8859-5") as f:
read_specified_enc = f.read()
read_detected_enc = openpy.read_py_file(nonascii_path, skip_encoding_cookie=False)
nt.assert_equal(read_detected_enc, read_specified_enc)
assert "coding: iso-8859-5" in read_detected_enc
read_strip_enc_cookie = openpy.read_py_file(
nonascii_path, skip_encoding_cookie=True
)
assert "coding: iso-8859-5" not in read_strip_enc_cookie
def test_source_to_unicode():
with io.open(nonascii_path, "rb") as f:
source_bytes = f.read()
nt.assert_equal(
openpy.source_to_unicode(source_bytes, skip_encoding_cookie=False).splitlines(),
source_bytes.decode("iso-8859-5").splitlines(),
)
source_no_cookie = openpy.source_to_unicode(source_bytes, skip_encoding_cookie=True)
nt.assert_not_in("coding: iso-8859-5", source_no_cookie)
| sserrot/champion_relationships | venv/Lib/site-packages/IPython/utils/tests/test_openpy.py | Python | mit | 1,271 |
#!/usr/bin/env python
#
# Copyright (C) 2014 Google Inc.
#
# This file is part of YouCompleteMe.
#
# YouCompleteMe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# YouCompleteMe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with YouCompleteMe. If not, see <http://www.gnu.org/licenses/>.
import os
import ycm_core
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
'-Wall',
'-Wextra',
'-Werror',
'-fexceptions',
'-DNDEBUG',
# THIS IS IMPORTANT! Without a "-std=<something>" flag, clang won't know which
# language to use when compiling headers. So it will guess. Badly. So C++
# headers will be compiled as C headers. You don't want that so ALWAYS specify
# a "-std=<something>".
# For a C project, you would set this to something like 'c99' instead of
# 'c++11'.
'-std=c++14',
# ...and the same thing goes for the magic -x option which specifies the
# language that the files to be compiled are written in. This is mostly
# relevant for c++ headers.
# For a C project, you would set this to 'c' instead of 'c++'.
'-x',
'c++',
'-isystem',
'/usr/bin/../lib64/gcc/x86_64-suse-linux/5/../../../../include/c++/5',
'-isystem',
'/usr/bin/../lib64/gcc/x86_64-suse-linux/5/../../../../include/c++/5/x86_64-suse-linux',
'-isystem',
'/usr/bin/../lib64/gcc/x86_64-suse-linux/5/../../../../include/c++/5/backward',
'-isystem',
'/usr/local/include',
'-isystem',
'/usr/bin/../lib64/clang/3.6.1/include',
'-isystem',
'/usr/include',
'-I',
'/home/vsProject/cpp_project/vsD/common',
'-I',
'/home/vsProject/cpp_project/vsD/daemon',
'-I',
'/home/vsProject/cpp_project/vsD/signal',
'-I',
'/home/vsProject/cpp_project/vsD/spdlog'
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags.
compilation_database_folder = ''
if os.path.exists( compilation_database_folder ):
database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
database = None
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def DirectoryOfThisScript():
return os.path.dirname( os.path.abspath( __file__ ) )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
if not working_directory:
return list( flags )
new_flags = []
make_next_absolute = False
path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith( '/' ):
new_flag = os.path.join( working_directory, flag )
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith( path_flag ):
path = flag[ len( path_flag ): ]
new_flag = path_flag + os.path.join( working_directory, path )
break
if new_flag:
new_flags.append( new_flag )
return new_flags
def IsHeaderFile( filename ):
extension = os.path.splitext( filename )[ 1 ]
return extension in [ '.h', '.hxx', '.hpp', '.hh' ]
def GetCompilationInfoForFile( filename ):
# The compilation_commands.json file generated by CMake does not have entries
# for header files. So we do our best by asking the db for flags for a
# corresponding source file, if any. If one exists, the flags for that file
# should be good enough.
if IsHeaderFile( filename ):
basename = os.path.splitext( filename )[ 0 ]
for extension in SOURCE_EXTENSIONS:
replacement_file = basename + extension
if os.path.exists( replacement_file ):
compilation_info = database.GetCompilationInfoForFile(
replacement_file )
if compilation_info.compiler_flags_:
return compilation_info
return None
return database.GetCompilationInfoForFile( filename )
# This is the entry point; this function is called by ycmd to produce flags for
# a file.
def FlagsForFile( filename, **kwargs ):
if database:
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object
compilation_info = GetCompilationInfoForFile( filename )
if not compilation_info:
return None
final_flags = MakeRelativePathsInFlagsAbsolute(
compilation_info.compiler_flags_,
compilation_info.compiler_working_dir_ )
else:
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
return {
'flags': final_flags,
'do_cache': True
}
| verbalsaintmars/vsd_brand_new | .ycm_extra_conf.py | Python | mit | 5,280 |
import twitter
import glob
import random
class Tweety( object ):
def signIn(self):
df = open('./twitter/login.dat', 'r')
self.creds = {}
for line in df:
l = line.split()
self.creds[l[0]] = l[1]
self.tweeter = twitter.Api(consumer_key=self.creds["ckey"],
consumer_secret=self.creds["csecret"],
access_token_key=self.creds["atkey"],
access_token_secret=self.creds["atsecret"])
self.verification = self.tweeter.VerifyCredentials()
self.name = self.verification.GetName()
self.Id = self.verification.GetId()
self.User = self.tweeter.GetUser(self.Id)
self.status = self.User.GetStatus()
self.timeHorizon = self.status.GetId()
def loadStuff(self):
        self.mentionText = 'Status'
self.vocabdir = './twitter/'
self.vocab = {}
for name in ["startup", "snark", "broadcast", "shutdown"]:
snippets = []
for line in open(self.vocabdir+name+'.vocab', 'r'):
snippets.append(line)
self.vocab[name] = snippets
self.graphdir = './graphs/'
def __init__(self, parent):
self.signIn()
self.loadStuff()
self.parent = parent
print self.status.GetText()
print self.timeHorizon
def shutdown(self):
self.tweeter.ClearCredentials()
def reply(self, mention):
status = "A status update should appear here"
#self.tweeter.PostUpdate(status, in_reply_to_status_id=mention.GetId())
print status
def checkFeed(self):
newMentions = self.tweeter.GetMentions()#since_id=self.timeHorizon)
for mention in newMentions:
print mention.GetText()
if (self.mentionText in mention.GetText()):
print "User wants a query!"
self.reply(mention)
def startTweeting(self):
        self.status = self.tweeter.PostUpdate(
            random.choice(self.vocab["startup"]))
self.timeHorizon = self.status.GetId()
def stopTweeting(self):
        self.status = self.tweeter.PostUpdate(
            random.choice(self.vocab["shutdown"]))
self.timeHorizon = self.status.GetId()
self.shutdown()
| SPIE-hack-day-2014/labrador-retweeter | tweetHandler.py | Python | gpl-2.0 | 2,364 |
"""Optional dependencies for DAL."""
| yourlabs/django-autocomplete-light | src/__init__.py | Python | mit | 37 |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from airflow import DAG
from airflow.contrib.sensors.aws_sqs_sensor import SQSSensor
from airflow.utils import timezone
from mock import patch, MagicMock
from airflow.exceptions import AirflowException
from moto import mock_sqs
from airflow.contrib.hooks.aws_sqs_hook import SQSHook
DEFAULT_DATE = timezone.datetime(2017, 1, 1)
class TestSQSSensor(unittest.TestCase):
def setUp(self):
args = {
'owner': 'airflow',
'start_date': DEFAULT_DATE
}
self.dag = DAG('test_dag_id', default_args=args)
self.sensor = SQSSensor(
task_id='test_task',
dag=self.dag,
sqs_queue='test',
aws_conn_id='aws_default'
)
self.mock_context = MagicMock()
self.sqs_hook = SQSHook()
@mock_sqs
def test_poke_success(self):
self.sqs_hook.create_queue('test')
self.sqs_hook.send_message(queue_url='test', message_body='hello')
result = self.sensor.poke(self.mock_context)
self.assertTrue(result)
self.assertTrue("'Body': 'hello'" in str(self.mock_context['ti'].method_calls),
"context call should contain message hello")
@mock_sqs
    def test_poke_no_message_failed(self):
self.sqs_hook.create_queue('test')
result = self.sensor.poke(self.mock_context)
self.assertFalse(result)
context_calls = []
self.assertTrue(self.mock_context['ti'].method_calls == context_calls, "context call should be same")
@patch('airflow.contrib.sensors.aws_sqs_sensor.SQSHook')
def test_poke_delete_raise_airflow_exception(self, mock_sqs_hook):
message = {'Messages': [{'MessageId': 'c585e508-2ea0-44c7-bf3e-d1ba0cb87834',
'ReceiptHandle': 'mockHandle',
'MD5OfBody': 'e5a9d8684a8edfed460b8d42fd28842f',
'Body': 'h21'}],
'ResponseMetadata': {'RequestId': '56cbf4aa-f4ef-5518-9574-a04e0a5f1411',
'HTTPStatusCode': 200,
'HTTPHeaders': {
'x-amzn-requestid': '56cbf4aa-f4ef-5518-9574-a04e0a5f1411',
'date': 'Mon, 18 Feb 2019 18:41:52 GMT',
'content-type': 'text/xml', 'mock_sqs_hook-length': '830'},
'RetryAttempts': 0}}
mock_sqs_hook().get_conn().receive_message.return_value = message
mock_sqs_hook().get_conn().delete_message_batch.return_value = \
{'Failed': [{'Id': '22f67273-4dbc-4c19-83b5-aee71bfeb832'}]}
with self.assertRaises(AirflowException) as context:
self.sensor.poke(self.mock_context)
self.assertTrue('Delete SQS Messages failed' in context.exception.args[0])
@patch('airflow.contrib.sensors.aws_sqs_sensor.SQSHook')
def test_poke_receive_raise_exception(self, mock_sqs_hook):
mock_sqs_hook().get_conn().receive_message.side_effect = Exception('test exception')
with self.assertRaises(Exception) as context:
self.sensor.poke(self.mock_context)
self.assertTrue('test exception' in context.exception.args[0])
if __name__ == '__main__':
unittest.main()
| owlabs/incubator-airflow | tests/contrib/sensors/test_sqs_sensor.py | Python | apache-2.0 | 4,201 |
#!/usr/bin/env python
'''
This sample demonstrates Canny edge detection.
Usage:
edge.py [<video source>]
Trackbars control edge thresholds.
'''
# Python 2/3 compatibility
from __future__ import print_function
import cv2
import numpy as np
# relative module
import video
# built-in module
import sys
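# Hedged sketch (not used by the interactive demo below): the same Canny call
# applied to a single still image; the file name is a placeholder.
def canny_single_image(path='some_image.png', thrs1=2000, thrs2=4000):
    img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
    return cv2.Canny(img, thrs1, thrs2, apertureSize=5)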
if __name__ == '__main__':
print(__doc__)
try:
fn = sys.argv[1]
except:
fn = 0
def nothing(*arg):
pass
cv2.namedWindow('edge')
cv2.createTrackbar('thrs1', 'edge', 2000, 5000, nothing)
cv2.createTrackbar('thrs2', 'edge', 4000, 5000, nothing)
cap = video.create_capture(fn)
while True:
        flag, img = cap.read()
        if not flag:
            break
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
thrs1 = cv2.getTrackbarPos('thrs1', 'edge')
thrs2 = cv2.getTrackbarPos('thrs2', 'edge')
edge = cv2.Canny(gray, thrs1, thrs2, apertureSize=5)
vis = img.copy()
vis = np.uint8(vis/2.)
vis[edge != 0] = (0, 255, 0)
cv2.imshow('edge', vis)
ch = cv2.waitKey(5)
if ch == 27:
break
cv2.destroyAllWindows()
| makelove/OpenCV-Python-Tutorial | 官方samples/edge.py | Python | mit | 1,130 |
"""!
@brief Unit-tests for Hysteresis Oscillatory Network.
@authors Andrei Novikov ([email protected])
@date 2014-2020
@copyright BSD-3-Clause
"""
import unittest;
from pyclustering.nnet.hysteresis import hysteresis_network;
from pyclustering.nnet import *;
from pyclustering.utils import extract_number_oscillations;
class HysteresisUnitTest(unittest.TestCase):
def templateOscillationExistance(self, num_osc, own_weight, neigh_weight, steps, time, initial_states = None, initial_outputs = None, conn_repr = conn_represent.MATRIX):
network = hysteresis_network(num_osc, own_weight, neigh_weight, type_conn_represent = conn_repr);
if (initial_states is not None):
network.states = initial_states;
if (initial_outputs is not None):
network.outputs = initial_outputs;
output_dynamic = network.simulate(steps, time);
oscillations = [];
for index in range(num_osc):
number_oscillations = extract_number_oscillations(output_dynamic.output, index, 0.9);
oscillations.append(number_oscillations)
assert number_oscillations > 1;
def testOscillationsOneOscillator(self):
self.templateOscillationExistance(1, -2, -1, 1000, 10);
self.templateOscillationExistance(1, -4, -1, 1000, 10);
def testOscillationsTwoOscillators(self):
self.templateOscillationExistance(2, -4, 1, 1000, 10, [1, 0], [1, 1]);
self.templateOscillationExistance(2, -4, -1, 1000, 10, [1, 0], [1, 1]);
def testOscillationsFiveOscillators(self):
self.templateOscillationExistance(5, -4, -1, 1000, 10, [1, 0.5, 0, -0.5, -1], [1, 1, 1, 1, 1]);
def testListConnectionRepresentation(self):
self.templateOscillationExistance(1, -2, -1, 1000, 10, conn_repr = conn_represent.LIST);
self.templateOscillationExistance(2, -4, -1, 1000, 10, [1, 0], [1, 1], conn_repr = conn_represent.LIST);
self.templateOscillationExistance(5, -4, -1, 1000, 10, [1, 0.5, 0, -0.5, -1], [1, 1, 1, 1, 1], conn_repr = conn_represent.LIST);
def templateSynchronousEnsemblesAllocation(self, num_osc, own_weight, neigh_weight, steps, time, initial_states, initial_outputs, sync_ensembles_sizes):
network = hysteresis_network(num_osc, own_weight, neigh_weight);
if (initial_states is not None):
network.states = initial_states;
if (initial_outputs is not None):
network.outputs = initial_outputs;
output_dynamic = network.simulate(steps, time, collect_dynamic = True);
ensembles = output_dynamic.allocate_sync_ensembles(0.5, 5);
assert len(ensembles) == len(sync_ensembles_sizes);
obtained_ensembles_sizes = [len(cluster) for cluster in ensembles];
total_length = sum(obtained_ensembles_sizes);
assert total_length == len(network);
obtained_ensembles_sizes.sort();
sync_ensembles_sizes.sort();
assert obtained_ensembles_sizes == sync_ensembles_sizes;
def testOneSyncEnsemblesAllocation(self):
self.templateSynchronousEnsemblesAllocation(2, -4, 1, 1000, 10, [1, 0], [1, 1], [2]);
def testTwoSyncEnsemblesAllocation(self):
self.templateSynchronousEnsemblesAllocation(2, -4, -1, 1000, 10, [1, 0], [1, 1], [1, 1]);
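# Hedged sketch (illustrative parameters, mirroring the calls the tests above
# make): direct library usage without the unittest harness; not executed here.
def _example_direct_simulation():
    network = hysteresis_network(2, -4, -1);
    output_dynamic = network.simulate(1000, 10);
    return extract_number_oscillations(output_dynamic.output, 0, 0.9);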
| annoviko/pyclustering | pyclustering/nnet/tests/unit/ut_hysteresis.py | Python | gpl-3.0 | 3,547 |
#
# Copyright 2007-2014 Charles du Jeu - Abstrium SAS <team (at) pyd.io>
# This file is part of Pydio.
#
# Pydio is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pydio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Pydio. If not, see <http://www.gnu.org/licenses/>.
#
# The latest code can be found at <http://pyd.io/>.
#
from flask import Flask
from flask_restful import Api
from flask import request, redirect, Response
from flask_restful import Resource
from requests.exceptions import SSLError, ProxyError, TooManyRedirects, ChunkedEncodingError, ContentDecodingError, \
InvalidSchema, InvalidURL, Timeout, RequestException
from pydio.job.job_config import JobConfig, JobsLoader
from pydio.job.EventLogger import EventLogger
from pydio.job.localdb import LocalDbHandler
from pydio.job.scheduler import PydioScheduler
import json
import requests
import keyring
import xmltodict
import types
import logging
import sys
import os
from pathlib import *
from pydio.utils.global_config import ConfigManager
from pydio.utils.functions import connection_helper
from pydio.utils import i18n
_ = i18n.language.ugettext
from functools import wraps
import authdigest
import flask
try:
from pydio.endpoint.resolver import EndpointResolver, RESOLVER_CONFIG, EndpointException
except ImportError:
EndpointResolver = False
RESOLVER_CONFIG = False
EndpointException = False
class FlaskRealmDigestDB(authdigest.RealmDigestDB):
def requires_auth(self, f):
@wraps(f)
def decorated(*args, **kwargs):
request = flask.request
if not self.isAuthenticated(request):
return self.challenge()
return f(*args, **kwargs)
return decorated
authDB = FlaskRealmDigestDB('PydioSyncAuthRealm')
class PydioApi(Api):
def __init__(self, server_port, user, password, external_ip=None):
logging.info('-----------------------------------------------')
if external_ip:
logging.info('Starting agent on http://' + external_ip + ':' + str(server_port) + '/')
logging.info('Warning, this agent UI is world accessible!')
else:
logging.info('Starting agent locally on http://localhost:' + str(server_port) + '/')
logging.info('------------------------------------------------')
self.user_data_path = JobsLoader.Instance().data_path
self.port = server_port
self.external_ip = external_ip
authDB.add_user(user, password)
self.running = False
if getattr(sys, 'frozen', False):
self.real_static_folder = Path(sys._MEIPASS) / 'ui' / 'res'
static_folder = str(self.real_static_folder)
else:
self.real_static_folder = Path(__file__).parent / 'res'
static_folder = 'res'
logging.debug('Starting Flask server with following static folder : '+ static_folder)
self.app = Flask(__name__, static_folder=static_folder, static_url_path='/res')
self.app.logger.setLevel(logging.ERROR)
l = logging.getLogger("werkzeug")
if l:
l.setLevel(logging.ERROR)
super(PydioApi, self).__init__(self.app)
self.add_resource(JobManager, '/', '/jobs', '/jobs/<string:job_id>', '/jobs-status')
self.add_resource(WorkspacesManager, '/ws/<string:job_id>')
self.add_resource(FoldersManager, '/folders/<string:job_id>')
self.add_resource(LogManager, '/jobs/<string:job_id>/logs')
self.add_resource(ConflictsManager, '/jobs/<string:job_id>/conflicts', '/jobs/conflicts')
self.add_resource(CmdManager, '/cmd/<string:cmd>/<string:job_id>', '/cmd/<string:cmd>')
self.app.add_url_rule('/res/i18n.js', 'i18n', self.serve_i18n_file)
self.app.add_url_rule('/res/config.js', 'config', self.server_js_config)
self.app.add_url_rule('/res/dynamic.css', 'dynamic_css', self.serve_dynamic_css)
self.app.add_url_rule('/res/about.html', 'dynamic_about', self.serve_about_content)
if EndpointResolver:
self.add_resource(ResolverManager, '/resolve/<string:client_id>')
self.app.add_url_rule('/res/dynamic.png', 'dynamic_png', self.serve_dynamic_image)
def serve_i18n_file(self):
s = ''
from pydio.utils.i18n import get_languages
import json
languages = get_languages()
short_lang = []
for l in languages:
lang_part = l.split('_')[0]
if lang_part:
short_lang.append(lang_part)
with open(str(self.real_static_folder / 'i18n.js')) as js:
for line in js:
s += line
if EndpointResolver:
additional_strings = EndpointResolver.Instance().load_additional_strings()
if additional_strings:
s += '\nvar PydioAdditionalStrings = ' + json.dumps(additional_strings) + ';'
s += '\nwindow.PydioLangs = merge(PydioAdditionalStrings, PydioLangs);'
s += '\n'
s += 'window.PydioEnvLanguages = ' + json.dumps(short_lang) + ';'
return Response(response=s,
status=200,
mimetype="text/javascript")
def server_js_config(self):
content = "window.ui_config = {'login_mode':'standard'}"
if EndpointResolver:
content = EndpointResolver.Instance().get_ui_config()
return Response(response=content,
status=200,
mimetype="text/javascript")
def serve_dynamic_css(self):
content = ''
if EndpointResolver:
content = EndpointResolver.Instance().load_css()
return Response(response=content,
status=200,
mimetype="text/css")
def serve_dynamic_image(self):
        # This is only called if there is a resolver.
return Response(response=EndpointResolver.Instance().load_image_content(),
status=200,
mimetype="image/png")
def serve_about_content(self):
content = ''
if EndpointResolver:
content = EndpointResolver.Instance().load_about_content()
else:
about_file = str(self.real_static_folder / 'about.html')
with open(about_file, 'r') as handle:
content = handle.read()
return Response(response=content,
status=200,
mimetype="text/html")
def start_server(self):
try:
self.running = True
self.app.run(port=self.port, host=self.external_ip)
except Exception:
self.running = False
logging.exception("Error while starting web server")
def shutdown_server(self):
logging.debug("Shutdown api server: %s" % self.app)
with self.app.test_request_context():
func = request.environ.get('werkzeug.server.shutdown')
if func is None:
raise RuntimeError('Not running with the Werkzeug Server')
func()
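# Hedged usage sketch (illustrative port and credentials): how the agent API is
# typically constructed and started; nothing in this module calls it.
def _example_start_api():
    api = PydioApi(5556, 'agent-user', 'agent-password')
    api.start_server()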
class WorkspacesManager(Resource):
@authDB.requires_auth
def get(self, job_id):
        # defaults so the optional app-name / display-name lookups below are
        # safe to skip when the job is already configured (first branch)
        app_name_url = None
        display_name_url = None
        app_name = ''
        user_display_name = ''
        if job_id != 'request':
jobs = JobsLoader.Instance().get_jobs()
if not job_id in jobs:
return {"error": "Cannot find job"}
job = jobs[job_id]
url = job.server + '/api/pydio/state/user/repositories?format=json'
auth = (job.user_id, keyring.get_password(job.server, job.user_id))
verify = not job.trust_ssl
else:
args = request.args
base = args['url'].rstrip('/')
verify = False if args['trust_ssl'] == 'true' else True
url = base + '/api/pydio/state/user/repositories?format=json'
if 'password' in args:
auth = (args['user'], args['password'])
else:
auth = (args['user'], keyring.get_password(base, args['user']))
app_name_url = base + '/api/pydio/state/plugins?format=json'
display_name_url = base + '/api/pydio/state/user/preferences?format=json'
if verify and "REQUESTS_CA_BUNDLE" in os.environ:
verify = os.environ["REQUESTS_CA_BUNDLE"]
try:
# TRY TO GET APPLICATION TITLE
if app_name_url:
resp = requests.get(app_name_url, stream=False, auth=auth, verify=verify,
proxies=ConfigManager.Instance().get_defined_proxies())
resp.raise_for_status()
try:
app_data = json.loads(resp.content)
app_name = ''
ajxpcores = app_data['plugins']['ajxpcore']
for core in ajxpcores:
if core['@id'] == 'core.ajaxplorer':
for prop in core['plugin_configs']['property']:
if prop['@name'] == 'APPLICATION_TITLE':
app_name = json.loads(prop['$'])
break
break
except KeyError as k:
pass
except ValueError:
pass
# TRY TO GET USER DISPLAY NAME
if display_name_url:
resp = requests.get(display_name_url, stream=False, auth=auth, verify=verify,
proxies=ConfigManager.Instance().get_defined_proxies())
resp.raise_for_status()
try:
user_data = json.loads(resp.content)
user_display_name = ''
prefs = user_data['preferences']['pref']
for pref in prefs:
if pref['@name'] == 'USER_DISPLAY_NAME':
if pref['@value']:
user_display_name = pref['@value']
break
except KeyError as k:
pass
except ValueError:
pass
resp = requests.get(url, stream=True, auth=auth, verify=verify,
proxies=ConfigManager.Instance().get_defined_proxies())
resp.raise_for_status()
data = json.loads(resp.content)
if 'repositories' in data and 'repo' in data['repositories']:
if isinstance(data['repositories']['repo'], types.DictType):
data['repositories']['repo'] = [data['repositories']['repo']]
data['repositories']['repo'] = filter(lambda x: not x['@access_type'].startswith('ajxp_'), data['repositories']['repo'])
if app_name:
data['application_title'] = app_name
if user_display_name:
data['user_display_name'] = user_display_name
return data
except requests.HTTPError:
r = resp.status_code
message = _("Couldn't load your workspaces, check your server !")
if r == 404:
message = _("Server not found (404), is it up and has it Pydio installed ?")
elif r == 401:
message = _("Authentication failed: please verify your login and password")
elif r == 403:
message = _("Access to the server is forbidden")
elif r == 500 or r == 408:
message = _("Server seems to be encountering problems (500)")
logging.debug("Error while loading workspaces : " + message)
return {'error': message}, resp.status_code
except SSLError as rt:
logging.error(rt.message)
return {'error': _("An SSL error happened! Is your server using a self-signed certificate? In that case please check 'Trust SSL certificate'")}, 400
except ProxyError as rt:
logging.error(rt.message)
return {'error': _('A proxy error happened, please check the logs')}, 400
except TooManyRedirects as rt:
logging.error(rt.message)
return {'error': _('Connection error: too many redirects')}, 400
except ChunkedEncodingError as rt:
logging.error(rt.message)
return {'error': _('Chunked encoding error, please check the logs')}, 400
except ContentDecodingError as rt:
logging.error(rt.message)
return {'error': _('Content Decoding error, please check the logs')}, 400
except InvalidSchema as rt:
logging.error(rt.message)
return {'error': _('Http connection error: invalid schema.')}, 400
except InvalidURL as rt:
logging.error(rt.message)
return {'error': _('Http connection error: invalid URL.')}, 400
except ValueError:
message = "Error while parsing request result:" + resp.content
logging.debug(message)
return {'error': message}, 400
except Timeout as to:
logging.error(to)
return {'error': _('Connection timeout!')}, 400
except RequestException as ree:
logging.error(ree.message)
return {'error': _('Cannot resolve domain!')}, 400
class FoldersManager(Resource):
@authDB.requires_auth
def get(self, job_id):
if job_id != 'request':
jobs = JobsLoader.Instance().get_jobs()
if not job_id in jobs:
return {"error":"Cannot find job"}
job = jobs[job_id]
url = job.server + '/api/'+job.workspace+'/ls/?options=d&recursive=true'
auth = (job.user_id, keyring.get_password(job.server, job.user_id))
verify = not job.trust_ssl
else:
args = request.args
base = args['url'].rstrip('/')
verify = False if args['trust_ssl'] == 'true' else True
url = base + '/api/'+args['ws']+'/ls/?options=d&recursive=true&max_depth=2'
if 'password' in args:
auth = (args['user'], args['password'])
else:
auth = (args['user'], keyring.get_password(base, args['user']))
if verify and "REQUESTS_CA_BUNDLE" in os.environ:
verify = os.environ["REQUESTS_CA_BUNDLE"]
resp = requests.get( url, stream=True, auth=auth, verify=verify,
proxies=ConfigManager.Instance().get_defined_proxies())
o = xmltodict.parse(resp.content)
if not 'tree' in o or 'message' in o['tree']:
return [{'error':'Cannot load workspace'}]
if not 'tree' in o['tree']:
return []
if isinstance(o['tree']['tree'], types.DictType):
return [o['tree']['tree']]
return o['tree']['tree']
class JobManager(Resource):
loader = None
@authDB.requires_auth
def post(self):
JobsLoader.Instance().get_jobs()
json_req = request.get_json()
new_job = JobConfig.object_decoder(json_req)
if 'test_path' in json_req:
json_req['directory'] = os.path.join(ConfigManager.Instance().get_data_path(), json_req['repoObject']['label'])
return json_req
elif 'compute_sizes' in json_req:
dl_rate = 2 * 1024 * 1024
up_rate = 0.1 * 1024 * 1024
# COMPUTE REMOTE SIZE
from pydio.sdk.remote import PydioSdk
trust_ssl = False
if 'trust_ssl' in json_req:
trust_ssl = json_req['trust_ssl']
sdk = PydioSdk(json_req['server'], json_req['workspace'], json_req['remote_folder'], '',
auth=(json_req['user'], json_req['password']),
device_id=ConfigManager.Instance().get_device_id(),
skip_ssl_verify=trust_ssl,
proxies=ConfigManager.Instance().get_defined_proxies())
up = [0.0]
def callback(location, change, info):
if change and "bytesize" in change and change["md5"] != "directory":
up[0] += float(change["bytesize"])
sdk.changes_stream(0, callback)
# COMPUTE LOCAL SIZE
down = 0.0
if os.path.exists(json_req['directory']):
for dirpath, dirnames, filenames in os.walk(json_req['directory']):
for f in filenames:
fp = os.path.join(dirpath, f)
try:
down += os.path.getsize(fp)
except OSError:
pass
json_req['byte_size'] = up[0] + down
json_req['eta'] = up[0] * 8 / dl_rate + down * 8 / up_rate
return json_req
JobsLoader.Instance().update_job(new_job)
scheduler = PydioScheduler.Instance()
scheduler.reload_configs()
scheduler.disable_job(new_job.id)
if not 'toggle_status' in json_req:
JobsLoader.Instance().clear_job_data(new_job.id)
scheduler.enable_job(new_job.id)
return JobConfig.encoder(new_job)
@authDB.requires_auth
def get(self, job_id = None):
if request.path == '/':
return redirect("/res/index.html", code=302)
jobs = JobsLoader.Instance().get_jobs()
if not job_id:
json_jobs = []
for k in jobs:
data = JobConfig.encoder(jobs[k])
self.enrich_job(data, k, (request.path == '/jobs-status'))
json_jobs.append(data)
if request.path == '/jobs-status':
response = {'is_connected_to_internet': connection_helper.internet_ok, 'jobs': json_jobs}
return response
return json_jobs
data = JobConfig.encoder(jobs[job_id])
self.enrich_job(data, job_id)
return data
def enrich_job(self, job_data, job_id, get_notification=False):
running = PydioScheduler.Instance().is_job_running(job_id)
job_data['running'] = running
logger = EventLogger(JobsLoader.Instance().build_job_data_path(job_id))
if get_notification:
notification = logger.consume_notification()
if notification:
job_data['notification'] = notification
last_events = logger.get_all(1, 0)
if len(last_events):
job_data['last_event'] = last_events.pop()
if running:
job_data['state'] = PydioScheduler.Instance().get_job_progress(job_id)
@authDB.requires_auth
def delete(self, job_id):
JobsLoader.Instance().delete_job(job_id)
scheduler = PydioScheduler.Instance()
scheduler.reload_configs()
scheduler.disable_job(job_id)
JobsLoader.Instance().clear_job_data(job_id, parent=True)
        return job_id + " deleted", 204
class ConflictsManager(Resource):
@authDB.requires_auth
def post(self):
json_conflict = request.get_json()
job_id = json_conflict['job_id']
try:
job_config = JobsLoader.Instance().get_job(job_id)
except Exception:
return "Can't find any job config with this ID.", 404
dbHandler = LocalDbHandler(JobsLoader.Instance().build_job_data_path(job_id))
dbHandler.update_node_status(json_conflict['node_path'], json_conflict['status'])
if not dbHandler.count_conflicts() and job_config.active:
t = PydioScheduler.Instance().get_thread(job_id)
if t:
t.start_now()
return json_conflict
@authDB.requires_auth
def get(self, job_id):
if not job_id in JobsLoader.Instance().get_jobs():
return "Can't find any job config with this ID.", 404
dbHandler = LocalDbHandler(JobsLoader.Instance().build_job_data_path(job_id))
return dbHandler.list_conflict_nodes()
class LogManager(Resource):
def __init__(self):
self.events = {}
@authDB.requires_auth
def get(self, job_id):
if not job_id in JobsLoader.Instance().get_jobs():
return "Can't find any job config with this ID.", 404
logger = EventLogger(JobsLoader.Instance().build_job_data_path(job_id))
if not request.args:
logs = logger.get_all(20,0)
else:
filter = request.args.keys()[0]
filter_parameter = request.args.get(filter)
logs = logger.filter(filter, filter_parameter)
tasks = PydioScheduler.Instance().get_job_progress(job_id)
return {"logs":logs, "running":tasks}
class CmdManager(Resource):
@authDB.requires_auth
def get(self, cmd, job_id=None):
if job_id:
if cmd == 'enable' or cmd == 'disable':
job_config = JobsLoader.Instance().get_job(job_id)
job_config.active = True if cmd == 'enable' else False
JobsLoader.Instance().update_job(job_config)
PydioScheduler.Instance().reload_configs()
PydioScheduler.Instance().handle_job_signal(self, cmd, job_id)
else:
return PydioScheduler.Instance().handle_generic_signal(self, cmd)
return ('success',)
class ResolverManager(Resource):
@authDB.requires_auth
def get(self, client_id):
try:
return EndpointResolver.Instance().get_customer_endpoints(client_id)
except EndpointException as e:
return {'message': e.message, 'code': e.error_id}, 500 | andrewleech/pydio-sync | src/pydio/ui/web_api.py | Python | gpl-3.0 | 22,015 |
"""The prezzibenzina component."""
| fbradyirl/home-assistant | homeassistant/components/prezzibenzina/__init__.py | Python | apache-2.0 | 35 |
from django.contrib import admin
from .models import Channel, Video, Podcast, Pod
import logging
log = logging.getLogger(__name__)
# Register your models here.
def update_channel(modeladmin, request, queryset):
log.info('admin: update_channel called ')
for n in queryset:
log.info("query : "+ str(n))
update_channel.short_description = "Update selected channels"
def update_latest_video(modeladmin, request, queryset):
log.info('admin: update_latest_video called')
for channel in queryset:
videos = channel.video_set.order_by('-pub_date')
if len(videos) > 0:
if channel.latest_video != videos[0].pub_date:
channel.latest_video = videos[0].pub_date
                log.info('Updating latest_video to ' + str(channel.latest_video))
channel.save()
update_latest_video.short_description = "Update latest video"
@admin.register(Channel)
class ChannelAdmin(admin.ModelAdmin):
date_hierarchy = 'latest_video'
ordering = ['title_text']
list_display = ('title_text', 'latest_video',)
actions = [update_channel, update_latest_video]
@admin.register(Video)
class VideoAdmin(admin.ModelAdmin):
date_hierarchy = 'pub_date'
ordering = ['-pub_date']
list_display = ('title_text', 'channel', 'pub_date',)
list_filter = ('channel','pub_date',)
@admin.register(Podcast)
class PodcastAdmin(admin.ModelAdmin):
date_hierarchy = 'latest_pod'
ordering = ['title_text']
list_display = ('title_text', 'latest_pod',)
@admin.register(Pod)
class PodAdmin(admin.ModelAdmin):
date_hierarchy = 'pub_date'
ordering = ['-pub_date']
list_display = ('title_text', 'podcast', 'pub_date',)
list_filter = ('podcast','pub_date',)
| eponsko/you2rss | you2rss/admin.py | Python | gpl-3.0 | 1,748 |
from stream_framework.feeds.aggregated_feed.base import AggregatedFeed
from stream_framework.serializers.aggregated_activity_serializer import \
NotificationSerializer
from stream_framework.storage.redis.timeline_storage import RedisTimelineStorage
import copy
import json
import logging
logger = logging.getLogger(__name__)
class NotificationFeed(AggregatedFeed):
'''
Similar to an aggregated feed, but:
    - doesn't use the activity storage (serializes everything into the timeline storage)
- features denormalized counts
- pubsub signals which you can subscribe to
For now this is entirely tied to Redis
'''
#: notification feeds only need a small max length
max_length = 99
key_format = 'notification_feed:1:user:%(user_id)s'
#: the format we use to denormalize the count
count_format = 'notification_feed:1:user:%(user_id)s:count'
#: the key used for locking
lock_format = 'notification_feed:1:user:%s:lock'
#: the main channel to publish
pubsub_main_channel = 'juggernaut'
timeline_serializer = NotificationSerializer
activity_storage_class = None
activity_serializer = None
def __init__(self, user_id, **kwargs):
'''
User id (the user for which we want to read/write notifications)
'''
AggregatedFeed.__init__(self, user_id, **kwargs)
# location to which we denormalize the count
self.format_dict = dict(user_id=user_id)
self.count_key = self.count_format % self.format_dict
# set the pubsub key if we're using it
self.pubsub_key = user_id
self.lock_key = self.lock_format % self.format_dict
from stream_framework.storage.redis.connection import get_redis_connection
self.redis = get_redis_connection()
def add_many(self, activities, **kwargs):
'''
Similar to the AggregatedActivity.add_many
The only difference is that it denormalizes a count of unseen activities
'''
with self.redis.lock(self.lock_key, timeout=2):
current_activities = AggregatedFeed.add_many(
self, activities, **kwargs)
# denormalize the count
self.denormalize_count()
# return the current state of the notification feed
return current_activities
def get_denormalized_count(self):
'''
Returns the denormalized count stored in self.count_key
'''
result = self.redis.get(self.count_key) or 0
result = int(result)
return result
def set_denormalized_count(self, count):
'''
Updates the denormalized count to count
:param count: the count to update to
'''
self.redis.set(self.count_key, count)
self.publish_count(count)
def publish_count(self, count):
'''
        Publishes the count via pubsub
:param count: the count to publish
'''
count_dict = dict(unread_count=count, unseen_count=count)
count_data = json.dumps(count_dict)
data = {'channel': self.pubsub_key, 'data': count_data}
encoded_data = json.dumps(data)
self.redis.publish(self.pubsub_main_channel, encoded_data)
def denormalize_count(self):
'''
Denormalize the number of unseen aggregated activities to the key
defined in self.count_key
'''
# now count the number of unseen
count = self.count_unseen()
# and update the count if it changed
stored_count = self.get_denormalized_count()
if stored_count != count:
self.set_denormalized_count(count)
return count
def count_unseen(self, aggregated_activities=None):
'''
Counts the number of aggregated activities which are unseen
:param aggregated_activities: allows you to specify the aggregated
activities for improved performance
'''
count = 0
if aggregated_activities is None:
aggregated_activities = self[:self.max_length]
for aggregated in aggregated_activities:
if not aggregated.is_seen():
count += 1
return count
def mark_all(self, seen=True, read=None):
'''
Mark all the entries as seen or read
:param seen: set seen_at
:param read: set read_at
'''
with self.redis.lock(self.lock_key, timeout=10):
# get the current aggregated activities
aggregated_activities = self[:self.max_length]
# create the update dict
update_dict = {}
for aggregated_activity in aggregated_activities:
changed = False
old_activity = copy.deepcopy(aggregated_activity)
if seen is True and not aggregated_activity.is_seen():
aggregated_activity.update_seen_at()
changed = True
if read is True and not aggregated_activity.is_read():
aggregated_activity.update_read_at()
changed = True
if changed:
update_dict[old_activity] = aggregated_activity
# send the diff to the storage layer
new, deleted = [], []
changed = update_dict.items()
self._update_from_diff(new, changed, deleted)
# denormalize the count
self.denormalize_count()
# return the new activities
return aggregated_activities
class RedisNotificationFeed(NotificationFeed):
timeline_storage_class = RedisTimelineStorage
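# Hedged usage sketch (illustrative user id; assumes a reachable Redis configured
# for stream_framework): the typical count / mark-all flow against the feed.
def _example_notification_flow(user_id=13):
    feed = RedisNotificationFeed(user_id)
    unseen_count = feed.count_unseen()
    feed.mark_all(seen=True, read=True)
    return unseen_count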
| nikolay-saskovets/Feedly | stream_framework/feeds/aggregated_feed/notification_feed.py | Python | bsd-3-clause | 5,626 |
from __future__ import division
from pylab import *
from sklearn import datasets
from sklearn import svm
#import cv2
def svmTest():
def sk_learn():
data = datasets.load_digits()
N_test = int(1050)
x_train = data['data'][:-N_test]
y_train = data['target'][:-N_test]
x_test = data['data'][-N_test:]
y_test = data['target'][-N_test:]
np.savetxt("csvs/x_train.csv", x_train, delimiter=",")#@asdjkk
np.savetxt("csvs/y_train.csv", y_train, delimiter=",", newline=",")
np.savetxt("csvs/x_test.csv", x_test, delimiter=",")
np.savetxt("csvs/y_test.csv", y_test, delimiter=",", newline=",")
ml = svm.LinearSVC()
ml = ml.fit(x_train, y_train)
yhat_test = ml.predict(x_test)
print argwhere(abs(yhat_test - y_test) < 0.5).shape[0] / y_test.shape[0]
data = datasets.load_digits()
N_test = int(1050)
x_train = data['data'][:-N_test]
y_train = data['target'][:-N_test]
x_test = data['data'][-N_test:]
y_test = data['target'][-N_test:]
x_train = asarray(x_train, dtype=float32)
y_train = asarray(y_train, dtype=float32)
x_test = asarray(x_test, dtype=float32)
params = dict(kernel_type=cv2.SVM_SIGMOID, svm_type=cv2.SVM_C_SVC)
cv_svm = cv2.SVM()
cv_svm.train(x_train, y_train, params=params)
yhat = cv_svm.predict_all(x_test)
print "Percent correct:", argwhere(abs(yhat.flat[:] - y_test) < 0.5).shape[0] / yhat.shape[0]
def pinvTest():
M = 3
N = 4
x = arange(M*N).reshape(M,N)
y = pinv(x)
def kronTest():
A = array([1, 2, 3, 4, 5, 6]).reshape(2, 3)
B = array([3, 2, 5, 0, 1, 2]).reshape(3,2)
print kron(A, B)
def convolveTest():
    from scipy.signal import fftconvolve
    x = arange(10)
    k = ones(4) / 4
    y = fftconvolve(x, k, mode='same')
    print np.around(y, decimals=3)
convolveTest()
| shyamalschandra/swix | python_testing/testing.py | Python | mit | 1,865 |
"""
This module hosts all the extension functions and classes created via SDK.
The function :py:func:`ext_import` is used to import a toolkit module (shared library)
into the workspace. The shared library can be directly imported
from a remote source, e.g. http, s3, or hdfs.
The imported module will be under namespace `graphlab.extensions`.
Alternatively, if the shared library is local, it can be directly imported
using the python import statement. Note that graphlab must be imported first.
"""
'''
Copyright (C) 2015 Dato, Inc.
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the DATO-PYTHON-LICENSE file for details.
'''
# This is a fake meta namespace which contains toolkit functions and toolkit
# models implemented as extensions in C++
import graphlab as _gl
import types as _types
from graphlab.util import _make_internal_url
from graphlab.cython.cy_sframe import UnitySFrameProxy as _UnitySFrameProxy
from graphlab.cython.cy_sarray import UnitySArrayProxy as _UnitySArrayProxy
from graphlab.cython.cy_graph import UnityGraphProxy as _UnityGraphProxy
from graphlab.cython.cy_model import UnityModel as _UnityModel
from graphlab.toolkits._main import ToolkitError as _ToolkitError
from graphlab.cython.context import debug_trace as cython_context
# Now. a bit of magic hackery is going to happen to this module.
# This module is going to be first imported as graphlab.extensions
# After which, inside graphlab/__init__.py, sys.modules['graphlab.extensions']
# will be modified to become a class called _extension_wrapper which redirects
# getattr calls into this module.
#
# The reason for this wrapping is so that uses of functions in gl.extensions
# (for instance)
#
# import graphlab as gl
# gl.extensions._demo_addone(5)
#
# This will normally not work because gl.extensions._publish() was not called
# hence _demo_addone will not be found.
#
# By wrapping the extensions module in another class, we can redefine
# __getattr__ on that class and have it force gl.extensions._publish() when
# an attribute name is not found.
#
# However, there are some odd sideeffects due to the use of the metapath
# system as well. the metapath importer (this module) is going to look in
# gl.extensions, but gl.extensions is going poke this module hence resulting
# in an interesting recursive relationship.
#
# Also, we need gl.extensions.__dict__ to have all the published information
# so that tab completion in ipython works.
#
# The result is that we need gl.extensions._publish() to publish into both
# places.
# - the current module
# - the gl.extensions wrapper
#
# Then the metapath importer (this module) will just need to look in this
# module, breaking the recursive relation. And the gl.extensions wrapper will
# have all the stuff in it for tab completion by IPython.
import sys as _sys
_thismodule = _sys.modules[__name__]
class_uid_to_class = {}
def _wrap_function_return(val):
"""
Recursively walks each thing in val, opening lists and dictionaries,
    converting all occurrences of UnityGraphProxy to an SGraph,
UnitySFrameProxy to SFrame, and UnitySArrayProxy to SArray.
"""
if type(val) == _UnityGraphProxy:
return _gl.SGraph(_proxy = val)
elif type(val) == _UnitySFrameProxy:
return _gl.SFrame(_proxy = val)
elif type(val) == _UnitySArrayProxy:
return _gl.SArray(_proxy = val)
elif type(val) == _UnityModel:
# we need to cast it up to the appropriate type
try:
if '__uid__' in val.list_fields():
uid = val.get('__uid__')
if uid in class_uid_to_class:
return class_uid_to_class[uid](_proxy=val)
except:
pass
return val
elif type(val) == list:
return [_wrap_function_return(i) for i in val]
elif type(val) == dict:
return {i:_wrap_function_return(val[i]) for i in val}
else:
return val
def _setattr_wrapper(mod, key, value):
"""
A setattr wrapper call used only by _publish(). This ensures that anything
published into this module is also published into gl.extensions
"""
setattr(mod, key, value)
if mod == _thismodule:
setattr(_sys.modules[__name__], key, value)
def _translate_function_arguments(argument):
import inspect
if inspect.isfunction(argument):
try:
return _build_native_function_call(argument)
except:
raise TypeError("Only native functions, or simple lambdas of native functions (with constant capture values) can be passed to an extension function.")
elif type(argument) is list:
return [_translate_function_arguments(i) for i in argument]
elif type(argument) is tuple:
return [_translate_function_arguments(i) for i in argument]
elif type(argument) is dict:
return {i:_translate_function_arguments(v) for (i, v) in argument.iteritems()}
elif hasattr(argument, '_tkclass') and hasattr(argument, '__glmeta__'):
return argument._tkclass
else:
return argument
def _run_toolkit_function(fnname, arguments, args, kwargs):
"""
Dispatches arguments to a toolkit function.
Parameters
----------
fnname : string
The toolkit function to run
arguments : list[string]
The list of all the arguments the function takes.
args : list
The arguments that were passed
kwargs : dictionary
The keyword arguments that were passed
"""
# scan for all the arguments in args
num_args_got = len(args) + len(kwargs)
num_args_required = len(arguments)
if num_args_got != num_args_required:
raise TypeError("Expecting " + str(num_args_required) + " arguments, got " + str(num_args_got))
## fill the dict first with the regular args
argument_dict = {}
for i in range(len(args)):
argument_dict[arguments[i]] = args[i]
# now fill with the kwargs.
for k in kwargs.keys():
if k in argument_dict:
raise TypeError("Got multiple values for keyword argument '" + k + "'")
argument_dict[k] = kwargs[k]
argument_dict = _translate_function_arguments(argument_dict)
# unwrap it
with cython_context():
ret = _gl.connect.main.get_unity().run_toolkit(fnname, argument_dict)
# handle errors
if ret[0] != True:
if len(ret[1]) > 0:
raise _ToolkitError(ret[1])
else:
raise _ToolkitError("Toolkit failed with unknown error")
ret = _wrap_function_return(ret[2])
if type(ret) == dict and 'return_value' in ret:
return ret['return_value']
else:
return ret
def _make_injected_function(fn, arguments):
return lambda *args, **kwargs: _run_toolkit_function(fn, arguments, args, kwargs)
def _class_instance_from_name(class_name, *arg, **kwarg):
"""
class_name is of the form modA.modB.modC.class module_path splits on "."
and the import_path is then ['modA','modB','modC'] the __import__ call is
really annoying but essentially it reads like:
import class from modA.modB.modC
- Then the module variable points to modC
- Then you get the class from the module.
"""
# we first look in gl.extensions for the class name
module_path = class_name.split('.')
import_path = module_path[0:-1]
module = __import__('.'.join(import_path), fromlist=[module_path[-1]])
class_ = getattr(module, module_path[-1])
instance = class_(*arg, **kwarg)
return instance
def _create_class_instance(class_name, _proxy):
"""
Look for the class in graphlab.extensions in case it has already been
imported (perhaps as a builtin extensions hard compiled into unity_server).
"""
try:
return _class_instance_from_name("graphlab.extensions." + class_name, _proxy=_proxy)
except:
pass
return _class_instance_from_name(class_name, _proxy=_proxy)
class _ToolkitClass:
"""
The actual class class that is rewritten to become each user defined
toolkit class.
Certain care with attributes (__getattr__ / __setattr__) has to be done to
inject functions, and attributes into their appropriate places.
"""
_functions = {} # The functions in the class
_get_properties = [] # The getable properties in the class
_set_properties = [] # The setable properties in the class
_tkclass = None
def __init__(self, *args, **kwargs):
tkclass_name = getattr(self.__init__, "tkclass_name")
_proxy = None
if "_proxy" in kwargs:
_proxy = kwargs['_proxy']
del kwargs['_proxy']
if _proxy:
self.__dict__['_tkclass'] = _proxy
elif tkclass_name:
self.__dict__['_tkclass'] = _gl.connect.main.get_unity().create_toolkit_class(tkclass_name)
try:
# fill the functions and properties
self.__dict__['_functions'] = self._tkclass.get('list_functions')
self.__dict__['_get_properties'] = self._tkclass.get('list_get_properties')
self.__dict__['_set_properties'] = self._tkclass.get('list_set_properties')
# rewrite the doc string for this class
try:
self.__dict__['__doc__'] = self._tkclass.get('get_docstring', {'__symbol__':'__doc__'})
self.__class__.__dict__['__doc__'] = self.__dict__['__doc__']
except:
pass
except:
raise _ToolkitError("Cannot create Toolkit Class for this class. "
"This class was not created with the new toolkit class system.")
# for compatibility with older classes / models
self.__dict__['__proxy__'] = self.__dict__['_tkclass']
if '__init__' in self.__dict__['_functions']:
self.__run_class_function("__init__", args, kwargs)
elif len(args) != 0 or len(kwargs) != 0:
raise TypeError("This constructor takes no arguments")
def _get_wrapper(self):
gl_meta_value = self.__glmeta__['extension_name']
return lambda _proxy: _create_class_instance(gl_meta_value, _proxy)
def __dir__(self):
return self._functions.keys() + self._get_properties + self._set_properties
def __run_class_function(self, fnname, args, kwargs):
# scan for all the arguments in args
arguments = self._functions[fnname]
num_args_got = len(args) + len(kwargs)
num_args_required = len(arguments)
if num_args_got != num_args_required:
raise TypeError("Expecting " + str(num_args_required) + " arguments, got " + str(num_args_got))
## fill the dict first with the regular args
argument_dict = {}
for i in range(len(args)):
argument_dict[arguments[i]] = args[i]
# now fill with the kwargs.
for k in kwargs.keys():
if k in argument_dict:
raise TypeError("Got multiple values for keyword argument '" + k + "'")
argument_dict[k] = kwargs[k]
# unwrap it
argument_dict['__function_name__'] = fnname
ret = self._tkclass.get('call_function', argument_dict)
ret = _wrap_function_return(ret)
return ret
def __getattr__(self, name):
if name == '__proxy__':
return self.__dict__['__proxy__']
elif name in self._get_properties:
# is it an attribute?
arguments = {'__property_name__':name}
return _wrap_function_return(self._tkclass.get('get_property', arguments))
elif name in self._functions:
# is it a function?
ret = lambda *args, **kwargs: self.__run_class_function(name, args, kwargs)
ret.__doc__ = "Name: " + name + "\nParameters: " + str(self._functions[name]) + "\n"
try:
ret.__doc__ += self._tkclass.get('get_docstring', {'__symbol__':name})
ret.__doc__ += '\n'
except:
pass
return ret
else:
raise AttributeError("no attribute " + name)
def __setattr__(self, name, value):
if name == '__proxy__':
self.__dict__['__proxy__'] = value
elif name in self._set_properties:
# is it a setable property?
arguments = {'__property_name__':name, 'value':value}
return _wrap_function_return(self._tkclass.get('set_property', arguments))
else:
raise AttributeError("no attribute " + name)
def _list_functions():
"""
Lists all the functions registered in unity_server.
"""
unity = _gl.connect.main.get_unity()
return unity.list_toolkit_functions()
def _publish():
import sys
import copy
"""
Publishes all functions and classes registered in unity_server.
The functions and classes will appear in the module graphlab.extensions
"""
unity = _gl.connect.main.get_unity()
fnlist = unity.list_toolkit_functions()
# Loop through all the functions and inject it into
# graphlab.extensions.[blah]
# Note that [blah] may be somemodule.somefunction
# and so the injection has to be
# graphlab.extensions.somemodule.somefunction
for fn in fnlist:
props = unity.describe_toolkit_function(fn)
# quit if there is nothing we can process
if 'arguments' not in props:
continue
arguments = props['arguments']
newfunc = _make_injected_function(fn, arguments)
newfunc.__doc__ = "Name: " + fn + "\nParameters: " + str(arguments) + "\n"
if 'documentation' in props:
newfunc.__doc__ += props['documentation'] + "\n"
newfunc.__dict__['__glmeta__'] = {'extension_name':fn}
modpath = fn.split('.')
# walk the module tree
mod = _thismodule
for path in modpath[:-1]:
try:
getattr(mod, path)
except:
_setattr_wrapper(mod, path, _types.ModuleType(name=path))
mod = getattr(mod, path)
_setattr_wrapper(mod, modpath[-1], newfunc)
# Repeat for classes
tkclasslist = unity.list_toolkit_classes()
for tkclass in tkclasslist:
pathpos = tkclass.split('.')
m = unity.describe_toolkit_class(tkclass)
# of v2 type
if not ('functions' in m and 'get_properties' in m and 'set_properties' in m and 'uid' in m):
continue
# create a new class
new_class = copy.deepcopy(_ToolkitClass.__dict__)
# rewrite the init method to add the toolkit class name so it will
# default construct correctly
new_class['__init__'] = _types.FunctionType(new_class['__init__'].func_code,
new_class['__init__'].func_globals,
name='__init__',
argdefs=(),
closure=())
new_class['__init__'].tkclass_name = tkclass
newclass = _types.ClassType(tkclass, (object,), new_class)
setattr(newclass, '__glmeta__', {'extension_name':tkclass})
class_uid_to_class[m['uid']] = newclass
modpath = tkclass.split('.')
# walk the module tree
mod = _thismodule
for path in modpath[:-1]:
try:
getattr(mod, path)
except:
_setattr_wrapper(mod, path, _types.ModuleType(name=path))
mod = getattr(mod, path)
_setattr_wrapper(mod, modpath[-1], newclass)
class _ExtMetaPath(object):
"""
This is a magic metapath searcher. To understand how this works,
See the PEP 302 document. Essentially this class is inserted into
the sys.meta_path list. This class must implement find_module()
and load_module(). After which, this class is called first when any
particular module import was requested, allowing this to essentially
'override' the default import behaviors.
"""
def find_module(self, fullname, submodule_path=None):
"""
We have to see if fullname refers to a module we can import.
Some care is needed here because:
import xxx # tries to load xxx.so from any of the python import paths
import aaa.bbb.xxx # tries to load aaa/bbb/xxx.so from any of the python import paths
"""
# first see if we have this particular so has been loaded by
# graphlab's extension library before
ret = self.try_find_module(fullname, submodule_path)
if ret is not None:
return ret
# nope. has not been loaded before
        # let's try to find a ".so" or a ".dylib" in any of the python
        # import locations
import sys
import os
# This drops the last "." So if I am importing aaa.bbb.xxx
# module_subpath is aaa.bbb
module_subpath = ".".join(fullname.split('.')[:-1])
for path in sys.path:
# joins the path to aaa/bbb/xxx
pathname = os.path.join(path, os.sep.join(fullname.split('.')))
            # try to load the ".so" extension
try:
if os.path.exists(pathname + '.so'):
ext_import(pathname + '.so', module_subpath)
break
except:
pass
            # try to load the ".dylib" extension
try:
if os.path.exists(pathname + '.dylib'):
ext_import(pathname + '.dylib', module_subpath)
break
except:
pass
ret = self.try_find_module(fullname, submodule_path)
if ret is not None:
return ret
def try_find_module(self, fullname, submodule_path=None):
# check if the so has been loaded before
import sys
# try to find the module inside of gl.extensions
# Essentially: if fullname == aaa.bbb.xxx
# Then we try to see if we have loaded gl.extensions.aaa.bbb.xxx
mod = _thismodule
modpath = fullname.split('.')
# walk the module tree
mod = _thismodule
for path in modpath:
try:
mod = getattr(mod, path)
except:
return None
return self
def load_module(self, fullname):
import sys
# we may have already been loaded
if fullname in sys.modules:
return sys.modules[fullname]
# try to find the module inside of gl.extensions
# Essentially: if fullname == aaa.bbb.xxx
# Then we try to look for gl.extensions.aaa.bbb.xxx
mod = _thismodule
modpath = fullname.split('.')
for path in modpath:
mod = getattr(mod, path)
# Inject the module into aaa.bbb.xxx
mod.__loader__ = self
mod.__package__ = fullname
mod.__name__ = fullname
sys.modules[fullname] = mod
return mod
_ext_meta_path_singleton = None
def _add_meta_path():
"""
called on unity_server import to insert the meta path loader.
"""
import sys
global _ext_meta_path_singleton
if _ext_meta_path_singleton == None:
_ext_meta_path_singleton = _ExtMetaPath()
sys.meta_path += [_ext_meta_path_singleton]
def ext_import(soname, module_subpath=""):
"""
Loads a graphlab toolkit module (a shared library) into the
gl.extensions namespace.
Toolkit module created via SDK can either be directly imported,
e.g. ``import example`` or via this function, e.g. ``graphlab.ext_import("example.so")``.
Use ``ext_import`` when you need more namespace control, or when
the shared library is not local, e.g. in http, s3 or hdfs.
Parameters
----------
soname : string
The filename of the shared library to load.
This can be a URL, or a HDFS location. For instance if soname is
somewhere/outthere/toolkit.so
The functions in toolkit.so will appear in gl.extensions.toolkit.*
module_subpath : string, optional
Any additional module paths to prepend to the toolkit module after
it is imported. For instance if soname is
somewhere/outthere/toolkit.so, by default
the functions in toolkit.so will appear in gl.extensions.toolkit.*.
        However, if module_subpath="somewhere.outthere", the functions
        in toolkit.so will appear in gl.extensions.somewhere.outthere.toolkit.*
Returns
-------
out : a list of functions and classes loaded.
Examples
--------
For instance, given a module which implements the function "square_root",
.. code-block:: c++
#include <cmath>
#include <graphlab/sdk/toolkit_function_macros.hpp>
double square_root(double a) {
return sqrt(a);
}
BEGIN_FUNCTION_REGISTRATION
REGISTER_FUNCTION(square_root, "a");
END_FUNCTION_REGISTRATION
compiled into example.so
>>> graphlab.ext_import('example1.so')
['example1.square_root']
>>> graphlab.extensions.example1.square_root(9)
3.0
We can customize the import location with module_subpath which can be
used to avoid namespace conflicts when you have multiple toolkits with the
same filename.
>>> graphlab.ext_import('example1.so', 'math')
['math.example1.square_root']
>>> graphlab.extensions.math.example1.square_root(9)
3.0
The module can also be imported directly, but graphlab *must* be imported
first. graphlab will intercept the module loading process to load the
toolkit.
>>> import graphlab
>>> import example1 #searches for example1.so in all the python paths
>>> example1.square_root(9)
3.0
"""
unity = _gl.connect.main.get_unity()
import os
if os.path.exists(soname):
soname = os.path.abspath(soname)
else:
soname = _make_internal_url(soname)
ret = unity.load_toolkit(soname, module_subpath)
if len(ret) > 0:
raise RuntimeError(ret)
_publish()
# push the functions into the corresponding module namespace
filename = os.path.basename(soname)
modulename = filename.split('.')[0]
return unity.list_toolkit_functions_in_dynamic_module(soname) + unity.list_toolkit_classes_in_dynamic_module(soname)
def _get_toolkit_function_name_from_function(fn):
"""
If fn is a toolkit function either imported by graphlab.extensions.ext_import
or the magic import system, we return the name of toolkit function.
Otherwise we return an empty string.
"""
try:
if '__glmeta__' in fn.__dict__:
return fn.__dict__['__glmeta__']['extension_name']
else:
return ""
except:
return ""
def _get_argument_list_from_toolkit_function_name(fn):
"""
Given a toolkit function name, return the argument list
"""
unity = _gl.connect.main.get_unity()
fnprops = unity.describe_toolkit_function(fn)
argnames = fnprops['arguments']
return argnames
class _Closure:
"""
Defines a closure class describing a lambda closure. Contains 2 fields:
native_fn_name: The toolkit native function name
arguments: An array of the same length as the toolkit native function.
Each array element is an array of 2 elements [is_capture, value]
If is_capture == 1:
value contains the captured value
If is_capture == 0:
value contains a number denoting the lambda argument position.
Example:
lambda x, y: fn(10, x, x, y)
Then arguments will be
[1, 10], --> is captured value. has value 10
[0, 0], --> is not captured value. is argument 0 of the lambda.
[0, 0], --> is not captured value. is argument 0 of the lambda.
[0, 1] --> is not captured value. is argument 1 of the lambda.
"""
def __init__(self, native_fn_name, arguments):
self.native_fn_name = native_fn_name
self.arguments = arguments
def _descend_namespace(caller_globals, name):
"""
Given a globals dictionary, and a name of the form "a.b.c.d", recursively
walk the globals expanding caller_globals['a']['b']['c']['d'] returning
the result. Raises an exception (IndexError) on failure.
"""
names = name.split('.')
cur = caller_globals
for i in names:
if type(cur) is dict:
cur = cur[i]
else:
cur = getattr(cur, i)
return cur
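# Hedged sketch (illustrative dotted name; not called anywhere in this module):
# shows how _descend_namespace walks "os.path.join" through a globals-style dict.
def _example_descend_namespace():
    import os
    return _descend_namespace({'os': os}, 'os.path.join') is os.path.join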
def _build_native_function_call(fn):
"""
If fn can be interpreted and handled as a native function: i.e.
fn is one of the extensions, or fn is a simple lambda closure using one of
the extensions.
fn = gl.extensions.add
fn = lambda x: gl.extensions.add(5)
Then, this returns a closure object, which describes the function call
which can then be passed to C++.
Returns a _Closure object on success, raises an exception on failure.
"""
# See if fn is the native function itself
native_function_name = _get_toolkit_function_name_from_function(fn)
if native_function_name != "":
# yup!
# generate an "identity" argument list
argnames = _get_argument_list_from_toolkit_function_name(native_function_name)
arglist = [[0, i] for i in range(len(argnames))]
return _Closure(native_function_name, arglist)
# ok. its not a native function
from graphlab_util.lambda_closure_capture import translate
from graphlab_util.lambda_closure_capture import Parameter
# Lets see if it is a simple lambda
capture = translate(fn)
# ok. build up the closure arguments
# Try to pick up the lambda
function = _descend_namespace(capture.caller_globals, capture.closure_fn_name)
native_function_name = _get_toolkit_function_name_from_function(function)
if native_function_name == "":
raise RuntimeError("Lambda does not contain a native function")
argnames = _get_argument_list_from_toolkit_function_name(native_function_name)
# ok. build up the argument list. this is mildly annoying due to the mix of
# positional and named arguments
# make an argument list with a placeholder for everything first
arglist = [[-1, i] for i in argnames]
# loop through the positional arguments
for i in range(len(capture.positional_args)):
arg = capture.positional_args[i]
if type(arg) is Parameter:
# This is a lambda argument
# arg.name is the actual string of the argument
# here we need the index
arglist[i] = [0, capture.input_arg_names.index(arg.name)]
else:
# this is a captured value
arglist[i] = [1, arg]
# now. the named arguments are somewhat annoying
for i in capture.named_args:
arg = capture.named_args[i]
if type(arg) is Parameter:
# This is a lambda argument
# arg.name is the actual string of the argument
# here we need the index
arglist[argnames.index(i)] = [0, capture.input_arg_names.index(arg.name)]
else:
# this is a captured value
arglist[argnames.index(i)] = [1, arg]
# done. Make sure all arguments are filled
for i in arglist:
if i[0] == -1:
raise RuntimeError("Incomplete function specification")
# attempt to recursively break down any other functions
import inspect
for i in range(len(arglist)):
if arglist[i][0] == 1 and inspect.isfunction(arglist[i][1]):
try:
arglist[i][1] = _build_native_function_call(arglist[i][1])
except:
pass
return _Closure(native_function_name, arglist)
| ypkang/Dato-Core | src/unity/python/graphlab/extensions.py | Python | agpl-3.0 | 27,795 |
import itertools
unique_character = '$'
def z_algorithm_detail(super_str, pat_size):
z_score_arr = [0] * len(super_str)
l, r = 0, 0
    def get_match_num(begin_0, begin_1):
        match_num = 0
        # stop at the end of super_str so a match that reaches the final
        # character does not index past the end of the string
        while begin_0 + match_num < len(super_str) and \
                super_str[begin_0 + match_num] == super_str[begin_1 + match_num]:
            match_num += 1
        return match_num
for k in xrange(1, len(super_str)):
if k > r:
match_num = get_match_num(begin_0=k, begin_1=0)
z_score_arr[k] = match_num
if match_num > 0:
l, r = k, k + match_num - 1
else:
k_prime = k - l
beta = r - k + 1
if z_score_arr[k_prime] < beta:
z_score_arr[k] = z_score_arr[k_prime]
elif z_score_arr[k_prime] > beta:
z_score_arr[k] = beta
else:
match_num = get_match_num(begin_0=r + 1, begin_1=r - k + 1)
z_score_arr[k] = beta + match_num
if match_num > 0:
l, r = k, k + z_score_arr[k] - 1
return filter(lambda idx: z_score_arr[idx] == pat_size, range(len(super_str))), z_score_arr
def search_pattern_str_z(pat, txt):
match_list, z_score_arr = z_algorithm_detail(pat + unique_character + txt, len(pat))
return map(lambda idx: idx - len(pat + unique_character), match_list), z_score_arr
def search_pattern_str_naive(pat, txt):
return filter(lambda i: txt[i:i + len(pat)] == pat, range(len(txt) - len(pat) + 1))
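# Hedged sketch (illustrative pattern/text pair): a tiny self-check that the
# Z-based search agrees with the naive search; not called by the demos below.
def _example_compare_searchers():
    pat, txt = 'ab', 'abcababx'
    idx_naive = search_pattern_str_naive(pat, txt)
    idx_z, _ = search_pattern_str_z(pat, txt)
    assert list(idx_naive) == list(idx_z) == [0, 3, 5]
    return idx_naive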
def test_cases():
pat = 'aba'
txt = 'bbabaxababay'
print 'naive:', search_pattern_str_naive(pat, txt)
print 'z algorithm:' + str(search_pattern_str_z(pat, txt)) + '\n'
def solve_homework4():
pat = 'GC'
txt = 'GCTTGGCATA'
print 'naive:', search_pattern_str_naive(pat, txt)
print 'z algorithm:' + str(search_pattern_str_z(pat, txt)) + '\n'
alphabet = ['A', 'G', 'C', 'T']
wild_card_pat = 'G*'
def expand_wildcard_character(wildcard_str):
combinations = map(lambda my_tuple: ''.join(my_tuple),
list(apply(itertools.product,
tuple(map(lambda ch: alphabet if ch == '*' else [ch], wildcard_str)))))
return combinations
z_score_arr_list = []
idx_list_set = set()
for pat in expand_wildcard_character(wild_card_pat):
print 'pat:', pat
print 'naive:', search_pattern_str_naive(pat, txt)
idx_list, z_score_arr = search_pattern_str_z(pat, txt)
print 'z algorithm:' + str((idx_list, z_score_arr)) + '\n'
z_score_arr_list.append(z_score_arr)
idx_list_set |= set(idx_list)
final_z_score_arr = reduce(lambda l, r: map(lambda my_pair: max(my_pair[0], my_pair[1]), zip(l, r)),
z_score_arr_list, z_score_arr_list[0])
print idx_list_set, final_z_score_arr
if __name__ == '__main__':
test_cases()
solve_homework4()
| YcheLanguageStudio/PythonStudy | bioinformatics/string_matching/z_algorithm.py | Python | mit | 2,932 |
"""
dirsrv_sysconfig - file ``/etc/sysconfig/dirsrv``
=================================================
This module provides the ``DirsrvSysconfig`` class parser, for reading the
options in the ``/etc/sysconfig/dirsrv`` file.
Sample input::
# how many seconds to wait for the startpid file to show
# up before we assume there is a problem and fail to start
# if using systemd, omit the "; export VARNAME" at the end
#STARTPID_TIME=10 ; export STARTPID_TIME
# how many seconds to wait for the pid file to show
# up before we assume there is a problem and fail to start
# if using systemd, omit the "; export VARNAME" at the end
#PID_TIME=600 ; export PID_TIME
KRB5CCNAME=/tmp/krb5cc_995
KRB5_KTNAME=/etc/dirsrv/ds.keytab
Examples:
    >>> dirsrv = shared[DirsrvSysconfig]
>>> dirsrv.KRB5_KTNAME
'/etc/dirsrv/ds.keytab'
>>> 'PID_TIME' in dirsrv.data
False
"""
from .. import parser, SysconfigOptions
@parser('dirsrv')
class DirsrvSysconfig(SysconfigOptions):
"""
Parse the `dirsrv` service's start-up configuration.
"""
set_properties = True
| PaulWay/insights-core | insights/parsers/dirsrv_sysconfig.py | Python | apache-2.0 | 1,126 |
class TortillaException(Exception):
def __init__(self, **keys):
super().__init__(self.message % keys)
class ConfigKeyNotFound(TortillaException):
message = ("The requested key '%(key)s' does not exist in the "
"application configuration")
class ConfigConflict(TortillaException):
message = ("The requested key '%(key)s' already exists in the config "
"and has value '%(value)s'")
class ConfigNecessityConflict(TortillaException):
message = ("The declared variable '%(key)s' may not be both required "
"and have a default value")
class ConfigAlreadyDefined(TortillaException):
message = ("The declared variable '%(key)s' has already been defined "
"and has value '%(value)s'")
class ConfigUndeclared(TortillaException):
message = ("The variable '%(key)s' has not been declared in namespace "
"'%(namespace)s'")
class ConfigUndefined(TortillaException):
message = ("The declared variable '%(key)s' has not been defined")
class ConfigAlreadyOverridden(TortillaException):
message = ("The declared variable '%(key)s' has already been overridden "
"with value '%(value)s'. Original value is '%(original)s'")
class ConfigNotOverridden(TortillaException):
message = ("The declared variable '%(key)s' has not been overridden")
class ConfigTypeError(TortillaException):
message = ("Value '%(value)s' is not of type %(expected_type)s")
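# Illustrative usage (an editorial sketch, not part of the original module):
# each subclass interpolates its keyword arguments into its ``message``
# template, so callers raise these exceptions with the keys the template
# names. The key value below is a made-up example.
def _example_raise_key_not_found():
    # Raises with: "The requested key 'timeout' does not exist in the
    # application configuration"
    raise ConfigKeyNotFound(key='timeout')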
| Cerberus98/tortilla | tortilla/exception.py | Python | apache-2.0 | 1,482 |
# -*- coding: utf-8 -*-
import re
from django import forms
from django.forms.util import ErrorList
from django.utils.translation import ugettext_lazy as _
from molly.conf import applications
from molly.geolocation import geocode, reverse_geocode
METHOD_CHOICES = (
('html5', 'HTML5'),
('html5request', 'HTML5 (triggered by the user)'),
('gears', 'Google Gears'),
('manual', 'Manual update'),
('geocoded', 'Geocoded'),
('other', 'Other method'),
('denied', 'Update denied by user'),
('error', 'Error updating location'),
('favourite', 'Manually selected from favourite location list'),
)
# From http://en.wikipedia.org/wiki/Postcodes_in_the_United_Kingdom
POSTCODE_RE = r'(((A[BL]|B[ABDHLNRSTX]?|C[ABFHMORTVW]|D[ADEGHLNTY]|E[HNX]?|' + \
r'F[KY]|G[LUY]?|H[ADGPRSUX]|I[GMPV]|JE|K[ATWY]|L[ADELNSU]?|M[' + \
r'EKL]?|N[EGNPRW]?|O[LX]|P[AEHLOR]|R[GHM]|S[AEGKLMNOPRSTY]?|T' + \
r'[ADFNQRSW]|UB|W[ADFNRSV]|YO|ZE)[1-9]?[0-9]|((E|N|NW|SE|SW|W' + \
r')1|EC[1-4]|WC[12])[A-HJKMNPR-Y]|(SW|W)([2-9]|[1-9][0-9])|EC' + \
r'[1-9][0-9]) [0-9][ABD-HJLNP-UW-Z]{2})'
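# Editorial note (not part of the original module): the geocoded branch below
# only suppresses alternative results when the whole query looks like a UK
# postcode under POSTCODE_RE. A rough sketch of the check, with a made-up
# sample value:
#
#     >>> bool(re.match(POSTCODE_RE, 'OX1 2JD'.upper()))
#     True
#
# Anything that does not match (e.g. a plain place name) keeps its list of
# alternative geocoder results.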
class LocationUpdateForm(forms.Form):
latitude = forms.FloatField(required=False)
longitude = forms.FloatField(required=False)
accuracy = forms.FloatField(required=False)
method = forms.ChoiceField(required=False, choices=METHOD_CHOICES)
name = forms.CharField(required=False)
def clean_latitude(self):
latitude = self.cleaned_data.get('latitude')
        if latitude is not None and not (-90 <= latitude <= 90):
            raise forms.ValidationError(_('Must be in the range [-90, 90].'))
return latitude
def clean_longitude(self):
longitude = self.cleaned_data.get('longitude')
        if longitude is not None and not (-180 <= longitude < 180):
            raise forms.ValidationError(_('Must be in the range [-180, 180).'))
return longitude
def clean(self):
cleaned_data = self.cleaned_data
if cleaned_data['method'] in ('html5', 'html5request', 'gears','manual', 'geocoded', 'other', 'favourite'):
if cleaned_data['method'] == 'geocoded':
if not cleaned_data['name'].strip():
raise forms.ValidationError(_("You must enter a location"))
results = geocode(cleaned_data['name'])
if len(results) > 0:
cleaned_data.update(results[0])
cleaned_data['longitude'], cleaned_data['latitude'] = cleaned_data['location']
# Ignore alternatives for postcodes
if not re.match(POSTCODE_RE, cleaned_data['name'].upper()):
cleaned_data['alternatives'] = results[1:]
else:
cleaned_data['alternatives'] = []
else:
raise forms.ValidationError(_("Unable to find a location that matches '%s'.") % cleaned_data['name'])
for key in ('latitude', 'longitude', 'accuracy'):
if cleaned_data.get(key) is None:
self._errors[key] = ErrorList(['method requires that ' + key + ' must be specified'])
if not self._errors:
cleaned_data['location'] = cleaned_data['longitude'], cleaned_data['latitude']
if not cleaned_data.get('name'):
try:
cleaned_data['name'] = reverse_geocode(
self.cleaned_data['longitude'],
self.cleaned_data['latitude'])[0]['name']
except:
cleaned_data['name'] = u"↝ %f, %f" % (self.cleaned_data['longitude'], self.cleaned_data['latitude'])
elif cleaned_data['method'] in ('denied', 'error'):
for key in ('latitude', 'longitude', 'accuracy'):
if cleaned_data.get(key) is None:
self._errors[key] = ErrorList(['method requires that ' + key + ' must be specified'])
else:
self._errors['method'] = ErrorList(['method is required'])
return cleaned_data
| mollyproject/mollyproject | molly/geolocation/forms.py | Python | apache-2.0 | 4,202 |
#!/usr/bin/env python3
# Copyright 2017 Donour Sizemore
#
# This file is part of RacePi
#
# RacePi is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 2.
#
# RacePi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with RacePi. If not, see <http://www.gnu.org/licenses/>.
import time
import sys
import os
from racepi.sensor.handler.stn11xx import STNHandler, DEV_NAME, BAUD_RATE
if __name__ == "__main__":
if len(sys.argv) < 2:
print("Usage: %s <CAN ID> <CAN ID> ..." % sys.argv[0])
sys.exit(1)
sh = STNHandler(DEV_NAME, BAUD_RATE)
# setup monitor of specified IDs
last_mesg = {}
for can_id in sys.argv[1:]:
last_mesg[can_id] = None
    sh.set_monitor_ids([int(x, 16) for x in last_mesg.keys()])
sh.start_monitor()
last_update = 0
while True:
# read all the messages that are available
data = sh.readline()
if len(data) > 3:
can_id = data[:3]
if can_id in last_mesg.keys():
last_mesg[can_id] = data
else:
last_mesg[can_id] = None
now = time.time()
if now - last_update > 0.2:
last_update = now
os.write(1, b"\r")
for k in sorted(last_mesg.keys()):
os.write(1, ("[%s] " % last_mesg[k]).encode())
| donour/racepi | python/utilities/stn11xx_monitor.py | Python | gpl-2.0 | 1,692 |
# Copyright 2013 by Kamil Koziara. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""
I/O operations for gene annotation files.
"""
from __future__ import print_function
import sys
import csv
import collections
from Ontology.Data import GeneAnnotation, TermAssociation
from .Interfaces import OntoIterator, OntoReader
class TsvIterator(OntoIterator):
"""
Parses TSV files
"""
def __init__(self, file_handle):
self._reader = csv.reader(file_handle, delimiter='\t')
def __iter__(self):
return self._reader
def __next__(self):
return next(self._reader)
def next(self):
return next(self._reader)
# GAF version 2.0
GAF20FIELDS = ['DB',
'DB_Object_ID',
'DB_Object_Symbol',
'Qualifier',
'GO_ID',
'DB:Reference',
'Evidence',
'With',
'Aspect',
'DB_Object_Name',
'Synonym',
'DB_Object_Type',
'Taxon_ID',
'Date',
'Assigned_By',
'Annotation_Extension',
'Gene_Product_Form_ID']
# GAF version 1.0
GAF10FIELDS = ['DB',
'DB_Object_ID',
'DB_Object_Symbol',
'Qualifier',
'GO_ID',
'DB:Reference',
'Evidence',
'With',
'Aspect',
'DB_Object_Name',
'Synonym',
'DB_Object_Type',
'Taxon_ID',
'Date',
'Assigned_By']
GAF_VERSION = { "1.0" : GAF10FIELDS,
"2.0" : GAF20FIELDS}
def _split_multi(value):
if len(value) > 0:
return value.split('|')
else:
return []
def _to_goa(obj_rows, version):
row = obj_rows[0]
obj_id = row[1]
obj_attrs = {GAF20FIELDS[0] : row[0],
GAF20FIELDS[2] : row[2],
GAF20FIELDS[9] : row[9],
GAF20FIELDS[10] : _split_multi(row[10]),
GAF20FIELDS[11] : row[11],
GAF20FIELDS[12]: _split_multi(row[12])}
if version == "1.0":
row_len = 15
else:
row_len = 17
obj_attrs[GAF20FIELDS[15]] = _split_multi(row[15])
obj_attrs[GAF20FIELDS[16]] = row[16]
assocs = []
for row in obj_rows:
if len(row) == row_len:
assocs.append(TermAssociation(row[4],
{GAF20FIELDS[3] : _split_multi(row[3]),
GAF20FIELDS[5] : _split_multi(row[5]),
GAF20FIELDS[6] : row[6],
GAF20FIELDS[7] :_split_multi(row[7]),
GAF20FIELDS[8] : row[8],
GAF20FIELDS[13] : row[13],
GAF20FIELDS[14] : row[14]}
))
else:
raise ValueError("Invalid gaf file: Incorrect row length.")
return GeneAnnotation(obj_id, assocs, obj_attrs)
class GafReader(OntoReader):
"""
Reads GAF files into list of GeneAnnotation.
GAF file is list of tab separated values in the following order:
'DB', 'DB Object ID', 'DB Object Symbol', 'Qualifier', 'GO ID',
'DB:Reference', 'Evidence Code', 'With (or) From', 'Aspect',
'DB Object Name', 'DB Object Synonym', 'DB Object Type',
'Taxon', 'Date', 'Assigned By', 'Annotation Extension',
'Gene Product Form ID'
"""
_ID_IDX = 1
def __init__(self, file_handle, assoc_format = "dict"):
"""
Parameters:
----------
- assoc_format - states format of returned association:
o "dict" - as a dictionary (faster)
o "in_mem_sql" - as dict-like object with underlying in-memory database
(more memory efficient)
"""
self.handle = file_handle
self.assoc_format = assoc_format
def read(self):
first = self.handle.readline()
if first and first.startswith('!gaf-version:'):
version = first[(first.find(':') + 1):].strip()
else:
raise ValueError("Invalid gaf file: No version specified.")
if version not in GAF_VERSION:
raise ValueError("Incorrect version.")
tsv_iter = TsvIterator(self.handle)
if self.assoc_format == "dict":
raw_records = collections.defaultdict(list)
for row in tsv_iter:
first = row[0]
if not first.startswith('!'):
raw_records[row[self._ID_IDX]].append(row)
return dict([(k, _to_goa(v, version)) for k, v in raw_records.items()]) # Possible py2 slow down
elif self.assoc_format == "in_mem_sql":
try:
sqla = InSqlAssoc(GAF_VERSION[version], [1,4], lambda x: _to_goa(x, version))
except ImportError:
print("Error: To use in_mem_sql association you need to have sqlite3 bindings installed.", file=sys.stderr)
else:
for row in tsv_iter:
if not row[0].startswith('!'):
sqla.add_row(row)
return sqla
else:
raise ValueError("Incorrect assoc_format parameter.")
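# Editorial usage sketch (not part of the original module): with the default
# assoc_format, GafReader.read() returns a dict keyed by DB_Object_ID whose
# values are GeneAnnotation objects built by _to_goa() above. The file name
# below is a made-up example.
def _example_read_gaf(path='gene_association.sample.gaf'):
    with open(path) as handle:
        return GafReader(handle, assoc_format='dict').read()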
class InSqlAssoc(object):
"""
Immutable dictionary-like structure for storing annotations.
It provides slower access but is more memory efficient thus more suitable
for big annotations files.
"""
def __init__(self, fields, index, selection_to_obj_fun, db_path = ":memory:"):
"""
Parameters:
----------
- fields - name of the columns in db representation
- index - pair of fields indexing associations: (gene_id, ontology_term_id)
- selection_to_obj_fun - function transforming list of rows into
GeneAssociation
- db_path - path to database file, special value ":memory:" creates
database in memory
"""
import sqlite3
self.fields = fields
self.fun = selection_to_obj_fun
self.con = sqlite3.connect(db_path)
self.index = index
cur = self.con.cursor()
query = 'CREATE TABLE assocs ("' + self.fields[0] + '" '
for field in fields[1:]:
query += ', "' + field + '" '
query += ');'
cur.execute(query)
cur.execute('CREATE INDEX obj_idx ON assocs ({0});'.format(self.fields[index[0]]))
self.con.commit()
def add_row(self, row):
if len(row) != len(self.fields):
raise TypeError("Incorrect number of fields in a row.")
else:
cur = self.con.cursor()
cur.execute("INSERT INTO assocs VALUES (?" + (",?" * (len(self.fields) - 1)) + ");", row)
self.con.commit()
def __len__(self):
cur = self.con.cursor()
cur.execute('SELECT COUNT(DISTINCT "' + self.fields[self.index[0]] + '") FROM assocs;')
return cur.fetchone()[0]
def __contains__(self, key):
cur = self.con.cursor()
cur.execute('SELECT * FROM assocs WHERE "' + self.fields[self.index[0]]\
+ '" = ?;', [key])
return len(list(cur)) > 0 #TODO sth prettier
def __getitem__(self, key):
cur = self.con.cursor()
cur.execute('SELECT * FROM assocs WHERE "' + self.fields[self.index[0]]\
+ '" = ?;', [key])
return self.fun(list(cur))
def __iter__(self):
cur = self.con.cursor()
cur.execute('SELECT * FROM assocs ORDER BY "{0}"'.format(self.fields[self.index[0]]))
cur_id = None
row_list = []
for row in cur:
            if cur_id and cur_id != row[self.index[0]]:
                # yield the finished group under the id that produced it
                obj = self.fun(row_list)
                prev_id = cur_id
                row_list = [row]
                cur_id = row[self.index[0]]
                yield (prev_id, obj)
else:
cur_id = row[self.index[0]]
row_list.append(row)
yield (cur_id, self.fun(row_list))
def itervalues(self):
for _, v in self:
yield v
def iterkeys(self):
for k, _ in self:
yield k
    def keys(self):
        return list(self.iterkeys())
    def values(self):
        return list(self.itervalues())
| arkatebi/SwissProt-stats | Ontology/IO/GoaIO.py | Python | gpl-3.0 | 8,540 |
#!/usr/bin/env python
from ansible.module_utils.hashivault import hashivault_argspec
from ansible.module_utils.hashivault import hashivault_auth_client
from ansible.module_utils.hashivault import hashivault_init
from ansible.module_utils.hashivault import hashiwrapper
ANSIBLE_METADATA = {'status': ['stableinterface'], 'supported_by': 'community', 'version': '1.1'}
DOCUMENTATION = '''
---
module: hashivault_approle_role_secret_get
version_added: "3.8.0"
short_description: Hashicorp Vault approle role secret id get module
description:
- Module to get a approle role secret id from Hashicorp Vault.
options:
name:
description:
- role name.
mount_point:
description:
- mount point for role
default: approle
secret:
description:
- secret id.
extends_documentation_fragment: hashivault
'''
EXAMPLES = '''
---
- hosts: localhost
tasks:
- hashivault_approle_role_secret_get:
name: 'ashley'
secret: 'ec4bedee-e44b-c096-9ac8-1600e52ed8f8'
register: 'vault_approle_role_secret_get'
- debug: msg="Role secret is {{vault_approle_role_secret_get.secret}}"
'''
def main():
argspec = hashivault_argspec()
argspec['name'] = dict(required=True, type='str')
argspec['mount_point'] = dict(required=False, type='str', default='approle')
argspec['secret'] = dict(required=True, type='str')
module = hashivault_init(argspec)
result = hashivault_approle_role_secret_get(module.params)
if result.get('failed'):
module.fail_json(**result)
else:
module.exit_json(**result)
@hashiwrapper
def hashivault_approle_role_secret_get(params):
try:
name = params.get('name')
mount_point = params.get('mount_point')
secret = params.get('secret')
client = hashivault_auth_client(params)
response = client.get_role_secret_id(name, secret, mount_point=mount_point)
if type(response) is not dict and response.status_code == 204: # No content
return {'secret': {}, 'status': 'absent'}
else:
return {'secret': response['data'], 'response': response, 'status': 'present'}
except Exception as e:
return {'failed': True, 'msg': str(e)}
if __name__ == '__main__':
main()
| TerryHowe/ansible-modules-hashivault | ansible/modules/hashivault/hashivault_approle_role_secret_get.py | Python | mit | 2,303 |
from __future__ import absolute_import
from . import ir as I
from .walk import IRWalker, propigate_location
class EvaluateCompileTime(IRWalker):
descend_into_functions = True
def __init__(self, eval_ir):
super(EvaluateCompileTime, self).__init__()
self.eval_ir = eval_ir
def visit_compile_time_value(self, node):
I.replace_child(node, propigate_location(node, I.make_constant(self.eval_ir(node.expression))))
def evaluate_compile_time_values(node, eval_ir):
EvaluateCompileTime(eval_ir).visit(node)
return node
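# Editorial usage sketch (not part of the original module):
# evaluate_compile_time_values() walks an IR tree (descending into functions)
# and swaps every compile_time_value node for a constant produced by the
# supplied eval_ir callback, e.g.
#
#     evaluate_compile_time_values(ir_root, eval_ir=my_evaluator)
#
# where ir_root is an already-built IR node and my_evaluator (hypothetical)
# maps an IR expression to its value.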
| matthagy/Jamenson | jamenson/compiler/preeval.py | Python | apache-2.0 | 564 |
#!/usr/bin/env python
"""models.py
Udacity conference server-side Python App Engine data & ProtoRPC models
$Id: models.py,v 1.1 2014/05/24 22:01:10 wesc Exp $
created/forked from conferences.py by wesc on 2014 may 24
"""
__author__ = '[email protected] (Wesley Chun)'
import httplib
import endpoints
from protorpc import messages
from google.appengine.ext import ndb
class ConflictException(endpoints.ServiceException):
"""ConflictException -- exception mapped to HTTP 409 response"""
http_status = httplib.CONFLICT
class Profile(ndb.Model):
"""Profile -- User profile object"""
displayName = ndb.StringProperty()
mainEmail = ndb.StringProperty()
teeShirtSize = ndb.StringProperty(default='NOT_SPECIFIED')
conferenceKeysToAttend = ndb.StringProperty(repeated=True)
sessionKeysToAttend = ndb.StringProperty(repeated=True)
class ProfileMiniForm(messages.Message):
"""ProfileMiniForm -- update Profile form message"""
displayName = messages.StringField(1)
teeShirtSize = messages.EnumField('TeeShirtSize', 2)
class ProfileForm(messages.Message):
"""ProfileForm -- Profile outbound form message"""
displayName = messages.StringField(1)
mainEmail = messages.StringField(2)
teeShirtSize = messages.EnumField('TeeShirtSize', 3)
conferenceKeysToAttend = messages.StringField(4, repeated=True)
sessionKeysToAttend = messages.StringField(5, repeated=True)
class StringMessage(messages.Message):
"""StringMessage-- outbound (single) string message"""
data = messages.StringField(1, required=True)
class BooleanMessage(messages.Message):
"""BooleanMessage-- outbound Boolean value message"""
data = messages.BooleanField(1)
class Session(ndb.Model):
"""Session -- Session object"""
name = ndb.StringProperty(required=True)
highlights = ndb.StringProperty()
speaker = ndb.StringProperty()
duration = ndb.IntegerProperty()
typeOfSession = ndb.StringProperty()
sessDate = ndb.DateProperty()
startTime = ndb.TimeProperty()
class SessionForm(messages.Message):
"""SessionForm -- Session outbound form message"""
name = messages.StringField(1)
highlights = messages.StringField(2)
speaker = messages.StringField(3)
duration = messages.IntegerField(4)
typeOfSession = messages.StringField(5)
sessDate = messages.StringField(6) #DateTimeField()
startTime = messages.StringField(7) #TimeField
websafeKey = messages.StringField(8)
class Conference(ndb.Model):
"""Conference -- Conference object"""
name = ndb.StringProperty(required=True)
description = ndb.StringProperty()
organizerUserId = ndb.StringProperty()
topics = ndb.StringProperty(repeated=True)
city = ndb.StringProperty()
startDate = ndb.DateProperty()
month = ndb.IntegerProperty() # TODO: do we need for indexing like Java?
endDate = ndb.DateProperty()
maxAttendees = ndb.IntegerProperty()
seatsAvailable = ndb.IntegerProperty()
featuredSpeaker = ndb.StringProperty()
class ConferenceForm(messages.Message):
"""ConferenceForm -- Conference outbound form message"""
name = messages.StringField(1)
description = messages.StringField(2)
organizerUserId = messages.StringField(3)
topics = messages.StringField(4, repeated=True)
city = messages.StringField(5)
startDate = messages.StringField(6) #DateTimeField()
month = messages.IntegerField(7, variant=messages.Variant.INT32)
maxAttendees = messages.IntegerField(8, variant=messages.Variant.INT32)
seatsAvailable = messages.IntegerField(9, variant=messages.Variant.INT32)
endDate = messages.StringField(10) #DateTimeField()
websafeKey = messages.StringField(11)
organizerDisplayName = messages.StringField(12)
featuredSpeaker = messages.StringField(13)
class ConferenceForms(messages.Message):
"""ConferenceForms -- multiple Conference outbound form message"""
items = messages.MessageField(ConferenceForm, 1, repeated=True)
class SpeakerForm(messages.Message):
"""SpeakerForm for returning featuredSpeaker"""
featuredSpeaker = messages.StringField(1)
name = messages.StringField(2)
class SessionForms(messages.Message):
"""SessionForms -- multiple Session outbound form message"""
items = messages.MessageField(SessionForm, 1, repeated=True)
class TeeShirtSize(messages.Enum):
"""TeeShirtSize -- t-shirt size enumeration value"""
NOT_SPECIFIED = 1
XS_M = 2
XS_W = 3
S_M = 4
S_W = 5
M_M = 6
M_W = 7
L_M = 8
L_W = 9
XL_M = 10
XL_W = 11
XXL_M = 12
XXL_W = 13
XXXL_M = 14
XXXL_W = 15
class ConferenceQueryForm(messages.Message):
"""ConferenceQueryForm -- Conference query inbound form message"""
field = messages.StringField(1)
operator = messages.StringField(2)
value = messages.StringField(3)
class ConferenceQueryForms(messages.Message):
"""ConferenceQueryForms -- multiple ConferenceQueryForm inbound form message"""
filters = messages.MessageField(ConferenceQueryForm, 1, repeated=True)
| rumblefish2494/FSWD-P4 | models.py | Python | apache-2.0 | 5,280 |
#!/usr/bin/env python
import argparse
import bullet_cartpole
import collections
import datetime
import gym
import json
import numpy as np
import replay_memory
import signal
import sys
import tensorflow as tf
import time
import util
np.set_printoptions(precision=5, threshold=10000, suppress=True, linewidth=10000)
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--num-eval', type=int, default=0,
help="if >0 just run this many episodes with no training")
parser.add_argument('--max-num-actions', type=int, default=0,
help="train for (at least) this number of actions (always finish"
" current episode) ignore if <=0")
parser.add_argument('--max-run-time', type=int, default=0,
help="train for (at least) this number of seconds (always finish"
" current episode) ignore if <=0")
parser.add_argument('--ckpt-dir', type=str, default=None,
help="if set save ckpts to this dir")
parser.add_argument('--ckpt-freq', type=int, default=3600, help="freq (sec) to save ckpts")
parser.add_argument('--batch-size', type=int, default=128, help="training batch size")
parser.add_argument('--batches-per-step', type=int, default=5,
help="number of batches to train per step")
parser.add_argument('--dont-do-rollouts', action="store_true",
help="by dft we do rollouts to generate data then train after each rollout. if this flag is set we"
" dont do any rollouts. this only makes sense to do if --event-log-in set.")
parser.add_argument('--target-update-rate', type=float, default=0.0001,
help="affine combo for updating target networks each time we run a"
" training batch")
# TODO params per value, P, output_action networks?
parser.add_argument('--share-input-state-representation', action='store_true',
help="if set we have one network for processing input state that is"
" shared between value, l_value and output_action networks. if"
" not set each net has it's own network.")
parser.add_argument('--hidden-layers', type=str, default="100,50",
help="hidden layer sizes")
parser.add_argument('--use-batch-norm', action='store_true',
help="whether to use batch norm on conv layers")
parser.add_argument('--discount', type=float, default=0.99,
help="discount for RHS of bellman equation update")
parser.add_argument('--event-log-in', type=str, default=None,
help="prepopulate replay memory with entries from this event log")
parser.add_argument('--replay-memory-size', type=int, default=22000,
help="max size of replay memory")
parser.add_argument('--replay-memory-burn-in', type=int, default=1000,
help="dont train from replay memory until it reaches this size")
parser.add_argument('--eval-action-noise', action='store_true',
help="whether to use noise during eval")
parser.add_argument('--action-noise-theta', type=float, default=0.01,
help="OrnsteinUhlenbeckNoise theta (rate of change) param for action"
" exploration")
parser.add_argument('--action-noise-sigma', type=float, default=0.05,
help="OrnsteinUhlenbeckNoise sigma (magnitude) param for action"
" exploration")
parser.add_argument('--gpu-mem-fraction', type=float, default=None,
help="if not none use only this fraction of gpu memory")
util.add_opts(parser)
bullet_cartpole.add_opts(parser)
opts = parser.parse_args()
sys.stderr.write("%s\n" % opts)
# TODO: check that if --dont-do-rollouts set then --event-log-in also set
# TODO: if we import slim before cartpole env we can't start bullet withGL gui o_O
env = bullet_cartpole.BulletCartpole(opts=opts, discrete_actions=False)
import base_network
import tensorflow.contrib.slim as slim
VERBOSE_DEBUG = False
def toggle_verbose_debug(signal, frame):
global VERBOSE_DEBUG
VERBOSE_DEBUG = not VERBOSE_DEBUG
signal.signal(signal.SIGUSR1, toggle_verbose_debug)
DUMP_WEIGHTS = False
def set_dump_weights(signal, frame):
global DUMP_WEIGHTS
DUMP_WEIGHTS = True
signal.signal(signal.SIGUSR2, set_dump_weights)
class ValueNetwork(base_network.Network):
""" Value network component of a NAF network. Created as seperate net because it has a target network."""
def __init__(self, namespace, input_state, hidden_layer_config):
super(ValueNetwork, self).__init__(namespace)
self.input_state = input_state
with tf.variable_scope(namespace):
# expose self.input_state_representation since it will be the network "shared"
# by l_value & output_action network when running --share-input-state-representation
self.input_state_representation = self.input_state_network(input_state, opts)
self.value = slim.fully_connected(scope='fc',
inputs=self.input_state_representation,
num_outputs=1,
weights_regularizer=tf.contrib.layers.l2_regularizer(0.01),
activation_fn=None) # (batch, 1)
def value_given(self, state):
return tf.get_default_session().run(self.value,
feed_dict={self.input_state: state,
base_network.IS_TRAINING: False})
class NafNetwork(base_network.Network):
def __init__(self, namespace,
input_state, input_state_2,
value_net, target_value_net,
action_dim):
super(NafNetwork, self).__init__(namespace)
# noise to apply to actions during rollouts
self.exploration_noise = util.OrnsteinUhlenbeckNoise(action_dim,
opts.action_noise_theta,
opts.action_noise_sigma)
# we already have the V networks, created independently because it also
# has a target network.
self.value_net = value_net
self.target_value_net = target_value_net
# keep placeholders provided and build any others required
self.input_state = input_state
self.input_state_2 = input_state_2
self.input_action = tf.placeholder(shape=[None, action_dim],
dtype=tf.float32, name="input_action")
self.reward = tf.placeholder(shape=[None, 1],
dtype=tf.float32, name="reward")
self.terminal_mask = tf.placeholder(shape=[None, 1],
dtype=tf.float32, name="terminal_mask")
# TODO: dont actually use terminal mask?
with tf.variable_scope(namespace):
# mu (output_action) is also a simple NN mapping input state -> action
# this is our target op for inference (i.e. value that maximises Q given input_state)
with tf.variable_scope("output_action"):
if opts.share_input_state_representation:
input_representation = value_net.input_state_representation
else:
input_representation = self.input_state_network(self.input_state, opts)
weights_initializer = tf.random_uniform_initializer(-0.001, 0.001)
self.output_action = slim.fully_connected(scope='fc',
inputs=input_representation,
num_outputs=action_dim,
weights_initializer=weights_initializer,
weights_regularizer=tf.contrib.layers.l2_regularizer(0.01),
activation_fn=tf.nn.tanh) # (batch, action_dim)
# A (advantage) is a bit more work and has three components...
# first the u / mu difference. note: to use in a matmul we need
# to convert this vector into a matrix by adding an "unused"
# trailing dimension
u_mu_diff = self.input_action - self.output_action # (batch, action_dim)
u_mu_diff = tf.expand_dims(u_mu_diff, -1) # (batch, action_dim, 1)
# next we have P = L(x).L(x)_T where L is the values of lower triangular
# matrix with diagonals exp'd. yikes!
# first the L lower triangular values; a network on top of the input state
num_l_values = (action_dim*(action_dim+1))/2
with tf.variable_scope("l_values"):
if opts.share_input_state_representation:
input_representation = value_net.input_state_representation
else:
input_representation = self.input_state_network(self.input_state, opts)
l_values = slim.fully_connected(scope='fc',
inputs=input_representation,
num_outputs=num_l_values,
weights_regularizer=tf.contrib.layers.l2_regularizer(0.01),
activation_fn=None)
# we will convert these l_values into a matrix one row at a time.
rows = []
self._l_values = l_values
# each row is made of three components;
# 1) the lower part of the matrix, i.e. elements to the left of diagonal
# 2) the single diagonal element (that we exponentiate)
# 3) the upper part of the matrix; all zeros
batch_size = tf.shape(l_values)[0]
row_idx = 0
for row_idx in xrange(action_dim):
row_offset_in_l = (row_idx*(row_idx+1))/2
lower = tf.slice(l_values, begin=(0, row_offset_in_l), size=(-1, row_idx))
diag = tf.exp(tf.slice(l_values, begin=(0, row_offset_in_l+row_idx), size=(-1, 1)))
upper = tf.zeros((batch_size, action_dim - tf.shape(lower)[1] - 1)) # -1 for diag
rows.append(tf.concat(1, [lower, diag, upper]))
# full L matrix is these rows packed.
L = tf.pack(rows, 0)
# and since leading axis in l was always the batch
# we need to transpose it back to axis0 again
L = tf.transpose(L, (1, 0, 2)) # (batch_size, action_dim, action_dim)
self.check_L = tf.check_numerics(L, "L")
# P is L.L_T
L_T = tf.transpose(L, (0, 2, 1)) # TODO: update tf & use batch_matrix_transpose
P = tf.batch_matmul(L, L_T) # (batch_size, action_dim, action_dim)
# can now calculate advantage
u_mu_diff_T = tf.transpose(u_mu_diff, (0, 2, 1))
advantage = -0.5 * tf.batch_matmul(u_mu_diff_T, tf.batch_matmul(P, u_mu_diff)) # (batch, 1, 1)
# and finally we need to reshape off the axis we added to be able to matmul
self.advantage = tf.reshape(advantage, [-1, 1]) # (batch, 1)
# Q is value + advantage
self.q_value = value_net.value + self.advantage
# target y is reward + discounted target value
# TODO: pull discount out
self.target_y = self.reward + (self.terminal_mask * opts.discount * \
target_value_net.value)
self.target_y = tf.stop_gradient(self.target_y)
# loss is squared difference that we want to minimise.
self.loss = tf.reduce_mean(tf.pow(self.q_value - self.target_y, 2))
with tf.variable_scope("optimiser"):
# dynamically create optimiser based on opts
optimiser = util.construct_optimiser(opts)
# calc gradients
gradients = optimiser.compute_gradients(self.loss)
# potentially clip and wrap with debugging tf.Print
gradients = util.clip_and_debug_gradients(gradients, opts)
# apply
self.train_op = optimiser.apply_gradients(gradients)
# sanity checks (in the dependent order)
checks = []
for op, name in [(l_values, 'l_values'), (L,'L'), (self.loss, 'loss')]:
checks.append(tf.check_numerics(op, name))
self.check_numerics = tf.group(*checks)
def action_given(self, state, add_noise):
# NOTE: noise is added _outside_ tf graph. we do this simply because the noisy output
# is never used for any part of computation graph required for online training. it's
# only used during training after being the replay buffer.
actions = tf.get_default_session().run(self.output_action,
feed_dict={self.input_state: [state],
base_network.IS_TRAINING: False})
if add_noise:
if VERBOSE_DEBUG:
pre_noise = str(actions)
actions[0] += self.exploration_noise.sample()
      actions = np.clip(actions, -1, 1) # action output is _always_ (-1, 1)
if VERBOSE_DEBUG:
print "TRAIN action_given pre_noise %s post_noise %s" % (pre_noise, actions)
return actions
def train(self, batch):
_, _, l = tf.get_default_session().run([self.check_numerics, self.train_op, self.loss],
feed_dict={self.input_state: batch.state_1,
self.input_action: batch.action,
self.reward: batch.reward,
self.terminal_mask: batch.terminal_mask,
self.input_state_2: batch.state_2,
base_network.IS_TRAINING: True})
return l
def debug_values(self, batch):
values = tf.get_default_session().run([self._l_values, self.loss, self.value_net.value,
self.advantage, self.target_value_net.value],
feed_dict={self.input_state: batch.state_1,
self.input_action: batch.action,
self.reward: batch.reward,
self.terminal_mask: batch.terminal_mask,
self.input_state_2: batch.state_2,
base_network.IS_TRAINING: False})
values = [np.squeeze(v) for v in values]
return values
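# Editorial sketch (not part of the original training script): the advantage
# computed above is A(x, u) = -0.5 * (u - mu)^T P (u - mu) with P = L.L^T,
# where L is lower triangular and its diagonal entries are exponentiated
# network outputs. The plain-numpy version below mirrors that packing for a
# single state; all inputs are made-up example values.
def _example_advantage(l_values, u, mu):
  # l_values packs the rows of L: row i contributes i strictly-lower entries
  # then one (pre-exponential) diagonal entry, i.e. dim*(dim+1)/2 values total.
  action_dim = len(u)
  L = np.zeros((action_dim, action_dim))
  idx = 0
  for row in range(action_dim):
    L[row, :row] = l_values[idx:idx + row]     # strictly-lower entries
    L[row, row] = np.exp(l_values[idx + row])  # exponentiated diagonal
    idx += row + 1
  P = L.dot(L.T)
  diff = np.asarray(u) - np.asarray(mu)
  return -0.5 * diff.dot(P).dot(diff)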
class NormalizedAdvantageFunctionAgent(object):
def __init__(self, env):
self.env = env
state_shape = self.env.observation_space.shape
action_dim = self.env.action_space.shape[1]
# for now, with single machine synchronous training, use a replay memory for training.
# TODO: switch back to async training with multiple replicas (as in drivebot project)
self.replay_memory = replay_memory.ReplayMemory(opts.replay_memory_size,
state_shape, action_dim)
# s1 and s2 placeholders
batched_state_shape = [None] + list(state_shape)
s1 = tf.placeholder(shape=batched_state_shape, dtype=tf.float32)
s2 = tf.placeholder(shape=batched_state_shape, dtype=tf.float32)
# initialise base models for value & naf networks. value subportion of net is
# explicitly created seperate because it has a target network note: in the case of
# --share-input-state-representation the input state network of the value_net will
# be reused by the naf.l_value and naf.output_actions net
self.value_net = ValueNetwork("value", s1, opts.hidden_layers)
self.target_value_net = ValueNetwork("target_value", s2, opts.hidden_layers)
self.naf = NafNetwork("naf", s1, s2,
self.value_net, self.target_value_net,
action_dim)
def post_var_init_setup(self):
# prepopulate replay memory (if configured to do so)
# TODO: rewrite!!!
if opts.event_log_in:
self.replay_memory.reset_from_event_log(opts.event_log_in)
# hook networks up to their targets
# ( does one off clobber to init all vars in target network )
self.target_value_net.set_as_target_network_for(self.value_net,
opts.target_update_rate)
def run_training(self, max_num_actions, max_run_time, batch_size, batches_per_step,
saver_util):
# log start time, in case we are limiting by time...
start_time = time.time()
# run for some max number of actions
num_actions_taken = 0
n = 0
while True:
rewards = []
losses = []
# run an episode
if opts.dont_do_rollouts:
# _not_ gathering experience online
pass
else:
# start a new episode
state_1 = self.env.reset()
# prepare data for updating replay memory at end of episode
initial_state = np.copy(state_1)
action_reward_state_sequence = []
episode_start = time.time()
done = False
while not done:
# choose action
action = self.naf.action_given(state_1, add_noise=True)
# take action step in env
state_2, reward, done, _ = self.env.step(action)
rewards.append(reward)
# cache for adding to replay memory
action_reward_state_sequence.append((action, reward, np.copy(state_2)))
# roll state for next step.
state_1 = state_2
# at end of episode update replay memory
print "episode_took", time.time() - episode_start, len(rewards)
replay_add_start = time.time()
self.replay_memory.add_episode(initial_state, action_reward_state_sequence)
print "replay_took", time.time() - replay_add_start
# do a training step (after waiting for buffer to fill a bit...)
if self.replay_memory.size() > opts.replay_memory_burn_in:
# run a set of batches
for _ in xrange(batches_per_step):
batch_start = time.time()
batch = self.replay_memory.batch(batch_size)
losses.append(self.naf.train(batch))
print "batch_took", time.time() - batch_start
# update target nets
self.target_value_net.update_weights()
# do debug (if requested) on last batch
if VERBOSE_DEBUG:
print "-----"
print "> BATCH"
print "state_1", batch.state_1.T
print "action\n", batch.action.T
print "reward ", batch.reward.T
print "terminal_mask ", batch.terminal_mask.T
print "state_2", batch.state_2.T
print "< BATCH"
l_values, l, v, a, vp = self.naf.debug_values(batch)
print "> BATCH DEBUG VALUES"
print "l_values\n", l_values.T
print "loss\t", l
print "val\t" , np.mean(v), "\t", v.T
print "adv\t", np.mean(a), "\t", a.T
print "val'\t", np.mean(vp), "\t", vp.T
print "< BATCH DEBUG VALUES"
# dump some stats and progress info
stats = collections.OrderedDict()
stats["time"] = time.time()
stats["n"] = n
stats["mean_losses"] = float(np.mean(losses))
stats["total_reward"] = np.sum(rewards)
stats["episode_len"] = len(rewards)
stats["replay_memory_stats"] = self.replay_memory.current_stats()
print "STATS %s\t%s" % (datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
json.dumps(stats))
sys.stdout.flush()
n += 1
# save if required
if saver_util is not None:
saver_util.save_if_required()
# emit occasional eval
if VERBOSE_DEBUG or n % 10 == 0:
self.run_eval(1)
# dump weights once if requested
global DUMP_WEIGHTS
if DUMP_WEIGHTS:
self.debug_dump_network_weights()
DUMP_WEIGHTS = False
# exit when finished
num_actions_taken += len(rewards)
if max_num_actions > 0 and num_actions_taken > max_num_actions:
break
if max_run_time > 0 and time.time() > start_time + max_run_time:
break
def run_eval(self, num_episodes, add_noise=False):
""" run num_episodes of eval and output episode length and rewards """
for i in xrange(num_episodes):
state = self.env.reset()
total_reward = 0
steps = 0
done = False
while not done:
action = self.naf.action_given(state, add_noise)
state, reward, done, _ = self.env.step(action)
print "EVALSTEP e%d s%d action=%s (l2=%s) => reward %s" % (i, steps, action,
np.linalg.norm(action), reward)
total_reward += reward
steps += 1
if False: # RENDER ALL STATES / ACTIVATIONS to /tmp
self.naf.render_all_convnet_activations(steps, self.naf.input_state, state)
util.render_state_to_png(steps, state)
util.render_action_to_png(steps, action)
print "EVAL", i, steps, total_reward
sys.stdout.flush()
def debug_dump_network_weights(self):
fn = "/tmp/weights.%s" % time.time()
with open(fn, "w") as f:
f.write("DUMP time %s\n" % time.time())
for var in tf.all_variables():
f.write("VAR %s %s\n" % (var.name, var.get_shape()))
f.write("%s\n" % var.eval())
print "weights written to", fn
def main():
config = tf.ConfigProto()
# config.gpu_options.allow_growth = True
# config.log_device_placement = True
if opts.gpu_mem_fraction is not None:
config.gpu_options.per_process_gpu_memory_fraction = opts.gpu_mem_fraction
with tf.Session(config=config) as sess:
agent = NormalizedAdvantageFunctionAgent(env=env)
# setup saver util and either load latest ckpt or init variables
saver_util = None
if opts.ckpt_dir is not None:
saver_util = util.SaverUtil(sess, opts.ckpt_dir, opts.ckpt_freq)
else:
sess.run(tf.initialize_all_variables())
for v in tf.all_variables():
print >>sys.stderr, v.name, util.shape_and_product_of(v)
# now that we've either init'd from scratch, or loaded up a checkpoint,
# we can do any required post init work.
agent.post_var_init_setup()
# run either eval or training
if opts.num_eval > 0:
agent.run_eval(opts.num_eval, opts.eval_action_noise)
else:
agent.run_training(opts.max_num_actions, opts.max_run_time,
opts.batch_size, opts.batches_per_step,
saver_util)
if saver_util is not None:
saver_util.force_save()
env.reset() # just to flush logging, clumsy :/
if __name__ == "__main__":
main()
| matpalm/cartpoleplusplus | naf_cartpole.py | Python | mit | 22,487 |
# Copyright (C) 2016, University of Notre Dame
# All rights reserved
import website
from django.test.testcases import TestCase
from django.test.utils import override_settings
class GetSiteUrlTest(TestCase):
@override_settings(SITE_URL="https://om.vecnet.org/")
def test_1(self):
self.assertEqual(website.get_site_url(), "https://om.vecnet.org")
@override_settings(SITE_URL="https://om-qa.vecnet.org")
def test_2(self):
self.assertEqual(website.get_site_url(), "https://om-qa.vecnet.org")
@override_settings(SITE_URL="http://127.0.0.1:8000")
def test_3(self):
self.assertEqual(website.get_site_url(), "http://127.0.0.1:8000")
@override_settings(SITE_URL="http://localhost:8000/")
def test_4(self):
self.assertEqual(website.get_site_url(), "http://localhost:8000")
| vecnet/om | website/tests/test_get_site_url.py | Python | mpl-2.0 | 833 |
import unittest
import os
import sys
import time
import os.path as op
from functools import partial
from kivy.clock import Clock
main_path = op.dirname(op.dirname(op.abspath(__file__)))
sys.path.append(main_path)
from messanger import Messanger
class Test(unittest.TestCase):
# sleep function that catches `dt` from Clock
def pause(*args):
time.sleep(0.000001)
# main test function
def run_test(self, app, *args):
Clock.schedule_interval(self.pause, 0.000001)
# Setup
app.main_window.finish_init(None)
app.main_window.add_conversation_to_UI()
sc = app.main_window.ids['screen_controls']
# Excute
app.main_window.select_transition('conversation_0')
# Assert
self.assertEqual(sc.current, 'conversation_0')
self.assertEqual(sc.transition.direction, 'up')
# Comment out if you are editing the test, it'll leave the
# Window opened.
app.stop()
# same named function as the filename(!)
def test_select_transition(self):
app = Messanger()
p = partial(self.run_test, app)
Clock.schedule_once(p, 0.000001)
app.run()
if __name__ == '__main__':
unittest.main()
| Abraxos/hermes | hermes-native/tests/test_select_transition.py | Python | gpl-3.0 | 1,249 |
from mongoengine.errors import OperationError
from mongoengine.queryset.base import (BaseQuerySet, CASCADE, DENY, DO_NOTHING,
NULLIFY, PULL)
__all__ = ('QuerySet', 'QuerySetNoCache', 'DO_NOTHING', 'NULLIFY', 'CASCADE',
'DENY', 'PULL')
# The maximum number of items to display in a QuerySet.__repr__
REPR_OUTPUT_SIZE = 20
ITER_CHUNK_SIZE = 100
class QuerySet(BaseQuerySet):
"""The default queryset, that builds queries and handles a set of results
returned from a query.
Wraps a MongoDB cursor, providing :class:`~mongoengine.Document` objects as
the results.
"""
_has_more = True
_len = None
_result_cache = None
def __iter__(self):
"""Iteration utilises a results cache which iterates the cursor
in batches of ``ITER_CHUNK_SIZE``.
If ``self._has_more`` the cursor hasn't been exhausted so cache then
batch. Otherwise iterate the result_cache.
"""
self._iter = True
if self._has_more:
return self._iter_results()
# iterating over the cache.
return iter(self._result_cache)
def __len__(self):
"""Since __len__ is called quite frequently (for example, as part of
list(qs)), we populate the result cache and cache the length.
"""
if self._len is not None:
return self._len
# Populate the result cache with *all* of the docs in the cursor
if self._has_more:
list(self._iter_results())
# Cache the length of the complete result cache and return it
self._len = len(self._result_cache)
return self._len
def __repr__(self):
"""Provide a string representation of the QuerySet"""
if self._iter:
return '.. queryset mid-iteration ..'
self._populate_cache()
data = self._result_cache[:REPR_OUTPUT_SIZE + 1]
if len(data) > REPR_OUTPUT_SIZE:
data[-1] = '...(remaining elements truncated)...'
return repr(data)
def _iter_results(self):
"""A generator for iterating over the result cache.
Also populates the cache if there are more possible results to
yield. Raises StopIteration when there are no more results.
"""
if self._result_cache is None:
self._result_cache = []
pos = 0
while True:
# For all positions lower than the length of the current result
# cache, serve the docs straight from the cache w/o hitting the
# database.
# XXX it's VERY important to compute the len within the `while`
# condition because the result cache might expand mid-iteration
# (e.g. if we call len(qs) inside a loop that iterates over the
# queryset). Fortunately len(list) is O(1) in Python, so this
# doesn't cause performance issues.
while pos < len(self._result_cache):
yield self._result_cache[pos]
pos += 1
# Raise StopIteration if we already established there were no more
# docs in the db cursor.
if not self._has_more:
raise StopIteration
# Otherwise, populate more of the cache and repeat.
if len(self._result_cache) <= pos:
self._populate_cache()
def _populate_cache(self):
"""
Populates the result cache with ``ITER_CHUNK_SIZE`` more entries
(until the cursor is exhausted).
"""
if self._result_cache is None:
self._result_cache = []
# Skip populating the cache if we already established there are no
# more docs to pull from the database.
if not self._has_more:
return
# Pull in ITER_CHUNK_SIZE docs from the database and store them in
# the result cache.
try:
for _ in xrange(ITER_CHUNK_SIZE):
self._result_cache.append(self.next())
except StopIteration:
# Getting this exception means there are no more docs in the
# db cursor. Set _has_more to False so that we can use that
# information in other places.
self._has_more = False
def count(self, with_limit_and_skip=False):
"""Count the selected elements in the query.
:param with_limit_and_skip (optional): take any :meth:`limit` or
:meth:`skip` that has been applied to this cursor into account when
getting the count
"""
if with_limit_and_skip is False:
return super(QuerySet, self).count(with_limit_and_skip)
if self._len is None:
self._len = super(QuerySet, self).count(with_limit_and_skip)
return self._len
def no_cache(self):
"""Convert to a non-caching queryset
.. versionadded:: 0.8.3 Convert to non caching queryset
"""
if self._result_cache is not None:
raise OperationError('QuerySet already cached')
return self._clone_into(QuerySetNoCache(self._document,
self._collection))
class QuerySetNoCache(BaseQuerySet):
"""A non caching QuerySet"""
def cache(self):
"""Convert to a caching queryset
.. versionadded:: 0.8.3 Convert to caching queryset
"""
return self._clone_into(QuerySet(self._document, self._collection))
def __repr__(self):
"""Provides the string representation of the QuerySet
.. versionchanged:: 0.6.13 Now doesnt modify the cursor
"""
if self._iter:
return '.. queryset mid-iteration ..'
data = []
for _ in xrange(REPR_OUTPUT_SIZE + 1):
try:
data.append(self.next())
except StopIteration:
break
if len(data) > REPR_OUTPUT_SIZE:
data[-1] = '...(remaining elements truncated)...'
self.rewind()
return repr(data)
def __iter__(self):
queryset = self
if queryset._iter:
queryset = self.clone()
queryset.rewind()
return queryset
class QuerySetNoDeRef(QuerySet):
"""Special no_dereference QuerySet"""
def __dereference(items, max_depth=1, instance=None, name=None):
return items
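# Editorial usage sketch (not part of the original module): the caching
# QuerySet above fills _result_cache as it iterates, so repeated iteration or
# len() is served from the cache, while no_cache() switches to a
# QuerySetNoCache that re-reads the cursor each time. With a hypothetical
# BlogPost document class:
#
#     qs = BlogPost.objects(author='ross')    # default caching queryset
#     list(qs); list(qs)                      # second pass hits _result_cache
#     for doc in BlogPost.objects(author='ross').no_cache():
#         pass                                # always re-reads the cursor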
| MakerReduxCorp/mongoengine | mongoengine/queryset/queryset.py | Python | mit | 6,389 |
"""
Component to interface with binary sensors.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/binary_sensor/
"""
import logging
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.entity import Entity
from homeassistant.const import (STATE_ON, STATE_OFF)
from homeassistant.components import (
bloomsky, mysensors, zwave, vera, wemo, wink)
DOMAIN = 'binary_sensor'
SCAN_INTERVAL = 30
ENTITY_ID_FORMAT = DOMAIN + '.{}'
SENSOR_CLASSES = [
None, # Generic on/off
'opening', # Door, window, etc
'motion', # Motion sensor
'gas', # CO, CO2, etc
'smoke', # Smoke detector
'moisture', # Specifically a wetness sensor
'light', # Lightness threshold
'power', # Power, over-current, etc
'safety', # Generic on=unsafe, off=safe
'heat', # On means hot (or too hot)
'cold', # On means cold (or too cold)
'moving', # On means moving, Off means stopped
'sound', # On means sound detected, Off means no sound
'vibration', # On means vibration detected, Off means no vibration
]
# Maps discovered services to their platforms
DISCOVERY_PLATFORMS = {
bloomsky.DISCOVER_BINARY_SENSORS: 'bloomsky',
mysensors.DISCOVER_BINARY_SENSORS: 'mysensors',
zwave.DISCOVER_BINARY_SENSORS: 'zwave',
vera.DISCOVER_BINARY_SENSORS: 'vera',
wemo.DISCOVER_BINARY_SENSORS: 'wemo',
wink.DISCOVER_BINARY_SENSORS: 'wink'
}
def setup(hass, config):
"""Track states and offer events for binary sensors."""
component = EntityComponent(
logging.getLogger(__name__), DOMAIN, hass, SCAN_INTERVAL,
DISCOVERY_PLATFORMS)
component.setup(config)
return True
# pylint: disable=no-self-use
class BinarySensorDevice(Entity):
"""Represent a binary sensor."""
@property
def is_on(self):
"""Return True if the binary sensor is on."""
return None
@property
def state(self):
"""Return the state of the binary sensor."""
return STATE_ON if self.is_on else STATE_OFF
@property
def sensor_class(self):
"""Return the class of this sensor, from SENSOR_CLASSES."""
return None
@property
def state_attributes(self):
"""Return device specific state attributes."""
attr = {}
if self.sensor_class is not None:
attr['sensor_class'] = self.sensor_class
return attr
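# Editorial sketch (not part of the original component): a platform supplies
# concrete sensors by subclassing BinarySensorDevice and overriding is_on
# (and, optionally, sensor_class with one of the SENSOR_CLASSES above). The
# class and attribute names below are made-up examples.
class _ExampleDoorSensor(BinarySensorDevice):
    """Minimal illustrative door sensor backed by an in-memory flag."""

    def __init__(self, name):
        self._name = name
        self._open = False

    @property
    def name(self):
        """Return the name of the example sensor."""
        return self._name

    @property
    def sensor_class(self):
        """Door/window style sensor."""
        return 'opening'

    @property
    def is_on(self):
        """Return True while the door is open."""
        return self._open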
| aoakeson/home-assistant | homeassistant/components/binary_sensor/__init__.py | Python | mit | 2,527 |
import logging, time, re, os, tempfile, ConfigParser
import threading
import xml.dom.minidom
from autotest.client.shared import error, iso9660
from autotest.client import utils
from virttest import virt_vm, utils_misc, utils_disk
from virttest import kvm_monitor, remote, syslog_server
from virttest import http_server
# Whether to print all shell commands called
DEBUG = False
_url_auto_content_server_thread = None
_url_auto_content_server_thread_event = None
_unattended_server_thread = None
_unattended_server_thread_event = None
_syslog_server_thread = None
_syslog_server_thread_event = None
def start_auto_content_server_thread(port, path):
global _url_auto_content_server_thread
global _url_auto_content_server_thread_event
if _url_auto_content_server_thread is None:
_url_auto_content_server_thread_event = threading.Event()
_url_auto_content_server_thread = threading.Thread(
target=http_server.http_server,
args=(port, path, terminate_auto_content_server_thread))
_url_auto_content_server_thread.start()
def start_unattended_server_thread(port, path):
global _unattended_server_thread
global _unattended_server_thread_event
if _unattended_server_thread is None:
_unattended_server_thread_event = threading.Event()
_unattended_server_thread = threading.Thread(
target=http_server.http_server,
args=(port, path, terminate_unattended_server_thread))
_unattended_server_thread.start()
def terminate_auto_content_server_thread():
global _url_auto_content_server_thread
global _url_auto_content_server_thread_event
if _url_auto_content_server_thread is None:
return False
if _url_auto_content_server_thread_event is None:
return False
if _url_auto_content_server_thread_event.isSet():
return True
return False
def terminate_unattended_server_thread():
global _unattended_server_thread, _unattended_server_thread_event
if _unattended_server_thread is None:
return False
if _unattended_server_thread_event is None:
return False
if _unattended_server_thread_event.isSet():
return True
return False
class RemoteInstall(object):
"""
Represents a install http server that we can master according to our needs.
"""
def __init__(self, path, ip, port, filename):
self.path = path
utils_disk.cleanup(self.path)
os.makedirs(self.path)
self.ip = ip
self.port = port
self.filename = filename
start_unattended_server_thread(self.port, self.path)
def get_url(self):
return 'http://%s:%s/%s' % (self.ip, self.port, self.filename)
def get_answer_file_path(self, filename):
return os.path.join(self.path, filename)
def close(self):
os.chmod(self.path, 0755)
logging.debug("unattended http server %s successfully created",
self.get_url())
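# Editorial usage sketch (not part of the original module): RemoteInstall
# serves a single answer file over the built-in HTTP server, so a caller
# typically writes the generated kickstart to get_answer_file_path() and hands
# get_url() to the guest kernel command line. All values below are made-up
# examples.
def _example_remote_install():
    server = RemoteInstall(path='/tmp/ks_server', ip='192.168.122.1',
                           port=8100, filename='ks.cfg')
    with open(server.get_answer_file_path('ks.cfg'), 'w') as f:
        f.write('# kickstart contents go here\n')
    server.close()
    return server.get_url()  # e.g. http://192.168.122.1:8100/ks.cfg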
class UnattendedInstallConfig(object):
"""
Creates a floppy disk image that will contain a config file for unattended
OS install. The parameters to the script are retrieved from environment
variables.
"""
def __init__(self, test, params, vm):
"""
Sets class atributes from test parameters.
@param test: KVM test object.
@param params: Dictionary with test parameters.
"""
root_dir = test.bindir
self.deps_dir = os.path.join(test.virtdir, 'deps')
self.unattended_dir = os.path.join(test.virtdir, 'unattended')
self.params = params
attributes = ['kernel_args', 'finish_program', 'cdrom_cd1',
'unattended_file', 'medium', 'url', 'kernel', 'initrd',
'nfs_server', 'nfs_dir', 'install_virtio',
'floppy_name', 'cdrom_unattended', 'boot_path',
'kernel_params', 'extra_params', 'qemu_img_binary',
                      'cdkey', 'vm_type', 'process_check', 'vfd_size']
for a in attributes:
setattr(self, a, params.get(a, ''))
if self.install_virtio == 'yes':
v_attributes = ['virtio_floppy', 'virtio_storage_path',
'virtio_network_path', 'virtio_oemsetup_id',
                            'virtio_network_installer_path']
for va in v_attributes:
setattr(self, va, params.get(va, ''))
self.tmpdir = test.tmpdir
if getattr(self, 'unattended_file'):
self.unattended_file = os.path.join(test.virtdir, self.unattended_file)
if getattr(self, 'finish_program'):
self.finish_program = os.path.join(test.virtdir, self.finish_program)
if getattr(self, 'qemu_img_binary'):
if not os.path.isfile(getattr(self, 'qemu_img_binary')):
self.qemu_img_binary = os.path.join(root_dir,
self.qemu_img_binary)
if getattr(self, 'cdrom_cd1'):
self.cdrom_cd1 = os.path.join(root_dir, self.cdrom_cd1)
self.cdrom_cd1_mount = tempfile.mkdtemp(prefix='cdrom_cd1_',
dir=self.tmpdir)
if self.medium == 'nfs':
self.nfs_mount = tempfile.mkdtemp(prefix='nfs_',
dir=self.tmpdir)
setattr(self, 'floppy', self.floppy_name)
if getattr(self, 'floppy'):
self.floppy = os.path.join(root_dir, self.floppy)
if not os.path.isdir(os.path.dirname(self.floppy)):
os.makedirs(os.path.dirname(self.floppy))
self.image_path = os.path.dirname(self.kernel)
# Content server params
# lookup host ip address for first nic by interface name
auto_ip = utils_misc.get_ip_address_by_interface(vm.virtnet[0].netdst)
self.url_auto_content_ip = params.get('url_auto_ip', auto_ip)
self.url_auto_content_port = None
# Kickstart server params
# use the same IP as url_auto_content_ip, but a different port
self.unattended_server_port = None
# Embedded Syslog Server
self.syslog_server_enabled = params.get('syslog_server_enabled', 'no')
self.syslog_server_ip = params.get('syslog_server_ip', auto_ip)
self.syslog_server_port = int(params.get('syslog_server_port', 5140))
self.syslog_server_tcp = params.get('syslog_server_proto',
'tcp') == 'tcp'
self.vm = vm
def answer_kickstart(self, answer_path):
"""
Replace KVM_TEST_CDKEY (in the unattended file) with the cdkey
provided for this test and replace the KVM_TEST_MEDIUM with
the tree url or nfs address provided for this test.
@return: Answer file contents
"""
contents = open(self.unattended_file).read()
dummy_cdkey_re = r'\bKVM_TEST_CDKEY\b'
if re.search(dummy_cdkey_re, contents):
if self.cdkey:
contents = re.sub(dummy_cdkey_re, self.cdkey, contents)
dummy_medium_re = r'\bKVM_TEST_MEDIUM\b'
if self.medium in ["cdrom", "kernel_initrd"]:
content = "cdrom"
elif self.medium == "url":
content = "url --url %s" % self.url
elif self.medium == "nfs":
content = "nfs --server=%s --dir=%s" % (self.nfs_server,
self.nfs_dir)
else:
            raise ValueError("Unexpected installation medium %s" % self.medium)
contents = re.sub(dummy_medium_re, content, contents)
dummy_logging_re = r'\bKVM_TEST_LOGGING\b'
if re.search(dummy_logging_re, contents):
if self.syslog_server_enabled == 'yes':
l = 'logging --host=%s --port=%s --level=debug'
l = l % (self.syslog_server_ip, self.syslog_server_port)
else:
l = ''
contents = re.sub(dummy_logging_re, l, contents)
logging.debug("Unattended install contents:")
for line in contents.splitlines():
logging.debug(line)
utils.open_write_close(answer_path, contents)
def answer_windows_ini(self, answer_path):
parser = ConfigParser.ConfigParser()
parser.read(self.unattended_file)
# First, replacing the CDKEY
if self.cdkey:
parser.set('UserData', 'ProductKey', self.cdkey)
else:
logging.error("Param 'cdkey' required but not specified for "
"this unattended installation")
# Now, replacing the virtio network driver path, under double quotes
if self.install_virtio == 'yes':
parser.set('Unattended', 'OemPnPDriversPath',
                       '"%s"' % self.virtio_network_path)
else:
parser.remove_option('Unattended', 'OemPnPDriversPath')
# Replace the virtio installer command
if self.install_virtio == 'yes':
driver = self.virtio_network_installer_path
else:
driver = 'dir'
dummy_re = 'KVM_TEST_VIRTIO_NETWORK_INSTALLER'
installer = parser.get('GuiRunOnce', 'Command0')
if dummy_re in installer:
installer = re.sub(dummy_re, driver, installer)
parser.set('GuiRunOnce', 'Command0', installer)
# Replace the process check in finish command
dummy_process_re = r'\bPROCESS_CHECK\b'
for opt in parser.options('GuiRunOnce'):
process_check = parser.get('GuiRunOnce', opt)
if re.search(dummy_process_re, process_check):
process_check = re.sub(dummy_process_re,
"%s" % self.process_check,
process_check)
parser.set('GuiRunOnce', opt, process_check)
# Now, writing the in memory config state to the unattended file
fp = open(answer_path, 'w')
parser.write(fp)
# Let's read it so we can debug print the contents
fp = open(answer_path, 'r')
contents = fp.read()
logging.debug("Unattended install contents:")
for line in contents.splitlines():
logging.debug(line)
fp.close()
def answer_windows_xml(self, answer_path):
doc = xml.dom.minidom.parse(self.unattended_file)
if self.cdkey:
# First, replacing the CDKEY
product_key = doc.getElementsByTagName('ProductKey')[0]
key = product_key.getElementsByTagName('Key')[0]
key_text = key.childNodes[0]
assert key_text.nodeType == doc.TEXT_NODE
key_text.data = self.cdkey
else:
logging.error("Param 'cdkey' required but not specified for "
"this unattended installation")
# Now, replacing the virtio driver paths or removing the entire
# component PnpCustomizationsWinPE Element Node
if self.install_virtio == 'yes':
paths = doc.getElementsByTagName("Path")
values = [self.virtio_storage_path, self.virtio_network_path]
for path, value in zip(paths, values):
path_text = path.childNodes[0]
                assert path_text.nodeType == doc.TEXT_NODE
path_text.data = value
else:
settings = doc.getElementsByTagName("settings")
for s in settings:
for c in s.getElementsByTagName("component"):
if (c.getAttribute('name') ==
"Microsoft-Windows-PnpCustomizationsWinPE"):
s.removeChild(c)
# Last but not least important, replacing the virtio installer command
# And process check in finish command
command_lines = doc.getElementsByTagName("CommandLine")
for command_line in command_lines:
command_line_text = command_line.childNodes[0]
assert command_line_text.nodeType == doc.TEXT_NODE
dummy_re = 'KVM_TEST_VIRTIO_NETWORK_INSTALLER'
process_check_re = 'PROCESS_CHECK'
if (self.install_virtio == 'yes' and
hasattr(self, 'virtio_network_installer_path')):
driver = self.virtio_network_installer_path
else:
driver = 'dir'
if driver.endswith("msi"):
driver = 'msiexec /passive /package ' + driver
if dummy_re in command_line_text.data:
t = command_line_text.data
t = re.sub(dummy_re, driver, t)
command_line_text.data = t
if process_check_re in command_line_text.data:
t = command_line_text.data
t = re.sub(process_check_re, self.process_check, t)
command_line_text.data = t
contents = doc.toxml()
logging.debug("Unattended install contents:")
for line in contents.splitlines():
logging.debug(line)
fp = open(answer_path, 'w')
doc.writexml(fp)
def answer_suse_xml(self, answer_path):
# There's nothing to replace on SUSE files to date. Yay!
doc = xml.dom.minidom.parse(self.unattended_file)
contents = doc.toxml()
logging.debug("Unattended install contents:")
for line in contents.splitlines():
logging.debug(line)
fp = open(answer_path, 'w')
doc.writexml(fp)
def preseed_initrd(self):
"""
Puts a preseed file inside a gz compressed initrd file.
Debian and Ubuntu use preseed as the OEM install mechanism. The only
way to get fully automated setup without resorting to kernel params
is to add a preseed.cfg file at the root of the initrd image.
"""
logging.debug("Remastering initrd.gz file with preseed file")
dest_fname = 'preseed.cfg'
remaster_path = os.path.join(self.image_path, "initrd_remaster")
if not os.path.isdir(remaster_path):
os.makedirs(remaster_path)
base_initrd = os.path.basename(self.initrd)
os.chdir(remaster_path)
utils.run("gzip -d < ../%s | cpio --extract --make-directories "
"--no-absolute-filenames" % base_initrd, verbose=DEBUG)
utils.run("cp %s %s" % (self.unattended_file, dest_fname),
verbose=DEBUG)
if self.params.get("vm_type") == "libvirt":
utils.run("find . | cpio -H newc --create > ../%s.img" %
base_initrd.rstrip(".gz"), verbose=DEBUG)
else:
utils.run("find . | cpio -H newc --create | gzip -9 > ../%s" %
base_initrd, verbose=DEBUG)
os.chdir(self.image_path)
utils.run("rm -rf initrd_remaster", verbose=DEBUG)
contents = open(self.unattended_file).read()
logging.debug("Unattended install contents:")
for line in contents.splitlines():
logging.debug(line)
def setup_unattended_http_server(self):
'''
Setup a builtin http server for serving the kickstart file
Does nothing if unattended file is not a kickstart file
'''
if self.unattended_file.endswith('.ks'):
# Red Hat kickstart install
dest_fname = 'ks.cfg'
answer_path = os.path.join(self.tmpdir, dest_fname)
self.answer_kickstart(answer_path)
if self.unattended_server_port is None:
self.unattended_server_port = utils_misc.find_free_port(
8000,
8099,
self.url_auto_content_ip)
start_unattended_server_thread(self.unattended_server_port,
self.tmpdir)
# Point installation to this kickstart url
ks_param = 'ks=http://%s:%s/%s' % (self.url_auto_content_ip,
self.unattended_server_port,
dest_fname)
self.kernel_params = getattr(self, 'kernel_params')
if 'ks=' in self.kernel_params:
kernel_params = re.sub('ks\=[\w\d\:\.\/]+',
ks_param,
self.kernel_params)
else:
kernel_params = '%s %s' % (self.kernel_params, ks_param)
# reflect change on params
self.kernel_params = kernel_params
self.params['kernel_params'] = self.kernel_params
def setup_boot_disk(self):
if self.unattended_file.endswith('.sif'):
dest_fname = 'winnt.sif'
setup_file = 'winnt.bat'
boot_disk = utils_disk.FloppyDisk(self.floppy,
self.qemu_img_binary,
self.tmpdir, self.vfd_size)
answer_path = boot_disk.get_answer_file_path(dest_fname)
self.answer_windows_ini(answer_path)
setup_file_path = os.path.join(self.unattended_dir, setup_file)
boot_disk.copy_to(setup_file_path)
if self.install_virtio == "yes":
boot_disk.setup_virtio_win2003(self.virtio_floppy,
self.virtio_oemsetup_id)
boot_disk.copy_to(self.finish_program)
elif self.unattended_file.endswith('.ks'):
# Red Hat kickstart install
dest_fname = 'ks.cfg'
if self.params.get('unattended_delivery_method') == 'integrated':
ks_param = 'ks=cdrom:/dev/sr0:/isolinux/%s' % dest_fname
kernel_params = getattr(self, 'kernel_params')
if 'ks=' in kernel_params:
kernel_params = re.sub('ks\=[\w\d\:\.\/]+',
ks_param,
kernel_params)
else:
kernel_params = '%s %s' % (kernel_params, ks_param)
# Standard setting is kickstart disk in /dev/sr0 and
# install cdrom in /dev/sr1. As we merge them together,
# we need to change repo configuration to /dev/sr0
if 'repo=cdrom' in kernel_params:
kernel_params = re.sub('repo\=cdrom[\:\w\d\/]*',
'repo=cdrom:/dev/sr0',
kernel_params)
self.params['kernel_params'] = ''
boot_disk = utils_disk.CdromInstallDisk(
self.cdrom_unattended,
self.tmpdir,
self.cdrom_cd1_mount,
kernel_params)
elif self.params.get('unattended_delivery_method') == 'url':
if self.unattended_server_port is None:
self.unattended_server_port = utils_misc.find_free_port(
8000,
8099,
self.url_auto_content_ip)
path = os.path.join(os.path.dirname(self.cdrom_unattended),
'ks')
boot_disk = RemoteInstall(path, self.url_auto_content_ip,
self.unattended_server_port,
dest_fname)
ks_param = 'ks=%s' % boot_disk.get_url()
kernel_params = getattr(self, 'kernel_params')
if 'ks=' in kernel_params:
kernel_params = re.sub('ks\=[\w\d\:\.\/]+',
ks_param,
kernel_params)
else:
kernel_params = '%s %s' % (kernel_params, ks_param)
# Standard setting is kickstart disk in /dev/sr0 and
# install cdrom in /dev/sr1. When we get ks via http,
# we need to change repo configuration to /dev/sr0
if 'repo=cdrom' in kernel_params:
if ((self.vm.driver_type == 'xen') and
(self.params.get('hvm_or_pv') == 'pv')):
kernel_params = re.sub('repo\=[\:\w\d\/]*',
'repo=http://%s:%s' %
(self.url_auto_content_ip,
self.url_auto_content_port),
kernel_params)
else:
kernel_params = re.sub('repo\=cdrom[\:\w\d\/]*',
'repo=cdrom:/dev/sr0',
kernel_params)
self.params['kernel_params'] = kernel_params
elif self.params.get('unattended_delivery_method') == 'cdrom':
boot_disk = utils_disk.CdromDisk(self.cdrom_unattended,
self.tmpdir)
elif self.params.get('unattended_delivery_method') == 'floppy':
boot_disk = utils_disk.FloppyDisk(self.floppy,
self.qemu_img_binary,
self.tmpdir, self.vfd_size)
else:
raise ValueError("Neither cdrom_unattended nor floppy set "
"on the config file, please verify")
answer_path = boot_disk.get_answer_file_path(dest_fname)
self.answer_kickstart(answer_path)
elif self.unattended_file.endswith('.xml'):
if "autoyast" in self.kernel_params:
# SUSE autoyast install
dest_fname = "autoinst.xml"
if self.cdrom_unattended:
boot_disk = utils_disk.CdromDisk(self.cdrom_unattended,
self.tmpdir)
elif self.floppy:
boot_disk = utils_disk.FloppyDisk(self.floppy,
self.qemu_img_binary,
self.tmpdir, self.vfd_size)
else:
raise ValueError("Neither cdrom_unattended nor floppy set "
"on the config file, please verify")
answer_path = boot_disk.get_answer_file_path(dest_fname)
self.answer_suse_xml(answer_path)
else:
# Windows unattended install
dest_fname = "autounattend.xml"
boot_disk = utils_disk.FloppyDisk(self.floppy,
self.qemu_img_binary,
self.tmpdir, self.vfd_size)
answer_path = boot_disk.get_answer_file_path(dest_fname)
self.answer_windows_xml(answer_path)
if self.install_virtio == "yes":
boot_disk.setup_virtio_win2008(self.virtio_floppy)
boot_disk.copy_to(self.finish_program)
else:
raise ValueError('Unknown answer file type: %s' %
self.unattended_file)
boot_disk.close()
@error.context_aware
def setup_cdrom(self):
"""
Mount cdrom and copy vmlinuz and initrd.img.
"""
error.context("Copying vmlinuz and initrd.img from install cdrom %s" %
self.cdrom_cd1)
if not os.path.isdir(self.image_path):
os.makedirs(self.image_path)
if (self.params.get('unattended_delivery_method') in
['integrated', 'url']):
i = iso9660.Iso9660Mount(self.cdrom_cd1)
self.cdrom_cd1_mount = i.mnt_dir
else:
i = iso9660.iso9660(self.cdrom_cd1)
if i is None:
raise error.TestFail("Could not instantiate an iso9660 class")
i.copy(os.path.join(self.boot_path, os.path.basename(self.kernel)),
self.kernel)
assert(os.path.getsize(self.kernel) > 0)
i.copy(os.path.join(self.boot_path, os.path.basename(self.initrd)),
self.initrd)
assert(os.path.getsize(self.initrd) > 0)
if self.unattended_file.endswith('.preseed'):
self.preseed_initrd()
if self.params.get("vm_type") == "libvirt":
if self.vm.driver_type == 'qemu':
# Virtinstall command needs files "vmlinuz" and "initrd.img"
os.chdir(self.image_path)
base_kernel = os.path.basename(self.kernel)
base_initrd = os.path.basename(self.initrd)
if base_kernel != 'vmlinuz':
utils.run("mv %s vmlinuz" % base_kernel, verbose=DEBUG)
if base_initrd != 'initrd.img':
utils.run("mv %s initrd.img" % base_initrd, verbose=DEBUG)
if (self.params.get('unattended_delivery_method') !=
'integrated'):
i.close()
utils_disk.cleanup(self.cdrom_cd1_mount)
elif ((self.vm.driver_type == 'xen') and
(self.params.get('hvm_or_pv') == 'pv')):
logging.debug("starting unattended content web server")
self.url_auto_content_port = utils_misc.find_free_port(8100,
8199,
self.url_auto_content_ip)
start_auto_content_server_thread(self.url_auto_content_port,
self.cdrom_cd1_mount)
self.medium = 'url'
self.url = ('http://%s:%s' % (self.url_auto_content_ip,
self.url_auto_content_port))
pxe_path = os.path.join(os.path.dirname(self.image_path), 'xen')
if not os.path.isdir(pxe_path):
os.makedirs(pxe_path)
pxe_kernel = os.path.join(pxe_path,
os.path.basename(self.kernel))
pxe_initrd = os.path.join(pxe_path,
os.path.basename(self.initrd))
utils.run("cp %s %s" % (self.kernel, pxe_kernel))
utils.run("cp %s %s" % (self.initrd, pxe_initrd))
@error.context_aware
def setup_url_auto(self):
"""
Configures the builtin web server for serving content
"""
auto_content_url = 'http://%s:%s' % (self.url_auto_content_ip,
self.url_auto_content_port)
self.params['auto_content_url'] = auto_content_url
@error.context_aware
def setup_url(self):
"""
Download the vmlinuz and initrd.img from URL.
"""
# it's only necessary to download kernel/initrd if running bare qemu
if self.vm_type == 'kvm':
error.context("downloading vmlinuz/initrd.img from %s" % self.url)
if not os.path.exists(self.image_path):
os.mkdir(self.image_path)
os.chdir(self.image_path)
kernel_cmd = "wget -q %s/%s/%s" % (self.url,
self.boot_path,
os.path.basename(self.kernel))
initrd_cmd = "wget -q %s/%s/%s" % (self.url,
self.boot_path,
os.path.basename(self.initrd))
if os.path.exists(self.kernel):
os.remove(self.kernel)
if os.path.exists(self.initrd):
os.remove(self.initrd)
utils.run(kernel_cmd, verbose=DEBUG)
utils.run(initrd_cmd, verbose=DEBUG)
elif self.vm_type == 'libvirt':
logging.info("Not downloading vmlinuz/initrd.img from %s, "
"letting virt-install do it instead")
else:
logging.info("No action defined/needed for the current virt "
"type: '%s'" % self.vm_type)
def setup_nfs(self):
"""
Copy the vmlinuz and initrd.img from nfs.
"""
error.context("copying the vmlinuz and initrd.img from NFS share")
m_cmd = ("mount %s:%s %s -o ro" %
(self.nfs_server, self.nfs_dir, self.nfs_mount))
utils.run(m_cmd, verbose=DEBUG)
try:
kernel_fetch_cmd = ("cp %s/%s/%s %s" %
(self.nfs_mount, self.boot_path,
os.path.basename(self.kernel), self.image_path))
utils.run(kernel_fetch_cmd, verbose=DEBUG)
initrd_fetch_cmd = ("cp %s/%s/%s %s" %
(self.nfs_mount, self.boot_path,
os.path.basename(self.initrd), self.image_path))
utils.run(initrd_fetch_cmd, verbose=DEBUG)
finally:
utils_disk.cleanup(self.nfs_mount)
def setup_import(self):
self.unattended_file = None
self.params['kernel_params'] = None
def setup(self):
"""
Configure the environment for unattended install.
Uses an appropriate strategy according to each install model.
"""
logging.info("Starting unattended install setup")
if DEBUG:
utils_misc.display_attributes(self)
if self.syslog_server_enabled == 'yes':
start_syslog_server_thread(self.syslog_server_ip,
self.syslog_server_port,
self.syslog_server_tcp)
if self.medium in ["cdrom", "kernel_initrd"]:
if self.kernel and self.initrd:
self.setup_cdrom()
elif self.medium == "url":
self.setup_url()
elif self.medium == "nfs":
self.setup_nfs()
elif self.medium == "import":
self.setup_import()
else:
raise ValueError("Unexpected installation method %s" %
self.medium)
if self.unattended_file and (self.floppy or self.cdrom_unattended):
self.setup_boot_disk()
def start_syslog_server_thread(address, port, tcp):
global _syslog_server_thread
global _syslog_server_thread_event
syslog_server.set_default_format('[UnattendedSyslog '
'(%s.%s)] %s')
if _syslog_server_thread is None:
_syslog_server_thread_event = threading.Event()
_syslog_server_thread = threading.Thread(
target=syslog_server.syslog_server,
args=(address, port, tcp, terminate_syslog_server_thread))
_syslog_server_thread.start()
def terminate_syslog_server_thread():
global _syslog_server_thread, _syslog_server_thread_event
if _syslog_server_thread is None:
return False
if _syslog_server_thread_event is None:
return False
if _syslog_server_thread_event.isSet():
return True
return False
@error.context_aware
def run_unattended_install(test, params, env):
"""
Unattended install test:
    1) Starts a VM with an appropriate setup to start an unattended OS install.
    2) Waits until the install reports its end to the install watcher.
@param test: KVM test object.
@param params: Dictionary with the test parameters.
@param env: Dictionary with test environment.
"""
vm = env.get_vm(params["main_vm"])
unattended_install_config = UnattendedInstallConfig(test, params, vm)
unattended_install_config.setup()
# params passed explicitly, because they may have been updated by
# unattended install config code, such as when params['url'] == auto
vm.create(params=params)
post_finish_str = params.get("post_finish_str",
"Post set up finished")
install_timeout = int(params.get("timeout", 3000))
migrate_background = params.get("migrate_background") == "yes"
if migrate_background:
mig_timeout = float(params.get("mig_timeout", "3600"))
mig_protocol = params.get("migration_protocol", "tcp")
logging.info("Waiting for installation to finish. Timeout set to %d s "
"(%d min)", install_timeout, install_timeout / 60)
error.context("waiting for installation to finish")
start_time = time.time()
while (time.time() - start_time) < install_timeout:
try:
vm.verify_alive()
# Due to a race condition, sometimes we might get a MonitorError
# before the VM gracefully shuts down, so let's capture MonitorErrors.
except (virt_vm.VMDeadError, kvm_monitor.MonitorError), e:
if params.get("wait_no_ack", "no") == "yes":
break
else:
raise e
vm.verify_kernel_crash()
finish_signal = vm.serial_console.get_output()
if (params.get("wait_no_ack", "no") == "no" and
(post_finish_str in finish_signal)):
break
        # Because libvirt automatically starts the guest after import,
        # we only need to wait for a successful login.
if params.get("medium") == "import":
try:
vm.login()
break
except (remote.LoginError, Exception), e:
pass
if migrate_background:
vm.migrate(timeout=mig_timeout, protocol=mig_protocol)
else:
time.sleep(1)
else:
raise error.TestFail("Timeout elapsed while waiting for install to "
"finish")
logging.debug('cleaning up threads and mounts that may be active')
global _url_auto_content_server_thread
global _url_auto_content_server_thread_event
if _url_auto_content_server_thread is not None:
_url_auto_content_server_thread_event.set()
_url_auto_content_server_thread.join(3)
_url_auto_content_server_thread = None
utils_disk.cleanup(unattended_install_config.cdrom_cd1_mount)
global _unattended_server_thread
global _unattended_server_thread_event
if _unattended_server_thread is not None:
_unattended_server_thread_event.set()
_unattended_server_thread.join(3)
_unattended_server_thread = None
global _syslog_server_thread
global _syslog_server_thread_event
if _syslog_server_thread is not None:
_syslog_server_thread_event.set()
_syslog_server_thread.join(3)
_syslog_server_thread = None
time_elapsed = time.time() - start_time
logging.info("Guest reported successful installation after %d s (%d min)",
time_elapsed, time_elapsed / 60)
if params.get("shutdown_cleanly", "yes") == "yes":
shutdown_cleanly_timeout = int(params.get("shutdown_cleanly_timeout",
120))
logging.info("Wait for guest to shutdown cleanly")
try:
if utils_misc.wait_for(vm.is_dead, shutdown_cleanly_timeout, 1, 1):
logging.info("Guest managed to shutdown cleanly")
except kvm_monitor.MonitorError, e:
logging.warning("Guest apparently shut down, but got a "
"monitor error: %s", e)
| ldoktor/virt-test | tests/unattended_install.py | Python | gpl-2.0 | 35,594 |
# -*- coding: utf-8 -*-
# vim: set ts=4
# Copyright 2016 Rémi Duraffort
# This file is part of ReactOBus.
#
# ReactOBus is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ReactOBus is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with ReactOBus. If not, see <http://www.gnu.org/licenses/>
import imp
import pytest
import zmq
from . import mock
def test_select():
from reactobus.outputs import Output, ZMQPush, ZMQPub
i = Output.select("ZMQPush", "pull", {"url": ""}, "")
assert isinstance(i, ZMQPush)
i = Output.select("ZMQPub", "sub", {"url": ""}, "")
assert isinstance(i, ZMQPub)
with pytest.raises(NotImplementedError):
Output.select("ZMQ", "zmq", {}, "")
def test_zmq_class():
from reactobus.outputs import ZMQ
with pytest.raises(NotImplementedError):
ZMQ("", {"url": ""}, "").secure_setup()
def test_zmq_push(monkeypatch, tmpdir):
# Reload the base class "Pipe"
import reactobus.utils
imp.reload(reactobus.utils)
import reactobus.outputs
imp.reload(reactobus.outputs)
from reactobus.outputs import ZMQPush
# Replace zmq.Context.instance()
imp.reload(zmq)
zmq_instance = mock.ZMQContextInstance()
monkeypatch.setattr(zmq.Context, "instance", zmq_instance)
url = "ipc://%s" % tmpdir.join("ReactOBus.test.push")
outbound = "ipc://%s" % tmpdir.join("ReactOBus.test.outbound")
# Create the sockets and the data
push = zmq_instance.socket(zmq.PUSH)
sub = zmq_instance.socket(zmq.SUB)
# send an invalid message then a valid one
data = [
[b"test"],
[b"org.reactobus.test", b"uuid", b"2016-11-15", b"testing", b"{}"],
]
sub.recv.extend(data)
p = ZMQPush("push", {"url": url}, outbound)
with pytest.raises(IndexError):
p.run()
assert sub.connected and not sub.bound
assert sub.url == outbound
assert sub.recv == []
assert push.connected and not push.bound
assert push.url == url
assert push.send == data
def test_zmq_pub(monkeypatch, tmpdir):
# Reload the base class "Pipe"
import reactobus.utils
imp.reload(reactobus.utils)
import reactobus.outputs
imp.reload(reactobus.outputs)
from reactobus.outputs import ZMQPub
# Replace zmq.Context.instance()
imp.reload(zmq)
zmq_instance = mock.ZMQContextInstance()
monkeypatch.setattr(zmq.Context, "instance", zmq_instance)
url = "ipc://%s" % tmpdir.join("ReactOBus.test.push")
outbound = "ipc://%s" % tmpdir.join("ReactOBus.test.outbound")
# Create the sockets and the data
pub = zmq_instance.socket(zmq.PUB)
sub = zmq_instance.socket(zmq.SUB)
# send an invalid message then a valid one
data = [
[b"test"],
[b"org.reactobus.test", b"uuid", b"2016-11-15", b"testing", b"{}"],
]
sub.recv.extend(data)
p = ZMQPub("pub", {"url": url}, outbound)
with pytest.raises(IndexError):
p.run()
assert sub.connected and not sub.bound
assert sub.url == outbound
assert sub.recv == []
assert pub.bound and not pub.connected
assert pub.url == url
assert pub.send == data
| ivoire/ReactOBus | tests/test_outputs.py | Python | agpl-3.0 | 3,604 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from pywb.cdx.cdxobject import CDXObject, IDXObject, CDXException
from pytest import raises
def test_empty_cdxobject():
x = CDXObject(b'')
assert len(x) == 0
def test_invalid_cdx_format():
with raises(CDXException):
x = CDXObject(b'a b c')
def _make_line(fields):
line = ' '.join(['-'] * fields)
x = CDXObject(line.encode('utf-8'))
assert len(x) == fields
assert str(x) == line
def test_valid_cdx_formats():
# Currently supported cdx formats, 9, 11, 12, 14 field
# See CDXObject for more details
_make_line(9)
_make_line(12)
_make_line(11)
_make_line(14)
def test_unicode_url():
x = CDXObject(u'com,example,cafe)/ 123 {"url": "http://example.com/café/path"}'.encode('utf-8'))
assert x['urlkey'] == 'com,example,cafe)/'
assert x['timestamp'] == '123'
assert x['url'] == 'http://example.com/caf%C3%A9/path'
assert x.to_cdxj() == 'com,example,cafe)/ 123 {"url": "http://example.com/caf%C3%A9/path"}\n'
def test_invalid_idx_format():
with raises(CDXException):
x = IDXObject(b'a b c')
def test_lt_le():
A = CDXObject(b'ca,example)/ 2016 {"url": "http://example.com/"}')
B = CDXObject(b'com,example)/ 2015 {"url": "http://example.com/"}')
C = CDXObject(b'com,example)/ 2016 {"url": "http://example.com/"}')
assert A < B
assert B < C
assert B >= A
assert C >= A
assert A < C
| pombredanne/pywb | pywb/cdx/test/test_cdxobject.py | Python | gpl-3.0 | 1,455 |
"""
THIS IS NOT OUR CODE!!!
"""
import itertools
from card import Card
class LookupTable(object):
"""
Number of Distinct Hand Values:
Straight Flush 10
Four of a Kind 156 [(13 choose 2) * (2 choose 1)]
Full Houses 156 [(13 choose 2) * (2 choose 1)]
Flush 1277 [(13 choose 5) - 10 straight flushes]
Straight 10
Three of a Kind 858 [(13 choose 3) * (3 choose 1)]
Two Pair 858 [(13 choose 3) * (3 choose 2)]
One Pair 2860 [(13 choose 4) * (4 choose 1)]
High Card + 1277 [(13 choose 5) - 10 straights]
-------------------------
TOTAL 7462
Here we create a lookup table which maps:
5 card hand's unique prime product => rank in range [1, 7462]
Examples:
* Royal flush (best hand possible) => 1
* 7-5-4-3-2 unsuited (worst hand possible) => 7462
"""
MAX_STRAIGHT_FLUSH = 10
MAX_FOUR_OF_A_KIND = 166
MAX_FULL_HOUSE = 322
MAX_FLUSH = 1599
MAX_STRAIGHT = 1609
MAX_THREE_OF_A_KIND = 2467
MAX_TWO_PAIR = 3325
MAX_PAIR = 6185
MAX_HIGH_CARD = 7462
MAX_TO_RANK_CLASS = {
MAX_STRAIGHT_FLUSH: 1,
MAX_FOUR_OF_A_KIND: 2,
MAX_FULL_HOUSE: 3,
MAX_FLUSH: 4,
MAX_STRAIGHT: 5,
MAX_THREE_OF_A_KIND: 6,
MAX_TWO_PAIR: 7,
MAX_PAIR: 8,
MAX_HIGH_CARD: 9
}
RANK_CLASS_TO_STRING = {
1 : "Straight Flush",
2 : "Four of a Kind",
3 : "Full House",
4 : "Flush",
5 : "Straight",
6 : "Three of a Kind",
7 : "Two Pair",
8 : "Pair",
9 : "High Card"
}
def __init__(self):
"""
Calculates lookup tables
"""
# create dictionaries
self.flush_lookup = {}
self.unsuited_lookup = {}
# create the lookup table in piecewise fashion
self.flushes() # this will call straights and high cards method,
# we reuse some of the bit sequences
self.multiples()
def flushes(self):
"""
Straight flushes and flushes.
Lookup is done on 13 bit integer (2^13 > 7462):
xxxbbbbb bbbbbbbb => integer hand index
"""
# straight flushes in rank order
straight_flushes = [
7936, # int('0b1111100000000', 2), # royal flush
3968, # int('0b111110000000', 2),
1984, # int('0b11111000000', 2),
992, # int('0b1111100000', 2),
496, # int('0b111110000', 2),
248, # int('0b11111000', 2),
124, # int('0b1111100', 2),
62, # int('0b111110', 2),
31, # int('0b11111', 2),
4111 # int('0b1000000001111', 2) # 5 high
]
# now we'll dynamically generate all the other
# flushes (including straight flushes)
flushes = []
gen = self.get_lexographically_next_bit_sequence(int('0b11111', 2))
# 1277 = number of high cards
        # 1277 + len(straight_flushes) is the number of hands with all cards of unique rank
for i in range(1277 + len(straight_flushes) - 1): # we also iterate over SFs
# pull the next flush pattern from our generator
f = next(gen)
            # if this flush exactly matches any
            # straight flush, do not add it
notSF = True
for sf in straight_flushes:
# if f XOR sf == 0, then bit pattern
# is same, and we should not add
if not f ^ sf:
notSF = False
if notSF:
flushes.append(f)
# we started from the lowest straight pattern, now we want to start ranking from
# the most powerful hands, so we reverse
flushes.reverse()
# now add to the lookup map:
# start with straight flushes and the rank of 1
        # since they are the best hands in poker
# rank 1 = Royal Flush!
rank = 1
for sf in straight_flushes:
prime_product = Card.prime_product_from_rankbits(sf)
self.flush_lookup[prime_product] = rank
rank += 1
# we start the counting for flushes on max full house, which
# is the worst rank that a full house can have (2,2,2,3,3)
rank = LookupTable.MAX_FULL_HOUSE + 1
for f in flushes:
prime_product = Card.prime_product_from_rankbits(f)
self.flush_lookup[prime_product] = rank
rank += 1
# we can reuse these bit sequences for straights
# and high cards since they are inherently related
# and differ only by context
self.straight_and_highcards(straight_flushes, flushes)
def straight_and_highcards(self, straights, highcards):
"""
Unique five card sets. Straights and highcards.
Reuses bit sequences from flush calculations.
"""
rank = LookupTable.MAX_FLUSH + 1
for s in straights:
prime_product = Card.prime_product_from_rankbits(s)
self.unsuited_lookup[prime_product] = rank
rank += 1
rank = LookupTable.MAX_PAIR + 1
for h in highcards:
prime_product = Card.prime_product_from_rankbits(h)
self.unsuited_lookup[prime_product] = rank
rank += 1
def multiples(self):
"""
Pair, Two Pair, Three of a Kind, Full House, and 4 of a Kind.
"""
backwards_ranks = list(range(len(Card.INT_RANKS) - 1, -1, -1))
# 1) Four of a Kind
rank = LookupTable.MAX_STRAIGHT_FLUSH + 1
# for each choice of a set of four rank
for i in backwards_ranks:
# and for each possible kicker rank
kickers = backwards_ranks[:]
kickers.remove(i)
for k in kickers:
product = Card.PRIMES[i]**4 * Card.PRIMES[k]
self.unsuited_lookup[product] = rank
rank += 1
# 2) Full House
rank = LookupTable.MAX_FOUR_OF_A_KIND + 1
# for each three of a kind
for i in backwards_ranks:
# and for each choice of pair rank
pairranks = backwards_ranks[:]
pairranks.remove(i)
for pr in pairranks:
product = Card.PRIMES[i]**3 * Card.PRIMES[pr]**2
self.unsuited_lookup[product] = rank
rank += 1
# 3) Three of a Kind
rank = LookupTable.MAX_STRAIGHT + 1
# pick three of one rank
for r in backwards_ranks:
kickers = backwards_ranks[:]
kickers.remove(r)
gen = itertools.combinations(kickers, 2)
for kickers in gen:
c1, c2 = kickers
product = Card.PRIMES[r]**3 * Card.PRIMES[c1] * Card.PRIMES[c2]
self.unsuited_lookup[product] = rank
rank += 1
# 4) Two Pair
rank = LookupTable.MAX_THREE_OF_A_KIND + 1
tpgen = itertools.combinations(backwards_ranks, 2)
for tp in tpgen:
pair1, pair2 = tp
kickers = backwards_ranks[:]
kickers.remove(pair1)
kickers.remove(pair2)
for kicker in kickers:
product = Card.PRIMES[pair1]**2 * Card.PRIMES[pair2]**2 * Card.PRIMES[kicker]
self.unsuited_lookup[product] = rank
rank += 1
# 5) Pair
rank = LookupTable.MAX_TWO_PAIR + 1
# choose a pair
for pairrank in backwards_ranks:
kickers = backwards_ranks[:]
kickers.remove(pairrank)
kgen = itertools.combinations(kickers, 3)
for kickers in kgen:
k1, k2, k3 = kickers
product = Card.PRIMES[pairrank]**2 * Card.PRIMES[k1] \
* Card.PRIMES[k2] * Card.PRIMES[k3]
self.unsuited_lookup[product] = rank
rank += 1
def write_table_to_disk(self, table, filepath):
"""
Writes lookup table to disk
"""
with open(filepath, 'w') as f:
for prime_prod, rank in table.items():
f.write(str(prime_prod) +","+ str(rank) + '\n')
def get_lexographically_next_bit_sequence(self, bits):
"""
Bit hack from here:
http://www-graphics.stanford.edu/~seander/bithacks.html#NextBitPermutation
        The generator even yields these in poker rank order,
so no need to sort when done! Perfect.
"""
t = (bits | (bits - 1)) + 1
next = t | ((((t & -t) // (bits & -bits)) >> 1) - 1)
yield next
while True:
t = (next | (next - 1)) + 1
next = t | ((((t & -t) // (next & -next)) >> 1) - 1)
            yield next
| simonbw/poker-player | lookup.py | Python | mit | 8,924 |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: messagepath/v1/visibility_rules.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
import kik_unofficial.protobuf.common_model_pb2 as common__model__pb2
from kik_unofficial.protobuf.common.v1 import model_pb2 as common_dot_v1_dot_model__pb2
import kik_unofficial.protobuf.protobuf_validation_pb2 as protobuf__validation__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='messagepath/v1/visibility_rules.proto',
package='common.messagepath.v1',
syntax='proto3',
serialized_pb=_b('\n%messagepath/v1/visibility_rules.proto\x12\x15\x63ommon.messagepath.v1\x1a\x12\x63ommon_model.proto\x1a\x15\x63ommon/v1/model.proto\x1a\x19protobuf_validation.proto\"\xbd\x02\n\x19VisibilityRulesAttachment\x12\x32\n\tinitiator\x18\x01 \x01(\x0b\x32\x15.common.XiBareUserJidB\x08\x18\x01\xca\x9d%\x02\x08\x00\x12\x38\n\x0cinitiator_v2\x18\x04 \x01(\x0b\x32\".common.v1.XiBareUserJidOrAliasJid\x12$\n\x1c\x64rop_if_initiator_not_friend\x18\x02 \x01(\x08\x12\x43\n\x04rule\x18\x03 \x01(\x0e\x32\x35.common.messagepath.v1.VisibilityRulesAttachment.Rule\"G\n\x04Rule\x12\x1d\n\x19USE_SENDER_FOR_VISIBILITY\x10\x00\x12 \n\x1cUSE_INITIATOR_FOR_VISIBILITY\x10\x01\x42z\n\x19\x63om.kik.messagepath.modelZVgithub.com/kikinteractive/xiphias-model-common/generated/go/messagepath/v1;messagepath\xa2\x02\x04MPTHb\x06proto3')
,
dependencies=[common__model__pb2.DESCRIPTOR,common_dot_v1_dot_model__pb2.DESCRIPTOR,protobuf__validation__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_VISIBILITYRULESATTACHMENT_RULE = _descriptor.EnumDescriptor(
name='Rule',
full_name='common.messagepath.v1.VisibilityRulesAttachment.Rule',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='USE_SENDER_FOR_VISIBILITY', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='USE_INITIATOR_FOR_VISIBILITY', index=1, number=1,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=381,
serialized_end=452,
)
_sym_db.RegisterEnumDescriptor(_VISIBILITYRULESATTACHMENT_RULE)
_VISIBILITYRULESATTACHMENT = _descriptor.Descriptor(
name='VisibilityRulesAttachment',
full_name='common.messagepath.v1.VisibilityRulesAttachment',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='initiator', full_name='common.messagepath.v1.VisibilityRulesAttachment.initiator', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\030\001\312\235%\002\010\000'))),
_descriptor.FieldDescriptor(
name='initiator_v2', full_name='common.messagepath.v1.VisibilityRulesAttachment.initiator_v2', index=1,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='drop_if_initiator_not_friend', full_name='common.messagepath.v1.VisibilityRulesAttachment.drop_if_initiator_not_friend', index=2,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='rule', full_name='common.messagepath.v1.VisibilityRulesAttachment.rule', index=3,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_VISIBILITYRULESATTACHMENT_RULE,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=135,
serialized_end=452,
)
_VISIBILITYRULESATTACHMENT.fields_by_name['initiator'].message_type = common__model__pb2._XIBAREUSERJID
_VISIBILITYRULESATTACHMENT.fields_by_name['initiator_v2'].message_type = common_dot_v1_dot_model__pb2._XIBAREUSERJIDORALIASJID
_VISIBILITYRULESATTACHMENT.fields_by_name['rule'].enum_type = _VISIBILITYRULESATTACHMENT_RULE
_VISIBILITYRULESATTACHMENT_RULE.containing_type = _VISIBILITYRULESATTACHMENT
DESCRIPTOR.message_types_by_name['VisibilityRulesAttachment'] = _VISIBILITYRULESATTACHMENT
VisibilityRulesAttachment = _reflection.GeneratedProtocolMessageType('VisibilityRulesAttachment', (_message.Message,), dict(
DESCRIPTOR = _VISIBILITYRULESATTACHMENT,
__module__ = 'messagepath.v1.visibility_rules_pb2'
# @@protoc_insertion_point(class_scope:common.messagepath.v1.VisibilityRulesAttachment)
))
_sym_db.RegisterMessage(VisibilityRulesAttachment)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\031com.kik.messagepath.modelZVgithub.com/kikinteractive/xiphias-model-common/generated/go/messagepath/v1;messagepath\242\002\004MPTH'))
_VISIBILITYRULESATTACHMENT.fields_by_name['initiator'].has_options = True
_VISIBILITYRULESATTACHMENT.fields_by_name['initiator']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\030\001\312\235%\002\010\000'))
# @@protoc_insertion_point(module_scope)
| tomer8007/kik-bot-api-unofficial | kik_unofficial/protobuf/messagepath/v1/visibility_rules_pb2.py | Python | mit | 5,997 |
#!/usr/bin/python
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: aws_direct_connect_virtual_interface
short_description: Manage Direct Connect virtual interfaces
description:
- Create, delete, or modify a Direct Connect public or private virtual interface.
version_added: "2.5"
author: "Sloane Hertel (@s-hertel)"
requirements:
- boto3
- botocore
options:
state:
description:
- The desired state of the Direct Connect virtual interface.
choices: [present, absent]
type: str
required: true
id_to_associate:
description:
- The ID of the link aggregation group or connection to associate with the virtual interface.
aliases: [link_aggregation_group_id, connection_id]
type: str
required: true
public:
description:
- The type of virtual interface.
type: bool
name:
description:
- The name of the virtual interface.
type: str
vlan:
description:
- The VLAN ID.
default: 100
type: int
bgp_asn:
description:
- The autonomous system (AS) number for Border Gateway Protocol (BGP) configuration.
default: 65000
type: int
authentication_key:
description:
- The authentication key for BGP configuration.
type: str
amazon_address:
description:
- The amazon address CIDR with which to create the virtual interface.
type: str
customer_address:
description:
- The customer address CIDR with which to create the virtual interface.
type: str
address_type:
description:
- The type of IP address for the BGP peer.
type: str
cidr:
description:
- A list of route filter prefix CIDRs with which to create the public virtual interface.
type: list
elements: str
virtual_gateway_id:
description:
- The virtual gateway ID required for creating a private virtual interface.
type: str
virtual_interface_id:
description:
- The virtual interface ID.
type: str
extends_documentation_fragment:
- aws
- ec2
'''
RETURN = '''
address_family:
description: The address family for the BGP peer.
returned: always
type: str
sample: ipv4
amazon_address:
description: IP address assigned to the Amazon interface.
returned: always
type: str
sample: 169.254.255.1/30
asn:
description: The autonomous system (AS) number for Border Gateway Protocol (BGP) configuration.
returned: always
type: int
sample: 65000
auth_key:
description: The authentication key for BGP configuration.
returned: always
type: str
sample: 0xZ59Y1JZ2oDOSh6YriIlyRE
bgp_peers:
description: A list of the BGP peers configured on this virtual interface.
returned: always
type: complex
contains:
address_family:
description: The address family for the BGP peer.
returned: always
type: str
sample: ipv4
amazon_address:
description: IP address assigned to the Amazon interface.
returned: always
type: str
sample: 169.254.255.1/30
asn:
description: The autonomous system (AS) number for Border Gateway Protocol (BGP) configuration.
returned: always
type: int
sample: 65000
auth_key:
description: The authentication key for BGP configuration.
returned: always
type: str
sample: 0xZ59Y1JZ2oDOSh6YriIlyRE
bgp_peer_state:
description: The state of the BGP peer (verifying, pending, available)
returned: always
type: str
sample: available
bgp_status:
description: The up/down state of the BGP peer.
returned: always
type: str
sample: up
customer_address:
description: IP address assigned to the customer interface.
returned: always
type: str
sample: 169.254.255.2/30
changed:
    description: Indicates if the virtual interface has been created/modified/deleted.
returned: always
type: bool
sample: false
connection_id:
description:
- The ID of the connection. This field is also used as the ID type for operations that
use multiple connection types (LAG, interconnect, and/or connection).
returned: always
type: str
sample: dxcon-fgb175av
customer_address:
description: IP address assigned to the customer interface.
returned: always
type: str
sample: 169.254.255.2/30
customer_router_config:
description: Information for generating the customer router configuration.
returned: always
type: str
location:
description: Where the connection is located.
returned: always
type: str
sample: EqDC2
owner_account:
description: The AWS account that will own the new virtual interface.
returned: always
type: str
sample: '123456789012'
route_filter_prefixes:
description: A list of routes to be advertised to the AWS network in this region (public virtual interface).
returned: always
type: complex
contains:
cidr:
            description: A route to be advertised to the AWS network in this region.
returned: always
type: str
sample: 54.227.92.216/30
virtual_gateway_id:
description: The ID of the virtual private gateway to a VPC. This only applies to private virtual interfaces.
returned: when I(public=False)
type: str
sample: vgw-f3ce259a
virtual_interface_id:
description: The ID of the virtual interface.
returned: always
type: str
sample: dxvif-fh0w7cex
virtual_interface_name:
description: The name of the virtual interface assigned by the customer.
returned: always
type: str
sample: test_virtual_interface
virtual_interface_state:
description: State of the virtual interface (confirming, verifying, pending, available, down, rejected).
returned: always
type: str
sample: available
virtual_interface_type:
description: The type of virtual interface (private, public).
returned: always
type: str
sample: private
vlan:
description: The VLAN ID.
returned: always
type: int
sample: 100
'''
EXAMPLES = '''
---
- name: create an association between a LAG and connection
aws_direct_connect_virtual_interface:
state: present
name: "{{ name }}"
link_aggregation_group_id: LAG-XXXXXXXX
connection_id: dxcon-XXXXXXXX
- name: remove an association between a connection and virtual interface
aws_direct_connect_virtual_interface:
state: absent
connection_id: dxcon-XXXXXXXX
virtual_interface_id: dxv-XXXXXXXX
'''
import traceback
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.aws.direct_connect import DirectConnectError, delete_virtual_interface
from ansible.module_utils.ec2 import (AWSRetry, HAS_BOTO3, boto3_conn,
ec2_argument_spec, get_aws_connection_info,
camel_dict_to_snake_dict)
try:
from botocore.exceptions import ClientError, BotoCoreError
except ImportError:
# handled by HAS_BOTO3
pass
def try_except_ClientError(failure_msg):
'''
Wrapper for boto3 calls that uses AWSRetry and handles exceptions
'''
def wrapper(f):
def run_func(*args, **kwargs):
try:
result = AWSRetry.backoff(tries=8, delay=5, catch_extra_error_codes=['DirectConnectClientException'])(f)(*args, **kwargs)
except (ClientError, BotoCoreError) as e:
raise DirectConnectError(failure_msg, traceback.format_exc(), e)
return result
return run_func
return wrapper
def find_unique_vi(client, connection_id, virtual_interface_id, name):
'''
Determines if the virtual interface exists. Returns the virtual interface ID if an exact match is found.
If multiple matches are found False is returned. If no matches are found None is returned.
'''
# Get the virtual interfaces, filtering by the ID if provided.
vi_params = {}
if virtual_interface_id:
vi_params = {'virtualInterfaceId': virtual_interface_id}
virtual_interfaces = try_except_ClientError(
failure_msg="Failed to describe virtual interface")(
client.describe_virtual_interfaces)(**vi_params).get('virtualInterfaces')
# Remove deleting/deleted matches from the results.
virtual_interfaces = [vi for vi in virtual_interfaces if vi['virtualInterfaceState'] not in ('deleting', 'deleted')]
matching_virtual_interfaces = filter_virtual_interfaces(virtual_interfaces, name, connection_id)
return exact_match(matching_virtual_interfaces)
def exact_match(virtual_interfaces):
'''
Returns the virtual interface ID if one was found,
None if the virtual interface ID needs to be created,
False if an exact match was not found
'''
if not virtual_interfaces:
return None
if len(virtual_interfaces) == 1:
return virtual_interfaces[0]['virtualInterfaceId']
else:
return False
def filter_virtual_interfaces(virtual_interfaces, name, connection_id):
'''
Filters the available virtual interfaces to try to find a unique match
'''
# Filter by name if provided.
if name:
matching_by_name = find_virtual_interface_by_name(virtual_interfaces, name)
if len(matching_by_name) == 1:
return matching_by_name
else:
matching_by_name = virtual_interfaces
# If there isn't a unique match filter by connection ID as last resort (because connection_id may be a connection yet to be associated)
if connection_id and len(matching_by_name) > 1:
matching_by_connection_id = find_virtual_interface_by_connection_id(matching_by_name, connection_id)
if len(matching_by_connection_id) == 1:
return matching_by_connection_id
else:
matching_by_connection_id = matching_by_name
return matching_by_connection_id
def find_virtual_interface_by_connection_id(virtual_interfaces, connection_id):
'''
Return virtual interfaces that have the connection_id associated
'''
return [vi for vi in virtual_interfaces if vi['connectionId'] == connection_id]
def find_virtual_interface_by_name(virtual_interfaces, name):
'''
Return virtual interfaces that match the provided name
'''
return [vi for vi in virtual_interfaces if vi['virtualInterfaceName'] == name]
def vi_state(client, virtual_interface_id):
'''
    Returns the full description of the virtual interface (including its state).
'''
err_msg = "Failed to describe virtual interface: {0}".format(virtual_interface_id)
vi = try_except_ClientError(failure_msg=err_msg)(client.describe_virtual_interfaces)(virtualInterfaceId=virtual_interface_id)
return vi['virtualInterfaces'][0]
def assemble_params_for_creating_vi(params):
'''
Returns kwargs to use in the call to create the virtual interface
Params for public virtual interfaces:
virtualInterfaceName, vlan, asn, authKey, amazonAddress, customerAddress, addressFamily, cidr
Params for private virtual interfaces:
virtualInterfaceName, vlan, asn, authKey, amazonAddress, customerAddress, addressFamily, virtualGatewayId
'''
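    # For example (values are hypothetical, borrowed from the RETURN samples
    # above): a private interface could assemble to
    #   {'virtualInterfaceName': 'test-vi', 'vlan': 100, 'asn': 65000,
    #    'virtualGatewayId': 'vgw-f3ce259a'}
    # while a public one drops virtualGatewayId and instead carries
    #   'routeFilterPrefixes': [{'cidr': '54.227.92.216/30'}].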
public = params['public']
name = params['name']
vlan = params['vlan']
bgp_asn = params['bgp_asn']
auth_key = params['authentication_key']
amazon_addr = params['amazon_address']
customer_addr = params['customer_address']
family_addr = params['address_type']
cidr = params['cidr']
virtual_gateway_id = params['virtual_gateway_id']
parameters = dict(virtualInterfaceName=name, vlan=vlan, asn=bgp_asn)
opt_params = dict(authKey=auth_key, amazonAddress=amazon_addr, customerAddress=customer_addr, addressFamily=family_addr)
for name, value in opt_params.items():
if value:
parameters[name] = value
# virtual interface type specific parameters
if public and cidr:
parameters['routeFilterPrefixes'] = [{'cidr': c} for c in cidr]
if not public:
parameters['virtualGatewayId'] = virtual_gateway_id
return parameters
def create_vi(client, public, associated_id, creation_params):
'''
:param public: a boolean
:param associated_id: a link aggregation group ID or connection ID to associate
with the virtual interface.
:param creation_params: a dict of parameters to use in the boto call
:return The ID of the created virtual interface
'''
err_msg = "Failed to create virtual interface"
if public:
vi = try_except_ClientError(failure_msg=err_msg)(client.create_public_virtual_interface)(connectionId=associated_id,
newPublicVirtualInterface=creation_params)
else:
vi = try_except_ClientError(failure_msg=err_msg)(client.create_private_virtual_interface)(connectionId=associated_id,
newPrivateVirtualInterface=creation_params)
return vi['virtualInterfaceId']
def modify_vi(client, virtual_interface_id, connection_id):
'''
Associate a new connection ID
'''
err_msg = "Unable to associate {0} with virtual interface {1}".format(connection_id, virtual_interface_id)
try_except_ClientError(failure_msg=err_msg)(client.associate_virtual_interface)(virtualInterfaceId=virtual_interface_id,
connectionId=connection_id)
def needs_modification(client, virtual_interface_id, connection_id):
'''
Determine if the associated connection ID needs to be updated
'''
return vi_state(client, virtual_interface_id).get('connectionId') != connection_id
def ensure_state(connection, module):
changed = False
state = module.params['state']
connection_id = module.params['id_to_associate']
public = module.params['public']
name = module.params['name']
virtual_interface_id = find_unique_vi(connection, connection_id, module.params.get('virtual_interface_id'), name)
if virtual_interface_id is False:
module.fail_json(msg="Multiple virtual interfaces were found. Use the virtual_interface_id, name, "
"and connection_id options if applicable to find a unique match.")
if state == 'present':
if not virtual_interface_id and module.params['virtual_interface_id']:
module.fail_json(msg="The virtual interface {0} does not exist.".format(module.params['virtual_interface_id']))
elif not virtual_interface_id:
assembled_params = assemble_params_for_creating_vi(module.params)
virtual_interface_id = create_vi(connection, public, connection_id, assembled_params)
changed = True
if needs_modification(connection, virtual_interface_id, connection_id):
modify_vi(connection, virtual_interface_id, connection_id)
changed = True
latest_state = vi_state(connection, virtual_interface_id)
else:
if virtual_interface_id:
delete_virtual_interface(connection, virtual_interface_id)
changed = True
latest_state = {}
return changed, latest_state
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state=dict(required=True, choices=['present', 'absent']),
id_to_associate=dict(required=True, aliases=['link_aggregation_group_id', 'connection_id']),
public=dict(type='bool'),
name=dict(),
vlan=dict(type='int', default=100),
bgp_asn=dict(type='int', default=65000),
authentication_key=dict(),
amazon_address=dict(),
customer_address=dict(),
address_type=dict(),
cidr=dict(type='list'),
virtual_gateway_id=dict(),
virtual_interface_id=dict()
))
module = AnsibleAWSModule(argument_spec=argument_spec,
required_one_of=[['virtual_interface_id', 'name']],
required_if=[['state', 'present', ['public']],
['public', False, ['virtual_gateway_id']],
['public', True, ['amazon_address']],
['public', True, ['customer_address']],
['public', True, ['cidr']]])
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
connection = boto3_conn(module, conn_type='client', resource='directconnect', region=region, endpoint=ec2_url, **aws_connect_kwargs)
try:
changed, latest_state = ensure_state(connection, module)
except DirectConnectError as e:
if e.exception:
module.fail_json_aws(exception=e.exception, msg=e.msg)
else:
module.fail_json(msg=e.msg)
module.exit_json(changed=changed, **camel_dict_to_snake_dict(latest_state))
if __name__ == '__main__':
main()
| Lujeni/ansible | lib/ansible/modules/cloud/amazon/aws_direct_connect_virtual_interface.py | Python | gpl-3.0 | 17,347 |
import datetime
import ddt
import pytz
from django.core.files.uploadedfile import SimpleUploadedFile
from django.urls import reverse
from django.test import RequestFactory
from opaque_keys.edx.keys import CourseKey
from openedx.core.lib.courses import course_image_url
from rest_framework.test import APIClient
from xmodule.contentstore.content import StaticContent
from xmodule.contentstore.django import contentstore
from xmodule.exceptions import NotFoundError
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ToyCourseFactory
from student.models import CourseAccessRole
from student.tests.factories import AdminFactory, TEST_PASSWORD, UserFactory
from ..utils import serialize_datetime
from ...serializers.course_runs import CourseRunSerializer
@ddt.ddt
class CourseRunViewSetTests(ModuleStoreTestCase):
"""
Tests for creating course runs
"""
list_url = reverse('api:v1:course_run-list')
def setUp(self):
super(CourseRunViewSetTests, self).setUp()
self.client = APIClient()
user = AdminFactory()
self.client.login(username=user.username, password=TEST_PASSWORD)
def get_course_run_data(self, user, start, end, pacing_type, role='instructor'):
return {
'title': 'Testing 101',
'org': 'TestingX',
'number': 'Testing101x',
'run': '3T2017',
'schedule': {
'start': serialize_datetime(start),
'end': serialize_datetime(end),
},
'team': [
{
'user': user.username,
'role': role,
}
],
'pacing_type': pacing_type,
}
def assert_course_run_schedule(self, course_run, start, end):
assert course_run.start == start
assert course_run.end == end
def assert_access_role(self, course_run, user, role):
# An error will be raised if the endpoint did not create the role
assert CourseAccessRole.objects.filter(
course_id=course_run.id, org=course_run.id.org, user=user, role=role).count() == 1
def assert_course_access_role_count(self, course_run, expected):
assert CourseAccessRole.objects.filter(course_id=course_run.id).count() == expected
def get_serializer_context(self):
return {'request': RequestFactory().get('')}
def test_without_authentication(self):
self.client.logout()
response = self.client.get(self.list_url)
assert response.status_code == 401
def test_without_authorization(self):
user = UserFactory(is_staff=False)
self.client.login(username=user.username, password=TEST_PASSWORD)
response = self.client.get(self.list_url)
assert response.status_code == 403
def test_list(self):
course_runs = CourseFactory.create_batch(3)
response = self.client.get(self.list_url)
assert response.status_code == 200
# Order matters for the assertion
course_runs = sorted(course_runs, key=lambda course_run: str(course_run.id))
actual = sorted(response.data['results'], key=lambda course_run: course_run['id'])
assert actual == CourseRunSerializer(course_runs, many=True, context=self.get_serializer_context()).data
def test_retrieve(self):
course_run = CourseFactory()
url = reverse('api:v1:course_run-detail', kwargs={'pk': str(course_run.id)})
response = self.client.get(url)
assert response.status_code == 200
assert response.data == CourseRunSerializer(course_run, context=self.get_serializer_context()).data
def test_retrieve_not_found(self):
url = reverse('api:v1:course_run-detail', kwargs={'pk': 'course-v1:TestX+Test101x+1T2017'})
response = self.client.get(url)
assert response.status_code == 404
def test_update_not_found(self):
url = reverse('api:v1:course_run-detail', kwargs={'pk': 'course-v1:TestX+Test101x+1T2017'})
response = self.client.put(url, {})
assert response.status_code == 404
def test_update(self):
course_run = CourseFactory(start=None, end=None)
assert CourseAccessRole.objects.filter(course_id=course_run.id).count() == 0
url = reverse('api:v1:course_run-detail', kwargs={'pk': str(course_run.id)})
start = datetime.datetime.now(pytz.UTC).replace(microsecond=0)
end = start + datetime.timedelta(days=30)
title = 'A New Testing Strategy'
user = UserFactory()
role = 'staff'
data = {
'title': title,
'schedule': {
'start': serialize_datetime(start),
'end': serialize_datetime(end),
},
'team': [
{
'user': user.username,
'role': role,
}
],
}
response = self.client.put(url, data, format='json')
assert response.status_code == 200
self.assert_access_role(course_run, user, role)
self.assert_course_access_role_count(course_run, 1)
course_run = modulestore().get_course(course_run.id)
assert response.data == CourseRunSerializer(course_run, context=self.get_serializer_context()).data
assert course_run.display_name == title
self.assert_course_run_schedule(course_run, start, end)
def test_update_with_invalid_user(self):
course_run = CourseFactory()
url = reverse('api:v1:course_run-detail', kwargs={'pk': str(course_run.id)})
data = {
'title': course_run.display_name,
'team': [
{
'user': 'test-user',
'role': 'staff',
}
]
}
response = self.client.put(url, data, format='json')
assert response.status_code == 400
assert response.data == {'team': ['Course team user does not exist']}
def test_update_with_pacing_type(self):
"""
        Test that updating a course run updates the pacing type
"""
start = datetime.datetime.now(pytz.UTC).replace(microsecond=0)
course_run = CourseFactory(start=start, end=None, self_paced=False)
data = {
'pacing_type': 'self_paced',
}
course_run_detail_url = reverse('api:v1:course_run-detail', kwargs={'pk': str(course_run.id)})
response = self.client.patch(course_run_detail_url, data, format='json')
assert response.status_code == 200
course_run = modulestore().get_course(course_run.id)
assert course_run.self_paced is True
self.assert_course_run_schedule(course_run, start, None)
def test_update_with_instructor_role(self):
"""
Test that update creates a new instructor role only if it does not exist
"""
instructor_role = 'instructor'
start = datetime.datetime.now(pytz.UTC).replace(microsecond=0)
new_user = UserFactory()
course_run = CourseFactory(start=start, end=None, self_paced=False)
assert CourseAccessRole.objects.filter(course_id=course_run.id).count() == 0
data = {
'team': [
{
'user': new_user.username,
'role': instructor_role,
},
],
'pacing_type': 'self_paced',
}
course_run_detail_url = reverse('api:v1:course_run-detail', kwargs={'pk': str(course_run.id)})
response = self.client.patch(course_run_detail_url, data, format='json')
assert response.status_code == 200
self.assert_access_role(course_run, new_user, instructor_role)
self.assert_course_access_role_count(course_run, 1)
# Requesting again with the same data should not create new instructor role
response = self.client.patch(course_run_detail_url, data, format='json')
assert response.status_code == 200
self.assert_access_role(course_run, new_user, instructor_role)
self.assert_course_access_role_count(course_run, 1)
def test_update_with_multiple_roles(self):
"""
        Test that update creates an instructor role for a user in addition to any other roles they already have
"""
staff_role = 'staff'
instructor_role = 'instructor'
start = datetime.datetime.now(pytz.UTC).replace(microsecond=0)
course_run = CourseFactory(start=start, end=None, self_paced=False)
existing_user = UserFactory()
CourseAccessRole.objects.create(
course_id=course_run.id, org=course_run.id.org, role=staff_role, user=existing_user
)
# existing_user already has a staff role in the course
# The request should create an additional instructor role for existing_user
new_user = UserFactory()
assert CourseAccessRole.objects.filter(course_id=course_run.id).count() == 1
data = {
'team': [
{
'user': existing_user.username,
'role': instructor_role,
},
{
'user': new_user.username,
'role': instructor_role,
},
],
}
course_run_detail_url = reverse('api:v1:course_run-detail', kwargs={'pk': str(course_run.id)})
response = self.client.patch(course_run_detail_url, data, format='json')
assert response.status_code == 200
self.assert_access_role(course_run, existing_user, instructor_role)
self.assert_access_role(course_run, new_user, instructor_role)
self.assert_course_access_role_count(course_run, 3)
@ddt.data(
('instructor_paced', False),
('self_paced', True),
)
@ddt.unpack
def test_create(self, pacing_type, expected_self_paced_value):
"""Tests successful course run creation"""
user = UserFactory()
start = datetime.datetime.now(pytz.UTC).replace(microsecond=0)
end = start + datetime.timedelta(days=30)
role = 'staff'
data = self.get_course_run_data(user, start, end, pacing_type, role)
response = self.client.post(self.list_url, data, format='json')
self.assertEqual(response.status_code, 201)
course_run_key = CourseKey.from_string(response.data['id'])
course_run = modulestore().get_course(course_run_key)
self.assertEqual(course_run.display_name, data['title'])
self.assertEqual(course_run.id.org, data['org'])
self.assertEqual(course_run.id.course, data['number'])
self.assertEqual(course_run.id.run, data['run'])
self.assertEqual(course_run.self_paced, expected_self_paced_value)
self.assert_course_run_schedule(course_run, start, end)
self.assert_access_role(course_run, user, role)
self.assert_course_access_role_count(course_run, 1)
def test_create_with_invalid_course_team(self):
"""
Tests that if the course team user is invalid, it returns bad request status
with expected validation message
"""
user = UserFactory()
start = datetime.datetime.now(pytz.UTC).replace(microsecond=0)
end = start + datetime.timedelta(days=30)
data = self.get_course_run_data(user, start, end, 'self-paced')
data['team'] = [{'user': 'invalid-username'}]
response = self.client.post(self.list_url, data, format='json')
self.assertEqual(response.status_code, 400)
self.assertDictContainsSubset({'team': ['Course team user does not exist']}, response.data)
def test_images_upload(self):
# http://www.django-rest-framework.org/api-guide/parsers/#fileuploadparser
course_run = CourseFactory()
expected_filename = 'course_image.png'
content_key = StaticContent.compute_location(course_run.id, expected_filename)
assert course_run.course_image != expected_filename
try:
contentstore().find(content_key)
self.fail('No image should be associated with a new course run.')
except NotFoundError:
pass
url = reverse('api:v1:course_run-images', kwargs={'pk': str(course_run.id)})
# PNG. Single black pixel
content = b'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00\x00\x01\x08\x02\x00\x00\x00\x90wS' \
b'\xde\x00\x00\x00\x0cIDATx\x9cc```\x00\x00\x00\x04\x00\x01\xf6\x178U\x00\x00\x00\x00IEND\xaeB`\x82'
# We are intentionally passing the incorrect JPEG extension here
upload = SimpleUploadedFile('card_image.jpg', content, content_type='image/png')
response = self.client.post(url, {'card_image': upload}, format='multipart')
assert response.status_code == 200
course_run = modulestore().get_course(course_run.id)
assert course_run.course_image == expected_filename
expected = {'card_image': RequestFactory().get('').build_absolute_uri(course_image_url(course_run))}
assert response.data == expected
# There should now be an image stored
contentstore().find(content_key)
@ddt.data(
('instructor_paced', False),
('self_paced', True),
)
@ddt.unpack
def test_rerun(self, pacing_type, expected_self_paced_value):
course_run = ToyCourseFactory()
start = datetime.datetime.now(pytz.UTC).replace(microsecond=0)
end = start + datetime.timedelta(days=30)
user = UserFactory()
role = 'instructor'
run = '3T2017'
url = reverse('api:v1:course_run-rerun', kwargs={'pk': str(course_run.id)})
data = {
'run': run,
'schedule': {
'start': serialize_datetime(start),
'end': serialize_datetime(end),
},
'team': [
{
'user': user.username,
'role': role,
}
],
'pacing_type': pacing_type,
}
response = self.client.post(url, data, format='json')
assert response.status_code == 201
course_run_key = CourseKey.from_string(response.data['id'])
course_run = modulestore().get_course(course_run_key)
assert course_run.id.run == run
assert course_run.self_paced is expected_self_paced_value
self.assert_course_run_schedule(course_run, start, end)
self.assert_access_role(course_run, user, role)
self.assert_course_access_role_count(course_run, 1)
def test_rerun_duplicate_run(self):
course_run = ToyCourseFactory()
url = reverse('api:v1:course_run-rerun', kwargs={'pk': str(course_run.id)})
data = {
'run': course_run.id.run,
}
response = self.client.post(url, data, format='json')
assert response.status_code == 400
assert response.data == {'run': [u'Course run {key} already exists'.format(key=course_run.id)]}
| jolyonb/edx-platform | cms/djangoapps/api/v1/tests/test_views/test_course_runs.py | Python | agpl-3.0 | 15,167 |
#!/usr/bin/env python3
import numpy as np
import matplotlib.pyplot as plt
from hidespines import *
import sys
from scipy.interpolate import interp1d
sys.path.append('../../code')
import ld as LD
def make(sol, ot, tol, label):
err = np.zeros(len(sol))
it = np.zeros(len(sol))
for i in range(len(sol)):
sol[i].setMMS()
x, phi, it[i] = sol[i].sourceIteration(tol, 1000)
phi_int = interp1d(x, phi)
err[i] = np.linalg.norm(sol[i].phi_mms(x) - phi, 2)/ \
np.linalg.norm(sol[i].phi_mms(x), 2)
	fig = plt.figure()
	ax = fig.add_subplot(111)
	twin = ax.twinx()
	ax.loglog(ot, err, '-o', clip_on=False, label=label)
	twin.loglog(ot, it, '-o')
n = 8
Sigmaa = lambda x: 1
Sigmat = lambda x: 10
ot = np.logspace(-1, .5, 5)
xb = 1
N = np.array([int(Sigmat(0)*xb/x) for x in ot])
print(N)
q = lambda x, mu: 1
tol = 1e-6
ed00 = [LD.Eddington(np.linspace(0, xb, x+1), n, Sigmaa,
Sigmat, q, OPT=0, GAUSS=0) for x in N]
ed01 = [LD.Eddington(np.linspace(0, xb, x+1), n, Sigmaa,
Sigmat, q, OPT=0, GAUSS=1) for x in N]
ed20 = [LD.Eddington(np.linspace(0, xb, x+1), n, Sigmaa,
Sigmat,q, OPT=2, GAUSS=0) for x in N]
ed21 = [LD.Eddington(np.linspace(0, xb, x+1), n, Sigmaa,
Sigmat, q, OPT=2, GAUSS=1) for x in N]
s2 = [LD.S2SA(np.linspace(0, xb, x+1), n, Sigmaa,
Sigmat, q) for x in N]
make(ed00, ot, tol, 'None, Average')
# make(s2, ot, tol, 'S2SA')
plt.show() | smsolivier/VEF | tex/paper/coarse.py | Python | mit | 1,371 |
# -*- coding: utf-8 -*-
"""Test XML parsing in XBlocks."""
import re
import StringIO
import textwrap
import unittest
from xblock.core import XBlock
from xblock.fields import Scope, String, Integer
from xblock.test.tools import blocks_are_equivalent
from xblock.test.toy_runtime import ToyRuntime
# XBlock classes to use in the tests.
class Leaf(XBlock):
"""Something we can parse from XML."""
data1 = String(default="default_value", scope=Scope.user_state)
data2 = String(default="default_value", scope=Scope.user_state)
content = String(default="", scope=Scope.content)
class Container(XBlock):
"""A thing with children."""
has_children = True
class Specialized(XBlock):
"""A block that wants to do its own XML parsing."""
num_children = Integer(default=0, scope=Scope.user_state)
@classmethod
def parse_xml(cls, node, runtime, keys, id_generator):
"""We'll just set num_children to the number of child nodes."""
block = runtime.construct_xblock_from_class(cls, keys)
block.num_children = len(node)
return block
# Helpers
class XmlTest(object):
"""Helpful things for XML tests."""
def setUp(self):
super(XmlTest, self).setUp()
self.runtime = ToyRuntime()
def parse_xml_to_block(self, xml):
"""A helper to get a block from some XML."""
# ToyRuntime has an id_generator, but most runtimes won't
# (because the generator will be contextual), so we
# pass it explicitly to parse_xml_string.
usage_id = self.runtime.parse_xml_string(xml, self.runtime.id_generator)
block = self.runtime.get_block(usage_id)
return block
def export_xml_for_block(self, block):
"""A helper to return the XML string for a block."""
output = StringIO.StringIO()
self.runtime.export_to_xml(block, output)
return output.getvalue()
# Tests!
class ParsingTest(XmlTest, unittest.TestCase):
"""Tests of XML parsing."""
@XBlock.register_temp_plugin(Leaf)
def test_parsing(self):
block = self.parse_xml_to_block("<leaf data2='parsed'/>")
self.assertIsInstance(block, Leaf)
self.assertEqual(block.data1, "default_value")
self.assertEqual(block.data2, "parsed")
self.assertEqual(block.content, "")
@XBlock.register_temp_plugin(Leaf)
def test_parsing_content(self):
block = self.parse_xml_to_block("<leaf>my text!</leaf>")
self.assertIsInstance(block, Leaf)
self.assertEqual(block.content, "my text!")
@XBlock.register_temp_plugin(Leaf)
@XBlock.register_temp_plugin(Container)
def test_parsing_children(self):
block = self.parse_xml_to_block("""\
<container>
<leaf data1='child1'/>
<leaf data1='child2'/>
</container>
""")
self.assertIsInstance(block, Container)
self.assertEqual(len(block.children), 2)
child1 = self.runtime.get_block(block.children[0])
self.assertIsInstance(child1, Leaf)
self.assertEqual(child1.data1, "child1")
self.assertEqual(child1.parent, block.scope_ids.usage_id)
child2 = self.runtime.get_block(block.children[1])
self.assertIsInstance(child2, Leaf)
self.assertEqual(child2.data1, "child2")
self.assertEqual(child2.parent, block.scope_ids.usage_id)
@XBlock.register_temp_plugin(Leaf)
@XBlock.register_temp_plugin(Specialized)
def test_customized_parsing(self):
block = self.parse_xml_to_block("""\
<specialized>
<leaf/><leaf/><leaf/>
</specialized>
""")
self.assertIsInstance(block, Specialized)
self.assertEqual(block.num_children, 3)
@XBlock.register_temp_plugin(Leaf)
def test_parse_unicode(self):
block = self.parse_xml_to_block(u"<leaf data1='\u2603' />")
self.assertIsInstance(block, Leaf)
self.assertEqual(block.data1, u'\u2603')
class ExportTest(XmlTest, unittest.TestCase):
"""Tests of the XML export facility."""
@XBlock.register_temp_plugin(Leaf)
def test_dead_simple_export(self):
block = self.parse_xml_to_block("<leaf/>")
xml = self.export_xml_for_block(block)
        self.assertEqual(
            xml.strip(),
            "<?xml version='1.0' encoding='UTF-8'?>\n<leaf/>"
        )
@XBlock.register_temp_plugin(Leaf)
@XBlock.register_temp_plugin(Container)
def test_export_then_import(self):
block = self.parse_xml_to_block(textwrap.dedent("""\
<?xml version='1.0' encoding='utf-8'?>
<container>
              <leaf data1='child1' data2="I'm also child1" />
<leaf data2="me too!" data1='child2' ></leaf>
<container>
<leaf data1='ʇxǝʇ uʍop-ǝpısdn' data2='whoa'>
ᵾnɨȼøđɇ ȼȺn ƀɇ ŧɍɨȼꝁɏ!
</leaf>
</container>
<leaf>Some text content.</leaf>
</container>
"""))
xml = self.export_xml_for_block(block)
block_imported = self.parse_xml_to_block(xml)
print repr(xml) # so if the test fails, we can see it.
# Crude checks that the XML is correct. The exact form of the XML
# isn't important.
self.assertEqual(xml.count("container"), 4)
self.assertEqual(xml.count("child1"), 2)
self.assertEqual(xml.count("child2"), 1)
self.assertEqual(xml.count("ʇxǝʇ uʍop-ǝpısdn"), 1)
self.assertEqual(xml.count("ᵾnɨȼøđɇ ȼȺn ƀɇ ŧɍɨȼꝁɏ!"), 1)
# The important part: exporting then importing a block should give
# you an equivalent block.
self.assertTrue(blocks_are_equivalent(block, block_imported))
def squish(text):
"""Turn any run of whitespace into one space."""
return re.sub(r"\s+", " ", text)
| IONISx/XBlock | xblock/test/test_parsing.py | Python | agpl-3.0 | 6,043 |
class BinaryCode:
def decode(self, message):
zero_string = self.decode_message(message, 0)
one_string = self.decode_message(message, 1)
return(zero_string, one_string)
    def decode_message(self, message, init):
        decode_string = ""
        p0 = init
        p1 = int(message[0]) - p0
        decode_string = decode_string + str(p0) + str(p1)
        for i in range(1, len(message) - 1):
            p = int(message[i]) - p0 - p1
            decode_string = decode_string + str(p)
            p0 = p1
            p1 = p
        # The last encoded digit must equal the sum of the final two decoded
        # digits; otherwise this choice for the first digit is inconsistent.
        if int(message[-1]) != p0 + p1:
            return "NONE"
        for character in decode_string:
            if character != '1' and character != '0':
                decode_string = "NONE"
return decode_string | mikefeneley/topcoder | src/SRM-144/binarycode.py | Python | mit | 641 |
# -----------------------------------------------------------------------------
# description
#
# a very basic working version of commands to generate a tokenized word
# list and associated nltk text class for further nlp resulting in a
# sorted list of the most frequently occurring phrases found in the cs
# rep's messages
#
# tokenize list and then concatenate it by length, in order to take
# advantage of the nltk FreqDist function for sorting occurrences of
# phrases in addition to single words. the delimiter in between the
# concatenated words will be removed when the master list is created
#
# structure of challenge project data
# dict
# -'NumTextMessages'
# -int (22264)
# -'Issues'
# -list of dict (1508)
# -'Messages'
# -list of dict
# -'Text'
# -'IsFromCustomer'
# -list of
# -'IssueId'
# -int
# -'CompanyGroupId'
# -int
# -----------------------------------------------------------------------------
from __future__ import division
import nltk
import re
import json
def make_json_obj(filename):
f = file(filename)
text = f.read()
del f
obj = json.loads(text)
return obj
def grab_csrep_messages(obj):
count = 0
csrep_messages = ''
num_text_messages = obj['NumTextMessages']
for x0 in obj['Issues']:
for x1 in x0['Messages']:
if not x1['IsFromCustomer']:
count = count + 1
csrep_messages = csrep_messages + x1['Text']
return csrep_messages
def fdist_phrases(corpus, phrase_length, occurrences, delimiter,
master_list):
L = len(corpus)
phrases = corpus[0:L-phrase_length+1]
for x0 in range(L-phrase_length+1):
for x1 in range(phrase_length)[1:phrase_length]:
phrases[x0] = phrases[x0] + delimiter + corpus[x0+x1]
if type(corpus) == list:
fdist = nltk.FreqDist(nltk.Text(phrases))
elif type(corpus) == nltk.text.Text:
fdist = nltk.FreqDist(phrases)
common_occurrences = fdist.most_common()
for x in range(len(common_occurrences)):
if common_occurrences[x][1] <= occurrences:
break
# note: how to account for preference of 'Are you available
# tomorrow?' over 'Are you available tomorrow'. how best to
# take into account the preference for the question mark
common_occurrence_corrected = common_occurrences[x][0].\
replace(delimiter, ' ')
for x1 in '!?,\'".':
common_occurrence_corrected = common_occurrence_corrected.\
replace(' '+x1, x1)
master_list.append((common_occurrence_corrected, common_occurrences[x][1]))
master_list = sorted(master_list, key=lambda byColumn: byColumn[0])
master_list_letters_only = []
for x in range(len(master_list)):
interior = len(master_list[x][0])-1
if re.search('[a-zA-Z]', master_list[x][0][0]) and\
not re.search('\.!', master_list[x][0][0:interior]):
master_list_letters_only.append(master_list[x])
return master_list_letters_only
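# Illustrative sketch (not part of the original script): a tiny run of
# fdist_phrases showing how the delimiter-concatenation trick described in the
# header comment turns bigram counting into a plain FreqDist problem. The toy
# sentence, the '_' delimiter and the thresholds are made up for the example.
def _bigram_demo():
    toy_corpus = nltk.word_tokenize(
        'How can I help you today? How can I help you?')
    # phrase_length=2, keep only phrases seen more than once
    return fdist_phrases(toy_corpus, 2, 1, '_', [])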
# need function for generating exceptions when something unexpected (e.g. a
# bracket) is in the message, for manual oversight
def get_autosuggestions(string, num_matches, search_set):
matches = [x for x in search_set if re.search(string, x[0][0:len(string)])]
matches = sorted(matches, key=lambda byColumn: byColumn[1], reverse=True)
L = len(matches)
matches = matches[0:num_matches]
autosuggestions = []
for x in range(L):
if x == num_matches:
break
autosuggestions.append(matches[x][0])
return autosuggestions
# correct punctuation in list. can remove anything with a sentence transition
conversations_text = make_json_obj('sample_conversations.json')
csrep_message_list = grab_csrep_messages(conversations_text)
corpus = nltk.word_tokenize(csrep_message_list)
autosuggestion_list = []
for x in range(5):
autosuggestion_list = fdist_phrases(corpus, x+1, 3, '_', autosuggestion_list)
autosuggestion_set = set(autosuggestion_list)
autosuggestion_dict = dict(autosuggestion_list)
# if __name__ == '__main__':
# main() | alankruppa/asapp-cp | challenge_data_model.py | Python | gpl-2.0 | 4,154 |
"""
WSGI config for joaozenos project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "joaozenos.settings")
from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
application = get_wsgi_application()
application = DjangoWhiteNoise(application) | joaozenos/joaozenos-python | joaozenos/wsgi.py | Python | mit | 484 |
"""
Define command names and prove command/code mappings
"""
from collections import ChainMap
__all__ = [
'nibble_commands',
'byte_commands',
'sysex_commands',
'command_lookup',
'command_names',
]
INPUT, OUTPUT, ANALOG, \
PWM, SERVO, I2C, ONEWIRE, \
STEPPER, ENCODER = range(0, 9)
# do not combine names and packet structure:
# packets sometimes share same name and code, but have different
# structure depending on the origin (firmata or client)
# do not combine them: their membership to a particular
# group defines the packet structure that builds them
nibble_commands = {
0xE0: ('analog_io_message', ('pin', 'value')),
0x90: ('digital_io_message', ('port', 'value')),
0xC0: ('report_analog_pin', ('pin', 'value')),
0xD0: ('report_digital_port', ('port', 'value')),
}
byte_commands = {
0xF0: ('start_sysex', ()),
0xF4: ('set_pin_mode', ('pin', 'mode')),
0xF5: ('set_digital_pin_value', ('pin', 'value')),
0xF7: ('stop_sysex', ()),
0xF9: ('protocol_version', ('major', 'minor'))
}
sysex_commands = {
0x61: ('encoder_data', ()),
0x69: ('analog_mapping_query', ()),
0x6A: ('analog_mapping_response', ()),
0x6B: ('capability_query', ()),
0x6C: ('capability_response', ()),
0x6D: ('pin_state_query', ()),
0x6E: ('pin_state_response', ()),
0x6F: ('extended_analog', ()),
0x70: ('servo_config', ()),
0x71: ('string_data', ()),
0x72: ('stepper_data', ()),
0x73: ('onewire_data', ()),
0x75: ('shift_data', ()),
0x76: ('i2c_request', ()),
0x77: ('i2c_reply', ()),
0x78: ('i2c_config', ()),
0x79: ('report_firmware', ()),
0x7A: ('sampling_interval', ()),
0x7B: ('scheduler_data', ()),
0x7E: ('sysex_non_realtime', ()),
0x7F: ('sysex_realtime', ()),
}
# Code => Name mapping for all types
command_names = ChainMap(nibble_commands, byte_commands, sysex_commands)
# Name => Code mapping for all types
command_lookup = {v[0]: k for k, v in command_names.items()}
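# Illustrative sketch (not part of the original module): how the two ChainMap
# lookups above are typically used. The 0x90 code and the 'report_firmware'
# name are taken from the tables above; the function itself is hypothetical.
def _lookup_demo():
    name, fields = command_names[0x90]        # ('digital_io_message', ('port', 'value'))
    code = command_lookup['report_firmware']  # 0x79
    return name, fields, code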
| bitcraft/firmata_aio | firmata_aio/protocol/commands.py | Python | gpl-3.0 | 1,998 |
# -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2018 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Configuration for Zenodo stats."""
ZENODO_STATS_PIWIK_EXPORTER = {
'id_site': 1,
'url': 'https://analytics.openaire.eu/piwik.php',
'token_auth': 'api-token',
'chunk_size': 50 # [max piwik payload size = 64k] / [max querystring size = 750]
}
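# Rough arithmetic behind the chunk size above (an illustrative reading of the
# inline comment, not an authoritative limit): 64 * 1024 / 750 is roughly 87
# URL-encoded events per request, so 50 leaves a comfortable safety margin.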
ZENODO_STATS_PIWIK_EXPORT_ENABLED = True
# Queries performed when processing aggregations might take more time than
# usual. This is fine though, since this is happening during Celery tasks.
ZENODO_STATS_ELASTICSEARCH_CLIENT_CONFIG = {'timeout': 60}
| slint/zenodo | zenodo/modules/stats/config.py | Python | gpl-2.0 | 1,482 |
'''
@author: FangSun
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.operations.primarystorage_operations as ps_ops
import random
_config_ = {
'timeout' : 3000,
'noparallel' : True
}
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
VM_COUNT = 1
VOLUME_NUMBER = 0
new_ps_list = []
disabled_ps_list = []
@test_stub.skip_if_local_shared
def test():
ps_env = test_stub.PSEnvChecker()
if ps_env.is_sb_ceph_env:
env = test_stub.SanAndCephPrimaryStorageEnv(test_object_dict=test_obj_dict,
first_ps_vm_number=VM_COUNT,
second_ps_vm_number=VM_COUNT,
first_ps_volume_number=VOLUME_NUMBER,
second_ps_volume_number=VOLUME_NUMBER)
else:
env = test_stub.TwoPrimaryStorageEnv(test_object_dict=test_obj_dict,
first_ps_vm_number=VM_COUNT,
second_ps_vm_number=VM_COUNT,
first_ps_volume_number=VOLUME_NUMBER,
second_ps_volume_number=VOLUME_NUMBER)
env.check_env()
env.deploy_env()
first_ps_vm_list = env.first_ps_vm_list
first_ps_volume_list = env.first_ps_volume_list
second_ps_vm_list = env.second_ps_vm_list
second_ps_volume_list = env.second_ps_volume_list
if env.new_ps:
new_ps_list.append(env.second_ps)
tbj_list = first_ps_vm_list + second_ps_vm_list + first_ps_volume_list + second_ps_volume_list
test_util.test_dsc('Disable random one Primary Storage')
disabled_ps = random.choice([env.first_ps, env.second_ps])
if disabled_ps is env.first_ps:
enabled_ps = env.second_ps
else:
enabled_ps = env.first_ps
ps_ops.change_primary_storage_state(disabled_ps.uuid, state='disable')
disabled_ps_list.append(disabled_ps)
test_util.test_dsc('make sure all VM and Volumes still OK and running')
for test_object in tbj_list:
test_object.check()
test_util.test_dsc("Try to Create vm in disabeld ps")
with test_stub.expected_failure("Create vm in disabled ps", Exception):
test_stub.create_multi_vms(name_prefix='test-vm', count=1, ps_uuid=disabled_ps.uuid)
test_util.test_dsc("Create 5 vms and check all should be in enabled PS")
if ps_env.is_sb_ceph_env:
        if disabled_ps is env.first_ps:
vm_list = test_stub.create_multi_vms(name_prefix='test_vm', count=5, ps_uuid=enabled_ps.uuid, bs_type="Ceph")
else:
vm_list = test_stub.create_multi_vms(name_prefix='test_vm', count=5, ps_uuid=enabled_ps.uuid, bs_type="ImageStoreBackupStorage")
else:
vm_list = test_stub.create_multi_vms(name_prefix='test_vm', count=5)
for vm in vm_list:
test_obj_dict.add_vm(vm)
for vm in vm_list:
assert vm.get_vm().allVolumes[0].primaryStorageUuid != disabled_ps.uuid
ps_ops.change_primary_storage_state(disabled_ps.uuid, state='enable')
disabled_ps_list.pop()
test_util.test_dsc("Create 1 vms in the recovered ps")
if ps_env.is_sb_ceph_env:
        if disabled_ps is env.first_ps:
vm = test_stub.create_multi_vms(name_prefix='test_vm', count=1, ps_uuid=disabled_ps.uuid, bs_type="ImageStoreBackupStorage")[0]
else:
vm = test_stub.create_multi_vms(name_prefix='test_vm', count=1, ps_uuid=disabled_ps.uuid, bs_type="Ceph")[0]
else:
vm = test_stub.create_multi_vms(name_prefix='test_vm', count=1, ps_uuid=disabled_ps.uuid)[0]
test_obj_dict.add_vm(vm)
test_util.test_pass('Multi PrimaryStorage Test Pass')
def env_recover():
test_lib.lib_error_cleanup(test_obj_dict)
for disabled_ps in disabled_ps_list:
ps_ops.change_primary_storage_state(disabled_ps.uuid, state='enable')
if new_ps_list:
for new_ps in new_ps_list:
ps_ops.detach_primary_storage(new_ps.uuid, new_ps.attachedClusterUuids[0])
ps_ops.delete_primary_storage(new_ps.uuid)
| zstackio/zstack-woodpecker | integrationtest/vm/multihosts/multiPrimaryStorage/test_one_ps_disabled.py | Python | apache-2.0 | 4,290 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
class Model(object):
"""Defines an abstract network model for use with RLlib.
Models convert input tensors to a number of output features. These features
can then be interpreted by ActionDistribution classes to determine
e.g. agent action values.
The last layer of the network can also be retrieved if the algorithm
needs to further post-processing (e.g. Actor and Critic networks in A3C).
If options["free_log_std"] is True, the last half of the
output layer will be free variables that are not dependent on
inputs. This is often used if the output of the network is used
to parametrize a probability distribution. In this case, the
first half of the parameters can be interpreted as a location
parameter (like a mean) and the second half can be interpreted as
a scale parameter (like a standard deviation).
Attributes:
inputs (Tensor): The input placeholder for this model.
outputs (Tensor): The output vector of this model.
last_layer (Tensor): The network layer right before the model output.
"""
def __init__(self, inputs, num_outputs, options):
self.inputs = inputs
if options.get("free_log_std", False):
assert num_outputs % 2 == 0
num_outputs = num_outputs // 2
self.outputs, self.last_layer = self._init(
inputs, num_outputs, options)
if options.get("free_log_std", False):
log_std = tf.get_variable(name="log_std", shape=[num_outputs],
initializer=tf.zeros_initializer)
self.outputs = tf.concat(
[self.outputs, 0.0 * self.outputs + log_std], 1)
    def _init(self, inputs, num_outputs, options):
"""Builds and returns the output and last layer of the network."""
raise NotImplementedError
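# Illustrative sketch (not part of the original module): a minimal concrete
# Model with two tanh hidden layers. The "fcnet_hiddens" option key, the layer
# sizes and the class name are assumptions made for this example only.
class _ExampleFCModel(Model):
    def _init(self, inputs, num_outputs, options):
        hiddens = options.get("fcnet_hiddens", [64, 64])
        last_layer = inputs
        for i, size in enumerate(hiddens):
            # hidden layers feed forward; the final dense layer is linear
            last_layer = tf.layers.dense(
                last_layer, size, activation=tf.nn.tanh, name="fc{}".format(i))
        outputs = tf.layers.dense(
            last_layer, num_outputs, activation=None, name="fc_out")
        return outputs, last_layer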
| alanamarzoev/ray | python/ray/rllib/models/model.py | Python | apache-2.0 | 1,961 |
# -*- coding: utf-8 -*-
import json
from datetime import datetime
import sqlalchemy as sa
from aiopg.sa.result import ResultProxy
from .db import TLE, insert_tle
from .log import logger
from .utils import audit, send_pg_notify
__all__ = (
'get_tle_dt',
'nasa',
'space_track',
)
@audit(logger=logger)
async def space_track(engine, api, satellites):
satellites = {norad_cat_id: None for norad_cat_id in satellites}
async with engine.acquire() as conn:
satellites.update({row.norad_cat_id: row.dt async for row in get_latest_dt(conn)})
for norad_cat_id, dt in satellites.items():
params = dict(NORAD_CAT_ID=norad_cat_id)
if dt:
params['EPOCH'] = '>{:%Y-%m-%d %H:%M:%S}'.format(dt)
tles = await api.tle(**params)
if isinstance(tles, str):
raise KeyboardInterrupt(tles)
got_count = len(tles)
logger.debug('[%s] Got %d tle', space_track.__name__, got_count)
result = await store_tles(conn, convert_tles(tles))
store_count = result.rowcount if isinstance(result, ResultProxy) else 0
logger.debug('[%s] Stored %d tle', space_track.__name__, store_count)
await send_pg_notify(conn, 'tle:space-track:update', json.dumps(dict(got=got_count,
stored=store_count)))
@audit(logger=logger)
async def nasa(engine, api):
async with engine.acquire() as conn:
tles = (tle.as_dict() for tle in await api.get())
values = [tle for tle in convert_tles(tles, 'nasa')]
got_count = len(values)
logger.debug('[%s] Got %d tle', nasa.__name__, got_count)
if not got_count:
return
expires = await delete_expires_tle(conn)
expires_count = expires.rowcount if expires else 0
logger.debug('[%s] Expires %d tle', nasa.__name__, expires_count)
dts = [row.dt async for row in get_latest_dt(conn, norad_cat_id=25544)]
if dts:
max_dt = max(dts)
values = filter(lambda value: value['dt'] > max_dt, values)
result = await store_tles(conn, values)
store_count = result.rowcount if isinstance(result, ResultProxy) else 0
logger.debug('[%s] Stored %d tle', nasa.__name__, store_count)
await send_pg_notify(conn, 'tle:nasa:update', json.dumps(dict(got=got_count,
stored=store_count,
expires=expires_count)))
def get_tle_dt(tle):
return datetime.strptime('{EPOCH}.{EPOCH_MICROSECONDS}'.format(**tle), '%Y-%m-%d %H:%M:%S.%f')
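# Illustrative sketch (not part of the original module): get_tle_dt expects the
# space-track style EPOCH / EPOCH_MICROSECONDS fields; the values below are
# made up for the example and parse to 2018-01-02 03:04:05.123456.
def _example_tle_dt():
    return get_tle_dt({'EPOCH': '2018-01-02 03:04:05',
                       'EPOCH_MICROSECONDS': '123456'})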
def get_latest_dt(conn, source=None, norad_cat_id=None):
if source is None:
source = 'space-track'
stmt = sa.select([
TLE.c.norad_cat_id, sa.func.max(TLE.c.dt).label('dt')
]).where(
TLE.c.source == source
).group_by(
TLE.c.norad_cat_id
)
if isinstance(norad_cat_id, int):
stmt = stmt.where(TLE.c.norad_cat_id == norad_cat_id)
elif isinstance(norad_cat_id, list):
stmt = stmt.where(TLE.c.norad_cat_id.in_(norad_cat_id))
return conn.execute(stmt)
def convert_tles(tles, source=None):
if not isinstance(source, str):
source = 'space-track'
return [
dict(norad_cat_id=tle['NORAD_CAT_ID'],
dt=get_tle_dt(tle),
tle_line1=tle['TLE_LINE1'],
tle_line2=tle['TLE_LINE2'],
extra_info={k.lower(): v for k, v in tle.items()},
source=source)
for tle in tles
]
async def store_tles(conn, tles):
if not tles:
return
if not isinstance(tles, list):
tles = list(tles)
return await insert_tle(conn, tles)
async def delete_expires_tle(conn):
return await conn.execute(TLE.delete().where(TLE.c.source == 'nasa').returning(TLE))
| nkoshell/tle-storage-service | tle_storage_service/tle.py | Python | mit | 3,997 |
import requests
from bitfinex.bitfinex_config import DEFAULT
from bitfinex.ticker import Ticker
__author__ = 'Gengyu Shi'
URL_PREFIX = DEFAULT.url_prefix
def ticker(symbol):
return Ticker(requests.get(URL_PREFIX + "/pubticker/" + symbol, verify=True).json())
DEFAULT.url_prefix
| shigengyu/bitfinex-python | bitfinex/rest_api.py | Python | mit | 289 |
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
# (c) 2005 Ian Bicking, Clark C. Evans and contributors
# This module is part of the Python Paste Project and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
This module handles sending static content such as in-memory data or
files. At this time it has cache helpers and understands the
if-modified-since request header.
"""
import os, time, mimetypes, zipfile, tarfile
from paste.httpexceptions import *
from paste.httpheaders import *
CACHE_SIZE = 4096
BLOCK_SIZE = 4096 * 16
__all__ = ['DataApp', 'FileApp', 'DirectoryApp', 'ArchiveStore']
class DataApp(object):
"""
Returns an application that will send content in a single chunk,
this application has support for setting cache-control and for
responding to conditional (or HEAD) requests.
Constructor Arguments:
``content`` the content being sent to the client
``headers`` the headers to send /w the response
The remaining ``kwargs`` correspond to headers, where the
underscore is replaced with a dash. These values are only
added to the headers if they are not already provided; thus,
they can be used for default values. Examples include, but
are not limited to:
``content_type``
``content_encoding``
``content_location``
``cache_control()``
This method provides validated construction of the ``Cache-Control``
header as well as providing for automated filling out of the
``EXPIRES`` header for HTTP/1.0 clients.
``set_content()``
This method provides a mechanism to set the content after the
application has been constructed. This method does things
like changing ``Last-Modified`` and ``Content-Length`` headers.
"""
allowed_methods = ('GET', 'HEAD')
def __init__(self, content, headers=None, allowed_methods=None,
**kwargs):
assert isinstance(headers, (type(None), list))
self.expires = None
self.content = None
self.content_length = None
self.last_modified = 0
if allowed_methods is not None:
self.allowed_methods = allowed_methods
self.headers = headers or []
for (k, v) in kwargs.items():
header = get_header(k)
header.update(self.headers, v)
ACCEPT_RANGES.update(self.headers, bytes=True)
if not CONTENT_TYPE(self.headers):
CONTENT_TYPE.update(self.headers)
if content is not None:
self.set_content(content)
def cache_control(self, **kwargs):
self.expires = CACHE_CONTROL.apply(self.headers, **kwargs) or None
return self
def set_content(self, content, last_modified=None):
assert content is not None
if last_modified is None:
self.last_modified = time.time()
else:
self.last_modified = last_modified
self.content = content
self.content_length = len(content)
LAST_MODIFIED.update(self.headers, time=self.last_modified)
return self
def content_disposition(self, **kwargs):
CONTENT_DISPOSITION.apply(self.headers, **kwargs)
return self
def __call__(self, environ, start_response):
method = environ['REQUEST_METHOD'].upper()
if method not in self.allowed_methods:
exc = HTTPMethodNotAllowed(
'You cannot %s a file' % method,
headers=[('Allow', ','.join(self.allowed_methods))])
return exc(environ, start_response)
return self.get(environ, start_response)
def calculate_etag(self):
return '"%s-%s"' % (self.last_modified, self.content_length)
def get(self, environ, start_response):
headers = self.headers[:]
current_etag = self.calculate_etag()
ETAG.update(headers, current_etag)
if self.expires is not None:
EXPIRES.update(headers, delta=self.expires)
try:
client_etags = IF_NONE_MATCH.parse(environ)
if client_etags:
for etag in client_etags:
if etag == current_etag or etag == '*':
# horribly inefficient, n^2 performance, yuck!
for head in list_headers(entity=True):
head.delete(headers)
start_response('304 Not Modified', headers)
return ['']
except HTTPBadRequest, exce:
return exce.wsgi_application(environ, start_response)
# If we get If-None-Match and If-Modified-Since, and
# If-None-Match doesn't match, then we should not try to
# figure out If-Modified-Since (which has 1-second granularity
# and just isn't as accurate)
if not client_etags:
try:
client_clock = IF_MODIFIED_SINCE.parse(environ)
if client_clock >= int(self.last_modified):
# horribly inefficient, n^2 performance, yuck!
for head in list_headers(entity=True):
head.delete(headers)
start_response('304 Not Modified', headers)
return [''] # empty body
except HTTPBadRequest, exce:
return exce.wsgi_application(environ, start_response)
(lower, upper) = (0, self.content_length - 1)
range = RANGE.parse(environ)
if range and 'bytes' == range[0] and 1 == len(range[1]):
(lower, upper) = range[1][0]
upper = upper or (self.content_length - 1)
if upper >= self.content_length or lower > upper:
return HTTPRequestRangeNotSatisfiable((
"Range request was made beyond the end of the content,\r\n"
"which is %s long.\r\n Range: %s\r\n") % (
self.content_length, RANGE(environ))
).wsgi_application(environ, start_response)
content_length = upper - lower + 1
CONTENT_RANGE.update(headers, first_byte=lower, last_byte=upper,
total_length = self.content_length)
CONTENT_LENGTH.update(headers, content_length)
if range or content_length != self.content_length:
start_response('206 Partial Content', headers)
else:
start_response('200 OK', headers)
if self.content is not None:
return [self.content[lower:upper+1]]
return (lower, content_length)
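# Illustrative usage sketch (not part of the original module): an in-memory
# document served with a one-hour cache policy. The body text, content type
# and max_age value are arbitrary choices for the example.
def _example_data_app():
    app = DataApp('Hello, world!', content_type='text/plain')
    app.cache_control(max_age=3600)
    return app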
class FileApp(DataApp):
"""
Returns an application that will send the file at the given
filename. Adds a mime type based on ``mimetypes.guess_type()``.
See DataApp for the arguments beyond ``filename``.
"""
def __init__(self, filename, headers=None, **kwargs):
self.filename = filename
content_type, content_encoding = self.guess_type()
if content_type and 'content_type' not in kwargs:
kwargs['content_type'] = content_type
if content_encoding and 'content_encoding' not in kwargs:
kwargs['content_encoding'] = content_encoding
DataApp.__init__(self, None, headers, **kwargs)
def guess_type(self):
return mimetypes.guess_type(self.filename)
def update(self, force=False):
stat = os.stat(self.filename)
if not force and stat.st_mtime == self.last_modified:
return
self.last_modified = stat.st_mtime
if stat.st_size < CACHE_SIZE:
fh = open(self.filename,"rb")
self.set_content(fh.read(), stat.st_mtime)
fh.close()
else:
self.content = None
self.content_length = stat.st_size
# This is updated automatically if self.set_content() is
# called
LAST_MODIFIED.update(self.headers, time=self.last_modified)
def get(self, environ, start_response):
is_head = environ['REQUEST_METHOD'].upper() == 'HEAD'
if 'max-age=0' in CACHE_CONTROL(environ).lower():
self.update(force=True) # RFC 2616 13.2.6
else:
self.update()
if not self.content:
if not os.path.exists(self.filename):
exc = HTTPNotFound(
'The resource does not exist',
comment="No file at %r" % self.filename)
return exc(environ, start_response)
try:
file = open(self.filename, 'rb')
except (IOError, OSError), e:
exc = HTTPForbidden(
'You are not permitted to view this file (%s)' % e)
return exc.wsgi_application(
environ, start_response)
retval = DataApp.get(self, environ, start_response)
if isinstance(retval, list):
# cached content, exception, or not-modified
if is_head:
return ['']
return retval
(lower, content_length) = retval
if is_head:
return ['']
file.seek(lower)
file_wrapper = environ.get('wsgi.file_wrapper', None)
if file_wrapper:
return file_wrapper(file, BLOCK_SIZE)
else:
return _FileIter(file, size=content_length)
class _FileIter(object):
def __init__(self, file, block_size=None, size=None):
self.file = file
self.size = size
self.block_size = block_size or BLOCK_SIZE
def __iter__(self):
return self
def next(self):
chunk_size = self.block_size
if self.size is not None:
if chunk_size > self.size:
chunk_size = self.size
self.size -= chunk_size
data = self.file.read(chunk_size)
if not data:
raise StopIteration
return data
def close(self):
self.file.close()
class DirectoryApp(object):
"""
Returns an application that dispatches requests to corresponding FileApps based on PATH_INFO.
FileApp instances are cached. This app makes sure not to serve any files that are not in a subdirectory.
To customize FileApp creation override ``DirectoryApp.make_fileapp``
"""
def __init__(self, path):
self.path = os.path.abspath(path)
if not self.path.endswith(os.path.sep):
self.path += os.path.sep
assert os.path.isdir(self.path)
self.cached_apps = {}
make_fileapp = FileApp
def __call__(self, environ, start_response):
path_info = environ['PATH_INFO']
app = self.cached_apps.get(path_info)
if app is None:
path = os.path.join(self.path, path_info.lstrip('/'))
if not os.path.normpath(path).startswith(self.path):
app = HTTPForbidden()
elif os.path.isfile(path):
app = self.make_fileapp(path)
self.cached_apps[path_info] = app
else:
app = HTTPNotFound(comment=path)
return app(environ, start_response)
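# Illustrative sketch (not part of the original module): customizing FileApp
# creation as the DirectoryApp docstring suggests. The one-hour max_age is an
# arbitrary value chosen for the example.
class _CachingDirectoryApp(DirectoryApp):
    def make_fileapp(self, path):
        return FileApp(path).cache_control(max_age=3600)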
class ArchiveStore(object):
"""
Returns an application that serves up a DataApp for items requested
in a given zip or tar archive.
Constructor Arguments:
``filepath`` the path to the archive being served
``cache_control()``
This method provides validated construction of the ``Cache-Control``
header as well as providing for automated filling out of the
``EXPIRES`` header for HTTP/1.0 clients.
"""
def __init__(self, filepath):
if zipfile.is_zipfile(filepath):
self.archive = zipfile.ZipFile(filepath,"r")
elif tarfile.is_tarfile(filepath):
self.archive = tarfile.TarFileCompat(filepath,"r")
else:
raise AssertionError("filepath '%s' is not a zip or tar " % filepath)
        self.expires = None
        self.last_modified = time.time()
        self.headers = []  # consulted by cache_control(); starts empty
        self.cache = {}
def cache_control(self, **kwargs):
self.expires = CACHE_CONTROL.apply(self.headers, **kwargs) or None
return self
def __call__(self, environ, start_response):
path = environ.get("PATH_INFO","")
if path.startswith("/"):
path = path[1:]
application = self.cache.get(path)
if application:
return application(environ, start_response)
try:
info = self.archive.getinfo(path)
except KeyError:
exc = HTTPNotFound("The file requested, '%s', was not found." % path)
return exc.wsgi_application(environ, start_response)
if info.filename.endswith("/"):
exc = HTTPNotFound("Path requested, '%s', is not a file." % path)
return exc.wsgi_application(environ, start_response)
content_type, content_encoding = mimetypes.guess_type(info.filename)
# 'None' is not a valid content-encoding, so don't set the header if
# mimetypes.guess_type returns None
if content_encoding is not None:
app = DataApp(None, content_type = content_type,
content_encoding = content_encoding)
else:
app = DataApp(None, content_type = content_type)
app.set_content(self.archive.read(path),
time.mktime(info.date_time + (0,0,0)))
self.cache[path] = app
app.expires = self.expires
return app(environ, start_response)
| kaldonis/ft-event-manager | src/lib/paste/fileapp.py | Python | gpl-2.0 | 13,617 |
# -*- coding: utf-8 -*-
"""
Tests of neo.io.brainwaredamio
"""
# needed for python 3 compatibility
from __future__ import absolute_import, division, print_function
import os.path
import sys
try:
import unittest2 as unittest
except ImportError:
import unittest
import numpy as np
import quantities as pq
from neo.core import (AnalogSignal, Block,
ChannelIndex, Segment)
from neo.io import BrainwareDamIO
from neo.test.iotest.common_io_test import BaseTestIO
from neo.test.tools import (assert_same_sub_schema,
assert_neo_object_is_compliant)
from neo.test.iotest.tools import create_generic_reader
PY_VER = sys.version_info[0]
def proc_dam(filename):
'''Load an dam file that has already been processed by the official matlab
file converter. That matlab data is saved to an m-file, which is then
converted to a numpy '.npz' file. This numpy file is the file actually
loaded. This function converts it to a neo block and returns the block.
This block can be compared to the block produced by BrainwareDamIO to
make sure BrainwareDamIO is working properly
block = proc_dam(filename)
filename: The file name of the numpy file to load. It should end with
'*_dam_py?.npz'. This will be converted to a neo 'file_origin' property
with the value '*.dam', so the filename to compare should fit that pattern.
'py?' should be 'py2' for the python 2 version of the numpy file or 'py3'
for the python 3 version of the numpy file.
example: filename = 'file1_dam_py2.npz'
dam file name = 'file1.dam'
'''
with np.load(filename) as damobj:
damfile = damobj.items()[0][1].flatten()
filename = os.path.basename(filename[:-12]+'.dam')
signals = [res.flatten() for res in damfile['signal']]
stimIndexes = [int(res[0, 0].tolist()) for res in damfile['stimIndex']]
timestamps = [res[0, 0] for res in damfile['timestamp']]
block = Block(file_origin=filename)
chx = ChannelIndex(file_origin=filename,
index=np.array([0]),
channel_ids=np.array([1]),
channel_names=np.array(['Chan1'], dtype='S'))
block.channel_indexes.append(chx)
params = [res['params'][0, 0].flatten() for res in damfile['stim']]
values = [res['values'][0, 0].flatten() for res in damfile['stim']]
params = [[res1[0] for res1 in res] for res in params]
values = [[res1 for res1 in res] for res in values]
stims = [dict(zip(param, value)) for param, value in zip(params, values)]
fulldam = zip(stimIndexes, timestamps, signals, stims)
for stimIndex, timestamp, signal, stim in fulldam:
sig = AnalogSignal(signal=signal*pq.mV,
t_start=timestamp*pq.d,
file_origin=filename,
sampling_period=1.*pq.s)
segment = Segment(file_origin=filename,
index=stimIndex,
**stim)
segment.analogsignals = [sig]
block.segments.append(segment)
block.create_many_to_one_relationship()
return block
class BrainwareDamIOTestCase(BaseTestIO, unittest.TestCase):
'''
Unit test testcase for neo.io.BrainwareDamIO
'''
ioclass = BrainwareDamIO
read_and_write_is_bijective = False
# These are the files it tries to read and test for compliance
files_to_test = ['block_300ms_4rep_1clust_part_ch1.dam',
'interleaved_500ms_5rep_ch2.dam',
'long_170s_1rep_1clust_ch2.dam',
'multi_500ms_mulitrep_ch1.dam',
'random_500ms_12rep_noclust_part_ch2.dam',
'sequence_500ms_5rep_ch2.dam']
# these are reference files to compare to
files_to_compare = ['block_300ms_4rep_1clust_part_ch1',
'interleaved_500ms_5rep_ch2',
'',
'multi_500ms_mulitrep_ch1',
'random_500ms_12rep_noclust_part_ch2',
'sequence_500ms_5rep_ch2']
# add the appropriate suffix depending on the python version
for i, fname in enumerate(files_to_compare):
if fname:
files_to_compare[i] += '_dam_py%s.npz' % PY_VER
# Will fetch from g-node if they don't already exist locally
# How does it know to do this before any of the other tests?
files_to_download = files_to_test + files_to_compare
def test_reading_same(self):
for ioobj, path in self.iter_io_objects(return_path=True):
obj_reader_base = create_generic_reader(ioobj, target=False)
obj_reader_single = create_generic_reader(ioobj)
obj_base = obj_reader_base()
obj_single = obj_reader_single()
try:
assert_same_sub_schema(obj_base, obj_single)
except BaseException as exc:
exc.args += ('from ' + os.path.basename(path),)
raise
def test_against_reference(self):
for filename, refname in zip(self.files_to_test,
self.files_to_compare):
if not refname:
continue
obj = self.read_file(filename=filename)
refobj = proc_dam(self.get_filename_path(refname))
try:
assert_neo_object_is_compliant(obj)
assert_neo_object_is_compliant(refobj)
assert_same_sub_schema(obj, refobj)
except BaseException as exc:
exc.args += ('from ' + filename,)
raise
if __name__ == '__main__':
unittest.main()
| CINPLA/expipe-dev | python-neo/neo/test/iotest/test_brainwaredamio.py | Python | gpl-3.0 | 5,709 |
from . import client, aioclient, parse
__all__ = [
'client',
'aioclient',
'parse'
]
| danking/hail | hail/python/hailtop/batch_client/__init__.py | Python | mit | 97 |
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from oslo_serialization import jsonutils
import webob
from nova.api.openstack.compute import image_metadata as image_metadata_v21
from nova import exception
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import image_fixtures
IMAGE_FIXTURES = image_fixtures.get_image_fixtures()
CHK_QUOTA_STR = 'nova.api.openstack.common.check_img_metadata_properties_quota'
def get_image_123():
return copy.deepcopy(IMAGE_FIXTURES)[0]
class ImageMetaDataTestV21(test.NoDBTestCase):
controller_class = image_metadata_v21.ImageMetadataController
invalid_request = exception.ValidationError
base_path = '/v2/%s/images/' % fakes.FAKE_PROJECT_ID
def setUp(self):
super(ImageMetaDataTestV21, self).setUp()
self.controller = self.controller_class()
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_index(self, get_all_mocked):
req = fakes.HTTPRequest.blank(self.base_path + '123/metadata')
res_dict = self.controller.index(req, '123')
expected = {'metadata': {'key1': 'value1'}}
self.assertEqual(res_dict, expected)
get_all_mocked.assert_called_once_with(mock.ANY, '123')
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_show(self, get_mocked):
req = fakes.HTTPRequest.blank(self.base_path + '123/metadata/key1')
res_dict = self.controller.show(req, '123', 'key1')
self.assertIn('meta', res_dict)
self.assertEqual(len(res_dict['meta']), 1)
self.assertEqual('value1', res_dict['meta']['key1'])
get_mocked.assert_called_once_with(mock.ANY, '123')
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_show_not_found(self, _get_mocked):
req = fakes.HTTPRequest.blank(self.base_path + '123/metadata/key9')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, req, '123', 'key9')
@mock.patch('nova.image.api.API.get',
side_effect=exception.ImageNotFound(image_id='100'))
def test_show_image_not_found(self, _get_mocked):
req = fakes.HTTPRequest.blank(self.base_path + '100/metadata/key1')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, req, '100', 'key9')
@mock.patch(CHK_QUOTA_STR)
@mock.patch('nova.image.api.API.update')
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_create(self, get_mocked, update_mocked, quota_mocked):
mock_result = copy.deepcopy(get_image_123())
mock_result['properties']['key7'] = 'value7'
update_mocked.return_value = mock_result
req = fakes.HTTPRequest.blank(self.base_path + '123/metadata')
req.method = 'POST'
body = {"metadata": {"key7": "value7"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
res = self.controller.create(req, '123', body=body)
get_mocked.assert_called_once_with(mock.ANY, '123')
expected = copy.deepcopy(get_image_123())
expected['properties'] = {
'key1': 'value1', # existing meta
'key7': 'value7' # new meta
}
quota_mocked.assert_called_once_with(mock.ANY, expected["properties"])
update_mocked.assert_called_once_with(mock.ANY, '123', expected,
data=None, purge_props=True)
expected_output = {'metadata': {'key1': 'value1', 'key7': 'value7'}}
self.assertEqual(expected_output, res)
@mock.patch(CHK_QUOTA_STR)
@mock.patch('nova.image.api.API.update')
@mock.patch('nova.image.api.API.get',
side_effect=exception.ImageNotFound(image_id='100'))
def test_create_image_not_found(self, _get_mocked, update_mocked,
quota_mocked):
req = fakes.HTTPRequest.blank(self.base_path + '100/metadata')
req.method = 'POST'
body = {"metadata": {"key7": "value7"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.create, req, '100', body=body)
self.assertFalse(quota_mocked.called)
self.assertFalse(update_mocked.called)
@mock.patch(CHK_QUOTA_STR)
@mock.patch('nova.image.api.API.update')
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_update_all(self, get_mocked, update_mocked, quota_mocked):
req = fakes.HTTPRequest.blank(self.base_path + '123/metadata')
req.method = 'PUT'
body = {"metadata": {"key9": "value9"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
res = self.controller.update_all(req, '123', body=body)
get_mocked.assert_called_once_with(mock.ANY, '123')
expected = copy.deepcopy(get_image_123())
expected['properties'] = {
'key9': 'value9' # replace meta
}
quota_mocked.assert_called_once_with(mock.ANY, expected["properties"])
update_mocked.assert_called_once_with(mock.ANY, '123', expected,
data=None, purge_props=True)
expected_output = {'metadata': {'key9': 'value9'}}
self.assertEqual(expected_output, res)
@mock.patch(CHK_QUOTA_STR)
@mock.patch('nova.image.api.API.get',
side_effect=exception.ImageNotFound(image_id='100'))
def test_update_all_image_not_found(self, _get_mocked, quota_mocked):
req = fakes.HTTPRequest.blank(self.base_path + '100/metadata')
req.method = 'PUT'
body = {"metadata": {"key9": "value9"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.update_all, req, '100', body=body)
self.assertFalse(quota_mocked.called)
@mock.patch(CHK_QUOTA_STR)
@mock.patch('nova.image.api.API.update')
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_update_item(self, _get_mocked, update_mocked, quota_mocked):
req = fakes.HTTPRequest.blank(self.base_path + '123/metadata/key1')
req.method = 'PUT'
body = {"meta": {"key1": "zz"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
res = self.controller.update(req, '123', 'key1', body=body)
expected = copy.deepcopy(get_image_123())
expected['properties'] = {
'key1': 'zz' # changed meta
}
quota_mocked.assert_called_once_with(mock.ANY, expected["properties"])
update_mocked.assert_called_once_with(mock.ANY, '123', expected,
data=None, purge_props=True)
expected_output = {'meta': {'key1': 'zz'}}
self.assertEqual(res, expected_output)
@mock.patch(CHK_QUOTA_STR)
@mock.patch('nova.image.api.API.get',
side_effect=exception.ImageNotFound(image_id='100'))
def test_update_item_image_not_found(self, _get_mocked, quota_mocked):
req = fakes.HTTPRequest.blank(self.base_path + '100/metadata/key1')
req.method = 'PUT'
body = {"meta": {"key1": "zz"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.update, req, '100', 'key1',
body=body)
self.assertFalse(quota_mocked.called)
@mock.patch(CHK_QUOTA_STR)
@mock.patch('nova.image.api.API.update')
@mock.patch('nova.image.api.API.get')
def test_update_item_bad_body(self, get_mocked, update_mocked,
quota_mocked):
req = fakes.HTTPRequest.blank(self.base_path + '123/metadata/key1')
req.method = 'PUT'
body = {"key1": "zz"}
req.body = b''
req.headers["content-type"] = "application/json"
self.assertRaises(self.invalid_request,
self.controller.update, req, '123', 'key1',
body=body)
self.assertFalse(get_mocked.called)
self.assertFalse(quota_mocked.called)
self.assertFalse(update_mocked.called)
@mock.patch(CHK_QUOTA_STR,
side_effect=webob.exc.HTTPBadRequest())
@mock.patch('nova.image.api.API.update')
@mock.patch('nova.image.api.API.get')
def test_update_item_too_many_keys(self, get_mocked, update_mocked,
_quota_mocked):
req = fakes.HTTPRequest.blank(self.base_path + '123/metadata/key1')
req.method = 'PUT'
body = {"meta": {"foo": "bar"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, '123', 'key1',
body=body)
self.assertFalse(get_mocked.called)
self.assertFalse(update_mocked.called)
@mock.patch(CHK_QUOTA_STR)
@mock.patch('nova.image.api.API.update')
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_update_item_body_uri_mismatch(self, _get_mocked, update_mocked,
quota_mocked):
req = fakes.HTTPRequest.blank(self.base_path + '123/metadata/bad')
req.method = 'PUT'
body = {"meta": {"key1": "value1"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, '123', 'bad',
body=body)
self.assertFalse(quota_mocked.called)
self.assertFalse(update_mocked.called)
@mock.patch('nova.image.api.API.update')
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_delete(self, _get_mocked, update_mocked):
req = fakes.HTTPRequest.blank(self.base_path + '123/metadata/key1')
req.method = 'DELETE'
res = self.controller.delete(req, '123', 'key1')
expected = copy.deepcopy(get_image_123())
expected['properties'] = {}
update_mocked.assert_called_once_with(mock.ANY, '123', expected,
data=None, purge_props=True)
self.assertIsNone(res)
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_delete_not_found(self, _get_mocked):
req = fakes.HTTPRequest.blank(self.base_path + '123/metadata/blah')
req.method = 'DELETE'
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete, req, '123', 'blah')
@mock.patch('nova.image.api.API.get',
side_effect=exception.ImageNotFound(image_id='100'))
def test_delete_image_not_found(self, _get_mocked):
req = fakes.HTTPRequest.blank(self.base_path + '100/metadata/key1')
req.method = 'DELETE'
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete, req, '100', 'key1')
@mock.patch(CHK_QUOTA_STR,
side_effect=webob.exc.HTTPForbidden(explanation=''))
@mock.patch('nova.image.api.API.update')
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_too_many_metadata_items_on_create(self, _get_mocked,
update_mocked, _quota_mocked):
body = {"metadata": {"foo": "bar"}}
req = fakes.HTTPRequest.blank(self.base_path + '123/metadata')
req.method = 'POST'
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPForbidden,
self.controller.create, req, '123', body=body)
self.assertFalse(update_mocked.called)
@mock.patch(CHK_QUOTA_STR,
side_effect=webob.exc.HTTPForbidden(explanation=''))
@mock.patch('nova.image.api.API.update')
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_too_many_metadata_items_on_put(self, _get_mocked,
update_mocked, _quota_mocked):
req = fakes.HTTPRequest.blank(self.base_path + '123/metadata/blah')
req.method = 'PUT'
body = {"meta": {"blah": "blah", "blah1": "blah1"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(self.invalid_request,
self.controller.update, req, '123', 'blah',
body=body)
self.assertFalse(update_mocked.called)
@mock.patch('nova.image.api.API.get',
side_effect=exception.ImageNotAuthorized(image_id='123'))
def test_image_not_authorized_update(self, _get_mocked):
req = fakes.HTTPRequest.blank(self.base_path + '123/metadata/key1')
req.method = 'PUT'
body = {"meta": {"key1": "value1"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPForbidden,
self.controller.update, req, '123', 'key1',
body=body)
@mock.patch('nova.image.api.API.get',
side_effect=exception.ImageNotAuthorized(image_id='123'))
def test_image_not_authorized_update_all(self, _get_mocked):
image_id = 131
# see nova.tests.unit.api.openstack.fakes:_make_image_fixtures
req = fakes.HTTPRequest.blank(self.base_path +
'%s/metadata/key1' % image_id)
req.method = 'PUT'
body = {"metadata": {"key1": "value1"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPForbidden,
self.controller.update_all, req, image_id,
body=body)
@mock.patch('nova.image.api.API.get',
side_effect=exception.ImageNotAuthorized(image_id='123'))
def test_image_not_authorized_create(self, _get_mocked):
image_id = 131
# see nova.tests.unit.api.openstack.fakes:_make_image_fixtures
req = fakes.HTTPRequest.blank(self.base_path +
'%s/metadata/key1' % image_id)
req.method = 'POST'
body = {"metadata": {"key1": "value1"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPForbidden,
self.controller.create, req, image_id,
body=body)
class ImageMetadataControllerV239(test.NoDBTestCase):
def setUp(self):
super(ImageMetadataControllerV239, self).setUp()
self.controller = image_metadata_v21.ImageMetadataController()
self.req = fakes.HTTPRequest.blank('', version='2.39')
def test_not_found_for_all_image_metadata_api(self):
self.assertRaises(exception.VersionNotFoundForAPIMethod,
self.controller.index, self.req)
self.assertRaises(exception.VersionNotFoundForAPIMethod,
self.controller.show, self.req, fakes.FAKE_UUID)
self.assertRaises(exception.VersionNotFoundForAPIMethod,
self.controller.create, self.req,
fakes.FAKE_UUID, {'metadata': {}})
self.assertRaises(exception.VersionNotFoundForAPIMethod,
self.controller.update, self.req,
fakes.FAKE_UUID, 'id', {'metadata': {}})
self.assertRaises(exception.VersionNotFoundForAPIMethod,
self.controller.update_all, self.req,
fakes.FAKE_UUID, {'metadata': {}})
self.assertRaises(exception.VersionNotFoundForAPIMethod,
self.controller.delete, self.req, fakes.FAKE_UUID)
| rahulunair/nova | nova/tests/unit/api/openstack/compute/test_image_metadata.py | Python | apache-2.0 | 17,106 |
import bpy
from bpy_extras.io_utils import ExportHelper, ImportHelper
import json
from . import utils
from .utils import MultiCamContext
from .multicam_fade import MultiCamFaderProperties
class MultiCamExport(bpy.types.Operator, ExportHelper, MultiCamContext):
bl_idname = 'sequencer.export_multicam'
bl_label = 'Export Multicam'
filename_ext = '.json'
def get_cuts(self, scene, mc_strip):
d = {}
data_path = '.'.join([mc_strip.path_from_id(), 'multicam_source'])
fcurve = utils.get_fcurve(scene, data_path)
if fcurve is not None:
for kf in fcurve.keyframe_points:
key = str(kf.co[0])
d[key] = kf.co[1]
return d
def execute(self, context):
mc_strip = self.get_strip(context)
data = {}
props = MultiCamFaderProperties.get_for_strip(mc_strip)
if props is not None:
data['fades'] = props.serialize()
data['cuts'] = self.get_cuts(context.scene, mc_strip)
with open(self.filepath, 'w') as f:
f.write(json.dumps(data, indent=2))
return {'FINISHED'}
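# Illustrative sketch of the JSON written by MultiCamExport.execute() above.
# The exact contents of "fades" depend on MultiCamFaderProperties.serialize(),
# which is defined elsewhere; the frame numbers and source channels here are
# made-up example values:
#
#   {
#     "fades": { ... entries produced by MultiCamFaderProperties.serialize() ... },
#     "cuts": {
#       "1.0": 2.0,
#       "48.0": 3.0
#     }
#   }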
class MultiCamImport(bpy.types.Operator, ImportHelper, MultiCamContext):
bl_idname = 'sequencer.import_multicam'
bl_label = 'Import Multicam'
filename_ext = '.json'
def execute(self, context):
with open(self.filepath, 'r') as f:
data = json.loads(f.read())
mc_strip = self.get_strip(context)
props, created = MultiCamFaderProperties.get_or_create(mc_strip=mc_strip)
for key, fade_data in data.get('fades', {}).items():
            # remove any existing fades overlapping either end of the imported fade
            for attr in ['start_frame', 'end_frame']:
                fade = props.get_fade_in_range(fade_data[attr])
                if fade is not None:
                    props.remove_fade(fade)
props.add_fade(**fade_data)
data_path = '.'.join([mc_strip.path_from_id(), 'multicam_source'])
fcurve = utils.get_or_create_fcurve(context.scene, data_path)
for key, source in data.get('cuts', {}).items():
frame = float(key)
fade = props.get_fade_in_range(frame)
if fade is not None:
continue
if fcurve is None:
mc_strip.keyframe_insert('multicam_source', frame=frame)
fcurve = utils.get_fcurve(context.scene, data_path)
kf = utils.get_keyframe(fcurve, frame)
kf.co[1] = source
kf.interpolation = 'CONSTANT'
else:
utils.set_keyframe(fcurve, frame, source)
return {'FINISHED'}
def register():
bpy.utils.register_class(MultiCamExport)
bpy.utils.register_class(MultiCamImport)
def unregister():
bpy.utils.unregister_class(MultiCamExport)
bpy.utils.unregister_class(MultiCamImport)
| nocarryr/blender-scripts | multicam_tools/multicam_import_export.py | Python | gpl-2.0 | 2,843 |
import os
import sys
import logging
import inspect
from inspect import getmembers, isfunction
from commands import command
import handlers
logger = logging.getLogger(__name__)
class tracker:
def __init__(self):
self.bot = None
self.list = []
self.reset()
def set_bot(self, bot):
self.bot = bot
def reset(self):
self._current = {
"commands": {
"admin": [],
"user": [],
"all": None
},
"handlers": [],
"shared": [],
"metadata": None
}
def start(self, metadata):
self.reset()
self._current["metadata"] = metadata
def current(self):
self._current["commands"]["all"] = list(
set(self._current["commands"]["admin"] +
self._current["commands"]["user"]))
return self._current
def end(self):
self.list.append(self.current())
def register_command(self, type, command_names):
"""call during plugin init to register commands"""
self._current["commands"][type].extend(command_names)
self._current["commands"][type] = list(set(self._current["commands"][type]))
def register_handler(self, function, type, priority):
self._current["handlers"].append((function, type, priority))
def register_shared(self, id, objectref, forgiving):
self._current["shared"].append((id, objectref, forgiving))
tracking = tracker()
"""helpers"""
def register_user_command(command_names):
"""user command registration"""
if not isinstance(command_names, list):
command_names = [command_names]
tracking.register_command("user", command_names)
def register_admin_command(command_names):
"""admin command registration, overrides user command registration"""
if not isinstance(command_names, list):
command_names = [command_names]
tracking.register_command("admin", command_names)
def register_handler(function, type="message", priority=50):
"""register external handler"""
bot_handlers = tracking.bot._handlers
bot_handlers.register_handler(function, type, priority)
def register_shared(id, objectref, forgiving=True):
"""register shared object"""
bot = tracking.bot
bot.register_shared(id, objectref, forgiving=forgiving)
"""plugin loader"""
def retrieve_all_plugins(plugin_path=None, must_start_with=False):
"""recursively loads all plugins from the standard plugins path
* a plugin file or folder must not begin with . or _
* a subfolder containing a plugin must have an __init__.py file
* sub-plugin files (additional plugins inside a subfolder) must be prefixed with the
plugin/folder name for it to be automatically loaded
"""
if not plugin_path:
plugin_path = os.path.dirname(os.path.realpath(sys.argv[0])) + os.sep + "plugins"
plugin_list = []
nodes = os.listdir(plugin_path)
for node_name in nodes:
full_path = os.path.join(plugin_path, node_name)
module_names = [ os.path.splitext(node_name)[0] ] # node_name without .py extension
if node_name.startswith(("_", ".")):
continue
if must_start_with and not node_name.startswith(must_start_with):
continue
if os.path.isfile(full_path):
if not node_name.endswith(".py"):
continue
else:
if not os.path.isfile(os.path.join(full_path, "__init__.py")):
continue
for sm in retrieve_all_plugins(full_path, must_start_with=node_name):
module_names.append(module_names[0] + "." + sm)
plugin_list.extend(module_names)
logger.debug("retrieved {}: {}.{}".format(len(plugin_list), must_start_with or "plugins", plugin_list))
return plugin_list
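# Illustrative example of the naming conventions described in retrieve_all_plugins()
# above (the file names are hypothetical):
#
#   plugins/
#       simpleplugin.py           -> "simpleplugin"
#       _private.py               -> skipped (leading underscore)
#       myplugin/
#           __init__.py           -> "myplugin"
#           myplugin_extra.py     -> "myplugin.myplugin_extra"
#
# With that layout retrieve_all_plugins() would return, in os.listdir() order,
# something like ["simpleplugin", "myplugin", "myplugin.myplugin_extra"].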
def get_configured_plugins(bot):
all_plugins = retrieve_all_plugins()
config_plugins = bot.get_config_option('plugins')
if config_plugins is None: # must be unset in config or null
logger.info("plugins is not defined, using ALL")
plugin_list = all_plugins
else:
"""perform fuzzy matching with actual retrieved plugins, e.g. "abc" matches "xyz.abc"
if more than one match found, don't load plugin
"""
plugins_included = []
plugins_excluded = all_plugins
plugin_name_ambiguous = []
plugin_name_not_found = []
for configured in config_plugins:
dotconfigured = "." + configured
matches = []
for found in plugins_excluded:
fullfound = "plugins." + found
if fullfound.endswith(dotconfigured):
matches.append(found)
num_matches = len(matches)
if num_matches <= 0:
logger.debug("{} no match".format(configured))
plugin_name_not_found.append(configured)
elif num_matches == 1:
logger.debug("{} matched to {}".format(configured, matches[0]))
plugins_included.append(matches[0])
plugins_excluded.remove(matches[0])
else:
logger.debug("{} ambiguous, matches {}".format(configured, matches))
plugin_name_ambiguous.append(configured)
if plugins_excluded:
logger.info("excluded {}: {}".format(len(plugins_excluded), plugins_excluded))
if plugin_name_ambiguous:
logger.warning("ambiguous plugin names: {}".format(plugin_name_ambiguous))
if plugin_name_not_found:
logger.warning("plugin not found: {}".format(plugin_name_not_found))
plugin_list = plugins_included
logger.info("included {}: {}".format(len(plugin_list), plugin_list))
return plugin_list
def load(bot, command_dispatcher):
"""load plugins and perform any initialisation required to set them up"""
tracking.set_bot(bot)
command_dispatcher.set_tracking(tracking)
plugin_list = get_configured_plugins(bot)
for module in plugin_list:
module_path = "plugins.{}".format(module)
tracking.start({ "module": module, "module.path": module_path })
try:
exec("import {}".format(module_path))
except Exception as e:
logger.exception("EXCEPTION during plugin import: {}".format(module_path))
continue
public_functions = [o for o in getmembers(sys.modules[module_path], isfunction)]
candidate_commands = []
"""pass 1: run optional callable: _initialise, _initialize
* performs house-keeping tasks (e.g. migration, tear-up, pre-init, etc)
        * registers user and/or admin commands
        * an illustrative minimal plugin is sketched in a comment after this load() function
        """
available_commands = False # default: ALL
try:
for function_name, the_function in public_functions:
if function_name == "_initialise" or function_name == "_initialize":
"""accepted function signatures:
CURRENT
version >= 2.4 | function()
version >= 2.4 | function(bot) - parameter must be named "bot"
LEGACY
version <= 2.4 | function(handlers, bot)
ancient | function(handlers)
"""
_expected = list(inspect.signature(the_function).parameters)
if len(_expected) == 0:
the_function()
_return = []
elif len(_expected) == 1 and _expected[0] == "bot":
the_function(bot)
_return = []
else:
try:
# legacy support, pre-2.4
_return = the_function(bot._handlers, bot)
except TypeError as e:
# legacy support, ancient plugins
_return = the_function(bot._handlers)
if type(_return) is list:
available_commands = _return
elif function_name.startswith("_"):
pass
else:
candidate_commands.append((function_name, the_function))
if available_commands is False:
# implicit init, legacy support: assume all candidate_commands are user-available
register_user_command([function_name for function_name, function in candidate_commands])
            elif available_commands == []:
# explicit init, no user-available commands
pass
else:
# explicit init, legacy support: _initialise() returned user-available commands
register_user_command(available_commands)
except Exception as e:
logger.exception("EXCEPTION during plugin init: {}".format(module_path))
continue # skip this, attempt next plugin
"""
pass 2: register filtered functions
tracking.current() and the CommandDispatcher registers might be out of sync if a
combination of decorators and register_user_command/register_admin_command is used since
decorators execute immediately upon import
"""
plugin_tracking = tracking.current()
explicit_admin_commands = plugin_tracking["commands"]["admin"]
all_commands = plugin_tracking["commands"]["all"]
registered_commands = []
for function_name, the_function in candidate_commands:
if function_name in all_commands:
is_admin = False
text_function_name = function_name
if function_name in explicit_admin_commands:
is_admin = True
text_function_name = "*" + text_function_name
command_dispatcher.register(the_function, admin=is_admin)
registered_commands.append(text_function_name)
if registered_commands:
logger.info("{} - {}".format(module, ", ".join(registered_commands)))
else:
logger.info("{} - no commands".format(module))
tracking.end()
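# Illustrative sketch of a minimal plugin module that load() above can handle.
# The module and command names are hypothetical, and it assumes the registration
# helpers in this module are importable by plugins as "plugins":
#
#   import plugins
#
#   def _initialise(bot):
#       # current-style init: single parameter named "bot"
#       plugins.register_admin_command(["reloadme"])
#       plugins.register_user_command(["echo"])
#
#   def reloadme(bot, event, *args):
#       pass
#
#   def echo(bot, event, *args):
#       bot.send_html_to_conversation(event.conv_id, " ".join(args))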
@command.register(admin=True)
def plugininfo(bot, event, *args):
"""dumps plugin information"""
lines = []
for plugin in tracking.list:
if len(args) == 0 or args[0] in plugin["metadata"]["module"]:
lines.append("<b>{}</b>".format(plugin["metadata"]["module.path"]))
"""admin commands"""
if len(plugin["commands"]["admin"]) > 0:
lines.append("<i>admin commands:</i> {}".format(", ".join(plugin["commands"]["admin"])))
"""user-only commands"""
user_only_commands = list(set(plugin["commands"]["user"]) - set(plugin["commands"]["admin"]))
if len(user_only_commands) > 0:
lines.append("<i>user commands:</i> {}".format(", ".join(user_only_commands)))
"""handlers"""
if len(plugin["handlers"]) > 0:
lines.append("<i>handlers:</i>" + ", ".join([ "{} ({}, p={})".format(f[0].__name__, f[1], str(f[2])) for f in plugin["handlers"]]))
"""shared"""
if len(plugin["shared"]) > 0:
lines.append("<i>shared:</i>" + ", ".join([f[1].__name__ for f in plugin["shared"]]))
lines.append("")
        bot.send_html_to_conversation(event.conv_id, "<br />".join(lines))
| ravrahn/HangoutsBot | hangupsbot/plugins/__init__.py | Python | gpl-3.0 | 11,524 |
# GNU LESSER GENERAL PUBLIC LICENSE
import io
import json as lib_json
from base64 import b64encode
try:
import bottle
except ImportError:
print("you don't have bottle installed")
try:
from urlparse import urlparse
from urllib import urlencode
except ImportError:
from urllib.parse import urlparse, urlencode
__version__ = '0.2.9'
class boddle(object):
def __init__(self, params={}, path=None, method=None, headers=None, json=None, url=None, body=None, query={}, auth=None, **extras):
environ = {}
self.extras = extras
self.extra_orig = {}
self.orig_app_reader = bottle.BaseRequest.app
if auth is not None:
user, password = auth
environ["HTTP_AUTHORIZATION"] = "Basic {}".format(b64encode(bytes(f"{user}:{password}", "utf-8")).decode("ascii"))
if params is not None:
self._set_payload(environ, urlencode(params).encode('utf8'))
if path is not None:
environ['PATH_INFO'] = path.lstrip('/')
if method is not None:
environ['REQUEST_METHOD'] = method
for k, v in (headers or {}).items():
k = k.replace('-', '_').upper()
environ['HTTP_'+k] = v
if json is not None:
environ['CONTENT_TYPE'] = 'application/json'
self._set_payload(environ, lib_json.dumps(json).encode('utf8'))
if body is not None:
            # wrap string bodies in a file-like object; file-like bodies pass through unchanged
            if hasattr(body, 'encode'):
body = io.BytesIO(bytes(body.encode('utf-8')))
environ['CONTENT_LENGTH'] = str(len(body.read()))
body.seek(0)
environ['wsgi.input'] = body
if url is not None:
o = urlparse(url)
environ['wsgi.url_scheme'] = o.scheme
environ['HTTP_HOST'] = o.netloc
environ['PATH_INFO'] = o.path.lstrip('/')
if query is not None:
environ['QUERY_STRING'] = urlencode(query)
self.environ = environ
def _set_payload(self, environ, payload):
payload = bytes(payload)
environ['CONTENT_LENGTH'] = str(len(payload))
environ['wsgi.input'] = io.BytesIO(payload)
def __enter__(self):
self.orig = bottle.request.environ
bottle.request.environ = self.environ
for k,v in self.extras.items():
if hasattr(bottle.request, k):
self.extra_orig[k] = getattr(bottle.request, k)
setattr(bottle.request, k, v)
setattr(bottle.BaseRequest, 'app', True)
def __exit__(self,a,b,c):
bottle.request.environ = self.orig
for k,v in self.extras.items():
if k in self.extra_orig:
setattr(bottle.request, k, self.extra_orig[k])
else:
try:
delattr(bottle.request, k)
except AttributeError:
pass
setattr(bottle.BaseRequest, 'app', self.orig_app_reader)
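# Illustrative usage sketch (assumed typical usage; the route function and the
# parameter value below are hypothetical):
#
#   import bottle
#
#   @bottle.route('/woot')
#   def woot():
#       return 'Hello %s' % bottle.request.params.get('name', 'world')
#
#   with boddle(params={'name': 'Derek'}):
#       assert woot() == 'Hello Derek'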
| keredson/boddle | boddle.py | Python | lgpl-2.1 | 2,662 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Defect thermodynamics, such as defect phase diagrams, etc.
"""
import logging
from itertools import chain
from matplotlib import cm
import matplotlib.pyplot as plt
import numpy as np
from monty.json import MSONable
from scipy.optimize import bisect
from scipy.spatial import HalfspaceIntersection
from pymatgen.analysis.defects.core import DefectEntry
from pymatgen.analysis.structure_matcher import PointDefectComparator
from pymatgen.electronic_structure.dos import FermiDos
__author__ = "Danny Broberg, Shyam Dwaraknath"
__copyright__ = "Copyright 2018, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyam Dwaraknath"
__email__ = "[email protected]"
__status__ = "Development"
__date__ = "Mar 15, 2018"
logger = logging.getLogger(__name__)
class DefectPhaseDiagram(MSONable):
"""
This is similar to a PhaseDiagram object in pymatgen,
    but has the ability to do quick analysis of defect formation energies
    when fed DefectEntry objects.
    It uses many of the capabilities from PyCDT's DefectsAnalyzer class...
    This class is able to get:
        a) stability of charge states for a given defect,
        b) list of all formation energies
c) transition levels in the gap
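    Illustrative usage sketch (the entries and chemical potentials below are
    hypothetical placeholders):
        dpd = DefectPhaseDiagram(entries, vbm=0.0, band_gap=1.5)
        dpd.transition_levels       # {defect name: [fermi levels of transitions]}
        dpd.suggest_charges()       # {defect name: [further charges to compute]}
        plt = dpd.plot(mu_elts=chemical_potentials)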
"""
def __init__(self, entries, vbm, band_gap, filter_compatible=True, metadata=None):
"""
Args:
            entries ([DefectEntry]): A list of DefectEntry objects
vbm (float): Valence Band energy to use for all defect entries.
NOTE if using band shifting-type correction then this VBM
should still be that of the GGA calculation
(the bandedgeshifting_correction accounts for shift's
contribution to formation energy).
band_gap (float): Band gap to use for all defect entries.
NOTE if using band shifting-type correction then this gap
should still be that of the Hybrid calculation you are shifting to.
filter_compatible (bool): Whether to consider entries which were ruled
                incompatible by the DefectCompatibility class. Note this must be set to False
                if you desire a suggestion for larger supercell sizes.
                Default is True (to omit calculations which have "is_compatible"=False in
                the DefectEntry's parameters)
metadata (dict): Dictionary of metadata to store with the PhaseDiagram. Has
no impact on calculations
"""
self.vbm = vbm
self.band_gap = band_gap
self.filter_compatible = filter_compatible
if filter_compatible:
self.entries = [e for e in entries if e.parameters.get("is_compatible", True)]
else:
self.entries = entries
for ent_ind, ent in enumerate(self.entries):
if "vbm" not in ent.parameters.keys() or ent.parameters["vbm"] != vbm:
logger.info(
"Entry {} did not have vbm equal to given DefectPhaseDiagram value."
" Manually overriding.".format(ent.name)
)
new_ent = ent.copy()
new_ent.parameters["vbm"] = vbm
self.entries[ent_ind] = new_ent
self.metadata = metadata or {}
self.find_stable_charges()
def as_dict(self):
"""
Returns:
Json-serializable dict representation of DefectPhaseDiagram
"""
d = {
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"entries": [entry.as_dict() for entry in self.entries],
"vbm": self.vbm,
"band_gap": self.band_gap,
"filter_compatible": self.filter_compatible,
"metadata": self.metadata,
}
return d
@classmethod
def from_dict(cls, d):
"""
Reconstitute a DefectPhaseDiagram object from a dict representation created using
as_dict().
Args:
d (dict): dict representation of DefectPhaseDiagram.
Returns:
DefectPhaseDiagram object
"""
entries = [DefectEntry.from_dict(entry_dict) for entry_dict in d.get("entries")]
vbm = d["vbm"]
band_gap = d["band_gap"]
filter_compatible = d.get("filter_compatible", True)
metadata = d.get("metadata", {})
if "entry_id" in d.keys() and "entry_id" not in metadata:
metadata["entry_id"] = d["entry_id"]
return cls(
entries,
vbm,
band_gap,
filter_compatible=filter_compatible,
metadata=metadata,
)
def find_stable_charges(self):
"""
Sets the stable charges and transition states for a series of
        defect entries. This function uses scipy's HalfspaceIntersection
        to construct the polygons corresponding to defect stability as
        a function of the Fermi level. The halfspace intersection
        constructs N-dimensional hyperplanes, in this case N=2, based
        on the equation for defect formation energy with chemical
        potentials taken into account:
        E_form = E_0^{Corrected} + Q_{defect}*(E_{VBM} + E_{Fermi})
        Extra hyperplanes are constructed to bound this space so that
        the algorithm can actually find an enclosed region.
This code was modeled after the Halfspace Intersection code for
the Pourbaix Diagram
"""
def similar_defects(entryset):
"""
Used for grouping similar defects of different charges
Can distinguish identical defects even if they are not in same position
"""
pdc = PointDefectComparator(check_charge=False, check_primitive_cell=True, check_lattice_scale=False)
grp_def_sets = []
grp_def_indices = []
for ent_ind, ent in enumerate(entryset):
# TODO: more pythonic way of grouping entry sets with PointDefectComparator.
# this is currently most time intensive part of DefectPhaseDiagram
matched_ind = None
for grp_ind, defgrp in enumerate(grp_def_sets):
if pdc.are_equal(ent.defect, defgrp[0].defect):
matched_ind = grp_ind
break
if matched_ind is not None:
grp_def_sets[matched_ind].append(ent.copy())
grp_def_indices[matched_ind].append(ent_ind)
else:
grp_def_sets.append([ent.copy()])
grp_def_indices.append([ent_ind])
return zip(grp_def_sets, grp_def_indices)
# Limits for search
# E_fermi = { -1 eV to band gap+1}
# E_formation = { (min(Eform) - 30) to (max(Eform) + 30)}
all_eform = [one_def.formation_energy(fermi_level=self.band_gap / 2.0) for one_def in self.entries]
min_y_lim = min(all_eform) - 30
max_y_lim = max(all_eform) + 30
limits = [[-1, self.band_gap + 1], [min_y_lim, max_y_lim]]
stable_entries = {}
finished_charges = {}
transition_level_map = {}
# Grouping by defect types
for defects, index_list in similar_defects(self.entries):
defects = list(defects)
# prepping coefficient matrix for half-space intersection
            # [-Q, 1, -1*(E_form+Q*VBM)] -> -Q*E_fermi + E - (E_form+Q*VBM) <= 0 where E_fermi and E are the variables
# in the hyperplanes
hyperplanes = np.array(
[
[
-1.0 * entry.charge,
1,
-1.0 * (entry.energy + entry.charge * self.vbm),
]
for entry in defects
]
)
border_hyperplanes = [
[-1, 0, limits[0][0]],
[1, 0, -1 * limits[0][1]],
[0, -1, limits[1][0]],
[0, 1, -1 * limits[1][1]],
]
hs_hyperplanes = np.vstack([hyperplanes, border_hyperplanes])
interior_point = [self.band_gap / 2, min(all_eform) - 1.0]
hs_ints = HalfspaceIntersection(hs_hyperplanes, np.array(interior_point))
            # Group the intersections and corresponding facets
            ints_and_facets = zip(hs_ints.intersections, hs_ints.dual_facets)
            # Only include the facets corresponding to entries, not the boundaries
total_entries = len(defects)
ints_and_facets = filter(
lambda int_and_facet: all(np.array(int_and_facet[1]) < total_entries),
ints_and_facets,
)
# sort based on transition level
ints_and_facets = list(sorted(ints_and_facets, key=lambda int_and_facet: int_and_facet[0][0]))
# log a defect name for tracking (using full index list to avoid naming
            # inequivalent defects with the same name)
str_index_list = [str(ind) for ind in sorted(index_list)]
track_name = defects[0].name + "@" + str("-".join(str_index_list))
if len(ints_and_facets):
# Unpack into lists
_, facets = zip(*ints_and_facets)
# Map of transition level: charge states
transition_level_map[track_name] = {
intersection[0]: [defects[i].charge for i in facet] for intersection, facet in ints_and_facets
}
stable_entries[track_name] = list({defects[i] for dual in facets for i in dual})
finished_charges[track_name] = [defect.charge for defect in defects]
else:
# if ints_and_facets is empty, then there is likely only one defect...
if len(defects) != 1:
# confirm formation energies dominant for one defect over other identical defects
name_set = [one_def.name + "_chg" + str(one_def.charge) for one_def in defects]
vb_list = [one_def.formation_energy(fermi_level=limits[0][0]) for one_def in defects]
cb_list = [one_def.formation_energy(fermi_level=limits[0][1]) for one_def in defects]
vbm_def_index = vb_list.index(min(vb_list))
name_stable_below_vbm = name_set[vbm_def_index]
cbm_def_index = cb_list.index(min(cb_list))
name_stable_above_cbm = name_set[cbm_def_index]
if name_stable_below_vbm != name_stable_above_cbm:
raise ValueError(
"HalfSpace identified only one stable charge out of list: {}\n"
"But {} is stable below vbm and {} is "
"stable above cbm.\nList of VBM formation energies: {}\n"
"List of CBM formation energies: {}"
"".format(
name_set,
name_stable_below_vbm,
name_stable_above_cbm,
vb_list,
cb_list,
)
)
logger.info("{} is only stable defect out of {}".format(name_stable_below_vbm, name_set))
transition_level_map[track_name] = {}
stable_entries[track_name] = list([defects[vbm_def_index]])
finished_charges[track_name] = [one_def.charge for one_def in defects]
else:
transition_level_map[track_name] = {}
stable_entries[track_name] = list([defects[0]])
finished_charges[track_name] = [defects[0].charge]
self.transition_level_map = transition_level_map
self.transition_levels = {
defect_name: list(defect_tls.keys()) for defect_name, defect_tls in transition_level_map.items()
}
self.stable_entries = stable_entries
self.finished_charges = finished_charges
self.stable_charges = {
defect_name: [entry.charge for entry in entries] for defect_name, entries in stable_entries.items()
}
@property
def defect_types(self):
"""
List types of defects existing in the DefectPhaseDiagram
"""
return list(self.finished_charges.keys())
@property
def all_stable_entries(self):
"""
List all stable entries (defect+charge) in the DefectPhaseDiagram
"""
return set(chain.from_iterable(self.stable_entries.values()))
@property
def all_unstable_entries(self):
"""
List all unstable entries (defect+charge) in the DefectPhaseDiagram
"""
all_stable_entries = self.all_stable_entries
return [e for e in self.entries if e not in all_stable_entries]
def defect_concentrations(self, chemical_potentials, temperature=300, fermi_level=0.0):
"""
Give list of all concentrations at specified efermi in the DefectPhaseDiagram
args:
chemical_potentials = {Element: number} is dict of chemical potentials to provide formation energies for
temperature = temperature to produce concentrations from
fermi_level: (float) is fermi level relative to valence band maximum
Default efermi = 0 = VBM energy
returns:
list of dictionaries of defect concentrations
"""
concentrations = []
for dfct in self.all_stable_entries:
concentrations.append(
{
"conc": dfct.defect_concentration(
chemical_potentials=chemical_potentials,
temperature=temperature,
fermi_level=fermi_level,
),
"name": dfct.name,
"charge": dfct.charge,
}
)
return concentrations
def suggest_charges(self, tolerance=0.1):
"""
Suggest possible charges for defects to compute based on proximity
        of known transitions from entries to VBM and CBM
Args:
tolerance (float): tolerance with respect to the VBM and CBM to
                continue to compute new charges
"""
recommendations = {}
for def_type in self.defect_types:
test_charges = np.arange(
np.min(self.stable_charges[def_type]) - 1,
np.max(self.stable_charges[def_type]) + 2,
)
test_charges = [charge for charge in test_charges if charge not in self.finished_charges[def_type]]
if len(self.transition_level_map[def_type].keys()):
# More positive charges will shift the minimum transition level down
# Max charge is limited by this if its transition level is close to VBM
min_tl = min(self.transition_level_map[def_type].keys())
if min_tl < tolerance:
max_charge = max(self.transition_level_map[def_type][min_tl])
test_charges = [charge for charge in test_charges if charge < max_charge]
# More negative charges will shift the maximum transition level up
# Minimum charge is limited by this if transition level is near CBM
max_tl = max(self.transition_level_map[def_type].keys())
if max_tl > (self.band_gap - tolerance):
min_charge = min(self.transition_level_map[def_type][max_tl])
test_charges = [charge for charge in test_charges if charge > min_charge]
else:
test_charges = [charge for charge in test_charges if charge not in self.stable_charges[def_type]]
recommendations[def_type] = test_charges
return recommendations
def suggest_larger_supercells(self, tolerance=0.1):
"""
Suggest larger supercells for different defect+chg combinations based on use of
compatibility analysis. Does this for any charged defects which have is_compatible = False,
and the defect+chg formation energy is stable at fermi levels within the band gap.
NOTE: Requires self.filter_compatible = False
Args:
tolerance (float): tolerance with respect to the VBM and CBM for considering
larger supercells for a given charge
"""
if self.filter_compatible:
raise ValueError("Cannot suggest larger supercells if filter_compatible is True.")
recommendations = {}
for def_type in self.defect_types:
template_entry = self.stable_entries[def_type][0].copy()
defect_indices = [int(def_ind) for def_ind in def_type.split("@")[-1].split("-")]
for charge in self.finished_charges[def_type]:
chg_defect = template_entry.defect.copy()
chg_defect.set_charge(charge)
for entry_index in defect_indices:
entry = self.entries[entry_index]
if entry.charge == charge:
break
if entry.parameters.get("is_compatible", True):
continue
# consider if transition level is within
# tolerance of band edges
suggest_bigger_supercell = True
for tl, chgset in self.transition_level_map[def_type].items():
sorted_chgset = list(chgset)
sorted_chgset.sort(reverse=True)
if charge == sorted_chgset[0] and tl < tolerance:
suggest_bigger_supercell = False
elif charge == sorted_chgset[1] and tl > (self.band_gap - tolerance):
suggest_bigger_supercell = False
if suggest_bigger_supercell:
if def_type not in recommendations:
recommendations[def_type] = []
recommendations[def_type].append(charge)
return recommendations
def solve_for_fermi_energy(self, temperature, chemical_potentials, bulk_dos):
"""
Solve for the Fermi energy self-consistently as a function of T
        by balancing defect concentrations against electron and hole concentrations
Args:
temperature: Temperature to equilibrate fermi energies for
chemical_potentials: dict of chemical potentials to use for calculation fermi level
bulk_dos: bulk system dos (pymatgen Dos object)
Returns:
Fermi energy dictated by charge neutrality
"""
fdos = FermiDos(bulk_dos, bandgap=self.band_gap)
_, fdos_vbm = fdos.get_cbm_vbm()
def _get_total_q(ef):
qd_tot = sum(
[
d["charge"] * d["conc"]
for d in self.defect_concentrations(
chemical_potentials=chemical_potentials,
temperature=temperature,
fermi_level=ef,
)
]
)
qd_tot += fdos.get_doping(fermi_level=ef + fdos_vbm, temperature=temperature)
return qd_tot
return bisect(_get_total_q, -1.0, self.band_gap + 1.0)
def solve_for_non_equilibrium_fermi_energy(self, temperature, quench_temperature, chemical_potentials, bulk_dos):
"""
Solve for the Fermi energy after quenching in the defect concentrations at a higher
temperature (the quench temperature),
as outlined in P. Canepa et al (2017) Chemistry of Materials (doi: 10.1021/acs.chemmater.7b02909)
Args:
temperature: Temperature to equilibrate fermi energy at after quenching in defects
quench_temperature: Temperature to equilibrate defect concentrations at (higher temperature)
chemical_potentials: dict of chemical potentials to use for calculation fermi level
bulk_dos: bulk system dos (pymatgen Dos object)
Returns:
Fermi energy dictated by charge neutrality with respect to frozen in defect concentrations
"""
high_temp_fermi_level = self.solve_for_fermi_energy(quench_temperature, chemical_potentials, bulk_dos)
fixed_defect_charge = sum(
[
d["charge"] * d["conc"]
for d in self.defect_concentrations(
chemical_potentials=chemical_potentials,
temperature=quench_temperature,
fermi_level=high_temp_fermi_level,
)
]
)
fdos = FermiDos(bulk_dos, bandgap=self.band_gap)
_, fdos_vbm = fdos.get_cbm_vbm()
def _get_total_q(ef):
qd_tot = fixed_defect_charge
qd_tot += fdos.get_doping(fermi_level=ef + fdos_vbm, temperature=temperature)
return qd_tot
return bisect(_get_total_q, -1.0, self.band_gap + 1.0)
def get_dopability_limits(self, chemical_potentials):
"""
Find Dopability limits for a given chemical potential.
This is defined by the defect formation energies which first cross zero
in formation energies.
This determine bounds on the fermi level.
Does this by computing formation energy for every stable defect with non-zero charge.
If the formation energy value changes sign on either side of the band gap, then
compute the fermi level value where the formation energy is zero
(formation energies are lines and basic algebra shows: x_crossing = x1 - (y1 / q)
for fermi level, x1, producing formation energy y1)
Args:
chemical_potentials: dict of chemical potentials to use for calculation fermi level
Returns:
lower dopability limit, upper dopability limit
(returns None if no limit exists for upper or lower i.e. no negative defect
crossing before +/- 20 of band edges OR defect formation energies are entirely zero)
"""
min_fl_range = -20.0
max_fl_range = self.band_gap + 20.0
lower_lim = None
upper_lim = None
for def_entry in self.all_stable_entries:
min_fl_formen = def_entry.formation_energy(
chemical_potentials=chemical_potentials, fermi_level=min_fl_range
)
max_fl_formen = def_entry.formation_energy(
chemical_potentials=chemical_potentials, fermi_level=max_fl_range
)
if min_fl_formen < 0.0 and max_fl_formen < 0.0:
logger.error(
"Formation energy is negative through entire gap for entry {} q={}."
" Cannot return dopability limits.".format(def_entry.name, def_entry.charge)
)
return None, None
if np.sign(min_fl_formen) != np.sign(max_fl_formen):
x_crossing = min_fl_range - (min_fl_formen / def_entry.charge)
if min_fl_formen < 0.0:
if lower_lim is None or lower_lim < x_crossing:
lower_lim = x_crossing
else:
if upper_lim is None or upper_lim > x_crossing:
upper_lim = x_crossing
return lower_lim, upper_lim
def plot(
self,
mu_elts=None,
xlim=None,
ylim=None,
ax_fontsize=1.3,
lg_fontsize=1.0,
lg_position=None,
fermi_level=None,
title=None,
saved=False,
):
"""
Produce defect Formation energy vs Fermi energy plot
Args:
mu_elts:
                a dictionary of {Element:value} giving the chemical
potential of each element
xlim:
Tuple (min,max) giving the range of the x (fermi energy) axis
ylim:
Tuple (min,max) giving the range for the formation energy axis
ax_fontsize:
float multiplier to change axis label fontsize
lg_fontsize:
float multiplier to change legend label fontsize
lg_position:
Tuple (horizontal-position, vertical-position) giving the position
to place the legend.
Example: (0.5,-0.75) will likely put it below the x-axis.
            saved:
                Whether to save the plot to a file (named from the title) instead of
                returning the matplotlib object
Returns:
a matplotlib object
"""
if xlim is None:
xlim = (-0.5, self.band_gap + 0.5)
xy = {}
lower_cap = -100.0
upper_cap = 100.0
y_range_vals = [] # for finding max/min values on y-axis based on x-limits
for defnom, def_tl in self.transition_level_map.items():
xy[defnom] = [[], []]
if def_tl:
org_x = sorted(def_tl.keys()) # list of transition levels
# establish lower x-bound
first_charge = max(def_tl[org_x[0]])
for chg_ent in self.stable_entries[defnom]:
if chg_ent.charge == first_charge:
form_en = chg_ent.formation_energy(chemical_potentials=mu_elts, fermi_level=lower_cap)
fe_left = chg_ent.formation_energy(chemical_potentials=mu_elts, fermi_level=xlim[0])
xy[defnom][0].append(lower_cap)
xy[defnom][1].append(form_en)
y_range_vals.append(fe_left)
# iterate over stable charge state transitions
for fl in org_x:
charge = max(def_tl[fl])
for chg_ent in self.stable_entries[defnom]:
if chg_ent.charge == charge:
form_en = chg_ent.formation_energy(chemical_potentials=mu_elts, fermi_level=fl)
xy[defnom][0].append(fl)
xy[defnom][1].append(form_en)
y_range_vals.append(form_en)
# establish upper x-bound
last_charge = min(def_tl[org_x[-1]])
for chg_ent in self.stable_entries[defnom]:
if chg_ent.charge == last_charge:
form_en = chg_ent.formation_energy(chemical_potentials=mu_elts, fermi_level=upper_cap)
fe_right = chg_ent.formation_energy(chemical_potentials=mu_elts, fermi_level=xlim[1])
xy[defnom][0].append(upper_cap)
xy[defnom][1].append(form_en)
y_range_vals.append(fe_right)
else:
# no transition - just one stable charge
chg_ent = self.stable_entries[defnom][0]
for x_extrem in [lower_cap, upper_cap]:
xy[defnom][0].append(x_extrem)
xy[defnom][1].append(chg_ent.formation_energy(chemical_potentials=mu_elts, fermi_level=x_extrem))
for x_window in xlim:
y_range_vals.append(chg_ent.formation_energy(chemical_potentials=mu_elts, fermi_level=x_window))
if ylim is None:
window = max(y_range_vals) - min(y_range_vals)
spacer = 0.1 * window
ylim = (min(y_range_vals) - spacer, max(y_range_vals) + spacer)
if len(xy) <= 8:
colors = cm.Dark2(np.linspace(0, 1, len(xy))) # pylint: disable=E1101
else:
colors = cm.gist_rainbow(np.linspace(0, 1, len(xy))) # pylint: disable=E1101
plt.figure()
plt.clf()
width = 12
# plot formation energy lines
for_legend = []
for cnt, defnom in enumerate(xy.keys()):
plt.plot(xy[defnom][0], xy[defnom][1], linewidth=3, color=colors[cnt])
for_legend.append(self.stable_entries[defnom][0].copy())
        # plot transition levels
for cnt, defnom in enumerate(xy.keys()):
x_trans, y_trans = [], []
for x_val, chargeset in self.transition_level_map[defnom].items():
x_trans.append(x_val)
for chg_ent in self.stable_entries[defnom]:
if chg_ent.charge == chargeset[0]:
form_en = chg_ent.formation_energy(chemical_potentials=mu_elts, fermi_level=x_val)
y_trans.append(form_en)
if len(x_trans):
plt.plot(
x_trans,
y_trans,
marker="*",
color=colors[cnt],
markersize=12,
fillstyle="full",
)
# get latex-like legend titles
legends_txt = []
for dfct in for_legend:
flds = dfct.name.split("_")
if flds[0] == "Vac":
base = "$Vac"
sub_str = "_{" + flds[1] + "}$"
elif flds[0] == "Sub":
flds = dfct.name.split("_")
base = "$" + flds[1]
sub_str = "_{" + flds[3] + "}$"
elif flds[0] == "Int":
base = "$" + flds[1]
sub_str = "_{inter}$"
else:
base = dfct.name
sub_str = ""
legends_txt.append(base + sub_str)
if not lg_position:
plt.legend(legends_txt, fontsize=lg_fontsize * width, loc=0)
else:
plt.legend(
legends_txt,
fontsize=lg_fontsize * width,
ncol=3,
loc="lower center",
bbox_to_anchor=lg_position,
)
plt.ylim(ylim)
plt.xlim(xlim)
        plt.plot([xlim[0], xlim[1]], [0, 0], "k-") # black solid line at E_formation = 0
plt.axvline(x=0.0, linestyle="--", color="k", linewidth=3) # black dashed lines for gap edges
plt.axvline(x=self.band_gap, linestyle="--", color="k", linewidth=3)
if fermi_level is not None:
            plt.axvline(x=fermi_level, linestyle="-.", color="k", linewidth=2) # dash-dot line marking the fermi level
plt.xlabel("Fermi energy (eV)", size=ax_fontsize * width)
plt.ylabel("Defect Formation\nEnergy (eV)", size=ax_fontsize * width)
if title:
plt.title("{}".format(title), size=ax_fontsize * width)
if saved:
plt.savefig(str(title) + "FreyplnravgPlot.pdf")
else:
return plt
return None
| gmatteo/pymatgen | pymatgen/analysis/defects/thermodynamics.py | Python | mit | 30,749 |
"""Tankerkoenig sensor integration."""
import logging
from homeassistant.const import ATTR_ATTRIBUTION, ATTR_LATITUDE, ATTR_LONGITUDE
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from .const import DOMAIN, NAME
_LOGGER = logging.getLogger(__name__)
ATTR_BRAND = "brand"
ATTR_CITY = "city"
ATTR_FUEL_TYPE = "fuel_type"
ATTR_HOUSE_NUMBER = "house_number"
ATTR_IS_OPEN = "is_open"
ATTR_POSTCODE = "postcode"
ATTR_STATION_NAME = "station_name"
ATTR_STREET = "street"
ATTRIBUTION = "Data provided by https://creativecommons.tankerkoenig.de"
ICON = "mdi:gas-station"
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the tankerkoenig sensors."""
if discovery_info is None:
return
tankerkoenig = hass.data[DOMAIN]
async def async_update_data():
"""Fetch data from API endpoint."""
try:
return await tankerkoenig.fetch_data()
except LookupError:
raise UpdateFailed("Failed to fetch data")
coordinator = DataUpdateCoordinator(
hass,
_LOGGER,
name=NAME,
update_method=async_update_data,
update_interval=tankerkoenig.update_interval,
)
# Fetch initial data so we have data when entities subscribe
await coordinator.async_refresh()
stations = discovery_info.values()
entities = []
for station in stations:
for fuel in tankerkoenig.fuel_types:
if fuel not in station:
_LOGGER.warning(
"Station %s does not offer %s fuel", station["id"], fuel
)
continue
sensor = FuelPriceSensor(
fuel,
station,
coordinator,
f"{NAME}_{station['name']}_{fuel}",
tankerkoenig.show_on_map,
)
entities.append(sensor)
_LOGGER.debug("Added sensors %s", entities)
async_add_entities(entities)
class FuelPriceSensor(Entity):
"""Contains prices for fuel in a given station."""
def __init__(self, fuel_type, station, coordinator, name, show_on_map):
"""Initialize the sensor."""
self._station = station
self._station_id = station["id"]
self._fuel_type = fuel_type
self._coordinator = coordinator
self._name = name
self._latitude = station["lat"]
self._longitude = station["lng"]
self._city = station["place"]
self._house_number = station["houseNumber"]
self._postcode = station["postCode"]
self._street = station["street"]
self._price = station[fuel_type]
self._show_on_map = show_on_map
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def icon(self):
"""Icon to use in the frontend."""
return ICON
@property
def unit_of_measurement(self):
"""Return unit of measurement."""
return "€"
@property
def should_poll(self):
"""No need to poll. Coordinator notifies of updates."""
return False
@property
def state(self):
"""Return the state of the device."""
# key Fuel_type is not available when the fuel station is closed, use "get" instead of "[]" to avoid exceptions
return self._coordinator.data[self._station_id].get(self._fuel_type)
@property
def unique_id(self) -> str:
"""Return a unique identifier for this entity."""
return f"{self._station_id}_{self._fuel_type}"
@property
def device_state_attributes(self):
"""Return the attributes of the device."""
data = self._coordinator.data[self._station_id]
attrs = {
ATTR_ATTRIBUTION: ATTRIBUTION,
ATTR_BRAND: self._station["brand"],
ATTR_FUEL_TYPE: self._fuel_type,
ATTR_STATION_NAME: self._station["name"],
ATTR_STREET: self._street,
ATTR_HOUSE_NUMBER: self._house_number,
ATTR_POSTCODE: self._postcode,
ATTR_CITY: self._city,
}
if self._show_on_map:
attrs[ATTR_LATITUDE] = self._latitude
attrs[ATTR_LONGITUDE] = self._longitude
if data is not None and "status" in data:
attrs[ATTR_IS_OPEN] = data["status"] == "open"
return attrs
@property
def available(self):
"""Return if entity is available."""
return self._coordinator.last_update_success
async def async_added_to_hass(self):
"""When entity is added to hass."""
self._coordinator.async_add_listener(self.async_write_ha_state)
async def async_will_remove_from_hass(self):
"""When entity will be removed from hass."""
self._coordinator.async_remove_listener(self.async_write_ha_state)
async def async_update(self):
"""Update the entity."""
await self._coordinator.async_request_refresh()
| nkgilley/home-assistant | homeassistant/components/tankerkoenig/sensor.py | Python | apache-2.0 | 5,069 |
import zstackwoodpecker.test_state as ts_header
import os
TestAction = ts_header.TestAction
def path():
return dict(initial_formation="template5", checking_point=8, path_list=[
[TestAction.create_vm, 'vm1', ],
[TestAction.create_volume, 'volume1', 'flag=scsi'],
[TestAction.attach_volume, 'vm1', 'volume1'],
[TestAction.create_volume, 'volume2', 'flag=scsi'],
[TestAction.attach_volume, 'vm1', 'volume2'],
[TestAction.create_volume, 'volume3', 'flag=scsi'],
[TestAction.attach_volume, 'vm1', 'volume3'],
[TestAction.create_vm_snapshot, 'vm1', 'vm1-snapshot1'],
[TestAction.clone_vm, 'vm1', 'vm2', 'full'],
[TestAction.stop_vm, 'vm1'],
[TestAction.use_volume_snapshot, 'volume1-snapshot1'],
[TestAction.start_vm, 'vm1'],
[TestAction.create_vm_snapshot, 'vm1', 'vm1-snapshot5'],
[TestAction.reboot_vm, 'vm2'],
[TestAction.create_vm_snapshot, 'vm1', 'vm1-snapshot9'],
[TestAction.create_image_from_volume, 'vm1', 'vm1-image1'],
[TestAction.create_volume, 'volume7', 'flag=scsi'],
[TestAction.attach_volume, 'vm1', 'volume7'],
[TestAction.stop_vm, 'vm1'],
[TestAction.use_volume_snapshot, 'vm1-snapshot1'],
[TestAction.start_vm, 'vm1'],
[TestAction.delete_vm_snapshot, 'vm1-snapshot1'],
])
'''
The final status:
Running:['vm2', 'vm1']
Stopped:[]
Enabled:['vm1-snapshot5', 'volume1-snapshot5', 'volume2-snapshot5', 'volume3-snapshot5', 'vm1-snapshot9', 'volume1-snapshot9', 'volume2-snapshot9', 'volume3-snapshot9', 'vm1-image1']
attached:['volume1', 'volume2', 'volume3', 'clone@volume1', 'clone@volume2', 'clone@volume3', 'volume7']
Detached:[]
Deleted:['vm1-snapshot1', 'volume1-snapshot1', 'volume2-snapshot1', 'volume3-snapshot1']
Expunged:[]
Ha:[]
Group:
vm_snap2:['vm1-snapshot5', 'volume1-snapshot5', 'volume2-snapshot5', 'volume3-snapshot5']---vm1volume1_volume2_volume3
vm_snap3:['vm1-snapshot9', 'volume1-snapshot9', 'volume2-snapshot9', 'volume3-snapshot9']---vm1volume1_volume2_volume3
'''
| zstackio/zstack-woodpecker | integrationtest/vm/multihosts/vm_snapshots/paths/sblk_path16.py | Python | apache-2.0 | 1,953 |
# stdlib
from unittest import TestCase
import socket
import threading
import Queue
from collections import defaultdict
# 3p
import mock
# project
from dogstatsd import mapto_v6, get_socket_address
from dogstatsd import Server, init
from utils.net import IPV6_V6ONLY, IPPROTO_IPV6
class TestFunctions(TestCase):
def test_mapto_v6(self):
self.assertIsNone(mapto_v6('foo'))
self.assertIsNone(mapto_v6('192.'))
self.assertEqual(mapto_v6('192.168.1.1'), '::ffff:192.168.1.1')
self.assertEqual(mapto_v6('::1'), '::1')
self.assertEqual(mapto_v6('ff00::'), 'ff00::')
def test_get_socket_address(self):
with mock.patch('dogstatsd.socket.getaddrinfo') as getaddrinfo:
getaddrinfo.return_value = [(2, 2, 17, '', ('192.168.1.1', 80))]
self.assertEqual(get_socket_address('example.com', 80), ('::ffff:192.168.1.1', 80, 0, 0))
getaddrinfo.return_value = [(30, 2, 17, '', ('::1', 80, 0, 0))]
self.assertEqual(get_socket_address('example.com', 80), ('::1', 80, 0, 0))
self.assertIsNone(get_socket_address('foo', 80))
@mock.patch('dogstatsd.get_config')
@mock.patch('dogstatsd.Server')
def test_init(self, s, gc):
gc.return_value = defaultdict(str)
gc.return_value['non_local_traffic'] = True
gc.return_value['use_dogstatsd'] = True
init()
# if non_local_traffic was passed, use IPv4 wildcard
s.assert_called_once()
args, _ = s.call_args
self.assertEqual(args[1], '0.0.0.0')
class TestServer(TestCase):
@mock.patch('dogstatsd.get_socket_address')
def test_init(self, nh):
nh.return_value = 'foo'
s = Server(None, 'localhost', '1234')
        nh.assert_called_once_with('localhost', 1234)
self.assertEqual(s.sockaddr, 'foo')
self.assertIsNone(s.socket)
@mock.patch('dogstatsd.select')
def test_start(self, select):
select.select.side_effect = [KeyboardInterrupt, SystemExit]
s1 = Server(mock.MagicMock(), '::1', '1234')
s1.start()
self.assertEqual(s1.socket.family, socket.AF_INET6)
s2 = Server(mock.MagicMock(), '127.0.0.1', '2345')
s2.start()
self.assertEqual(s2.socket.family, socket.AF_INET6)
s2 = Server(mock.MagicMock(), 'foo', '80')
s2.start()
self.assertFalse(s2.running)
def _get_socket(self, addr, port):
sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
sock.setsockopt(IPPROTO_IPV6, IPV6_V6ONLY, 0)
sock.bind((addr, port))
return sock
def test_connection_v4(self):
# start the server with a v4 mapped address
sock = self._get_socket('::ffff:127.0.0.1', 12345)
results = Queue.Queue()
def listen():
while True:
res = sock.recvfrom(1024)
results.put(res)
thread = threading.Thread(target=listen)
thread.daemon = True
thread.start()
# send packets with a v4 client
client_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
client_sock.sendto('msg4', ('127.0.0.1', 12345))
msg = results.get(True, 1)
self.assertEqual(msg[0], 'msg4')
# send packets with a v6 client
client_sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
client_sock.sendto('msg6', ('::1', 12345))
self.assertRaises(Queue.Empty, results.get, True, 1)
def test_connection_v6(self):
# start the server with a v6 address
sock = self._get_socket('::1', 12345)
results = Queue.Queue()
def listen():
while True:
res = sock.recvfrom(1024)
results.put(res)
thread = threading.Thread(target=listen)
thread.daemon = True
thread.start()
# send packets with a v4 client
client_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
client_sock.sendto('msg4', ('127.0.0.1', 12345))
self.assertRaises(Queue.Empty, results.get, True, 1)
# send packets with a v6 client
client_sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
client_sock.sendto('msg6', ('::1', 12345))
msg = results.get(True, 1)
self.assertEqual(msg[0], 'msg6')
| indeedops/dd-agent | tests/core/test_dogstatsd.py | Python | bsd-3-clause | 4,325 |
import website_sale_available_fake_models
import controllers
| odoousers2014/website-addons | website_sale_available_fake/__init__.py | Python | lgpl-3.0 | 61 |
# Test case from bug #1506850
#"When threading is enabled, the interpreter will infinitely wait on a mutex the second
# time this type of extended method is called. Attached is an example
# program that waits on the mutex to be unlocked."
from director_extend import *
class MyObject(SpObject):
def __init__(self):
SpObject.__init__(self)
return
def getFoo(self):
return 123
m = MyObject()
if m.dummy() != 666:
raise RuntimeError, "1st call"
if m.dummy() != 666: # Locked system
raise RuntimeError, "2nd call"
| DGA-MI-SSI/YaCo | deps/swig-3.0.7/Examples/test-suite/python/director_extend_runme.py | Python | gpl-3.0 | 579 |
# READ THE INSTRUCTIONS BELOW BEFORE YOU ASK QUESTIONS
# Arena game mode written by Yourself
# A game of team survival. The last team standing scores a point.
# A map that uses arena needs to be modified to have a starting area for
# each team. A starting area is enclosed and has a gate on it. Each block of a
# gate must have the EXACT same color to work properly. Between each rounds,
# the gate is rebuilt. The gates are destroyed simultaneously at the start of each
# round, releasing the players onto the map. Players are free to switch weapons
# between rounds.
# Spawn locations and gate locations MUST be present in the map metadata (map txt file)
# for arena to work properly.
# The spawn location/s for the green team are set by using the data from the 'arena_green_spawns'
# tuple in the extensions dictionary. Likewise, the blue spawn/s is set with the 'arena_blue_spawns'
# key. 'arena_green_spawns' and 'arena_blue_spawns' are tuples which contain tuples of spawn
# coordinates. Spawn locations are chosen randomly.
# NOTE THAT THE SCRIPT RETAINS BACKWARDS COMPATIBILITY with the old 'arena_green_spawn' and
# 'arena_blue_spawn'
# The 'arena_max_spawn_distance' can be used to set MAX_SPAWN_DISTANCE on a map by map
# basis. See the comment by MAX_SPAWN_DISTANCE for more information
# The locations of gates is also determined in the map metadata. 'arena_gates' is a
# tuple of coordinates in the extension dictionary. Each gate needs only one block
# to be specified (since each gate is made of a uniform color)
# Sample extensions dictionary of an arena map with two gates:
# In this example there is one spawn location for blue and two spawn locations for green.
# extensions = {
# 'arena': True,
# 'arena_blue_spawns' : ((128, 256, 60),),
# 'arena_green_spawns' : ((384, 256, 60), (123, 423, 51)),
# 'arena_gates': ((192, 236, 59), (320, 245, 60))
# }
from pyspades.server import block_action, set_color, block_line
from pyspades import world
from pyspades.constants import *
from twisted.internet import reactor
from twisted.internet.task import LoopingCall
from commands import add, admin
import random
import math
# If ALWAYS_ENABLED is False, then the 'arena' key must be set to True in
# the 'extensions' dictionary in the map metadata
ALWAYS_ENABLED = True
# How long should be spent between rounds in arena (seconds)
SPAWN_ZONE_TIME = 15.0
# How many seconds a team color should be shown after they win a round
# Set to 0 to disable this feature.
TEAM_COLOR_TIME = 4.0
# Maximum duration that a round can last. Time is in seconds. Set to 0 to
# disable the time limit
MAX_ROUND_TIME = 180
MAP_CHANGE_DELAY = 25.0
# Coordinates to hide the tent and the intel
HIDE_COORD = (0, 0, 63)
# Max distance a player can be from a spawn while the players are held within
# the gates. If they get outside this they are teleported to a spawn.
# Used to teleport players who glitch through the map back into the spawns.
MAX_SPAWN_DISTANCE = 15.0
BUILDING_ENABLED = False
if MAX_ROUND_TIME >= 60:
MAX_ROUND_TIME_TEXT = '%.2f minutes' % (float(MAX_ROUND_TIME)/60.0)
else:
MAX_ROUND_TIME_TEXT = str(MAX_ROUND_TIME) + ' seconds'
@admin
def coord(connection):
connection.get_coord = True
return 'Spade a block to get its coordinate.'
add(coord)
def make_color(r, g, b, a = 255):
r = int(r)
g = int(g)
b = int(b)
a = float(a)
return b | (g << 8) | (r << 16) | (int((a / 255.0) * 128.0) << 24)
# Algorithm for minimizing the number of blocks sent for the gates using
# a block line. Probably won't find the optimal solution for shapes that are not
# rectangular prisms but it's better than nothing.
# d = changing index
# c1 = first constant index
# c2 = second constant index
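# Illustrative example (made-up coordinates): partitioning with d = 0 (x changing)
# and c1 = 1, c2 = 2 (y and z held constant), the points (10, 5, 40), (11, 5, 40)
# and (12, 5, 40) share y and z, so they collapse into the single pair
# [(10, 5, 40), (12, 5, 40)] describing one block line.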
def partition(points, d, c1, c2):
row = {}
row_list = []
for point in points:
pc1 = point[c1]
pc2 = point[c2]
if not row.has_key(pc1):
row[pc1] = {}
dic1 = row[pc1]
if not dic1.has_key(pc2):
dic1[pc2] = []
row_list.append(dic1[pc2])
dic2 = dic1[pc2]
dic2.append(point)
row_list_sorted = []
for div in row_list:
row_list_sorted.append(sorted(div, key = lambda k: k[d]))
# row_list_sorted is a list containing lists of points that all have the same
# point[c1] and point[c2] values and are sorted in increasing order according to point[d]
start_block = None
final_blocks = []
for block_list in row_list_sorted:
counter = 0
for i, block in enumerate(block_list):
counter += 1
if start_block is None:
start_block = block
if i + 1 == len(block_list):
next_block = None
else:
next_block = block_list[i + 1]
# Current AoS version seems to have an upper limit of 65 blocks for a block line
if counter == 65 or next_block is None or block[d] + 1 != next_block[d]:
final_blocks.append([start_block, block])
start_block = None
counter = 0
return final_blocks
def minimize_block_line(points):
x = partition(points, 0, 1, 2)
y = partition(points, 1, 0, 2)
z = partition(points, 2, 0, 1)
xlen = len(x)
ylen = len(y)
zlen = len(z)
if xlen <= ylen and xlen <= zlen:
return x
if ylen <= xlen and ylen <= zlen:
return y
if zlen <= xlen and zlen <= ylen:
return z
return x
def get_team_alive_count(team):
count = 0
for player in team.get_players():
if not player.world_object.dead:
count += 1
return count
def get_team_dead(team):
for player in team.get_players():
if not player.world_object.dead:
return False
return True
class CustomException(Exception):
def __init__(self, value):
self.parameter = value
def __str__(self):
return repr(self.parameter)
class Gate:
def __init__(self, x, y, z, protocol_obj):
self.support_blocks = []
self.blocks = []
self.protocol_obj = protocol_obj
map = self.protocol_obj.map
solid, self.color = map.get_point(x, y, z)
if not solid:
raise CustomException('The gate coordinate (%i, %i, %i) is not solid.' % (x, y, z))
self.record_gate(x, y, z)
self.blocks = minimize_block_line(self.blocks)
def build_gate(self):
map = self.protocol_obj.map
set_color.value = make_color(*self.color)
set_color.player_id = block_line.player_id = 32
self.protocol_obj.send_contained(set_color, save = True)
for block_line_ in self.blocks:
start_block, end_block = block_line_
points = world.cube_line(*(start_block + end_block))
if not points:
continue
for point in points:
x, y, z = point
if not map.get_solid(x, y, z):
map.set_point(x, y, z, self.color)
block_line.x1, block_line.y1, block_line.z1 = start_block
block_line.x2, block_line.y2, block_line.z2 = end_block
self.protocol_obj.send_contained(block_line, save = True)
def destroy_gate(self):
map = self.protocol_obj.map
block_action.player_id = 32
block_action.value = DESTROY_BLOCK
for block in self.support_blocks: # optimize wire traffic
if map.get_solid(*block):
map.remove_point(*block)
block_action.x, block_action.y, block_action.z = block
self.protocol_obj.send_contained(block_action, save = True)
for block_line_ in self.blocks: # avoid desyncs
start_block, end_block = block_line_
points = world.cube_line(*(start_block + end_block))
if not points:
continue
for point in points:
x, y, z = point
if map.get_solid(x, y, z):
map.remove_point(x, y, z)
def record_gate(self, x, y, z):
        if x < 0 or x > 511 or y < 0 or y > 511 or z < 0 or z > 63:
return False
solid, color = self.protocol_obj.map.get_point(x, y, z)
if solid:
coordinate = (x, y, z)
if color[0] != self.color[0] or color[1] != self.color[1] or color[2] != self.color[2]:
return True
for block in self.blocks:
if coordinate == block:
return False
self.blocks.append(coordinate)
returns = (self.record_gate(x+1, y, z),
self.record_gate(x-1, y, z),
self.record_gate(x, y+1, z),
self.record_gate(x, y-1, z),
self.record_gate(x, y, z+1),
self.record_gate(x, y, z-1))
if True in returns:
self.support_blocks.append(coordinate)
return False
def apply_script(protocol, connection, config):
class ArenaConnection(connection):
get_coord = False
def on_block_destroy(self, x, y, z, mode):
returned = connection.on_block_destroy(self, x, y, z, mode)
if self.get_coord:
self.get_coord = False
self.send_chat('Coordinate: %i, %i, %i' % (x, y, z))
return False
return returned
def on_disconnect(self):
if self.protocol.arena_running:
if self.world_object is not None:
self.world_object.dead = True
self.protocol.check_round_end()
return connection.on_disconnect(self)
def on_kill(self, killer, type, grenade):
if self.protocol.arena_running and type != TEAM_CHANGE_KILL:
if self.world_object is not None:
self.world_object.dead = True
self.protocol.check_round_end(killer)
return connection.on_kill(self, killer, type, grenade)
def on_team_join(self, team):
returned = connection.on_team_join(self, team)
if returned is False:
return False
if self.protocol.arena_running:
if self.world_object is not None and not self.world_object.dead:
self.world_object.dead = True
self.protocol.check_round_end()
return returned
def on_position_update(self):
if not self.protocol.arena_running:
min_distance = None
pos = self.world_object.position
for spawn in self.team.arena_spawns:
xd = spawn[0] - pos.x
yd = spawn[1] - pos.y
zd = spawn[2] - pos.z
distance = math.sqrt(xd ** 2 + yd ** 2 + zd ** 2)
if min_distance is None or distance < min_distance:
min_distance = distance
if min_distance > self.protocol.arena_max_spawn_distance:
self.set_location(random.choice(self.team.arena_spawns))
self.refill()
return connection.on_position_update(self)
def get_respawn_time(self):
if self.protocol.arena_enabled:
if self.protocol.arena_running:
return -1
else:
return 1
            return connection.get_respawn_time(self)
def respawn(self):
if self.protocol.arena_running:
return False
return connection.respawn(self)
def on_spawn(self, pos):
returned = connection.on_spawn(self, pos)
if self.protocol.arena_running:
self.kill()
return returned
def on_spawn_location(self, pos):
if self.protocol.arena_enabled:
return random.choice(self.team.arena_spawns)
return connection.on_spawn_location(self, pos)
def on_flag_take(self):
if self.protocol.arena_take_flag:
self.protocol.arena_take_flag = False
return connection.on_flag_take(self)
return False
def on_refill(self):
returned = connection.on_refill(self)
if self.protocol.arena_running:
return False
return returned
class ArenaProtocol(protocol):
old_respawn_time = None
old_building = None
old_killing = None
arena_enabled = False
arena_running = False
arena_counting_down = False
arena_take_flag = False
arena_countdown_timers = None
arena_limit_timer = None
arena_old_fog_color = None
arena_max_spawn_distance = MAX_SPAWN_DISTANCE
def check_round_end(self, killer = None, message = True):
if not self.arena_running:
return
for team in (self.green_team, self.blue_team):
if get_team_dead(team):
self.arena_win(team.other, killer)
return
if message:
self.arena_remaining_message()
def arena_time_limit(self):
self.arena_limit_timer = None
green_team = self.green_team
blue_team = self.blue_team
green_count = get_team_alive_count(green_team)
blue_count = get_team_alive_count(blue_team)
if green_count > blue_count:
self.arena_win(green_team)
elif green_count < blue_count:
self.arena_win(blue_team)
else:
self.send_chat('Round ends in a tie.')
self.begin_arena_countdown()
def arena_win(self, team, killer = None):
if not self.arena_running:
return
if self.arena_old_fog_color is None and TEAM_COLOR_TIME > 0:
self.arena_old_fog_color = self.fog_color
self.set_fog_color(team.color)
reactor.callLater(TEAM_COLOR_TIME, self.arena_reset_fog_color)
if killer is None or killer.team is not team:
for player in team.get_players():
if not player.world_object.dead:
killer = player
break
if killer is not None:
self.arena_take_flag = True
killer.take_flag()
killer.capture_flag()
self.send_chat(team.name + ' team wins the round!')
self.begin_arena_countdown()
def arena_reset_fog_color(self):
if self.arena_old_fog_color is not None:
                # Crude fix for the fog-color 'disco' flashing at game end
self.old_fog_color = self.arena_old_fog_color
self.set_fog_color(self.arena_old_fog_color)
self.arena_old_fog_color = None
def arena_remaining_message(self):
if not self.arena_running:
return
green_team = self.green_team
blue_team = self.blue_team
for team in (self.green_team, self.blue_team):
num = get_team_alive_count(team)
team.arena_message = '%i player' % num
if num != 1:
team.arena_message += 's'
team.arena_message += ' on ' + team.name
self.send_chat('%s and %s remain.' % (green_team.arena_message, blue_team.arena_message))
def on_map_change(self, map):
extensions = self.map_info.extensions
if ALWAYS_ENABLED:
self.arena_enabled = True
else:
if extensions.has_key('arena'):
self.arena_enabled = extensions['arena']
else:
self.arena_enabled = False
self.arena_max_spawn_distance = MAX_SPAWN_DISTANCE
if self.arena_enabled:
self.old_respawn_time = self.respawn_time
self.respawn_time = 0
self.old_building = self.building
self.old_killing = self.killing
self.gates = []
if extensions.has_key('arena_gates'):
for gate in extensions['arena_gates']:
self.gates.append(Gate(*gate, protocol_obj=self))
if extensions.has_key('arena_green_spawns'):
self.green_team.arena_spawns = extensions['arena_green_spawns']
elif extensions.has_key('arena_green_spawn'):
self.green_team.arena_spawns = (extensions['arena_green_spawn'],)
else:
raise CustomException('No arena_green_spawns given in map metadata.')
if extensions.has_key('arena_blue_spawns'):
self.blue_team.arena_spawns = extensions['arena_blue_spawns']
elif extensions.has_key('arena_blue_spawn'):
self.blue_team.arena_spawns = (extensions['arena_blue_spawn'],)
else:
raise CustomException('No arena_blue_spawns given in map metadata.')
if extensions.has_key('arena_max_spawn_distance'):
self.arena_max_spawn_distance = extensions['arena_max_spawn_distance']
self.delay_arena_countdown(MAP_CHANGE_DELAY)
self.begin_arena_countdown()
else:
# Cleanup after a map change
if self.old_respawn_time is not None:
self.respawn_time = self.old_respawn_time
if self.old_building is not None:
self.building = self.old_building
if self.old_killing is not None:
self.killing = self.old_killing
self.arena_enabled = False
self.arena_running = False
self.arena_counting_down = False
self.arena_limit_timer = None
self.arena_old_fog_color = None
self.old_respawn_time = None
self.old_building = None
self.old_killing = None
return protocol.on_map_change(self, map)
def build_gates(self):
for gate in self.gates:
gate.build_gate()
def destroy_gates(self):
for gate in self.gates:
gate.destroy_gate()
def arena_spawn(self):
for player in self.players.values():
if player.team.spectator:
continue
if player.world_object.dead:
player.spawn(random.choice(player.team.arena_spawns))
else:
player.set_location(random.choice(player.team.arena_spawns))
player.refill()
def begin_arena_countdown(self):
if self.arena_limit_timer is not None:
if self.arena_limit_timer.cancelled == 0 and self.arena_limit_timer.called == 0:
self.arena_limit_timer.cancel()
self.arena_limit_timer = None
if self.arena_counting_down:
return
self.arena_running = False
self.arena_limit_timer = None
self.arena_counting_down = True
self.killing = False
self.building = False
self.build_gates()
self.arena_spawn()
self.send_chat('The round will begin in %i seconds.' % SPAWN_ZONE_TIME)
self.arena_countdown_timers = [reactor.callLater(SPAWN_ZONE_TIME, self.begin_arena)]
for time in xrange(1, 6):
self.arena_countdown_timers.append(reactor.callLater(SPAWN_ZONE_TIME - time, self.send_chat, str(time)))
def delay_arena_countdown(self, amount):
if self.arena_counting_down:
for timer in self.arena_countdown_timers:
if timer.cancelled == 0 and timer.called == 0:
timer.delay(amount)
def begin_arena(self):
self.arena_counting_down = False
for team in (self.green_team, self.blue_team):
if team.count() == 0:
self.send_chat('Not enough players on the %s team to begin.' % team.name)
self.begin_arena_countdown()
return
self.arena_running = True
self.killing = True
self.building = BUILDING_ENABLED
self.destroy_gates()
self.send_chat('Go!')
if MAX_ROUND_TIME > 0:
self.send_chat('There is a time limit of %s for this round.' % MAX_ROUND_TIME_TEXT)
self.arena_limit_timer = reactor.callLater(MAX_ROUND_TIME, self.arena_time_limit)
def on_base_spawn(self, x, y, z, base, entity_id):
if not self.arena_enabled:
return protocol.on_base_spawn(self, x, y, z, base, entity_id)
return HIDE_COORD
def on_flag_spawn(self, x, y, z, flag, entity_id):
if not self.arena_enabled:
                return protocol.on_flag_spawn(self, x, y, z, flag, entity_id)
return HIDE_COORD
return ArenaProtocol, ArenaConnection
| iamgreaser/pycubed | feature_server/scripts/arena.py | Python | gpl-3.0 | 21,329 |
#
# Copyright (c) 2015 Open-RnD Sp. z o.o.
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use, copy,
# modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from ros3ddevcontroller.param.store import ParametersStore
| open-rnd/ros3d-dev-controller | ros3ddevcontroller/param/__init__.py | Python | mit | 1,203 |
"""
Methods for intelligently silencing a DHCP server on your LAN (and optionally restoring it)
Copyright (C) 2015 Bram Staps (Glasswall B.V.)
This file is part of Dhcpgag.
Dhcpgag is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Dhcpgag is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Dhcpgag. If not, see <http://www.gnu.org/licenses/>.
"""
# prevent annoying scapy warnings
import sys
with open("/dev/null", "w") as f:
out,err = sys.stdout, sys.stderr
sys.stdout, sys.stderr = f, f
from scapy.all import *
sys.stdout, sys.stderr = out,err
# global tuning the delay (in seconds) to wait before sniffing
SNIFF_WAIT = 0.25
#various functions
def randomMac():
n = [random.randint(0,255),random.randint(0,255),random.randint(0,255),random.randint(0,255),random.randint(0,255),random.randint(0,255)]
    n[0] &= 0b11111100 # clear the multicast and locally-administered bits: a factory-style unicast MAC, not a broadcast one
return ":".join( map(lambda x: chr(x).encode("hex"), n) )
def ip2num(ip):
octets = map(int, ip.split("."))
packed = struct.pack( *tuple(["!BBBB"] + octets) )
return struct.unpack("!L", packed)[0]
def num2ip(num):
packed = struct.pack("!L", num)
unpacked = struct.unpack("!BBBB", packed)
return ".".join( map(str, list(unpacked)) )
def ipgen(start,stop):
startIp = ip2num(start)
stopIp = ip2num(stop)
for x in xrange(startIp, stopIp+1):
yield num2ip(x)
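# Quick illustration of the helpers above (a sketch, not part of the module's
# API; the MAC shown is just an example value):
#   randomMac()                          -> e.g. "a4:3f:00:12:9b:c7" (unicast, factory-style)
#   ip2num("10.0.0.1")                   -> 167772161
#   num2ip(167772161)                    -> "10.0.0.1"
#   list(ipgen("10.0.0.1", "10.0.0.3"))  -> ["10.0.0.1", "10.0.0.2", "10.0.0.3"]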
# ARP packets
def mkpktArpWhoHas(ip, mac=None):
p = Ether(dst="FF:FF:FF:FF:FF:FF")/ARP(op="who-has")
if mac:
p.src = mac
p.hwsrc = mac
p.pdst = ip
return p
# DHCP packets
def mkpktDhcpDiscover(dhcpServerMac="FF:FF:FF:FF:FF:FF", mac=None):
p = Ether(dst=dhcpServerMac)/IP(src="0.0.0.0", dst="255.255.255.255")/UDP(sport=68,dport=67)/BOOTP()/DHCP(options=[("message-type","discover"),"end"])
if mac: p.src=mac
p.chaddr = p.src.replace(":","").decode("hex")
return p
def mkpktDhcpRequest(pkt):
p = Ether(dst=pkt.src, src=pkt.dst) \
/IP(src=pkt['IP'].dst, dst=pkt['IP'].src) \
/UDP(sport=68,dport=67) \
/BOOTP() \
/DHCP(options=[("message-type","request"), ("server_id", pkt['IP'].src), ("requested_addr",pkt['IP'].dst), ('lease_time', 3600), "end"])
p.chaddr = p.src.replace(":","").decode("hex")
return p
def mkpktDhcpRevoke(mac, ip, dhcpServerMac="FF:FF:FF:FF:FF:FF"):
p = Ether(src=mac, dst=dhcpServerMac)/IP(src=ip, dst="255.255.255.255")/UDP(sport=68,dport=67)/BOOTP()/DHCP(options=[("message-type","release"),"end"])
p.chaddr = p.src.replace(":","").decode("hex")
p.ciaddr = ip
return p
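# Minimal smoke test (an addition for illustration -- the upstream tools import
# these helpers rather than running this module directly): build a DHCP
# discover packet with a random client MAC and print scapy's one-line summary.
if __name__ == "__main__":
    mac = randomMac()
    pkt = mkpktDhcpDiscover(mac=mac)
    print "DHCP discover from %s: %s" % (mac, pkt.summary())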
| bram-glasswall/dhcpgag | pktgen.py | Python | gpl-3.0 | 3,063 |
#!/usr/bin/python
# coding: utf-8
class Solution(object):
def diffWaysToCompute(self, input):
"""
:type input: str
:rtype: List[int]
"""
res = []
for i in range(len(input)):
c = input[i]
if c == "+" or c == "-" or c == "*":
left = self.diffWaysToCompute(input[0 : i])
right = self.diffWaysToCompute(input[i + 1:])
for l in left:
for r in right:
if c == "+":
res.append(l + r)
elif c == "-":
res.append(l - r)
else:
res.append(l * r)
if not res:
res.append(int(input))
return res
def diffWaysToCompute_cache(self, input):
def dfs(s, cache) :
ops = {'+':lambda x,y:x+y, '-':lambda x,y:x-y, '*':lambda x,y:x*y}
if not cache.has_key(s) :
ret = []
for k, v in enumerate(s) :
if v in '+-*' :
for left in dfs(s[:k], cache) :
for right in dfs(s[k+1:], cache) :
ret.append(ops[v](left,right))
if not ret :
ret.append(int(s))
cache[s] = ret
return cache[s]
return dfs(input, {})
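# Small self-check (not part of the original solution; the expression and the
# expected values follow the classic LeetCode 241 example "2*3-4*5").
if __name__ == "__main__":
    solution = Solution()
    print sorted(solution.diffWaysToCompute("2*3-4*5"))        # [-34, -14, -10, -10, 10]
    print sorted(solution.diffWaysToCompute_cache("2*3-4*5"))  # same values, memoized variant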
| Lanceolata/code-problems | python/leetcode_medium/Question_123_Different_Ways_to_Add_Parentheses.py | Python | mit | 1,443 |
# -*- coding: utf-8 -*-
# Copyright 2018 Simone Rubino - Agile Business Group
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
{
"name": "Stock picking filter lot",
"summary": "In picking out lots' selection, "
"filter lots based on their location",
"version": "11.0.1.0.0",
"category": "Warehouse",
"website": "https://github.com/OCA/stock-logistics-workflow/tree/"
"11.0/stock_filter_lot",
"author": "Agile Business Group, "
"Odoo Community Association (OCA)",
"license": "AGPL-3",
"application": False,
"installable": True,
"depends": [
"stock"
],
"data": [
"views/stock_move_line_view.xml",
"views/stock_picking_view.xml",
]
}
| Domatix/stock-logistics-workflow | stock_picking_filter_lot/__manifest__.py | Python | agpl-3.0 | 767 |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Prints the lowest locally available SDK version greater than or equal to a
given minimum sdk version to standard output.
Usage:
python find_sdk.py 10.6 # Ignores SDKs < 10.6
"""
import os
import re
import subprocess
import sys
from optparse import OptionParser
def parse_version(version_str):
"""'10.6' => [10, 6]"""
return map(int, re.findall(r'(\d+)', version_str))
def main():
parser = OptionParser()
parser.add_option("--verify",
action="store_true", dest="verify", default=False,
help="return the sdk argument and warn if it doesn't exist")
parser.add_option("--sdk_path",
action="store", type="string", dest="sdk_path", default="",
help="user-specified SDK path; bypasses verification")
parser.add_option("--print_sdk_path",
action="store_true", dest="print_sdk_path", default=False,
                    help="Additionally print the path to the SDK (appears first).")
options, args = parser.parse_args()
if len(args) != 1:
parser.error('Please specify a minimum SDK version')
min_sdk_version = args[0]
job = subprocess.Popen(['xcode-select', '-print-path'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
out, err = job.communicate()
if job.returncode != 0:
print >> sys.stderr, out
print >> sys.stderr, err
raise Exception(('Error %d running xcode-select, you might have to run '
'|sudo xcode-select --switch /Applications/Xcode.app/Contents/Developer| '
'if you are using Xcode 4.') % job.returncode)
# The Developer folder moved in Xcode 4.3.
xcode43_sdk_path = os.path.join(
out.rstrip(), 'Platforms/MacOSX.platform/Developer/SDKs')
if os.path.isdir(xcode43_sdk_path):
sdk_dir = xcode43_sdk_path
else:
sdk_dir = os.path.join(out.rstrip(), 'SDKs')
sdks = [re.findall('^MacOSX(10\.\d+)\.sdk$', s) for s in os.listdir(sdk_dir)]
sdks = [s[0] for s in sdks if s] # [['10.5'], ['10.6']] => ['10.5', '10.6']
sdks = [s for s in sdks # ['10.5', '10.6'] => ['10.6']
if parse_version(s) >= parse_version(min_sdk_version)]
if not sdks:
raise Exception('No %s+ SDK found' % min_sdk_version)
best_sdk = sorted(sdks, key=parse_version)[0]
if options.verify and best_sdk != min_sdk_version and not options.sdk_path:
print >> sys.stderr, ''
print >> sys.stderr, ' vvvvvvv'
print >> sys.stderr, ''
print >> sys.stderr, \
'This build requires the %s SDK, but it was not found on your system.' \
% min_sdk_version
print >> sys.stderr, \
'Either install it, or explicitly set mac_sdk in your GYP_DEFINES.'
print >> sys.stderr, ''
print >> sys.stderr, ' ^^^^^^^'
print >> sys.stderr, ''
return min_sdk_version
if options.print_sdk_path:
print subprocess.check_output(['xcodebuild', '-version', '-sdk',
'macosx' + best_sdk, 'Path']).strip()
return best_sdk
if __name__ == '__main__':
if sys.platform != 'darwin':
raise Exception("This script only runs on Mac")
print main()
sys.exit(0)
| biblerule/UMCTelnetHub | tools/mac/find_sdk.py | Python | mit | 3,430 |
#!/usr/bin/env python
'''
usage: alter.py my.pdf
Creates alter.my.pdf
Demonstrates making a slight alteration to a preexisting PDF file.
'''
import sys
import os
import find_pdfrw
from pdfrw import PdfReader, PdfWriter
inpfn, = sys.argv[1:]
outfn = 'alter.' + os.path.basename(inpfn)
trailer = PdfReader(inpfn)
trailer.Info.Title = 'My New Title Goes Here'
writer = PdfWriter()
writer.trailer = trailer
writer.write(outfn)
| kulbirsaini/pdfrw-fork | examples/alter.py | Python | mit | 433 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Validators
"""
from functools import partial
from rebulk.validators import chars_before, chars_after, chars_surround
from . import seps
seps_before = partial(chars_before, seps)
seps_after = partial(chars_after, seps)
seps_surround = partial(chars_surround, seps)
def int_coercable(string):
"""
Check if string can be coerced to int
    :param string: the string to check
    :type string: str
    :return: True if the string can be converted to an int, False otherwise
    :rtype: bool
"""
try:
int(string)
return True
except ValueError:
return False
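# Behaviour sketch (illustrative only):
#   int_coercable("123") -> True
#   int_coercable("12a") -> False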
| Thraxis/pymedusa | lib/guessit/rules/common/validators.py | Python | gpl-3.0 | 558 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
import openerp.addons.decimal_precision as dp
class mrp_product_produce_line(osv.osv_memory):
_name="mrp.product.produce.line"
_description = "Product Produce Consume lines"
_columns = {
'product_id': fields.many2one('product.product', 'Product'),
'product_qty': fields.float('Quantity (in default UoM)'),
'lot_id': fields.many2one('stock.production.lot', 'Lot'),
'produce_id': fields.many2one('mrp.product.produce'),
'track_production': fields.related('product_id', 'track_production', type='boolean'),
}
class mrp_product_produce(osv.osv_memory):
_name = "mrp.product.produce"
_description = "Product Produce"
_columns = {
'product_id': fields.many2one('product.product', type='many2one'),
'product_qty': fields.float('Select Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
'mode': fields.selection([('consume_produce', 'Consume & Produce'),
('consume', 'Consume Only')], 'Mode', required=True,
help="'Consume only' mode will only consume the products with the quantity selected.\n"
"'Consume & Produce' mode will consume as well as produce the products with the quantity selected "
"and it will finish the production order when total ordered quantities are produced."),
'lot_id': fields.many2one('stock.production.lot', 'Lot'), #Should only be visible when it is consume and produce mode
'consume_lines': fields.one2many('mrp.product.produce.line', 'produce_id', 'Products Consumed'),
'track_production': fields.boolean('Track production'),
}
def on_change_qty(self, cr, uid, ids, product_qty, consume_lines, context=None):
"""
When changing the quantity of products to be produced it will
recalculate the number of raw materials needed according
to the scheduled products and the already consumed/produced products
It will return the consume lines needed for the products to be produced
which the user can still adapt
"""
prod_obj = self.pool.get("mrp.production")
uom_obj = self.pool.get("product.uom")
production = prod_obj.browse(cr, uid, context['active_id'], context=context)
consume_lines = []
new_consume_lines = []
if product_qty > 0.0:
product_uom_qty = uom_obj._compute_qty(cr, uid, production.product_uom.id, product_qty, production.product_id.uom_id.id)
consume_lines = prod_obj._calculate_qty(cr, uid, production, product_qty=product_uom_qty, context=context)
for consume in consume_lines:
new_consume_lines.append([0, False, consume])
return {'value': {'consume_lines': new_consume_lines}}
def _get_product_qty(self, cr, uid, context=None):
""" To obtain product quantity
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param context: A standard dictionary
@return: Quantity
"""
if context is None:
context = {}
prod = self.pool.get('mrp.production').browse(cr, uid,
context['active_id'], context=context)
done = 0.0
for move in prod.move_created_ids2:
if move.product_id == prod.product_id:
if not move.scrapped:
done += move.product_uom_qty # As uom of produced products and production order should correspond
return prod.product_qty - done
def _get_product_id(self, cr, uid, context=None):
""" To obtain product id
@return: id
"""
prod=False
if context and context.get("active_id"):
prod = self.pool.get('mrp.production').browse(cr, uid,
context['active_id'], context=context)
return prod and prod.product_id.id or False
def _get_track(self, cr, uid, context=None):
prod = self._get_product_id(cr, uid, context=context)
prod_obj = self.pool.get("product.product")
return prod and prod_obj.browse(cr, uid, prod, context=context).track_production or False
_defaults = {
'product_qty': _get_product_qty,
'mode': lambda *x: 'consume_produce',
'product_id': _get_product_id,
'track_production': _get_track,
}
def do_produce(self, cr, uid, ids, context=None):
production_id = context.get('active_id', False)
assert production_id, "Production Id should be specified in context as a Active ID."
data = self.browse(cr, uid, ids[0], context=context)
self.pool.get('mrp.production').action_produce(cr, uid, production_id,
data.product_qty, data.mode, data, context=context)
return {}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| diogocs1/comps | web/addons/mrp/wizard/mrp_product_produce.py | Python | apache-2.0 | 6,090 |
# ===============================================================================
# Copyright 2014 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from envisage.ui.tasks.task_factory import TaskFactory
from traits.api import List
from pychron.envisage.tasks.base_task_plugin import BaseTaskPlugin
from pychron.external_pipette.protocol import IPipetteManager
from pychron.external_pipette.tasks.external_pipette_task import ExternalPipetteTask
class ExternalPipettePlugin(BaseTaskPlugin):
managers = List(contributes_to="pychron.hardware.managers")
id = "pychron.external_pipette"
_manager = None
def _manager_factory(self, **kw):
if self._manager:
return self._manager
else:
pkg = "pychron.external_pipette.apis_manager"
klass = "SimpleApisManager"
factory = __import__(pkg, fromlist=[klass])
m = getattr(factory, klass)(name="externalpipette")
m.bootstrap()
m.plugin_id = self.id
m.bind_preferences(self.id)
self._manager = m
return m
def _service_offers_default(self):
so = self.service_offer_factory(
protocol=IPipetteManager, factory=self._manager_factory
)
return [
so,
]
def _managers_default(self):
return [
dict(
name="ExternalPipette",
plugin_name="ExternalPipette",
manager=self._manager_factory(),
)
]
def _tasks_default(self):
return [
TaskFactory(
id=self.id,
task_group="hardware",
factory=self._task_factory,
name="External Pipette",
image="pipette",
accelerator="Ctrl+Shift+0",
)
]
def _task_factory(self):
t = ExternalPipetteTask(manager=self._manager, application=self.application)
return t
# ============= EOF =============================================
| USGSDenverPychron/pychron | pychron/external_pipette/tasks/external_pipette_plugin.py | Python | apache-2.0 | 2,745 |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
#
# FreeType high-level python API - Copyright 2011-2015 Nicolas P. Rougier
# Distributed under the terms of the new BSD license.
#
# -----------------------------------------------------------------------------
'''
Freetype enum types
-------------------
FT_PIXEL_MODES: An enumeration type used to describe the format of pixels in a
given bitmap. Note that additional formats may be added in the
future.
FT_GLYPH_BBOX_MODES: The mode how the values of FT_Glyph_Get_CBox are returned.
FT_GLYPH_FORMATS: An enumeration type used to describe the format of a given
glyph image. Note that this version of FreeType only supports
two image formats, even though future font drivers will be
able to register their own format.
FT_ENCODINGS: An enumeration used to specify character sets supported by
charmaps. Used in the FT_Select_Charmap API function.
FT_RENDER_MODES: An enumeration type that lists the render modes supported by
FreeType 2. Each mode corresponds to a specific type of
scanline conversion performed on the outline.
FT_LOAD_TARGETS: A list of values that are used to select a specific hinting
algorithm to use by the hinter. You should OR one of these
values to your 'load_flags' when calling FT_Load_Glyph.
FT_LOAD_FLAGS: A list of bit-field constants used with FT_Load_Glyph to
indicate what kind of operations to perform during glyph
loading.
FT_STYLE_FLAGS: A list of bit-flags used to indicate the style of a given
face. These are used in the 'style_flags' field of FT_FaceRec.
FT_FSTYPES: A list of bit flags that inform client applications of embedding
and subsetting restrictions associated with a font.
FT_FACE_FLAGS: A list of bit flags used in the 'face_flags' field of the
FT_FaceRec structure. They inform client applications of
properties of the corresponding face.
FT_OUTLINE_FLAGS: A list of bit-field constants use for the flags in an
outline's 'flags' field.
FT_OPEN_MODES: A list of bit-field constants used within the 'flags' field of
the FT_Open_Args structure.
FT_KERNING_MODES: An enumeration used to specify which kerning values to return
in FT_Get_Kerning.
FT_STROKER_LINEJOINS: These values determine how two joining lines are rendered
in a stroker.
FT_STROKER_LINECAPS: These values determine how the end of opened sub-paths are
rendered in a stroke.
FT_STROKER_BORDERS: These values are used to select a given stroke border in
FT_Stroker_GetBorderCounts and FT_Stroker_ExportBorder.
FT_LCD_FILTERS: A list of values to identify various types of LCD filters.
TT_PLATFORMS: A list of valid values for the 'platform_id' identifier code in
FT_CharMapRec and FT_SfntName structures.
TT_APPLE_IDS: A list of valid values for the 'encoding_id' for
TT_PLATFORM_APPLE_UNICODE charmaps and name entries.
TT_MAC_IDS: A list of valid values for the 'encoding_id' for
TT_PLATFORM_MACINTOSH charmaps and name entries.
TT_MS_IDS: A list of valid values for the 'encoding_id' for
TT_PLATFORM_MICROSOFT charmaps and name entries.
TT_ADOBE_IDS: A list of valid values for the 'encoding_id' for
TT_PLATFORM_ADOBE charmaps. This is a FreeType-specific
extension!
TT_MAC_LANGIDS: Possible values of the language identifier field in the name
records of the TTF `name' table if the `platform' identifier
code is TT_PLATFORM_MACINTOSH.
TT_MS_LANGIDS: Possible values of the language identifier field in the name
records of the TTF `name' table if the `platform' identifier
code is TT_PLATFORM_MICROSOFT.
TT_NAME_IDS: Possible values of the `name' identifier field in the name
records of the TTF `name' table. These values are platform
independent.
'''
from freetype.ft_enums.ft_fstypes import *
from freetype.ft_enums.ft_face_flags import *
from freetype.ft_enums.ft_encodings import *
from freetype.ft_enums.ft_glyph_bbox_modes import *
from freetype.ft_enums.ft_glyph_formats import *
from freetype.ft_enums.ft_kerning_modes import *
from freetype.ft_enums.ft_lcd_filters import *
from freetype.ft_enums.ft_load_flags import *
from freetype.ft_enums.ft_load_targets import *
from freetype.ft_enums.ft_open_modes import *
from freetype.ft_enums.ft_outline_flags import *
from freetype.ft_enums.ft_pixel_modes import *
from freetype.ft_enums.ft_render_modes import *
from freetype.ft_enums.ft_stroker_borders import *
from freetype.ft_enums.ft_stroker_linecaps import *
from freetype.ft_enums.ft_stroker_linejoins import *
from freetype.ft_enums.ft_style_flags import *
from freetype.ft_enums.tt_adobe_ids import *
from freetype.ft_enums.tt_apple_ids import *
from freetype.ft_enums.tt_mac_ids import *
from freetype.ft_enums.tt_ms_ids import *
from freetype.ft_enums.tt_ms_langids import *
from freetype.ft_enums.tt_mac_langids import *
from freetype.ft_enums.tt_name_ids import *
from freetype.ft_enums.tt_platforms import *
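# Typical use (a sketch, assuming the top-level freetype.Face API exposed by
# this package): the FT_LOAD_* bit flags are OR-ed together when loading a
# glyph, e.g. face.load_char('A', FT_LOAD_RENDER | FT_LOAD_FORCE_AUTOHINT).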
| bitforks/freetype-py | freetype/ft_enums/__init__.py | Python | bsd-3-clause | 5,410 |
#!/usr/bin/env python
import os
import socket
import subprocess
def run_gae():
mydir = os.path.dirname(__file__)
basedir = os.path.join(mydir, '..')
os.chdir(basedir)
kwargs = {
'host': socket.getfqdn(),
'sdk': '~/lib/google_appengine/'
}
cmd = "{sdk}dev_appserver.py --enable_sendmail --host {host} --port 8080 --admin_host {host} --admin_port 8888 hovermom/".format(**kwargs)
status = subprocess.call(cmd, shell=True)
if __name__ == "__main__":
run_gae()
| pizzapanther/HoverMom | scripts/start_gae.py | Python | mit | 492 |
# -*- coding: utf-8 -*-
"""
End-to-end tests for Student's Profile Page.
"""
from contextlib import contextmanager
from datetime import datetime
from unittest import skip
from nose.plugins.attrib import attr
from common.test.acceptance.pages.common.auto_auth import AutoAuthPage
from common.test.acceptance.pages.common.logout import LogoutPage
from common.test.acceptance.pages.lms.account_settings import AccountSettingsPage
from common.test.acceptance.pages.lms.dashboard import DashboardPage
from common.test.acceptance.pages.lms.learner_profile import LearnerProfilePage
from common.test.acceptance.tests.helpers import AcceptanceTest, EventsTestMixin
class LearnerProfileTestMixin(EventsTestMixin):
"""
Mixin with helper methods for testing learner profile pages.
"""
PRIVACY_PUBLIC = u'all_users'
PRIVACY_PRIVATE = u'private'
PUBLIC_PROFILE_FIELDS = ['username', 'country', 'language_proficiencies', 'bio']
PRIVATE_PROFILE_FIELDS = ['username']
PUBLIC_PROFILE_EDITABLE_FIELDS = ['country', 'language_proficiencies', 'bio']
USER_SETTINGS_CHANGED_EVENT_NAME = u"edx.user.settings.changed"
def log_in_as_unique_user(self):
"""
Create a unique user and return the account's username and id.
"""
username = "test_{uuid}".format(uuid=self.unique_id[0:6])
auto_auth_page = AutoAuthPage(self.browser, username=username).visit()
user_id = auto_auth_page.get_user_id()
return username, user_id
def set_public_profile_fields_data(self, profile_page):
"""
Fill in the public profile fields of a user.
"""
# These value_for_dropdown_field method calls used to include
# focus_out = True, but a change in selenium is focusing out of the
# drop down after selection without any more action needed.
profile_page.value_for_dropdown_field('language_proficiencies', 'English')
profile_page.value_for_dropdown_field('country', 'United Arab Emirates')
profile_page.set_value_for_textarea_field('bio', 'Nothing Special')
# Waits here for text to appear/save on bio field
profile_page.wait_for_ajax()
def visit_profile_page(self, username, privacy=None):
"""
Visit a user's profile page and if a privacy is specified and
is different from the displayed value, then set the privacy to that value.
"""
profile_page = LearnerProfilePage(self.browser, username)
# Change the privacy if requested by loading the page and
# changing the drop down
if privacy is not None:
profile_page.visit()
# Change the privacy setting if it is not the desired one already
profile_page.privacy = privacy
# Verify the current setting is as expected
if privacy == self.PRIVACY_PUBLIC:
self.assertEqual(profile_page.privacy, 'all_users')
else:
self.assertEqual(profile_page.privacy, 'private')
if privacy == self.PRIVACY_PUBLIC:
self.set_public_profile_fields_data(profile_page)
# Reset event tracking so that the tests only see events from
# loading the profile page.
self.start_time = datetime.now() # pylint: disable=attribute-defined-outside-init
# Load the page
profile_page.visit()
return profile_page
def set_birth_year(self, birth_year):
"""
Set birth year for the current user to the specified value.
"""
account_settings_page = AccountSettingsPage(self.browser)
account_settings_page.visit()
account_settings_page.wait_for_page()
self.assertEqual(
account_settings_page.value_for_dropdown_field('year_of_birth', str(birth_year), focus_out=True),
str(birth_year)
)
def verify_profile_page_is_public(self, profile_page, is_editable=True):
"""
Verify that the profile page is currently public.
"""
self.assertEqual(profile_page.visible_fields, self.PUBLIC_PROFILE_FIELDS)
if is_editable:
self.assertTrue(profile_page.privacy_field_visible)
self.assertEqual(profile_page.editable_fields, self.PUBLIC_PROFILE_EDITABLE_FIELDS)
else:
self.assertEqual(profile_page.editable_fields, [])
def verify_profile_page_is_private(self, profile_page, is_editable=True):
"""
Verify that the profile page is currently private.
"""
if is_editable:
self.assertTrue(profile_page.privacy_field_visible)
self.assertEqual(profile_page.visible_fields, self.PRIVATE_PROFILE_FIELDS)
def verify_profile_page_view_event(self, requesting_username, profile_user_id, visibility=None):
"""
Verifies that the correct view event was captured for the profile page.
"""
actual_events = self.wait_for_events(
start_time=self.start_time,
event_filter={'event_type': 'edx.user.settings.viewed', 'username': requesting_username},
number_of_matches=1)
self.assert_events_match(
[
{
'username': requesting_username,
'event': {
'user_id': int(profile_user_id),
'page': 'profile',
'visibility': unicode(visibility)
}
}
],
actual_events
)
@contextmanager
def verify_pref_change_event_during(self, username, user_id, setting, **kwargs):
"""Assert that a single setting changed event is emitted for the user_api_userpreference table."""
expected_event = {
'username': username,
'event': {
'setting': setting,
'user_id': int(user_id),
'table': 'user_api_userpreference',
'truncated': []
}
}
expected_event['event'].update(kwargs)
event_filter = {
'event_type': self.USER_SETTINGS_CHANGED_EVENT_NAME,
'username': username,
}
with self.assert_events_match_during(event_filter=event_filter, expected_events=[expected_event]):
yield
def initialize_different_user(self, privacy=None, birth_year=None):
"""
Initialize the profile page for a different test user
"""
username, user_id = self.log_in_as_unique_user()
# Set the privacy for the new user
if privacy is None:
privacy = self.PRIVACY_PUBLIC
self.visit_profile_page(username, privacy=privacy)
# Set the user's year of birth
if birth_year:
self.set_birth_year(birth_year)
# Log the user out
LogoutPage(self.browser).visit()
return username, user_id
@attr(shard=4)
class OwnLearnerProfilePageTest(LearnerProfileTestMixin, AcceptanceTest):
"""
Tests that verify a student's own profile page.
"""
def verify_profile_forced_private_message(self, username, birth_year, message=None):
"""
Verify age limit messages for a user.
"""
if birth_year is None:
birth_year = ""
self.set_birth_year(birth_year=birth_year)
profile_page = self.visit_profile_page(username)
self.assertTrue(profile_page.privacy_field_visible)
if message:
self.assertTrue(profile_page.age_limit_message_present)
else:
self.assertFalse(profile_page.age_limit_message_present)
self.assertIn(message, profile_page.profile_forced_private_message)
@skip("failing on Jenkins")
def test_profile_defaults_to_public(self):
"""
Scenario: Verify that a new user's profile defaults to public.
Given that I am a new user.
When I go to my profile page.
Then I see that the profile visibility is set to public.
"""
username, __ = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username)
self.verify_profile_page_is_public(profile_page)
def assert_default_image_has_public_access(self, profile_page):
"""
Assert that profile image has public access.
"""
self.assertTrue(profile_page.profile_has_default_image)
self.assertTrue(profile_page.profile_has_image_with_public_access())
@skip("failing on Jenkins")
def test_make_profile_public(self):
"""
Scenario: Verify that the user can change their privacy.
Given that I am a registered user
And I visit my private profile page
And I set the profile visibility to public
Then a user preference changed event should be recorded
When I reload the page
Then the profile visibility should be shown as public
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PRIVATE)
with self.verify_pref_change_event_during(
username, user_id, 'account_privacy', old=self.PRIVACY_PRIVATE, new=self.PRIVACY_PUBLIC
):
profile_page.privacy = self.PRIVACY_PUBLIC
# Reload the page and verify that the profile is now public
self.browser.refresh()
profile_page.wait_for_page()
self.verify_profile_page_is_public(profile_page)
def test_make_profile_private(self):
"""
Scenario: Verify that the user can change their privacy.
Given that I am a registered user
And I visit my public profile page
And I set the profile visibility to private
Then a user preference changed event should be recorded
When I reload the page
Then the profile visibility should be shown as private
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
with self.verify_pref_change_event_during(
username, user_id, 'account_privacy', old=None, new=self.PRIVACY_PRIVATE
):
profile_page.privacy = self.PRIVACY_PRIVATE
# Reload the page and verify that the profile is now private
self.browser.refresh()
profile_page.wait_for_page()
self.verify_profile_page_is_private(profile_page)
def test_dashboard_learner_profile_link(self):
"""
Scenario: Verify that my profile link is present on dashboard page and we can navigate to correct page.
Given that I am a registered user.
When I go to Dashboard page.
And I click on username dropdown.
Then I see Profile link in the dropdown menu.
When I click on Profile link.
Then I will be navigated to Profile page.
"""
username, __ = self.log_in_as_unique_user()
dashboard_page = DashboardPage(self.browser)
dashboard_page.visit()
self.assertIn('Profile', dashboard_page.tabs_link_text)
dashboard_page.click_my_profile_link()
my_profile_page = LearnerProfilePage(self.browser, username)
my_profile_page.wait_for_page()
def test_fields_on_my_private_profile(self):
"""
Scenario: Verify that desired fields are shown when looking at her own private profile.
Given that I am a registered user.
And I visit my Profile page.
And I set the profile visibility to private.
And I reload the page.
Then I should see the profile visibility selector dropdown.
Then I see some of the profile fields are shown.
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PRIVATE)
self.verify_profile_page_is_private(profile_page)
self.verify_profile_page_view_event(username, user_id, visibility=self.PRIVACY_PRIVATE)
@skip("failing on Jenkins")
def test_fields_on_my_public_profile(self):
"""
Scenario: Verify that desired fields are shown when looking at her own public profile.
Given that I am a registered user.
And I visit my Profile page.
And I set the profile visibility to public.
And I reload the page.
Then I should see the profile visibility selector dropdown.
Then I see all the profile fields are shown.
And `location`, `language` and `about me` fields are editable.
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self.verify_profile_page_is_public(profile_page)
self.verify_profile_page_view_event(username, user_id, visibility=self.PRIVACY_PUBLIC)
def _test_dropdown_field(self, profile_page, field_id, new_value, displayed_value, mode):
"""
Test behaviour of a dropdown field.
"""
profile_page.value_for_dropdown_field(field_id, new_value, focus_out=True)
self.assertEqual(profile_page.get_non_editable_mode_value(field_id), displayed_value)
self.assertTrue(profile_page.mode_for_field(field_id), mode)
self.browser.refresh()
profile_page.wait_for_page()
self.assertEqual(profile_page.get_non_editable_mode_value(field_id), displayed_value)
self.assertTrue(profile_page.mode_for_field(field_id), mode)
def _test_textarea_field(self, profile_page, field_id, new_value, displayed_value, mode):
"""
Test behaviour of a textarea field.
"""
profile_page.set_value_for_textarea_field(field_id, new_value)
self.assertEqual(profile_page.get_non_editable_mode_value(field_id), displayed_value)
self.assertTrue(profile_page.mode_for_field(field_id), mode)
self.browser.refresh()
profile_page.wait_for_page()
self.assertEqual(profile_page.get_non_editable_mode_value(field_id), displayed_value)
self.assertTrue(profile_page.mode_for_field(field_id), mode)
def test_birth_year_not_set(self):
"""
Verify message if birth year is not set.
Given that I am a registered user.
And birth year is not set for the user.
And I visit my profile page.
Then I should see a message that the profile is private until the year of birth is set.
"""
username, user_id = self.log_in_as_unique_user()
message = "You must specify your birth year before you can share your full profile."
self.verify_profile_forced_private_message(username, birth_year=None, message=message)
self.verify_profile_page_view_event(username, user_id, visibility=self.PRIVACY_PRIVATE)
def test_user_is_under_age(self):
"""
Verify message if user is under age.
Given that I am a registered user.
And birth year is set so that age is less than 13.
And I visit my profile page.
Then I should see a message that the profile is private as I am under thirteen.
"""
username, user_id = self.log_in_as_unique_user()
under_age_birth_year = datetime.now().year - 10
self.verify_profile_forced_private_message(
username,
birth_year=under_age_birth_year,
message='You must be over 13 to share a full profile.'
)
self.verify_profile_page_view_event(username, user_id, visibility=self.PRIVACY_PRIVATE)
def test_user_can_only_see_default_image_for_private_profile(self):
"""
Scenario: Default profile image behaves correctly for under age user.
Given that I am on my profile page with private access
And I can see default image
When I move my cursor to the image
Then i cannot see the upload/remove image text
And i cannot upload/remove the image.
"""
year_of_birth = datetime.now().year - 5
username, __ = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PRIVATE)
self.verify_profile_forced_private_message(
username,
year_of_birth,
message='You must be over 13 to share a full profile.'
)
self.assertTrue(profile_page.profile_has_default_image)
self.assertFalse(profile_page.profile_has_image_with_private_access())
def test_user_can_see_default_image_for_public_profile(self):
"""
Scenario: Default profile image behaves correctly for public profile.
Given that I am on my profile page with public access
And I can see default image
When I move my cursor to the image
Then i can see the upload/remove image text
And i am able to upload new image
"""
username, __ = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self.assert_default_image_has_public_access(profile_page)
def test_user_can_upload_the_profile_image_with_success(self):
"""
Scenario: Upload profile image works correctly.
Given that I am on my profile page with public access
And I can see default image
When I move my cursor to the image
Then i can see the upload/remove image text
When i upload new image via file uploader
Then i can see the changed image
And i can also see the latest image after reload.
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self.assert_default_image_has_public_access(profile_page)
with self.verify_pref_change_event_during(
username, user_id, 'profile_image_uploaded_at', table='auth_userprofile'
):
profile_page.upload_file(filename='image.jpg')
self.assertTrue(profile_page.image_upload_success)
profile_page.visit()
self.assertTrue(profile_page.image_upload_success)
def test_user_can_see_error_for_exceeding_max_file_size_limit(self):
"""
Scenario: Upload profile image does not work for > 1MB image file.
Given that I am on my profile page with public access
And I can see default image
When I move my cursor to the image
Then i can see the upload/remove image text
When i upload new > 1MB image via file uploader
Then i can see the error message for file size limit
And i can still see the default image after page reload.
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self.assert_default_image_has_public_access(profile_page)
profile_page.upload_file(filename='larger_image.jpg')
self.assertEqual(profile_page.profile_image_message, "The file must be smaller than 1 MB in size.")
profile_page.visit()
self.assertTrue(profile_page.profile_has_default_image)
self.assert_no_matching_events_were_emitted({
'event_type': self.USER_SETTINGS_CHANGED_EVENT_NAME,
'event': {
'setting': 'profile_image_uploaded_at',
'user_id': int(user_id),
}
})
def test_user_can_see_error_for_file_size_below_the_min_limit(self):
"""
Scenario: Upload profile image does not work for < 100 Bytes image file.
Given that I am on my profile page with public access
And I can see default image
When I move my cursor to the image
Then i can see the upload/remove image text
When i upload new < 100 Bytes image via file uploader
Then i can see the error message for minimum file size limit
And i can still see the default image after page reload.
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self.assert_default_image_has_public_access(profile_page)
profile_page.upload_file(filename='list-icon-visited.png')
self.assertEqual(profile_page.profile_image_message, "The file must be at least 100 bytes in size.")
profile_page.visit()
self.assertTrue(profile_page.profile_has_default_image)
self.assert_no_matching_events_were_emitted({
'event_type': self.USER_SETTINGS_CHANGED_EVENT_NAME,
'event': {
'setting': 'profile_image_uploaded_at',
'user_id': int(user_id),
}
})
def test_user_can_see_error_for_wrong_file_type(self):
"""
Scenario: Upload profile image does not work for wrong file types.
Given that I am on my profile page with public access
And I can see default image
When I move my cursor to the image
Then i can see the upload/remove image text
When i upload new csv file via file uploader
Then i can see the error message for wrong/unsupported file type
And i can still see the default image after page reload.
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self.assert_default_image_has_public_access(profile_page)
profile_page.upload_file(filename='generic_csv.csv')
self.assertEqual(
profile_page.profile_image_message,
"The file must be one of the following types: .gif, .png, .jpeg, .jpg."
)
profile_page.visit()
self.assertTrue(profile_page.profile_has_default_image)
self.assert_no_matching_events_were_emitted({
'event_type': self.USER_SETTINGS_CHANGED_EVENT_NAME,
'event': {
'setting': 'profile_image_uploaded_at',
'user_id': int(user_id),
}
})
def test_user_can_remove_profile_image(self):
"""
Scenario: Remove profile image works correctly.
Given that I am on my profile page with public access
And I can see default image
When I move my cursor to the image
Then i can see the upload/remove image text
When i click on the remove image link
Then i can see the default image
And i can still see the default image after page reload.
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self.assert_default_image_has_public_access(profile_page)
with self.verify_pref_change_event_during(
username, user_id, 'profile_image_uploaded_at', table='auth_userprofile'
):
profile_page.upload_file(filename='image.jpg')
self.assertTrue(profile_page.image_upload_success)
with self.verify_pref_change_event_during(
username, user_id, 'profile_image_uploaded_at', table='auth_userprofile'
):
self.assertTrue(profile_page.remove_profile_image())
self.assertTrue(profile_page.profile_has_default_image)
profile_page.visit()
self.assertTrue(profile_page.profile_has_default_image)
def test_user_cannot_remove_default_image(self):
"""
Scenario: Remove profile image does not works for default images.
Given that I am on my profile page with public access
And I can see default image
When I move my cursor to the image
Then i can see only the upload image text
And i cannot see the remove image text
"""
username, __ = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self.assert_default_image_has_public_access(profile_page)
self.assertFalse(profile_page.remove_link_present)
def test_eventing_after_multiple_uploads(self):
"""
Scenario: An event is fired when a user with a profile image uploads another image
Given that I am on my profile page with public access
And I upload a new image via file uploader
When I upload another image via the file uploader
Then two upload events have been emitted
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self.assert_default_image_has_public_access(profile_page)
with self.verify_pref_change_event_during(
username, user_id, 'profile_image_uploaded_at', table='auth_userprofile'
):
profile_page.upload_file(filename='image.jpg')
self.assertTrue(profile_page.image_upload_success)
with self.verify_pref_change_event_during(
username, user_id, 'profile_image_uploaded_at', table='auth_userprofile'
):
profile_page.upload_file(filename='image.jpg', wait_for_upload_button=False)
@attr(shard=4)
class DifferentUserLearnerProfilePageTest(LearnerProfileTestMixin, AcceptanceTest):
"""
Tests that verify viewing the profile page of a different user.
"""
def test_different_user_private_profile(self):
"""
Scenario: Verify that desired fields are shown when looking at a different user's private profile.
Given that I am a registered user.
And I visit a different user's private profile page.
Then I shouldn't see the profile visibility selector dropdown.
Then I see some of the profile fields are shown.
"""
different_username, different_user_id = self.initialize_different_user(privacy=self.PRIVACY_PRIVATE)
username, __ = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(different_username)
self.verify_profile_page_is_private(profile_page, is_editable=False)
self.verify_profile_page_view_event(username, different_user_id, visibility=self.PRIVACY_PRIVATE)
def test_different_user_under_age(self):
"""
Scenario: Verify that an under age user's profile is private to others.
Given that I am a registered user.
And I visit an under age user's profile page.
Then I shouldn't see the profile visibility selector dropdown.
Then I see that only the private fields are shown.
"""
under_age_birth_year = datetime.now().year - 10
different_username, different_user_id = self.initialize_different_user(
privacy=self.PRIVACY_PUBLIC,
birth_year=under_age_birth_year
)
username, __ = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(different_username)
self.verify_profile_page_is_private(profile_page, is_editable=False)
self.verify_profile_page_view_event(username, different_user_id, visibility=self.PRIVACY_PRIVATE)
def test_different_user_public_profile(self):
"""
Scenario: Verify that desired fields are shown when looking at a different user's public profile.
Given that I am a registered user.
And I visit a different user's public profile page.
Then I shouldn't see the profile visibility selector dropdown.
Then all the profile fields are shown.
Then I shouldn't see the profile visibility selector dropdown.
Also `location`, `language` and `about me` fields are not editable.
"""
different_username, different_user_id = self.initialize_different_user(privacy=self.PRIVACY_PUBLIC)
username, __ = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(different_username)
profile_page.wait_for_public_fields()
self.verify_profile_page_is_public(profile_page, is_editable=False)
self.verify_profile_page_view_event(username, different_user_id, visibility=self.PRIVACY_PUBLIC)
def test_badge_share_modal(self):
username = 'testcert'
AutoAuthPage(self.browser, username=username).visit()
profile_page = self.visit_profile_page(username)
profile_page.display_accomplishments()
badge = profile_page.badges[0]
badge.display_modal()
badge.close_modal()
@attr('a11y')
class LearnerProfileA11yTest(LearnerProfileTestMixin, AcceptanceTest):
"""
Class to test learner profile accessibility.
"""
def test_editable_learner_profile_a11y(self):
"""
Test the accessibility of the editable version of the profile page
(user viewing her own public profile).
"""
username, _ = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username)
profile_page.a11y_audit.check_for_accessibility_errors()
profile_page.make_field_editable('language_proficiencies')
profile_page.a11y_audit.check_for_accessibility_errors()
profile_page.make_field_editable('bio')
profile_page.a11y_audit.check_for_accessibility_errors()
def test_read_only_learner_profile_a11y(self):
"""
Test the accessibility of the read-only version of a public profile page
(user viewing someone else's profile page).
"""
# initialize_different_user should cause country, language, and bio to be filled out (since
# privacy is public). It doesn't appear that this is happening, although the method
# works in regular bokchoy tests. Perhaps a problem with phantomjs? So this test is currently
# only looking at a read-only profile page with a username.
different_username, _ = self.initialize_different_user(privacy=self.PRIVACY_PUBLIC)
self.log_in_as_unique_user()
profile_page = self.visit_profile_page(different_username)
profile_page.a11y_audit.check_for_accessibility_errors()
def test_badges_accessibility(self):
"""
Test the accessibility of the badge listings and sharing modal.
"""
username = 'testcert'
AutoAuthPage(self.browser, username=username).visit()
profile_page = self.visit_profile_page(username)
profile_page.display_accomplishments()
profile_page.a11y_audit.check_for_accessibility_errors()
profile_page.badges[0].display_modal()
profile_page.a11y_audit.check_for_accessibility_errors()
| Stanford-Online/edx-platform | common/test/acceptance/tests/lms/test_learner_profile.py | Python | agpl-3.0 | 30,572 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# 7 i TRIA
# Copyright (C) 2011 - 2012 7 i TRIA <http://www.7itria.cat>
# Avanzosc - Avanced Open Source Consulting
# Copyright (C) 2011 - 2012 Avanzosc <http://www.avanzosc.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
import decimal_precision as dp
import netsvc
import pooler
from osv import fields, osv, orm
from tools.translate import _
class account_invoice(osv.osv):
_inherit = 'account.invoice'
_columns = {'state': fields.selection([
('draft','Draft'),
('proforma','Pro-forma'),
('proforma2','Pro-forma'),
('open','Open'),
('paid','Paid'),
('unpaid','Unpaid'),
('cancel','Cancelled')
],'State', select=True, readonly=True,
        help=' * The \'Draft\' state is used when a user is encoding a new and unconfirmed invoice. \
            \n* The \'Pro-forma\' state is used when the invoice is in the Pro-forma state; the invoice does not have an invoice number yet. \
            \n* The \'Open\' state is used when the user validates the invoice; an invoice number is generated and the invoice stays open until the user pays it. \
            \n* The \'Paid\' state is set automatically when the invoice is paid. \
            \n* The \'Unpaid\' state is set automatically when the invoice is not fully paid. \
            \n* The \'Cancelled\' state is used when the user cancels the invoice.'),
}
def test_paid2(self, cr, uid, ids, *args):
res = False
if ids:
inv = self.browse(cr, uid, ids[0])
residual = inv._amount_residual
len_pay = len(inv.move_lines)
print len_pay
if residual != 0.0 and len_pay > 1:
res = True
return res
def action_unpaid(self, cr, uid, ids, *args):
res = False
for inv_id in ids:
if self.test_paid2(cr, uid, [inv_id]):
wf_service = netsvc.LocalService("workflow")
wf_service.trg_validate(uid, 'account.invoice', inv_id, 'open_test2', cr)
res = True
return res
account_invoice()
| avanzosc/avanzosc6.1 | l10n_es_devolucion_remesas/invoice.py | Python | agpl-3.0 | 2,919 |
from unicodedata import category
from django.core.management.base import BaseCommand, CommandError
from workflows.models import Category, AbstractWidget, AbstractInput, AbstractOutput, AbstractOption
from django.core import serializers
from optparse import make_option
import uuid
import os
import sys
from django.conf import settings
import json
from .export_package import export_package
class Command(BaseCommand):
args = 'package_name'
help = 'Exports all packages.'
def handle(self, *args, **options):
packages = []
for app in settings.INSTALLED_APPS:
if 'workflows.' in app:
packages.append(app)
for package in packages:
package_name = package.split('workflows.')[1]
self.stdout.write("Exporting package "+package_name+"\n")
export_package(package_name,self.stdout)
#temporary fix
#self.stdout.write("Exporting nltoolkit package \n")
#export_package('tf_core.nltoolkit',self.stdout, dest_folder='C:\\work\\textflows\\tf_core\\nltoolkit') | xflows/textflows | workflows/management/commands/export_all.py | Python | mit | 1,124 |
# -*- coding: utf-8 -*-
#
import ftplib
import os
import re
import shutil
import tarfile
import time
from pathlib import Path
from subprocess import call
from socket import gaierror
import paramiko
from paramiko.ssh_exception import SSHException, AuthenticationException
import requests
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.sites.models import Site
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404, render
from django.template import RequestContext
from django.urls import reverse
from django.utils.translation import ugettext as _
from django.core.management import call_command
import statify.settings as statify_settings
from statify.forms import DeployForm
from statify.models import DeploymentHost, ExternalURL, Release, DeploymentTypes, AuthTypes
CURRENT_SITE = Site.objects.get_current()
@login_required()
def make_release(request):
timestamp = '%s' % (time.time())
htdocs = 'htdocs.%s.tar.gz' % (timestamp)
upload_path = statify_settings.STATIFY_UPLOAD_PATH
absolute_path = os.path.join(settings.MEDIA_ROOT, statify_settings.STATIFY_UPLOAD_PATH)
external_url_list = ExternalURL.objects.filter(is_valid=True)
# If htdocs already exists, remove
if os.path.isdir(settings.MEDUSA_DEPLOY_DIR):
shutil.rmtree(settings.MEDUSA_DEPLOY_DIR, ignore_errors=True)
os.makedirs(settings.MEDUSA_DEPLOY_DIR)
else:
os.makedirs(settings.MEDUSA_DEPLOY_DIR)
version_file = open(os.path.join(settings.MEDUSA_DEPLOY_DIR, 'version.txt'), 'w')
version_file.write(str(timestamp))
version_file.close()
# Call command to run medusa and statify all registered urls
call([
'python',
'manage.py',
'staticsitegen',
statify_settings.STATIFY_BUILD_SETTINGS
])
# Create files from external urls
for external_url in external_url_list:
path = os.path.join(settings.MEDUSA_DEPLOY_DIR, '/'.join(external_url.path[1:].split('/')[:-1]))
filepath = os.path.join(settings.MEDUSA_DEPLOY_DIR, external_url.path[1:])
# If path does not exists, create it
if not os.path.isdir(path):
os.makedirs(path)
        # If the file exists, remove it (need to be sure the file is clean)
if os.path.exists(filepath):
os.remove(filepath)
# Make request and get content
r = requests.get(external_url.url)
content = r.content
# Write content to file and save
filename = open(filepath, 'w+')
filename.write(content)
filename.close()
    # Copy root files to the built htdocs
if os.path.isdir(statify_settings.STATIFY_ROOT_FILES):
files = os.listdir(statify_settings.STATIFY_ROOT_FILES)
for file in files:
filepath = os.path.join(statify_settings.STATIFY_ROOT_FILES, file)
shutil.copy(filepath, settings.MEDUSA_DEPLOY_DIR)
    # Copy static files to the built htdocs
if not statify_settings.STATIFY_IGNORE_STATIC:
shutil.copytree(
os.path.join(statify_settings.STATIFY_PROJECT_DIR, 'static'),
os.path.join(settings.MEDUSA_DEPLOY_DIR, 'static'),
ignore=shutil.ignore_patterns(*statify_settings.STATIFY_EXCLUDED_STATIC),
dirs_exist_ok=True,
)
    # Copy media files to the built htdocs
if not statify_settings.STATIFY_IGNORE_MEDIA:
shutil.copytree(
os.path.join(settings.STATIFY_PROJECT_DIR, 'media'),
os.path.join(settings.MEDUSA_DEPLOY_DIR, 'media'),
ignore=shutil.ignore_patterns('statify'),
dirs_exist_ok=True,
)
# Create tar.gz from htdocs and move it to media folder
dirlist = os.listdir(settings.MEDUSA_DEPLOY_DIR)
archive = tarfile.open(htdocs, 'w:gz')
for obj in dirlist:
path = os.path.join(settings.MEDUSA_DEPLOY_DIR, obj)
archive.add(path, arcname=obj)
archive.close()
if not os.path.isdir(absolute_path):
os.makedirs(absolute_path)
shutil.move(os.path.join(settings.STATIFY_PROJECT_DIR, htdocs), os.path.join(absolute_path, htdocs))
# Remove htdocs and tmp dir
shutil.rmtree(settings.MEDUSA_DEPLOY_DIR, ignore_errors=True,)
# shutil.rmtree(os.path.join(settings.MEDIA_ROOT, 'tmp'))
# Save new release object
release = Release(user=request.user, timestamp=timestamp)
release.archive = u'%s%s' % (upload_path, htdocs)
release.save()
messages.success(request, _('Release %s has been created successfully.') % (release.date_created))
return HttpResponseRedirect(reverse('admin:statify_release_change', args=(release.pk,)))
@login_required()
def deploy_select_release(request, release_id):
if request.POST:
form = DeployForm(request.POST)
if form.is_valid():
form.cleaned_data
host = request.POST['deploymenthost']
return HttpResponseRedirect(u'/admin/statify/release/%s/deploy/%s/' % (release_id, host))
else:
form = DeployForm()
return render(
request=request,
template_name = 'admin/statify/release/deploy_form.html',
context = {
'form': form,
'release_id': release_id
}
)
@login_required()
def deploy_release(request, release_id, deploymenthost_id):
release = get_object_or_404(Release, pk=release_id)
deploymenthost = get_object_or_404(DeploymentHost, pk=deploymenthost_id)
archive = os.path.join(settings.MEDIA_ROOT, u'%s' % release.archive)
directory = deploymenthost.path.split('/')[-1]
tmp_path = os.path.join(settings.MEDUSA_DEPLOY_DIR, '..', 'deploy', release.timestamp)
if not os.path.isdir(tmp_path):
os.makedirs(tmp_path)
else:
shutil.rmtree(tmp_path, ignore_errors=True)
os.makedirs(tmp_path)
call(['tar', 'xfz', archive, '-C', tmp_path])
# Replace hostnames
path_of_tmp_path = Path(tmp_path)
html_files = [item for item in path_of_tmp_path.glob('**/*.html') if item.is_file()]
xml_files = [item for item in path_of_tmp_path.glob('**/*.xml') if item.is_file()]
json_files = [item for item in path_of_tmp_path.glob('**/*.json') if item.is_file()]
css_files = [item for item in path_of_tmp_path.glob('**/*.css') if item.is_file()]
txt_files = [item for item in path_of_tmp_path.glob('**/*.txt') if item.is_file()]
all_files = html_files + xml_files + json_files + css_files + txt_files
for file in all_files:
fin = open(file, "rt")
data = fin.read()
data = re.sub(r'(http|https):\/\/({})'.format(CURRENT_SITE.domain), '{}://{}'.format(deploymenthost.target_scheme, deploymenthost.target_domain), data)
data = re.sub(r'{}'.format(CURRENT_SITE.domain), deploymenthost.target_domain, data)
fin.close()
fin = open(file, "wt")
fin.write(data)
fin.close()
# Local deployment
if deploymenthost.type == DeploymentTypes.LOCAL:
if not os.path.isdir(deploymenthost.path):
os.makedirs(deploymenthost.path)
else:
shutil.rmtree(deploymenthost.path, ignore_errors=True)
os.makedirs(deploymenthost.path)
files = os.listdir(tmp_path)
for file in files:
shutil.move(os.path.join(tmp_path, file), deploymenthost.path)
# FTP deployment
elif deploymenthost.type == DeploymentTypes.FTP:
# Check if host is available
try:
ftp = ftplib.FTP(deploymenthost.host)
except:
messages.error(request,
_('Deployment host "%s" is not available.') % (deploymenthost.host))
return HttpResponseRedirect(u'/admin/statify/release/%s/deploy/select/' % (release.id))
try:
ftp.login(deploymenthost.user, deploymenthost.password)
except:
messages.error(request,
_('Your login information to %s is not correct.') % (deploymenthost.host))
return HttpResponseRedirect(u'/admin/statify/release/%s/deploy/select/' % (release.id))
# Check if directory exists
filelist = []
directory_exist = False
ftp.retrlines('LIST', filelist.append)
for f in filelist:
if directory in f.split()[-1]:
directory_exist = True
# If not, mkdir it
if not directory_exist:
messages.error(request,
_('The target path "%s" does not exist.') % (deploymenthost.path))
return HttpResponseRedirect(u'/admin/statify/release/%s/deploy/select/' % (release.id))
# Upload all
paths = os.listdir(tmp_path)
for path in paths:
src_dir = os.path.join(tmp_path, path)
call(['ncftpput', '-R',
'-u', deploymenthost.user, '-p', deploymenthost.password,
deploymenthost.host, deploymenthost.path,
src_dir])
# Remove after upload is finished
shutil.rmtree(tmp_path, ignore_errors=True)
# SSH deployment
elif deploymenthost.type == DeploymentTypes.SSH:
# Check if host is available
try:
client = paramiko.SSHClient()
client.load_system_host_keys()
if deploymenthost.authtype == AuthTypes.KEY:
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
if deploymenthost.password:
key = paramiko.RSAKey.from_private_key_file(deploymenthost.ssh_key_path, password=deploymenthost.password)
else:
key = paramiko.RSAKey.from_private_key_file(deploymenthost.ssh_key_path)
client.connect(
hostname=deploymenthost.host,
username=deploymenthost.user,
pkey=key
)
elif deploymenthost.authtype == AuthTypes.PASSWORD:
client.connect(
hostname=deploymenthost.host,
username=deploymenthost.user,
password=deploymenthost.password
)
channel = client.get_transport()
scp = paramiko.SFTPClient.from_transport(channel)
except AuthenticationException:
messages.error(
request,
_('Authentication for host "{}@{}" failed.').format(deploymenthost.masked_user, deploymenthost.host)
)
return HttpResponseRedirect(u'/admin/statify/release/%s/deploy/select/' % (release.id))
except gaierror as e:
messages.error(
request,
_('Unknown host "{}".').format(deploymenthost.host)
)
return HttpResponseRedirect(u'/admin/statify/release/%s/deploy/select/' % (release.id))
except SSHException as e:
messages.error(
request,
_('Deployment to host "{}" failed with SSH Exception: {}').format(deploymenthost.host, e)
)
return HttpResponseRedirect(u'/admin/statify/release/%s/deploy/select/' % (release.id))
# Check if directory exists
try:
scp.stat(deploymenthost.path)
except:
messages.error(request,
_('Target path "{}" does not exist on host "{}".').format(deploymenthost.path, deploymenthost.host))
return HttpResponseRedirect(u'/admin/statify/release/%s/deploy/select/' % (release.id))
src = tmp_path + '/'
destination = '{}@{}:{}'.format(deploymenthost.user, deploymenthost.host, deploymenthost.path)
rsync_args = ['rsync', '-av', '--update', '--delete']
if deploymenthost.ssh_key_path:
rsync_args.append('-e "ssh -i {}"'.format(deploymenthost.ssh_key_path))
if deploymenthost.chmod:
rsync_args.append('--chmod={}'.format(deploymenthost.chmod))
if deploymenthost.chown:
owner = deploymenthost.chown.split(':')[0]
group = deploymenthost.chown.split(':')[1]
rsync_args.append('--usermap=*:{}'.format(owner))
rsync_args.append('--groupmap=*:{}'.format(group))
rsync_command = rsync_args + [src, destination]
call(rsync_command)
# Remove trash
shutil.rmtree(tmp_path, ignore_errors=True)
shutil.rmtree(settings.MEDUSA_DEPLOY_DIR, ignore_errors=True)
messages.success(request,
_('Release "%s" was deployed successfully.') % (release.timestamp))
return HttpResponseRedirect(u'/admin/statify/release/')
| reellz/django-statify | statify/views.py | Python | bsd-3-clause | 12,635 |
#!/usr/bin/env python
from unittest import TestCase, main
from qiime.parallel.merge_otus import mergetree, mergeorder, \
initial_nodes_to_merge, initial_has_dependencies, job_complete, \
torque_job, local_job, start_job, JobError, reset_internal_count
import os
__author__ = "Daniel McDonald"
__copyright__ = "Copyright 2013, The QIIME Project"
__credits__ = ["Daniel McDonald", "Jai Ram Rideout"]
__license__ = "GPL"
__version__ = "1.8.0-dev"
__maintainer__ = "Daniel McDonald"
__email__ = "[email protected]"
class MergeTests(TestCase):
def setUp(self):
reset_internal_count()
def test_mergetree(self):
"""construct a merge subtreetree with various properties set"""
exp = "(A,B)0;"
obs = mergetree(['A.biom'], ['B.biom'], 'foo')
self.assertEqual(obs.getNewick(escape_name=False), exp)
self.assertEqual(obs.Children[0].Name, 'A')
self.assertEqual(obs.Children[0].FilePath, 'A.biom')
self.assertEqual(obs.Children[0].Processed, False)
self.assertEqual(obs.Children[0].PollPath, None)
self.assertEqual(obs.Children[0].FullCommand, None)
self.assertEqual(obs.Children[1].Name, 'B')
self.assertEqual(obs.Children[1].FilePath, 'B.biom')
self.assertEqual(obs.Children[1].Processed, False)
self.assertEqual(obs.Children[1].PollPath, None)
self.assertEqual(obs.Children[1].FullCommand, None)
self.assertEqual(obs.Name, '0')
self.assertEqual(obs.FilePath, 'foo/0.biom')
self.assertEqual(obs.Processed, False)
self.assertEqual(obs.PollPath, 'foo/0.biom.poll')
self.assertEqual(obs.FullCommand, None)
def test_mergeorder(self):
"""recursively build and join all the subtrees"""
exp = "((A,B)0,(C,(D,E)1)2)3;"
obs = mergeorder(['A', 'B', 'C', 'D', 'E'], 'foo')
self.assertEqual(obs.getNewick(escape_name=False), exp)
def test_initial_nodes_to_merge(self):
"""determine the first nodes to merge"""
t = mergeorder(['A', 'B', 'C', 'D', 'E'], 'foo')
exp = set([t.Children[0], t.Children[1].Children[1]])
obs = initial_nodes_to_merge(t)
self.assertEqual(obs, exp)
def test_initial_has_dependencies(self):
"""determine initial has_dependencies"""
t = mergeorder(['A', 'B', 'C', 'D', 'E'], 'foo')
exp = [t, t.Children[1]]
obs = initial_has_dependencies(t, initial_nodes_to_merge(t))
self.assertEqual(obs, exp)
def test_job_complete(self):
"""check if a job is complete"""
t = mergeorder(['A', 'B', 'C', 'D', 'E'], 'foo')
self.assertFalse(job_complete(t))
self.assertFalse(job_complete(t.Children[0]))
self.assertFalse(job_complete(t.Children[1].Children[1]))
self.assertRaises(JobError, job_complete, t.Children[0].Children[0])
f = 'test_parallel_merge_otus_JOB_COMPLETE_TEST.poll'
self.assertFalse(os.path.exists(f))
testf = open(f, 'w')
testf.write('0\n')
testf.close()
t.PollPath = f
t.StartTime = 10
self.assertTrue(job_complete(t))
self.assertNotEqual(t.EndTime, None)
self.assertNotEqual(t.TotalTime, None)
testf = open(f, 'w')
testf.write('1\n')
testf.close()
self.assertRaises(JobError, job_complete, t)
t.Processed = False
self.assertRaises(JobError, job_complete, t)
os.remove(f)
def test_torque_job(self):
"""wrap a torque job"""
exp = 'echo "abc; echo $? > xyz" | qsub -k oe -N MOTU -q queue'
obs = torque_job('abc', 'xyz', '123', 'queue')
self.assertEqual(obs, exp)
def test_start_job(self):
"""start a job"""
exp = 'echo "y -i A.biom,B.biom -o foo/0.biom; echo $? > foo/0.biom.poll" | qsub -k oe -N MOTU -q ignored'
t = mergeorder(['A.biom', 'B.biom', 'C', 'D', 'E'], 'foo')
start_job(t.Children[0], 'y', 'ignored', torque_job, False)
self.assertEqual(t.Children[0].FullCommand, exp)
def test_local_job(self):
"""fire off a local job"""
exp = "abc; echo $? > xyz"
obs = local_job('abc', 'xyz', 'notused', 'notused')
self.assertEqual(obs, exp)
if __name__ == '__main__':
main()
| wasade/qiime | tests/test_parallel/test_merge_otus.py | Python | gpl-2.0 | 4,310 |
# -*- coding: utf-8 -*-
import scrapy
import re
import urllib.request
from scrapy.http import Request
from spider_jd_phone.items import SpiderJdPhoneItem
class JdPhoneSpider(scrapy.Spider):
name = 'jd_phone'
allowed_domains = ['jd.com']
str_keyword = '手机京东自营'
encode_keyword = urllib.request.quote(str_keyword)
url = 'https://search.jd.com/Search?keyword=' + encode_keyword + '&enc=utf-8&qrst' \
'=1&rt' \
'=1&stop=1&spm=2.1.0&vt=2&page=1&s=1&click=0'
# start_urls = [url]
# def start_requests(self):
# print(">>>进行第一次爬取<<<")
# print("爬取网址:%s" % self.url)
# yield Request(self.encode_url,
# headers={
# 'User-Agent': "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_0) "
# "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.167 Safari/537.36"
# })
    # Set the uid of the user to crawl, to prepare for building the crawl URL later
# uid = "19940007"
# start_urls = ["http://19940007.blog.hexun.com/p1/default.html"]
def start_requests(self):
print(">>>进行第一次爬取<<<")
# 首次爬取模拟成浏览器进行
# yield Request(
# "http://" + str(self.uid) + ".blog.hexun.com/p1/default.html",
# headers={
# 'User-Agent': "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_0) "
# "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.167 Safari/537.36"
# })
url = "https://search.jd.com/Search?keyword=%E6%89%8B%E6%9C%BA%E4%BA%AC%E4%B8%9C%E8%87%AA%E8%90%A5&enc=utf-8&qrst=1&rt%27=1&stop=1&spm=2.1.0&vt=2&page=1&s=1&click=0"
print(url)
yield Request("https://search.jd.com/Search?keyword=" +
self.str_keyword + "&enc=utf-8&qrst=1&rt'=1&stop=1&spm=2.1.0&vt=2&page=1&s=1&click=0",
headers={
'User-Agent': "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_0) "
"AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.167 Safari/537.36"
})
def parse(self, response):
print(">>>parsing...<<<")
item = SpiderJdPhoneItem()
# print(str(response.body))
file_object = open('test.html', 'wb')
file_object.write(response.body)
file_object.close()
item['price'] = response.xpath("//div["
"@class='p-price']//i/text()").extract()
item['name'] = response.xpath("//div[@class='p-name "
"p-name-type-2']//em").extract()
print("获取item:{}".format(item))
print("长度:%s" % len(item['price']))
print("长度:%s" % len(item['name']))
print("=====================")
yield item
| jinzekid/codehub | python/py3_6venv/spider_jd_phone/spider_jd_phone/spiders/jd_phone.py | Python | gpl-3.0 | 3,028 |
#Every X seconds, check if the minecaft server is running? [Default: 60]
sleepTime=5
#Enable AutoSave? ("save-all") [Default: true]
autoSave=True
#AutoSave Every X seconds? [Default = 300 (5 minutes)]
saveX=300
#Full path to server start script (Must CD to server directory and start java)
startScript="/home/user/mcserver/start.sh"
#AutoRestart [Default: True]
autoRestart=True
#AutoRestart Time [Default: "01:00" (1 AM)]
restartTime="01:00"
#Screen Name; If you have multiple servers, make each server have a unique name. [Default: mcserver]
screenName="mcserver"
| gsanders5/RavenWrapper | config.py | Python | mit | 572 |
from cmio import MaterialVisitor | esetomo/mio | script/pymio/material/material_visitor.py | Python | gpl-3.0 | 32 |
#!/usr/bin/python
# coding: utf-8
'''
File: tiger_release_aug07.corrected.16012013_patch.py
Author: Oliver Zscheyge
Description:
Fixes wrong morph values in the TIGER corpus:
tiger_release_aug07.corrected.16012013.xml
Also converts XML file to utf-8 encoding.
'''
import codecs
import fileinput
TIGER_FILE = "tiger_release_aug07.corrected.16012013.xml"
TIGER_FILE_UTF8 = "tiger_release_aug07.corrected.16012013_utf8.xml"
TIGER_FILE_UTF8_PATCHED = "tiger_release_aug07.corrected.16012013_utf8_patched.xml"
SOURCE_ENC = "iso-8859-1"
TARGET_ENC = "utf-8"
def main():
convert_to_utf8()
fix_strings()
def convert_to_utf8():
"""Converting the TIGER_FILE to utf-8 encoding.
Taken from:
http://stackoverflow.com/questions/191359/how-to-convert-a-file-to-utf-8-in-python
"""
BLOCKSIZE = 1048576 # or some other, desired size in bytes
with codecs.open(TIGER_FILE, "r", SOURCE_ENC) as sourceFile:
with codecs.open(TIGER_FILE_UTF8, "w", TARGET_ENC) as targetFile:
while True:
contents = sourceFile.read(BLOCKSIZE)
if not contents:
break
targetFile.write(contents)
def fix_strings():
replacements = {
1 : [u"ISO-8859-1", u"utf-8"]
, 293648 : [u"Pl.1.Pres.Ind", u"1.Pl.Pres.Ind"]
, 543756 : [u"Pl.3.Pres.Ind", u"3.Pl.Pres.Ind"]
, 1846632 : [u"Pl.3.Pres.Ind", u"3.Pl.Pres.Ind"]
, 2634040 : [u"Pl.3.Pres.Ind", u"3.Pl.Pres.Ind"]
}
linenr = 1
with codecs.open(TIGER_FILE_UTF8_PATCHED, "w", TARGET_ENC) as outfile:
with codecs.open(TIGER_FILE_UTF8, "r", TARGET_ENC) as infile:
for line in infile:
line = unicode(line).replace(u"\r", u"") # Replace Window's carriage returns
replacement = replacements.get(linenr, [])
if replacement != []:
line = line.replace(replacement[0], replacement[1], 1)
linenr += 1
outfile.write(line)
# for line in fileinput.input(TIGER_FILE_FIXED, inplace=True):
# replacement = replacements.get(fileinput.filelineno(), [])
# if replacement == []:
# print line,
# else:
# print line.replace(replacement[0], replacement[1], 1),
if __name__ == '__main__':
main()
| ooz/Confopy | confopy/localization/de/corpus_de/tiger_release_aug07.corrected.16012013_patch.py | Python | mit | 2,377 |
#!/usr/bin/env python
import EnvConfig
EnvConfig.Script().main()
| ocordes/arctic | cmake/scripts/env.py | Python | lgpl-3.0 | 65 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('polls', '0002_question_test_field'),
]
operations = [
migrations.RemoveField(
model_name='question',
name='test_field',
),
]
| kiriakosv/movie-recommendator | moviesite/polls/migrations/0003_remove_question_test_field.py | Python | mit | 357 |
import socket
import config
class ResourceManager(object):
map = {}
def is_locked(self, resource):
"""Check if a resource is being held.
"""
return resource in self.map and self.map[resource]
def peak_holder(self, resource):
"""Peak who is the holder of a given resource.
"""
try:
return self.map[resource][0]
except (IndexError, KeyError):
return None
def lock(self, resource, origin):
"""Lock a resource :resource to a holder :origin.
Try to lock a resource to a holder :origin. If the resource is already held by someone else,
add :origin to the list of holders.
"""
resource_is_available = False
if not self.is_locked(resource):
self.map[resource] = []
resource_is_available = True
# Add origin to list of holders, regardless.
self.map[resource].append(origin)
return resource_is_available
def unlock(self, resource, origin):
"""Unlock a resource from a holder.
Unlock the resource :resource from a holder :origin. Prevents all unlocking requests from all origins that
are not the holder (the first in the holders list).
"""
try:
# prevents unlock operation, unless the origin is the current resource's holder.
if origin != self.peak_holder(resource):
return False
self.map[resource].remove(origin)
return True
except (IndexError, KeyError, ValueError):
# Ignore cases where the resource isn't allocated
# or the origin isn't a requester of that resource.
return False
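
# Illustrative usage sketch (added; not part of the original module). The
# resource name and holder addresses below are made-up placeholders -- only
# the ResourceManager methods defined above are exercised:
#
#   rm = ResourceManager()
#   rm.lock('printer', ('10.0.0.1', 5000))    # True  -> resource was free
#   rm.lock('printer', ('10.0.0.2', 5000))    # False -> queued behind the holder
#   rm.peak_holder('printer')                 # ('10.0.0.1', 5000)
#   rm.unlock('printer', ('10.0.0.2', 5000))  # False -> only the current holder may unlock
#   rm.unlock('printer', ('10.0.0.1', 5000))  # True  -> second requester becomes the holder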
class SyncServer(object):
_s = _resource_manager = None
enabled = True
@property
def resource_manager(self):
if not self._resource_manager:
self._resource_manager = ResourceManager()
return self._resource_manager
@property
def local_address(self):
return socket.gethostbyname(socket.gethostname()), config.sync_server_port
def start(self):
try:
print('Sync-server starting at (%s:%i)...' % self.local_address)
self._listen()
except (KeyboardInterrupt, EOFError):
self.stop()
def stop(self):
if self._s:
self._s.close()
print('bye')
def _listen(self):
self._s = socket.socket(type=socket.SOCK_DGRAM)
self._s.bind(('', config.sync_server_port))
self._s.settimeout(.1)
while True:
try:
data, address = self._s.recvfrom(config.buffer_length)
data = str(data, encoding='utf-8')
print('%s:%i: %s' % (address[0], address[1], data))
if not self.enabled:
continue
fragments = data.split()
action = fragments[0]
resource = fragments[1]
if action == 'lock':
resource_is_available = self.resource_manager.lock(resource, address)
if resource_is_available:
self._s.sendto(bytes(resource, encoding='utf-8'), address)
elif action == 'unlock':
self.resource_manager.unlock(resource, address)
# send message to the next requester in line.
next_holder = self.resource_manager.peak_holder(resource)
if next_holder:
self._s.sendto(bytes(resource, encoding='utf-8'), next_holder)
except socket.timeout:
# ignore timeouts. This is only here so sync-server will listen to INT signals.
continue
if __name__ == '__main__':
SyncServer().start()
| lucasdavid/distributed-systems | projects/6/sync-server/sync_server.py | Python | mit | 3,850 |
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Swaminathan Vasudevan, Hewlett-Packard.
import netaddr
import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy.orm import exc
from neutron.common import constants as n_constants
from neutron.db import api as qdbapi
from neutron.db import db_base_plugin_v2 as base_db
from neutron.db import l3_agentschedulers_db as l3_agent_db
from neutron.db import l3_db
from neutron.db import model_base
from neutron.db import models_v2
from neutron.extensions import vpnaas
from neutron import manager
from neutron.openstack.common import excutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import uuidutils
from neutron.plugins.common import constants
from neutron.plugins.common import utils
LOG = logging.getLogger(__name__)
IP_MIN_MTU = {4: 68, 6: 1280}
class IPsecPeerCidr(model_base.BASEV2):
"""Internal representation of a IPsec Peer Cidrs."""
cidr = sa.Column(sa.String(32), nullable=False, primary_key=True)
ipsec_site_connection_id = sa.Column(
sa.String(36),
sa.ForeignKey('ipsec_site_connections.id',
ondelete="CASCADE"),
primary_key=True)
class IPsecPolicy(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
"""Represents a v2 IPsecPolicy Object."""
__tablename__ = 'ipsecpolicies'
name = sa.Column(sa.String(255))
description = sa.Column(sa.String(255))
transform_protocol = sa.Column(sa.Enum("esp", "ah", "ah-esp",
name="ipsec_transform_protocols"),
nullable=False)
auth_algorithm = sa.Column(sa.Enum("sha1",
name="vpn_auth_algorithms"),
nullable=False)
encryption_algorithm = sa.Column(sa.Enum("3des", "aes-128",
"aes-256", "aes-192",
name="vpn_encrypt_algorithms"),
nullable=False)
encapsulation_mode = sa.Column(sa.Enum("tunnel", "transport",
name="ipsec_encapsulations"),
nullable=False)
lifetime_units = sa.Column(sa.Enum("seconds", "kilobytes",
name="vpn_lifetime_units"),
nullable=False)
lifetime_value = sa.Column(sa.Integer, nullable=False)
pfs = sa.Column(sa.Enum("group2", "group5", "group14",
name="vpn_pfs"), nullable=False)
class IKEPolicy(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
"""Represents a v2 IKEPolicy Object."""
__tablename__ = 'ikepolicies'
name = sa.Column(sa.String(255))
description = sa.Column(sa.String(255))
auth_algorithm = sa.Column(sa.Enum("sha1",
name="vpn_auth_algorithms"),
nullable=False)
encryption_algorithm = sa.Column(sa.Enum("3des", "aes-128",
"aes-256", "aes-192",
name="vpn_encrypt_algorithms"),
nullable=False)
phase1_negotiation_mode = sa.Column(sa.Enum("main",
name="ike_phase1_mode"),
nullable=False)
lifetime_units = sa.Column(sa.Enum("seconds", "kilobytes",
name="vpn_lifetime_units"),
nullable=False)
lifetime_value = sa.Column(sa.Integer, nullable=False)
ike_version = sa.Column(sa.Enum("v1", "v2", name="ike_versions"),
nullable=False)
pfs = sa.Column(sa.Enum("group2", "group5", "group14",
name="vpn_pfs"), nullable=False)
class IPsecSiteConnection(model_base.BASEV2,
models_v2.HasId, models_v2.HasTenant):
"""Represents a IPsecSiteConnection Object."""
__tablename__ = 'ipsec_site_connections'
name = sa.Column(sa.String(255))
description = sa.Column(sa.String(255))
peer_address = sa.Column(sa.String(255), nullable=False)
peer_id = sa.Column(sa.String(255), nullable=False)
route_mode = sa.Column(sa.String(8), nullable=False)
mtu = sa.Column(sa.Integer, nullable=False)
initiator = sa.Column(sa.Enum("bi-directional", "response-only",
name="vpn_initiators"), nullable=False)
auth_mode = sa.Column(sa.String(16), nullable=False)
psk = sa.Column(sa.String(255), nullable=False)
dpd_action = sa.Column(sa.Enum("hold", "clear",
"restart", "disabled",
"restart-by-peer", name="vpn_dpd_actions"),
nullable=False)
dpd_interval = sa.Column(sa.Integer, nullable=False)
dpd_timeout = sa.Column(sa.Integer, nullable=False)
status = sa.Column(sa.String(16), nullable=False)
admin_state_up = sa.Column(sa.Boolean(), nullable=False)
vpnservice_id = sa.Column(sa.String(36),
sa.ForeignKey('vpnservices.id'),
nullable=False)
ipsecpolicy_id = sa.Column(sa.String(36),
sa.ForeignKey('ipsecpolicies.id'),
nullable=False)
ikepolicy_id = sa.Column(sa.String(36),
sa.ForeignKey('ikepolicies.id'),
nullable=False)
ipsecpolicy = orm.relationship(
IPsecPolicy, backref='ipsec_site_connection')
ikepolicy = orm.relationship(IKEPolicy, backref='ipsec_site_connection')
peer_cidrs = orm.relationship(IPsecPeerCidr,
backref='ipsec_site_connection',
lazy='joined',
cascade='all, delete, delete-orphan')
class VPNService(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
"""Represents a v2 VPNService Object."""
name = sa.Column(sa.String(255))
description = sa.Column(sa.String(255))
status = sa.Column(sa.String(16), nullable=False)
admin_state_up = sa.Column(sa.Boolean(), nullable=False)
subnet_id = sa.Column(sa.String(36), sa.ForeignKey('subnets.id'),
nullable=False)
router_id = sa.Column(sa.String(36), sa.ForeignKey('routers.id'),
nullable=False)
subnet = orm.relationship(models_v2.Subnet)
router = orm.relationship(l3_db.Router)
ipsec_site_connections = orm.relationship(
IPsecSiteConnection,
backref='vpnservice',
cascade="all, delete-orphan")
class VPNPluginDb(vpnaas.VPNPluginBase, base_db.CommonDbMixin):
"""VPN plugin database class using SQLAlchemy models."""
def __init__(self):
"""Do the initialization for the vpn service plugin here."""
qdbapi.register_models()
def update_status(self, context, model, v_id, status):
with context.session.begin(subtransactions=True):
v_db = self._get_resource(context, model, v_id)
v_db.update({'status': status})
def _get_resource(self, context, model, v_id):
try:
r = self._get_by_id(context, model, v_id)
except exc.NoResultFound:
with excutils.save_and_reraise_exception(reraise=False) as ctx:
if issubclass(model, IPsecSiteConnection):
raise vpnaas.IPsecSiteConnectionNotFound(
ipsec_site_conn_id=v_id
)
elif issubclass(model, IKEPolicy):
raise vpnaas.IKEPolicyNotFound(ikepolicy_id=v_id)
elif issubclass(model, IPsecPolicy):
raise vpnaas.IPsecPolicyNotFound(ipsecpolicy_id=v_id)
elif issubclass(model, VPNService):
raise vpnaas.VPNServiceNotFound(vpnservice_id=v_id)
ctx.reraise = True
return r
def assert_update_allowed(self, obj):
status = getattr(obj, 'status', None)
_id = getattr(obj, 'id', None)
if utils.in_pending_status(status):
raise vpnaas.VPNStateInvalidToUpdate(id=_id, state=status)
def _make_ipsec_site_connection_dict(self, ipsec_site_conn, fields=None):
res = {'id': ipsec_site_conn['id'],
'tenant_id': ipsec_site_conn['tenant_id'],
'name': ipsec_site_conn['name'],
'description': ipsec_site_conn['description'],
'peer_address': ipsec_site_conn['peer_address'],
'peer_id': ipsec_site_conn['peer_id'],
'route_mode': ipsec_site_conn['route_mode'],
'mtu': ipsec_site_conn['mtu'],
'auth_mode': ipsec_site_conn['auth_mode'],
'psk': ipsec_site_conn['psk'],
'initiator': ipsec_site_conn['initiator'],
'dpd': {
'action': ipsec_site_conn['dpd_action'],
'interval': ipsec_site_conn['dpd_interval'],
'timeout': ipsec_site_conn['dpd_timeout']
},
'admin_state_up': ipsec_site_conn['admin_state_up'],
'status': ipsec_site_conn['status'],
'vpnservice_id': ipsec_site_conn['vpnservice_id'],
'ikepolicy_id': ipsec_site_conn['ikepolicy_id'],
'ipsecpolicy_id': ipsec_site_conn['ipsecpolicy_id'],
'peer_cidrs': [pcidr['cidr']
for pcidr in ipsec_site_conn['peer_cidrs']]
}
return self._fields(res, fields)
def create_ipsec_site_connection(self, context, ipsec_site_connection):
ipsec_sitecon = ipsec_site_connection['ipsec_site_connection']
dpd = ipsec_sitecon['dpd']
ipsec_sitecon['dpd_action'] = dpd.get('action', 'hold')
ipsec_sitecon['dpd_interval'] = dpd.get('interval', 30)
ipsec_sitecon['dpd_timeout'] = dpd.get('timeout', 120)
tenant_id = self._get_tenant_id_for_create(context, ipsec_sitecon)
self._check_dpd(ipsec_sitecon)
with context.session.begin(subtransactions=True):
#Check permissions
self._get_resource(context,
VPNService,
ipsec_sitecon['vpnservice_id'])
self._get_resource(context,
IKEPolicy,
ipsec_sitecon['ikepolicy_id'])
self._get_resource(context,
IPsecPolicy,
ipsec_sitecon['ipsecpolicy_id'])
self._check_mtu(context,
ipsec_sitecon['mtu'],
ipsec_sitecon['vpnservice_id'])
ipsec_site_conn_db = IPsecSiteConnection(
id=uuidutils.generate_uuid(),
tenant_id=tenant_id,
name=ipsec_sitecon['name'],
description=ipsec_sitecon['description'],
peer_address=ipsec_sitecon['peer_address'],
peer_id=ipsec_sitecon['peer_id'],
route_mode='static',
mtu=ipsec_sitecon['mtu'],
auth_mode='psk',
psk=ipsec_sitecon['psk'],
initiator=ipsec_sitecon['initiator'],
dpd_action=ipsec_sitecon['dpd_action'],
dpd_interval=ipsec_sitecon['dpd_interval'],
dpd_timeout=ipsec_sitecon['dpd_timeout'],
admin_state_up=ipsec_sitecon['admin_state_up'],
status=constants.PENDING_CREATE,
vpnservice_id=ipsec_sitecon['vpnservice_id'],
ikepolicy_id=ipsec_sitecon['ikepolicy_id'],
ipsecpolicy_id=ipsec_sitecon['ipsecpolicy_id']
)
context.session.add(ipsec_site_conn_db)
for cidr in ipsec_sitecon['peer_cidrs']:
peer_cidr_db = IPsecPeerCidr(
cidr=cidr,
ipsec_site_connection_id=ipsec_site_conn_db['id']
)
context.session.add(peer_cidr_db)
return self._make_ipsec_site_connection_dict(ipsec_site_conn_db)
def _check_dpd(self, ipsec_sitecon):
if ipsec_sitecon['dpd_timeout'] <= ipsec_sitecon['dpd_interval']:
raise vpnaas.IPsecSiteConnectionDpdIntervalValueError(
attr='dpd_timeout')
def _check_mtu(self, context, mtu, vpnservice_id):
vpn_service_db = self._get_vpnservice(context, vpnservice_id)
subnet = vpn_service_db.subnet['cidr']
version = netaddr.IPNetwork(subnet).version
if mtu < IP_MIN_MTU[version]:
raise vpnaas.IPsecSiteConnectionMtuError(mtu=mtu, version=version)
def update_ipsec_site_connection(
self, context,
ipsec_site_conn_id, ipsec_site_connection):
conn = ipsec_site_connection['ipsec_site_connection']
changed_peer_cidrs = False
with context.session.begin(subtransactions=True):
ipsec_site_conn_db = self._get_resource(
context,
IPsecSiteConnection,
ipsec_site_conn_id)
dpd = conn.get('dpd', {})
if dpd.get('action'):
conn['dpd_action'] = dpd.get('action')
if dpd.get('interval') or dpd.get('timeout'):
conn['dpd_interval'] = dpd.get(
'interval', ipsec_site_conn_db.dpd_interval)
conn['dpd_timeout'] = dpd.get(
'timeout', ipsec_site_conn_db.dpd_timeout)
self._check_dpd(conn)
if 'mtu' in conn:
self._check_mtu(context,
conn['mtu'],
ipsec_site_conn_db.vpnservice_id)
self.assert_update_allowed(ipsec_site_conn_db)
if "peer_cidrs" in conn:
changed_peer_cidrs = True
old_peer_cidr_list = ipsec_site_conn_db['peer_cidrs']
old_peer_cidr_dict = dict(
(peer_cidr['cidr'], peer_cidr)
for peer_cidr in old_peer_cidr_list)
new_peer_cidr_set = set(conn["peer_cidrs"])
old_peer_cidr_set = set(old_peer_cidr_dict)
new_peer_cidrs = list(new_peer_cidr_set)
for peer_cidr in old_peer_cidr_set - new_peer_cidr_set:
context.session.delete(old_peer_cidr_dict[peer_cidr])
for peer_cidr in new_peer_cidr_set - old_peer_cidr_set:
pcidr = IPsecPeerCidr(
cidr=peer_cidr,
ipsec_site_connection_id=ipsec_site_conn_id)
context.session.add(pcidr)
del conn["peer_cidrs"]
if conn:
ipsec_site_conn_db.update(conn)
result = self._make_ipsec_site_connection_dict(ipsec_site_conn_db)
if changed_peer_cidrs:
result['peer_cidrs'] = new_peer_cidrs
return result
def delete_ipsec_site_connection(self, context, ipsec_site_conn_id):
with context.session.begin(subtransactions=True):
ipsec_site_conn_db = self._get_resource(
context, IPsecSiteConnection, ipsec_site_conn_id
)
context.session.delete(ipsec_site_conn_db)
def _get_ipsec_site_connection(
self, context, ipsec_site_conn_id):
return self._get_resource(
context, IPsecSiteConnection, ipsec_site_conn_id)
def get_ipsec_site_connection(self, context,
ipsec_site_conn_id, fields=None):
ipsec_site_conn_db = self._get_ipsec_site_connection(
context, ipsec_site_conn_id)
return self._make_ipsec_site_connection_dict(
ipsec_site_conn_db, fields)
def get_ipsec_site_connections(self, context, filters=None, fields=None):
return self._get_collection(context, IPsecSiteConnection,
self._make_ipsec_site_connection_dict,
filters=filters, fields=fields)
def update_ipsec_site_conn_status(self, context, conn_id, new_status):
with context.session.begin():
self._update_connection_status(context, conn_id, new_status, True)
def _update_connection_status(self, context, conn_id, new_status,
updated_pending):
"""Update the connection status, if changed.
If the connection is not in a pending state, unconditionally update
the status. Likewise, if in a pending state, and have an indication
that the status has changed, then update the database.
"""
try:
conn_db = self._get_ipsec_site_connection(context, conn_id)
except vpnaas.IPsecSiteConnectionNotFound:
return
if not utils.in_pending_status(conn_db.status) or updated_pending:
conn_db.status = new_status
def _make_ikepolicy_dict(self, ikepolicy, fields=None):
res = {'id': ikepolicy['id'],
'tenant_id': ikepolicy['tenant_id'],
'name': ikepolicy['name'],
'description': ikepolicy['description'],
'auth_algorithm': ikepolicy['auth_algorithm'],
'encryption_algorithm': ikepolicy['encryption_algorithm'],
'phase1_negotiation_mode': ikepolicy['phase1_negotiation_mode'],
'lifetime': {
'units': ikepolicy['lifetime_units'],
'value': ikepolicy['lifetime_value'],
},
'ike_version': ikepolicy['ike_version'],
'pfs': ikepolicy['pfs']
}
return self._fields(res, fields)
def create_ikepolicy(self, context, ikepolicy):
ike = ikepolicy['ikepolicy']
tenant_id = self._get_tenant_id_for_create(context, ike)
lifetime_info = ike.get('lifetime', [])
lifetime_units = lifetime_info.get('units', 'seconds')
lifetime_value = lifetime_info.get('value', 3600)
with context.session.begin(subtransactions=True):
ike_db = IKEPolicy(
id=uuidutils.generate_uuid(),
tenant_id=tenant_id,
name=ike['name'],
description=ike['description'],
auth_algorithm=ike['auth_algorithm'],
encryption_algorithm=ike['encryption_algorithm'],
phase1_negotiation_mode=ike['phase1_negotiation_mode'],
lifetime_units=lifetime_units,
lifetime_value=lifetime_value,
ike_version=ike['ike_version'],
pfs=ike['pfs']
)
context.session.add(ike_db)
return self._make_ikepolicy_dict(ike_db)
def update_ikepolicy(self, context, ikepolicy_id, ikepolicy):
ike = ikepolicy['ikepolicy']
with context.session.begin(subtransactions=True):
ikepolicy = context.session.query(IPsecSiteConnection).filter_by(
ikepolicy_id=ikepolicy_id).first()
if ikepolicy:
raise vpnaas.IKEPolicyInUse(ikepolicy_id=ikepolicy_id)
ike_db = self._get_resource(context, IKEPolicy, ikepolicy_id)
if ike:
lifetime_info = ike.get('lifetime')
if lifetime_info:
if lifetime_info.get('units'):
ike['lifetime_units'] = lifetime_info['units']
if lifetime_info.get('value'):
ike['lifetime_value'] = lifetime_info['value']
ike_db.update(ike)
return self._make_ikepolicy_dict(ike_db)
def delete_ikepolicy(self, context, ikepolicy_id):
with context.session.begin(subtransactions=True):
ikepolicy = context.session.query(IPsecSiteConnection).filter_by(
ikepolicy_id=ikepolicy_id).first()
if ikepolicy:
raise vpnaas.IKEPolicyInUse(ikepolicy_id=ikepolicy_id)
ike_db = self._get_resource(context, IKEPolicy, ikepolicy_id)
context.session.delete(ike_db)
def get_ikepolicy(self, context, ikepolicy_id, fields=None):
ike_db = self._get_resource(context, IKEPolicy, ikepolicy_id)
return self._make_ikepolicy_dict(ike_db, fields)
def get_ikepolicies(self, context, filters=None, fields=None):
return self._get_collection(context, IKEPolicy,
self._make_ikepolicy_dict,
filters=filters, fields=fields)
def _make_ipsecpolicy_dict(self, ipsecpolicy, fields=None):
res = {'id': ipsecpolicy['id'],
'tenant_id': ipsecpolicy['tenant_id'],
'name': ipsecpolicy['name'],
'description': ipsecpolicy['description'],
'transform_protocol': ipsecpolicy['transform_protocol'],
'auth_algorithm': ipsecpolicy['auth_algorithm'],
'encryption_algorithm': ipsecpolicy['encryption_algorithm'],
'encapsulation_mode': ipsecpolicy['encapsulation_mode'],
'lifetime': {
'units': ipsecpolicy['lifetime_units'],
'value': ipsecpolicy['lifetime_value'],
},
'pfs': ipsecpolicy['pfs']
}
return self._fields(res, fields)
def create_ipsecpolicy(self, context, ipsecpolicy):
ipsecp = ipsecpolicy['ipsecpolicy']
tenant_id = self._get_tenant_id_for_create(context, ipsecp)
lifetime_info = ipsecp['lifetime']
lifetime_units = lifetime_info.get('units', 'seconds')
lifetime_value = lifetime_info.get('value', 3600)
with context.session.begin(subtransactions=True):
ipsecp_db = IPsecPolicy(id=uuidutils.generate_uuid(),
tenant_id=tenant_id,
name=ipsecp['name'],
description=ipsecp['description'],
transform_protocol=ipsecp['transform_'
'protocol'],
auth_algorithm=ipsecp['auth_algorithm'],
encryption_algorithm=ipsecp['encryption_'
'algorithm'],
encapsulation_mode=ipsecp['encapsulation_'
'mode'],
lifetime_units=lifetime_units,
lifetime_value=lifetime_value,
pfs=ipsecp['pfs'])
context.session.add(ipsecp_db)
return self._make_ipsecpolicy_dict(ipsecp_db)
def update_ipsecpolicy(self, context, ipsecpolicy_id, ipsecpolicy):
ipsecp = ipsecpolicy['ipsecpolicy']
with context.session.begin(subtransactions=True):
ipsecpolicy = context.session.query(IPsecSiteConnection).filter_by(
ipsecpolicy_id=ipsecpolicy_id).first()
if ipsecpolicy:
raise vpnaas.IPsecPolicyInUse(ipsecpolicy_id=ipsecpolicy_id)
ipsecp_db = self._get_resource(context,
IPsecPolicy,
ipsecpolicy_id)
if ipsecp:
lifetime_info = ipsecp.get('lifetime')
if lifetime_info:
if lifetime_info.get('units'):
ipsecp['lifetime_units'] = lifetime_info['units']
if lifetime_info.get('value'):
ipsecp['lifetime_value'] = lifetime_info['value']
ipsecp_db.update(ipsecp)
return self._make_ipsecpolicy_dict(ipsecp_db)
def delete_ipsecpolicy(self, context, ipsecpolicy_id):
with context.session.begin(subtransactions=True):
ipsecpolicy = context.session.query(IPsecSiteConnection).filter_by(
ipsecpolicy_id=ipsecpolicy_id).first()
if ipsecpolicy:
raise vpnaas.IPsecPolicyInUse(ipsecpolicy_id=ipsecpolicy_id)
ipsec_db = self._get_resource(context, IPsecPolicy, ipsecpolicy_id)
context.session.delete(ipsec_db)
def get_ipsecpolicy(self, context, ipsecpolicy_id, fields=None):
ipsec_db = self._get_resource(context, IPsecPolicy, ipsecpolicy_id)
return self._make_ipsecpolicy_dict(ipsec_db, fields)
def get_ipsecpolicies(self, context, filters=None, fields=None):
return self._get_collection(context, IPsecPolicy,
self._make_ipsecpolicy_dict,
filters=filters, fields=fields)
def _make_vpnservice_dict(self, vpnservice, fields=None):
res = {'id': vpnservice['id'],
'name': vpnservice['name'],
'description': vpnservice['description'],
'tenant_id': vpnservice['tenant_id'],
'subnet_id': vpnservice['subnet_id'],
'router_id': vpnservice['router_id'],
'admin_state_up': vpnservice['admin_state_up'],
'status': vpnservice['status']}
return self._fields(res, fields)
def _check_router(self, context, router_id):
l3_plugin = manager.NeutronManager.get_service_plugins().get(
constants.L3_ROUTER_NAT)
router = l3_plugin.get_router(context, router_id)
if not router.get(l3_db.EXTERNAL_GW_INFO):
raise vpnaas.RouterIsNotExternal(router_id=router_id)
def _check_subnet_id(self, context, router_id, subnet_id):
core_plugin = manager.NeutronManager.get_plugin()
ports = core_plugin.get_ports(
context,
filters={
'fixed_ips': {'subnet_id': [subnet_id]},
'device_id': [router_id]})
if not ports:
raise vpnaas.SubnetIsNotConnectedToRouter(
subnet_id=subnet_id,
router_id=router_id)
def create_vpnservice(self, context, vpnservice):
vpns = vpnservice['vpnservice']
tenant_id = self._get_tenant_id_for_create(context, vpns)
self._check_router(context, vpns['router_id'])
self._check_subnet_id(context, vpns['router_id'], vpns['subnet_id'])
with context.session.begin(subtransactions=True):
vpnservice_db = VPNService(id=uuidutils.generate_uuid(),
tenant_id=tenant_id,
name=vpns['name'],
description=vpns['description'],
subnet_id=vpns['subnet_id'],
router_id=vpns['router_id'],
admin_state_up=vpns['admin_state_up'],
status=constants.PENDING_CREATE)
context.session.add(vpnservice_db)
return self._make_vpnservice_dict(vpnservice_db)
def update_vpnservice(self, context, vpnservice_id, vpnservice):
vpns = vpnservice['vpnservice']
with context.session.begin(subtransactions=True):
vpns_db = self._get_resource(context, VPNService, vpnservice_id)
self.assert_update_allowed(vpns_db)
if vpns:
vpns_db.update(vpns)
return self._make_vpnservice_dict(vpns_db)
def delete_vpnservice(self, context, vpnservice_id):
with context.session.begin(subtransactions=True):
if context.session.query(IPsecSiteConnection).filter_by(
vpnservice_id=vpnservice_id
).first():
raise vpnaas.VPNServiceInUse(vpnservice_id=vpnservice_id)
vpns_db = self._get_resource(context, VPNService, vpnservice_id)
context.session.delete(vpns_db)
def _get_vpnservice(self, context, vpnservice_id):
return self._get_resource(context, VPNService, vpnservice_id)
def get_vpnservice(self, context, vpnservice_id, fields=None):
vpns_db = self._get_resource(context, VPNService, vpnservice_id)
return self._make_vpnservice_dict(vpns_db, fields)
def get_vpnservices(self, context, filters=None, fields=None):
return self._get_collection(context, VPNService,
self._make_vpnservice_dict,
filters=filters, fields=fields)
def check_router_in_use(self, context, router_id):
vpnservices = self.get_vpnservices(
context, filters={'router_id': [router_id]})
if vpnservices:
raise vpnaas.RouterInUseByVPNService(
router_id=router_id,
vpnservice_id=vpnservices[0]['id'])
class VPNPluginRpcDbMixin():
def _get_agent_hosting_vpn_services(self, context, host):
plugin = manager.NeutronManager.get_plugin()
agent = plugin._get_agent_by_type_and_host(
context, n_constants.AGENT_TYPE_L3, host)
if not agent.admin_state_up:
return []
query = context.session.query(VPNService)
query = query.join(IPsecSiteConnection)
query = query.join(IKEPolicy)
query = query.join(IPsecPolicy)
query = query.join(IPsecPeerCidr)
query = query.join(l3_agent_db.RouterL3AgentBinding,
l3_agent_db.RouterL3AgentBinding.router_id ==
VPNService.router_id)
query = query.filter(
l3_agent_db.RouterL3AgentBinding.l3_agent_id == agent.id)
return query
def update_status_by_agent(self, context, service_status_info_list):
"""Updating vpnservice and vpnconnection status.
:param context: context variable
:param service_status_info_list: list of status
The structure is
[{id: vpnservice_id,
status: ACTIVE|DOWN|ERROR,
updated_pending_status: True|False
ipsec_site_connections: {
ipsec_site_connection_id: {
status: ACTIVE|DOWN|ERROR,
updated_pending_status: True|False
}
}]
The agent will set updated_pending_status as True,
when agent update any pending status.
"""
with context.session.begin(subtransactions=True):
for vpnservice in service_status_info_list:
try:
vpnservice_db = self._get_vpnservice(
context, vpnservice['id'])
except vpnaas.VPNServiceNotFound:
LOG.warn(_('vpnservice %s in db is already deleted'),
vpnservice['id'])
continue
if (not utils.in_pending_status(vpnservice_db.status)
or vpnservice['updated_pending_status']):
vpnservice_db.status = vpnservice['status']
for conn_id, conn in vpnservice[
'ipsec_site_connections'].items():
self._update_connection_status(
context, conn_id, conn['status'],
conn['updated_pending_status'])
| onecloud/neutron | neutron/db/vpn/vpn_db.py | Python | apache-2.0 | 32,188 |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Minimally-sized unquantized model.
The minimal model has the smallest legal values for all model size parameters.
Useful for testing that training the model doesn't generate errors.
"""
from aqt.jax.wmt_mlperf.hparams_configs import base_config
def get_config(quant_target=base_config.QuantTarget.none):
"""Returns configuration for a minimal transformer model."""
config = base_config.get_config(quant_target=quant_target, n_layers=1)
config.num_train_steps = 1
model = config.model_hparams
model.emb_dim = 1
model.num_heads = 1
model.qkv_dim = 1
model.mlp_dim = 1
return config
| google-research/google-research | aqt/jax/wmt_mlperf/hparams_configs/experimental/minimal_model_bfloat16.py | Python | apache-2.0 | 1,214 |
#ADDER
def adder(no, runningtotal):
if no == "":
return runningtotal
else:
runningtotal = float(no) + runningtotal
no = raw_input("Running total: " + str(runningtotal) + "\nNext number: ")
return adder(no, runningtotal)
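#Illustrative trace (added comment), assuming the user types 1, 2 and then a blank line:
#  adder("1", 0) -> adder("2", 1.0) -> adder("", 3.0) -> returns 3.0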
#BIGGEST
def biggest(BIG):
    #Here, BIG is the biggest number you typed UNTIL NOW.
    no = raw_input("Next number:")
    if no == "":
        return BIG
    else:
        no = float(no)
        if no > BIG:
            BIG = no
        return biggest(BIG)
#SMALLEST
#POW
def pow(number, times):
if times == 0:
return 1
else:
return number * pow(number, times-1)
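#Illustrative trace (added comment): pow(2, 3) expands as
#  2 * pow(2, 2) -> 2 * 2 * pow(2, 1) -> 2 * 2 * 2 * pow(2, 0) -> 2 * 2 * 2 * 1 = 8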
#MAIN MAIN MAINNN
def main():
#out = adder(0, 0)
#print "The sum is " + str(out)
#print biggest(-float('Inf'))
print pow(2, 3)
main()
| bomin2406-cmis/bomin2406-cmis-cs2 | practice.py | Python | cc0-1.0 | 813 |
import os
from unittest import mock
from django.core import mail
from conductor.tests import TestCase
from conductor.vendor import tasks
from conductor.vendor.models import PromptSchool
@mock.patch("conductor.vendor.tasks.requests")
class TestScanPrompt(TestCase):
def test_creates_prompt_school(self, mock_requests: mock.MagicMock) -> None:
"""A prompt school is found on the page and created."""
return
prompt_results_path = os.path.join(
os.path.dirname(__file__), "data", "prompt_results.html"
)
with open(prompt_results_path, "r") as f:
prompt_results = f.read()
response = mock.MagicMock()
response.text = prompt_results
mock_requests.get.return_value = response
tasks.scan_prompt()
self.assertEqual(
1,
PromptSchool.objects.filter(
slug="colorado-state", name="Colorado State University"
).count(),
)
self.assertEqual(1, len(mail.outbox))
self.assertIn("Prompt", mail.outbox[0].subject)
def test_skip_existing_prompt_school(self, mock_requests: mock.MagicMock) -> None:
"""Skip creating a prompt school if one already exists."""
slug = "colorado-state"
self.PromptSchoolFactory.create(slug="colorado-state")
prompt_results_path = os.path.join(
os.path.dirname(__file__), "data", "prompt_results.html"
)
with open(prompt_results_path, "r") as f:
prompt_results = f.read()
response = mock.MagicMock()
response.text = prompt_results
mock_requests.get.return_value = response
tasks.scan_prompt()
self.assertEqual(1, PromptSchool.objects.filter(slug=slug).count())
| mblayman/lcp | conductor/vendor/tests/test_tasks.py | Python | bsd-2-clause | 1,776 |
from __future__ import division
from __future__ import print_function
from past.utils import old_div
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.glm import H2OGeneralizedLinearEstimator
try: # redirect python output
    from StringIO import StringIO # for python 2
except ImportError:
    from io import StringIO # for python 3
# This test is used to ensure that the correct warning message is received if the user tries to use
# remove_collinear_columns with solver other than IRLSM
def test_GLM_RCC_warning():
warnNumber = 1
hdf = h2o.upload_file(pyunit_utils.locate("smalldata/prostate/prostate_complete.csv.zip"))
print("Testing for family: TWEEDIE")
print("Set variables for h2o.")
y = "CAPSULE"
x = ["AGE","RACE","DCAPS","PSA","VOL","DPROS","GLEASON"]
print("Create models with canonical link: TWEEDIE")
buffer = StringIO() # redirect output
sys.stderr=buffer
model_h2o_tweedie = H2OGeneralizedLinearEstimator(family="tweedie", link="tweedie", alpha=0.5, Lambda=0.1,
remove_collinear_columns=True, solver="coordinate_descent")
model_h2o_tweedie.train(x=x, y=y, training_frame=hdf) # this should generate a warning message
model_h2o_tweedie_wo_rcc = H2OGeneralizedLinearEstimator(family="tweedie", link="tweedie", alpha=0.5, Lambda=0.1,
solver="coordinate_descent")
sys.stderr=sys.__stderr__ # redirect printout back to normal path
model_h2o_tweedie_wo_rcc.train(x=x, y=y, training_frame=hdf) # no warning message here.
    # since remove_collinear_columns has no effect, these two models should be the same
pyunit_utils.assertCoefDictEqual(model_h2o_tweedie.coef(), model_h2o_tweedie_wo_rcc.coef())
# check and make sure we get the correct warning message
warn_phrase = "remove_collinear_columns only works when IRLSM"
try: # for python 2.7
assert len(buffer.buflist)==warnNumber
print(buffer.buflist[0])
assert warn_phrase in buffer.buflist[0]
except: # for python 3.
warns = buffer.getvalue()
print("*** captured warning message: {0}".format(warns))
assert warn_phrase in warns
if __name__ == "__main__":
pyunit_utils.standalone_test(test_GLM_RCC_warning)
else:
test_GLM_RCC_warning()
| h2oai/h2o-3 | h2o-py/tests/testdir_algos/glm/pyunit_PUBDEV_8072_remove_collinear_columns.py | Python | apache-2.0 | 2,430 |
from thefuck.specific.git import git_support
@git_support
def match(command):
return (' rm ' in command.script and
'error: the following file has changes staged in the index' in command.output and
'use --cached to keep the file, or -f to force removal' in command.output)
@git_support
def get_new_command(command):
command_parts = command.script_parts[:]
index = command_parts.index('rm') + 1
command_parts.insert(index, '--cached')
command_list = [u' '.join(command_parts)]
command_parts[index] = '-f'
command_list.append(u' '.join(command_parts))
return command_list
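
# Illustrative example (added; the file path is a made-up placeholder): for a
# failed `git rm some/file` whose output contains both messages checked in
# match() above, get_new_command() returns both candidate fixes, in order:
#   ['git rm --cached some/file', 'git rm -f some/file']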
| SimenB/thefuck | thefuck/rules/git_rm_staged.py | Python | mit | 629 |
"""
Suppose you have a matrix in the form of a 2 dimensional array. Write a method to read the rows of the matrix
alternately, first from left to right, then right to left, and so on, and return them as a 1 dimensional array.
for eg:
1 2 3
4 5 6
7 8 9
e.g. 1 2 3 6 5 4 7 8 9
Ref: https://www.careercup.com/question?id=5654187787419648
"""
import unittest
class AlternatingMatrix:
def __init__(self, matrix):
self._data = matrix
def __str__(self):
out = ''
for i, row in enumerate(self._data):
if i % 2 == 0:
for ele in row:
out += str(ele) + ' '
else:
for j in range(len(row) - 1, -1, -1):
out += str(row[j]) + ' '
return out
class MyTestCases(unittest.TestCase):
def test_AlternatingMatrix(self):
test_matrix = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
alt_matrix = AlternatingMatrix(test_matrix)
self.assertEqual(str(alt_matrix), '1 2 3 6 5 4 7 8 9 ')
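

# Added for convenience (not part of the original snippet): lets the test
# case above be run directly with `python <this file>`.
if __name__ == '__main__':
    unittest.main()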
| MFry/pyAlgoDataStructures | microsoft/alternate_printing_matrix.py | Python | mit | 1,059 |
import errno
import codecs
import os.path
class NamespaceNotFound(LookupError):
pass
class Source(object):
def __init__(self, name, content, modified_time, file_path):
self.name = name
self.content = content
self.modified_time = modified_time
self.file_path = file_path
class LoaderBase(object):
def is_uptodate(self, ns):
raise NotImplementedError
def load(self, name):
raise NotImplementedError
class CacheBase(object):
def get(self, name):
raise NotImplementedError
def set(self, name, code):
raise NotImplementedError
class DictLoader(LoaderBase):
def __init__(self, mapping):
self._sources = mapping
def is_uptodate(self, ns):
return True
def load(self, name):
file_path = '<memory:{}.kinko>'.format(name)
try:
return Source(name, self._sources[name], None, file_path)
except KeyError:
raise NamespaceNotFound(name)
class FileSystemLoader(LoaderBase):
_encoding = 'utf-8'
_template = '{}.kinko'
def __init__(self, path):
self._path = path
def is_uptodate(self, ns):
file_name = os.path.join(self._path, self._template.format(ns.name))
try:
return os.path.getmtime(file_name) == ns.modified_time
except OSError:
return False
def load(self, name):
file_path = os.path.join(self._path, self._template.format(name))
try:
with codecs.open(file_path, encoding=self._encoding) as f:
content = f.read()
except IOError as e:
if e.errno not in (errno.ENOENT, errno.EISDIR, errno.EINVAL):
raise
raise NamespaceNotFound(name)
modified_time = os.path.getmtime(file_path)
return Source(name, content, modified_time, file_path)
class DictCache(CacheBase):
def __init__(self):
self._cache = {}
def get(self, name):
try:
return self._cache[name]
except KeyError:
raise NamespaceNotFound(name)
def set(self, name, code):
self._cache[name] = code
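
# Illustrative usage sketch (added; the template mapping and paths below are
# made-up examples, not part of the original module):
#
#   loader = DictLoader({'index': 'div :content'})
#   src = loader.load('index')   # Source(name='index', ..., file_path='<memory:index.kinko>')
#   loader.load('missing')       # raises NamespaceNotFound
#
#   fs_loader = FileSystemLoader('/path/to/templates')
#   fs_loader.load('index')      # reads /path/to/templates/index.kinko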
| vmagamedov/kinko | kinko/loaders.py | Python | bsd-3-clause | 2,177 |