code (stringlengths 3..1.05M) | repo_name (stringlengths 5..104) | path (stringlengths 4..251) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 3..1.05M)
---|---|---|---|---|---
import requests
from pprint import pprint
from lxml import html
#expected command with two search tokens separated by /
#rq = ".h2hm fed/mur"
#rq = ".h2hw wil/sha"
def head2head(p1, p2, gender):
#scraping from tennisexplorer.com
page = requests.get('http://www.tennisexplorer.com/list-players/?search-text-pl='+p1)
tree = html.fromstring(page.text)
p1link = tree.xpath('//tbody[@class="flags"]/tr[1]/td['+str(gender)+']/a/@href')
p1link = ''.join(p1link)
p1link = p1link.strip('/')
p1link = p1link.split('/')
p1link = p1link[1]
print(p1link)
page = requests.get('http://www.tennisexplorer.com/list-players/?search-text-pl='+p2)
tree = html.fromstring(page.text)
p2link = tree.xpath('//tbody[@class="flags"]/tr[1]/td['+str(gender)+']/a/@href')
p2link = ''.join(p2link)
p2link = p2link.strip('/')
p2link = p2link.split('/')
p2link = p2link[1]
print(p2link)
page = requests.get('http://www.tennisexplorer.com/mutual/' + p1link +'/' + p2link +'/')
tree = html.fromstring(page.text)
#building string
pNames = tree.xpath('//th[@class="plName"]/a/text()')
p1name = pNames[0]
p2name = pNames[1]
p1name = p1name.split()
p1name = p1name[1] + ' ' + p1name[0]
p2name = p2name.split()
p2name = p2name[1] + ' ' + p2name[0]
score = tree.xpath('//td[@class="gScore"]/text()')
score = ''.join(score)
h2h = p1name + ' ' + score + ' ' + p2name
print(h2h)
surfaceList = [0, 0, 0, 0, 0]
rowCount = tree.xpath('count(//*[@id="center"]/div[2]/div/div/table/tbody/tr)')+1
rowCount = int(rowCount)
for row in range(1, rowCount):
if row % 2 != 0:
surface = tree.xpath('//*[@id="center"]/div[2]/div/div/table/tbody/tr['+str(row)+']/td[5]/span//@title')
if len(surface) > 0:
surface = surface[0]
if surface == 'Clay':
surfaceList[0] = surfaceList[0] + 1
elif surface == 'Grass':
surfaceList[1] = surfaceList[1] + 1
elif surface == 'Hard':
surfaceList[2] = surfaceList[2] + 1
elif surface == 'Indoors':
surfaceList[3] = surfaceList[3] + 1
else:
surfaceList[4] = surfaceList[4] + 1
if sum(surfaceList) > 1:
h2h = h2h + ': '
for num in range(0, 5):
if surfaceList[num] != 0:
surface = (float(surfaceList[num])/float(sum(surfaceList)))*100
surface = round(surface, 2)
print(surface)
if num == 0:
h2h = h2h + str(surface) + '% Clay, '
if num == 1:
h2h = h2h + str(surface) + '% Grass, '
if num == 2:
h2h = h2h + str(surface) + '% Hard, '
if num == 3:
h2h = h2h + str(surface) + '% Indoors, '
if num == 4:
h2h = h2h + str(surface) + '% Unknown surface, '
h2h = h2h[:-2]
return h2h
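# Usage sketch (added for illustration, not part of the original script):
# the IRC command ".h2hm fed/mur" mentioned above maps to a call like:
#
# print(head2head('fed', 'mur', 1))
#
# where the third argument is assumed to be the 1-based column index of
# the player link in tennisexplorer's search results (men vs. women).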
| t0m201/tennis-irc-scripts | ircbot/h2h.py | Python | mit | 3,460 |
from pyhistorian import Story, Scenario
from pyhistorian.language import TEMPLATE_PATTERN
from story_parser import parse_text
import re
class StoryRunner(object):
def __init__(self, story_text, output, colored,
modules=(), language='en-us',
before_all=(), before_each=(),
after_all=(), after_each=()):
self._story_text = story_text
self._output = output
self._modules = modules
self._colored = colored
self._language = language
self._parsed_story = parse_text(story_text, self._language)
self._pycukes_story = self._get_pycukes_story()
self._all_givens = {}
self._all_whens = {}
self._all_thens = {}
self._collect_steps()
self._before_all = before_all
self._before_each = before_each
self._after_all = after_all
self._after_each = after_each
def _collect_steps(self):
for module in self._modules:
for step_name in ['given', 'when', 'then']:
steps = getattr(module, '_%ss' % step_name, [])
for method, message, args in steps:
all_this_step = getattr(self, '_all_%ss' % step_name)
all_this_step[message] = (method, args)
def _get_header(self):
story = self._parsed_story.get_stories()[0]
return story.header
def _call_before_each_methods(self, namespace):
for before_meth in self._before_each:
before_meth(namespace)
def _call_before_all_methods(self, namespace):
for before_meth in self._before_all:
before_meth(namespace)
def _call_after_all_methods(self, namespace):
for after_meth in self._after_all:
after_meth(namespace)
def _call_after_each_methods(self, namespace):
for after_meth in self._after_each:
after_meth(namespace)
def _get_pycukes_story(self):
return type('PyCukesStory',
(Story,),
{'__doc__' :'\n'.join(self._get_header().split('\n')[1:]),
'output': self._output,
'title': self._parsed_story.get_stories()[0].title,
'colored': self._colored,
'scenarios': [],
'template_color':'yellow',
'language': self._language,
'before_each': self._call_before_each_methods,
'before_all': self._call_before_all_methods,
'after_all': self._call_after_all_methods,
'after_each': self._call_after_each_methods,})
def run(self):
scenarios = self._parsed_story.get_stories()[0].scenarios
for scenario_title, steps in scenarios:
new_scenario = type('PyCukesScenario',
(Scenario,),
{'__doc__': scenario_title,
'_givens': [],
'_whens': [],
'_thens': [],
})
for step_name in ['given', 'when', 'then']:
for step_message in steps[step_name]:
scenario_steps = getattr(new_scenario, '_%ss' % step_name)
all_runner_steps = getattr(self, '_all_%ss' % step_name)
actual_scenario = (None, step_message, ())
for step_regex, (step_method, step_args) in all_runner_steps.items():
msg_pattern = re.sub(TEMPLATE_PATTERN, r'(.*)', step_regex)
msg_pattern = re.escape(msg_pattern)
msg_pattern = msg_pattern.replace(re.escape(r'(.*)'), r'(.*)')
if re.match(msg_pattern, step_message):
actual_scenario = (step_method,
step_message,
re.match(msg_pattern,
step_message).groups())
scenario_steps.append(actual_scenario)
self._pycukes_story.scenarios.append(new_scenario)
return self._pycukes_story.run()
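# Usage sketch (illustrative only; the story file name and the steps module
# are hypothetical, not taken from this repository):
#
# import sys
# story_text = open('calculator.story').read()
# runner = StoryRunner(story_text, sys.stdout, colored=False,
#                      modules=(my_steps,))  # my_steps defines _givens/_whens/_thens
# runner.run()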
| hugobr/pycukes | pycukes/runner.py | Python | mit | 4,287 |
import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, 'README.rst')).read()
CHANGES = open(os.path.join(here, 'CHANGES.rst')).read()
requires = [
'SQLAlchemy',
'apscheduler',
'chaussette',
'couchdb',
'grequests',
'iso8601',
'ndg-httpsclient',
'pbkdf2',
'pyasn1',
'pyopenssl',
'pyramid',
'pyramid_exclog',
'pysqlite',
'setuptools',
]
test_requires = requires + [
'openprocurement.api',
'webtest',
]
entry_points = {
'paste.app_factory': [
'main = openprocurement.chronograph:main'
],
'console_scripts': [
'bootstrap_chronograph_security = openprocurement.chronograph.database:bootstrap_chronograph_security'
]
}
setup(name='openprocurement.chronograph',
version='0.7',
description='openprocurement.chronograph',
long_description=README + '\n\n' + CHANGES,
classifiers=[
"Programming Language :: Python",
"Framework :: Pyramid",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
],
author='',
author_email='',
url='',
keywords='web pyramid pylons',
packages=find_packages(),
namespace_packages=['openprocurement'],
include_package_data=True,
zip_safe=False,
install_requires=requires,
tests_require=test_requires,
extras_require={'test': test_requires},
test_suite="openprocurement.chronograph.tests.test.suite",
entry_points=entry_points)
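# Install/test sketch (standard setuptools/pip usage, not from this
# package's docs): a development install with the test extras pulls in
# openprocurement.api and webtest:
#
# pip install -e .[test]
# python setup.py test   # runs openprocurement.chronograph.tests.test.suite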
| openprocurement/openprocurement.chronograph | setup.py | Python | apache-2.0 | 1,610 |
from array import array
from random import randint, choice
import numpy
from pprint import pprint
import json
from combinatorics_class import *
from widget import load_expectation, get_next_state, get_state_big_mask
from widget import get_expected_score
import logging
logging.basicConfig(format='%(message)s',level=logging.INFO)
logger = logging.getLogger('dice_bot')
state_to_id = {}
action_blob = array('B')
N_STATES = 357568
N_BYTES = 1386
DIFF_MIN = 0
DIFF_MAX = 100
ACTION_OFFSET = 1134
BLOCK_SIZE = 252
BLOCK_PLUS_SIZE = (BLOCK_SIZE/4)*9
def roll_dice(n):
res = []
for i in range(n):
res.append(choice(dice))
return tuple(sorted(res))
def merge(r1, r2):
assert(len(r1) + len(r2) == 5)
r = []
r.extend(list(r1))
r.extend(list(r2))
return tuple(sorted(r))
def load_optimal_strategy():
id_to_state_json = json.load(open('./data/id_to_state.json'))
for k,v in id_to_state_json.iteritems():
state_to_id[int(v)] = int(k)
action_blob.fromfile(open('./data/options.dat', 'rb'), N_STATES*N_BYTES)
def pass_rand_test(diff_level):
if randint(DIFF_MIN, DIFF_MAX) <= diff_level:
return True
return False
def get_option_by_decoding(state, rid, roll_left, diff_level=10):
"""
state: the 19 bit bitmask to represent the current game state
rid: the 8 bit roll id representing the roll result
roll_left: 0,1 or 2, how many rolls left in this turn, start with 2 and end with 0
diff_level: how intelligent the bot move is, 0 - 100, integer
"""
state_id = state_to_id[state]
state_base = N_BYTES * state_id
state_segment = action_blob[state_base:state_base+N_BYTES]
if roll_left == 0:
move_offset = 0
elif roll_left == 1:
move_offset = BLOCK_SIZE
else:
move_offset = BLOCK_SIZE+BLOCK_PLUS_SIZE
if roll_left == 0:
mask = (1<<4)-1
pos_idx = move_offset + rid
opt1 = state_segment[pos_idx] & mask
opt2 = (state_segment[pos_idx]>>4) & mask
else:
mask = (1<<9)-1
opt_lower_idx = move_offset + (rid/4)*9 + (rid%4)*2
opt_higher_idx = move_offset + (rid/4)*9 + 8
highest_bit_1 = ((state_segment[opt_higher_idx] >> ((rid%4)*2))) & 1
highest_bit_2 = ((state_segment[opt_higher_idx] >> ((rid%4)*2+1))) & 1
opt1 = state_segment[opt_lower_idx] | (highest_bit_1<<8)
opt2 = state_segment[opt_lower_idx+1] | (highest_bit_2<<8)
if pass_rand_test(diff_level) or opt2 == mask:
return opt1
return opt2
#if roll_left == 0 :
# return opt1
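# Worked example of the packed layout (derived from the decoding logic
# above, not from the original data files): with roll_left == 1 the block
# starts at offset BLOCK_SIZE, and every 4 roll ids share a 9-byte group --
# 8 low-order option bytes plus one byte carrying the four pairs of ninth
# bits. For rid = 5 (group 1, slot 1), the two option low bytes sit at
# group-relative offsets 1*9 + 1*2 = 11 and 12, and their ninth bits are
# bits 2 and 3 of the shared byte at group-relative offset 1*9 + 8 = 17.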
def run_game(trial,diff_level=10):
state = (0,0)
i=1
total_pts = 0
while i<=13:
logger.info("%s,diff %2d, trial %d, turn %d", '='*20, diff_level,trial,i)
i+=1
roll_left = 2
bigstate = get_state_big_mask(state)
roll0 = dice_to_id[roll_dice(5)]
logger.info('dice0, %s', id_to_dice[roll0])
kid0 = get_option_by_decoding(bigstate, roll0, roll_left, diff_level)
keep0 = id_to_kept[kid0]
logger.info('keep0,%s', keep0)
roll_left -= 1
roll1 = dice_to_id[merge(keep0, roll_dice(5-len(keep0)))]
logger.info('dice1,%s', id_to_dice[roll1])
kid1 = get_option_by_decoding(bigstate, roll1, roll_left, diff_level)
keep1 = id_to_kept[kid1]
logger.info('keep1,%s', keep1)
roll_left -= 1
roll2 = dice_to_id[merge(keep1, roll_dice(5-len(keep1)))]
logger.info('dice2,%s', id_to_dice[roll2])
act = get_option_by_decoding(bigstate, roll2, roll_left, diff_level)
state = get_next_state(state, id_to_dice[roll2], Category.CATEGORY_ID_TO_NAME[act])
pts = eval_points[Category.CATEGORY_ID_TO_NAME[act]][id_to_dice[roll2]]
logger.info('choice,%s, earn %f', Category.CATEGORY_ID_TO_NAME[act], pts)
total_pts += pts
logger.info('this turn: %d, total pts: %d, upper score: %d',pts, total_pts, state[1])
logger.info('state: %s', bin(state[0]))
if state[1] == 63:
total_pts += 35
logger.info( 'extra bonus added, total: %s', total_pts)
return total_pts
def get_adaptive_diff(turn, delta_pts, diff_level=50):
#if turn <= 4:
# return diff_level
return max(DIFF_MIN,min(DIFF_MAX, diff_level - .5*delta_pts))
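# Worked example (from the formula above): with diff_level=50 and the
# adaptive bot leading by delta_pts=20, the next turn is played at
# max(0, min(100, 50 - .5*20)) == 40.0, i.e. the bot deliberately plays
# a weaker move to keep the score close.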
def run_game_two_players(trial, diff_level=50):
"""
the first player is the bot player with fixed difficulty level
the second player is the adaptive bot that tries to get a
final score as close as to the first play's.
"""
state = (0,0) # first player state
my_state = (0,0) # second player state
turn = 1
total_pts = 0
my_total_pts = 0
while turn<=13:
# first player begin
logger.info("%s,diff %2d, trial %d, p1-turn %d", '='*20, diff_level,trial,turn)
roll_left = 2
bigstate = get_state_big_mask(state)
roll0 = dice_to_id[roll_dice(5)]
logger.info('dice0, %s', id_to_dice[roll0])
kid0 = get_option_by_decoding(bigstate, roll0, roll_left, diff_level)
keep0 = id_to_kept[kid0]
logger.info('keep0,%s', keep0)
roll_left -= 1
roll1 = dice_to_id[merge(keep0, roll_dice(5-len(keep0)))]
logger.info('dice1,%s', id_to_dice[roll1])
kid1 = get_option_by_decoding(bigstate, roll1, roll_left, diff_level)
keep1 = id_to_kept[kid1]
logger.info('keep1,%s', keep1)
roll_left -= 1
roll2 = dice_to_id[merge(keep1, roll_dice(5-len(keep1)))]
logger.info('dice2,%s', id_to_dice[roll2])
act = get_option_by_decoding(bigstate, roll2, roll_left, diff_level)
state = get_next_state(state, id_to_dice[roll2], Category.CATEGORY_ID_TO_NAME[act])
pts = eval_points[Category.CATEGORY_ID_TO_NAME[act]][id_to_dice[roll2]]
my_diff_level = get_adaptive_diff(turn, my_total_pts - total_pts, diff_level)
total_pts += pts
logger.info('choice,%s, earn %f, exp-score %f', Category.CATEGORY_ID_TO_NAME[act], pts, total_pts + get_expected_score(state))
logger.info('this turn: %d, total pts: %d, upper score: %d',pts, total_pts, state[1])
logger.info("%s,diff %2d, trial %d, p2-turn %d", '='*20, diff_level,trial,turn)
# second player begin
# my intelligence is based on delta score and the current turn
my_roll_left = 2
my_bigstate = get_state_big_mask(my_state)
my_roll0 = dice_to_id[roll_dice(5)]
logger.info('dice0, %s', id_to_dice[my_roll0])
my_kid0 = get_option_by_decoding(my_bigstate, my_roll0, my_roll_left, my_diff_level)
my_keep0 = id_to_kept[my_kid0]
logger.info('keep0,%s', my_keep0)
my_roll_left -= 1
my_roll1 = dice_to_id[merge(my_keep0, roll_dice(5-len(my_keep0)))]
logger.info('dice1,%s', id_to_dice[my_roll1])
my_kid1 = get_option_by_decoding(my_bigstate, my_roll1, my_roll_left, my_diff_level)
my_keep1 = id_to_kept[my_kid1]
logger.info('keep1,%s', my_keep1)
my_roll_left -= 1
my_roll2 = dice_to_id[merge(my_keep1, roll_dice(5-len(my_keep1)))]
logger.info('dice2,%s', id_to_dice[my_roll2])
my_act = get_option_by_decoding(my_bigstate, my_roll2, my_roll_left, my_diff_level)
my_state = get_next_state(my_state, id_to_dice[my_roll2], Category.CATEGORY_ID_TO_NAME[my_act])
my_pts = eval_points[Category.CATEGORY_ID_TO_NAME[my_act]][id_to_dice[my_roll2]]
my_total_pts += my_pts
logger.info('choice,%s, earn %f, exp-score %f', Category.CATEGORY_ID_TO_NAME[my_act], my_pts, my_total_pts + get_expected_score(my_state))
logger.info('this turn: %d, total pts: %d, upper score: %d',my_pts, my_total_pts, my_state[1])
#logger.info('state: %s', bin(state[0]))
turn += 1
if state[1] == 63:
total_pts += 35
logger.info( 'p1 extra bonus added, total: %s', total_pts)
if my_state[1] == 63:
my_total_pts += 35
logger.info( 'p2 extra bonus added, total: %s', my_total_pts)
return my_total_pts - total_pts
import time
from datetime import datetime
if __name__ == '__main__':
load_optimal_strategy()
E = load_expectation()
n_trial = 1000
for difficulty in range(100,-1,-10):
t1 = datetime.now()
tp=[]
for trial in range(n_trial):
t = run_game(trial, difficulty)
#t = run_game_two_players(trial, difficulty)
tp.append(t)
logger.warning("diff=%d, %f, (%f, %f)", difficulty, t, numpy.mean(tp), numpy.std(tp))
t2 = datetime.now()
logger.critical("diff=%3d, (%f, %f), time:%s", difficulty, numpy.mean(tp), numpy.std(tp), (t2-t1).total_seconds())
time.sleep(2)
| greeness/yahtzee-optimal-strategy | sim_game_with_cache.py | Python | mit | 9,075 |
import contextlib
import os
from google import auth
from google.auth.transport import grpc as google_auth_transport_grpc
from google.auth.transport import requests as google_auth_transport_requests
from googleapiclient import discovery
BIGSTORE_SCOPES = [
'https://www.googleapis.com/auth/devstorage.write_only',
]
RESULTSTORE_SCOPES = [
"https://www.googleapis.com/auth/cloud-source-tools",
"https://www.googleapis.com/auth/cloud-platform",
]
ALL_SCOPES = BIGSTORE_SCOPES + RESULTSTORE_SCOPES
RESULTSTORE_SEARCH_VIEW_ROLE = 'roles/cloudsourcetoolscore.developer'
PORT = '[::]:9090'
class Credentials():
"""
Credentials container/helper for resultstoreui
"""
def __init__(self):
"""
Initialize Credentials
Configuration is read from environment variables:
PROJECT_ID: GCP project id
CLIENT_ID: GCP OAuth client_id
RESULT_STORE_API_ENDPOINT, BUCKET_NAME: service endpoints
"""
self.channel = None
self.scopes = ALL_SCOPES
self.project_id = os.environ.get('PROJECT_ID')
self.client_id = os.environ.get('CLIENT_ID')
self.destination_sever = os.environ.get('RESULT_STORE_API_ENDPOINT')
self.bigstore_bucket_name = os.environ.get('BUCKET_NAME')
self.port = PORT
@contextlib.contextmanager
def create_secure_channel(self, addr):
"""
Creates a secure channel using GOOGLE_APPLICATION_CREDENTIALS from the
users path
Args:
addr (str): The host and port of the service
Returns:
A gRPC channel
"""
credentials, _ = auth.default(scopes=self.scopes)
request = google_auth_transport_requests.Request()
channel = google_auth_transport_grpc.secure_authorized_channel(
credentials, request, addr)
self.channel = channel
yield channel
def get_active_channel(self):
"""Returns current active channel"""
return self.channel
def get_scopes(self):
"""Returns scopes"""
return self.scopes
def verify_user(self, email, version=1):
"""
Verifies user by checking if they have the roles/cloudsourcetoolscore.developer
role in the current GCP project
Args:
email (str): User email to check for authenticated
version (int): requested IAM policy version
Returns:
Boolean, true if verified, false if not verified
"""
credentials, _ = auth.default(scopes=self.scopes)
service = discovery.build('cloudresourcemanager',
'v1',
credentials=credentials)
policy = (service.projects().getIamPolicy(
resource=self.project_id,
body={
'options': {
'requestedPolicyVersion': version
}
},
).execute())
try:
roles = policy['bindings']
index = self._index_of_role(roles, RESULTSTORE_SEARCH_VIEW_ROLE)
if index == -1:
return False
if 'user:{}'.format(email) in roles[index]['members']:
return True
return False
except KeyError:
return False
def get_client_id(self):
"""
Returns:
Application client_id
"""
return self.client_id
def get_project_id(self):
"""
Returns:
Application project_id
"""
return self.project_id
def get_destination_sever(self):
"""
Returns:
Application destination_sever
"""
return self.destination_sever
def get_port(self):
"""
Returns:
Application port
"""
return self.port
def get_bucket_name(self):
return self.bigstore_bucket_name
def _index_of_role(self, lst, role):
"""
Find the index of the iap role in the list
Args:
lst (str): lst to be searched
role (str): role that is being searched for
Returns:
Index of the role or -1 if it doesn't exist
"""
for i, v in enumerate(lst):
if v['role'] == role:
return i
return -1
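# Usage sketch (hypothetical; assumes PROJECT_ID, CLIENT_ID,
# RESULT_STORE_API_ENDPOINT and BUCKET_NAME are set in the environment and
# GOOGLE_APPLICATION_CREDENTIALS points at a service-account key file):
#
# creds = Credentials()
# with creds.create_secure_channel(creds.get_destination_sever()) as channel:
#     stub = SomeResultStoreStub(channel)  # hypothetical gRPC stub class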
| google/resultstoreui | resultstoresearch/server/resultstoresearch/credentials/credentials.py | Python | apache-2.0 | 4,250 |
'''
Created on 16 Nov 2016
@author: gbstring
'''
from flask import render_template, session, redirect, url_for
from .forms import TitleForm
from .models import Title
from . import db
from app import app
| gbstringer/flasksimple | flask/simple/app/views.py | Python | cc0-1.0 | 216 |
"""HTTP server classes.
Note: BaseHTTPRequestHandler doesn't implement any HTTP request; see
SimpleHTTPRequestHandler for simple implementations of GET, HEAD and POST,
and CGIHTTPRequestHandler for CGI scripts.
It does, however, optionally implement HTTP/1.1 persistent connections,
as of version 0.3.
Notes on CGIHTTPRequestHandler
------------------------------
This class implements GET and POST requests to cgi-bin scripts.
If the os.fork() function is not present (e.g. on Windows),
subprocess.Popen() is used as a fallback, with slightly altered semantics.
In all cases, the implementation is intentionally naive -- all
requests are executed synchronously.
SECURITY WARNING: DON'T USE THIS CODE UNLESS YOU ARE INSIDE A FIREWALL
-- it may execute arbitrary Python code or external programs.
Note that status code 200 is sent prior to execution of a CGI script, so
scripts cannot send other status codes such as 302 (redirect).
XXX To do:
- log requests even later (to capture byte count)
- log user-agent header and other interesting goodies
- send error log to separate file
"""
# See also:
#
# HTTP Working Group T. Berners-Lee
# INTERNET-DRAFT R. T. Fielding
# <draft-ietf-http-v10-spec-00.txt> H. Frystyk Nielsen
# Expires September 8, 1995 March 8, 1995
#
# URL: http://www.ics.uci.edu/pub/ietf/http/draft-ietf-http-v10-spec-00.txt
#
# and
#
# Network Working Group R. Fielding
# Request for Comments: 2616 et al
# Obsoletes: 2068 June 1999
# Category: Standards Track
#
# URL: http://www.faqs.org/rfcs/rfc2616.html
# Log files
# ---------
#
# Here's a quote from the NCSA httpd docs about log file format.
#
# | The logfile format is as follows. Each line consists of:
# |
# | host rfc931 authuser [DD/Mon/YYYY:hh:mm:ss] "request" ddd bbbb
# |
# | host: Either the DNS name or the IP number of the remote client
# | rfc931: Any information returned by identd for this person,
# | - otherwise.
# | authuser: If user sent a userid for authentication, the user name,
# | - otherwise.
# | DD: Day
# | Mon: Month (calendar name)
# | YYYY: Year
# | hh: hour (24-hour format, the machine's timezone)
# | mm: minutes
# | ss: seconds
# | request: The first line of the HTTP request as sent by the client.
# | ddd: the status code returned by the server, - if not available.
# | bbbb: the total number of bytes sent,
# | *not including the HTTP/1.0 header*, - if not available
# |
# | You can determine the name of the file accessed through request.
#
# (Actually, the latter is only true if you know the server configuration
# at the time the request was made!)
__version__ = "0.6"
__all__ = ["HTTPServer", "BaseHTTPRequestHandler"]
import html
import email.message
import email.parser
import http.client
import io
import mimetypes
import os
import posixpath
import select
import shutil
import socket # For gethostbyaddr()
import socketserver
import sys
import time
import urllib.parse
import copy
# Default error message template
DEFAULT_ERROR_MESSAGE = """\
<head>
<title>Error response</title>
</head>
<body>
<h1>Error response</h1>
<p>Error code %(code)d.
<p>Message: %(message)s.
<p>Error code explanation: %(code)s = %(explain)s.
</body>
"""
DEFAULT_ERROR_CONTENT_TYPE = "text/html;charset=utf-8"
def _quote_html(html):
return html.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;")
class HTTPServer(socketserver.TCPServer):
allow_reuse_address = 1 # Seems to make sense in testing environment
def server_bind(self):
"""Override server_bind to store the server name."""
socketserver.TCPServer.server_bind(self)
host, port = self.socket.getsockname()[:2]
self.server_name = socket.getfqdn(host)
self.server_port = port
class BaseHTTPRequestHandler(socketserver.StreamRequestHandler):
"""HTTP request handler base class.
The following explanation of HTTP serves to guide you through the
code as well as to expose any misunderstandings I may have about
HTTP (so you don't need to read the code to figure out I'm wrong
:-).
HTTP (HyperText Transfer Protocol) is an extensible protocol on
top of a reliable stream transport (e.g. TCP/IP). The protocol
recognizes three parts to a request:
1. One line identifying the request type and path
2. An optional set of RFC-822-style headers
3. An optional data part
The headers and data are separated by a blank line.
The first line of the request has the form
<command> <path> <version>
where <command> is a (case-sensitive) keyword such as GET or POST,
<path> is a string containing path information for the request,
and <version> should be the string "HTTP/1.0" or "HTTP/1.1".
<path> is encoded using the URL encoding scheme (using %xx to signify
the ASCII character with hex code xx).
The specification specifies that lines are separated by CRLF but
for compatibility with the widest range of clients recommends
servers also handle LF. Similarly, whitespace in the request line
is treated sensibly (allowing multiple spaces between components
and allowing trailing whitespace).
Similarly, for output, lines ought to be separated by CRLF pairs
but most clients grok LF characters just fine.
If the first line of the request has the form
<command> <path>
(i.e. <version> is left out) then this is assumed to be an HTTP
0.9 request; this form has no optional headers and data part and
the reply consists of just the data.
The reply form of the HTTP 1.x protocol again has three parts:
1. One line giving the response code
2. An optional set of RFC-822-style headers
3. The data
Again, the headers and data are separated by a blank line.
The response code line has the form
<version> <responsecode> <responsestring>
where <version> is the protocol version ("HTTP/1.0" or "HTTP/1.1"),
<responsecode> is a 3-digit response code indicating success or
failure of the request, and <responsestring> is an optional
human-readable string explaining what the response code means.
This server parses the request and the headers, and then calls a
function specific to the request type (<command>). Specifically,
a request SPAM will be handled by a method do_SPAM(). If no
such method exists the server sends an error response to the
client. If it exists, it is called with no arguments:
do_SPAM()
Note that the request name is case sensitive (i.e. SPAM and spam
are different requests).
The various request details are stored in instance variables:
- client_address is the client IP address in the form (host,
port);
- command, path and version are the broken-down request line;
- headers is an instance of email.message.Message (or a derived
class) containing the header information;
- rfile is a file object open for reading positioned at the
start of the optional input data part;
- wfile is a file object open for writing.
IT IS IMPORTANT TO ADHERE TO THE PROTOCOL FOR WRITING!
The first thing to be written must be the response line. Then
follow 0 or more header lines, then a blank line, and then the
actual data (if any). The meaning of the header lines depends on
the command executed by the server; in most cases, when data is
returned, there should be at least one header line of the form
Content-type: <type>/<subtype>
where <type> and <subtype> should be registered MIME types,
e.g. "text/html" or "text/plain".
"""
# The Python system version, truncated to its first component.
sys_version = "Python/" + sys.version.split()[0]
# The server software version. You may want to override this.
# The format is multiple whitespace-separated strings,
# where each string is of the form name[/version].
server_version = "BaseHTTP/" + __version__
error_message_format = DEFAULT_ERROR_MESSAGE
error_content_type = DEFAULT_ERROR_CONTENT_TYPE
# The default request version. This only affects responses up until
# the point where the request line is parsed, so it mainly decides what
# the client gets back when sending a malformed request line.
# Most web servers default to HTTP 0.9, i.e. don't send a status line.
default_request_version = "HTTP/0.9"
def parse_request(self):
"""Parse a request (internal).
The request should be stored in self.raw_requestline; the results
are in self.command, self.path, self.request_version and
self.headers.
Return True for success, False for failure; on failure, an
error is sent back.
"""
self.command = None # set in case of error on the first line
self.request_version = version = self.default_request_version
self.close_connection = 1
requestline = str(self.raw_requestline, 'iso-8859-1')
if requestline[-2:] == '\r\n':
requestline = requestline[:-2]
elif requestline[-1:] == '\n':
requestline = requestline[:-1]
self.requestline = requestline
words = requestline.split()
if len(words) == 3:
[command, path, version] = words
if version[:5] != 'HTTP/':
self.send_error(400, "Bad request version (%r)" % version)
return False
try:
base_version_number = version.split('/', 1)[1]
version_number = base_version_number.split(".")
# RFC 2145 section 3.1 says there can be only one "." and
# - major and minor numbers MUST be treated as
# separate integers;
# - HTTP/2.4 is a lower version than HTTP/2.13, which in
# turn is lower than HTTP/12.3;
# - Leading zeros MUST be ignored by recipients.
if len(version_number) != 2:
raise ValueError
version_number = int(version_number[0]), int(version_number[1])
except (ValueError, IndexError):
self.send_error(400, "Bad request version (%r)" % version)
return False
if version_number >= (1, 1) and self.protocol_version >= "HTTP/1.1":
self.close_connection = 0
if version_number >= (2, 0):
self.send_error(505,
"Invalid HTTP Version (%s)" % base_version_number)
return False
elif len(words) == 2:
[command, path] = words
self.close_connection = 1
if command != 'GET':
self.send_error(400,
"Bad HTTP/0.9 request type (%r)" % command)
return False
elif not words:
return False
else:
self.send_error(400, "Bad request syntax (%r)" % requestline)
return False
self.command, self.path, self.request_version = command, path, version
# Examine the headers and look for a Connection directive.
try:
self.headers = http.client.parse_headers(self.rfile,
_class=self.MessageClass)
except http.client.LineTooLong:
self.send_error(400, "Line too long")
return False
conntype = self.headers.get('Connection', "")
if conntype.lower() == 'close':
self.close_connection = 1
elif (conntype.lower() == 'keep-alive' and
self.protocol_version >= "HTTP/1.1"):
self.close_connection = 0
# Examine the headers and look for an Expect directive
expect = self.headers.get('Expect', "")
if (expect.lower() == "100-continue" and
self.protocol_version >= "HTTP/1.1" and
self.request_version >= "HTTP/1.1"):
if not self.handle_expect_100():
return False
return True
def handle_expect_100(self):
"""Decide what to do with an "Expect: 100-continue" header.
If the client is expecting a 100 Continue response, we must
respond with either a 100 Continue or a final response before
waiting for the request body. The default is to always respond
with a 100 Continue. You can behave differently (for example,
reject unauthorized requests) by overriding this method.
This method should either return True (possibly after sending
a 100 Continue response) or send an error response and return
False.
"""
self.send_response_only(100)
return True
def handle_one_request(self):
"""Handle a single HTTP request.
You normally don't need to override this method; see the class
__doc__ string for information on how to handle specific HTTP
commands such as GET and POST.
"""
try:
self.raw_requestline = self.rfile.readline(65537)
if len(self.raw_requestline) > 65536:
self.requestline = ''
self.request_version = ''
self.command = ''
self.send_error(414)
return
if not self.raw_requestline:
self.close_connection = 1
return
if not self.parse_request():
# An error code has been sent, just exit
return
mname = 'do_' + self.command
if not hasattr(self, mname):
self.send_error(501, "Unsupported method (%r)" % self.command)
return
method = getattr(self, mname)
method()
self.wfile.flush() #actually send the response if not already done.
except socket.timeout as e:
#a read or a write timed out. Discard this connection
self.log_error("Request timed out: %r", e)
self.close_connection = 1
return
def handle(self):
"""Handle multiple requests if necessary."""
self.close_connection = 1
self.handle_one_request()
while not self.close_connection:
self.handle_one_request()
def send_error(self, code, message=None):
"""Send and log an error reply.
Arguments are the error code, and a detailed message.
The detailed message defaults to the short entry matching the
response code.
This sends an error response (so it must be called before any
output has been generated), logs the error, and finally sends
a piece of HTML explaining the error to the user.
"""
try:
shortmsg, longmsg = self.responses[code]
except KeyError:
shortmsg, longmsg = '???', '???'
if message is None:
message = shortmsg
explain = longmsg
self.log_error("code %d, message %s", code, message)
# using _quote_html to prevent Cross Site Scripting attacks (see bug #1100201)
content = (self.error_message_format %
{'code': code, 'message': _quote_html(message), 'explain': explain})
self.send_response(code, message)
self.send_header("Content-Type", self.error_content_type)
self.send_header('Connection', 'close')
self.end_headers()
if self.command != 'HEAD' and code >= 200 and code not in (204, 304):
self.wfile.write(content.encode('UTF-8', 'replace'))
def send_response(self, code, message=None):
"""Send the response header and log the response code.
Also send two standard headers with the server software
version and the current date.
"""
self.log_request(code)
self.send_response_only(code, message)
self.send_header('Server', self.version_string())
self.send_header('Date', self.date_time_string())
def send_response_only(self, code, message=None):
"""Send the response header only."""
if message is None:
if code in self.responses:
message = self.responses[code][0]
else:
message = ''
if self.request_version != 'HTTP/0.9':
self.wfile.write(("%s %d %s\r\n" %
(self.protocol_version, code, message)).encode('latin1', 'strict'))
def send_header(self, keyword, value):
"""Send a MIME header."""
if self.request_version != 'HTTP/0.9':
if not hasattr(self, '_headers_buffer'):
self._headers_buffer = []
self._headers_buffer.append(
("%s: %s\r\n" % (keyword, value)).encode('latin1', 'strict'))
if keyword.lower() == 'connection':
if value.lower() == 'close':
self.close_connection = 1
elif value.lower() == 'keep-alive':
self.close_connection = 0
def end_headers(self):
"""Send the blank line ending the MIME headers."""
if self.request_version != 'HTTP/0.9':
self._headers_buffer.append(b"\r\n")
self.wfile.write(b"".join(self._headers_buffer))
self._headers_buffer = []
def log_request(self, code='-', size='-'):
"""Log an accepted request.
This is called by send_response().
"""
self.log_message('"%s" %s %s',
self.requestline, str(code), str(size))
def log_error(self, format, *args):
"""Log an error.
This is called when a request cannot be fulfilled. By
default it passes the message on to log_message().
Arguments are the same as for log_message().
XXX This should go to the separate error log.
"""
self.log_message(format, *args)
def log_message(self, format, *args):
"""Log an arbitrary message.
This is used by all other logging functions. Override
it if you have specific logging wishes.
The first argument, FORMAT, is a format string for the
message to be logged. If the format string contains
any % escapes requiring parameters, they should be
specified as subsequent arguments (it's just like
printf!).
The client host and current date/time are prefixed to
every message.
"""
sys.stderr.write("%s - - [%s] %s\n" %
(self.address_string(),
self.log_date_time_string(),
format%args))
def version_string(self):
"""Return the server software version string."""
return self.server_version + ' ' + self.sys_version
def date_time_string(self, timestamp=None):
"""Return the current date and time formatted for a message header."""
if timestamp is None:
timestamp = time.time()
year, month, day, hh, mm, ss, wd, y, z = time.gmtime(timestamp)
s = "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
self.weekdayname[wd],
day, self.monthname[month], year,
hh, mm, ss)
return s
def log_date_time_string(self):
"""Return the current time formatted for logging."""
now = time.time()
year, month, day, hh, mm, ss, x, y, z = time.localtime(now)
s = "%02d/%3s/%04d %02d:%02d:%02d" % (
day, self.monthname[month], year, hh, mm, ss)
return s
weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
monthname = [None,
'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
def address_string(self):
"""Return the client address formatted for logging.
This version looks up the full hostname using gethostbyaddr(),
and tries to find a name that contains at least one dot.
"""
host, port = self.client_address[:2]
return socket.getfqdn(host)
# Essentially static class variables
# The version of the HTTP protocol we support.
# Set this to HTTP/1.1 to enable automatic keepalive
protocol_version = "HTTP/1.0"
# MessageClass used to parse headers
MessageClass = http.client.HTTPMessage
# Table mapping response codes to messages; entries have the
# form {code: (shortmessage, longmessage)}.
# See RFC 2616.
responses = {
100: ('Continue', 'Request received, please continue'),
101: ('Switching Protocols',
'Switching to new protocol; obey Upgrade header'),
200: ('OK', 'Request fulfilled, document follows'),
201: ('Created', 'Document created, URL follows'),
202: ('Accepted',
'Request accepted, processing continues off-line'),
203: ('Non-Authoritative Information', 'Request fulfilled from cache'),
204: ('No Content', 'Request fulfilled, nothing follows'),
205: ('Reset Content', 'Clear input form for further input.'),
206: ('Partial Content', 'Partial content follows.'),
300: ('Multiple Choices',
'Object has several resources -- see URI list'),
301: ('Moved Permanently', 'Object moved permanently -- see URI list'),
302: ('Found', 'Object moved temporarily -- see URI list'),
303: ('See Other', 'Object moved -- see Method and URL list'),
304: ('Not Modified',
'Document has not changed since given time'),
305: ('Use Proxy',
'You must use proxy specified in Location to access this '
'resource.'),
307: ('Temporary Redirect',
'Object moved temporarily -- see URI list'),
400: ('Bad Request',
'Bad request syntax or unsupported method'),
401: ('Unauthorized',
'No permission -- see authorization schemes'),
402: ('Payment Required',
'No payment -- see charging schemes'),
403: ('Forbidden',
'Request forbidden -- authorization will not help'),
404: ('Not Found', 'Nothing matches the given URI'),
405: ('Method Not Allowed',
'Specified method is invalid for this resource.'),
406: ('Not Acceptable', 'URI not available in preferred format.'),
407: ('Proxy Authentication Required', 'You must authenticate with '
'this proxy before proceeding.'),
408: ('Request Timeout', 'Request timed out; try again later.'),
409: ('Conflict', 'Request conflict.'),
410: ('Gone',
'URI no longer exists and has been permanently removed.'),
411: ('Length Required', 'Client must specify Content-Length.'),
412: ('Precondition Failed', 'Precondition in headers is false.'),
413: ('Request Entity Too Large', 'Entity is too large.'),
414: ('Request-URI Too Long', 'URI is too long.'),
415: ('Unsupported Media Type', 'Entity body in unsupported format.'),
416: ('Requested Range Not Satisfiable',
'Cannot satisfy request range.'),
417: ('Expectation Failed',
'Expect condition could not be satisfied.'),
500: ('Internal Server Error', 'Server got itself in trouble'),
501: ('Not Implemented',
'Server does not support this operation'),
502: ('Bad Gateway', 'Invalid responses from another server/proxy.'),
503: ('Service Unavailable',
'The server cannot process the request due to a high load'),
504: ('Gateway Timeout',
'The gateway server did not receive a timely response'),
505: ('HTTP Version Not Supported', 'Cannot fulfill request.'),
}
class SimpleHTTPRequestHandler(BaseHTTPRequestHandler):
"""Simple HTTP request handler with GET and HEAD commands.
This serves files from the current directory and any of its
subdirectories. The MIME type for files is determined by
calling the .guess_type() method.
The GET and HEAD requests are identical except that the HEAD
request omits the actual contents of the file.
"""
server_version = "SimpleHTTP/" + __version__
def do_GET(self):
"""Serve a GET request."""
f = self.send_head()
if f:
self.copyfile(f, self.wfile)
f.close()
def do_HEAD(self):
"""Serve a HEAD request."""
f = self.send_head()
if f:
f.close()
def send_head(self):
"""Common code for GET and HEAD commands.
This sends the response code and MIME headers.
Return value is either a file object (which has to be copied
to the outputfile by the caller unless the command was HEAD,
and must be closed by the caller under all circumstances), or
None, in which case the caller has nothing further to do.
"""
path = self.translate_path(self.path)
f = None
if os.path.isdir(path):
if not self.path.endswith('/'):
# redirect browser - doing basically what apache does
self.send_response(301)
self.send_header("Location", self.path + "/")
self.end_headers()
return None
for index in "index.html", "index.htm":
index = os.path.join(path, index)
if os.path.exists(index):
path = index
break
else:
return self.list_directory(path)
ctype = self.guess_type(path)
try:
f = open(path, 'rb')
except IOError:
self.send_error(404, "File not found")
return None
self.send_response(200)
self.send_header("Content-type", ctype)
fs = os.fstat(f.fileno())
self.send_header("Content-Length", str(fs[6]))
self.send_header("Last-Modified", self.date_time_string(fs.st_mtime))
self.end_headers()
return f
def list_directory(self, path):
"""Helper to produce a directory listing (absent index.html).
Return value is either a file object, or None (indicating an
error). In either case, the headers are sent, making the
interface the same as for send_head().
"""
try:
list = os.listdir(path)
except os.error:
self.send_error(404, "No permission to list directory")
return None
list.sort(key=lambda a: a.lower())
r = []
displaypath = html.escape(urllib.parse.unquote(self.path))
r.append('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">')
r.append("<html>\n<title>Directory listing for %s</title>\n" % displaypath)
r.append("<body>\n<h2>Directory listing for %s</h2>\n" % displaypath)
r.append("<hr>\n<ul>\n")
for name in list:
fullname = os.path.join(path, name)
displayname = linkname = name
# Append / for directories or @ for symbolic links
if os.path.isdir(fullname):
displayname = name + "/"
linkname = name + "/"
if os.path.islink(fullname):
displayname = name + "@"
# Note: a link to a directory displays with @ and links with /
r.append('<li><a href="%s">%s</a>\n'
% (urllib.parse.quote(linkname), html.escape(displayname)))
r.append("</ul>\n<hr>\n</body>\n</html>\n")
enc = sys.getfilesystemencoding()
encoded = ''.join(r).encode(enc)
f = io.BytesIO()
f.write(encoded)
f.seek(0)
self.send_response(200)
self.send_header("Content-type", "text/html; charset=%s" % enc)
self.send_header("Content-Length", str(len(encoded)))
self.end_headers()
return f
def translate_path(self, path):
"""Translate a /-separated PATH to the local filename syntax.
Components that mean special things to the local file system
(e.g. drive or directory names) are ignored. (XXX They should
probably be diagnosed.)
"""
# abandon query parameters
path = path.split('?',1)[0]
path = path.split('#',1)[0]
path = posixpath.normpath(urllib.parse.unquote(path))
words = path.split('/')
words = filter(None, words)
path = os.getcwd()
for word in words:
drive, word = os.path.splitdrive(word)
head, word = os.path.split(word)
if word in (os.curdir, os.pardir): continue
path = os.path.join(path, word)
return path
def copyfile(self, source, outputfile):
"""Copy all data between two file objects.
The SOURCE argument is a file object open for reading
(or anything with a read() method) and the DESTINATION
argument is a file object open for writing (or
anything with a write() method).
The only reason for overriding this would be to change
the block size or perhaps to replace newlines by CRLF
-- note however that the default server uses this
to copy binary data as well.
"""
shutil.copyfileobj(source, outputfile)
def guess_type(self, path):
"""Guess the type of a file.
Argument is a PATH (a filename).
Return value is a string of the form type/subtype,
usable for a MIME Content-type header.
The default implementation looks the file's extension
up in the table self.extensions_map, using application/octet-stream
as a default; however it would be permissible (if
slow) to look inside the data to make a better guess.
"""
base, ext = posixpath.splitext(path)
if ext in self.extensions_map:
return self.extensions_map[ext]
ext = ext.lower()
if ext in self.extensions_map:
return self.extensions_map[ext]
else:
return self.extensions_map['']
if not mimetypes.inited:
mimetypes.init() # try to read system mime.types
extensions_map = mimetypes.types_map.copy()
extensions_map.update({
'': 'application/octet-stream', # Default
'.py': 'text/plain',
'.c': 'text/plain',
'.h': 'text/plain',
})
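# Subclassing sketch (illustrative, not part of the module): handlers can
# extend the class-level map to serve extra types, e.g. Markdown as text:
#
# class MarkdownFriendlyHandler(SimpleHTTPRequestHandler):
#     extensions_map = dict(SimpleHTTPRequestHandler.extensions_map,
#                           **{'.md': 'text/plain'})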
# Utilities for CGIHTTPRequestHandler
# TODO(gregory.p.smith): Move this into an appropriate library.
def _url_collapse_path_split(path):
"""
Given a URL path, remove extra '/'s and '.' path elements and collapse
any '..' references.
Implements something akin to RFC-2396 5.2 step 6 to parse relative paths.
Returns: A tuple of (head, tail) where tail is everything after the final /
and head is everything before it. Head will always start with a '/' and,
if it contains anything else, never have a trailing '/'.
Raises: IndexError if too many '..' occur within the path.
"""
# Similar to os.path.split(os.path.normpath(path)) but specific to URL
# path semantics rather than local operating system semantics.
path_parts = []
for part in path.split('/'):
if part == '.':
path_parts.append('')
else:
path_parts.append(part)
# Filter out blank non trailing parts before consuming the '..'.
path_parts = [part for part in path_parts[:-1] if part] + path_parts[-1:]
if path_parts:
tail_part = path_parts.pop()
else:
tail_part = ''
head_parts = []
for part in path_parts:
if part == '..':
head_parts.pop()
else:
head_parts.append(part)
if tail_part and tail_part == '..':
head_parts.pop()
tail_part = ''
return ('/' + '/'.join(head_parts), tail_part)
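# Illustrative behaviour (examples written for this edit, traced through
# the implementation above):
#
# _url_collapse_path_split('/a/b/../c/./d.html') == ('/a/c', 'd.html')
# _url_collapse_path_split('/cgi-bin/x.py') == ('/cgi-bin', 'x.py')
# _url_collapse_path_split('/..') raises IndexError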
nobody = None
def nobody_uid():
"""Internal routine to get nobody's uid"""
global nobody
if nobody:
return nobody
try:
import pwd
except ImportError:
return -1
try:
nobody = pwd.getpwnam('nobody')[2]
except KeyError:
nobody = 1 + max(x[2] for x in pwd.getpwall())
return nobody
def executable(path):
"""Test for executable file."""
try:
st = os.stat(path)
except os.error:
return False
return st.st_mode & 0o111 != 0
class CGIHTTPRequestHandler(SimpleHTTPRequestHandler):
"""Complete HTTP server with GET, HEAD and POST commands.
GET and HEAD also support running CGI scripts.
The POST command is *only* implemented for CGI scripts.
"""
# Determine platform specifics
have_fork = hasattr(os, 'fork')
# Make rfile unbuffered -- we need to read one line and then pass
# the rest to a subprocess, so we can't use buffered input.
rbufsize = 0
def do_POST(self):
"""Serve a POST request.
This is only implemented for CGI scripts.
"""
if self.is_cgi():
self.run_cgi()
else:
self.send_error(501, "Can only POST to CGI scripts")
def send_head(self):
"""Version of send_head that support CGI scripts"""
if self.is_cgi():
return self.run_cgi()
else:
return SimpleHTTPRequestHandler.send_head(self)
def is_cgi(self):
"""Test whether self.path corresponds to a CGI script.
Returns True and updates the cgi_info attribute to the tuple
(dir, rest) if self.path requires running a CGI script.
Returns False otherwise.
If any exception is raised, the caller should assume that
self.path was rejected as invalid and act accordingly.
The default implementation tests whether the normalized url
path begins with one of the strings in self.cgi_directories
(and the next character is a '/' or the end of the string).
"""
splitpath = _url_collapse_path_split(self.path)
if splitpath[0] in self.cgi_directories:
self.cgi_info = splitpath
return True
return False
cgi_directories = ['/cgi-bin', '/htbin']
def is_executable(self, path):
"""Test whether argument path is an executable file."""
return executable(path)
def is_python(self, path):
"""Test whether argument path is a Python script."""
head, tail = os.path.splitext(path)
return tail.lower() in (".py", ".pyw")
def run_cgi(self):
"""Execute a CGI script."""
path = self.path
dir, rest = self.cgi_info
i = path.find('/', len(dir) + 1)
while i >= 0:
nextdir = path[:i]
nextrest = path[i+1:]
scriptdir = self.translate_path(nextdir)
if os.path.isdir(scriptdir):
dir, rest = nextdir, nextrest
i = path.find('/', len(dir) + 1)
else:
break
# find an explicit query string, if present.
i = rest.rfind('?')
if i >= 0:
rest, query = rest[:i], rest[i+1:]
else:
query = ''
# dissect the part after the directory name into a script name &
# a possible additional path, to be stored in PATH_INFO.
i = rest.find('/')
if i >= 0:
script, rest = rest[:i], rest[i:]
else:
script, rest = rest, ''
scriptname = dir + '/' + script
scriptfile = self.translate_path(scriptname)
if not os.path.exists(scriptfile):
self.send_error(404, "No such CGI script (%r)" % scriptname)
return
if not os.path.isfile(scriptfile):
self.send_error(403, "CGI script is not a plain file (%r)" %
scriptname)
return
ispy = self.is_python(scriptname)
if not ispy:
if not self.is_executable(scriptfile):
self.send_error(403, "CGI script is not executable (%r)" %
scriptname)
return
# Reference: http://hoohoo.ncsa.uiuc.edu/cgi/env.html
# XXX Much of the following could be prepared ahead of time!
env = copy.deepcopy(os.environ)
env['SERVER_SOFTWARE'] = self.version_string()
env['SERVER_NAME'] = self.server.server_name
env['GATEWAY_INTERFACE'] = 'CGI/1.1'
env['SERVER_PROTOCOL'] = self.protocol_version
env['SERVER_PORT'] = str(self.server.server_port)
env['REQUEST_METHOD'] = self.command
uqrest = urllib.parse.unquote(rest)
env['PATH_INFO'] = uqrest
env['PATH_TRANSLATED'] = self.translate_path(uqrest)
env['SCRIPT_NAME'] = scriptname
if query:
env['QUERY_STRING'] = query
host = self.address_string()
if host != self.client_address[0]:
env['REMOTE_HOST'] = host
env['REMOTE_ADDR'] = self.client_address[0]
authorization = self.headers.get("authorization")
if authorization:
authorization = authorization.split()
if len(authorization) == 2:
import base64, binascii
env['AUTH_TYPE'] = authorization[0]
if authorization[0].lower() == "basic":
try:
authorization = authorization[1].encode('ascii')
authorization = base64.decodebytes(authorization).\
decode('ascii')
except (binascii.Error, UnicodeError):
pass
else:
authorization = authorization.split(':')
if len(authorization) == 2:
env['REMOTE_USER'] = authorization[0]
# XXX REMOTE_IDENT
if self.headers.get('content-type') is None:
env['CONTENT_TYPE'] = self.headers.get_content_type()
else:
env['CONTENT_TYPE'] = self.headers['content-type']
length = self.headers.get('content-length')
if length:
env['CONTENT_LENGTH'] = length
referer = self.headers.get('referer')
if referer:
env['HTTP_REFERER'] = referer
accept = []
for line in self.headers.getallmatchingheaders('accept'):
if line[:1] in "\t\n\r ":
accept.append(line.strip())
else:
accept = accept + line[7:].split(',')
env['HTTP_ACCEPT'] = ','.join(accept)
ua = self.headers.get('user-agent')
if ua:
env['HTTP_USER_AGENT'] = ua
co = filter(None, self.headers.get_all('cookie', []))
cookie_str = ', '.join(co)
if cookie_str:
env['HTTP_COOKIE'] = cookie_str
# XXX Other HTTP_* headers
# Since we're setting the env in the parent, provide empty
# values to override previously set values
for k in ('QUERY_STRING', 'REMOTE_HOST', 'CONTENT_LENGTH',
'HTTP_USER_AGENT', 'HTTP_COOKIE', 'HTTP_REFERER'):
env.setdefault(k, "")
self.send_response(200, "Script output follows")
decoded_query = query.replace('+', ' ')
if self.have_fork:
# Unix -- fork as we should
args = [script]
if '=' not in decoded_query:
args.append(decoded_query)
nobody = nobody_uid()
self.wfile.flush() # Always flush before forking
pid = os.fork()
if pid != 0:
# Parent
pid, sts = os.waitpid(pid, 0)
# throw away additional data [see bug #427345]
while select.select([self.rfile], [], [], 0)[0]:
if not self.rfile.read(1):
break
if sts:
self.log_error("CGI script exit status %#x", sts)
return
# Child
try:
try:
os.setuid(nobody)
except os.error:
pass
os.dup2(self.rfile.fileno(), 0)
os.dup2(self.wfile.fileno(), 1)
os.execve(scriptfile, args, env)
except:
self.server.handle_error(self.request, self.client_address)
os._exit(127)
else:
# Non-Unix -- use subprocess
import subprocess
cmdline = [scriptfile]
if self.is_python(scriptfile):
interp = sys.executable
if interp.lower().endswith("w.exe"):
# On Windows, use python.exe, not pythonw.exe
interp = interp[:-5] + interp[-4:]
cmdline = [interp, '-u'] + cmdline
if '=' not in query:
cmdline.append(query)
self.log_message("command: %s", subprocess.list2cmdline(cmdline))
try:
nbytes = int(length)
except (TypeError, ValueError):
nbytes = 0
p = subprocess.Popen(cmdline,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env = env
)
if self.command.lower() == "post" and nbytes > 0:
data = self.rfile.read(nbytes)
else:
data = None
# throw away additional data [see bug #427345]
while select.select([self.rfile._sock], [], [], 0)[0]:
if not self.rfile._sock.recv(1):
break
stdout, stderr = p.communicate(data)
self.wfile.write(stdout)
if stderr:
self.log_error('%s', stderr)
p.stderr.close()
p.stdout.close()
status = p.returncode
if status:
self.log_error("CGI script exit status %#x", status)
else:
self.log_message("CGI script exited OK")
def test(HandlerClass = BaseHTTPRequestHandler,
ServerClass = HTTPServer, protocol="HTTP/1.0"):
"""Test the HTTP request handler class.
This runs an HTTP server on port 8000 (or the first command line
argument).
"""
if sys.argv[1:]:
port = int(sys.argv[1])
else:
port = 8000
server_address = ('', port)
HandlerClass.protocol_version = protocol
httpd = ServerClass(server_address, HandlerClass)
sa = httpd.socket.getsockname()
print("Serving HTTP on", sa[0], "port", sa[1], "...")
try:
httpd.serve_forever()
except KeyboardInterrupt:
print("\nKeyboard interrupt received, exiting.")
httpd.server_close()
sys.exit(0)
if __name__ == '__main__':
test(HandlerClass=SimpleHTTPRequestHandler)
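# Invocation sketch (based on test() above): running this file serves the
# current directory over HTTP, on port 8000 or the port given as the first
# command-line argument:
#
#   python server.py 8000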
| MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-3.2/Lib/http/server.py | Python | mit | 43,374 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras layers API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Generic layers.
# pylint: disable=g-bad-import-order
from tensorflow.python.keras.engine.input_layer import Input
from tensorflow.python.keras.engine.input_layer import InputLayer
from tensorflow.python.keras.engine.input_spec import InputSpec
from tensorflow.python.keras.engine.base_layer import Layer
# Advanced activations.
from tensorflow.python.keras.layers.advanced_activations import LeakyReLU
from tensorflow.python.keras.layers.advanced_activations import PReLU
from tensorflow.python.keras.layers.advanced_activations import ELU
from tensorflow.python.keras.layers.advanced_activations import ReLU
from tensorflow.python.keras.layers.advanced_activations import ThresholdedReLU
from tensorflow.python.keras.layers.advanced_activations import Softmax
# Convolution layers.
from tensorflow.python.keras.layers.convolutional import Conv1D
from tensorflow.python.keras.layers.convolutional import Conv2D
from tensorflow.python.keras.layers.convolutional import Conv3D
from tensorflow.python.keras.layers.convolutional import Conv2DTranspose
from tensorflow.python.keras.layers.convolutional import Conv3DTranspose
from tensorflow.python.keras.layers.convolutional import SeparableConv1D
from tensorflow.python.keras.layers.convolutional import SeparableConv2D
# Convolution layer aliases.
from tensorflow.python.keras.layers.convolutional import Convolution1D
from tensorflow.python.keras.layers.convolutional import Convolution2D
from tensorflow.python.keras.layers.convolutional import Convolution3D
from tensorflow.python.keras.layers.convolutional import Convolution2DTranspose
from tensorflow.python.keras.layers.convolutional import Convolution3DTranspose
from tensorflow.python.keras.layers.convolutional import SeparableConvolution1D
from tensorflow.python.keras.layers.convolutional import SeparableConvolution2D
from tensorflow.python.keras.layers.convolutional import DepthwiseConv2D
# Image processing layers.
from tensorflow.python.keras.layers.convolutional import UpSampling1D
from tensorflow.python.keras.layers.convolutional import UpSampling2D
from tensorflow.python.keras.layers.convolutional import UpSampling3D
from tensorflow.python.keras.layers.convolutional import ZeroPadding1D
from tensorflow.python.keras.layers.convolutional import ZeroPadding2D
from tensorflow.python.keras.layers.convolutional import ZeroPadding3D
from tensorflow.python.keras.layers.convolutional import Cropping1D
from tensorflow.python.keras.layers.convolutional import Cropping2D
from tensorflow.python.keras.layers.convolutional import Cropping3D
# Core layers.
from tensorflow.python.keras.layers.core import Masking
from tensorflow.python.keras.layers.core import Dropout
from tensorflow.python.keras.layers.core import SpatialDropout1D
from tensorflow.python.keras.layers.core import SpatialDropout2D
from tensorflow.python.keras.layers.core import SpatialDropout3D
from tensorflow.python.keras.layers.core import Activation
from tensorflow.python.keras.layers.core import Reshape
from tensorflow.python.keras.layers.core import Permute
from tensorflow.python.keras.layers.core import Flatten
from tensorflow.python.keras.layers.core import RepeatVector
from tensorflow.python.keras.layers.core import Lambda
from tensorflow.python.keras.layers.core import Dense
from tensorflow.python.keras.layers.core import ActivityRegularization
# Embedding layers.
from tensorflow.python.keras.layers.embeddings import Embedding
# Locally-connected layers.
from tensorflow.python.keras.layers.local import LocallyConnected1D
from tensorflow.python.keras.layers.local import LocallyConnected2D
# Merge layers.
from tensorflow.python.keras.layers.merge import Add
from tensorflow.python.keras.layers.merge import Subtract
from tensorflow.python.keras.layers.merge import Multiply
from tensorflow.python.keras.layers.merge import Average
from tensorflow.python.keras.layers.merge import Maximum
from tensorflow.python.keras.layers.merge import Minimum
from tensorflow.python.keras.layers.merge import Concatenate
from tensorflow.python.keras.layers.merge import Dot
from tensorflow.python.keras.layers.merge import add
from tensorflow.python.keras.layers.merge import subtract
from tensorflow.python.keras.layers.merge import multiply
from tensorflow.python.keras.layers.merge import average
from tensorflow.python.keras.layers.merge import maximum
from tensorflow.python.keras.layers.merge import minimum
from tensorflow.python.keras.layers.merge import concatenate
from tensorflow.python.keras.layers.merge import dot
# Noise layers.
from tensorflow.python.keras.layers.noise import AlphaDropout
from tensorflow.python.keras.layers.noise import GaussianNoise
from tensorflow.python.keras.layers.noise import GaussianDropout
# Normalization layers.
from tensorflow.python.keras.layers.normalization import BatchNormalization
from tensorflow.python.keras.layers.normalization import LayerNormalization
# Kernelized layers.
from tensorflow.python.keras.layers.kernelized import RandomFourierFeatures
# Pooling layers.
from tensorflow.python.keras.layers.pooling import MaxPooling1D
from tensorflow.python.keras.layers.pooling import MaxPooling2D
from tensorflow.python.keras.layers.pooling import MaxPooling3D
from tensorflow.python.keras.layers.pooling import AveragePooling1D
from tensorflow.python.keras.layers.pooling import AveragePooling2D
from tensorflow.python.keras.layers.pooling import AveragePooling3D
from tensorflow.python.keras.layers.pooling import GlobalAveragePooling1D
from tensorflow.python.keras.layers.pooling import GlobalAveragePooling2D
from tensorflow.python.keras.layers.pooling import GlobalAveragePooling3D
from tensorflow.python.keras.layers.pooling import GlobalMaxPooling1D
from tensorflow.python.keras.layers.pooling import GlobalMaxPooling2D
from tensorflow.python.keras.layers.pooling import GlobalMaxPooling3D
# Pooling layer aliases.
from tensorflow.python.keras.layers.pooling import MaxPool1D
from tensorflow.python.keras.layers.pooling import MaxPool2D
from tensorflow.python.keras.layers.pooling import MaxPool3D
from tensorflow.python.keras.layers.pooling import AvgPool1D
from tensorflow.python.keras.layers.pooling import AvgPool2D
from tensorflow.python.keras.layers.pooling import AvgPool3D
from tensorflow.python.keras.layers.pooling import GlobalAvgPool1D
from tensorflow.python.keras.layers.pooling import GlobalAvgPool2D
from tensorflow.python.keras.layers.pooling import GlobalAvgPool3D
from tensorflow.python.keras.layers.pooling import GlobalMaxPool1D
from tensorflow.python.keras.layers.pooling import GlobalMaxPool2D
from tensorflow.python.keras.layers.pooling import GlobalMaxPool3D
# Recurrent layers.
from tensorflow.python.keras.layers.recurrent import RNN
from tensorflow.python.keras.layers.recurrent import StackedRNNCells
from tensorflow.python.keras.layers.recurrent import SimpleRNNCell
from tensorflow.python.keras.layers.recurrent import GRUCell
from tensorflow.python.keras.layers.recurrent import LSTMCell
from tensorflow.python.keras.layers.recurrent import PeepholeLSTMCell
from tensorflow.python.keras.layers.recurrent import SimpleRNN
from tensorflow.python.keras.layers.recurrent import GRU
from tensorflow.python.keras.layers.recurrent import LSTM
from tensorflow.python.keras.layers.recurrent import UnifiedGRU
from tensorflow.python.keras.layers.recurrent import UnifiedLSTM
# Convolutional-recurrent layers.
from tensorflow.python.keras.layers.convolutional_recurrent import ConvLSTM2D
# CuDNN recurrent layers.
from tensorflow.python.keras.layers.cudnn_recurrent import CuDNNLSTM
from tensorflow.python.keras.layers.cudnn_recurrent import CuDNNGRU
# Wrapper functions
from tensorflow.python.keras.layers.wrappers import Wrapper
from tensorflow.python.keras.layers.wrappers import Bidirectional
from tensorflow.python.keras.layers.wrappers import TimeDistributed
# Serialization functions
from tensorflow.python.keras.layers.serialization import deserialize
from tensorflow.python.keras.layers.serialization import serialize
del absolute_import
del division
del print_function
| jendap/tensorflow | tensorflow/python/keras/layers/__init__.py | Python | apache-2.0 | 8,897 |
# -*- coding: utf-8 -*-
"""SMTP email client
.. module:: network.email.smtp_client
:platform: Unix
:synopsis: SMTP email client
.. moduleauthor:: Petr Rašek <[email protected]>
"""
"""
Events:
-------
email_before_connect
email_after_connect
email_before_send_email
email_after_send_email
"""
from hydratk.core.masterhead import MasterHead
from hydratk.core import event
from smtplib import SMTP, SMTP_SSL, SMTPException
from socket import error
from sys import version_info
class EmailClient(object):
"""Class EmailClient
"""
_mh = None
_client = None
_secured = None
_host = None
_port = None
_user = None
_passw = None
_verbose = None
_is_connected = None
def __init__(self, secured=False, verbose=False):
"""Class constructor
Called when the object is initialized
Args:
secured (bool): secured SMTP
verbose (bool): verbose mode
"""
self._mh = MasterHead.get_head()
self._secured = secured
if (not self._secured):
self._client = SMTP()
else:
self._client = SMTP_SSL()
self._verbose = verbose
if (self.verbose):
self._client.set_debuglevel(2)
@property
def client(self):
""" SMTP client property getter """
return self._client
@property
def secured(self):
""" secured property getter """
return self._secured
@property
def host(self):
""" server host property getter """
return self._host
@property
def port(self):
""" server port property getter """
return self._port
@property
def user(self):
""" username property getter """
return self._user
@property
def passw(self):
""" user password property getter """
return self._passw
@property
def verbose(self):
""" verbose mode property getter """
return self._verbose
@property
def is_connected(self):
""" is_connected property getter """
return self._is_connected
def connect(self, host, port=None, user=None, passw=None, timeout=10):
"""Method connects to server
Args:
host (str): server host
port (str): server port, default protocol port
user (str): username
passw (str): password
timeout (int): timeout
Returns:
bool: result
Raises:
event: email_before_connect
event: email_after_connect
"""
try:
            if (port is None):
port = 25 if (not self._secured) else 465
message = '{0}/{1}@{2}:{3} timeout:{4}'.format(
user, passw, host, port, timeout)
self._mh.demsg('htk_on_debug_info', self._mh._trn.msg(
'htk_email_connecting', message), self._mh.fromhere())
ev = event.Event(
'email_before_connect', host, port, user, passw, timeout)
if (self._mh.fire_event(ev) > 0):
host = ev.argv(0)
port = ev.argv(1)
user = ev.argv(2)
passw = ev.argv(3)
timeout = ev.argv(4)
self._host = host
self._port = port
self._user = user
self._passw = passw
if (ev.will_run_default()):
self._client.timeout = timeout
if (self._secured == True and version_info[0] == 3 and version_info[1] >= 4):
self._client = SMTP_SSL(self.host, self.port)
else:
self._client.connect(self.host, self.port)
                if (self._user is not None):
self._client.login(self.user, self.passw)
self._is_connected = True
self._mh.demsg('htk_on_debug_info', self._mh._trn.msg(
'htk_email_connected'), self._mh.fromhere())
ev = event.Event('email_after_connect')
self._mh.fire_event(ev)
return True
except (SMTPException, error) as ex:
self._mh.demsg(
'htk_on_error', 'error: {0}'.format(ex), self._mh.fromhere())
return False
def disconnect(self):
"""Method disconnects from server
Args:
none
Returns:
bool: result
"""
try:
if (not self._is_connected):
self._mh.demsg('htk_on_warning', self._mh._trn.msg(
'htk_email_not_connected'), self._mh.fromhere())
return False
else:
self._client.quit()
self._is_connected = False
self._mh.demsg('htk_on_debug_info', self._mh._trn.msg(
'htk_email_disconnected'), self._mh.fromhere())
return True
except (SMTPException, error) as ex:
self._mh.demsg(
'htk_on_error', 'error: {0}'.format(ex), self._mh.fromhere())
return False
def send_email(self, subject, message, sender='[email protected]', recipients=['[email protected]'],
cc=[], bcc=[]):
"""Method sends email
Args:
subject (str): email subject
message (str): email content, string, mandatory
sender (str): from email address
recipients (list): to email addresses
cc (list): carbon copy email addresses
bcc (list): blind carbon copy email addresses
Returns:
bool: result
Raises:
event: email_before_send_email
event: email_after_send_email
"""
try:
msg = 'From:{0}, To:{1}, CC:{2}, BCC:{3}, Subject:{4}'.format(
sender, recipients, cc, bcc, subject)
self._mh.demsg('htk_on_debug_info', self._mh._trn.msg(
'htk_email_sending', msg), self._mh.fromhere())
if (not self._is_connected):
self._mh.demsg('htk_on_warning', self._mh._trn.msg(
'htk_email_not_connected'), self._mh.fromhere())
return False
ev = event.Event(
'email_before_send_email', subject, message, sender, recipients, cc, bcc)
if (self._mh.fire_event(ev) > 0):
subject = ev.argv(0)
message = ev.argv(1)
sender = ev.argv(2)
recipients = ev.argv(3)
cc = ev.argv(4)
bcc = ev.argv(5)
if (ev.will_run_default()):
msg = 'From: {0}\r\n'.format(sender) + \
'To: {0}\r\n'.format(','.join(recipients)) + \
'CC: {0}\r\n'.format(','.join(cc)) + \
'Subject: {0}\r\n'.format(subject) + \
'\r\n{0}'.format(message)
self._client.sendmail(sender, recipients + cc + bcc, msg)
ev = event.Event('email_after_send_email')
self._mh.fire_event(ev)
self._mh.demsg('htk_on_debug_info', self._mh._trn.msg(
'htk_email_sent'), self._mh.fromhere())
return True
except (SMTPException, error) as ex:
self._mh.demsg(
'htk_on_error', 'error: {0}'.format(ex), self._mh.fromhere())
return False
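# Minimal usage sketch (assumes a configured HydraTK runtime so that
# MasterHead.get_head() works; host and credentials are placeholders):
#     c = EmailClient(secured=False)
#     if c.connect('smtp.example.com', user='user', passw='secret'):
#         c.send_email('Subject', 'Body text', sender='[email protected]',
#                      recipients=['[email protected]'])
#         c.disconnect()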
| hydratk/hydratk-lib-network | src/hydratk/lib/network/email/smtp_client.py | Python | bsd-3-clause | 7,498 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License
from nova.api.openstack import extensions
class Cell_capacities(extensions.ExtensionDescriptor):
"""Adding functionality to get cell capacities."""
name = "CellCapacities"
alias = "os-cell-capacities"
namespace = ("http://docs.openstack.org/compute/ext/"
"cell_capacities/api/v1.1")
updated = "2013-05-27T00:00:00+00:00"
| DirectXMan12/nova-hacking | nova/api/openstack/compute/contrib/cell_capacities.py | Python | apache-2.0 | 1,016 |
'''
* Copyright (c) 2011, University of Kent
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 1. Neither the name of the University of Kent nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* 2. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
* IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED.
*
* 3. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* 4. YOU AGREE THAT THE EXCLUSIONS IN PARAGRAPHS 2 AND 3 ABOVE ARE REASONABLE
* IN THE CIRCUMSTANCES. IN PARTICULAR, YOU ACKNOWLEDGE (1) THAT THIS
* SOFTWARE HAS BEEN MADE AVAILABLE TO YOU FREE OF CHARGE, (2) THAT THIS
* SOFTWARE IS NOT "PRODUCT" QUALITY, BUT HAS BEEN PRODUCED BY A RESEARCH
* GROUP WHO DESIRE TO MAKE THIS SOFTWARE FREELY AVAILABLE TO PEOPLE WHO WISH
* TO USE IT, AND (3) THAT BECAUSE THIS SOFTWARE IS NOT OF "PRODUCT" QUALITY
* IT IS INEVITABLE THAT THERE WILL BE BUGS AND ERRORS, AND POSSIBLY MORE
* SERIOUS FAULTS, IN THIS SOFTWARE.
*
* 5. This license is governed, except to the extent that local laws
* necessarily apply, by the laws of England and Wales.
'''
'''
Created on 1 Feb 2013
@author: Kristy Siu
'''
import logging
import urlparse
import sys
import uuid
from keystone import exception
sys.path.insert(0, '../')
import dm.xmlsec.binding as xmlsec
xmlsec.initialize()
from os.path import dirname, basename
from lxml.etree import parse,tostring,fromstring,ElementTree
from time import localtime, strftime, gmtime
import urllib
import webbrowser
import urllib2
import zlib
import base64
import webob.dec
import webob.exc
import json
from keystone.contrib import mapping
from keystone import catalog
LOG = logging.getLogger(__name__)
class RequestIssuingService(object):
def __init__(self):
self.tmpl_req = """<samlp:AuthnRequest
xmlns:samlp="urn:oasis:names:tc:SAML:2.0:protocol"
xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion"
ID=""
Version="2.0"
IssueInstant=""
AssertionConsumerServiceIndex="0"
AttributeConsumingServiceIndex="0">
<saml:Issuer></saml:Issuer>
<samlp:NameIDPolicy
AllowCreate="true"
Format="urn:oasis:names:tc:SAML:2.0:nameid-format:transient"/>
<Signature xmlns="http://www.w3.org/2000/09/xmldsig#">
<SignedInfo>
<CanonicalizationMethod Algorithm="http://www.w3.org/2001/10/xml-exc-c14n#"/>
<SignatureMethod Algorithm="http://www.w3.org/2000/09/xmldsig#rsa-sha1"/>
<Reference>
<Transforms>
<Transform Algorithm="http://www.w3.org/2000/09/xmldsig#enveloped-signature"/>
</Transforms>
<DigestMethod Algorithm="http://www.w3.org/2000/09/xmldsig#sha1"/>
<DigestValue/>
</Reference>
</SignedInfo>
<SignatureValue/>
<KeyInfo>
<KeyName/>
</KeyInfo>
</Signature>
</samlp:AuthnRequest>"""
def getIdPRequest(self,key, issuer, endpoints):
endpoint = None
        if endpoints:
for e in endpoints:
if e['interface'] == 'public':
endpoint = e['url']
# print (e)
else:
LOG.error('No endpoint found for this service')
resp = {}
resp['idpRequest'] = '?'+self.create_IdpRequest(key, issuer)
resp['idpEndpoint'] = endpoint
return valid_Response(resp)
def sign(self,doc, key):
#print ("doc: ",tostring(doc))
#print ("key: ",key)
node = xmlsec.findNode(doc, xmlsec.dsig("Signature"))
dsigCtx = xmlsec.DSigCtx()
signKey = xmlsec.Key.load(key, xmlsec.KeyDataFormatPem, None)
signKey.name = basename(key)
dsigCtx.signKey = signKey
dsigCtx.sign(node)
#print ("node: ",tostring(node))
#print ("doc : ",tostring(doc))
return tostring(doc)
def create_IdpRequest(self,key, issuer):
time=strftime("%Y-%m-%dT%H:%M:%SZ", gmtime())
id = uuid.uuid4()
doc = ElementTree(fromstring(self.tmpl_req))
doc.getroot().set("ID", id.urn)
doc.getroot().set("IssueInstant", time)
for node in doc.getroot().iter():
if node.tag == "{urn:oasis:names:tc:SAML:2.0:assertion}Issuer":
node.text = issuer
# node = xmlsec.findNode(doc, "Issuer")
# node.text = issuer
#print self.sign(doc,key)
#print self.encodeReq(self.sign(doc,key))
return self.encodeReq(self.sign(doc,key))
def __call__(self):
return None
def deflate(self,data, compresslevel=9):
compress = zlib.compressobj(
compresslevel, # level: 0-9
zlib.DEFLATED, # method: must be DEFLATED
-zlib.MAX_WBITS, # window size in bits:
# -15..-8: negate, suppress header
# 8..15: normal
# 16..30: subtract 16, gzip header
zlib.DEF_MEM_LEVEL, # mem level: 1..8/9
0 # strategy:
# 0 = Z_DEFAULT_STRATEGY
# 1 = Z_FILTERED
# 2 = Z_HUFFMAN_ONLY
# 3 = Z_RLE
# 4 = Z_FIXED
)
deflated = compress.compress(data)
deflated += compress.flush()
return deflated
def inflate(self,data):
decompress = zlib.decompressobj(
-zlib.MAX_WBITS # see above
)
inflated = decompress.decompress(data)
inflated += decompress.flush()
return inflated
    def encodeReq(self, req):
        # SAML HTTP-Redirect binding: deflate, then base64-encode, then URL-encode.
        req = self.deflate(req)
        req = base64.b64encode(req)
        req = urllib.urlencode({"SAMLRequest": req})
        return req
class Negotiator(object):
def __init__(self):
""" do nothing """
raise exception.NotImplemented()
def negotiate(self, data):
""" do nothing """
raise exception.NotImplemented()
class CredentialValidator(object):
def __init__(self):
self.org_mapping_api = mapping.controllers.OrgMappingController()
self.mapping_api = mapping.controllers.AttributeMappingController()
self.catalog_api = catalog.controllers.EndpointV3()
def __call__(self):
return None
def validate(self, data, realm_id, ris):
context = {}
context['is_admin'] = True
context['query_string'] = {}
context['query_string']['service_id'] = realm_id
context['interface'] = 'adminurl'
context['path'] = ""
idp_info = self.catalog_api.list_endpoints(context)
unique_attribute = idp_info["endpoints"][0].get("identifier_attribute",None)
resp = urlparse.parse_qsl(data)
k, v = resp[0]
try:
resp = base64.b64decode(v)
#print resp;
resp = ElementTree(fromstring(resp))
except TypeError:
resp = base64.b64decode(v.replace(" ", "+"))
#print resp;
resp = ElementTree(fromstring(resp))
atts = {}
names = []
for cond in resp.iter("{urn:oasis:names:tc:SAML:2.0:assertion}Conditions"):
expires = cond.attrib.get("NotOnOrAfter")
for name in resp.iter("{urn:oasis:names:tc:SAML:2.0:assertion}NameID"):
names.append(name.text)
for att in resp.iter("{urn:oasis:names:tc:SAML:2.0:assertion}Attribute"):
ats = []
for value in att.iter("{urn:oasis:names:tc:SAML:2.0:assertion}AttributeValue"):
ats.append(value.text)
atts[att.get("Name")] = ats
if unique_attribute is not None and atts.get(unique_attribute, None) is not None:
names = atts.get(unique_attribute)
# print "name : ", names[0]
# print "expires: ", expires
# print "issuers: ", self.check_issuers(data, atts, realm_id)
return names[0], expires, self.check_issuers(data, atts, realm_id)
def check_issuers(self, data, atts, realm_id):
context = {"is_admin": True}
valid_atts = {}
for att in atts:
for val in atts[att]:
org_atts = self.org_mapping_api.list_org_attributes(context)['org_attributes']
for org_att in org_atts:
if org_att['type'] == att:
if org_att['value'] == val or org_att['value'] is None:
try:
self.org_mapping_api.check_attribute_can_be_issued(context, service_id=realm_id, org_attribute_id=org_att['id'])
try:
valid_atts[att].append(val)
                            except KeyError:
valid_atts[att] = [val]
except exception.NotFound:
pass
return valid_atts
def valid_Response(response):
resp = webob.Response(content_type='application/json')
resp.body = json.dumps(response)
return resp
def inflate(data):
decompress = zlib.decompressobj(
-zlib.MAX_WBITS # see above
)
inflated = decompress.decompress(data)
inflated += decompress.flush()
return inflated
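# Decoding sketch for the redirect-binding payload built by
# RequestIssuingService.encodeReq (inverse order: url-decode, base64-decode, inflate):
#     from urlparse import parse_qs
#     raw = parse_qs(encoded)['SAMLRequest'][0]
#     xml = inflate(base64.b64decode(raw))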
| ioram7/keystone-federado-pgid2013 | build/lib.linux-x86_64-2.7/keystone/contrib/federated/middleware/saml.py | Python | apache-2.0 | 10,370 |
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import time
import boto.exception
from oslo_log import log as logging
import testtools
from tempest import config
CONF = config.CONF
LOG = logging.getLogger(__name__)
def state_wait(lfunction, final_set=set(), valid_set=None):
# TODO(afazekas): evaluate using ABC here
if not isinstance(final_set, set):
final_set = set((final_set,))
if not isinstance(valid_set, set) and valid_set is not None:
valid_set = set((valid_set,))
start_time = time.time()
old_status = status = lfunction()
while True:
if status != old_status:
            LOG.info('State transition "%s" ==> "%s" in %d seconds', old_status,
status, time.time() - start_time)
if status in final_set:
return status
if valid_set is not None and status not in valid_set:
return status
dtime = time.time() - start_time
if dtime > CONF.boto.build_timeout:
raise testtools.TestCase\
                .failureException("State change timeout exceeded! "
                                  '(%ds) While waiting '
                                  'for %s at "%s"' %
(dtime, final_set, status))
time.sleep(CONF.boto.build_interval)
old_status = status
status = lfunction()
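# Usage sketch (hypothetical lfunction polling a boto EC2 instance):
#     state = state_wait(lambda: instance.update(), final_set={'running'},
#                        valid_set={'pending', 'running'})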
def re_search_wait(lfunction, regexp):
"""Stops waiting on success."""
start_time = time.time()
while True:
text = lfunction()
result = re.search(regexp, text)
if result is not None:
            LOG.info('Pattern "%s" found in %d seconds in "%s"',
regexp,
time.time() - start_time,
text)
return result
dtime = time.time() - start_time
if dtime > CONF.boto.build_timeout:
raise testtools.TestCase\
                .failureException('Pattern find timeout exceeded! '
                                  '(%ds) While waiting for '
                                  '"%s" pattern in "%s"' %
(dtime, regexp, text))
time.sleep(CONF.boto.build_interval)
def wait_no_exception(lfunction, exc_class=None, exc_matcher=None):
"""Stops waiting on success."""
start_time = time.time()
if exc_matcher is not None:
exc_class = boto.exception.BotoServerError
if exc_class is None:
exc_class = BaseException
while True:
result = None
try:
result = lfunction()
            LOG.info('No Exception in %d seconds',
time.time() - start_time)
return result
except exc_class as exc:
if exc_matcher is not None:
res = exc_matcher.match(exc)
if res is not None:
LOG.info(res)
raise exc
# Let the other exceptions propagate
dtime = time.time() - start_time
if dtime > CONF.boto.build_timeout:
raise testtools.TestCase\
.failureException("Wait timeout exceeded! (%ds)" % dtime)
time.sleep(CONF.boto.build_interval)
# NOTE(afazekas): EC2/boto normally raise exception instead of empty list
def wait_exception(lfunction):
"""Returns with the exception or raises one."""
start_time = time.time()
while True:
try:
lfunction()
except BaseException as exc:
            LOG.info('Exception in %d seconds',
time.time() - start_time)
return exc
dtime = time.time() - start_time
if dtime > CONF.boto.build_timeout:
raise testtools.TestCase\
.failureException("Wait timeout exceeded! (%ds)" % dtime)
time.sleep(CONF.boto.build_interval)
# TODO(afazekas): consider strategy design pattern..
| mtreinish/tempest_ec2 | tempest_ec2/tests/thirdparty/boto/utils/wait.py | Python | apache-2.0 | 4,468 |
#
# This file is part of Mapnik (c++ mapping toolkit)
#
# Copyright (C) 2013 Artem Pavlenko
#
# Mapnik is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
#
Import ('plugin_base')
Import ('env')
from copy import copy
PLUGIN_NAME = 'ogr'
plugin_env = plugin_base.Clone()
plugin_sources = Split(
"""
%(PLUGIN_NAME)s_converter.cpp
%(PLUGIN_NAME)s_datasource.cpp
%(PLUGIN_NAME)s_featureset.cpp
%(PLUGIN_NAME)s_index_featureset.cpp
""" % locals()
)
cxxflags = []
plugin_env['LIBS'] = []
if env['RUNTIME_LINK'] == 'static':
cmd = 'gdal-config --dep-libs'
plugin_env.ParseConfig(cmd)
# Link Library to Dependencies
plugin_env.Append(LIBS=env['PLUGINS']['ogr']['lib'])
libraries = copy(plugin_env['LIBS'])
if env.get('BOOST_LIB_VERSION_FROM_HEADER'):
boost_version_from_header = int(env['BOOST_LIB_VERSION_FROM_HEADER'].split('_')[1])
if boost_version_from_header < 46:
# avoid ubuntu issue with boost interprocess:
# https://github.com/mapnik/mapnik/issues/1082
cxxflags.append('-fpermissive')
plugin_env.Append(CXXFLAGS=cxxflags)
if env['PLUGIN_LINKING'] == 'shared':
libraries.append('mapnik')
libraries.append(env['ICU_LIB_NAME'])
libraries.append('boost_system%s' % env['BOOST_APPEND'])
TARGET = plugin_env.SharedLibrary('../%s' % PLUGIN_NAME,
SHLIBPREFIX='',
SHLIBSUFFIX='.input',
source=plugin_sources,
LIBS=libraries)
# if the plugin links to libmapnik ensure it is built first
Depends(TARGET, env.subst('../../../src/%s' % env['MAPNIK_LIB_NAME']))
if 'uninstall' not in COMMAND_LINE_TARGETS:
env.Install(env['MAPNIK_INPUT_PLUGINS_DEST'], TARGET)
env.Alias('install', env['MAPNIK_INPUT_PLUGINS_DEST'])
plugin_obj = {
'CXXFLAGS': cxxflags,
'LIBS': libraries,
'SOURCES': plugin_sources,
}
Return('plugin_obj')
| yiqingj/work | plugins/input/ogr/build.py | Python | lgpl-2.1 | 2,637 |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2010 Jendrik Seipp ([email protected])
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
| jendrikseipp/pogo | pogo/__init__.py | Python | gpl-2.0 | 794 |
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2018 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""A filter for signals which either filters or passes them."""
import functools
from PyQt5.QtCore import QObject
from qutebrowser.utils import debug, log, objreg
class SignalFilter(QObject):
"""A filter for signals.
Signals are only passed to the parent TabbedBrowser if they originated in
the currently shown widget.
Attributes:
_win_id: The window ID this SignalFilter is associated with.
Class attributes:
BLACKLIST: List of signal names which should not be logged.
"""
BLACKLIST = ['cur_scroll_perc_changed', 'cur_progress', 'cur_link_hovered']
def __init__(self, win_id, parent=None):
super().__init__(parent)
self._win_id = win_id
def create(self, signal, tab):
"""Factory for partial _filter_signals functions.
Args:
signal: The pyqtSignal to filter.
tab: The WebView to create filters for.
Return:
A partial function calling _filter_signals with a signal.
"""
return functools.partial(self._filter_signals, signal, tab)
def _filter_signals(self, signal, tab, *args):
"""Filter signals and trigger TabbedBrowser signals if needed.
Triggers signal if the original signal was sent from the _current_ tab
and not from any other one.
The original signal does not matter, since we get the new signal and
all args.
Args:
signal: The signal to emit if the sender was the current widget.
tab: The WebView which the filter belongs to.
*args: The args to pass to the signal.
"""
log_signal = debug.signal_name(signal) not in self.BLACKLIST
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=self._win_id)
try:
tabidx = tabbed_browser.widget.indexOf(tab)
except RuntimeError:
# The tab has been deleted already
return
if tabidx == tabbed_browser.widget.currentIndex():
if log_signal:
log.signals.debug("emitting: {} (tab {})".format(
debug.dbg_signal(signal, args), tabidx))
signal.emit(*args)
else:
if log_signal:
log.signals.debug("ignoring: {} (tab {})".format(
debug.dbg_signal(signal, args), tabidx))
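# Usage sketch (hypothetical wiring, normally done by TabbedBrowser; the signal
# names are illustrative):
#     filt = SignalFilter(win_id, parent=tabbed_browser)
#     tab.url_changed.connect(filt.create(tabbed_browser.cur_url_changed, tab))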
| toofar/qutebrowser | qutebrowser/browser/signalfilter.py | Python | gpl-3.0 | 3,206 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Humanizing functions for numbers."""
import re
from fractions import Fraction
from . import compat
from .i18n import gettext as _, gettext_noop as N_, pgettext as P_
def ordinal(value):
"""Converts an integer to its ordinal as a string. 1 is '1st', 2 is '2nd',
3 is '3rd', etc. Works for any integer or anything int() will turn into an
integer. Anything other value will have nothing done to it."""
try:
value = int(value)
except (TypeError, ValueError):
return value
t = (P_('0', 'th'),
P_('1', 'st'),
P_('2', 'nd'),
P_('3', 'rd'),
P_('4', 'th'),
P_('5', 'th'),
P_('6', 'th'),
P_('7', 'th'),
P_('8', 'th'),
P_('9', 'th'))
if value % 100 in (11, 12, 13): # special case
return "%d%s" % (value, t[0])
return '%d%s' % (value, t[value % 10])
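# e.g. ordinal(1) -> '1st', ordinal(3) -> '3rd', ordinal(111) -> '111th'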
def intcomma(value):
"""Converts an integer to a string containing commas every three digits.
For example, 3000 becomes '3,000' and 45000 becomes '45,000'. To maintain
some compatability with Django's intcomma, this function also accepts
floats."""
try:
if isinstance(value, compat.string_types):
float(value.replace(',', ''))
else:
float(value)
except (TypeError, ValueError):
return value
orig = str(value)
    new = re.sub(r"^(-?\d+)(\d{3})", r'\g<1>,\g<2>', orig)
if orig == new:
return new
else:
return intcomma(new)
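# e.g. intcomma(45000) -> '45,000', intcomma(1234567.25) -> '1,234,567.25'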
powers = [10 ** x for x in (6, 9, 12, 15, 18, 21, 24, 27, 30, 33, 100)]
human_powers = (N_('million'), N_('billion'), N_('trillion'), N_('quadrillion'),
N_('quintillion'), N_('sextillion'), N_('septillion'),
N_('octillion'), N_('nonillion'), N_('decillion'), N_('googol'))
def intword(value, format='%.1f'):
"""Converts a large integer to a friendly text representation. Works best for
numbers over 1 million. For example, 1000000 becomes '1.0 million', 1200000
becomes '1.2 million' and '1200000000' becomes '1.2 billion'. Supports up to
decillion (33 digits) and googol (100 digits). You can pass format to change
the number of decimal or general format of the number portion. This function
returns a string unless the value passed was unable to be coaxed into an int."""
try:
value = int(value)
except (TypeError, ValueError):
return value
if value < powers[0]:
return str(value)
for ordinal, power in enumerate(powers[1:], 1):
if value < power:
chopped = value / float(powers[ordinal - 1])
return (' '.join([format, _(human_powers[ordinal - 1])])) % chopped
return str(value)
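# e.g. intword(1200000) -> '1.2 million', intword(8100000000) -> '8.1 billion'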
def apnumber(value):
"""For numbers 1-9, returns the number spelled out. Otherwise, returns the
number. This follows Associated Press style. This always returns a string
unless the value was not int-able, unlike the Django filter."""
try:
value = int(value)
except (TypeError, ValueError):
return value
if not 0 < value < 10:
return str(value)
return (_('one'), _('two'), _('three'), _('four'), _('five'), _('six'),
_('seven'), _('eight'), _('nine'))[value - 1]
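# e.g. apnumber(4) -> 'four', apnumber(10) -> '10'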
def fractional(value):
'''
There will be some cases where one might not want to show
ugly decimal places for floats and decimals.
This function returns a human readable fractional number
in form of fractions and mixed fractions.
Pass in a string, or a number or a float, and this function returns
a string representation of a fraction
or whole number
or a mixed fraction
Examples:
fractional(0.3) will return '1/3'
fractional(1.3) will return '1 3/10'
fractional(float(1/3)) will return '1/3'
fractional(1) will return '1'
This will always return a string.
'''
try:
number = float(value)
except (TypeError, ValueError):
return value
wholeNumber = int(number)
frac = Fraction(number - wholeNumber).limit_denominator(1000)
numerator = frac._numerator
denominator = frac._denominator
if wholeNumber and not numerator and denominator == 1:
return '%.0f' % wholeNumber # this means that an integer was passed in (or variants of that integer like 1.0000)
elif not wholeNumber:
return '%.0f/%.0f' % (numerator, denominator)
else:
return '%.0f %.0f/%.0f' % (wholeNumber, numerator, denominator)
| fniephaus/alfred-travis-ci | src/humanize/number.py | Python | mit | 4,567 |
import stp.play as play
import stp.tactic as tactic
from rj_gameplay.tactic import (
# pass_tactic,
pass_seek,
goalie_tactic,
clear_tactic,
wall_tactic,
)
import stp.skill as skill
import stp.role as role
from stp.role.assignment.naive import NaiveRoleAssignment
import stp.rc as rc
from typing import (
Dict,
List,
Tuple,
Type,
)
import numpy as np
from rj_gameplay.calculations import wall_calculations
class RestartPlay(play.IPlay):
    """One robot chips the ball upfield while wall, seek, and goalie tactics support it."""
def __init__(self):
self.target_point = np.array([1.0, 4.0])
# TODO: simplify tactic with list (see basic_defense.py)
self.goalie_tactic = goalie_tactic.GoalieTactic()
self.clear_tactic = clear_tactic.Clear(
np.array([0.0, 9.0]), chip=True, kick_speed=3.0
)
# TODO: make it pass
"""
self.pass_tactic = pass_tactic.Pass(
self.target_point, pass_tactic.PasserCost(self.target_point),
pass_tactic.PassToClosestReceiver(self.target_point))
self.seek_tactic = pass_seek.Seek(
self.target_point, pass_seek.restart_seek,
pass_seek.SeekCost(self.target_point))
"""
self.wall_tactic_1 = wall_tactic.WallTactic(role.Priority.LOW, cost_scale=0.1)
self.wall_tactic_2 = wall_tactic.WallTactic(role.Priority.LOW, cost_scale=0.1)
left_pt = np.array([1.5, 7.5])
self.seek_left = pass_seek.Seek(
left_pt,
pass_seek.build_seek_function(left_pt),
pass_seek.SeekCost(left_pt),
)
right_pt = np.array([-1.5, 7.5])
self.seek_right = pass_seek.Seek(
right_pt,
pass_seek.build_seek_function(right_pt),
pass_seek.SeekCost(right_pt),
)
self.role_assigner = NaiveRoleAssignment()
# number of wallers for finding wall_pts
self.num_wallers = 2
def compute_props(self, prev_props):
pass
def tick(
self,
world_state: rc.WorldState,
prev_results: role.assignment.FlatRoleResults,
props,
) -> Tuple[
Dict[Type[tactic.SkillEntry], List[role.RoleRequest]],
List[tactic.SkillEntry],
]:
# pre-calculate wall points and store in numpy array
wall_pts = wall_calculations.find_wall_pts(self.num_wallers, world_state)
# Get role requests from all tactics and put them into a dictionary
role_requests: play.RoleRequests = {}
# role_requests[self.pass_tactic] = self.pass_tactic.get_requests(world_state, None)
# role_requests[self.seek_tactic] = self.seek_tactic.get_requests(world_state, None)
role_requests[self.clear_tactic] = self.clear_tactic.get_requests(
world_state, None
)
role_requests[self.wall_tactic_1] = self.wall_tactic_1.get_requests(
world_state, wall_pts[0], None
)
role_requests[self.wall_tactic_2] = self.wall_tactic_2.get_requests(
world_state, wall_pts[1], None
)
role_requests[self.goalie_tactic] = self.goalie_tactic.get_requests(
world_state, None
)
role_requests[self.seek_left] = self.seek_left.get_requests(world_state, None)
role_requests[self.seek_right] = self.seek_right.get_requests(world_state, None)
# Flatten requests and use role assigner on them
flat_requests = play.flatten_requests(role_requests)
flat_results = self.role_assigner.assign_roles(
flat_requests, world_state, prev_results
)
role_results = play.unflatten_results(flat_results)
# Get list of all skills with assigned roles from tactics
skill_dict = {}
skills = []
skills = self.clear_tactic.tick(world_state, role_results[self.clear_tactic])
skills += self.goalie_tactic.tick(world_state, role_results[self.goalie_tactic])
skills += self.wall_tactic_1.tick(world_state, role_results[self.wall_tactic_1])
skills += self.wall_tactic_2.tick(world_state, role_results[self.wall_tactic_2])
skills += self.seek_left.tick(world_state, role_results[self.seek_left])
skills += self.seek_right.tick(world_state, role_results[self.seek_right])
skill_dict.update(role_results[self.clear_tactic])
skill_dict.update(role_results[self.goalie_tactic])
skill_dict.update(role_results[self.wall_tactic_1])
skill_dict.update(role_results[self.wall_tactic_2])
skill_dict.update(role_results[self.seek_left])
skill_dict.update(role_results[self.seek_right])
return (skill_dict, skills)
def is_done(self, world_state: rc.WorldState):
return self.clear_tactic.is_done(world_state)
class DirectRestartPlay(play.IPlay):
    """Direct restart: one robot kicks the ball upfield while wall, seek, and goalie tactics support it."""
def __init__(self):
self.target_point = np.array([1.0, 4.0])
# TODO: simplify tactic with list (see basic_defense.py)
self.goalie_tactic = goalie_tactic.GoalieTactic()
self.clear_tactic = clear_tactic.Clear(
np.array([0.0, 9.0]), chip=False, kick_speed=5.5
)
# TODO: make it pass
"""
self.pass_tactic = pass_tactic.Pass(
self.target_point, pass_tactic.PasserCost(self.target_point),
pass_tactic.PassToClosestReceiver(self.target_point))
self.seek_tactic = pass_seek.Seek(
self.target_point, pass_seek.restart_seek,
pass_seek.SeekCost(self.target_point))
"""
self.wall_tactic_1 = wall_tactic.WallTactic(role.Priority.LOW, cost_scale=0.1)
self.wall_tactic_2 = wall_tactic.WallTactic(role.Priority.LOW, cost_scale=0.1)
# might need to change to for-loop
self.num_wallers = 2
left_pt = np.array([1.5, 7.5])
self.seek_left = pass_seek.Seek(
left_pt,
pass_seek.build_seek_function(left_pt),
pass_seek.SeekCost(left_pt),
)
right_pt = np.array([-1.5, 7.5])
self.seek_right = pass_seek.Seek(
right_pt,
pass_seek.build_seek_function(right_pt),
pass_seek.SeekCost(right_pt),
)
self.role_assigner = NaiveRoleAssignment()
def compute_props(self, prev_props):
pass
def tick(
self,
world_state: rc.WorldState,
prev_results: role.assignment.FlatRoleResults,
props,
) -> Tuple[
Dict[Type[tactic.SkillEntry], List[role.RoleRequest]],
List[tactic.SkillEntry],
]:
# pre-calculate wall points and store in numpy array
        wall_pts = wall_calculations.find_wall_pts(self.num_wallers, world_state)
# Get role requests from all tactics and put them into a dictionary
role_requests: play.RoleRequests = {}
# role_requests[self.pass_tactic] = self.pass_tactic.get_requests(world_state, None)
# role_requests[self.seek_tactic] = self.seek_tactic.get_requests(world_state, None)
role_requests[self.clear_tactic] = self.clear_tactic.get_requests(
world_state, None
)
role_requests[self.wall_tactic_1] = self.wall_tactic_1.get_requests(
world_state, wall_pts[0], None
)
role_requests[self.wall_tactic_2] = self.wall_tactic_2.get_requests(
world_state, wall_pts[1], None
)
role_requests[self.goalie_tactic] = self.goalie_tactic.get_requests(
world_state, None
)
role_requests[self.seek_left] = self.seek_left.get_requests(world_state, None)
role_requests[self.seek_right] = self.seek_right.get_requests(world_state, None)
# Flatten requests and use role assigner on them
flat_requests = play.flatten_requests(role_requests)
flat_results = self.role_assigner.assign_roles(
flat_requests, world_state, prev_results
)
role_results = play.unflatten_results(flat_results)
# Get list of all skills with assigned roles from tactics
skill_dict = {}
skills = []
skills = self.clear_tactic.tick(world_state, role_results[self.clear_tactic])
skills += self.goalie_tactic.tick(world_state, role_results[self.goalie_tactic])
skills += self.wall_tactic_1.tick(world_state, role_results[self.wall_tactic_1])
skills += self.wall_tactic_2.tick(world_state, role_results[self.wall_tactic_2])
skills += self.seek_left.tick(world_state, role_results[self.seek_left])
skills += self.seek_right.tick(world_state, role_results[self.seek_right])
skill_dict.update(role_results[self.clear_tactic])
skill_dict.update(role_results[self.goalie_tactic])
skill_dict.update(role_results[self.wall_tactic_1])
skill_dict.update(role_results[self.wall_tactic_2])
skill_dict.update(role_results[self.seek_left])
skill_dict.update(role_results[self.seek_right])
return (skill_dict, skills)
def is_done(self, world_state: rc.WorldState):
return self.clear_tactic.is_done(world_state)
| RoboJackets/robocup-software | rj_gameplay/rj_gameplay/play/restart.py | Python | apache-2.0 | 9,160 |
# -*- coding: utf-8 -*-
from module.plugins.internal.DeadCrypter import DeadCrypter, create_getInfo
class StealthTo(DeadCrypter):
__name__ = "StealthTo"
__type__ = "crypter"
__version__ = "0.22"
__status__ = "stable"
__pattern__ = r'http://(?:www\.)?stealth\.to/folder/.+'
__config__ = [] #@TODO: Remove in 0.4.10
__description__ = """Stealth.to decrypter plugin"""
__license__ = "GPLv3"
__authors__ = [("spoob", "[email protected]")]
getInfo = create_getInfo(StealthTo)
| fzimmermann89/pyload | module/plugins/crypter/StealthTo.py | Python | gpl-3.0 | 530 |
import json
import time
import sched
from api import send_message
scheduler = sched.scheduler(time.time, time.sleep)
def load_reminders():
reminders = {}
try:
with open("data/reminders.json") as fp:
reminders = json.load(fp)
except Exception:
with open("data/reminders.json", "w") as fp:
json.dump(reminders, fp, indent=4)
return reminders
def save_reminders(reminders):
with open("data/reminders.json", "w") as fp:
json.dump(reminders, fp, indent=4)
def list_reminders(chat):
chat = str(chat)
reminders = load_reminders()
msg = ""
reminders = reminders[chat]
for reminder in reminders:
futuretime = time.localtime(float(reminder))
msg += time.strftime("%d/%m/%y as %H:%M:%S", futuretime) + ": " + reminders[reminder] + "\n"
return msg
def add_reminder(chat, date, message):
chat = str(chat)
reminders = load_reminders()
assert type(reminders) is dict
if chat not in reminders:
reminders[chat] = {}
reminders[chat][date] = message
save_reminders(reminders)
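# data/reminders.json maps chat ids to {unix-timestamp: message}, e.g.:
#     {"12345": {"1700000000.0": "auguna cosa"}}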
def check_time():
reminders = load_reminders()
for chat in reminders:
for date in reminders[chat]:
if float(date) < time.time():
send_message(chat, "O MEU JA DEU ORA D " + reminders[chat][date])
# print(reminders[chat][date])
reminders[chat].pop(date)
save_reminders(reminders)
break
scheduler.enter(1, 1, check_time)
def on_msg_received(msg, matches):
chat = msg["chat"]["id"]
days = matches.group(1)
hours = matches.group(2)
minutes = matches.group(3)
seconds = matches.group(4)
message = matches.group(5)
timeoffset = 0
if days is not None:
days = days.lower().replace("d", "")
timeoffset += 86400 * int(days)
if hours is not None:
hours = hours.lower().replace("h", "")
timeoffset += 3600 * int(hours)
if minutes is not None:
minutes = minutes.lower().replace("m", "")
timeoffset += 60 * int(minutes)
if seconds is not None:
seconds = seconds.lower().replace("s", "")
timeoffset += int(seconds)
if days is None and hours is None and minutes is None and seconds is None and message is None:
response = list_reminders(chat)
send_message(chat, response)
return
if message is None:
message = "auguna cosa"
futuretime = time.time() + timeoffset
if "username" in msg["from"]:
message += " blz @" + msg["from"]["username"]
add_reminder(chat, futuretime, message)
futuretime = time.localtime(futuretime)
response = "belesinhaaaaa vo lenbra dia " + time.strftime("%d/%m/%y as %H:%M:%S", futuretime) + " sobr \"" + message + "\""
send_message(chat, response)
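# The trigger regex is registered elsewhere; it presumably captures optional
# day/hour/minute/second tokens plus a free-form message, e.g. something like
#     r"(\d+[dD])? ?(\d+[hH])? ?(\d+[mM])? ?(\d+[sS])? ?(.+)?"
# so that "1d2h comprar pao" schedules a reminder roughly 26 hours from now.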
def run():
scheduler.enter(1, 1, check_time)
scheduler.run()
| lucasberti/telegrao-py | plugins/melenbra.py | Python | mit | 2,962 |
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8-80 compliant>
import bpy
from bpy.types import Operator
from bpy.props import EnumProperty, IntProperty
class MeshMirrorUV(Operator):
"""Copy mirror UV coordinates on the X axis based on a mirrored mesh"""
bl_idname = "mesh.faces_mirror_uv"
bl_label = "Copy Mirrored UV coords"
bl_options = {'REGISTER', 'UNDO'}
direction = EnumProperty(
name="Axis Direction",
items=(('POSITIVE', "Positive", ""),
('NEGATIVE', "Negative", "")),
)
precision = IntProperty(
name="Precision",
description=("Tolerance for finding vertex duplicates"),
min=1, max=16,
soft_min=1, soft_max=16,
default=3,
)
@classmethod
def poll(cls, context):
obj = context.active_object
return (obj and obj.type == 'MESH' and obj.data.uv_textures.active)
def execute(self, context):
DIR = (self.direction == 'NEGATIVE')
precision = self.precision
double_warn = 0
ob = context.active_object
is_editmode = (ob.mode == 'EDIT')
if is_editmode:
bpy.ops.object.mode_set(mode='OBJECT', toggle=False)
mesh = ob.data
# mirror lookups
mirror_gt = {}
mirror_lt = {}
vcos = (v.co.to_tuple(precision) for v in mesh.vertices)
for i, co in enumerate(vcos):
if co[0] >= 0.0:
double_warn += co in mirror_gt
mirror_gt[co] = i
if co[0] <= 0.0:
double_warn += co in mirror_lt
mirror_lt[co] = i
vmap = {}
for mirror_a, mirror_b in ((mirror_gt, mirror_lt),
(mirror_lt, mirror_gt)):
for co, i in mirror_a.items():
nco = (-co[0], co[1], co[2])
j = mirror_b.get(nco)
if j is not None:
vmap[i] = j
polys = mesh.polygons
loops = mesh.loops
uv_loops = mesh.uv_layers.active.data
nbr_polys = len(polys)
mirror_pm = {}
pmap = {}
puvs = [None] * nbr_polys
puvs_cpy = [None] * nbr_polys
puvsel = [None] * nbr_polys
pcents = [None] * nbr_polys
vidxs = [None] * nbr_polys
for i, p in enumerate(polys):
lstart = lend = p.loop_start
lend += p.loop_total
puvs[i] = tuple(uv.uv for uv in uv_loops[lstart:lend])
puvs_cpy[i] = tuple(uv.copy() for uv in puvs[i])
puvsel[i] = (False not in
(uv.select for uv in uv_loops[lstart:lend]))
# Vert idx of the poly.
vidxs[i] = tuple(l.vertex_index for l in loops[lstart:lend])
pcents[i] = p.center
# Preparing next step finding matching polys.
mirror_pm[tuple(sorted(vidxs[i]))] = i
for i in range(nbr_polys):
# Find matching mirror poly.
tvidxs = [vmap.get(j) for j in vidxs[i]]
if None not in tvidxs:
tvidxs.sort()
j = mirror_pm.get(tuple(tvidxs))
if j is not None:
pmap[i] = j
for i, j in pmap.items():
if not puvsel[i] or not puvsel[j]:
continue
elif DIR == 0 and pcents[i][0] < 0.0:
continue
elif DIR == 1 and pcents[i][0] > 0.0:
continue
# copy UVs
uv1 = puvs[i]
uv2 = puvs_cpy[j]
# get the correct rotation
v1 = vidxs[j]
v2 = tuple(vmap[k] for k in vidxs[i])
if len(v1) == len(v2):
for k in range(len(v1)):
k_map = v1.index(v2[k])
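                    # mirror U about the 0.5 axis (u -> 1 - u), keep V unchanged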
uv1[k].xy = - (uv2[k_map].x - 0.5) + 0.5, uv2[k_map].y
if is_editmode:
bpy.ops.object.mode_set(mode='EDIT', toggle=False)
if double_warn:
self.report({'WARNING'},
"%d duplicates found, mirror may be incomplete" %
double_warn)
return {'FINISHED'}
class MeshSelectNext(Operator):
"""Select the next element (using selection order)"""
bl_idname = "mesh.select_next_item"
bl_label = "Select Next Element"
bl_options = {'REGISTER', 'UNDO'}
@classmethod
def poll(cls, context):
return (context.mode == 'EDIT_MESH')
def execute(self, context):
import bmesh
from .bmesh import find_adjacent
obj = context.active_object
me = obj.data
bm = bmesh.from_edit_mesh(me)
if find_adjacent.select_next(bm, self.report):
bm.select_flush_mode()
bmesh.update_edit_mesh(me, False)
return {'FINISHED'}
class MeshSelectPrev(Operator):
    """Select the previous element (using selection order)"""
bl_idname = "mesh.select_prev_item"
bl_label = "Select Previous Element"
bl_options = {'REGISTER', 'UNDO'}
@classmethod
def poll(cls, context):
return (context.mode == 'EDIT_MESH')
def execute(self, context):
import bmesh
from .bmesh import find_adjacent
obj = context.active_object
me = obj.data
bm = bmesh.from_edit_mesh(me)
if find_adjacent.select_prev(bm, self.report):
bm.select_flush_mode()
bmesh.update_edit_mesh(me, False)
return {'FINISHED'}
| Microvellum/Fluid-Designer | win64-vc/2.78/scripts/startup/bl_operators/mesh.py | Python | gpl-3.0 | 6,277 |
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 22 17:59:04 2016
@author: Wasit
"""
def fib(x):
    """Return the x-th Fibonacci number (1, 1, 2, 3, 5, ...)."""
    if x <= 2:
        return 1
    else:
        return fib(x-1) + fib(x-2)
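# e.g. fib(1) == 1, fib(4) == 3, fib(10) == 55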
if __name__ == "__main__":
    print fib(4)
| wasit7/tutorials | kivy/fib.py | Python | mit | 207 |
#!/usr/bin/env python
from horton import *
import h5py as h5
import os
log.set_level(log.silent)
def store_wfn(fn_h5, mixing, name_case, exp):
with h5.File(fn_h5) as f:
name_mixing = '%08.5f' % (-np.log10(mixing))
grp = f.require_group(name_mixing)
grp = grp.require_group(name_case)
# clear the group if anything was present
for key in grp.keys():
del grp[key]
for key in grp.attrs.keys():
del grp.attrs[key]
exp.to_hdf5(grp)
# The following is needed to create object of the right type when
# reading from the checkpoint:
grp.attrs['class'] = exp.__class__.__name__
def get_random_occupations(nbasis, nep):
result = np.zeros(nbasis)
# this is not uniformely random, but it is good enough.
for iep in xrange(int(np.round(nep))):
total = 1.0
while total > 0:
if total < 0.01:
fraction = total
total = 0.0
else:
fraction = np.random.uniform(0, total)
total -= fraction
index = np.random.randint(nbasis)
result[index] += fraction
if result[index] > 1:
total += result[index] - 1
result[index] = 1.0
return result
def main():
try:
os.remove("guesses.h5")
except OSError:
pass
fn_name = context.get_fn('test/2h-azirine.xyz')
mol = Molecule.from_file(fn_name)
obasis = get_gobasis(mol.coordinates, mol.numbers, '3-21G')
lf = DenseLinalgFactory(obasis.nbasis)
# Compute Gaussian integrals
olp = obasis.compute_overlap(lf)
kin = obasis.compute_kinetic(lf)
na = obasis.compute_nuclear_attraction(mol.coordinates, mol.pseudo_numbers, lf)
er = obasis.compute_electron_repulsion(lf)
# Create alpha orbitals
exp_alpha = lf.create_expansion()
# Initial guess
guess_core_hamiltonian(olp, kin, na, exp_alpha)
# Construct the restricted HF effective Hamiltonian
external = {'nn': compute_nucnuc(mol.coordinates, mol.pseudo_numbers)}
terms = [
ROneBodyTerm(kin, 'kin'),
RDirectTerm(er, 'hartree'),
RExchangeTerm(er, 'x_hf'),
ROneBodyTerm(na, 'ne'),
]
ham = REffHam(terms, external)
# Decide how to occupy the orbitals (5 alpha electrons)
occ_model = AufbauOccModel(5)
# Converge WFN with plain SCF
scf_solver = PlainSCFSolver(1e-6)
scf_solver(ham, lf, olp, occ_model, exp_alpha)
# generate randomized wavefunctions:
# - arbitrary unitary transformation
# - arbitrary (fractional) occupation numbers (with proper sum)
nbasis = obasis.nbasis
random_exps = []
for irandom in xrange(nrandom):
# random symmetric matrix
tmp1 = np.random.normal(0, 1, (nbasis, nbasis))
tmp1 = tmp1 + tmp1.T
# the random unitary matrix
utrans = np.linalg.eigh(tmp1)[1]
# apply transformation
coeffs = np.dot(exp_alpha.coeffs, utrans)
# random occupation numbers
occupations = get_random_occupations(nbasis, exp_alpha.occupations.sum())
# create a expansion object
exp_alpha_temp = lf.create_expansion()
# assign the random orbitals
exp_alpha_temp.coeffs[:] = coeffs
exp_alpha_temp.occupations[:] = occupations
# store the expansion in the h5 file and in the list
store_wfn('guesses.h5', 1.0, 'case_%03i' % irandom, exp_alpha_temp)
random_exps.append(exp_alpha_temp)
# interpolate between solution and random wfns
for mixing in mixings[1:]: # do not consider mixing==1.0
for irandom in xrange(nrandom):
# create a new wfn object.
# construct the mixed density matrix
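            # D_mixed = mixing * D_random + (1 - mixing) * D_converged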
dm_mixed = lf.create_one_body()
dm_mixed.iadd(random_exps[irandom].to_dm(), mixing)
dm_mixed.iadd(ham.cache['dm_alpha'], 1-mixing)
# turn it into a set of orbitals
exp_alpha_temp = lf.create_expansion()
exp_alpha_temp.derive_naturals(dm_mixed, olp)
# store the wfn in the h5 file
store_wfn('guesses.h5', mixing, 'case_%03i' % irandom, exp_alpha_temp)
if __name__ == '__main__':
mixings = np.array([1, 1e-1, 1e-2, 1e-3, 1e-4, 1e-5, 1e-6, 1e-7, 1e-8])
nrandom = 20
main()
| eustislab/horton | tools/convergence_tester/make_guesses.py | Python | gpl-3.0 | 4,368 |
# coding=utf-8
__author__ = "AstroPrint Product Team <[email protected]>"
__license__ = "GNU Affero General Public License http://www.gnu.org/licenses/agpl.html"
__copyright__ = "Copyright (C) 2017 3DaGoGo, Inc - Released under terms of the AGPLv3 License"
import os
import threading
import time
from astroprint.plugin import Plugin, PrinterCommsService, SystemEvent, PrinterState
from astroprint.printfiles.gcode import PrintFileManagerGcode
class VirtualComms(Plugin, PrinterCommsService):
# PrinterCommsService
def initPrinterCommsService(self, printerManager):
super(VirtualComms, self).initPrinterCommsService(printerManager)
settings_file = "%s/virtual-printer-settings.yaml" % self.settingsDir
self._previousSelectedTool = 0
self._currentSelectedTool = 0
self.printingSpeed = 100
self.printingFlow = 100
self._vpSettings = {
'connection': 1.0,
'heatingUp': 2.0,
'printJob': 10.0,
'prompt': {}
}
if os.path.isfile(settings_file):
import yaml
config = None
with open(settings_file, "r") as f:
config = yaml.safe_load(f)
if config:
def merge_dict(a,b):
for key in b:
if isinstance(b[key], dict):
merge_dict(a[key], b[key])
else:
a[key] = b[key]
merge_dict(self._vpSettings, config)
self._printing = False
self._heatingUp = False
self._heatingUpTimer = None
self._printJob = None
self._comm = False
self._preheating = False
self._temperatureChanger = None
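    # Example virtual-printer-settings.yaml (all keys optional; values are merged
    # over the defaults above, times in seconds):
    #     connection: 0.5
    #     heatingUp: 1.0
    #     printJob: 5.0
    #     prompt:
    #         message: "Simulated prompt"
    #         choices: ["OK", "Cancel"]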
def changeTemperature(self, tempTool, bedTool):
extruder_count = self._profileManager.data.get('extruder_count')
for i in range(extruder_count):
self.setTemperature('tool'+str(i), tempTool)
self.setTemperature("bed", bedTool)
def connect(self, port=None, baudrate=None):
self._comm = True
def doConnect():
if not self._printerManager.shuttingDown:
self._temperatureChanger = TempsChanger(self)
self._temperatureChanger.start()
self._changePrinterState(PrinterState.STATE_OPERATIONAL if self._printerManager.isBedClear else PrinterState.STATE_NOT_READY_TO_PRINT)
#set initial temps
self.changeTemperature(25, 25)
prompt = self._vpSettings.get('prompt')
if prompt:
self._printerManager.promptManager.end_prompt()
self._printerManager.promptManager.begin_prompt(prompt.get('message'))
choices = prompt.get('choices') or []
for c in choices:
self._printerManager.promptManager.add_choice(c)
self._printerManager.promptManager.show()
self._changePrinterState(PrinterState.STATE_CONNECTING)
t = threading.Timer(self._vpSettings['connection'], doConnect)
t.daemon = True
t.start()
return True
def disconnect(self):
if self._comm:
self._comm = False
self._changePrinterState(PrinterState.STATE_CLOSED)
if self._temperatureChanger:
self._temperatureChanger.stop()
self._temperatureChanger.join()
self._temperatureChanger = None
return True
def startPrint(self):
if self._printJob and self._printJob.isAlive():
raise Exception("A Print Job is still running")
self._changePrinterState(PrinterState.STATE_PRINTING)
currentFile = self._printerManager.selectedFile
data = self._printerManager.getFileInfo(currentFile['filename'])
self.fireSystemEvent(SystemEvent.PRINT_STARTED, data)
#First we simulate heatup
self.changeTemperature(210, 60)
self.reportHeatingUpChange(True)
self._heatingUp = True
def heatupDone():
if not self._printerManager.shuttingDown:
self.reportHeatingUpChange(False)
self._heatingUp = False
self._heatingUpTimer = None
self._printJob = JobSimulator(self, self._printerManager, currentFile)
#From this point on we assume that there's something on the bed
self._printerManager.set_bed_clear(False)
self._printJob.start()
self._printJob = None
self._heatingUpTimer = threading.Timer(self._vpSettings['heatingUp'], heatupDone)
self._heatingUpTimer.daemon = True
self._heatingUpTimer.start()
def disableMotorsAndHeater(self):
self.changeTemperature(0,0)
self._logger.info('Turning down motors')
def executeCancelCommands(self, disableMotorsAndHeater):
if self._printJob:
self._printJob.cancel()
if self.paused:
self.setPaused(False)
if self._heatingUpTimer:
self._heatingUpTimer.cancel()
self._heatingUpTimer = None
self.reportHeatingUpChange(False)
time.sleep(1)
self._changePrinterState(PrinterState.STATE_OPERATIONAL if self._printerManager.isBedClear else PrinterState.STATE_NOT_READY_TO_PRINT)
def jog(self, axis, amount):
self._logger.info('Jog - Axis: %s, Amount: %s', axis, amount)
def home(self, axes):
self._logger.info('Home - Axes: %s', ', '.join(axes))
def fan(self, tool, speed):
speed = (int(speed) / 100.0) * 255
self._logger.info('Fan - Tool: %s, Speed: %s', tool, speed)
def extrude(self, tool, amount, speed=None):
self._logger.info('Extrude - Tool: %s, Amount: %s, Speed: %s', tool, amount, speed)
def setPrintingSpeed(self, amount):
self.printingSpeed = amount
self._logger.info("Printing Speed - Amount: %s", amount)
self.onPrintingSpeedChanged(amount)
def setPrintingFlow(self, amount):
self.printingFlow = amount
self._logger.info("Printing Flow - Amount: %s", amount)
self.onPrintingFlowChanged(amount)
def changeTool(self, tool):
self._logger.info('Change tool from %s to %s', self.currentTool, tool)
self._currentSelectedTool = tool
self.onToolChanged(tool)
def sendCommand(self, command):
self._logger.info('Command Sent - %s', command)
def babystepping(self, amount):
self._logger.info('Babystepping - Amount - %s', amount)
def setTemperature(self, type, value):
self._logger.info('Temperature - Type: %s, Value: %s', type, value)
if self._temperatureChanger:
self._temperatureChanger.setTarget(type, value)
def serialLoggingChanged(self):
pass
def onPromptResponse(self, index):
self._logger.info("Prompt index %d selected" % index)
@property
def ports(self):
return {
'virtual': 'Virtual Printer'
}
@property
def baudRates(self):
return []
@property
def currentConnection(self):
return ('virtual', None) if self._comm else (None, None)
@property
def settingsProperties(self):
return {
'customCancelCommands': True
}
@property
def fileManagerClass(self):
return PrintFileManagerGcode
@property
def allowTerminal(self):
return True
@property
def connected(self):
return self._comm
@property
def preHeating(self):
return self._heatingUp
@property
def printProgress(self):
if self._printJob:
return self._printJob.progress
else:
return None
@property
def printFilePosition(self):
if self._printJob:
return self._printJob.filePos
else:
return None
@property
def consumedFilamentData(self):
return self._printJob._consumedFilament if self._printJob else 0
@property
def consumedFilamentSum(self):
return sum([self._printJob._consumedFilament[k] for k in self._printJob._consumedFilament.keys()]) if self._printJob else 0
def setPaused(self, paused):
currentFile = self._printerManager.selectedFile
printFileInfo = {
"file": currentFile['filename'],
"filename": os.path.basename(currentFile['filename']),
"origin": currentFile['origin']
}
if paused:
self._previousSelectedTool = self._currentSelectedTool
self._changePrinterState(PrinterState.STATE_PAUSED)
self.fireSystemEvent(SystemEvent.PRINT_PAUSED, printFileInfo)
else:
if self._currentSelectedTool != self._previousSelectedTool:
self.onToolChanged(self._previousSelectedTool)
self._currentSelectedTool = self._previousSelectedTool
self._changePrinterState(PrinterState.STATE_PRINTING)
self.fireSystemEvent(SystemEvent.PRINT_RESUMED, printFileInfo)
if self._printJob:
self._printJob.setPaused(paused)
class TempsChanger(threading.Thread):
def __init__(self, plugin):
self._stopped = False
self._plugin = plugin
self._targets = {}
self._actuals = {}
super(TempsChanger, self).__init__()
self.daemon = True
def run(self):
while not self._stopped:
for t in self._targets.keys():
				if self._actuals[t] > self._targets[t]:
					# step towards the target without overshooting it
					self._actuals[t] = max(self._actuals[t] - 5, self._targets[t])
				elif self._actuals[t] < self._targets[t]:
					self._actuals[t] = min(self._actuals[t] + 5, self._targets[t])
self._updateTemps()
time.sleep(1)
self._plugin = None
def stop(self):
self._stopped = True
def setTarget(self, type, target):
self._targets[type] = target
if type not in self._actuals:
self._actuals[type] = 0
def _updateTemps(self):
tools = {}
bed = (None, None)
for t in self._targets.keys():
if t.startswith('tool'):
tools[int(t[4:])] = ( self._actuals[t], self._targets[t] )
elif t.startswith('bed'):
bed = ( self._actuals[t], self._targets[t] )
self._plugin.reportTempChange(tools, bed)
class JobSimulator(threading.Thread):
def __init__(self, plugin, printerManager, currentFile):
self._pm = printerManager
self._plugin = plugin
self._file = currentFile
self._stopped = False
self._percentCompleted = 0
self._filePos = 0
self._pausedEvent = threading.Event()
self._consumedFilament = {0: 0}
super(JobSimulator, self).__init__()
self.daemon = True
def run(self):
self._pausedEvent.set()
timeElapsed = 0
currentLayer = 0
jobLength = self._plugin._vpSettings['printJob']
while not self._stopped and self._percentCompleted < 1:
self._pausedEvent.wait()
if self._stopped:
break
timeElapsed += 1
currentLayer += 1
self._filePos += 1
self._consumedFilament[0] += 10
self._percentCompleted = timeElapsed / jobLength
self._plugin.reportNewLayer()
self._plugin.reportPrintProgressChanged()
time.sleep(1)
self._plugin.changeTemperature(0, 0)
if self._percentCompleted >= 1:
self._plugin.reportPrintJobCompleted()
else:
self._plugin.reportPrintJobFailed()
self._pm = None
def cancel(self):
self._stopped = True
if not self._pausedEvent.isSet():
self.setPaused(False)
def setPaused(self, value):
if value:
self._pausedEvent.clear()
else:
self._pausedEvent.set()
@property
def progress(self):
return self._percentCompleted
@property
def filePos(self):
return self._filePos
__plugin_instance__ = VirtualComms()
| AstroPrint/AstroBox | src/plugins/com_astroprint_astrobox_plugins_virtualcomms/__init__.py | Python | agpl-3.0 | 10,257 |
import wx.lib.ogl as ogl
from myhdl import Signal, always
from MyHDLSim.combinational import Nxor
from MyHDLSim.Wrappers.GenericGateWrapper import GenericGateWrapper
class NxorGateShape(ogl.CompositeShape):
""" This shape is used exclusively to contruct the NXOR Gate main shape.
The shape is initially based within an 80x80 square, centered """
def __init__(self, canvas):
ogl.CompositeShape.__init__(self)
self.SetCanvas(canvas)
        # Shape1 = Outline of Shape
shape1 = ogl.PolygonShape()
points1 = [ (-2.5, 40),
(2.5, 20),
(2.5, -20),
(-2.5, -40),
(2.5, -20),
(2.5, 20) ]
shape1.Create(points1)
# Shape2 = Actual Gate Shape
shape2 = ogl.PolygonShape()
points2 = [ (-35, 40),
(20, 40),
(35, 0),
(35, 10),
(40, 10),
(40, -10),
(35, -10),
(35, 0),
(20, -40),
(-35, -40),
(-30, -20),
(-30, 20) ]
shape2.Create(points2)
self.AddText("Nxor")
self.SetRegionName("Nxor")
self.AddChild(shape1)
self.AddChild(shape2)
constraint = ogl.Constraint(ogl.CONSTRAINT_RIGHT_OF, shape1, [shape2])
constraint.SetSpacing(1,0)
self.AddConstraint(constraint)
self.Recompute()
# If we don't do this, the shapes will be able to move on their
# own, instead of moving the composite
shape1.SetDraggable(False)
shape2.SetDraggable(False)
# If we don't do this the shape will take all left-clicks for itself
shape1.SetSensitivityFilter(0)
class NxorGateWrapper(GenericGateWrapper):
""" This class wraps a MyHDLSim.combinational.NXOR function for drawing """
def __init__(self, drawManager, x, y, out, a, b, c = None, d = None):
GenericGateWrapper.__init__(self, drawManager, x, y, [a,b,c,d], NxorGateShape(drawManager), out)
self._CreateInstance(Nxor, out, a, b, c, d)
GenericGateWrapper._connectWires(self, drawManager)
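# Illustrative usage (a sketch -- variable names are assumptions, following the
# pattern of the other MyHDLSim gate wrappers):
#   a, b, out = Signal(False), Signal(False), Signal(False)
#   gate = NxorGateWrapper(drawManager, 100, 100, out, a, b)
# This places the NXOR shape at canvas position (100, 100) and instantiates
# MyHDLSim.combinational.Nxor so that `out` is driven from `a` and `b`.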
| mattsnowboard/msu-myhdlsim | MyHDLSim/Wrappers/NxorGateWrapper.py | Python | bsd-2-clause | 2,302 |
#!/usr/bin/env python2
# -.- coding: utf-8 -.-
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: audit_iptables
short_description: Get current iptables settings
description:
- Get current iptables settings.
author:
- "Aleksandr Seleznev (@seleznev)"
'''
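EXAMPLES = '''
# Illustrative playbook usage (a sketch; the module takes no options and
# registers its output as the `audit_iptables` fact):
- name: Gather current iptables rules
  audit_iptables:

- debug: var=audit_iptables
'''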
def main():
module = AnsibleModule({})
(rc, out, err) = module.run_command("iptables -L -n", check_rc=True)
module.exit_json(changed=False, ansible_facts={"audit_iptables": out.strip("\n")})
from ansible.module_utils.basic import *
if __name__ == "__main__":
main()
| seleznev/audit-helper | modules/audit_iptables.py | Python | gpl-3.0 | 1,167 |
"""Directed graph production.
This module contains the code to produce an ordered directed graph of a
bzr branch, such as we display in the tree view at the top of the bzrk
window.
"""
__copyright__ = "Copyright 2005 Canonical Ltd."
__author__ = "Scott James Remnant <[email protected]>"
from bzrlib.revision import NULL_REVISION
from bzrlib.tsort import merge_sort
from bzrlib import ui
def linegraph(graph, start_revs, maxnum=None, broken_line_length=None,
graph_data=True, mainline_only=False, root_progress=None):
"""Produce a directed graph of a bzr repository.
Returns a tuple of (line_graph, revid_index, columns_len) where
* line_graph is a list of tuples of (revid,
node,
lines,
parents,
children,
revno_sequence),
* revid_index is a dict of each revision with the key being the revid, and
the value the row index, and
* columns_len is the number of columns need to draw the line graph.
Node is a tuple of (column, colour) with column being a zero-indexed
column number of the graph that this revision represents and colour
being a zero-indexed colour (which doesn't specify any actual colour
in particular) to draw the node in.
Lines is a list of tuples which represent lines you should draw away
    from the revision; if you also need to draw lines into the revision
    you should use the lines list from the previous iteration. Each
    tuple in the list is in the form (start, end, colour) with start and
end being zero-indexed column numbers and colour as in node.
It's up to you how to actually draw the nodes and lines (straight,
curved, kinked, etc.) and to pick the actual colours for each index.
"""
assert isinstance(start_revs, list)
def update_root_progress(step_number):
"""IFF our container received a root progress bar, then update it."""
if root_progress is not None:
root_progress.update(None, step_number)
graph_parents = {}
ghosts = set()
graph_children = {}
update_root_progress(1)
progress_bar = ui.ui_factory.nested_progress_bar()
try:
progress_bar.update("Arranging tree fragments")
for i, (revid, parent_revids) in enumerate(graph.iter_ancestry(start_revs)):
if i % 25 == 0:
progress_bar.tick()
if parent_revids is None:
ghosts.add(revid)
continue
if parent_revids == (NULL_REVISION,):
graph_parents[revid] = ()
else:
graph_parents[revid] = parent_revids
for parent in parent_revids:
graph_children.setdefault(parent, []).append(revid)
graph_children.setdefault(revid, [])
finally:
progress_bar.finished()
update_root_progress(2)
progress_bar = ui.ui_factory.nested_progress_bar()
try:
progress_bar.update("Removing ghosts", 0, len(ghosts))
for i, ghost in enumerate(ghosts):
if i % 25 == 0:
progress_bar.update(None, i)
for ghost_child in graph_children[ghost]:
graph_parents[ghost_child] = [p for p in graph_parents[ghost_child]
if p not in ghosts]
finally:
progress_bar.finished()
graph_parents["top:"] = start_revs
if len(graph_parents)>0:
merge_sorted_revisions = merge_sort(
graph_parents,
"top:",
generate_revno=True)
else:
merge_sorted_revisions = ()
if mainline_only:
merge_sorted_revisions = [elem for elem in merge_sorted_revisions \
if len(elem[3])==1 ]
assert merge_sorted_revisions[0][1] == "top:"
merge_sorted_revisions = merge_sorted_revisions[1:]
revid_index = {}
revno_index = {}
# This will hold an item for each "branch". For a revisions, the revsion
# number less the least significant digit is the branch_id, and used as the
# key for the dict. Hence revision with the same revsion number less the
# least significant digit are considered to be in the same branch line.
# e.g.: for revisions 290.12.1 and 290.12.2, the branch_id would be 290.12,
# and these two revisions will be in the same branch line. Each value is
# a list of rev_indexes in the branch.
branch_lines = {}
linegraph = []
update_root_progress(3)
progress_bar = ui.ui_factory.nested_progress_bar()
try:
progress_bar.update("Finding nodes", 0, len(merge_sorted_revisions))
for (rev_index, (sequence_number,
revid,
merge_depth,
revno_sequence,
end_of_merge)) in enumerate(merge_sorted_revisions):
if rev_index % 25 == 0:
progress_bar.update(None, rev_index)
if maxnum and rev_index >= maxnum:
break
revid_index[revid] = rev_index
parents = graph_parents[revid]
linegraph.append([revid,
None,
[],
parents,
None,
revno_sequence])
if graph_data:
revno_index[revno_sequence] = rev_index
branch_id = revno_sequence[0:-1]
branch_line = None
if branch_id not in branch_lines:
branch_line = []
branch_lines[branch_id] = branch_line
else:
branch_line = branch_lines[branch_id]
branch_line.append(rev_index)
finally:
progress_bar.finished()
if graph_data:
branch_ids = branch_lines.keys()
def branch_id_cmp(x, y):
"""Compaire branch_id's first by the number of digits, then reversed
by their value"""
len_x = len(x)
len_y = len(y)
if len_x == len_y:
return -cmp(x, y)
return cmp(len_x, len_y)
branch_ids.sort(branch_id_cmp)
# This will hold a tuple of (child_index, parent_index, col_index) for each
    # line that needs to be drawn. If col_index is not None, then the line is
    # drawn along that column, else the line can be drawn directly between
# the child and parent because either the child and parent are in the same
# branch line, or the child and parent are 1 row apart.
lines = []
empty_column = [False for i in range(len(graph_parents))]
# This will hold a bit map for each cell. If the cell is true, then the
    # cell already contains a node or line. This is used when deciding what
    # column to place a branch line or line in, without it overlapping
    # something else.
columns = [list(empty_column)]
update_root_progress(4)
progress_bar = ui.ui_factory.nested_progress_bar()
try:
progress_bar.update("Organizing edges", 0, len(branch_ids))
for i, branch_id in enumerate(branch_ids):
if i % 25 == 0:
progress_bar.update(None, i)
branch_line = branch_lines[branch_id]
# Find the col_index for the direct parent branch. This will be the
# starting point when looking for a free column.
parent_col_index = 0
parent_index = None
if len(branch_id) > 1:
parent_revno = branch_id[0:-1]
if parent_revno in revno_index:
parent_index = revno_index[parent_revno]
parent_node = linegraph[parent_index][1]
if parent_node:
parent_col_index = parent_node[0]
col_search_order = _branch_line_col_search_order(columns,
parent_col_index)
color = reduce(lambda x, y: x+y, branch_id, 0)
cur_cont_line = []
line_range = []
last_rev_index = None
for rev_index in branch_line:
if last_rev_index:
if broken_line_length and \
rev_index - last_rev_index > broken_line_length:
line_range.append(last_rev_index+1)
line_range.append(rev_index-1)
else:
line_range.extend(range(last_rev_index+1, rev_index))
line_range.append(rev_index)
last_rev_index = rev_index
if parent_index:
if broken_line_length and \
parent_index - last_rev_index > broken_line_length:
line_range.append(last_rev_index+1)
else:
line_range.extend(range(last_rev_index+1, parent_index))
col_index = _find_free_column(columns,
empty_column,
col_search_order,
line_range)
node = (col_index, color)
for rev_index in branch_line:
linegraph[rev_index][1] = node
columns[col_index][rev_index] = True
for rev_index in branch_line:
(sequence_number,
revid,
merge_depth,
revno_sequence,
end_of_merge) = merge_sorted_revisions[rev_index]
linegraph[rev_index][4] = graph_children[revid]
col_index = linegraph[rev_index][1][0]
for parent_revid in graph_parents[revid]:
if parent_revid in revid_index:
parent_index = revid_index[parent_revid]
parent_node = linegraph[parent_index][1]
if parent_node:
parent_col_index = parent_node[0]
else:
parent_col_index = None
col_search_order = \
_line_col_search_order(columns,
parent_col_index,
col_index)
# If this line is really long, break it.
if len(branch_id) > 0 and \
broken_line_length and \
parent_index - rev_index > broken_line_length:
child_line_col_index = \
_find_free_column(columns,
empty_column,
col_search_order,
(rev_index + 1,))
_mark_column_as_used(columns,
child_line_col_index,
(rev_index + 1,))
# Recall _line_col_search_order to reset it back to
                            # the beginning.
col_search_order = \
_line_col_search_order(columns,
parent_col_index,
col_index)
parent_col_line_index = \
_find_free_column(columns,
empty_column,
col_search_order,
(parent_index - 1,))
_mark_column_as_used(columns,
parent_col_line_index,
(parent_index - 1,))
lines.append((rev_index,
parent_index,
(child_line_col_index,
parent_col_line_index)))
else :
line_col_index = col_index
if parent_index - rev_index >1:
line_range = range(rev_index + 1, parent_index)
line_col_index = \
_find_free_column(columns,
empty_column,
col_search_order,
line_range)
_mark_column_as_used(columns,
line_col_index,
line_range)
lines.append((rev_index,
parent_index,
(line_col_index,)))
finally:
progress_bar.finished()
update_root_progress(5)
progress_bar = ui.ui_factory.nested_progress_bar()
try:
progress_bar.update("Prettifying graph", 0, len(lines))
for i, (child_index, parent_index, line_col_indexes) in enumerate(lines):
if i % 25 == 0:
progress_bar.update(None, i)
(child_col_index, child_color) = linegraph[child_index][1]
(parent_col_index, parent_color) = linegraph[parent_index][1]
if len(line_col_indexes) == 1:
if parent_index - child_index == 1:
linegraph[child_index][2].append(
(child_col_index,
parent_col_index,
parent_color))
else:
                # line from the child's column to the line's column
linegraph[child_index][2].append(
(child_col_index,
line_col_indexes[0],
parent_color))
# lines down the line's column
for line_part_index in range(child_index+1, parent_index-1):
linegraph[line_part_index][2].append(
(line_col_indexes[0],
line_col_indexes[0],
parent_color))
# line from the line's column to the parent's column
linegraph[parent_index-1][2].append(
(line_col_indexes[0],
parent_col_index,
parent_color))
else:
# Broken line
                # line from the child's column to the line's column
linegraph[child_index][2].append(
(child_col_index,
line_col_indexes[0],
parent_color))
# Broken line end
linegraph[child_index+1][2].append(
(line_col_indexes[0],
None,
parent_color))
# Broken line end
linegraph[parent_index-2][2].append(
(None,
line_col_indexes[1],
parent_color))
# line from the line's column to the parent's column
linegraph[parent_index-1][2].append(
(line_col_indexes[1],
parent_col_index,
parent_color))
finally:
progress_bar.finished()
return (linegraph, revid_index, len(columns))
else:
return (linegraph, revid_index, 0)
def _branch_line_col_search_order(columns, parent_col_index):
for col_index in range(parent_col_index, len(columns)):
yield col_index
for col_index in range(parent_col_index-1, -1, -1):
yield col_index
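# Example of the yielded search order (illustrative): with len(columns) == 5
# and parent_col_index == 2 the generator above yields 2, 3, 4, 1, 0 -- prefer
# the parent's column and everything to its right, then fall back leftwards.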
def _line_col_search_order(columns, parent_col_index, child_col_index):
if parent_col_index is not None:
max_index = max(parent_col_index, child_col_index)
min_index = min(parent_col_index, child_col_index)
for col_index in range(max_index, min_index -1, -1):
yield col_index
else:
max_index = child_col_index
min_index = child_col_index
yield child_col_index
i = 1
while max_index + i < len(columns) or \
min_index - i > -1:
if max_index + i < len(columns):
yield max_index + i
if min_index - i > -1:
yield min_index - i
i += 1
def _find_free_column(columns, empty_column, col_search_order, line_range):
for col_index in col_search_order:
column = columns[col_index]
has_overlaping_line = False
for row_index in line_range:
if column[row_index]:
has_overlaping_line = True
break
if not has_overlaping_line:
break
else:
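        # for/else: reached only when the loop never hit `break`, i.e. every
        # existing column overlaps line_range, so grow the grid instead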
col_index = len(columns)
column = list(empty_column)
columns.append(column)
return col_index
def _mark_column_as_used(columns, col_index, line_range):
column = columns[col_index]
for row_index in line_range:
column[row_index] = True
def same_branch(a, b):
"""Return whether we think revisions a and b are on the same branch."""
if len(a.parent_ids) == 1:
        # De facto same branch if only parent
return True
elif a.committer == b.committer:
# Same committer so may as well be
return True
else:
return False
| jelmer/bzr-gtk | branchview/linegraph.py | Python | gpl-2.0 | 19,106 |
from django.db import models
from mezzanine.pages.models import Page
from django.contrib.auth.models import User
#from account import models as user_models
class UseCase(models.Model):
name = models.CharField(max_length=255)
def __unicode__(self):
return (self.name)
class CollectionTable(models.Model):
#Group selection
APPLICATION = (
('CL', 'Cloud'),
('PR', 'Precipitation'),
('OZ', 'Ozone'),
('AE', 'Aerosol'),
('NO', 'NO2'),
('SO', 'SO2'),
('CH', 'CH4'),
('OG', 'other gases'),
)
GROUP = (
('G1', 'Group 1'),
('G2', 'Group 2'),
('G3', 'Group 3')
)
IPR = (
('I00', 'Restricted'),
('I01', 'Creative commons'),
('I02', 'Academic Free License 3.0 (AFL 3.0) Adaptive Public License'),
('I03', 'Attribution Assurance Licenses'),
('I04', 'FreeBSD License'),
('I05', 'Common Development and Distribution License'),
('I06', 'Common Public Attribution License 1.0 (CPAL)'),
('I07', 'Computer Associates Trusted Open Source License 1.1 Creative Commons Attribution'),
('I08', 'EU DataGrid Software License'),
('I09', 'Educational Community License. Version 2.0'),
('I10', 'European Union Public License. Version 1.1 (EUPL-1.1) Fair License'),
('I11', 'GNU General Public License (GPL)'),
('I12', 'Local Authority Copyright with data.gov.uk rights Lucent Public License (Plan9)'),
('I13', 'MIT license'),
)
ACCESS = (
('AO', 'Open'),
('AR', 'Restricted')
)
location = models.FileField(max_length=255, upload_to='collections/', blank=False, null=False, verbose_name = "Location on Server")
name = models.CharField(max_length=255, null=False, blank=False, verbose_name = "Collection name")
source = models.CharField(max_length=255, blank=True, null=True, verbose_name = "Collection source")
    max_lat = models.CharField(max_length=10, blank=True, null=True, verbose_name = "max. latitude")
    max_lon = models.CharField(max_length=10, blank=True, null=True, verbose_name = "max. longitude")
    min_lat = models.CharField(max_length=10, blank=True, null=True, verbose_name = "min. latitude")
    min_lon = models.CharField(max_length=10, blank=True, null=True, verbose_name = "min. longitude")
start_date = models.DateField(auto_now=False, auto_now_add=False, blank=True, null=True, verbose_name = "Time coverage: start date")
end_date = models.DateField(auto_now=False, auto_now_add=False, blank=True, null=True, verbose_name = "end date")
use_cases = models.ManyToManyField(UseCase, verbose_name = "Use case")
application = models.CharField(max_length=3, choices=APPLICATION, verbose_name = "Application field", default='CL')
measurement_unit = models.CharField(max_length=255, blank=True, null=True, verbose_name="measurement unit")
group = models.CharField(max_length=3, choices=GROUP, blank=True, null=True, verbose_name = "Group")
other_info = models.CharField(max_length=255, blank=True, null=True, verbose_name = "Other information")
uploaded_by = models.ForeignKey("auth.User", verbose_name = "Uploaded by", blank=True, null=True)
status = models.CharField(max_length=255, blank=True, null=True, verbose_name = "Status")
IO = models.CharField(max_length=1, blank=True, null=True)
coverageID = models.CharField(max_length=255, blank=True, null=True)
ipr = models.CharField(max_length=4, choices=IPR, blank=False, null=True, verbose_name = "IPR statement", default='I00')
access = models.CharField(max_length=3, choices=ACCESS, blank=False, null=False, verbose_name = "Access", default='AO')
min_value = models.CharField(max_length=9, blank=False, null=False, verbose_name = "Min. value", default='0')
max_value = models.CharField(max_length=9, blank=False, null=False, verbose_name = "Max. value", default='1')
def __unicode__ (self):
return self.name
| SISTEMAsw/TAMP | gui/data_ingestion/models.py | Python | mit | 4,023 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy
from .._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class OperationsManagementClientConfiguration(Configuration):
"""Configuration for OperationsManagementClient.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: Gets subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms part of the URI for every service call.
:type subscription_id: str
:param provider_name: Provider name for the parent resource.
:type provider_name: str
:param resource_type: Resource type for the parent resource.
:type resource_type: str
:param resource_name: Parent resource name.
:type resource_name: str
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
provider_name: str,
resource_type: str,
resource_name: str,
**kwargs: Any
) -> None:
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
if provider_name is None:
raise ValueError("Parameter 'provider_name' must not be None.")
if resource_type is None:
raise ValueError("Parameter 'resource_type' must not be None.")
if resource_name is None:
raise ValueError("Parameter 'resource_name' must not be None.")
super(OperationsManagementClientConfiguration, self).__init__(**kwargs)
self.credential = credential
self.subscription_id = subscription_id
self.provider_name = provider_name
self.resource_type = resource_type
self.resource_name = resource_name
self.api_version = "2015-11-01-preview"
self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
kwargs.setdefault('sdk_moniker', 'mgmt-operationsmanagement/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs: Any
) -> None:
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
if self.credential and not self.authentication_policy:
self.authentication_policy = policies.AsyncBearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
| Azure/azure-sdk-for-python | sdk/operationsmanagement/azure-mgmt-operationsmanagement/azure/mgmt/operationsmanagement/aio/_configuration.py | Python | mit | 4,167 |
# -*- coding: utf-8 -*-
import pytest
from mutagenwrapper import MediaFile, ReservedTagNameError
basic_ref = {
'artist': 'Daft Punk',
'title': 'Get Lucky',
'album': 'Random Access Memories',
'date': '2013',
'genre': 'Electronic',
'composer': 'Guy-Manuel de Homem-Christo, Nile Rodgers, Pharrell Williams, Thomas Bangalter',
'tracknumber': 8,
'tracktotal': 13,
'discnumber': 1,
'disctotal': 1,
}
def test_basic_read(path_basic):
m = MediaFile(path_basic)
assert m.artist == basic_ref['artist']
assert m['artist'] == basic_ref['artist']
for k in sorted(basic_ref.iterkeys()):
assert getattr(m, k) == basic_ref[k]
assert m[k] == basic_ref[k]
def test_basic_write(path_basic, tempcopy):
with tempcopy(path_basic) as tf:
m = MediaFile(tf.name)
assert m.artist == basic_ref['artist']
m.artist = 'DAFT PUNK'
assert m.artist == 'DAFT PUNK'
m.save(reload=True)
assert m.artist == 'DAFT PUNK'
m.album = ['Foo', 'Bar']
assert m.album == ['Foo', 'Bar']
m.save(reload=True)
assert m.album == ['Foo', 'Bar']
        # XXX Appending values is NOT supported (e.g. m.album.append(...))
        # and should not be used (behaviors vary for different formats,
# or even different tags in the same format).
m.date = '2014-08-15'
assert m.date == '2014-08-15'
m.save(reload=True)
assert m.date == '2014-08-15'
m.tracknumber = 1
assert m.tracknumber == 1
assert m.tracktotal == 13
m.save(reload=True)
assert m.tracknumber == 1
assert m.tracktotal == 13
del m.artist
assert m.artist is None
m.save(reload=True)
assert m.artist is None
del m.date
assert m.date is None
m.save(reload=True)
assert m.date is None
del m.tracknumber
assert m.tracknumber is None
m.save(reload=True)
assert m.tracknumber is None
m.tracknumber = 9
assert m.tracknumber == 9
m.save(reload=True)
assert m.tracknumber == 9
m.tracktotal = 42
assert m.tracknumber == 9
assert m.tracktotal == 42
m.save(reload=True)
assert m.tracknumber == 9
assert m.tracktotal == 42
def test_basic_write_reserved(path_basic, tempcopy):
with tempcopy(path_basic) as tf:
m = MediaFile(tf.name)
if m.wrapper.__class__.__name__ == 'ID3TagsWrapper':
# conductor is a regular tag in ID3
return
with pytest.raises(ReservedTagNameError):
m.___conductor = 'Abbado'
def test_basic_write_unicode(path_basic, tempcopy):
with tempcopy(path_basic) as tf:
m = MediaFile(tf.name)
m.artist = u'Frédéric François Chopin'
m.save(reload=True)
assert m.artist == u'Frédéric François Chopin'
m.album = [u'Études', u'Klavierstück']
m.save(reload=True)
assert m.album == [u'Études', u'Klavierstück']
def test_basic_read_picture(path_basic):
m = MediaFile(path_basic)
#assert m.picture == ''
| clee704/mutagenwrapper | tests/test_mutagenwrapper.py | Python | mit | 3,184 |
from __future__ import print_function
import atexit, copy, inspect, itertools, os, pprint, re, sys, time, warnings
from datetime import datetime, tzinfo, timedelta # used by time tests
stashedPath = copy.copy(sys.path)
sys.path.insert(0, os.path.realpath(os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir, 'common')))
import driver, utils
sys.path = stashedPath
try:
unicode
except NameError:
unicode = str
try:
long
except NameError:
long = int
# -- global variables
failure_count = 0
passed_count = 0
start_time=time.time()
# -- import driver
r = utils.import_python_driver()
print('Using RethinkDB client from: %s' % r.__file__)
# -- get settings
DEBUG_ENABLED = os.environ.get('VERBOSE', 'false').lower() == 'true'
def print_debug(message):
if DEBUG_ENABLED:
print('DEBUG (%.2f):\t %s' % (time.time() - start_time, message.rstrip()))
sys.stdout.flush()
DRIVER_PORT = int(sys.argv[1] if len(sys.argv) > 1 else os.environ.get('RDB_DRIVER_PORT') or 28015)
SERVER_HOST = os.environ.get('RDB_SERVER_HOST', 'localhost')
print_debug('Using driver port: %d on host: %s' % (DRIVER_PORT, SERVER_HOST))
required_external_tables = []
if len(sys.argv) > 2 or os.environ.get('TEST_DB_AND_TABLE_NAME'):
    for rawValue in (sys.argv[2] if len(sys.argv) > 2 else os.environ.get('TEST_DB_AND_TABLE_NAME')).split(','):
rawValue = rawValue.strip()
if rawValue == '':
continue
splitValue = rawValue.split('.')
if len(splitValue) == 1:
required_external_tables += [('test', splitValue[0])]
elif len(splitValue) == 2:
required_external_tables += [(splitValue[0], splitValue[1])]
else:
            raise AssertionError('Unusable value for external tables: %s' % rawValue)
required_external_tables.reverse() # setup for .pop()
# -- utilities --
def print_failure(name, src, expected, result, message=None):
global failure_count
failure_count += 1
print('''
TEST FAILURE: %(name)s%(message)s
SOURCE: %(source)s
EXPECTED: %(expected)s
RESULT: %(result)s''' % {
'name': name,
'source': utils.RePrint.pformat(src, hangindent=14),
'message': '\n FAILURE: %s' % message if message is not None else '',
'expected': utils.RePrint.pformat(expected, hangindent=14),
'result': utils.RePrint.pformat(result, hangindent=14)
})
def check_pp(src, query):
# This isn't a good indicator because of lambdas, whitespace differences, etc
# But it will at least make sure that we don't crash when trying to print a query
printer = r.errors.QueryPrinter(query)
composed = printer.print_query()
#if composed != src:
# print('Warning, pretty printing inconsistency:')
# print("Source code: %s", src)
# print("Printed query: %s", composed)
class OptionsBox(object):
value = None
options = None
def __init__(self, value, options):
assert isinstance(options, dict)
self.value = value
self.options = options
def __str__(self):
if self.options and self.options.keys() == ['ordered'] and self.options['ordered'] == False:
return 'bag(%s)' % self.value
elif self.options and self.options.keys() == ['partial'] and self.options['partial'] == True:
return 'partial(%s)' % self.value
else:
return 'options(%s, %s)' % (self.options, self.value)
def __repr__(self):
if self.options and self.options.keys() == ['ordered'] and self.options['ordered'] == False:
return 'bag(%r)' % self.value
elif self.options and self.options.keys() == ['partial'] and self.options['partial'] == True:
return 'partial(%r)' % self.value
else:
return 'options(%s, %r)' % (self.options, self.value)
class FalseStr(str):
'''return a string that evaluates as false'''
def __nonzero__(self):
return False
class Anything(object):
__instance = None
def __new__(cls):
if not cls.__instance:
cls.__instance = super(Anything, cls).__new__(cls)
return cls.__instance
def __str__(self):
return "<no error>"
def __repr__(self):
return self.__str__()
class Err(object):
exceptionRegex = re.compile('^(?P<message>[^\n]*?)((?: in)?:\n|\nFailed assertion:).*$', flags=re.DOTALL)
err_type = None
message = None
frames = None
regex = False
def __init__(self, err_type=None, message=None, err_frames=None, **kwargs):
# translate err_type into the class
if type(err_type) == type(Exception) and issubclass(err_type, Exception):
self.err_type = err_type
elif hasattr(r, err_type):
self.err_type = r.__getattribute__(err_type)
elif hasattr(__builtins__, err_type):
self.err_type = __builtins__.__getattribute__(err_type)
else:
try:
self.err_type = eval(err_type) # just in case we got a string with the name
except Exception: pass
        assert issubclass(self.err_type, Exception), 'err_type must be a subclass of Exception, got: %r' % err_type
if message is not None:
self.message = message
self.frames = None # TODO: test frames
def __str__(self):
return "%s(%s)" % (self.err_type.__name__, ('~' + self.message.pattern) if hasattr(self.message, 'pattern') else self.message)
def __repr__(self):
return self.__str__()
class Regex(object):
value = None
def __init__ (self, value, **kwargs):
try:
self.value = re.compile(value)
except Exception as e:
raise ValueError('Regex got a bad value: %r' % value)
def match(self, other):
if not isinstance(other, (str, unicode)):
return False
return self.value.match(other) is not None
@property
def pattern(self):
return self.value.pattern
def __str__(self):
return "Regex(%s)" % (self.value.pattern if self.value else '<none>')
def __repr__(self):
return "Regex(%s)" % (self.value.pattern if self.value else '<none>')
class Uuid(Regex):
value = re.compile('^[a-z0-9]{8}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{12}$')
def __init__(self, **kwargs):
pass
def __str__(self):
return "uuid()"
def __repr__(self):
return "uuid()"
def compare(expected, result, options=None):
'''Compare the two items by the rules we have, returning either True, or a message about why it failed'''
# -- merge options
defaultOptions = {
'ordered': True,
'partial': False,
'precision': 0.0,
'explicit_type': None
}
if options is None:
options = defaultOptions
else:
options = copy.copy(options)
for key in defaultOptions:
if key == 'explicit_type':
options[key] = None
if key not in options:
options[key] = defaultOptions[key]
if isinstance(expected, OptionsBox) and expected.options:
for key in defaultOptions:
if key in expected.options:
options[key] = expected.options[key]
expected = expected.value
# == compare based on type of expected
if inspect.isclass(expected):
try:
expected = expected()
except Exception as e:
return FalseStr('Expected was a class that can not easily be instantiated: %s' % str(e))
# -- explicit type
if options['explicit_type'] and not isinstance(result, options['explicit_type']):
return FalseStr('expected explicit type %s, got %s (%s)' % (options['explicit_type'], result, type(result).__name__))
# -- Anything... but an error
if isinstance(expected, Anything):
if isinstance(result, Exception):
return FalseStr('expected anything() but got error: %r' % result)
else:
return True
# -- None
if expected is None: # note: this means the expected was 'None', not just omitted (translated to Anything)
if result is None:
return True
else:
return FalseStr('expected None, but got: %r (%s)' % (result, type(result).__name__))
# -- number
if isinstance(expected, (int, long, float)):
if not isinstance(result, (int, long, float)):
return FalseStr('expected number %s but got %s (%s)' % (expected, result, type(result).__name__))
if abs(expected - result) <= options['precision']:
return True
else:
if options['precision']:
return FalseStr('value << %r >> was not within %r of %r' % (result, options['precision'], expected))
else:
return FalseStr('value << %r >> was not equal to: %r' % (result, expected))
# -- string/unicode
if isinstance(expected, (str, unicode)):
if result == expected:
return True
else:
return FalseStr('value << %r >> was not the expected: %s' % (result, expected))
# -- dict
if isinstance(expected, dict):
if not isinstance(result, dict):
return FalseStr('expected dict, got %r (%s)' % (result, type(result).__name__))
# - keys
expectedKeys = set(expected.keys())
resultKeys = set(result.keys())
if options['partial']:
if not expectedKeys.issubset(resultKeys):
return FalseStr('unmatched keys: %s' % expectedKeys.difference(resultKeys))
else:
if not expectedKeys == resultKeys:
return FalseStr('unmatched keys from either side: %s' % expectedKeys.symmetric_difference(resultKeys))
# - values
for key, value in expected.items():
compareResult = compare(value, result[key], options=options)
if not compareResult:
return compareResult
# - all found
return True
# -- list/tuple/array
if hasattr(expected, '__iter__'):
if not hasattr(result, '__iter__'):
return FalseStr('expected iterable, got %s (%s)' % (result, type(result).__name__))
# - ordered
if options['ordered']:
haystack = result
if not hasattr(haystack, 'next'):
haystack = iter(result)
for needle in expected:
try:
while True:
straw = next(haystack)
if compare(needle, straw, options=options):
break
elif not options['partial']:
return FalseStr('got an unexpected item: %r while looking for %r' % (straw, needle))
except StopIteration:
return FalseStr('ran out of results before finding: %r' % needle)
if not options['partial']:
try:
straw = next(haystack)
return FalseStr('found at least one extra result: %r' % straw)
except StopIteration: pass
# - unordered
else:
haystack = list(result)
for needle in expected:
for straw in haystack:
if compare(needle, straw, options=options):
break
else:
return FalseStr('missing expected item: %r' % needle)
haystack.remove(straw)
if haystack and not options['partial']:
return FalseStr('extra items returned: %r' % haystack)
    # - return success
return True
# -- exception
if isinstance(expected, (Err, Exception)):
# - type
if isinstance(expected, Err):
if not isinstance(result, expected.err_type):
return FalseStr('expected error type %s, got %r (%s)' % (expected.err_type, result, type(result).__name__))
elif not isinstance(result, type(expected)):
return FalseStr('expected error type %s, got %r (%s)' % (type(expected).__name__, result, type(result).__name__))
# - message
if expected.message:
# strip details from output message
resultMessage = None
with warnings.catch_warnings():
warnings.simplefilter('ignore')
if hasattr(result, 'message'):
resultMessage = str(result.message)
else:
resultMessage = str(result)
resultMessage = re.sub(Err.exceptionRegex, '\g<message>:', resultMessage)
compareResult = compare(expected.message, resultMessage, options=options)
if not compareResult:
return compareResult
# - frames -- ToDo: implement this
return True
# -- Regex/UUID
if isinstance(expected, (Regex, re._pattern_type)):
match = expected.match(result)
if match:
return True
else:
return FalseStr('expected match for %s, but got: %s' % (expected, result))
# -- type
    if not isinstance(expected, type(result)) and (type(expected) != type(object) or not issubclass(expected, type(result))): # reversed so we can handle subclasses
return FalseStr('expected type %s, got: %r (%s)' % (expected, result, type(result).__name__))
# -- other
if result != expected:
return FalseStr('expected %r but got %r (%s)' % (expected, result, type(result).__name__))
else:
return True
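# Illustrative expectations for compare() (not part of the original suite;
# derived from the semantics implemented above):
#   compare(bag([1, 2, 3]), [3, 1, 2])              -> True (order ignored)
#   compare(partial({'a': 1}), {'a': 1, 'b': 2})    -> True (extra keys allowed)
#   compare(1.0, 1.05, options={'precision': 0.1})  -> True (within tolerance)
#   compare(err('ReqlRuntimeError'), ValueError())  -> falsy, with a message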
# -- Curried output test functions --
class PyTestDriver(object):
scope = None
__con_cache = None
def __init__(self):
self.scope = globals()
def connection(self, new=False, user=None):
if user is None:
user = 'admin'
if self.__con_cache is None:
self.__con_cache = {}
if new is True or user not in self.__con_cache:
if user in self.__con_cache:
try:
self.__con_cache[user].close()
except Exception as e:
print_debug('Failed while closing a connection for replacement: %s' % str(e))
self.__con_cache[user] = r.connect(host=SERVER_HOST, port=DRIVER_PORT, user=user)
print_debug('\tConnected to %s:%d as user %s' % (SERVER_HOST, DRIVER_PORT, user))
return self.__con_cache[user]
def define(self, expr, variable):
print_debug('Defining: %s%s' % (expr, ' to %s' % variable if variable else ''))
self.scope['conn'] = self.connection()
try:
exec(compile('%s = %s' % (variable, expr), '<string>', 'single'), self.scope) # handle things like: a['b'] = b
except Exception as e:
print_failure('--define--', expr, 'Exception while processing define', str(e))
def run(self, src, expected, name, runopts, testopts):
global passed_count
print_debug('Test: %s' % name)
if runopts:
runopts["profile"] = True
else:
runopts = {"profile": True}
compareOptions = {}
if 'precision' in testopts:
compareOptions['precision'] = float(testopts['precision']) # errors will bubble up
conn = self.connection(new=testopts.get('new-connection', False), user=runopts.get('user'))
self.scope['conn'] = conn
# -- build the expected result
print_debug('\tExpected: %s' % str(expected))
exp_val = eval(unicode(expected), self.scope)
# -- evaluate the command
try:
result = eval(src, self.scope)
except Exception as err:
print_debug('\tError evaluating: %s - %r' % (src, err))
result = err
else:
try:
# - collect the contents of a cursor
if isinstance(result, r.Cursor):
print_debug('\tEvaluating cursor: %s %r' % (src, runopts))
result = list(result)
# - run as a query if it is one
elif isinstance(result, r.RqlQuery):
print_debug('\tRunning query: %s %r' % (src, runopts))
# Check pretty-printing
check_pp(src, result)
# run the query
actualRunOpts = copy.copy(runopts)
if 'user' in actualRunOpts:
del actualRunOpts['user']
result = result.run(conn, **actualRunOpts)
if result and "profile" in runopts and runopts["profile"] and "value" in result:
result = result["value"]
# ToDo: do something reasonable with the profile
else:
print_debug('\tRunning: %s' % src)
# - Save variable if requested
if 'variable' in testopts:
# ToDo: handle complex variables like: a[2]
self.scope[testopts['variable']] = result
print_debug('\tVariable: %s' % testopts['variable'])
if exp_val is None:
return
if 'noreply_wait' in testopts and testopts['noreply_wait']:
conn.noreply_wait()
except Exception as err:
print_debug('\tError: %r' % err)
result = err
else:
print_debug('\tResult: %r' % result)
# Compare to the expected result
compareResult = compare(exp_val, result, options=compareOptions)
if compareResult:
passed_count += 1
else:
print_failure(name, src, exp_val, result, message=compareResult)
if __name__ == '__main__':
driver = PyTestDriver()
# Emitted test code will consist of calls to these functions
class UTCTimeZone(tzinfo):
'''UTC'''
def utcoffset(self, dt):
return timedelta(0)
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return timedelta(0)
class PacificTimeZone(tzinfo):
'''Pacific timezone emulator for timestamp: 1375147296.68'''
def utcoffset(self, dt):
return timedelta(-1, 61200)
def tzname(self, dt):
return 'PDT'
def dst(self, dt):
return timedelta(0, 3600)
def test(query, expected, name, runopts=None, testopts=None):
if runopts is None:
runopts = {}
else:
for k, v in runopts.items():
if isinstance(v, str):
try:
runopts[k] = eval(v)
except NameError:
runopts[k] = v
if testopts is None:
testopts = {}
if 'max_batch_rows' not in runopts:
runopts['max_batch_rows'] = 3
if expected == '':
expected = None
driver.run(query, expected, name, runopts, testopts)
def setup_table(table_variable_name, table_name, db_name='test'):
global required_external_tables
def _teardown_table(table_name, db_name):
'''Used for tables that get created for this test'''
res = r.db(db_name).table_drop(table_name).run(driver.connection())
assert res["tables_dropped"] == 1, 'Failed to delete table %s.%s: %s' % (db_name, table_name, str(res))
def _clean_table(table_name, db_name):
'''Used for pre-existing tables'''
res = r.db(db_name).table(table_name).delete().run(driver.connection())
assert res["errors"] == 0, 'Failed to clean out contents from table %s.%s: %s' % (db_name, table_name, str(res))
r.db(db_name).table(table_name).index_list().for_each(r.db(db_name).table(table_name).index_drop(r.row)).run(driver.connection())
if len(required_external_tables) > 0:
db_name, table_name = required_external_tables.pop()
try:
            r.db(db_name).table(table_name).info().run(driver.connection())
except r.ReqlRuntimeError:
raise AssertionError('External table %s.%s did not exist' % (db_name, table_name))
atexit.register(_clean_table, table_name=table_name, db_name=db_name)
print('Using existing table: %s.%s, will be %s' % (db_name, table_name, table_variable_name))
else:
if table_name in r.db(db_name).table_list().run(driver.connection()):
r.db(db_name).table_drop(table_name).run(driver.connection())
res = r.db(db_name).table_create(table_name).run(driver.connection())
assert res["tables_created"] == 1, 'Unable to create table %s.%s: %s' % (db_name, table_name, str(res))
r.db(db_name).table(table_name).wait(wait_for="all_replicas_ready").run(driver.connection())
print_debug('Created table: %s.%s, will be %s' % (db_name, table_name, table_variable_name))
globals()[table_variable_name] = r.db(db_name).table(table_name)
def setup_table_check():
'''Make sure that the required tables have been setup'''
if len(required_external_tables) > 0:
        raise Exception('Unused external tables, that is probably not supported by this test: %s' % ', '.join('%s.%s' % tuple(x) for x in required_external_tables))
def check_no_table_specified():
if DB_AND_TABLE_NAME != "no_table_specified":
raise ValueError("This test isn't meant to be run against a specific table")
def define(expr, variable=None):
driver.define(expr, variable=variable)
def anything():
return Anything()
def bag(expected, ordered=False, partial=None):
options = {'ordered':ordered}
if partial is not None:
options['partial'] = partial
if isinstance(expected, OptionsBox):
newoptions = copy.copy(expected.options)
newoptions.update(options)
options = newoptions
expected = expected.value
assert isinstance(expected, (list, tuple)), \
'bag can only work on lists, or tuples, got: %s (%r)' % (type(expected).__name__, expected)
return OptionsBox(expected, options)
def partial(expected, ordered=False, partial=True):
options = {'ordered':ordered, 'partial':partial}
if isinstance(expected, OptionsBox):
newoptions = copy.copy(expected.options)
newoptions.update(options)
options = newoptions
expected = expected.value
assert isinstance(expected, (dict, list, tuple)), \
'partial can only work on dicts, lists, or tuples, got: %s (%r)' % (type(expected).__name__, expected)
return OptionsBox(expected, options)
def fetch(cursor, limit=None, timeout=0.2):
'''Pull items from a cursor'''
# -- input validation
if limit is not None:
try:
limit = int(limit)
assert limit > 0
except Exception:
raise ValueError('invalid value for limit: %r' % limit)
if timeout not in (None, 0):
try:
timeout = float(timeout)
assert timeout > 0
except Exception:
raise ValueError('invalid value for timeout: %r' % timeout)
# -- collect results
result = []
deadline = time.time() + timeout if timeout else None
while (deadline is None) or (time.time() < deadline):
try:
if deadline:
result.append(cursor.next(wait=deadline - time.time()))
else:
result.append(cursor.next())
if limit and len(result) >= limit:
break
except r.ReqlTimeoutError:
if limit is not None: # no error unless we get less than we expect
result.append(r.ReqlTimeoutError())
break
else:
result.append(r.ReqlTimeoutError())
return result
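# Illustrative behaviour (derived from the loop above): fetch(cursor, limit=2)
# returns up to two documents; if the cursor cannot supply them within the
# default 0.2s window the list ends with an r.ReqlTimeoutError() entry, which
# compare() can then match against an expected timeout.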
def wait(seconds):
'''Sleep for some seconds'''
time.sleep(seconds)
def err(err_type, message=None, frames=None):
return Err(err_type, message, frames)
def err_regex(err_type, message=None, frames=None):
return Err(err_type, re.compile(message), frames)
def arrlen(length, thing=Anything()):
return [thing] * length
def uuid():
return Uuid()
def regex(value):
return Regex(value)
def int_cmp(expected_value):
if not isinstance(expected_value, (int, long)):
raise ValueError('value must be of type `int` or `long` but got: %r (%s)' % (expected_value, type(expected_value).__name__))
return OptionsBox(expected_value, {'explicit_type': (int, long)})
def float_cmp(expected_value):
if not isinstance(expected_value, float):
raise ValueError('value must be of type `float` but got: %r (%s)' % (expected_value, type(expected_value).__name__))
return OptionsBox(expected_value, {'explicit_type': float})
def the_end():
if failure_count > 0:
sys.exit("Failed %d tests, passed %d" % (failure_count, passed_count))
else:
print("Passed all %d tests" % passed_count)
false = False
true = True
| robertjpayne/rethinkdb | test/rql_test/drivers/driver.py | Python | apache-2.0 | 25,478 |
# Settings file optimized for test running. Sets up in-memory database,
# Nose test runner and disables South for the tests
from .base import *
# Use in-memory SQLite3 database for faster tests
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
}
}
# No need to use South in testing
SOUTH_TESTS_MIGRATE = False
SKIP_SOUTH_TESTS = True
# Disable cache
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
}
}
try:
import django_nose # noqa
import os.path
INSTALLED_APPS += (
'django_nose',
)
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
PROJECT_APPS = [app for app in INSTALLED_APPS
if os.path.exists(os.path.join(ROOT_DIR, '..', app))]
if PROJECT_APPS:
NOSE_ARGS = ['--cover-package=' + ','.join(PROJECT_APPS)]
except ImportError:
pass
PASSWORD_HASHERS = (
'django.contrib.auth.hashers.MD5PasswordHasher',
)
| AnthonyHonstain/LazyBower | lazybower/settings/test.py | Python | mit | 988 |
import matplotlib.pyplot as plt
import numpy as np
def runningMoments(data):
x = np.cumsum(data)
x2 = np.cumsum(data**2)
n = np.arange(1, len(data)+1)
mean = x / n
std = np.sqrt((x2*n - x*x) / (n * (n-1)))
return mean, std
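# Sanity check (illustrative, not part of the original module):
#   mean, std = runningMoments(np.array([1., 2., 3., 4.]))
#   assert mean[-1] == 2.5
#   assert np.isclose(std[-1], np.std([1., 2., 3., 4.], ddof=1))
# The cumulative-sum formulation reproduces the running sample mean and the
# unbiased (ddof=1) standard deviation at each step; std[0] is nan because a
# single sample has no spread.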
def walkers(data, labels, npts=250, fontmin=11):
# Modified from: http://conference.scipy.org/jhepc2013/2013/entry5/index.html
nwalkers, nstep, ndim = data.shape
subsample = nstep / npts
red = "#cf6775"
blue = "#67a9cf"
fontcolor = "#dddddd"
fig = plt.figure(facecolor='#222222', figsize=(8.5, 11))
fig.subplots_adjust(hspace=0.01, wspace=0.01)
for i in range(ndim):
spA = plt.subplot2grid((ndim,3), (i,0), colspan=2)
spB = plt.subplot2grid((ndim,3), (i,2))
spA.set_axis_bgcolor("#333333")
spB.set_axis_bgcolor("#333333")
spA.tick_params(direction="out", colors="w")
spB.tick_params(axis="x", bottom="off", top="off")
spB.tick_params(axis="y", left="off", right="off")
spA.tick_params(axis="y", right="off")
if i != ndim-1: spA.tick_params(axis="x", bottom="off", top="off")
else: spA.tick_params(axis="x", top="off")
cmap = np.log(np.std(data[:,:,i], axis=1))
cmap -= np.min(cmap)
cmap /= np.max(cmap)
for j in range(nwalkers):
wdata = data[j,:,i]
rmean, rstd = runningMoments(wdata)
wdata = wdata[::subsample][1:] # [1:] since std[0] = nan
rmean = rmean[::subsample][1:]
rstd = rstd[::subsample][1:]
nsub = np.arange(nstep)[::subsample][1:]
cmap = np.abs(wdata-rmean)/rstd
#spA.plot(nsub, wdata, drawstyle="steps", color="w", alpha=0.15)
spA.plot(nsub, wdata, drawstyle="steps", color=plt.cm.bone_r(cmap[i]), alpha=0.15)
spA.plot(nsub, rmean, color=red, linestyle="-")
spA.fill_between(nsub, rmean-rstd, rmean+rstd, facecolor=blue, alpha=0.15)
spB.hist(np.ravel(data[:,:,i]), orientation='horizontal', facecolor=red, bins=50, edgecolor="none")
spB.set_ylabel(labels[i], rotation='horizontal', fontsize=fontmin+3, labelpad=15, weight="bold", color=fontcolor)
spB.set_ylim(spA.get_ylim())
spB.xaxis.set_visible(False)
spB.yaxis.tick_right()
spB.yaxis.set_label_position("right")
plt.setp(spB.get_yticklabels(), visible=False)
spA.locator_params(nbins=7, axis="y")
spA.set_yticks(spA.get_yticks()[1:-1])
spA.set_xlim(0, nstep)
if i != ndim-1:
plt.setp(spA.get_xticklabels(), visible=False)
else:
spA.set_xlabel("Step", fontsize=fontmin+3, labelpad=8, weight="bold", color=fontcolor)
plt.setp(spA.get_xticklabels(), fontsize=fontmin, weight="bold", color=fontcolor)
plt.setp(spA.get_yticklabels(), fontsize=fontmin, weight="bold", color=fontcolor)
| acbecker/pyhm | pyhm/walkers.py | Python | mit | 2,993 |
#!/usr/bin/env python
# "convertor" - converts ODF files from a YUSCII font-encoding to proper UTF-8.
# Copyright (C) 2009 Damjan Georgievski
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import setuptools
__author__ = 'Damjan Georgievski'
__version__ = '2.0'
__email__ = '[email protected]'
setuptools.setup(
name = 'convertor',
version = __version__,
author = __author__,
author_email = __email__,
description = 'converts ODF files from a YUSCII font-encoding to proper UTF-8 ODF',
license = 'AGPL 3.0',
url = 'http://github.com/gdamjan/convertor',
packages = ['convertor'],
package_data = {},
keywords = "ODF",
include_package_data = True,
classifiers = [
'Development Status :: 3 - Alpha',
'Programming Language :: Python :: 3.6'
],
test_suite = '',
zip_safe = False,
entry_points = {
'console_scripts':
['convertor=convertor.__main__:main']
},
install_requires = ['lxml'],
extras_require = {
"web": "Werkzeug"
}
)
| gdamjan/convertor | setup.py | Python | agpl-3.0 | 1,692 |
import py, os, sys
from pytest import raises
from .support import setup_make, IS_WINDOWS
class TestREGRESSION:
helpout = []
def setup_class(cls):
import cppyy
def stringpager(text, cls=cls):
cls.helpout.append(text)
import pydoc
pydoc.pager = stringpager
def test01_kdcraw(self):
"""Doc strings for KDcrawIface (used to crash)."""
import cppyy, pydoc
# TODO: run a find for these paths
qtpath = "/usr/include/qt5"
kdcraw_h = "/usr/include/KF5/KDCRAW/kdcraw/kdcraw.h"
if not os.path.isdir(qtpath) or not os.path.exists(kdcraw_h):
py.test.skip("no KDE/Qt found, skipping test01_kdcraw")
# need to resolve qt_version_tag for the incremental compiler; since
# it's not otherwise used, just make something up
cppyy.cppdef("int qt_version_tag = 42;")
cppyy.add_include_path(qtpath)
cppyy.include(kdcraw_h)
# bring in some symbols to resolve the class
cppyy.load_library("libQt5Core.so")
cppyy.load_library("libKF5KDcraw.so")
from cppyy.gbl import KDcrawIface
self.__class__.helpout = []
pydoc.doc(KDcrawIface.KDcraw)
helptext = ''.join(self.__class__.helpout)
assert 'KDcraw' in helptext
assert 'CPPInstance' in helptext
def test02_dir(self):
"""For the same reasons as test01_kdcraw, this used to crash."""
import cppyy, pydoc
assert not '__abstractmethods__' in dir(cppyy.gbl.gInterpreter)
assert '__class__' in dir(cppyy.gbl.gInterpreter)
self.__class__.helpout = []
pydoc.doc(cppyy.gbl.gInterpreter)
helptext = ''.join(self.__class__.helpout)
assert 'TInterpreter' in helptext
assert 'CPPInstance' in helptext
assert 'AddIncludePath' in helptext
cppyy.cppdef("namespace cppyy_regression_test { void iii() {}; }")
assert not 'iii' in cppyy.gbl.cppyy_regression_test.__dict__
assert not '__abstractmethods__' in dir(cppyy.gbl.cppyy_regression_test)
assert '__class__' in dir(cppyy.gbl.cppyy_regression_test)
assert 'iii' in dir(cppyy.gbl.cppyy_regression_test)
assert not 'iii' in cppyy.gbl.cppyy_regression_test.__dict__
assert cppyy.gbl.cppyy_regression_test.iii
assert 'iii' in cppyy.gbl.cppyy_regression_test.__dict__
self.__class__.helpout = []
pydoc.doc(cppyy.gbl.cppyy_regression_test)
helptext = ''.join(self.__class__.helpout)
# TODO: it's deeply silly that namespaces inherit from CPPInstance (in CPyCppyy)
assert ('CPPInstance' in helptext or 'CPPNamespace' in helptext)
def test03_pyfunc_doc(self):
"""Help on a generated pyfunc used to crash."""
import cppyy, distutils, pydoc, sys
import distutils.sysconfig as sc
cppyy.add_include_path(sc.get_python_inc())
if sys.hexversion < 0x3000000:
cppyy.cppdef("#undef _POSIX_C_SOURCE")
cppyy.cppdef("#undef _XOPEN_SOURCE")
else:
cppyy.cppdef("#undef slots") # potentially pulled in by Qt/xapian.h
cppyy.cppdef("""#include "Python.h"
long py2long(PyObject* obj) { return PyLong_AsLong(obj); }""")
pydoc.doc(cppyy.gbl.py2long)
assert 1 == cppyy.gbl.py2long(1)
def test04_avx(self):
"""Test usability of AVX by default."""
import cppyy, subprocess
has_avx = False
try:
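            # Linux: scan /proc/cpuinfo for the 'avx' flag; on failure fall back to sysctl (macOS) below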
for line in open('/proc/cpuinfo', 'r'):
if 'avx' in line:
has_avx = True
break
except Exception:
try:
cli_arg = subprocess.check_output(['sysctl', 'machdep.cpu.features'])
has_avx = 'avx' in cli_arg.decode("utf-8").strip().lower()
except Exception:
pass
if has_avx:
assert cppyy.cppdef('int check_avx() { return (int) __AVX__; }')
assert cppyy.gbl.check_avx() # attribute error if compilation failed
def test05_default_template_arguments(self):
"""Calling a templated method on a templated class with all defaults used to crash."""
import cppyy
cppyy.cppdef("""
template<typename T>
class AllDefault {
public:
AllDefault(int val) : m_t(val) {}
template<int aap=1, int noot=2>
int do_stuff() { return m_t+aap+noot; }
public:
T m_t;
};""")
a = cppyy.gbl.AllDefault[int](24)
a.m_t = 21;
assert a.do_stuff() == 24
def test06_class_refcounting(self):
"""The memory regulator would leave an additional refcount on classes"""
import cppyy, gc, sys
x = cppyy.gbl.std.vector['float']
old_refcnt = sys.getrefcount(x)
y = x()
del y
gc.collect()
assert sys.getrefcount(x) == old_refcnt
def test07_typedef_identity(self):
"""Nested typedefs should retain identity"""
import cppyy
cppyy.cppdef("""namespace PyABC {
struct S1 {};
struct S2 {
typedef std::vector<const PyABC::S1*> S1_coll;
};
}""")
from cppyy.gbl import PyABC
assert PyABC.S2.S1_coll
assert 'S1_coll' in dir(PyABC.S2)
assert not 'vector<const PyABC::S1*>' in dir(PyABC.S2)
assert PyABC.S2.S1_coll is cppyy.gbl.std.vector('const PyABC::S1*')
def test08_gil_not_released(self):
"""GIL was released by accident for by-value returns"""
import cppyy
something = 5.0
code = """
#include "Python.h"
std::vector<float> some_foo_calling_python() {
auto pyobj = reinterpret_cast<PyObject*>(ADDRESS);
float f = (float)PyFloat_AsDouble(pyobj);
std::vector<float> v;
v.push_back(f);
return v;
}
""".replace("ADDRESS", str(id(something)))
cppyy.cppdef(code)
cppyy.gbl.some_foo_calling_python()
def test09_enum_in_global_space(self):
"""Enum declared in search.h did not appear in global space"""
if IS_WINDOWS:
return # no such enum in MSVC's search.h
import cppyy
cppyy.include('search.h')
assert cppyy.gbl.ACTION
assert hasattr(cppyy.gbl, 'ENTER')
assert hasattr(cppyy.gbl, 'FIND')
def test10_cobject_addressing(self):
"""AsCObject (now as_cobject) had a deref too many"""
import cppyy
import cppyy.ll
cppyy.cppdef('struct CObjA { CObjA() : m_int(42) {} int m_int; };')
a = cppyy.gbl.CObjA()
co = cppyy.ll.as_cobject(a)
assert a == cppyy.bind_object(co, 'CObjA')
assert a.m_int == 42
assert cppyy.bind_object(co, 'CObjA').m_int == 42
def test11_exception_while_exception(self):
"""Exception from SetDetailedException during exception handling used to crash"""
import cppyy
cppyy.cppdef("namespace AnExceptionNamespace { }")
try:
cppyy.gbl.blabla
except AttributeError:
try:
cppyy.gbl.AnExceptionNamespace.blabla
except AttributeError:
pass
def test12_char_star_over_char(self):
"""Map str to const char* over char"""
# This is debatable, but although a single character string passes through char,
# it is more consistent to prefer const char* or std::string in all cases. The
# original bug report is here:
# https://bitbucket.org/wlav/cppyy/issues/127/string-argument-resolves-incorrectly
import cppyy
cppyy.cppdef("""
namespace csoc1 {
std::string call(char) { return "char"; }
}
namespace csoc2 {
std::string call(char) { return "char"; }
std::string call(const char*) { return "const char*"; }
}
namespace csoc3 {
std::string call(char) { return "char"; }
std::string call(const std::string&) { return "string"; }
}
""")
assert cppyy.gbl.csoc1.call('0') == 'char'
raises(ValueError, cppyy.gbl.csoc1.call, '00')
assert cppyy.gbl.csoc2.call('0') == 'const char*'
assert cppyy.gbl.csoc2.call('00') == 'const char*'
assert cppyy.gbl.csoc3.call('0') == 'string'
assert cppyy.gbl.csoc3.call('00') == 'string'
def test13_struct_direct_definition(self):
"""Struct defined directly in a scope miseed scope in renormalized name"""
import cppyy
cppyy.cppdef("""
namespace struct_direct_definition {
struct Bar {
struct Baz {
std::vector<double> data;
} baz[2];
Bar() {
baz[0].data.push_back(3.14);
baz[1].data.push_back(2.73);
}
};
class Foo {
public:
class Bar {
public:
Bar(): x(5) {}
int x;
} bar;
}; }""")
from cppyy.gbl import struct_direct_definition as sds
b = sds.Bar()
assert len(b.baz) == 2
assert len(b.baz[0].data) == 1
assert b.baz[0].data[0] == 3.14
assert len(b.baz[1].data) == 1
assert b.baz[1].data[0] == 2.73
f = sds.Foo()
assert f.bar.x == 5
def test14_vector_vs_initializer_list(self):
"""Prefer vector in template and initializer_list in formal arguments"""
import cppyy
cppyy.cppdef("""
namespace vec_vs_init {
template<class T>
std::string nameit1(const T& t) {
return typeid(T).name();
}
template<class T>
std::string nameit2(T&& t) {
return typeid(T).name();
}
template<class T>
size_t sizeit(T&& t) {
return t.size();
}
}""")
nameit1 = cppyy.gbl.vec_vs_init.nameit1
assert 'vector' in nameit1(list(range(10)))
assert 'vector' in nameit1(cppyy.gbl.std.vector[int]())
nameit2 = cppyy.gbl.vec_vs_init.nameit2
assert 'vector' in nameit2(list(range(10)))
assert 'vector' in nameit2(cppyy.gbl.std.vector[int]())
sizeit = cppyy.gbl.vec_vs_init.sizeit
assert sizeit(list(range(10))) == 10
def test15_iterable_enum(self):
"""Use template to iterate over an enum"""
# from: https://stackoverflow.com/questions/52459530/pybind11-emulate-python-enum-behaviour
import cppyy
cppyy.cppdef("""
template <typename Enum>
struct my_iter_enum {
struct iterator {
using value_type = Enum;
using difference_type = ptrdiff_t;
using reference = const Enum&;
using pointer = const Enum*;
using iterator_category = std::input_iterator_tag;
iterator(Enum value) : cur(value) {}
reference operator*() { return cur; }
pointer operator->() { return &cur; }
bool operator==(const iterator& other) { return cur == other.cur; }
bool operator!=(const iterator& other) { return !(*this == other); }
iterator& operator++() { if (cur != Enum::Unknown) cur = static_cast<Enum>(static_cast<std::underlying_type_t<Enum>>(cur) + 1); return *this; }
iterator operator++(int) { iterator other = *this; ++(*this); return other; }
private:
Enum cur;
int TODO_why_is_this_placeholder_needed; // JIT error? Too aggressive optimization?
};
iterator begin() {
return iterator(Enum::Black);
}
iterator end() {
return iterator(Enum::Unknown);
}
};
enum class MyColorEnum : char {
Black = 1,
Blue,
Red,
Yellow,
Unknown
};""")
Color = cppyy.gbl.my_iter_enum['MyColorEnum']
assert Color.iterator
c_iterable = Color()
assert c_iterable.begin().__deref__() == 1
all_enums = []
for c in c_iterable:
all_enums.append(int(c))
assert all_enums == list(range(1, 5))
def test16_operator_eq_pickup(self):
"""Base class python-side operator== interered with derived one"""
import cppyy
cppyy.cppdef("""
namespace SelectOpEq {
class Base {};
class Derived1 : public Base {
public:
bool operator==(Derived1&) { return true; }
};
class Derived2 : public Base {
public:
bool operator!=(Derived2&) { return true; }
}; }""")
soe = cppyy.gbl.SelectOpEq
soe.Base.__eq__ = lambda first, second: False
soe.Base.__ne__ = lambda first, second: False
a = soe.Derived1()
b = soe.Derived1()
assert a == b # derived class' C++ operator== called
a = soe.Derived2()
b = soe.Derived2()
assert a != b # derived class' C++ operator!= called
def test17_operator_plus_overloads(self):
"""operator+(string, string) should return a string"""
import cppyy
a = cppyy.gbl.std.string("a")
b = cppyy.gbl.std.string("b")
assert a == 'a'
assert b == 'b'
assert type(a+b) == str
assert a+b == 'ab'
def test18_std_string_hash(self):
"""Hashing of std::string"""
import cppyy
s = cppyy.gbl.std.string("text")
d = {}
# hashes of std::string larger than 2**31 would fail; run a couple of
# strings to check although it may still succeed by accident (and never
# was an issue on p3 anyway)
for s in ['abc', 'text', '321', 'stuff', 'very long string']:
d[s] = 1
def test19_signed_char_ref(self):
"""Signed char executor was self-referencing"""
import cppyy
cppyy.cppdef("""
class SignedCharRefGetter {
public:
void setter(signed char sc) { m_c = sc; }
signed char& getter() { return m_c; }
signed char m_c;
};""")
obj = cppyy.gbl.SignedCharRefGetter()
obj.setter('c')
assert obj.getter() == 'c'
def test20_temporaries_and_vector(self):
"""Extend a life line to references into a vector if needed"""
import cppyy
cppyy.cppdef("""
std::vector<std::string> get_some_temporary_vector() { return { "x", "y", "z" }; }
""")
l = [e for e in cppyy.gbl.get_some_temporary_vector()]
assert l == ['x', 'y', 'z']
def test21_initializer_list_and_temporary(self):
"""Conversion rules when selecting intializer_list v.s. temporary"""
import cppyy
cppyy.cppdef("""
namespace regression_test21 {
std::string what_called = "";
class Foo {
public:
Foo() = default;
Foo(int i) {
what_called += "Foo(int)";
}
Foo(std::initializer_list<uint8_t> il) {
std::ostringstream os;
os << "Foo(il<size=" << il.size() << ">)";
what_called += os.str();
}
};
class Bar {
public:
Bar() = default;
Bar(int i) {
what_called = "Bar(int)";
}
Bar(std::initializer_list<uint8_t> il) {
std::ostringstream os;
os << "Bar(il<size=" << il.size() << ">)";
what_called += os.str();
}
Bar(Foo x) {
what_called += "Bar(Foo)";
}
}; }""")
r21 = cppyy.gbl.regression_test21
assert len(r21.what_called) == 0
r21.Bar(1)
assert r21.what_called == 'Bar(int)'
r21.what_called = ''
r21.Bar([1,2]) # used to call Bar(Foo x) through implicit conversion
assert r21.what_called == 'Bar(il<size=2>)'
def test22_copy_constructor(self):
"""Copy construct an object into an empty (NULL) proxy"""
import cppyy, gc
cppyy.cppdef("""
namespace regression_test22 {
struct Countable {
static int s_count;
Countable() { ++s_count; }
Countable(const Countable&) { ++s_count; }
Countable& operator=(const Countable&) { return *this; }
~Countable() { --s_count; }
};
int Countable::s_count = 0;
}""")
r22 = cppyy.gbl.regression_test22
assert r22.Countable.s_count == 0
c = r22.Countable()
assert r22.Countable.s_count == 1
raises(ReferenceError, c.__init__, r22.Countable())
gc.collect()
assert r22.Countable.s_count == 1
c.__assign__(r22.Countable())
gc.collect()
assert r22.Countable.s_count == 1
c.__destruct__()
assert r22.Countable.s_count == 0
c.__init__(r22.Countable())
gc.collect()
assert r22.Countable.s_count == 1
del c
gc.collect()
assert r22.Countable.s_count == 0
c = cppyy.bind_object(cppyy.nullptr, r22.Countable)
assert r22.Countable.s_count == 0
c.__init__(r22.Countable())
gc.collect()
assert r22.Countable.s_count == 1
| karies/root | bindings/pyroot/cppyy/cppyy/test/test_regression.py | Python | lgpl-2.1 | 17,758 |
import ast
import ConfigParser
import datetime
import exceptions
import glob
import grp
import importlib
import inspect
import multiprocessing
import os
import sys
from drop_privileges import drop_privileges
from pwd import getpwnam
class JobCtl(object):
def __init__(self, sched, config, logging):
self.sched = sched
self.config = config
self.logging = logging
def add_job(self, filename, username, realuser):
"""adds a job to jobstore if it passes read_jobfile, check_job functions"""
# can't read a jobfile that doesn't exist
if not os.path.isfile(filename):
return False, "Jobfile does not exist."
if os.path.basename(filename) not in os.listdir(self.config.get('main', 'job_dir')):
return False, "Job file must be placed under: %s" % self.config.get('main', 'job_dir')
returncode, output = self.read_jobfile(filename)
if returncode is False:
self.logging.error("Adding job failed: %s", output)
return False, output
else:
# output is a dictionary, return from read_jobfile
jobdict = output.itervalues().next()
try:
jobname = jobdict['__name__']
owner = jobdict['owner']
# don't let someone submit a job with a name that already exists
if self._check_if_job_exists(jobname):
return False, "Job: %s already exists." % jobname
returncode, output = self._check_job(jobname, owner, username)
if returncode is False:
return False, output
except KeyError as ke:
return False, "Jobcheck subroutine could not complete, exception: '%s'" % ke
except TypeError as te:
return False, "Improper format for in jobfile: '%s', error: '%s'" % (filename, te)
try:
# takes string "* * * * *" and turns it into a list
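            # (cron field order: minute, hour, day-of-month, month, day-of-week)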
schedule = [ f for f in jobdict['schedule'].split() ]
# converts a string to dict, to be passed to a function as **kwargs
jobargs = ast.literal_eval(jobdict['kwargs'])
# silently add the owner to the kwargs.
jobargs['owner'] = jobdict['owner']
# add the job to the jobstore
self.sched.add_cron_job(
self.run_job,
                minute=schedule[0],
                hour=schedule[1],
                day=schedule[2],
                month=schedule[3],
                day_of_week=schedule[4],
name=jobdict['__name__'],
kwargs=jobargs,
max_instances=1
)
except ValueError as ve:
self.logging.error("Error adding job: '%s', Jobfile: '%s', Error: '%s'",
jobdict['__name__'], filename, ve)
return False, "Error adding job, job file parse error: '%s'" % ve
# extend the apscheduler.job schema by adding some attributes to a job object
job = self._get_job_obj(jobname)
job.jobfile = filename
job.owner = jobdict['owner']
job.type = jobdict['type']
# if the job is disabled by default (enabled=false), set job as disabled
# so that during the add_job phase, it's disabled...
if jobdict['enabled'].lower() != 'true':
job.status = 'Null' # set the attribute for now. as soon as disable_job() runs, it will be set to 'Disabled'
self.disable_job(jobname, 'initial_import', 'initial_import')
else:
job.status = 'Enabled'
self.logging.info("Adding job: '%s', Submitted by user: '%s(%s)'",
jobdict['__name__'], username, realuser)
return True, "Successfully added job: '%s'" % jobdict['__name__']
def _check_if_job_exists(self, jobname):
"""returns True/False based on whether or not job exists"""
joblist = self.sched._jobstores.get('default').jobs
for job in joblist:
if jobname == job.name:
self.logging.debug("check_if_job_exists = True, for job: '%s'" % jobname)
return True
else:
pass
self.logging.debug("check_if_job_exists = False, for job: '%s'" % jobname)
return False
def _check_job(self, jobname, owner, submituser):
"""
        Checks whether the submitting user is submitting a job to be run as someone else.
Returns False if job submission is invalid for any number of reasons
"""
# cant submit a job that runs as someone else, nonexistant userid
try:
getpwnam(owner)
except KeyError as ke:
self.logging.debug("checkjob: '%s', job_owner: '%s' does not exist on system",
jobname, owner)
return False, "User: '%s' in jobfile does not exist on system" % owner
# anyone as root or initial_startup gets a free pass; security hole!
if submituser == 'root' or submituser == 'initial_startup':
pass
else:
if submituser != owner:
self.logging.debug("Job: '%s', JobOwner: '%s', JobSubmitter: '%s'"
% ( jobname, owner, submituser))
return False, "Job is set to run as user: '%s', You cannot add jobs that run as a different user." % owner
return True, "Job passed all checks"
def disable_job(self, jobname, user, realuser):
"""
This disables a job by setting its rundate.year > 2100,
and job.status to 'Disabled'.
"""
if not self._check_if_job_exists(jobname):
return False, "Job does not exist."
job = self._get_job_obj(jobname)
# sorry, can't disable someone else's job unless you are root
if job.owner != user and user not in ['root', 'initial_import']:
self.logging.error("User '%s' tried to disable job: '%s', owned by: '%s'."
% (user, job.name, job.owner) )
return False, "Cannot disable job: '%s', owned by: '%s'" % (job.name, job.owner)
# no job.status attribute. The only way possible is during job import
#
# this was ordered above the job.status line below because of Attribute.errors that
# I couldn't figure out how to catch
elif not job.status:
# this shouldn't return anything, since an xmlrpc request to disable
# a non existent job should fail long before this
self.logging.debug("Job: '%s' has no status, must be an import with \
'enabled=false'. Disabling job.", jobname)
# can't disable a job that's already in a 'Disabled' state
elif job.status == 'Disabled':
return False, "Job: '%s' is already disabled." % job.name
# set the year for the job to run to be > 2100. ghetto disable
#
# idea from: http://stackoverflow.com/questions/5871168/how-can-i-subtract-or-add-100-years-to-a-datetime-field-in-the-database-in-djang
next_run_time = job.next_run_time
disabled_run_time = datetime.datetime(
next_run_time.year + 200, *next_run_time.timetuple()[1:-2])
job.next_run_time = disabled_run_time
job.status = 'Disabled'
self.logging.info("Job: '%s' has been disabled by user: '%s'.",
job.name, user)
self.logging.info("Disabled job: '%s', New Schedule: '%s'",
job.name, job)
return True, "Job: '%s' has been disabled." % job.name
def enable_job(self, jobname, user, realuser):
"""Re-enables a job that was disabled via the rpc client"""
if not self._check_if_job_exists(jobname):
return False, "Job does not exist."
job = self._get_job_obj(jobname)
# sorry, can't enable someone else's job unless you are root
if job.owner != user and user != 'root':
self.logging.error("User '%s' tried to re-enable job: '%s', owned by: '%s'."
% (user, job.name, job.owner) )
return False, "Cannot re-enable job: '%s', owned by: '%s'" % (job.name, job.owner)
elif job.status == 'Enabled':
return False, "Job: '%s' is already enabled." % job.name
# job.compute_next_run_time is an internal apscheduler function that
        # uses the initial job schedule submission parameters to determine the
# next run time. since we want to re-enable the job, this will
# reschedule the job to run at the next valid time
new_next_run_time = job.compute_next_run_time(datetime.datetime.now())
job.next_run_time = new_next_run_time
job.status = 'Enabled'
self.logging.info("Job: '%s' has been re-enabled by user: '%s'.",
job.name, user)
self.logging.info("Re-enabled job: '%s', New Schedule: '%s'", job.name, job)
return True, "Job: '%s' has been re-enabled" % job.name
def force_run_job(self, jobname, user, realuser):
"""Run a job in the jobstore at this very moment. Does not spawn another thread."""
if not self._check_if_job_exists(jobname):
return False, "Job does not exist."
job = self._get_job_obj(jobname)
# don't let any joe schmoe force run a job they don't own
if user != job.owner and user != 'root':
self.logging.error("User '%s', tried to run job: '%s', owned by '%s'",
user, job.name, job.owner)
return False, "User: '%s', cannot force run job: '%s', owned by '%s'" % (user, job.name, job.owner)
self.logging.info("User: '%s(%s)', force running job: '%s'", user, realuser, jobname)
self.run_job(**job.kwargs)
return True, "Successfully force ran job: '%s'" % job.name
def _get_job_obj(self, jobname):
"""returns a job object"""
        # bail out early if the job does not exist
if not self._check_if_job_exists(jobname):
self.logging.debug("Function: get_job, Job: '%s' does not exist." % jobname)
return False, "Job: '%s' does not exist." % jobname
jobs = self.sched.get_jobs()
for job in jobs:
if job.name == jobname:
return job
else:
pass
# this is an internal error. if we make it all the way through the for loop
# and don't find the job, it doesn't exist.
raise RatkingException("Function: ratking.jobcontrol._get_job_obj() went plaid.")
def read_jobfile(self, filename):
"""reads a file, parses with ConfigParser, and returns a dictionary of config options"""
# first off, check and see if the jobfile is in $RATKING_ROOT/etc/jobs.d
# We do not want jobfiles spread randomly everywhere
parser=ConfigParser.SafeConfigParser()
try:
parser.read([filename])
except ConfigParser.MissingSectionHeaderError as error:
return False, "Config file: '%s' is not in correct format" % filename
for section in parser.sections():
for option in parser.options(section):
try:
value=parser.getint(section,option)
except ValueError:
value=parser.get(section,option)
# check the config file, and make sure a number of things exist
if len(parser.sections()) != 1:
self.logging.error("Reading jobfile: Cannot have more (or less) than one [section] in job config file.")
return False, "Cannot have more (or less) than one [section] in job config file."
jobname = parser.sections()[0] # we already ensured that this array is length 1
# make sure the jobname matches the filename (minus the .conf)
if jobname != filename.split(os.path.sep)[-1].split('.')[0]:
self.logging.error("Filename: %s (minus .conf) must match header name: [%s]", filename, jobname)
return False, "Filename: %s (minus .conf) must match header name: [%s]" % (filename, jobname)
required_options = ['type', 'schedule', 'owner', 'plugin_name', 'kwargs', 'enabled', 'autostart']
# make sure if the job is a plugin job, the plugin is in $RATKING_ROOT/var/lib/ratkingd/plugins.d
for req in required_options:
if not req in parser._sections[jobname]:
self.logging.error("Missing required job config options: '%s'" % req)
return False, "Missing one or more attributes '[%s]' in job config file." % req
# get the calling function, so that if its import_jobs, we don't load if autostart=false
calling_function = inspect.stack()[2][3]
if calling_function == 'import_jobs' and parser._sections[jobname]['autostart'].lower() == 'false':
self.logging.info("During import: job will not be added, autostart=false in config file.")
return False, 'During import: job will not be added, autostart=false in config file.'
# this checks to make sure that the kwargs= section in the jobfile can be properly converted to
# a dictionary later on in the process
try:
ast.literal_eval(parser._sections[jobname]['kwargs'])
except ValueError as ve:
self.logging.debug("Error importing jobfile: '%s'. kwargs must have apostrophies around all key value " +
"pairs: { 'keyname' : 'value' }, or be True|False: { 'mail' : True }")
return False, 'Job import failed, kwargs key/value parsing error'
return True, parser._sections
def remove_job(self, jobname, user, realuser):
"""Removes a job from the schedule completely"""
if not self._check_if_job_exists(jobname):
return False, "Job does not exist."
job = self._get_job_obj(jobname)
# first check if the user removing the job owns that job
if user != 'root' and user != job.owner:
self.logging.error("Job: '%s', cannot be removed by user: '%s(%s)'", job.name, user, realuser)
return False, 'Cannot remove a job you do not own.'
try:
self.sched.unschedule_job(job)
self.logging.info("Job: '%s', removed by user: '%s(%s)'", job.name, user, realuser)
return True, "Successfully removed job: '%s'" % job.name
except KeyError as ke:
return False, "Removing job: '%s' failed, Error: '%s'" % (job.name, ke)
def run_job(self, **kwargs):
"""sets up multiprocessor to run the actual exec function """
# test mode enabled. No subprocesses will spawn, and job won't execute it's function
if self.config.get('main', 'test_mode') == '1':
self.logging.info("Test mode enabled, job '%s' finishing.", kwargs['job_name'])
return True
# use multiprocess to fork a new process for each job being run
p = multiprocessing.Process(target=self._run_job_exec, kwargs=kwargs)
p.daemon = True
p.start()
# this will wait for the process to finish. since apscheduler runs jobs via a separate thread,
# only the job running thread will be blocking, so the rest of the main daemon will be fine
p.join()
return True
def _run_job_exec(self, **kwargs):
"""Loads appropriate module, changes uid/gid to owner/group, and runs the job """
plugin_dir = self.config.get('main', 'plugin_dir')
plugin_name = kwargs['plugin_name'].split('.')[0]
plugin_path = "%s%s%s" % (plugin_dir, os.path.sep, kwargs['plugin_name'])
#sys.path.append(plugin_dir)
# this loads the plugin
try:
# first, check if the plugin even exists
if os.path.isfile(plugin_path):
pass
else:
#self.logging.error("Job (%s) run error, plugin does not exist: '%s'"
# % (kwargs['job_name'], plugin_path) )
raise RatkingException("Job (%s) run error, plugin does not exist: '%s'"
% (kwargs['job_name'], plugin_path) )
            if plugin_name not in sys.modules:
self.logging.debug("Importing module: %s" % plugin_name)
lib = importlib.import_module(plugin_name)
else:
# we reload so that each time the script is run, any updates to the plugin
# will be in effect. usecases would be if you disable/re-enable a job due to error.
# Python won't 'unload' the module when you disable it. remove_job and then add_job
# probably won't do any garbage collection either.
self.logging.debug("Reloading module: %s" % plugin_name)
                lib = reload(sys.modules[plugin_name])
except ImportError as ie:
self.logging.error("Module import error for job: '%s'" % kwargs['job_name'])
raise RatkingException("Module import error for job: '%s, error: %s'" \
% (kwargs['job_name'], ie) )
try:
# get uid/gid of job owner:
uid = getpwnam(kwargs['owner'])[2]
# get primary group gid of user
gid = getpwnam(kwargs['owner'])[3]
group_name = grp.getgrgid(gid)[0]
self.logging.debug("Running job: '%s' as user: '%s', group: '%s'",
kwargs['job_name'], kwargs['owner'], group_name)
# use drop_privileges module to set the uid/gid of the subprocess
drop_privileges(uid_name=kwargs['owner'], gid_name=group_name)
# ALL plugin's main() function should accept **kwargs:
# EX: def main(**kwargs):
#
# run the actual module
lib.main(**kwargs)
return True
except OSError as oe:
self.logging.error("Something went wrong here: '%s'" % oe )
raise RatkingException("Job '%s' did not execute successfully, error: '%s'" % (kwargs['job_name'], oe) )
return True
def show_jobs(self):
"""Returns a list object of all active jobs"""
output = []
jobs = self.sched.get_jobs()
output.append('{0: <25} {1: <15} {2: <15} {3: <10} {4}' \
.format('Jobname', 'Jobowner', 'JobType', 'Status', 'Next Run Time'))
output.append('='*110)
for job in jobs:
status = job.status
next_run_time = job.next_run_time
# dirty hack to display disabled jobs in a meaningful way
if job.next_run_time.year > 2100 or job.status == 'Disabled':
next_run_time = 'Never'
status = 'Disabled'
line = '{0: <25} {1: <15} {2: <15} {3: <10} {4}' \
.format(job.name, job.owner, job.type, status, next_run_time)
output.append(line)
return output
class RatkingException(Exception):
def __init__(self, message):
self.message = message
def __str__(self):
return repr(self.message)
| stevec7/ratking | ratking/jobhandler.py | Python | mit | 19,610 |
import os, re
file = 'latex.tex'
dir = 'tex-test'
with open(file, 'r', encoding='utf8') as f:
data = ''.join(f.readlines())
split = re.split(r'(\\hier[ABCDE]def{|\\section{|\\subsection{)', data)
print(len(split))
for s in split:
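    # sanitize the section title for use as a filename and expand the \back* escape macros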
title = s[:s.find("}")].replace('?','').replace('/','')
title = title.replace('\\backi', '`i').replace('\\backd', '`d')
title = title.replace('\\backc', '`c').replace('\\backposs', '`$')
if '\\' in title:
continue
print(title)
if not os.path.exists(dir):
os.mkdir(dir)
with open(os.path.join(dir,title+'.tex'), 'w+', encoding='utf8') as f2:
f2.write('\\section{'+s)
| nert-gu/Xposition | xposition/scripts/split_latex.py | Python | gpl-3.0 | 706 |
import mock
from librarian_setup import setup as mod
@mock.patch.object(mod.Setup, 'auto_configure')
@mock.patch.object(mod.Setup, 'load')
def test_setup_init_already_completed(load, auto_configure):
load.return_value = {'some': 'data', 'completed': True}
setup = mod.Setup('setup.json')
assert setup.data == {'some': 'data', 'completed': True}
assert not auto_configure.called
@mock.patch.object(mod.Setup, 'auto_configure')
@mock.patch.object(mod.Setup, 'load')
def test_setup_init_not_completed(load, auto_configure):
load.return_value = None
auto_configure.return_value = {'auto': 'configured'}
setup = mod.Setup('setup.json')
assert setup.data == {'auto': 'configured'}
auto_configure.assert_called_once_with()
@mock.patch.object(mod.Setup, 'load')
def test_setup_data_access(load):
load.return_value = {'some': 'data'}
setup = mod.Setup('setup.json')
assert setup['some'] == 'data'
assert setup.get('some') == 'data'
assert setup.get('invalid', 1) == 1
@mock.patch.object(mod.os.path, 'exists')
@mock.patch.object(mod.Setup, '__init__')
def test_load_does_not_exist(init, exists):
exists.return_value = False
init.return_value = None
setup = mod.Setup()
setup.setup_file = '/path/to/setup.json'
assert setup.load() == {}
exists.assert_called_once_with('/path/to/setup.json')
@mock.patch.object(mod.json, 'load')
@mock.patch('__builtin__.open')
@mock.patch.object(mod.os.path, 'exists')
@mock.patch.object(mod.Setup, '__init__')
def test_load_invalid_config(init, exists, f_open, json_load):
exists.return_value = True
init.return_value = None
setup = mod.Setup()
setup.setup_file = '/path/to/setup.json'
mocked_file = mock.Mock()
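    # mock open() as a context manager so "with open(...) as f" yields mocked_file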
ctx_manager = mock.MagicMock()
ctx_manager.__enter__.return_value = mocked_file
f_open.return_value = ctx_manager
json_load.side_effect = ValueError()
assert setup.load() == {}
exists.assert_called_once_with('/path/to/setup.json')
json_load.assert_called_once_with(mocked_file)
@mock.patch.object(mod.json, 'dump')
@mock.patch('__builtin__.open')
@mock.patch.object(mod.Setup, '__init__')
def test_save_config(init, f_open, json_dump):
init.return_value = None
setup = mod.Setup()
setup.setup_file = '/path/to/setup.json'
setup.data = {'auto': 'configured'}
mocked_file = mock.Mock()
ctx_manager = mock.MagicMock()
ctx_manager.__enter__.return_value = mocked_file
f_open.return_value = ctx_manager
setup.append({'setup': 'result', 'another': 1})
merged_data = {'auto': 'configured',
'setup': 'result',
'another': 1}
json_dump.assert_called_once_with(merged_data, mocked_file)
| Outernet-Project/librarian-setup | tests/test_setup.py | Python | gpl-3.0 | 2,743 |
# -*- coding: utf-8 -*-
# Copyright 2011 NorfCran <[email protected]>
# License: same as zim (gpl)
from __future__ import with_statement
import gtk
from zim.plugins import PluginClass, extends, WindowExtension
from zim.actions import action
from zim.gui.widgets import ui_environment, MessageDialog
#from zim.gui.clipboard import parsetree_from_selectiondata
import logging
logger = logging.getLogger('zim.plugins.linesorter')
class LineSorterPlugin(PluginClass):
	'''Sort selected lines alphabetically'''
plugin_info = {
'name': _('Line Sorter'), # T: plugin name
'description': _('''\
This plugin sorts selected lines in alphabetical order.
If the list is already sorted the order will be reversed
(A-Z to Z-A).
'''), # T: plugin description
'author': 'NorfCran',
'help': 'Plugins:Line Sorter',
}
@extends('MainWindow')
class MainWindowExtension(WindowExtension):
uimanager_xml = '''
<ui>
<menubar name='menubar'>
<menu action='edit_menu'>
<placeholder name='plugin_items'>
<menuitem action='sort_selected_lines'/>
</placeholder>
</menu>
</menubar>
</ui>
'''
@action(_('_Sort lines'), stock='gtk-sort-ascending')
def sort_selected_lines(self):
buffer = self.window.pageview.view.get_buffer()
try:
sel_start, sel_end = buffer.get_selection_bounds()
except ValueError:
MessageDialog(self.ui,
_('Please select more than one line of text, first.')).run()
# T: Error message in "" dialog, %s will be replaced by application name
return
first_lineno = sel_start.get_line()
last_lineno = sel_end.get_line()
with buffer.user_action:
# Get iters for full selection
iter_end_line = buffer.get_iter_at_line(last_lineno)
iter_end_line.forward_line() # include \n at end of line
if iter_end_line.is_end() and not iter_end_line.starts_line():
# no \n at end of buffer, insert it
buffer.insert(iter_end_line, '\n')
iter_end_line = buffer.get_end_iter()
iter_begin_line = buffer.get_iter_at_line(first_lineno)
# 1/ build a list of formatted lines with get_parsetree()
# 2/ make a list of tuples, first element of each tuple is
# text only (white space stripped etc.), second element
# is parsetree per line from step 1
lines = []
for line_nr in range(first_lineno, last_lineno+1):
start, end = buffer.get_line_bounds(line_nr)
text = buffer.get_text(start, end).lower().strip()
tree = buffer.get_parsetree(bounds=(start, end))
lines.append((text, tree))
#logger.debug("Content of selected lines (text, tree): %s", lines)
# 3/ sort this list of tuples, sort will look at first element of the tuple
sorted_lines = sorted(lines, key=lambda lines: lines[0])
# checks whether the list is sorted "a -> z", if so reverses its order
if lines == sorted_lines:
sorted_lines.reverse()
# logger.debug("Sorted lines: %s", sorted_lines)
# 4/ for the replacement insert the parsetrees of the lines one by one
buffer.delete(iter_begin_line, iter_end_line)
for line in sorted_lines:
buffer.insert_parsetree_at_cursor(line[1])
| gdw2/zim | zim/plugins/linesorter.py | Python | gpl-2.0 | 3,057 |
# Copyright 2019 The MLPerf Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Python demo showing how to use the MLPerf Inference load generator bindings.
"""
from __future__ import print_function
import threading
import time
from absl import app
import mlperf_loadgen
import numpy
def load_samples_to_ram(query_samples):
del query_samples
return
def unload_samples_from_ram(query_samples):
del query_samples
return
# Processes queries in 3 slices that complete at different times.
def process_query_async(query_samples, i_slice):
time.sleep(.001 * (i_slice + 1))
responses = []
samples_to_complete = query_samples[i_slice:len(query_samples):3]
for s in samples_to_complete:
responses.append(mlperf_loadgen.QuerySampleResponse(s.id, 0, 0))
mlperf_loadgen.QuerySamplesComplete(responses)
def issue_query(query_samples):
threading.Thread(target=process_query_async,
args=(query_samples, 0)).start()
threading.Thread(target=process_query_async,
args=(query_samples, 1)).start()
threading.Thread(target=process_query_async,
args=(query_samples, 2)).start()
def flush_queries():
pass
def process_latencies(latencies_ns):
print("Average latency: ")
print(numpy.mean(latencies_ns))
print("Median latency: ")
print(numpy.percentile(latencies_ns, 50))
print("90 percentile latency: ")
print(numpy.percentile(latencies_ns, 90))
def main(argv):
del argv
settings = mlperf_loadgen.TestSettings()
settings.scenario = mlperf_loadgen.TestScenario.MultiStream
settings.mode = mlperf_loadgen.TestMode.PerformanceOnly
settings.multi_stream_target_latency_ns = 100000000
settings.multi_stream_samples_per_query = 4
settings.multi_stream_max_async_queries = 2
settings.min_query_count = 100
settings.min_duration_ms = 10000
sut = mlperf_loadgen.ConstructSUT(
issue_query, flush_queries, process_latencies)
qsl = mlperf_loadgen.ConstructQSL(
1024, 128, load_samples_to_ram, unload_samples_from_ram)
mlperf_loadgen.StartTest(sut, qsl, settings)
mlperf_loadgen.DestroyQSL(qsl)
mlperf_loadgen.DestroySUT(sut)
if __name__ == "__main__":
app.run(main)
| mlperf/inference_results_v0.5 | open/Inspur/code/resnet/schedule/src/loadgen/demos/py_demo_multi_stream.py | Python | apache-2.0 | 2,879 |
from oslo_log import log as logging
from nca47.common.exception import NonExistParam, ParamNull, ParamValueError
from nca47.common.i18n import _
from nca47.common.i18n import _LE
from nca47.manager import central
from oslo_messaging.exceptions import MessagingException
from nca47.common.exception import BadRequest
from nca47.common.exception import Nca47Exception
from oslo_serialization import jsonutils as json
from nca47.api.controllers.v1 import tools
from nca47.api.controllers.v1.firewall import fw_base
LOG = logging.getLogger(__name__)
class SnatAddrPoolController(fw_base.BaseRestController):
"""
    nca47 snataddrpool class, used to add/delete/query snataddrpool
    info; it validates whether parameters are legal, handles DB
    operations, and calls the rpc client's corresponding methods to send
    messages to agent endpoints
"""
def __init__(self):
self.manager = central.CentralManager.get_instance()
super(SnatAddrPoolController, self).__init__()
def create(self, req, *args, **kwargs):
"""create the snataddrpool"""
url = req.url
try:
# get the right url
if len(args) != 1:
raise BadRequest(resource="AddrPool operation", msg=url)
# get the body
json_body = req.body
# get the context
context = req.context
values = json.loads(json_body)
            # required input parameters
valid_attributes = ['tenant_id', 'dc_name', 'network_zone', 'name',
'ipstart', 'ipend', 'slotip', 'vfwname']
# check the in values
recom_msg = self.validat_values(values, valid_attributes)
LOG.info(_("the in value body is %(body)s"), {"body": values})
# from rpc server create the snataddrpool in db and device
snataddrpool = self.manager.add_snataddrpool(context, recom_msg)
except Nca47Exception as e:
self.response.status = e.code
            LOG.error(_LE('Error exception! error info: %s'), e.message)
LOG.exception(e)
return tools.ret_info(e.code, e.message)
except MessagingException as exception:
self.response.status = 500
message = exception.value
return tools.ret_info(self.response.status, message)
except Exception as exception:
LOG.exception(exception)
self.response.status = 500
message = "the values of the body format error"
return tools.ret_info(self.response.status, message)
return snataddrpool
def remove(self, req, *args, **kwargs):
"""del the snataddrpool"""
url = req.url
try:
# get the right url
if len(args) != 1:
raise BadRequest(resource="AddrPool operation", msg=url)
# get the body
json_body = req.body
# get the context
context = req.context
values = json.loads(json_body)
            # required input parameters
valid_attributes = ['tenant_id', 'dc_name', 'id', 'network_zone',
'vfwname']
# check the in values
recom_msg = self.validat_values(values, valid_attributes)
LOG.info(_("the in value body is %(body)s"), {"body": values})
# from rpc server delete the snataddrpool in db and device
snataddrpool = self.manager.del_snataddrpool(context, recom_msg)
except Nca47Exception as e:
self.response.status = e.code
            LOG.error(_LE('Error exception! error info: %s'), e.message)
LOG.exception(e)
return tools.ret_info(e.code, e.message)
except MessagingException as exception:
self.response.status = 500
message = exception.value
return tools.ret_info(self.response.status, message)
except Exception as exception:
LOG.exception(exception)
self.response.status = 500
message = "the values of the body format error"
return tools.ret_info(self.response.status, message)
return snataddrpool
def show(self, req, *args, **kwargs):
"""get the one snataddrpool"""
url = req.url
try:
# get the right url
if len(args) != 1:
raise BadRequest(resource="AddrPool operation", msg=url)
# get the body
json_body = req.body
# get the context
context = req.context
values = json.loads(json_body)
            # required input parameters
valid_attributes = ['id']
# check the in values
recom_msg = self.validat_values(values, valid_attributes)
LOG.info(_("the in value body is %(body)s"), {"body": values})
# from rpc server get the snataddrpool in db and device
snataddrpool = self.manager.get_snataddrpool(context, recom_msg)
except Nca47Exception as e:
self.response.status = e.code
            LOG.error(_LE('Error exception! error info: %s'), e.message)
LOG.exception(e)
return tools.ret_info(e.code, e.message)
except MessagingException as exception:
self.response.status = 500
message = exception.value
return tools.ret_info(self.response.status, message)
except Exception as exception:
LOG.exception(exception)
self.response.status = 500
message = "the values of the body format error"
return tools.ret_info(self.response.status, message)
return snataddrpool
def list(self, req, *args, **kwargs):
"""get the all snataddrpool"""
url = req.url
try:
# get the right url
if len(args) != 1:
raise BadRequest(resource="AddrPool operation", msg=url)
# get the body
json_body = req.body
# get the context
context = req.context
values = json.loads(json_body)
            # required input parameters
valid_attributes = ['vfwname', 'tenant_id', 'dc_name',
'network_zone']
# check the in values
recom_msg = self.validat_values(values, valid_attributes)
LOG.info(_("the in value body is %(body)s"), {"body": values})
# from rpc server get the snataddrpool in db and device
snataddrpools = self.manager.get_snataddrpools(context, recom_msg)
except Nca47Exception as e:
self.response.status = e.code
            LOG.error(_LE('Error exception! error info: %s'), e.message)
LOG.exception(e)
return tools.ret_info(e.code, e.message)
except MessagingException as exception:
self.response.status = 500
message = exception.value
return tools.ret_info(self.response.status, message)
except Exception as exception:
LOG.exception(exception)
self.response.status = 500
message = "the values of the body format error"
return tools.ret_info(self.response.status, message)
return snataddrpools
def validat_values(self, values, valid_keys):
"""Non null input parameters"""
recom_msg = {}
        for key in valid_keys:
            if key not in values.keys():
                raise NonExistParam(param_name=key)
            if values[key] is None:
                raise ParamNull(param_name=key)
            if key == 'ipstart':
                if not tools._is_valid_ipv4_addr(values[key]):
                    raise ParamValueError(param_name=key)
            if key == 'ipend':
                if not tools._is_valid_ipv4_addr(values[key]):
                    raise ParamValueError(param_name=key)
            if key == 'slotip':
                if not tools._is_valid_slotip(values[key]):
                    raise ParamValueError(param_name=key)
            recom_msg[key] = values[key]
return recom_msg
| willowd878/nca47 | nca47/api/controllers/v1/firewall/fw_snat_addr_pool.py | Python | apache-2.0 | 8,191 |
from ircutils import dictapi
import time
import re
dictionary = dictapi.DictApi()
def chmsg(event, server):
global dictionary
msg = event['msg']
nick = event['nicka']
field = re.match('!mean (?P<word>.*)', msg)
if not field:
return
word = field.group('word')
ret = dictionary.lookup(word)
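    # send the definition back line by line, pausing briefly between messages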
for ind in ret.split('\n'):
server.send_msg(nick, ind)
time.sleep(0.5)
| iogf/candocabot | plugins/mean/mean.py | Python | apache-2.0 | 428 |
"""
SoftLayer.tests.managers.loadbal_tests
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:license: MIT, see LICENSE for more details.
"""
import SoftLayer
from SoftLayer import testing
VIRT_IP_SERVICE = ('SoftLayer_Network_Application_Delivery_Controller_'
'LoadBalancer_VirtualIpAddress')
class LoadBalancerTests(testing.TestCase):
def set_up(self):
self.lb_mgr = SoftLayer.LoadBalancerManager(self.client)
def test_get_lb_pkgs(self):
result = self.lb_mgr.get_lb_pkgs()
self.assertEqual(len(result), 12)
_filter = {
'items': {
'description': {
'operation': '*= Load Balancer'
}
}
}
self.assert_called_with('SoftLayer_Product_Package', 'getItems',
identifier=0,
filter=_filter)
def test_get_hc_types(self):
result = self.lb_mgr.get_hc_types()
self.assertEqual(len(result), 6)
service = ('SoftLayer_Network_Application_Delivery_Controller_'
'LoadBalancer_Health_Check_Type')
self.assert_called_with(service, 'getAllObjects')
def test_get_routing_methods(self):
result = self.lb_mgr.get_routing_methods()
self.assertEqual(len(result), 12)
service = ('SoftLayer_Network_Application_Delivery_Controller_'
'LoadBalancer_Routing_Method')
self.assert_called_with(service, 'getAllObjects')
def test_get_location(self):
id1 = self.lb_mgr._get_location('sjc01')
self.assertEqual(id1, 168642)
id2 = self.lb_mgr._get_location('dal05')
self.assertEqual(id2, 'FIRST_AVAILABLE')
def test_get_routing_types(self):
result = self.lb_mgr.get_routing_types()
self.assertEqual(len(result), 6)
service = ('SoftLayer_Network_Application_Delivery_Controller_'
'LoadBalancer_Routing_Type')
self.assert_called_with(service, 'getAllObjects')
def test_cancel_lb(self):
result = self.lb_mgr.cancel_lb(6327)
self.assertEqual(result, True)
self.assert_called_with('SoftLayer_Billing_Item', 'cancelService',
identifier=21370814)
def test_add_local_lb(self):
self.lb_mgr.add_local_lb(6327, 'sjc01')
args = ({
'complexType': 'SoftLayer_Container_Product_Order_Network_'
'LoadBalancer',
'quantity': 1,
'packageId': 0,
"location": 168642,
'prices': [{'id': 6327}]
},)
self.assert_called_with('SoftLayer_Product_Order', 'placeOrder',
args=args)
def test_get_local_lbs(self):
result = self.lb_mgr.get_local_lbs()
self.assertEqual(len(result), 0)
mask = 'mask[loadBalancerHardware[datacenter],ipAddress]'
self.assert_called_with('SoftLayer_Account', 'getAdcLoadBalancers',
mask=mask)
def test_get_local_lb(self):
result = self.lb_mgr.get_local_lb(22348)
self.assertEqual(result['id'], 22348)
mask = ('mask['
'loadBalancerHardware[datacenter], '
'ipAddress, virtualServers[serviceGroups'
'[routingMethod,routingType,services'
'[healthChecks[type], groupReferences,'
' ipAddress]]]]')
self.assert_called_with(VIRT_IP_SERVICE, 'getObject',
identifier=22348,
mask=mask)
def test_delete_service(self):
result = self.lb_mgr.delete_service(1234)
self.assertEqual(result, True)
service = ('SoftLayer_Network_Application_Delivery_Controller_'
'LoadBalancer_Service')
self.assert_called_with(service, 'deleteObject', identifier=1234)
def test_delete_service_group(self):
result = self.lb_mgr.delete_service_group(1234)
self.assertEqual(result, True)
service = ('SoftLayer_Network_Application_Delivery_Controller_'
'LoadBalancer_VirtualServer')
self.assert_called_with(service, 'deleteObject', identifier=1234)
def test_toggle_service_status(self):
result = self.lb_mgr.toggle_service_status(1234)
self.assertEqual(result, True)
service = ('SoftLayer_Network_Application_Delivery_Controller_'
'LoadBalancer_Service')
self.assert_called_with(service, 'toggleStatus', identifier=1234)
def test_edit_service(self):
self.lb_mgr.edit_service(12345, 1234, '9.9.9.9', 80, True, 21, 1)
_filter = {
'virtualServers': {
'serviceGroups': {
'services': {
'id': {
'operation': 1234
}
}
}
}
}
mask = 'mask[serviceGroups[services[groupReferences,healthChecks]]]'
self.assert_called_with(VIRT_IP_SERVICE, 'getVirtualServers',
identifier=12345,
filter=_filter,
mask=mask)
self.assert_called_with(VIRT_IP_SERVICE, 'editObject')
def test_add_service(self):
self.lb_mgr.add_service(12345, 50718, 123, 80, True, 21, 1)
mask = 'mask[virtualServers[serviceGroups[services[groupReferences]]]]'
self.assert_called_with(VIRT_IP_SERVICE, 'getObject',
mask=mask,
identifier=12345)
self.assert_called_with(VIRT_IP_SERVICE, 'editObject',
identifier=12345)
arg = self.calls(VIRT_IP_SERVICE, 'editObject')[0].args[0]
self.assertEqual(
len(arg['virtualServers'][0]['serviceGroups'][0]['services']),
2)
def test_edit_service_group(self):
self.lb_mgr.edit_service_group(12345,
group_id=50718,
allocation=100,
port=80,
routing_type=2,
routing_method=10)
mask = 'mask[virtualServers[serviceGroups[services[groupReferences]]]]'
self.assert_called_with(VIRT_IP_SERVICE, 'getObject',
identifier=12345,
mask=mask)
self.assert_called_with(VIRT_IP_SERVICE, 'getObject', identifier=12345)
def test_add_service_group(self):
self.lb_mgr.add_service_group(12345, 100, 80, 2, 10)
mask = 'mask[virtualServers[serviceGroups[services[groupReferences]]]]'
self.assert_called_with(VIRT_IP_SERVICE, 'getObject',
mask=mask,
identifier=12345)
self.assert_called_with(VIRT_IP_SERVICE, 'editObject',
identifier=12345)
arg = self.calls(VIRT_IP_SERVICE, 'editObject')[0].args[0]
self.assertEqual(len(arg['virtualServers']), 2)
def test_reset_service_group(self):
result = self.lb_mgr.reset_service_group(12345, group_id=50718)
self.assertEqual(result, True)
_filter = {'virtualServers': {'id': {'operation': 50718}}}
self.assert_called_with(VIRT_IP_SERVICE, 'getVirtualServers',
identifier=12345,
filter=_filter,
mask='mask[serviceGroups]')
service = ('SoftLayer_Network_Application_Delivery_Controller_'
'LoadBalancer_Service_Group')
self.assert_called_with(service, 'kickAllConnections',
identifier=51758)
| underscorephil/softlayer-python | tests/managers/loadbal_tests.py | Python | mit | 7,926 |
# -*- coding: utf-8 -*-
# Copyright (c) 2005 - 2013 Christopher Zorn
# See LICENSE.txt for details
import os
import sha
from twisted.trial import unittest
from twisted.words.protocols.jabber import jid
from twisted.internet import defer, reactor
from twisted.words.xish import xpath
try:
from twisted.words.protocols.jabber.component import IService
except:
from twisted.words.protocols.jabber.ijabber import IService
from twisted.words.protocols.jabber import component, xmlstream
from palaver import groupchat, palaver, muc
from palaver import dir_storage
from palaver.test import readlog
PASSWORD = 'palaveriscool'
HOSTNAME = 'palaver.localhost'
PORT = 5437
class DummyTransport:
def __init__(self, xmlparser):
#self.list = list
self.xmlparser = xmlparser
def write(self, bytes):
# should we reset or use the stream?
self.xmlparser.parse(bytes)
#self.list.append(elem)
def loseConnection(self, *args, **kwargs):
self.xmlparser._reset()
class XEP045Tests(unittest.TestCase):
"""
"""
def setUp(self):
"""
Set up harness and palaver connection to the harness
"""
# PALAVER set up
# set up Jabber Component
sm = component.buildServiceManager(HOSTNAME, PASSWORD,
("tcp:"+HOSTNAME+":"+str(PORT) ))
# Turn on verbose mode
palaver.LogService().setServiceParent(sm)
sadmins = ['[email protected]']
# allow for other storage in tests
st = dir_storage.Storage(spool='/tmp/palaver_test/')
st.sadmins = sadmins
self.groupchat_service = groupchat.GroupchatService(st)
c = IService(self.groupchat_service)
c.setServiceParent(sm)
self.room_service = groupchat.RoomService()
self.room_service.setServiceParent(self.groupchat_service)
IService(self.room_service).setServiceParent(sm)
self.admin_service = groupchat.AdminService()
self.admin_service.setServiceParent(self.groupchat_service)
IService(self.admin_service).setServiceParent(sm)
self.palaver_service = palaver.PalaverService()
self.palaver_service.setServiceParent(sm)
self.palaver_factory = sm.getFactory()
# set up xmlstream for palaver
self.wstream = readlog.XmlParser()
self.palaver_xs = self.palaver_factory.buildProtocol(None)
self.palaver_xs.transport = DummyTransport(self.wstream)
# Indicate that palaver is connected
self.palaver_xs.connectionMade()
self.palaver_xs.dataReceived("<stream:stream xmlns='jabber:component:accept' xmlns:stream='http://etherx.jabber.org/streams' from='localhost' id='12345'>")
hv = sha.new("%s%s" % ("12345", PASSWORD)).hexdigest()
self.assertEquals(str(self.wstream.entity.handshake), hv)
self.palaver_xs.dataReceived("<handshake/>")
# now trigger authd event
self.palaver_xs.dispatch(self.palaver_xs, xmlstream.STREAM_AUTHD_EVENT)
# check if the xmlstream was set and jabber id
self.assertEquals(self.palaver_service.xmlstream, self.palaver_xs)
self.assertEquals(self.palaver_service.jid, HOSTNAME)
def _waitForData(self, childNumber, d, timeout):
timeout -= 0.25
if len(self.wstream.entity.children)>=childNumber or timeout <= 0:
d.callback(True)
else:
reactor.callLater(0.25, self._waitForData, childNumber, d, timeout)
def _testCreate(self, test_elem, frm):
self.assertEquals(xpath.matches("/presence[@from='"+frm+"']/x[@xmlns='http://jabber.org/protocol/muc#user']/item[@role='moderator']", test_elem), 1)
def _clearElems(self):
while len(self.wstream.entity.children)>1:
self.wstream.entity.children.pop()
def doWait(self, cb, num, timeout=5):
d = defer.Deferred()
self._waitForData(num,d, timeout)
d.addCallback(cb)
return d
def _createRoom(self, frm, to):
CLIENT_XML = """<presence from='%s' to='%s'/>""" % (frm, to, )
self.palaver_xs.dataReceived(CLIENT_XML)
def test1stCreateRoom(self):
""" Test Create a Room .........................................................."""
def _cbCreateRoom(t):
self.assertEquals(t, True)
test_elem = self.wstream.entity.children.pop()
# Next element should be a presence broadcast
self.assertEquals(test_elem.name, 'presence')
frm = 'darkcave@%s/thirdwitch' % HOSTNAME
self._testCreate(test_elem, frm)
if len(self.wstream.entity.children)>1:
# Joining room instead of creating it
child_count = len(self.wstream.entity.children)
for i in range(1, child_count):
test_elem = self.wstream.entity.children.pop()
self.assertEquals(test_elem.name, 'presence')
self._createRoom('[email protected]/pda', 'darkcave@%s/thirdwitch' % (HOSTNAME, ))
return self.doWait(_cbCreateRoom, 2)
def testMucOwnerCreateRoom(self):
def testMucOwner(t):
test_elem = self.wstream.entity.children.pop()
self.assertEqual(test_elem.name, 'iq')
self.assertEqual(test_elem.getAttribute('type'), 'result')
self.assertEqual(test_elem.getAttribute('id'), '7')
self.assertEqual(3, len(test_elem.children))
def _cbCreateRoom(t):
self.assertEquals(t, True)
test_elem = self.wstream.entity.children.pop()
# Next element should be a presence broadcast
self.assertEquals(test_elem.name, 'presence')
frm = 'darkcave@%s/thirdwitch' % HOSTNAME
self._testCreate(test_elem, frm)
if len(self.wstream.entity.children)>1:
# Joining room instead of creating it
child_count = len(self.wstream.entity.children)
for i in range(1, child_count):
test_elem = self.wstream.entity.children.pop()
self.assertEquals(test_elem.name, 'presence')
self.palaver_xs.dataReceived("""
<iq to="darkcave@%s" type="get" id="7" from="[email protected]/pda">
<query xmlns="http://jabber.org/protocol/muc#owner"/>
</iq>
""" % (HOSTNAME, ))
return self.doWait(testMucOwner, 2)
self._createRoom('[email protected]/pda', 'darkcave@%s/thirdwitch' % (HOSTNAME, ))
return self.doWait(_cbCreateRoom, 2)
def testLeaveAndDeleteRoom(self):
""" Test leave and delete a room .........................................................."""
def test109(t):
test_elem = self.wstream.entity.children.pop()
self.failUnless(xpath.matches("/presence/x/status[@code='201']", test_elem), 'Invalid room create.')
def testRoom(t):
self._clearElems()
# join the room again and see if we get the status code
CLIENT_XML = """<presence from='%s' to='%s'>
<x xmlns='http://jabber.org/protocol/muc'/>
</presence>""" % ('[email protected]/pda', 'delete@%s/thirdwitch' % (HOSTNAME, ))
self.palaver_xs.dataReceived(CLIENT_XML)
return self.doWait(test109, 2)
def leaveRoom(t):
test_elem = self.wstream.entity.children.pop()
self.failUnless(xpath.matches("/iq[@type='result']", test_elem), 'Invalid iq result.')
self._clearElems()
CLIENT_XML = """<presence from='%s' to='%s' type='unavailable'>
<x xmlns='http://jabber.org/protocol/muc'/>
</presence>""" % ('[email protected]/pda', 'delete@%s/thirdwitch' % (HOSTNAME, ))
self.palaver_xs.dataReceived(CLIENT_XML)
return self.doWait(testRoom, 2)
def _cbCreateRoom(t):
self.assertEquals(t, True)
test_elem = self.wstream.entity.children.pop()
frm = 'delete@%s/thirdwitch' % HOSTNAME
self._testCreate(test_elem, frm)
# send config
CONFIG_XML = """<iq from='[email protected]/pda' id='arbiter_kds_9877' type='set' to='delete@%s'>
<query xmlns='http://jabber.org/protocol/muc#admin'>
<x xmlns='jabber:x:data' type='submit'>
<field var='FORM_TYPE'>
<value>http://jabber.org/protocol/muc#roomconfig</value>
</field>
<field var='muc#roomconfig_whois'><value>anyone</value></field>
</x></query></iq>""" % (HOSTNAME, )
self.palaver_xs.dataReceived(CONFIG_XML)
return self.doWait(leaveRoom, 2)
CLIENT_XML = """<presence from='%s' to='%s'>
<x xmlns='http://jabber.org/protocol/muc'/>
</presence>""" % ('[email protected]/pda', 'delete@%s/thirdwitch' % (HOSTNAME, ))
self.palaver_xs.dataReceived(CLIENT_XML)
return self.doWait(_cbCreateRoom, 2)
def testUnicodeMessages(self):
""" Test send strange chars to room ......................................................"""
def testRoom(t):
while len(self.wstream.entity.children)>1:
test_elem = self.wstream.entity.children.pop()
self.failUnless(test_elem.name == 'message', 'Not a message returned.')
self.failUnless(test_elem['type'] == 'groupchat', 'Error in message type')
def _cbCreateRoom(t):
self.assertEquals(t, True)
test_elem = self.wstream.entity.children.pop()
frm = 'unicode@%s/thirdwitch' % HOSTNAME
self._testCreate(test_elem, frm)
MESSAGE_XML = """<message from='[email protected]/pda' to='unicode@%s' type='groupchat' id='2822'>
<body>ä ö and ü %%</body>
</message> """ % (HOSTNAME,)
self.palaver_xs.dataReceived(MESSAGE_XML)
return self.doWait(testRoom, 2)
CLIENT_XML = """<presence from='%s' to='%s' />""" % ('[email protected]/pda', 'unicode@%s/thirdwitch' % (HOSTNAME, ))
self.palaver_xs.dataReceived(CLIENT_XML)
return self.doWait(_cbCreateRoom, 2)
def testNameSpaceMessages(self):
""" Test send strange chars to room ......................................................"""
def testRoom(t):
while len(self.wstream.entity.children)>1:
test_elem = self.wstream.entity.children.pop()
self.failUnless(test_elem.body.uri==test_elem.uri, 'uri is wrong')
self.failUnless(test_elem.name == 'message', 'Not a message returned.')
self.failUnless(test_elem['type'] == 'groupchat', 'Error in message type')
def _cbCreateRoom(t):
self.assertEquals(t, True)
test_elem = self.wstream.entity.children.pop()
frm = 'unicode@%s/thirdwitch' % HOSTNAME
self._testCreate(test_elem, frm)
MESSAGE_XML = """<message from='[email protected]/pda' to='unicode@%s' type='groupchat' id='2822'>
<body>yes, i know you do </body>
<nick xmlns="http://jabber.org/protocol/nick">cgrady</nick>
</message> """ % (HOSTNAME,)
self.palaver_xs.dataReceived(MESSAGE_XML)
return self.doWait(testRoom, 2)
CLIENT_XML = """<presence from='%s' to='%s' />""" % ('[email protected]/pda', 'unicode@%s/thirdwitch' % (HOSTNAME, ))
self.palaver_xs.dataReceived(CLIENT_XML)
return self.doWait(_cbCreateRoom, 2)
def test61(self):
""" Test Section 6.1 http://www.xmpp.org/extensions/xep-0045.html#disco-component """
def _cb61(t):
test_elem = self.wstream.entity.children.pop()
self.assertNotEquals(test_elem['type'],'error')
# test for correct namespace
self.assertEquals(test_elem.query.uri,'http://jabber.org/protocol/disco#info')
got_muc = False
for f in test_elem.query.elements():
if f.name == 'feature' and f['var'] == 'http://jabber.org/protocol/muc':
got_muc = True
self.assertEquals(got_muc, True)
CLIENT_XML = """
<iq from='[email protected]/pda' xmlns='jabber:client'
id='disco1'
to='%s'
type='get'>
<query xmlns='http://jabber.org/protocol/disco#info'/>
</iq>
""" % (HOSTNAME)
self.palaver_xs.dataReceived(CLIENT_XML)
return self.doWait(_cb61, 2)
def test62(self):
""" Test Section 6.2 http://www.xmpp.org/extensions/xep-0045.html#disco-rooms ..."""
def _cb62(t):
test_elem = self.wstream.entity.children.pop()
self.assertNotEquals(test_elem['type'],'error')
# test for correct namespace
self.assertEquals(test_elem.query.uri,'http://jabber.org/protocol/disco#items')
def _doDisco(t):
while len(self.wstream.entity.children)>1:
self.wstream.entity.children.pop()
CLIENT_XML = """
<iq from='[email protected]/pda' xmlns='jabber:client'
id='disco1'
to='%s'
type='get'>
<query xmlns='http://jabber.org/protocol/disco#items'/>
</iq>
""" % (HOSTNAME)
self.palaver_xs.dataReceived(CLIENT_XML)
return self.doWait(_cb62, 2)
PRESENCE_XML = """
<presence
from='[email protected]/throne'
to='lusófonos@%s/king' />
""" % (HOSTNAME, )
self.palaver_xs.dataReceived(PRESENCE_XML)
return self.doWait(_doDisco, 2)
def test63(self):
""" Test Section 6.3 http://www.xmpp.org/extensions/xep-0045.html#disco-roominfo."""
def _cb63(t):
test_elem = self.wstream.entity.children.pop()
self.assertNotEquals(test_elem['type'],'error')
# test for correct namespace
self.assertEquals(test_elem.query.uri,'http://jabber.org/protocol/disco#info')
# TODO - add more tests to this
# palaver returns extended disco
CLIENT_XML = """
<iq from='[email protected]/pda' xmlns='jabber:client'
id='disco3'
to='darkcave@%s'
type='get'>
<query xmlns='http://jabber.org/protocol/disco#info'/>
</iq>
""" % (HOSTNAME)
self.palaver_xs.dataReceived(CLIENT_XML)
return self.doWait(_cb63, 2)
def test64(self):
""" Test Section 6.4 http://www.xmpp.org/extensions/xep-0045.html#disco-roomitems"""
def _cb64(t):
test_elem = self.wstream.entity.children.pop()
self.assertEquals(test_elem['type'],'result')
self.assertEquals(test_elem['id'],'disco4')
# TODO - add test for public and private items
DISCO_ITEMS_XML = """
<iq from='[email protected]/pda'
id='disco4'
to='darkcave@%s'
type='get'>
<query xmlns='http://jabber.org/protocol/disco#items'/>
</iq>
""" % (HOSTNAME,)
self.palaver_xs.dataReceived(DISCO_ITEMS_XML)
return self.doWait(_cb64, 2)
def test65(self):
""" Test Section 6.5 http://www.xmpp.org/extensions/xep-0045.html#disco-occupant."""
def _eb65(t):
test_elem = self.wstream.entity.children.pop()
self.assertEquals(test_elem['type'],'error')
self.assertEquals(test_elem['id'],'disco6')
self.assertEquals(getattr(test_elem.error,'bad-request').name,'bad-request')
def _cb65(t):
test_elem = self.wstream.entity.children.pop()
self.assertEquals(test_elem['type'],'result')
self.assertEquals(test_elem['id'],'disco5')
# TODO - add test for public and private items
DISCO_ITEMS_XML = """
<iq from='[email protected]/pda'
id='disco6'
to='darkcave@%s/oldhag'
type='get'>
<query xmlns='http://jabber.org/protocol/disco#items'/>
</iq>
""" % (HOSTNAME,)
self.palaver_xs.dataReceived(DISCO_ITEMS_XML)
return self.doWait(_eb65, 2)
DISCO_ITEMS_XML = """
<iq from='[email protected]/pda'
id='disco5'
to='darkcave@%s/oldhag'
type='get'>
<query xmlns='http://jabber.org/protocol/disco#items'/>
</iq>
""" % (HOSTNAME,)
self.palaver_xs.dataReceived(DISCO_ITEMS_XML)
return self.doWait(_cb65, 2)
def test71(self):
""" Test Section 7.1 http://www.xmpp.org/extensions/xep-0045.html#enter ........."""
def _cbJoin(t):
child_count = len(self.wstream.entity.children)
found_from = False
for i in range(1, child_count):
test_elem = self.wstream.entity.children.pop()
self.assertEquals(test_elem.name, 'presence')
frm = 'darkcave@%s/palaver' % HOSTNAME
if test_elem['from'] == frm:
found_from = xpath.matches("/presence/x[@xmlns='http://jabber.org/protocol/muc#user']/item[@role='participant']", test_elem)
# TODO - add the rest of the section
self.failUnless(found_from, 'Did not find correct from presence.')
def sendJoin(t):
while len(self.wstream.entity.children)>1:
test_elem = self.wstream.entity.children.pop()
PRESENCE_XML = """
<presence
from='[email protected]/pda'
to='darkcave@%s/palaver'/>
""" % (HOSTNAME,)
self.palaver_xs.dataReceived(PRESENCE_XML)
return self.doWait(_cbJoin, 2)
PRESENCE_XML = """
<presence
from='[email protected]/pda'
to='darkcave@%s/test71'/>
""" % (HOSTNAME,)
self.palaver_xs.dataReceived(PRESENCE_XML)
return self.doWait(sendJoin, 2)
def test71a(self):
""" Test Section 7.1.1 http://www.xmpp.org/extensions/xep-0045.html#enter-gc ...."""
def _cbJoin(t):
test_elem = self.wstream.entity.children.pop()
self.assertEquals(xpath.matches("/presence[@type='error']/error[@code='400']/jid-malformed", test_elem), 1)
PRESENCE_XML = """
<presence
from='[email protected]/pda'
to='darkcave@%s'/>
""" % (HOSTNAME,)
self.palaver_xs.dataReceived(PRESENCE_XML)
return self.doWait(_cbJoin, 2)
def test71b(self):
""" Test Section 7.1.3 http://www.xmpp.org/extensions/xep-0045.html#enter-pres ."""
def _cbJoin(t):
test_elem = self.wstream.entity.children.pop()
self.assertEquals(xpath.matches("/presence[@from='newcave@%s/palaver']/x[@xmlns='http://jabber.org/protocol/muc#user']/item[@affiliation='owner']"%(HOSTNAME,), test_elem), 1)
PRESENCE_XML = """
<presence
from='[email protected]/pda'
to='newcave@%s/palaver'/>
""" % (HOSTNAME,)
self.palaver_xs.dataReceived(PRESENCE_XML)
return self.doWait(_cbJoin, 2)
def testHistoryOrder(self):
""" Test to make sure presence comes before history. ."""
def finish(t):
while len(self.wstream.entity.children)>1:
test_elem = self.wstream.entity.children.pop()
self.assertEqual(test_elem.name, 'presence')
def testHistory(t):
test_elem = self.wstream.entity.children.pop()
self.failUnless(test_elem.name == 'message', 'Messages need to be last')
mtest = filter(lambda el: xpath.matches("/message" , el), self.wstream.entity.children)
#self.failUnless(len(mtest)==4,'Did not get the correct number of messages')
ptest = filter(lambda el: xpath.matches("/presence" , el), self.wstream.entity.children)
#self.failUnless(len(ptest)==10,'Did not get the correct number of presence stanzas')
while len(self.wstream.entity.children)>1:
test_elem = self.wstream.entity.children.pop()
# leave room
PRESENCE_XML = """
<presence
from='[email protected]/pda'
to='historyorder@%s/palaverHistory' type='unavailable'/>
<presence
from='[email protected]/pda'
to='historyorder@%s/history' type='unavailable'/>
""" % (HOSTNAME, HOSTNAME, )
self.palaver_xs.dataReceived(PRESENCE_XML)
return self.doWait(finish, 4)
def sendPresence(t):
while len(self.wstream.entity.children)>1:
test_elem = self.wstream.entity.children.pop()
self.assertEqual(test_elem.name, 'message')
PRESENCE_XML = """
<presence
from='[email protected]/pda'
to='historyorder@%s/history'/>
""" % (HOSTNAME,)
self.palaver_xs.dataReceived(PRESENCE_XML)
return self.doWait(testHistory, 14)
def sendMessages(t):
while len(self.wstream.entity.children)>1:
test_elem = self.wstream.entity.children.pop()
# send messages
MESSAGE_XML = """
<message xmlns='jabber:client' to='historyorder@%s' from='[email protected]/pda' type='groupchat'>
<body>3</body>
</message>
<message xmlns='jabber:client' to='historyorder@%s' from='[email protected]/pda' type='groupchat'>
<body>2</body>
</message>
<message xmlns='jabber:client' to='historyorder@%s' from='[email protected]/pda' type='groupchat'>
<body>1</body>
</message>
<message xmlns='jabber:client' to='historyorder@%s' from='[email protected]/pda' type='groupchat'>
<body>contact</body>
</message>
""" % (HOSTNAME, HOSTNAME, HOSTNAME, HOSTNAME)
self.palaver_xs.dataReceived(MESSAGE_XML)
return self.doWait(sendPresence, 16)
PRESENCE_XML = """
<presence
from='[email protected]/pda'
to='historyorder@%s/palaverHistory'/>
""" % (HOSTNAME,)
self.palaver_xs.dataReceived(PRESENCE_XML)
return self.doWait(sendMessages, 2)
def testHistoryMaxStanzas(self):
""" Test to make sure we only get the history we want. ."""
def finish(t):
while len(self.wstream.entity.children)>1:
test_elem = self.wstream.entity.children.pop()
self.assertEqual(test_elem.name, 'presence')
def testHistory(t):
mtest = filter(lambda el: xpath.matches("/message" , el), self.wstream.entity.children)
self.failUnless(len(mtest)==1,'Did not get the correct number of messages')
while len(self.wstream.entity.children)>1:
test_elem = self.wstream.entity.children.pop()
# leave room
PRESENCE_XML = """
<presence
from='[email protected]/pda'
to='historymanage@%s/palaverHistory' type='unavailable'/>
<presence
from='[email protected]/pda'
to='historymanage@%s/history' type='unavailable'/>
""" % (HOSTNAME, HOSTNAME, )
self.palaver_xs.dataReceived(PRESENCE_XML)
return self.doWait(finish, 4)
def sendPresence(t):
while len(self.wstream.entity.children)>1:
test_elem = self.wstream.entity.children.pop()
self.assertEqual(test_elem.name, 'message')
PRESENCE_XML = """
<presence
from='[email protected]/pda'
to='historymanage@%s/history'>
<x xmlns='http://jabber.org/protocol/muc'>
<history maxstanzas='1'/>
</x>
</presence>
""" % (HOSTNAME,)
self.palaver_xs.dataReceived(PRESENCE_XML)
return self.doWait(testHistory, 14)
def sendMessages(t):
while len(self.wstream.entity.children)>1:
test_elem = self.wstream.entity.children.pop()
# send messages
MESSAGE_XML = """
<message xmlns='jabber:client' to='historymanage@%s' from='[email protected]/pda' type='groupchat'>
<body>3</body>
</message>
<message xmlns='jabber:client' to='historymanage@%s' from='[email protected]/pda' type='groupchat'>
<body>2</body>
</message>
<message xmlns='jabber:client' to='historymanage@%s' from='[email protected]/pda' type='groupchat'>
<body>1</body>
</message>
<message xmlns='jabber:client' to='historymanage@%s' from='[email protected]/pda' type='groupchat'>
<body>contact</body>
</message>
""" % (HOSTNAME, HOSTNAME, HOSTNAME, HOSTNAME)
self.palaver_xs.dataReceived(MESSAGE_XML)
return self.doWait(sendPresence, 16)
PRESENCE_XML = """
<presence
from='[email protected]/pda'
to='historymanage@%s/palaverHistory'/>
""" % (HOSTNAME,)
self.palaver_xs.dataReceived(PRESENCE_XML)
return self.doWait(sendMessages, 2)
def testInvalidNick(self):
""" Test for no resource to='[email protected]@chat.chesspark.com' .... """
def _cbJoin(t):
test_elem = self.wstream.entity.children.pop()
self.assertEquals(xpath.matches("/presence[@type='error']/error[@code='400']/jid-malformed", test_elem), 1)
PRESENCE_XML = """
<presence
from='[email protected]/pda'
to='darkcave@%s@%s'/>
""" % (HOSTNAME, HOSTNAME)
self.palaver_xs.dataReceived(PRESENCE_XML)
return self.doWait(_cbJoin, 2)
def test72(self):
""" Test Section 7.2 http://www.xmpp.org/extensions/xep-0045.html#exit .........."""
def _cbLeave(t):
child_count = len(self.wstream.entity.children)
for i in range(1, child_count):
test_elem = self.wstream.entity.children.pop()
self.assertEquals(test_elem.name, 'presence')
if test_elem['from'] == 'darkcave@%s/palaver' % (HOSTNAME,):
self.assertEquals(xpath.matches("/presence[@type='unavailable']/x[@xmlns='http://jabber.org/protocol/muc#user']/item[@role='none']", test_elem), 1)
PRESENCE_XML = """<presence
from='[email protected]/pda'
to='darkcave@%s/palaver'
type='unavailable'/>""" % (HOSTNAME,)
self.palaver_xs.dataReceived(PRESENCE_XML)
return self.doWait(_cbLeave, 2)
def test73(self):
""" Test Section 7.3 http://www.xmpp.org/extensions/xep-0045.html#changenick ...."""
def _cbJoin(t):
child_count = len(self.wstream.entity.children)
for i in range(1, child_count):
test_elem = self.wstream.entity.children.pop()
self.assertEquals(test_elem.name, 'presence')
frm = 'darkcave@%s/change_nick' % HOSTNAME
if test_elem['from'] == frm:
self.assertEquals(xpath.matches("/presence/x[@xmlns='http://jabber.org/protocol/muc#user']/item[@role='participant']", test_elem), 1)
if test_elem['from'] == 'darkcave@%s/palaver' % (HOSTNAME,):
self.assertEquals(xpath.matches("/presence[@type='unavailable']/x[@xmlns='http://jabber.org/protocol/muc#user']/item[@role='participant']", test_elem), 1)
self.assertEquals(xpath.matches("/presence[@type='unavailable']/x[@xmlns='http://jabber.org/protocol/muc#user']/status[@code='303']", test_elem), 1)
# TODO - add the rest of the section
def _doTest(t):
PRESENCE_XML = """
<presence
from='[email protected]/pda'
to='darkcave@%s/change_nick'/>
""" % (HOSTNAME,)
self.palaver_xs.dataReceived(PRESENCE_XML)
return self.doWait(_cbJoin, 2)
PRESENCE_XML = """
<presence
from='[email protected]/pda'
to='darkcave@%s/testingtesting'/>
""" % (HOSTNAME,)
self.palaver_xs.dataReceived(PRESENCE_XML)
return self.doWait(_doTest, 2)
def test74(self):
""" Test Section 7.4 http://www.xmpp.org/extensions/xep-0045.html#changepres ...."""
def _cb74(t):
# grab elements to test
child_count = len(self.wstream.entity.children)
for i in range(1, child_count):
test_elem = self.wstream.entity.children.pop()
self.assertEquals(test_elem.name, 'presence')
if test_elem['from'] == 'darkcave@%s/oldhag' % (HOSTNAME,):
self.assertEquals(str(test_elem.status),'I am ready to discuss wikka')
self.assertEquals(str(test_elem.show),'chat')
if test_elem['from'] == 'darkcave@%s/testhag' % (HOSTNAME,):
self.assertEquals(xpath.matches("/presence/x[@xmlns='http://jabber.org/protocol/muc#user']/item[@role='participant']", test_elem), 1)
def _cbChangeStatus(t):
child_count = len(self.wstream.entity.children)
for i in range(1, child_count):
test_elem = self.wstream.entity.children.pop()
self.assertEquals(test_elem.name, 'presence')
if test_elem['from'] == 'darkcave@%s/oldhag' % (HOSTNAME,):
self.assertEquals(str(test_elem.status),'I am ready to discuss wikka')
self.assertEquals(str(test_elem.show),'chat')
PRESENCE_XML = """
<presence
from='[email protected]/laptop'
to='darkcave@%s/testhag' />
""" % (HOSTNAME, )
self.palaver_xs.dataReceived(PRESENCE_XML)
return self.doWait(_cb74, 3)
def _cbJoin(t):
child_count = len(self.wstream.entity.children)
for i in range(1, child_count):
test_elem = self.wstream.entity.children.pop()
self.assertEquals(test_elem.name, 'presence')
if test_elem['from'] == 'darkcave@%s/oldhag' % (HOSTNAME,):
self.assertEquals(str(test_elem.status),'gone where the goblins go')
self.assertEquals(str(test_elem.show),'xa')
CHANGE_STATUS_XML = """
<presence
from='[email protected]/laptop'
to='darkcave@%s/oldhag'>
<show>chat</show>
<status>I am ready to discuss wikka</status>
</presence>
""" % (HOSTNAME, )
self.palaver_xs.dataReceived(CHANGE_STATUS_XML)
return self.doWait(_cbChangeStatus, 3)
JOIN_STATUS_XML = """
<presence
from='[email protected]/laptop'
to='darkcave@%s/oldhag'>
<show>xa</show>
<status>gone where the goblins go</status>
</presence>
""" % (HOSTNAME, )
self.palaver_xs.dataReceived(JOIN_STATUS_XML)
return self.doWait(_cbJoin, 3)
def test75(self):
""" Test Section 7.5 http://www.xmpp.org/extensions/xep-0045.html#invite ...."""
def _cbInvite(t):
child_count = len(self.wstream.entity.children)
test_elem = self.wstream.entity.children.pop()
self.failUnless(test_elem.name=='message',
'Not a message returned')
self.failUnless(test_elem['to']=='[email protected]',
'The message was sent to the wrong person')
return True
def _cbJoin(t):
child_count = len(self.wstream.entity.children)
for i in range(1, child_count):
test_elem = self.wstream.entity.children.pop()
self.assertEquals(test_elem.name, 'presence')
if test_elem['from'] == 'darkcave@%s/oldhag' % (HOSTNAME,):
self.assertEquals(str(test_elem.status),'gone where the goblins go')
self.assertEquals(str(test_elem.show),'xa')
INVITE_XML = """
<message
from='[email protected]/desktop'
to='darkcave@%s'>
<x xmlns='http://jabber.org/protocol/muc#user'>
<invite to='[email protected]'>
<reason>
Hey Hecate, this is the place for all good witches!
</reason>
</invite>
</x>
</message>
""" % (HOSTNAME, )
self.palaver_xs.dataReceived(INVITE_XML)
return self.doWait(_cbInvite, 2)
JOIN_STATUS_XML = """
<presence
from='[email protected]/laptop'
to='darkcave@%s/oldhag'>
<show>xa</show>
<status>gone where the goblins go</status>
</presence>
""" % (HOSTNAME, )
self.palaver_xs.dataReceived(JOIN_STATUS_XML)
return self.doWait(_cbJoin, 3)
def test75BadInvite(self):
""" Test Section 7.5 http://www.xmpp.org/extensions/xep-0045.html#invite ...."""
def _cbInvite(t):
child_count = len(self.wstream.entity.children)
test_elem = self.wstream.entity.children.pop()
self.failUnless(test_elem.name=='message',
'Not a message returned')
self.failUnless(test_elem['type']=='error',
'Need an error here.')
def _cbJoin(t):
child_count = len(self.wstream.entity.children)
for i in range(1, child_count):
test_elem = self.wstream.entity.children.pop()
self.assertEquals(test_elem.name, 'presence')
if test_elem['from'] == 'darkcave@%s/oldhag' % (HOSTNAME,):
self.assertEquals(str(test_elem.status),'gone where the goblins go')
self.assertEquals(str(test_elem.show),'xa')
INVITE_XML = """
<message
from='[email protected]/desktop'
to='darkcave@%s'>
<x xmlns='http://jabber.org/protocol/muc#user'>
<invite to='@shakespeare.lit'>
<reason>
Hey Hecate, this is the place for all good witches!
</reason>
</invite>
</x>
</message>
""" % (HOSTNAME, )
self.palaver_xs.dataReceived(INVITE_XML)
return self.doWait(_cbInvite, 2)
JOIN_STATUS_XML = """
<presence
from='[email protected]/laptop'
to='darkcave@%s/oldhag'>
<show>xa</show>
<status>gone where the goblins go</status>
</presence>
""" % (HOSTNAME, )
self.palaver_xs.dataReceived(JOIN_STATUS_XML)
return self.doWait(_cbJoin, 3)
def test79(self):
""" Test Section 7.9 http://www.xmpp.org/extensions/xep-0045.html#message ...."""
def _cbInvite(t):
mtest = filter(lambda el: xpath.matches("/message", el), self.wstream.entity.children)
self.failUnless(len(mtest)==2,'Did not get the correct number of messages')
user1 = filter(lambda el: xpath.matches("/message[@to='[email protected]/laptop']", el), mtest)
self.failUnless(len(user1)==1,'Did not get the correct number of messages')
user2 = filter(lambda el: xpath.matches("/message[@to='[email protected]/laptop']", el), mtest)
self.failUnless(len(user2)==1,'Did not get the correct number of messages')
while len(self.wstream.entity.children)>1:
test_elem = self.wstream.entity.children.pop()
def _cbJoin(t):
ptest = filter(lambda el: xpath.matches("/presence", el), self.wstream.entity.children)
self.failUnless(len(ptest)>1, 'Invalid number of presence stanzas')
while len(self.wstream.entity.children)>1:
test_elem = self.wstream.entity.children.pop()
MESSAGE_XML = """
<message
from='[email protected]/laptop'
to='test79@%s' type='groupchat'>
<x xmlns='http://jabber.org/protocol/muc#user' />
<body>This is a test of the palaver broadcast system.</body>
</message>
""" % (HOSTNAME, )
self.palaver_xs.dataReceived(MESSAGE_XML)
return self.doWait(_cbInvite, 3)
def _cbJoin1(t):
JOIN_STATUS_XML = """
<presence
from='[email protected]/laptop'
to='test79@%s/79'>
</presence>
""" % (HOSTNAME, )
self.palaver_xs.dataReceived(JOIN_STATUS_XML)
return self.doWait(_cbJoin, 5)
JOIN_STATUS_XML = """
<presence
from='[email protected]/laptop'
to='test79@%s/oldhag'>
<show>xa</show>
<status>gone where the goblins go</status>
</presence>
""" % (HOSTNAME, )
self.palaver_xs.dataReceived(JOIN_STATUS_XML)
return self.doWait(_cbJoin1, 5)
def test81(self):
""" Test Section 8.1 http://www.xmpp.org/extensions/xep-0045.html#subject-mod """
def _cbInvite(t):
mtest = filter(lambda el: xpath.matches("/message", el), self.wstream.entity.children)
self.failUnless(len(mtest)==2,'Did not get the correct number of messages')
user1 = filter(lambda el: xpath.matches("/message[@to='[email protected]/laptop']/subject", el), mtest)
self.failUnless(len(user1)==1,'Did not get the correct number of messages')
user2 = filter(lambda el: xpath.matches("/message[@to='[email protected]/laptop']/subject[text()='This is a test of the palaver broadcast system.']", el), mtest)
self.failUnless(len(user2)==1,'Did not get the correct number of messages')
while len(self.wstream.entity.children)>1:
test_elem = self.wstream.entity.children.pop()
def _cbJoin(t):
ptest = filter(lambda el: xpath.matches("/presence", el), self.wstream.entity.children)
self.failUnless(len(ptest)>1, 'Invalid number of presence stanzas')
while len(self.wstream.entity.children)>1:
test_elem = self.wstream.entity.children.pop()
MESSAGE_XML = """
<message
from='[email protected]/laptop'
to='test79@%s' type='groupchat'>
<x xmlns='http://jabber.org/protocol/muc#user' />
<subject>This is a test of the palaver broadcast system.</subject>
</message>
""" % (HOSTNAME, )
self.palaver_xs.dataReceived(MESSAGE_XML)
return self.doWait(_cbInvite, 3)
def _cbJoin1(t):
JOIN_STATUS_XML = """
<presence
from='[email protected]/laptop'
to='test79@%s/79'>
</presence>
""" % (HOSTNAME, )
self.palaver_xs.dataReceived(JOIN_STATUS_XML)
return self.doWait(_cbJoin, 5)
JOIN_STATUS_XML = """
<presence
from='[email protected]/laptop'
to='test79@%s/oldhag'>
<show>xa</show>
<status>gone where the goblins go</status>
</presence>
""" % (HOSTNAME, )
self.palaver_xs.dataReceived(JOIN_STATUS_XML)
return self.doWait(_cbJoin1, 5)
def testKickMessage(self):
""" Test if user can still chat after kicking ."""
def _checkError(t):
test_elem = self.wstream.entity.children.pop()
self.assertEquals(test_elem['type'],'error')
self.failUnless(getattr(test_elem.error,'not-authorized',False),
'Bad error result')
def _cbTestKick(t):
test_elem = self.wstream.entity.children.pop()
self.assertEquals(test_elem['type'],'result')
test_elem = self.wstream.entity.children.pop()
self.failUnless(test_elem.hasAttribute('type'),
'Presence does not have a type attribute')
self.assertEquals(test_elem['type'],'unavailable')
for c in test_elem.elements():
if c.name == 'x' and c.uri == 'http://jabber.org/protocol/muc#user':
self.assertEquals(c.item['affiliation'],'none')
self.assertEquals(c.item['role'],'none')
test_elem = self.wstream.entity.children.pop()
self.failUnless(test_elem.hasAttribute('type'),
'Presence does not have a type attribute')
self.assertEquals(test_elem['type'],'unavailable')
for c in test_elem.elements():
if c.name == 'x' and c.uri == 'http://jabber.org/protocol/muc#user':
self.assertEquals(c.item['affiliation'],'none')
self.assertEquals(c.item['role'],'none')
# send messages
MESSAGE_XML = """
<message xmlns='jabber:client' to='testkick@%s' from='[email protected]/throne' type='groupchat'>
<body>3</body>
</message>
""" % (HOSTNAME, )
self.palaver_xs.dataReceived(MESSAGE_XML)
return self.doWait(_checkError, 2)
def _kick(t):
for i in range(0, 3):
test_elem = self.wstream.entity.children.pop()
self.assertEquals(test_elem.name, 'presence')
BAN_XML = """<iq from='[email protected]/throne'
id='ban1'
to='testkick@%s'
type='set'>
<query xmlns='http://jabber.org/protocol/muc#admin'>
<item role='none'
jid='[email protected]'>
<reason>Treason</reason>
</item>
</query>
</iq>""" % (HOSTNAME,)
self.palaver_xs.dataReceived(BAN_XML)
return self.doWait(_cbTestKick, 4)
def _create(t):
test_elem = self.wstream.entity.children.pop()
frm = 'testkick@%s/king' % HOSTNAME
self._testCreate(test_elem, frm)
PRESENCE_XML = """
<presence
from='[email protected]/throne'
to='testkick@%s/kingoftown' />
""" % (HOSTNAME, )
self.palaver_xs.dataReceived(PRESENCE_XML)
return self.doWait(_kick, 3)
PRESENCE_XML = """
<presence
from='[email protected]/throne'
to='testkick@%s/king' />
""" % (HOSTNAME, )
self.palaver_xs.dataReceived(PRESENCE_XML)
return self.doWait(_create, 2)
def test91(self):
""" Test section 9.1 http://www.xmpp.org/extensions/xep-0045.html#ban """
def _checkDestroy(r):
miq = filter(lambda el: xpath.matches("/iq[@type='result']" , el), self.wstream.entity.children)
self.failUnless(len(miq)==1, 'Did not get a destroy result')
def _checkPresenceError(t):
test_elem = self.wstream.entity.children.pop()
self.failUnless(xpath.matches("/presence[@type='error']/error", test_elem), 'Presence needs to be an error')
while len(self.wstream.entity.children)>1:
test_elem = self.wstream.entity.children.pop()
ADMIN_XML = """<iq from='[email protected]/throne'
id='admin1'
to='southhampton@%s'
type='set'>
<query xmlns='http://jabber.org/protocol/muc#admin'>
<destroy jid='southhampton@%s'>
<reason>Macbeth doth come.</reason>
</destroy>
</query>
</iq>""" % (HOSTNAME, HOSTNAME)
self.palaver_xs.dataReceived(ADMIN_XML)
return self.doWait(_checkDestroy, 2)
def _checkMessageError(t):
test_elem = self.wstream.entity.children.pop()
self.failUnless(xpath.matches("/message[@type='error']/error", test_elem), 'Message needs to be an error')
while len(self.wstream.entity.children)>1:
test_elem = self.wstream.entity.children.pop()
PRESENCE_XML = """
<presence
from='[email protected]/throne'
to='southhampton@%s/kingoftown' />
""" % (HOSTNAME, )
self.palaver_xs.dataReceived(PRESENCE_XML)
return self.doWait(_checkPresenceError, 3)
def _cb91(t):
test_elem = self.wstream.entity.children.pop()
self.assertEquals(test_elem['type'],'result')
test_elem = self.wstream.entity.children.pop()
self.assertEquals(test_elem['type'],'unavailable')
for c in test_elem.elements():
if c.name == 'x' and c.uri == 'http://jabber.org/protocol/muc#user':
self.assertEquals(c.item['affiliation'],'outcast')
self.assertEquals(c.item['role'],'none')
self.assertEquals(str(c.item.reason),'Treason')
self.assertEquals(c.status['code'],'301')
# test if we can send a message after the ban
MESSAGE_XML = """
<message xmlns='jabber:client' to='southhampton@%s' from='[email protected]/throne' type='groupchat'>
<body>3</body>
</message>
""" % (HOSTNAME, )
self.palaver_xs.dataReceived(MESSAGE_XML)
return self.doWait(_checkMessageError, 3)
def _ban(t):
for i in range(0, 3):
test_elem = self.wstream.entity.children.pop()
self.assertEquals(test_elem.name, 'presence')
BAN_XML = """<iq from='[email protected]/throne'
id='ban1'
to='southhampton@%s'
type='set'>
<query xmlns='http://jabber.org/protocol/muc#admin'>
<item affiliation='outcast'
jid='[email protected]'>
<reason>Treason</reason>
</item>
</query>
</iq>""" % (HOSTNAME,)
self.palaver_xs.dataReceived(BAN_XML)
return self.doWait(_cb91, 2)
def _create(t):
test_elem = self.wstream.entity.children.pop()
frm = 'southhampton@%s/king' % HOSTNAME
self._testCreate(test_elem, frm)
PRESENCE_XML = """
<presence
from='[email protected]/throne'
to='southhampton@%s/kingoftown' />
""" % (HOSTNAME, )
self.palaver_xs.dataReceived(PRESENCE_XML)
return self.doWait(_ban, 3)
PRESENCE_XML = """
<presence
from='[email protected]/throne'
to='southhampton@%s/king' />
""" % (HOSTNAME, )
self.palaver_xs.dataReceived(PRESENCE_XML)
return self.doWait(_create, 2)
def testE100ToE103(self):
""" Test section 9.2 http://www.xmpp.org/extensions/xep-0045.html#modifyban ....."""
def _removeNoneParticipant(t):
test_elem = self.wstream.entity.children.pop()
self.assertEquals(jid.internJID(test_elem['from']).userhost(),'southhampton@%s' % (HOSTNAME,))
self.assertEquals(test_elem['type'],'result')
self.assertEquals(test_elem['id'],'removeban4')
def _checkRemove(t):
test_elem = self.wstream.entity.children.pop()
self.assertEquals(jid.internJID(test_elem['from']).userhost(),'southhampton@%s' % (HOSTNAME,))
self.assertEquals(test_elem['type'],'result')
test_elem = self.wstream.entity.children.pop()
REMOVE_XML = """
<iq from='[email protected]/throne'
id='removeban4'
to='southhampton@%s'
type='set'>
<query xmlns='http://jabber.org/protocol/muc#admin'>
<item affiliation='none'
jid='[email protected]' />
</query>
</iq>
""" % (HOSTNAME, )
self.palaver_xs.dataReceived(REMOVE_XML)
return self.doWait(_removeNoneParticipant, 3)
def _remove(t):
miq = filter(lambda el: xpath.matches("/iq[@type='result']" , el), self.wstream.entity.children)
self.failUnless(len(miq)==1, 'Did not get a result')
self.assertEquals(jid.internJID(miq[0]['from']).userhost(),'southhampton@%s' % (HOSTNAME,))
self.assertEquals(miq[0]['type'],'result')
# pop the rest
while len(self.wstream.entity.children)>1:
test_elem = self.wstream.entity.children.pop()
REMOVE_XML = """
<iq from='[email protected]/throne'
id='removeban3'
to='southhampton@%s'
type='set'>
<query xmlns='http://jabber.org/protocol/muc#admin'>
<item affiliation='none'
jid='[email protected]' />
</query>
</iq>
""" % (HOSTNAME, )
self.palaver_xs.dataReceived(REMOVE_XML)
return self.doWait(_checkRemove, 3)
def _modify(t):
miq = filter(lambda el: xpath.matches("/iq[@type='result']/query/item[@affiliation='outcast']" % (), el), self.wstream.entity.children)
self.failUnless(len(miq)==1, 'Did not get the correct outcast result')
self.assertEquals(jid.internJID(miq[0]['from']).userhost(),'southhampton@%s' % (HOSTNAME,))
self.failUnless(miq[0].hasAttribute('type'), 'Wrong Attribute Type')
self.assertEquals(miq[0]['type'],'result')
self.assertEquals(miq[0].query.item['affiliation'],'outcast')
self.assertEquals(miq[0].query.item['jid'],'[email protected]')
self.failUnless(str(miq[0].query.item.reason)=='Treason',
'Reason was not returned')
while len(self.wstream.entity.children)>1:
test_elem = self.wstream.entity.children.pop()
MODIFY_XML = """
<iq from='[email protected]/throne'
id='ban3'
to='southhampton@%s'
type='set'>
<query xmlns='http://jabber.org/protocol/muc#admin'>
<item affiliation='outcast'
jid='[email protected]'>
<reason>Treason</reason>
</item>
                     <item affiliation='outcast'
                           jid='[email protected]'>
<reason>Treason</reason>
</item>
<item affiliation='outcast'
jid='[email protected]'>
<reason>Treason</reason>
</item>
</query>
</iq>
""" % (HOSTNAME,)
self.palaver_xs.dataReceived(MODIFY_XML)
return self.doWait(_remove, 3)
def _first_ban_result(t):
test_elem = self.wstream.entity.children.pop()
self.failUnless(xpath.matches("/iq[@type='result']", test_elem), 'Error in ban result.')
GET_XML = """
<iq from='[email protected]/throne'
id='ban2'
to='southhampton@%s'
type='get'>
<query xmlns='http://jabber.org/protocol/muc#admin'>
<item affiliation='outcast' />
</query>
</iq>""" % (HOSTNAME,)
self.palaver_xs.dataReceived(GET_XML)
return self.doWait(_modify, 4)
def _do_first_ban(t):
# pop off presence
self._clearElems()
BAN_XML = """<iq from='[email protected]/throne'
id='ban1'
to='southhampton@%s'
type='set'>
<query xmlns='http://jabber.org/protocol/muc#admin'>
<item affiliation='outcast'
jid='[email protected]'>
<reason>Treason</reason>
</item>
</query>
</iq>""" % (HOSTNAME,)
self.palaver_xs.dataReceived(BAN_XML)
return self.doWait(_first_ban_result, 4)
PRESENCE_XML = """
<presence
from='[email protected]/throne'
to='southhampton@%s/king' />
""" % (HOSTNAME, )
self.palaver_xs.dataReceived(PRESENCE_XML)
return self.doWait(_do_first_ban, 3)
def test93(self):
""" Test section 9.3 http://www.xmpp.org/extensions/xep-0045.html#grantmember ..........."""
def _cb93(t):
test_elem = self.wstream.entity.children.pop()
self.failUnless(xpath.matches("/iq[@type='result']/query", test_elem), 'Error in member add result.')
def _create(t):
test_elem = self.wstream.entity.children.pop()
frm = 'membertest@%s/king' % HOSTNAME
self._testCreate(test_elem, frm)
while len(self.wstream.entity.children)>1:
test_elem = self.wstream.entity.children.pop()
MEMBER_XML = """
<iq from='[email protected]/throne'
id='member1'
to='membertest@%s'
type='set'>
<query xmlns='http://jabber.org/protocol/muc#admin'>
<item affiliation='member'
jid='[email protected]'/>
</query>
</iq>
""" % (HOSTNAME, )
self.palaver_xs.dataReceived(MEMBER_XML)
return self.doWait(_cb93, 3)
PRESENCE_XML = """
<presence
from='[email protected]/throne'
to='membertest@%s/king' />
""" % (HOSTNAME, )
self.palaver_xs.dataReceived(PRESENCE_XML)
return self.doWait(_create, 2)
def test96(self):
""" Test section 9.6 http://www.xmpp.org/extensions/xep-0045.html#grantmod ..........."""
def _cb96(t):
test_elem = self.wstream.entity.children.pop()
self.failUnless(xpath.matches("/iq[@type='result']/query", test_elem), 'Error in moderator result.')
while len(self.wstream.entity.children)>1:
test_elem = self.wstream.entity.children.pop()
self.failUnless(xpath.matches("/presence[@from='modtest@%s/witch']/x/item[@role='moderator']" % (HOSTNAME,), test_elem), 'Error in presence.')
def _setRole(t):
while len(self.wstream.entity.children)>1:
self.wstream.entity.children.pop()
MEMBER_XML = """
<iq from='[email protected]/throne'
id='member1'
to='modtest@%s'
type='set'>
<query xmlns='http://jabber.org/protocol/muc#admin'>
<item role='moderator'
nick='witch'/>
</query>
</iq>
""" % (HOSTNAME, )
self.palaver_xs.dataReceived(MEMBER_XML)
return self.doWait(_cb96, 3)
def _create(t):
test_elem = self.wstream.entity.children.pop()
frm = 'modtest@%s/king' % HOSTNAME
self._testCreate(test_elem, frm)
while len(self.wstream.entity.children)>1:
test_elem = self.wstream.entity.children.pop()
PRESENCE_XML = """
<presence
from='[email protected]/witch'
to='modtest@%s/witch' />
""" % (HOSTNAME, )
self.palaver_xs.dataReceived(PRESENCE_XML)
return self.doWait(_setRole, 2)
PRESENCE_XML = """
<presence
from='[email protected]/throne'
to='modtest@%s/king' />
""" % (HOSTNAME, )
self.palaver_xs.dataReceived(PRESENCE_XML)
return self.doWait(_create, 2)
def test106(self):
""" Test section 10.6 http://www.xmpp.org/extensions/xep-0045.html#grantadmin ..........."""
def _cb106(t):
test_elem = self.wstream.entity.children.pop()
self.assertEquals(test_elem['type'],'result')
test_elem = self.wstream.entity.children.pop()
for c in test_elem.elements():
if c.name == 'x' and c.uri == 'http://jabber.org/protocol/muc#user':
self.assertEquals(c.item['affiliation'],'admin')
self.assertEquals(c.item['role'],'moderator')
def _admin(t):
while len(self.wstream.entity.children)>1:
test_elem = self.wstream.entity.children.pop()
self.assertEquals(test_elem.name, 'presence')
ADMIN_XML = """<iq from='[email protected]/throne'
id='admin1'
to='admintest@%s'
type='set'>
<query xmlns='http://jabber.org/protocol/muc#admin'>
<item affiliation='admin'
jid='[email protected]'/>
</query>
</iq>""" % (HOSTNAME,)
self.palaver_xs.dataReceived(ADMIN_XML)
return self.doWait(_cb106, 4)
def _create(t):
test_elem = self.wstream.entity.children.pop()
frm = 'admintest@%s/king' % HOSTNAME
self._testCreate(test_elem, frm)
while len(self.wstream.entity.children)>1:
test_elem = self.wstream.entity.children.pop()
PRESENCE_XML = """
<presence
from='[email protected]/throne'
to='admintest@%s/kingoftown' />
""" % (HOSTNAME, )
self.palaver_xs.dataReceived(PRESENCE_XML)
return self.doWait(_admin, 3)
PRESENCE_XML = """
<presence
from='[email protected]/throne'
to='admintest@%s/king' />
""" % (HOSTNAME, )
self.palaver_xs.dataReceived(PRESENCE_XML)
return self.doWait(_create, 2)
def test109(self):
""" Test section 10.9 http://www.xmpp.org/extensions/xep-0045.html#destroyroom ..........."""
def _cb109(t):
ptest = filter(lambda el: xpath.matches("/presence[@type='unavailable']/x/item[@role='none']", el), self.wstream.entity.children)
            self.failUnless(len(ptest)==1, 'Presence was not sent when the user left the room.')
iqtest = filter(lambda el: xpath.matches("/iq[@type='result']", el), self.wstream.entity.children)
self.failUnless(len(iqtest)==1, 'Invalid iq result.')
def _admin(t):
while len(self.wstream.entity.children)>1:
test_elem = self.wstream.entity.children.pop()
self.assertEquals(test_elem.name, 'presence')
ADMIN_XML = """<iq from='[email protected]/throne'
id='admin1'
to='destroytest@%s'
type='set'>
<query xmlns='http://jabber.org/protocol/muc#admin'>
<destroy jid='destroytest@%s'>
<reason>Macbeth doth come.</reason>
</destroy>
</query>
</iq>""" % (HOSTNAME, HOSTNAME)
self.palaver_xs.dataReceived(ADMIN_XML)
return self.doWait(_cb109, 4)
PRESENCE_XML = """
<presence
from='[email protected]/throne'
to='destroytest@%s/king' />
""" % (HOSTNAME, )
self.palaver_xs.dataReceived(PRESENCE_XML)
return self.doWait(_admin, 2)
def testPresenceLeak(self):
""" Test to make sure presence does not leak. ."""
user_list = set()
def testLeave(t):
while len(self.wstream.entity.children)>1:
test_elem = self.wstream.entity.children.pop()
self.assertEquals(test_elem['type'], 'unavailable')
self.failUnless(test_elem['to'].lower() in user_list)
user_list.remove(test_elem['to'].lower())
# Test for leak, if all users did not get unavailable then there is a leak
self.assertEqual(0, len(user_list))
def testJoin(t):
send_one_to_users = 0
send_two_to_users = 0
while len(self.wstream.entity.children)>1:
test_elem = self.wstream.entity.children.pop()
if test_elem.name =='presence' and test_elem['from'] == 'leak@%s/One' % (HOSTNAME,) \
and test_elem['to'] != '[email protected]/testing':
send_one_to_users += 1
if test_elem.name =='presence' and test_elem['from'] == 'leak@%s/two' % (HOSTNAME,):
send_two_to_users += 1
user_list.add(test_elem['to'].lower())
self.failUnless(send_one_to_users >= 2, 'Not enough presence elements')
self.failUnless(send_two_to_users >= 3, 'Not enough presence elements')
PRESENCE_XML = """
<presence from='[email protected]/testing' to='leak@%s/one' type='unavailable'/>
""" % (HOSTNAME, )
self.palaver_xs.dataReceived(PRESENCE_XML)
return self.doWait(testLeave, 7)
def testLeak(t):
PRESENCE_XML = """
<presence from='[email protected]/testing' to='leak@%s/One' />
<presence from='[email protected]/testing' to='leak@%s/two' />
""" % (HOSTNAME, HOSTNAME)
self.palaver_xs.dataReceived(PRESENCE_XML)
return self.doWait(testJoin, 16)
self._createRoom('[email protected]/pda', 'leak@%s/thirdwitch' % (HOSTNAME, ))
return self.doWait(testLeak, 3)
def testPresenceRaceCondition(self):
"""
        This is a test for a race condition when someone leaves the room immediately after they join.
"""
def testJoin(t):
unavailable = False
test_elem = self.wstream.entity.children.pop()
if test_elem.name == 'presence' and \
test_elem.hasAttribute('type') and \
test_elem['type'] == 'unavailable':
unavailable = True
self.failUnless(unavailable,'Did NOT leave the room')
while len(self.wstream.entity.children)>1:
test_elem = self.wstream.entity.children.pop()
def testRace(t):
PRESENCE_XML = """
<presence from='[email protected]/testing' to='racetest@%s/RaceTest' />
<presence from='[email protected]/testing' type='unavailable' to='racetest@%s/RaceTest' />
""" % (HOSTNAME, HOSTNAME)
self.palaver_xs.dataReceived(PRESENCE_XML)
return self.doWait(testJoin, 18)
self._createRoom('[email protected]/pda', 'racetest@%s/thirdwitch' % (HOSTNAME, ))
return self.doWait(testRace, 3)
def testZDisconnect(self):
""" Test Disconnect ............................................................."""
self.palaver_xs.connectionLost(None)
def tearDown(self):
self.wstream = None
self.room_service = None
self.admin_service = None
pending = reactor.getDelayedCalls()
if pending:
for p in pending:
if p.active():
p.cancel()
def tearDownClass(self):
for root, dirs, files in os.walk('/tmp/palaver_test/'):
for f in files:
os.unlink(root+f)
os.rmdir('/tmp/palaver_test/')
def testServerAdminJoiningPrivateRoom(self):
""" Test Server Admin joining a private room .................."""
def test109(t):
test_elem = self.wstream.entity.children.pop()
self.failUnless(xpath.matches("/presence/x/item[@jid='[email protected]/pda']", test_elem), 'Invalid room join.')
def testRoom(t):
while len(self.wstream.entity.children)>1:
self.wstream.entity.children.pop()
# join the room again and see if we get the status code
CLIENT_XML = """<presence from='%s' to='%s'>
<x xmlns='http://jabber.org/protocol/muc'/>
</presence>""" % ('[email protected]/pda', 'hidden@%s/thirdwitch' % (HOSTNAME, ))
self.palaver_xs.dataReceived(CLIENT_XML)
return self.doWait(test109, 2)
def joinRoom(t):
test_elem = self.wstream.entity.children.pop()
self.failUnless(xpath.matches("/iq[@type='result']", test_elem), 'Invalid iq result.')
while len(self.wstream.entity.children)>1:
test_elem = self.wstream.entity.children.pop()
CLIENT_XML = """<presence from='%s' to='%s' >
<x xmlns='http://jabber.org/protocol/muc'/>
</presence>""" % ('[email protected]/pda', 'hidden@%s/thirdwitch' % (HOSTNAME, ))
self.palaver_xs.dataReceived(CLIENT_XML)
return self.doWait(testRoom, 2)
def _cbCreateRoom(t):
self.assertEquals(t, True)
test_elem = self.wstream.entity.children.pop()
frm = 'hidden@%s/thirdwitch' % HOSTNAME
self._testCreate(test_elem, frm)
# send config
CONFIG_XML = """<iq from='[email protected]/pda' id='arbiter_kds_9877' type='set' to='hidden@%s'>
<query xmlns='http://jabber.org/protocol/muc#admin'>
<x xmlns='jabber:x:data' type='submit'>
<field var='FORM_TYPE'>
<value>http://jabber.org/protocol/muc#roomconfig</value>
</field>
<field var='muc#roomconfig_whois'>
<value>anyone</value>
</field>
<field var="muc#roomconfig_publicroom" type="boolean" label="Turn on public searching of room? Make it public.">
<value>0</value>
</field>
</x></query></iq>""" % (HOSTNAME, )
self.palaver_xs.dataReceived(CONFIG_XML)
return self.doWait(joinRoom, 2)
CLIENT_XML = """<presence from='%s' to='%s'>
<x xmlns='http://jabber.org/protocol/muc'/>
</presence>""" % ('[email protected]/pda', 'hidden@%s/thirdwitch' % (HOSTNAME, ))
self.palaver_xs.dataReceived(CLIENT_XML)
return self.doWait(_cbCreateRoom, 2)
def testAffiliateChangeAndExitRaceCondition(self):
"""
This is a test for a race condition when an affiliation changes immediately before a user leaves.
"""
def _cbModify(t):
found_unavailable = 0
found_iq_result = False
# The last element in the children list is the last one received.
# The first elements we see should be unavailable
while len(self.wstream.entity.children) > 0:
test_elem = self.wstream.entity.children.pop()
if test_elem.name == 'presence' \
and 'type' in test_elem.attributes \
and test_elem['type'] == 'unavailable':
found_unavailable += 1
elif test_elem.name == 'presence' and found_unavailable < 3:
self.fail('The affiliation change needs to happen before the user leaves the room. %s' % (test_elem.toXml()))
if test_elem.name == 'iq':
found_iq_result = True
self.failUnless(found_iq_result, 'Did not change affiliation')
# we should check order
def modifyAndLeave(t):
while len(self.wstream.entity.children) > 0:
test_elem = self.wstream.entity.children.pop()
MODIFY_XML = """
<iq from='[email protected]' to='affiliation@%(host)s' type='set' id='arbiter_llh_142560'>
<query xmlns='http://jabber.org/protocol/muc#admin'>
<item affiliation='member' jid='[email protected]' role='visitor'/>
</query>
</iq>
<iq from='[email protected]' to='affiliation@%(host)s' type='set' id='arbiter_rzp_142561'>
<query xmlns='http://jabber.org/protocol/muc#admin'>
<item affiliation='member' jid='[email protected]' role='visitor'/>
</query>
</iq>
<presence from='[email protected]/pda' to='affiliation@%(host)s/juliet' type='unavailable'/>
<presence from='[email protected]/pda' to='affiliation@%(host)s/romeo' type='unavailable'/>
""" % {'host': HOSTNAME}
self.palaver_xs.dataReceived(MODIFY_XML)
return self.doWait(_cbModify, 10)
def sendJoin(t):
while len(self.wstream.entity.children) > 0:
test_elem = self.wstream.entity.children.pop()
PRESENCE_XML = """
<presence
from='[email protected]/pda'
to='affiliation@%(host)s/romeo'/>
<presence
from='[email protected]/pda'
to='affiliation@%(host)s/juliet'/>
""" % {'host': HOSTNAME}
self.palaver_xs.dataReceived(PRESENCE_XML)
return self.doWait(modifyAndLeave, 4)
PRESENCE_XML = """
<presence
from='[email protected]/pda'
to='affiliation@%(host)s/mercutio'/>
""" % {'host': HOSTNAME}
self.palaver_xs.dataReceived(PRESENCE_XML)
return self.doWait(sendJoin, 2)
def testCapsAndEncodedNames(self):
"""
        Test room names with mixed case and JID-escaped spaces.
"""
def _discoItems(t):
pass
def _cb61(t):
test_elem = self.wstream.entity.children.pop()
self.assertNotEquals(test_elem['type'],'error')
# test for correct namespace
self.assertEquals(test_elem.query.uri,'http://jabber.org/protocol/disco#info')
got_muc = False
for f in test_elem.query.elements():
if f.name == 'feature' and f['var'] == 'http://jabber.org/protocol/muc':
got_muc = True
self.assertEquals(got_muc, True)
room = "inner\\20chamber@" + HOSTNAME
CLIENT_XML = """
<iq from='[email protected]/pda' xmlns='jabber:client'
id='disco1'
to='%s'
type='get'>
<query xmlns='http://jabber.org/protocol/disco#items'/>
</iq>
""" % (room)
self.palaver_xs.dataReceived(CLIENT_XML)
return self.doWait(_discoItems, 2)
def _cbCreateRoom(t):
self.assertEquals(t, True)
test_elem = self.wstream.entity.children.pop()
#frm = '[email protected]/cpc'
room = "inner\\20chamber@" + HOSTNAME
frm = room+"/[email protected]"
self._testCreate(test_elem, frm)
CLIENT_XML = """
<iq from='[email protected]/pda' xmlns='jabber:client'
id='disco1'
to='%s'
type='get'>
<query xmlns='http://jabber.org/protocol/disco#info'/>
</iq>
""" % (room)
self.palaver_xs.dataReceived(CLIENT_XML)
return self.doWait(_cb61, 2)
CLIENT_XML = """<presence xmlns='jabber:client' to='Inner\\20Chamber@%s/[email protected]' from='[email protected]/cpc'><x xmlns='http://jabber.org/protocol/muc'/></presence>""" % (HOSTNAME,)
self.palaver_xs.dataReceived(CLIENT_XML)
return self.doWait(_cbCreateRoom, 2)
def testUnicodePresence(self):
""" Unicode Status messages should be supported ...."""
def _cb74(t):
# grab elements to test
child_count = len(self.wstream.entity.children)
for i in range(1, child_count):
test_elem = self.wstream.entity.children.pop()
self.assertEquals(test_elem.name, 'presence')
if test_elem['from'] == 'darkcave@%s/oldhag' % (HOSTNAME,):
self.assertEquals(muc.getCData(test_elem.status), u'ä ö and ü %')
self.assertEquals(str(test_elem.show),'chat')
if test_elem['from'] == 'darkcave@%s/testhag' % (HOSTNAME,):
self.assertEquals(xpath.matches("/presence/x[@xmlns='http://jabber.org/protocol/muc#user']/item[@role='participant']", test_elem), 1)
def _cbChangeStatus(t):
child_count = len(self.wstream.entity.children)
for i in range(1, child_count):
test_elem = self.wstream.entity.children.pop()
self.assertEquals(test_elem.name, 'presence')
if test_elem['from'] == 'darkcave@%s/oldhag' % (HOSTNAME,):
self.assertEquals(muc.getCData(test_elem.status), u'ä ö and ü %')
self.assertEquals(str(test_elem.show),'chat')
PRESENCE_XML = """
<presence
from='[email protected]/laptop'
to='darkcave@%s/testhag' />
""" % (HOSTNAME, )
self.palaver_xs.dataReceived(PRESENCE_XML)
return self.doWait(_cb74, 3)
def _cbJoin(t):
child_count = len(self.wstream.entity.children)
for i in range(1, child_count):
test_elem = self.wstream.entity.children.pop()
self.assertEquals(test_elem.name, 'presence')
if test_elem['from'] == 'darkcave@%s/oldhag' % (HOSTNAME,):
self.assertEquals(str(test_elem.status),'gone where the goblins go')
self.assertEquals(str(test_elem.show),'xa')
CHANGE_STATUS_XML = """
<presence
from='[email protected]/laptop'
to='darkcave@%s/oldhag'>
<show>chat</show>
<status>ä ö and ü %%</status>
</presence>
""" % (HOSTNAME, )
self.palaver_xs.dataReceived(CHANGE_STATUS_XML)
return self.doWait(_cbChangeStatus, 3)
JOIN_STATUS_XML = """
<presence
from='[email protected]/laptop'
to='darkcave@%s/oldhag'>
<show>xa</show>
<status>gone where the goblins go</status>
</presence>
""" % (HOSTNAME, )
self.palaver_xs.dataReceived(JOIN_STATUS_XML)
return self.doWait(_cbJoin, 3)
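# Editorial note: this suite is written for Twisted trial. Assuming the package
# layout recorded below, a typical invocation would be:
#
#     trial palaver.test.xep045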
| twonds/palaver | palaver/test/xep045.py | Python | mit | 74,418 |
##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2021 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from django.utils.translation import gettext_lazy as _
FULL = "FULL"
PARTIM = "PARTIM"
LEARNING_UNIT_YEAR_SUBTYPES = (
(FULL, _("Full")),
(PARTIM, _("Partim"))
)
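# Illustrative usage (editorial; not part of this module): the (value, label)
# pairs above are intended for a Django field's `choices` argument. The field
# name, max_length and default below are assumptions for the example:
#
#     subtype = models.CharField(max_length=25,
#                                choices=LEARNING_UNIT_YEAR_SUBTYPES,
#                                default=FULL)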
| uclouvain/OSIS-Louvain | base/models/enums/learning_unit_year_subtypes.py | Python | agpl-3.0 | 1,452 |
import logging
from django.core.urlresolvers import reverse
from django.utils.safestring import mark_safe
from corehq.apps.adm.dispatcher import ADMSectionDispatcher
from corehq.apps.adm.models import REPORT_SECTION_OPTIONS, ADMReport
from corehq.apps.reports.datatables import DataTablesHeader, DataTablesColumn, DTSortType
from corehq.apps.reports.generic import GenericReportView, GenericTabularReport
from corehq.apps.reports.standard import DatespanMixin, ProjectReportParametersMixin
from dimagi.utils.decorators.memoized import memoized
from django.utils.translation import ugettext as _, ugettext_noop
class ADMSectionView(GenericReportView):
section_name = ugettext_noop("Active Data Management")
base_template = "reports/base_template.html"
dispatcher = ADMSectionDispatcher
hide_filters = True
emailable = True
# adm-specific stuff
adm_slug = None
def __init__(self, request, base_context=None, domain=None, **kwargs):
self.adm_sections = dict(REPORT_SECTION_OPTIONS)
if self.adm_slug not in self.adm_sections:
raise ValueError("The adm_slug provided, %s, is not in the list of valid ADM report section slugs: %s." %
(self.adm_slug, ", ".join([key for key, val in self.adm_sections.items()]))
)
self.subreport_slug = kwargs.get("subreport_slug")
super(ADMSectionView, self).__init__(request, base_context, domain=domain, **kwargs)
self.context['report'].update(sub_slug=self.subreport_slug)
if self.subreport_data:
self.name = mark_safe("""%s <small>%s</small>""" %\
(self.subreport_data.get('value', {}).get('name'),
self.adm_sections.get(self.adm_slug, _("ADM Report"))))
@property
def subreport_data(self):
raise NotImplementedError
@property
def default_report_url(self):
return reverse('default_adm_report', args=[self.request.project])
@classmethod
def get_url(cls, domain=None, render_as=None, **kwargs):
subreport = kwargs.get('subreport')
url = super(ADMSectionView, cls).get_url(domain=domain, render_as=render_as, **kwargs)
return "%s%s" % (url, "%s/" % subreport if subreport else "")
class DefaultReportADMSectionView(GenericTabularReport, ADMSectionView, ProjectReportParametersMixin, DatespanMixin):
section_name = ugettext_noop("Active Data Management")
base_template = "reports/base_template.html"
dispatcher = ADMSectionDispatcher
fix_left_col = True
fields = ['corehq.apps.reports.filters.users.UserTypeFilter',
'corehq.apps.reports.filters.select.GroupFilter',
'corehq.apps.reports.filters.dates.DatespanFilter']
hide_filters = False
# adm-specific stuff
adm_slug = None
@property
@memoized
def subreport_data(self):
default_subreport = ADMReport.get_default(self.subreport_slug, domain=self.domain,
section=self.adm_slug, wrap=False)
if default_subreport is None:
return dict()
return default_subreport
@property
@memoized
def adm_report(self):
if self.subreport_data:
try:
adm_report = ADMReport.get_correct_wrap(self.subreport_data.get('key')[-1])
adm_report.set_domain_specific_values(self.domain)
return adm_report
except Exception as e:
logging.error("Could not fetch ADM Report: %s" % e)
return None
@property
@memoized
def adm_columns(self):
if self.adm_report:
column_config = self.report_column_config
if not isinstance(column_config, dict):
                raise ValueError('report_column_config should return a dict')
for col in self.adm_report.columns:
col.set_report_values(**column_config)
return self.adm_report.columns
return []
@property
def headers(self):
if self.subreport_slug is None:
raise ValueError("Cannot render this report. A subreport_slug is required.")
header = DataTablesHeader(DataTablesColumn(_("FLW Name")))
for col in self.adm_report.columns:
sort_type = DTSortType.NUMERIC if hasattr(col, 'returns_numerical') and col.returns_numerical else None
help_text = _(col.description) if col.description else None
header.add_column(DataTablesColumn(_(col.name), sort_type=sort_type, help_text=help_text))
header.custom_sort = self.adm_report.default_sort_params
return header
@property
def rows(self):
rows = []
for user in self.users:
row = [self.table_cell(user.raw_username,
user.username_in_report)]
for col in self.adm_columns:
val = col.raw_value(**user._asdict())
row.append(self.table_cell(col.clean_value(val),
col.html_value(val)))
rows.append(row)
self.statistics_rows = [["Total"], ["Average"]]
for ind, col in enumerate(self.adm_columns):
column_data = [row[1+ind] for row in rows]
self.statistics_rows[0].append(col.calculate_totals(column_data))
self.statistics_rows[1].append(col.calculate_averages(column_data))
return rows
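    # Editorial note: statistics_rows built above ("Total"/"Average" plus one
    # aggregate per ADM column) is presumably consumed by the tabular report
    # base class as footer rows; the per-column math is delegated to each
    # column's calculate_totals/calculate_averages.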
@property
def report_column_config(self):
"""
Should return a dict of values important for rendering the ADMColumns in this report.
"""
return dict(
domain=self.domain,
datespan=self.datespan
)
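    # Hypothetical subclass hook (editorial sketch, names assumed): a concrete
    # ADM section report could extend the column context like so --
    #
    #     @property
    #     def report_column_config(self):
    #         config = super(MySectionReport, self).report_column_config
    #         config.update(users=self.users)  # extra key is an assumption
    #         return config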
@classmethod
def override_navigation_list(cls, context):
current_slug = context.get('report', {}).get('sub_slug')
domain = context.get('domain')
subreport_context = []
subreports = ADMReport.get_default_subreports(domain, cls.adm_slug)
if not subreports:
subreport_context.append({
'url': '#',
'warning_label': 'No ADM Reports Configured',
})
return subreport_context
for report in subreports:
key = report.get("key", [])
entry = report.get("value", {})
report_slug = key[-2]
if cls.show_subreport_in_navigation(report_slug):
subreport_context.append({
'is_active': current_slug == report_slug,
'url': cls.get_url(domain=domain, subreport=report_slug),
'description': entry.get('description', ''),
'title': entry.get('name', 'Untitled Report'),
})
return subreport_context
@classmethod
def show_subreport_in_navigation(cls, subreport_slug):
return True
| SEL-Columbia/commcare-hq | corehq/apps/adm/reports/__init__.py | Python | bsd-3-clause | 6,857 |
'''
Date: 20th July 2009
This test ensures that the engine is able to run and shut down properly.
The criterion is that you should be able to "exit" the mainloop twice,
either by manual exit (ESC) or the console command "exit".
Before each run a welcoming message will be printed.
'''
import traceback
import smug
import time
def main():
try:
smug.initialize()
except:
print "An error occured while initializing smug. Exiting."
traceback.print_exc()
return
print "Hello World!"
try:
smug.run()
except:
print "An error occured while running smug. Exiting."
traceback.print_exc()
return
try:
smug.terminate()
except:
print "An error occured while terminating smug. Exiting."
traceback.print_exc()
return
| andersandersson/smug | python/tests/hello.py | Python | gpl-3.0 | 878 |
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import filecmp
import gyp.common
import gyp.xcodeproj_file
import errno
import os
import posixpath
import re
import shutil
import subprocess
import tempfile
# Project files generated by this module will use _intermediate_var as a
# custom Xcode setting whose value is a DerivedSources-like directory that's
# project-specific and configuration-specific. The normal choice,
# DERIVED_FILE_DIR, is target-specific, which is thought to be too restrictive
# as it is likely that multiple targets within a single project file will want
# to access the same set of generated files. The other option,
# PROJECT_DERIVED_FILE_DIR, is unsuitable because while it is project-specific,
# it is not configuration-specific. INTERMEDIATE_DIR is defined as
# $(PROJECT_DERIVED_FILE_DIR)/$(CONFIGURATION).
_intermediate_var = 'INTERMEDIATE_DIR'
# SHARED_INTERMEDIATE_DIR is the same, except that it is shared among all
# targets that share the same BUILT_PRODUCTS_DIR.
_shared_intermediate_var = 'SHARED_INTERMEDIATE_DIR'
_library_search_paths_var = 'LIBRARY_SEARCH_PATHS'
generator_default_variables = {
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '',
'STATIC_LIB_PREFIX': 'lib',
'SHARED_LIB_PREFIX': 'lib',
'STATIC_LIB_SUFFIX': '.a',
'SHARED_LIB_SUFFIX': '.dylib',
# INTERMEDIATE_DIR is a place for targets to build up intermediate products.
# It is specific to each build environment. It is only guaranteed to exist
# and be constant within the context of a project, corresponding to a single
# input file. Some build environments may allow their intermediate directory
# to be shared on a wider scale, but this is not guaranteed.
'INTERMEDIATE_DIR': '$(%s)' % _intermediate_var,
'OS': 'mac',
'PRODUCT_DIR': '$(BUILT_PRODUCTS_DIR)',
'LIB_DIR': '$(BUILT_PRODUCTS_DIR)',
'RULE_INPUT_ROOT': '$(INPUT_FILE_BASE)',
'RULE_INPUT_EXT': '$(INPUT_FILE_SUFFIX)',
'RULE_INPUT_NAME': '$(INPUT_FILE_NAME)',
'RULE_INPUT_PATH': '$(INPUT_FILE_PATH)',
'RULE_INPUT_DIRNAME': '$(INPUT_FILE_DIRNAME)',
'SHARED_INTERMEDIATE_DIR': '$(%s)' % _shared_intermediate_var,
'CONFIGURATION_NAME': '$(CONFIGURATION)',
}
# The Xcode-specific sections that hold paths.
generator_additional_path_sections = [
'mac_bundle_resources',
'mac_framework_headers',
'mac_framework_private_headers',
# 'mac_framework_dirs', input already handles _dirs endings.
]
# The Xcode-specific keys that exist on targets and aren't moved down to
# configurations.
generator_additional_non_configuration_keys = [
'mac_bundle',
'mac_bundle_resources',
'mac_framework_headers',
'mac_framework_private_headers',
'xcode_create_dependents_test_runner',
]
# We want to let any rules apply to files that are resources also.
generator_extra_sources_for_rules = [
'mac_bundle_resources',
'mac_framework_headers',
'mac_framework_private_headers',
]
# Xcode's standard set of library directories, which don't need to be duplicated
# in LIBRARY_SEARCH_PATHS. This list is not exhaustive, but that's okay.
xcode_standard_library_dirs = frozenset([
'$(SDKROOT)/usr/lib',
'$(SDKROOT)/usr/local/lib',
])
def CreateXCConfigurationList(configuration_names):
xccl = gyp.xcodeproj_file.XCConfigurationList({'buildConfigurations': []})
if len(configuration_names) == 0:
configuration_names = ['Default']
for configuration_name in configuration_names:
xcbc = gyp.xcodeproj_file.XCBuildConfiguration({
'name': configuration_name})
xccl.AppendProperty('buildConfigurations', xcbc)
xccl.SetProperty('defaultConfigurationName', configuration_names[0])
return xccl
class XcodeProject(object):
def __init__(self, gyp_path, path, build_file_dict):
self.gyp_path = gyp_path
self.path = path
self.project = gyp.xcodeproj_file.PBXProject(path=path)
projectDirPath = gyp.common.RelativePath(
os.path.dirname(os.path.abspath(self.gyp_path)),
os.path.dirname(path) or '.')
self.project.SetProperty('projectDirPath', projectDirPath)
self.project_file = \
gyp.xcodeproj_file.XCProjectFile({'rootObject': self.project})
self.build_file_dict = build_file_dict
# TODO(mark): add destructor that cleans up self.path if created_dir is
# True and things didn't complete successfully. Or do something even
# better with "try"?
self.created_dir = False
try:
os.makedirs(self.path)
self.created_dir = True
except OSError, e:
if e.errno != errno.EEXIST:
raise
def Finalize1(self, xcode_targets, serialize_all_tests):
# Collect a list of all of the build configuration names used by the
# various targets in the file. It is very heavily advised to keep each
# target in an entire project (even across multiple project files) using
# the same set of configuration names.
configurations = []
for xct in self.project.GetProperty('targets'):
xccl = xct.GetProperty('buildConfigurationList')
xcbcs = xccl.GetProperty('buildConfigurations')
for xcbc in xcbcs:
name = xcbc.GetProperty('name')
if name not in configurations:
configurations.append(name)
# Replace the XCConfigurationList attached to the PBXProject object with
# a new one specifying all of the configuration names used by the various
# targets.
try:
xccl = CreateXCConfigurationList(configurations)
self.project.SetProperty('buildConfigurationList', xccl)
except:
import sys
sys.stderr.write("Problem with gyp file %s\n" % self.gyp_path)
raise
# The need for this setting is explained above where _intermediate_var is
# defined. The comments below about wanting to avoid project-wide build
# settings apply here too, but this needs to be set on a project-wide basis
# so that files relative to the _intermediate_var setting can be displayed
# properly in the Xcode UI.
#
# Note that for configuration-relative files such as anything relative to
# _intermediate_var, for the purposes of UI tree view display, Xcode will
# only resolve the configuration name once, when the project file is
# opened. If the active build configuration is changed, the project file
# must be closed and reopened if it is desired for the tree view to update.
# This is filed as Apple radar 6588391.
xccl.SetBuildSetting(_intermediate_var,
'$(PROJECT_DERIVED_FILE_DIR)/$(CONFIGURATION)')
xccl.SetBuildSetting(_shared_intermediate_var,
'$(SYMROOT)/DerivedSources/$(CONFIGURATION)')
# Set user-specified project-wide build settings and config files. This
# is intended to be used very sparingly. Really, almost everything should
# go into target-specific build settings sections. The project-wide
# settings are only intended to be used in cases where Xcode attempts to
# resolve variable references in a project context as opposed to a target
# context, such as when resolving sourceTree references while building up
    # the tree view for UI display.
# Any values set globally are applied to all configurations, then any
# per-configuration values are applied.
for xck, xcv in self.build_file_dict.get('xcode_settings', {}).iteritems():
xccl.SetBuildSetting(xck, xcv)
if 'xcode_config_file' in self.build_file_dict:
config_ref = self.project.AddOrGetFileInRootGroup(
self.build_file_dict['xcode_config_file'])
xccl.SetBaseConfiguration(config_ref)
build_file_configurations = self.build_file_dict.get('configurations', {})
if build_file_configurations:
for config_name in configurations:
build_file_configuration_named = \
build_file_configurations.get(config_name, {})
if build_file_configuration_named:
xcc = xccl.ConfigurationNamed(config_name)
for xck, xcv in build_file_configuration_named.get('xcode_settings',
{}).iteritems():
xcc.SetBuildSetting(xck, xcv)
if 'xcode_config_file' in build_file_configuration_named:
config_ref = self.project.AddOrGetFileInRootGroup(
build_file_configurations[config_name]['xcode_config_file'])
xcc.SetBaseConfiguration(config_ref)
# Sort the targets based on how they appeared in the input.
# TODO(mark): Like a lot of other things here, this assumes internal
# knowledge of PBXProject - in this case, of its "targets" property.
# ordinary_targets are ordinary targets that are already in the project
# file. run_test_targets are the targets that run unittests and should be
# used for the Run All Tests target. support_targets are the action/rule
# targets used by GYP file targets, just kept for the assert check.
ordinary_targets = []
run_test_targets = []
support_targets = []
# targets is full list of targets in the project.
targets = []
    # does it define its own "all"?
has_custom_all = False
# targets_for_all is the list of ordinary_targets that should be listed
# in this project's "All" target. It includes each non_runtest_target
# that does not have suppress_wildcard set.
targets_for_all = []
for target in self.build_file_dict['targets']:
target_name = target['target_name']
toolset = target['toolset']
qualified_target = gyp.common.QualifiedTarget(self.gyp_path, target_name,
toolset)
xcode_target = xcode_targets[qualified_target]
# Make sure that the target being added to the sorted list is already in
# the unsorted list.
assert xcode_target in self.project._properties['targets']
targets.append(xcode_target)
ordinary_targets.append(xcode_target)
if xcode_target.support_target:
support_targets.append(xcode_target.support_target)
targets.append(xcode_target.support_target)
if not int(target.get('suppress_wildcard', False)):
targets_for_all.append(xcode_target)
if target_name.lower() == 'all':
        has_custom_all = True
# If this target has a 'run_as' attribute, add its target to the
# targets, and add it to the test targets.
if target.get('run_as'):
# Make a target to run something. It should have one
# dependency, the parent xcode target.
xccl = CreateXCConfigurationList(configurations)
run_target = gyp.xcodeproj_file.PBXAggregateTarget({
'name': 'Run ' + target_name,
'productName': xcode_target.GetProperty('productName'),
'buildConfigurationList': xccl,
},
parent=self.project)
run_target.AddDependency(xcode_target)
command = target['run_as']
script = ''
if command.get('working_directory'):
script = script + 'cd "%s"\n' % \
gyp.xcodeproj_file.ConvertVariablesToShellSyntax(
command.get('working_directory'))
if command.get('environment'):
script = script + "\n".join(
['export %s="%s"' %
(key, gyp.xcodeproj_file.ConvertVariablesToShellSyntax(val))
for (key, val) in command.get('environment').iteritems()]) + "\n"
# Some test end up using sockets, files on disk, etc. and can get
        # confused if more than one test runs at a time. The generator
# flag 'xcode_serialize_all_test_runs' controls the forcing of all
# tests serially. It defaults to True. To get serial runs this
# little bit of python does the same as the linux flock utility to
# make sure only one runs at a time.
command_prefix = ''
if serialize_all_tests:
command_prefix = \
"""python -c "import fcntl, subprocess, sys
file = open('$TMPDIR/GYP_serialize_test_runs', 'a')
fcntl.flock(file.fileno(), fcntl.LOCK_EX)
sys.exit(subprocess.call(sys.argv[1:]))" """
# If we were unable to exec for some reason, we want to exit
# with an error, and fixup variable references to be shell
# syntax instead of xcode syntax.
script = script + 'exec ' + command_prefix + '%s\nexit 1\n' % \
gyp.xcodeproj_file.ConvertVariablesToShellSyntax(
gyp.common.EncodePOSIXShellList(command.get('action')))
ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
'shellScript': script,
'showEnvVarsInLog': 0,
})
run_target.AppendProperty('buildPhases', ssbp)
# Add the run target to the project file.
targets.append(run_target)
run_test_targets.append(run_target)
xcode_target.test_runner = run_target
# Make sure that the list of targets being replaced is the same length as
# the one replacing it, but allow for the added test runner targets.
assert len(self.project._properties['targets']) == \
len(ordinary_targets) + len(support_targets)
self.project._properties['targets'] = targets
# Get rid of unnecessary levels of depth in groups like the Source group.
self.project.RootGroupsTakeOverOnlyChildren(True)
# Sort the groups nicely. Do this after sorting the targets, because the
# Products group is sorted based on the order of the targets.
self.project.SortGroups()
# Create an "All" target if there's more than one target in this project
# file and the project didn't define its own "All" target. Put a generated
# "All" target first so that people opening up the project for the first
# time will build everything by default.
if len(targets_for_all) > 1 and not has_custom_all:
xccl = CreateXCConfigurationList(configurations)
all_target = gyp.xcodeproj_file.PBXAggregateTarget(
{
'buildConfigurationList': xccl,
'name': 'All',
},
parent=self.project)
for target in targets_for_all:
all_target.AddDependency(target)
# TODO(mark): This is evil because it relies on internal knowledge of
# PBXProject._properties. It's important to get the "All" target first,
# though.
self.project._properties['targets'].insert(0, all_target)
# The same, but for run_test_targets.
if len(run_test_targets) > 1:
xccl = CreateXCConfigurationList(configurations)
run_all_tests_target = gyp.xcodeproj_file.PBXAggregateTarget(
{
'buildConfigurationList': xccl,
'name': 'Run All Tests',
},
parent=self.project)
for run_test_target in run_test_targets:
run_all_tests_target.AddDependency(run_test_target)
# Insert after the "All" target, which must exist if there is more than
# one run_test_target.
self.project._properties['targets'].insert(1, run_all_tests_target)
def Finalize2(self, xcode_targets, xcode_target_to_target_dict):
# Finalize2 needs to happen in a separate step because the process of
# updating references to other projects depends on the ordering of targets
# within remote project files. Finalize1 is responsible for sorting duty,
# and once all project files are sorted, Finalize2 can come in and update
# these references.
# To support making a "test runner" target that will run all the tests
# that are direct dependents of any given target, we look for
# xcode_create_dependents_test_runner being set on an Aggregate target,
# and generate a second target that will run the tests runners found under
# the marked target.
for bf_tgt in self.build_file_dict['targets']:
if int(bf_tgt.get('xcode_create_dependents_test_runner', 0)):
tgt_name = bf_tgt['target_name']
toolset = bf_tgt['toolset']
qualified_target = gyp.common.QualifiedTarget(self.gyp_path,
tgt_name, toolset)
xcode_target = xcode_targets[qualified_target]
if isinstance(xcode_target, gyp.xcodeproj_file.PBXAggregateTarget):
# Collect all the run test targets.
all_run_tests = []
pbxtds = xcode_target.GetProperty('dependencies')
for pbxtd in pbxtds:
pbxcip = pbxtd.GetProperty('targetProxy')
dependency_xct = pbxcip.GetProperty('remoteGlobalIDString')
if hasattr(dependency_xct, 'test_runner'):
all_run_tests.append(dependency_xct.test_runner)
# Directly depend on all the runners as they depend on the target
# that builds them.
if len(all_run_tests) > 0:
run_all_target = gyp.xcodeproj_file.PBXAggregateTarget({
'name': 'Run %s Tests' % tgt_name,
'productName': tgt_name,
},
parent=self.project)
for run_test_target in all_run_tests:
run_all_target.AddDependency(run_test_target)
# Insert the test runner after the related target.
idx = self.project._properties['targets'].index(xcode_target)
self.project._properties['targets'].insert(idx + 1, run_all_target)
# Update all references to other projects, to make sure that the lists of
# remote products are complete. Otherwise, Xcode will fill them in when
# it opens the project file, which will result in unnecessary diffs.
# TODO(mark): This is evil because it relies on internal knowledge of
# PBXProject._other_pbxprojects.
for other_pbxproject in self.project._other_pbxprojects.keys():
self.project.AddOrGetProjectReference(other_pbxproject)
self.project.SortRemoteProductReferences()
# Give everything an ID.
self.project_file.ComputeIDs()
# Make sure that no two objects in the project file have the same ID. If
# multiple objects wind up with the same ID, upon loading the file, Xcode
# will only recognize one object (the last one in the file?) and the
# results are unpredictable.
self.project_file.EnsureNoIDCollisions()
def Write(self):
# Write the project file to a temporary location first. Xcode watches for
# changes to the project file and presents a UI sheet offering to reload
# the project when it does change. However, in some cases, especially when
# multiple projects are open or when Xcode is busy, things don't work so
# seamlessly. Sometimes, Xcode is able to detect that a project file has
# changed but can't unload it because something else is referencing it.
# To mitigate this problem, and to avoid even having Xcode present the UI
# sheet when an open project is rewritten for inconsequential changes, the
# project file is written to a temporary file in the xcodeproj directory
# first. The new temporary file is then compared to the existing project
# file, if any. If they differ, the new file replaces the old; otherwise,
# the new project file is simply deleted. Xcode properly detects a file
# being renamed over an open project file as a change and so it remains
# able to present the "project file changed" sheet under this system.
# Writing to a temporary file first also avoids the possible problem of
# Xcode rereading an incomplete project file.
(output_fd, new_pbxproj_path) = \
tempfile.mkstemp(suffix='.tmp', prefix='project.pbxproj.gyp.',
dir=self.path)
try:
output_file = os.fdopen(output_fd, 'wb')
self.project_file.Print(output_file)
output_file.close()
pbxproj_path = os.path.join(self.path, 'project.pbxproj')
same = False
try:
same = filecmp.cmp(pbxproj_path, new_pbxproj_path, False)
except OSError, e:
if e.errno != errno.ENOENT:
raise
if same:
# The new file is identical to the old one, just get rid of the new
# one.
os.unlink(new_pbxproj_path)
else:
# The new file is different from the old one, or there is no old one.
# Rename the new file to the permanent name.
#
# tempfile.mkstemp uses an overly restrictive mode, resulting in a
# file that can only be read by the owner, regardless of the umask.
# There's no reason to not respect the umask here, which means that
# an extra hoop is required to fetch it and reset the new file's mode.
#
# No way to get the umask without setting a new one? Set a safe one
# and then set it back to the old value.
umask = os.umask(077)
os.umask(umask)
os.chmod(new_pbxproj_path, 0666 & ~umask)
os.rename(new_pbxproj_path, pbxproj_path)
except Exception:
# Don't leave turds behind. In fact, if this code was responsible for
# creating the xcodeproj directory, get rid of that too.
os.unlink(new_pbxproj_path)
if self.created_dir:
shutil.rmtree(self.path, True)
raise
cached_xcode_version = None
def InstalledXcodeVersion():
"""Fetches the installed version of Xcode, returns empty string if it is
unable to figure it out."""
global cached_xcode_version
if not cached_xcode_version is None:
return cached_xcode_version
# Default to an empty string
cached_xcode_version = ''
# Collect the xcodebuild's version information.
try:
import subprocess
cmd = ['/usr/bin/xcodebuild', '-version']
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
xcodebuild_version_info = proc.communicate()[0]
# Any error, return empty string
if proc.returncode:
xcodebuild_version_info = ''
except OSError:
# We failed to launch the tool
xcodebuild_version_info = ''
# Pull out the Xcode version itself.
match_line = re.search('^Xcode (.*)$', xcodebuild_version_info, re.MULTILINE)
if match_line:
cached_xcode_version = match_line.group(1)
# Done!
return cached_xcode_version
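# Parsing sketch (hypothetical xcodebuild output; the exact format varies by
# release): given "Xcode 4.6.3\nBuild version 4H1503\n", the regex in
# InstalledXcodeVersion extracts and caches '4.6.3'.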
def AddSourceToTarget(source, type, pbxp, xct):
# TODO(mark): Perhaps source_extensions and library_extensions can be made a
# little bit fancier.
source_extensions = ['c', 'cc', 'cpp', 'cxx', 'm', 'mm', 's']
# .o is conceptually more of a "source" than a "library," but Xcode thinks
# of "sources" as things to compile and "libraries" (or "frameworks") as
# things to link with. Adding an object file to an Xcode target's frameworks
# phase works properly.
library_extensions = ['a', 'dylib', 'framework', 'o']
basename = posixpath.basename(source)
(root, ext) = posixpath.splitext(basename)
if ext != '':
ext = ext[1:].lower()
if ext in source_extensions and type != 'none':
xct.SourcesPhase().AddFile(source)
elif ext in library_extensions and type != 'none':
xct.FrameworksPhase().AddFile(source)
else:
# Files that aren't added to a sources or frameworks build phase can still
# go into the project file, just not as part of a build phase.
pbxp.AddOrGetFileInRootGroup(source)
def AddResourceToTarget(resource, pbxp, xct):
# TODO(mark): Combine with AddSourceToTarget above? Or just inline this call
# where it's used.
xct.ResourcesPhase().AddFile(resource)
def AddHeaderToTarget(header, pbxp, xct, is_public):
# TODO(mark): Combine with AddSourceToTarget above? Or just inline this call
# where it's used.
settings = '{ATTRIBUTES = (%s, ); }' % ('Private', 'Public')[is_public]
xct.HeadersPhase().AddFile(header, settings)
_xcode_variable_re = re.compile('(\$\((.*?)\))')
def ExpandXcodeVariables(string, expansions):
"""Expands Xcode-style $(VARIABLES) in string per the expansions dict.
In some rare cases, it is appropriate to expand Xcode variables when a
project file is generated. For any substring $(VAR) in string, if VAR is a
key in the expansions dict, $(VAR) will be replaced with expansions[VAR].
Any $(VAR) substring in string for which VAR is not a key in the expansions
dict will remain in the returned string.
"""
matches = _xcode_variable_re.findall(string)
  if matches is None:
return string
matches.reverse()
for match in matches:
(to_replace, variable) = match
if not variable in expansions:
continue
replacement = expansions[variable]
string = re.sub(re.escape(to_replace), replacement, string)
return string
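# Expansion sketch (hypothetical values): with
# expansions = {'INPUT_FILE_BASE': 'two'},
# ExpandXcodeVariables('$(INPUT_FILE_BASE).cc', expansions) yields 'two.cc',
# while unknown references such as $(CONFIGURATION) are left untouched.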
def EscapeXCodeArgument(s):
"""We must escape the arguments that we give to XCode so that it knows not to
split on spaces and to respect backslash and quote literals."""
s = s.replace('\\', '\\\\')
s = s.replace('"', '\\"')
return '"' + s + '"'
def GenerateOutput(target_list, target_dicts, data, params):
options = params['options']
generator_flags = params.get('generator_flags', {})
parallel_builds = generator_flags.get('xcode_parallel_builds', True)
serialize_all_tests = \
generator_flags.get('xcode_serialize_all_test_runs', True)
project_version = generator_flags.get('xcode_project_version', None)
skip_excluded_files = \
not generator_flags.get('xcode_list_excluded_files', True)
xcode_projects = {}
for build_file, build_file_dict in data.iteritems():
(build_file_root, build_file_ext) = os.path.splitext(build_file)
if build_file_ext != '.gyp':
continue
xcodeproj_path = build_file_root + options.suffix + '.xcodeproj'
if options.generator_output:
xcodeproj_path = os.path.join(options.generator_output, xcodeproj_path)
xcp = XcodeProject(build_file, xcodeproj_path, build_file_dict)
xcode_projects[build_file] = xcp
pbxp = xcp.project
if parallel_builds:
pbxp.SetProperty('attributes',
{'BuildIndependentTargetsInParallel': 'YES'})
if project_version:
xcp.project_file.SetXcodeVersion(project_version)
main_group = pbxp.GetProperty('mainGroup')
build_group = gyp.xcodeproj_file.PBXGroup({'name': 'Build'})
main_group.AppendChild(build_group)
for included_file in build_file_dict['included_files']:
build_group.AddOrGetFileByPath(included_file, False)
xcode_targets = {}
xcode_target_to_target_dict = {}
for qualified_target in target_list:
[build_file, target_name, toolset] = \
gyp.common.ParseQualifiedTarget(qualified_target)
spec = target_dicts[qualified_target]
if spec['toolset'] != 'target':
raise Exception(
'Multiple toolsets not supported in xcode build (target %s)' %
qualified_target)
configuration_names = [spec['default_configuration']]
for configuration_name in sorted(spec['configurations'].keys()):
if configuration_name not in configuration_names:
configuration_names.append(configuration_name)
xcp = xcode_projects[build_file]
pbxp = xcp.project
# Set up the configurations for the target according to the list of names
# supplied.
xccl = CreateXCConfigurationList(configuration_names)
# Create an XCTarget subclass object for the target. The type with
# "+bundle" appended will be used if the target has "mac_bundle" set.
# loadable_modules not in a mac_bundle are mapped to
# com.googlecode.gyp.xcode.bundle, a pseudo-type that xcode.py interprets
# to create a single-file mh_bundle.
_types = {
'executable': 'com.apple.product-type.tool',
'loadable_module': 'com.googlecode.gyp.xcode.bundle',
'shared_library': 'com.apple.product-type.library.dynamic',
'static_library': 'com.apple.product-type.library.static',
'executable+bundle': 'com.apple.product-type.application',
'loadable_module+bundle': 'com.apple.product-type.bundle',
'shared_library+bundle': 'com.apple.product-type.framework',
}
target_properties = {
'buildConfigurationList': xccl,
'name': target_name,
}
type = spec['type']
is_bundle = int(spec.get('mac_bundle', 0))
if type != 'none':
type_bundle_key = type
if is_bundle:
type_bundle_key += '+bundle'
xctarget_type = gyp.xcodeproj_file.PBXNativeTarget
try:
target_properties['productType'] = _types[type_bundle_key]
except KeyError, e:
gyp.common.ExceptionAppend(e, "-- unknown product type while "
"writing target %s" % target_name)
raise
else:
xctarget_type = gyp.xcodeproj_file.PBXAggregateTarget
assert not is_bundle, (
'mac_bundle targets cannot have type none (target "%s")' %
target_name)
target_product_name = spec.get('product_name')
if target_product_name is not None:
target_properties['productName'] = target_product_name
xct = xctarget_type(target_properties, parent=pbxp,
force_outdir=spec.get('product_dir'),
force_prefix=spec.get('product_prefix'),
force_extension=spec.get('product_extension'))
pbxp.AppendProperty('targets', xct)
xcode_targets[qualified_target] = xct
xcode_target_to_target_dict[xct] = spec
spec_actions = spec.get('actions', [])
spec_rules = spec.get('rules', [])
# Xcode has some "issues" with checking dependencies for the "Compile
# sources" step with any source files/headers generated by actions/rules.
# To work around this, if a target is building anything directly (not
# type "none"), then a second target is used to run the GYP actions/rules
# and is made a dependency of this target. This way the work is done
# before the dependency checks for what should be recompiled.
support_xct = None
if type != 'none' and (spec_actions or spec_rules):
      support_xccl = CreateXCConfigurationList(configuration_names)
support_target_properties = {
'buildConfigurationList': support_xccl,
'name': target_name + ' Support',
}
if target_product_name:
support_target_properties['productName'] = \
target_product_name + ' Support'
support_xct = \
gyp.xcodeproj_file.PBXAggregateTarget(support_target_properties,
parent=pbxp)
pbxp.AppendProperty('targets', support_xct)
xct.AddDependency(support_xct)
# Hang the support target off the main target so it can be tested/found
# by the generator during Finalize.
xct.support_target = support_xct
prebuild_index = 0
# Add custom shell script phases for "actions" sections.
for action in spec_actions:
# There's no need to write anything into the script to ensure that the
# output directories already exist, because Xcode will look at the
# declared outputs and automatically ensure that they exist for us.
# Do we have a message to print when this action runs?
message = action.get('message')
if message:
message = 'echo note: ' + gyp.common.EncodePOSIXShellArgument(message)
else:
message = ''
# Turn the list into a string that can be passed to a shell.
action_string = gyp.common.EncodePOSIXShellList(action['action'])
# Convert Xcode-type variable references to sh-compatible environment
# variable references.
message_sh = gyp.xcodeproj_file.ConvertVariablesToShellSyntax(message)
action_string_sh = gyp.xcodeproj_file.ConvertVariablesToShellSyntax(
action_string)
script = ''
# Include the optional message
if message_sh:
script += message_sh + '\n'
# Be sure the script runs in exec, and that if exec fails, the script
# exits signalling an error.
script += 'exec ' + action_string_sh + '\nexit 1\n'
ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
'inputPaths': action['inputs'],
'name': 'Action "' + action['action_name'] + '"',
'outputPaths': action['outputs'],
'shellScript': script,
'showEnvVarsInLog': 0,
})
if support_xct:
support_xct.AppendProperty('buildPhases', ssbp)
else:
# TODO(mark): this assumes too much knowledge of the internals of
# xcodeproj_file; some of these smarts should move into xcodeproj_file
# itself.
xct._properties['buildPhases'].insert(prebuild_index, ssbp)
prebuild_index = prebuild_index + 1
# TODO(mark): Should verify that at most one of these is specified.
if int(action.get('process_outputs_as_sources', False)):
for output in action['outputs']:
AddSourceToTarget(output, type, pbxp, xct)
if int(action.get('process_outputs_as_mac_bundle_resources', False)):
for output in action['outputs']:
AddResourceToTarget(output, pbxp, xct)
# tgt_mac_bundle_resources holds the list of bundle resources so
# the rule processing can check against it.
if is_bundle:
tgt_mac_bundle_resources = spec.get('mac_bundle_resources', [])
else:
tgt_mac_bundle_resources = []
# Add custom shell script phases driving "make" for "rules" sections.
#
# Xcode's built-in rule support is almost powerful enough to use directly,
# but there are a few significant deficiencies that render them unusable.
# There are workarounds for some of its inadequacies, but in aggregate,
# the workarounds added complexity to the generator, and some workarounds
# actually require input files to be crafted more carefully than I'd like.
# Consequently, until Xcode rules are made more capable, "rules" input
# sections will be handled in Xcode output by shell script build phases
# performed prior to the compilation phase.
#
# The following problems with Xcode rules were found. The numbers are
# Apple radar IDs. I hope that these shortcomings are addressed, I really
# liked having the rules handled directly in Xcode during the period that
# I was prototyping this.
#
# 6588600 Xcode compiles custom script rule outputs too soon, compilation
# fails. This occurs when rule outputs from distinct inputs are
# interdependent. The only workaround is to put rules and their
# inputs in a separate target from the one that compiles the rule
# outputs. This requires input file cooperation and it means that
# process_outputs_as_sources is unusable.
# 6584932 Need to declare that custom rule outputs should be excluded from
# compilation. A possible workaround is to lie to Xcode about a
# rule's output, giving it a dummy file it doesn't know how to
# compile. The rule action script would need to touch the dummy.
# 6584839 I need a way to declare additional inputs to a custom rule.
# A possible workaround is a shell script phase prior to
# compilation that touches a rule's primary input files if any
# would-be additional inputs are newer than the output. Modifying
# the source tree - even just modification times - feels dirty.
# 6564240 Xcode "custom script" build rules always dump all environment
    #          variables. This is a low-priority problem and is not a
# show-stopper.
rules_by_ext = {}
for rule in spec_rules:
rules_by_ext[rule['extension']] = rule
# First, some definitions:
#
# A "rule source" is a file that was listed in a target's "sources"
# list and will have a rule applied to it on the basis of matching the
# rule's "extensions" attribute. Rule sources are direct inputs to
# rules.
#
# Rule definitions may specify additional inputs in their "inputs"
# attribute. These additional inputs are used for dependency tracking
# purposes.
#
# A "concrete output" is a rule output with input-dependent variables
# resolved. For example, given a rule with:
# 'extension': 'ext', 'outputs': ['$(INPUT_FILE_BASE).cc'],
# if the target's "sources" list contained "one.ext" and "two.ext",
# the "concrete output" for rule input "two.ext" would be "two.cc". If
# a rule specifies multiple outputs, each input file that the rule is
# applied to will have the same number of concrete outputs.
#
# If any concrete outputs are outdated or missing relative to their
# corresponding rule_source or to any specified additional input, the
# rule action must be performed to generate the concrete outputs.
# concrete_outputs_by_rule_source will have an item at the same index
# as the rule['rule_sources'] that it corresponds to. Each item is a
# list of all of the concrete outputs for the rule_source.
concrete_outputs_by_rule_source = []
# concrete_outputs_all is a flat list of all concrete outputs that this
# rule is able to produce, given the known set of input files
# (rule_sources) that apply to it.
concrete_outputs_all = []
# messages & actions are keyed by the same indices as rule['rule_sources']
# and concrete_outputs_by_rule_source. They contain the message and
# action to perform after resolving input-dependent variables. The
# message is optional, in which case None is stored for each rule source.
messages = []
actions = []
for rule_source in rule.get('rule_sources', []):
rule_source_dirname, rule_source_basename = \
posixpath.split(rule_source)
(rule_source_root, rule_source_ext) = \
posixpath.splitext(rule_source_basename)
# These are the same variable names that Xcode uses for its own native
# rule support. Because Xcode's rule engine is not being used, they
# need to be expanded as they are written to the makefile.
rule_input_dict = {
'INPUT_FILE_BASE': rule_source_root,
'INPUT_FILE_SUFFIX': rule_source_ext,
'INPUT_FILE_NAME': rule_source_basename,
'INPUT_FILE_PATH': rule_source,
'INPUT_FILE_DIRNAME': rule_source_dirname,
}
concrete_outputs_for_this_rule_source = []
for output in rule.get('outputs', []):
# Fortunately, Xcode and make both use $(VAR) format for their
# variables, so the expansion is the only transformation necessary.
          # Any remaining $(VAR)-type variables in the string can be given
# directly to make, which will pick up the correct settings from
# what Xcode puts into the environment.
concrete_output = ExpandXcodeVariables(output, rule_input_dict)
concrete_outputs_for_this_rule_source.append(concrete_output)
# Add all concrete outputs to the project.
pbxp.AddOrGetFileInRootGroup(concrete_output)
concrete_outputs_by_rule_source.append( \
concrete_outputs_for_this_rule_source)
concrete_outputs_all.extend(concrete_outputs_for_this_rule_source)
# TODO(mark): Should verify that at most one of these is specified.
if int(rule.get('process_outputs_as_sources', False)):
for output in concrete_outputs_for_this_rule_source:
AddSourceToTarget(output, type, pbxp, xct)
# If the file came from the mac_bundle_resources list or if the rule
# is marked to process outputs as bundle resource, do so.
was_mac_bundle_resource = rule_source in tgt_mac_bundle_resources
if was_mac_bundle_resource or \
int(rule.get('process_outputs_as_mac_bundle_resources', False)):
for output in concrete_outputs_for_this_rule_source:
AddResourceToTarget(output, pbxp, xct)
# Do we have a message to print when this rule runs?
message = rule.get('message')
if message:
message = gyp.common.EncodePOSIXShellArgument(message)
message = ExpandXcodeVariables(message, rule_input_dict)
messages.append(message)
# Turn the list into a string that can be passed to a shell.
action_string = gyp.common.EncodePOSIXShellList(rule['action'])
action = ExpandXcodeVariables(action_string, rule_input_dict)
actions.append(action)
if len(concrete_outputs_all) > 0:
        # TODO(mark): There's a possibility for collision here. Consider
# target "t" rule "A_r" and target "t_A" rule "r".
makefile_name = '%s_%s.make' % (target_name, rule['rule_name'])
makefile_path = os.path.join(xcode_projects[build_file].path,
makefile_name)
# TODO(mark): try/close? Write to a temporary file and swap it only
# if it's got changes?
makefile = open(makefile_path, 'wb')
# make will build the first target in the makefile by default. By
# convention, it's called "all". List all (or at least one)
# concrete output for each rule source as a prerequisite of the "all"
# target.
makefile.write('all: \\\n')
for concrete_output_index in \
xrange(0, len(concrete_outputs_by_rule_source)):
# Only list the first (index [0]) concrete output of each input
# in the "all" target. Otherwise, a parallel make (-j > 1) would
# attempt to process each input multiple times simultaneously.
# Otherwise, "all" could just contain the entire list of
# concrete_outputs_all.
concrete_output = \
concrete_outputs_by_rule_source[concrete_output_index][0]
if concrete_output_index == len(concrete_outputs_by_rule_source) - 1:
eol = ''
else:
eol = ' \\'
makefile.write(' %s%s\n' % (concrete_output, eol))
for (rule_source, concrete_outputs, message, action) in \
zip(rule['rule_sources'], concrete_outputs_by_rule_source,
messages, actions):
makefile.write('\n')
# Add a rule that declares it can build each concrete output of a
# rule source. Collect the names of the directories that are
# required.
concrete_output_dirs = []
for concrete_output_index in xrange(0, len(concrete_outputs)):
concrete_output = concrete_outputs[concrete_output_index]
if concrete_output_index == 0:
bol = ''
else:
bol = ' '
makefile.write('%s%s \\\n' % (bol, concrete_output))
concrete_output_dir = posixpath.dirname(concrete_output)
if (concrete_output_dir and
concrete_output_dir not in concrete_output_dirs):
concrete_output_dirs.append(concrete_output_dir)
makefile.write(' : \\\n')
# The prerequisites for this rule are the rule source itself and
# the set of additional rule inputs, if any.
prerequisites = [rule_source]
prerequisites.extend(rule.get('inputs', []))
for prerequisite_index in xrange(0, len(prerequisites)):
prerequisite = prerequisites[prerequisite_index]
if prerequisite_index == len(prerequisites) - 1:
eol = ''
else:
eol = ' \\'
makefile.write(' %s%s\n' % (prerequisite, eol))
# Make sure that output directories exist before executing the rule
# action.
if len(concrete_output_dirs) > 0:
makefile.write('\t@mkdir -p "%s"\n' %
'" "'.join(concrete_output_dirs))
# The rule message and action have already had the necessary variable
# substitutions performed.
if message:
# Mark it with note: so Xcode picks it up in build output.
makefile.write('\t@echo note: %s\n' % message)
makefile.write('\t%s\n' % action)
makefile.close()
# It might be nice to ensure that needed output directories exist
# here rather than in each target in the Makefile, but that wouldn't
# work if there ever was a concrete output that had an input-dependent
# variable anywhere other than in the leaf position.
# Don't declare any inputPaths or outputPaths. If they're present,
# Xcode will provide a slight optimization by only running the script
# phase if any output is missing or outdated relative to any input.
# Unfortunately, it will also assume that all outputs are touched by
# the script, and if the outputs serve as files in a compilation
# phase, they will be unconditionally rebuilt. Since make might not
# rebuild everything that could be declared here as an output, this
# extra compilation activity is unnecessary. With inputPaths and
# outputPaths not supplied, make will always be called, but it knows
# enough to not do anything when everything is up-to-date.
# To help speed things up, pass -j COUNT to make so it does some work
# in parallel. Don't use ncpus because Xcode will build ncpus targets
# in parallel and if each target happens to have a rules step, there
# would be ncpus^2 things going. With a machine that has 2 quad-core
# Xeons, a build can quickly run out of processes based on
# scheduling/other tasks, and randomly failing builds are no good.
script = \
"""JOB_COUNT="$(/usr/sbin/sysctl -n hw.ncpu)"
if [ "${JOB_COUNT}" -gt 4 ]; then
JOB_COUNT=4
fi
exec "${DEVELOPER_BIN_DIR}/make" -f "${PROJECT_FILE_PATH}/%s" -j "${JOB_COUNT}"
exit 1
""" % makefile_name
ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
'name': 'Rule "' + rule['rule_name'] + '"',
'shellScript': script,
'showEnvVarsInLog': 0,
})
if support_xct:
support_xct.AppendProperty('buildPhases', ssbp)
else:
# TODO(mark): this assumes too much knowledge of the internals of
# xcodeproj_file; some of these smarts should move into xcodeproj_file
# itself.
xct._properties['buildPhases'].insert(prebuild_index, ssbp)
prebuild_index = prebuild_index + 1
# Extra rule inputs also go into the project file. Concrete outputs were
# already added when they were computed.
groups = ['inputs', 'inputs_excluded']
if skip_excluded_files:
groups = [x for x in groups if not x.endswith('_excluded')]
for group in groups:
for item in rule.get(group, []):
pbxp.AddOrGetFileInRootGroup(item)
# Add "sources".
for source in spec.get('sources', []):
(source_root, source_extension) = posixpath.splitext(source)
if source_extension[1:] not in rules_by_ext:
# AddSourceToTarget will add the file to a root group if it's not
# already there.
AddSourceToTarget(source, type, pbxp, xct)
else:
pbxp.AddOrGetFileInRootGroup(source)
# Add "mac_bundle_resources" and "mac_framework_private_headers" if
# it's a bundle of any type.
if is_bundle:
for resource in tgt_mac_bundle_resources:
(resource_root, resource_extension) = posixpath.splitext(resource)
if resource_extension[1:] not in rules_by_ext:
AddResourceToTarget(resource, pbxp, xct)
else:
pbxp.AddOrGetFileInRootGroup(resource)
for header in spec.get('mac_framework_private_headers', []):
AddHeaderToTarget(header, pbxp, xct, False)
# Add "mac_framework_headers". These can be valid for both frameworks
# and static libraries.
if is_bundle or type == 'static_library':
for header in spec.get('mac_framework_headers', []):
AddHeaderToTarget(header, pbxp, xct, True)
# Add "copies".
for copy_group in spec.get('copies', []):
pbxcp = gyp.xcodeproj_file.PBXCopyFilesBuildPhase({
'name': 'Copy to ' + copy_group['destination']
},
parent=xct)
dest = copy_group['destination']
if dest[0] not in ('/', '$'):
# Relative paths are relative to $(SRCROOT).
dest = '$(SRCROOT)/' + dest
pbxcp.SetDestination(dest)
# TODO(mark): The usual comment about this knowing too much about
# gyp.xcodeproj_file internals applies.
xct._properties['buildPhases'].insert(prebuild_index, pbxcp)
for file in copy_group['files']:
pbxcp.AddFile(file)
# Excluded files can also go into the project file.
if not skip_excluded_files:
for key in ['sources', 'mac_bundle_resources', 'mac_framework_headers',
'mac_framework_private_headers']:
excluded_key = key + '_excluded'
for item in spec.get(excluded_key, []):
pbxp.AddOrGetFileInRootGroup(item)
# So can "inputs" and "outputs" sections of "actions" groups.
groups = ['inputs', 'inputs_excluded', 'outputs', 'outputs_excluded']
if skip_excluded_files:
groups = [x for x in groups if not x.endswith('_excluded')]
for action in spec.get('actions', []):
for group in groups:
for item in action.get(group, []):
# Exclude anything in BUILT_PRODUCTS_DIR. They're products, not
# sources.
if not item.startswith('$(BUILT_PRODUCTS_DIR)/'):
pbxp.AddOrGetFileInRootGroup(item)
for postbuild in spec.get('postbuilds', []):
action_string_sh = gyp.common.EncodePOSIXShellList(postbuild['action'])
script = 'exec ' + action_string_sh + '\nexit 1\n'
# Make the postbuild step depend on the output of ld or ar from this
# target. Apparently putting the script step after the link step isn't
# sufficient to ensure proper ordering in all cases. With an input
# declared but no outputs, the script step should run every time, as
# desired.
ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
'inputPaths': ['$(BUILT_PRODUCTS_DIR)/$(EXECUTABLE_PATH)'],
'name': 'Postbuild "' + postbuild['postbuild_name'] + '"',
'shellScript': script,
'showEnvVarsInLog': 0,
})
xct.AppendProperty('buildPhases', ssbp)
# Add dependencies before libraries, because adding a dependency may imply
# adding a library. It's preferable to keep dependencies listed first
# during a link phase so that they can override symbols that would
# otherwise be provided by libraries, which will usually include system
# libraries. On some systems, ld is finicky and even requires the
# libraries to be ordered in such a way that unresolved symbols in
# earlier-listed libraries may only be resolved by later-listed libraries.
# The Mac linker doesn't work that way, but other platforms do, and so
# their linker invocations need to be constructed in this way. There's
# no compelling reason for Xcode's linker invocations to differ.
if 'dependencies' in spec:
for dependency in spec['dependencies']:
xct.AddDependency(xcode_targets[dependency])
# The support project also gets the dependencies (in case they are
# needed for the actions/rules to work).
if support_xct:
support_xct.AddDependency(xcode_targets[dependency])
if 'libraries' in spec:
for library in spec['libraries']:
xct.FrameworksPhase().AddFile(library)
# Add the library's directory to LIBRARY_SEARCH_PATHS if necessary.
# I wish Xcode handled this automatically.
library_dir = posixpath.dirname(library)
if library_dir not in xcode_standard_library_dirs and (
not xct.HasBuildSetting(_library_search_paths_var) or
library_dir not in xct.GetBuildSetting(_library_search_paths_var)):
xct.AppendBuildSetting(_library_search_paths_var, library_dir)
for configuration_name in configuration_names:
configuration = spec['configurations'][configuration_name]
xcbc = xct.ConfigurationNamed(configuration_name)
for include_dir in configuration.get('mac_framework_dirs', []):
xcbc.AppendBuildSetting('FRAMEWORK_SEARCH_PATHS', include_dir)
for include_dir in configuration.get('include_dirs', []):
xcbc.AppendBuildSetting('HEADER_SEARCH_PATHS', include_dir)
if 'defines' in configuration:
for define in configuration['defines']:
set_define = EscapeXCodeArgument(define)
xcbc.AppendBuildSetting('GCC_PREPROCESSOR_DEFINITIONS', set_define)
if 'xcode_settings' in configuration:
for xck, xcv in configuration['xcode_settings'].iteritems():
xcbc.SetBuildSetting(xck, xcv)
if 'xcode_config_file' in configuration:
config_ref = pbxp.AddOrGetFileInRootGroup(
configuration['xcode_config_file'])
xcbc.SetBaseConfiguration(config_ref)
build_files = []
for build_file, build_file_dict in data.iteritems():
if build_file.endswith('.gyp'):
build_files.append(build_file)
for build_file in build_files:
xcode_projects[build_file].Finalize1(xcode_targets, serialize_all_tests)
for build_file in build_files:
xcode_projects[build_file].Finalize2(xcode_targets,
xcode_target_to_target_dict)
for build_file in build_files:
xcode_projects[build_file].Write()
| kans/birgo | tools/gyp/pylib/gyp/generator/xcode.py | Python | apache-2.0 | 53,685 |
def main(request, response):
response.headers.set("Content-Security-Policy", "default-src * 'unsafe-inline'")
response.headers.set("X-Content-Security-Policy", "default-src * 'unsafe-inline'")
response.headers.set("X-WebKit-CSP", "default-src * 'unsafe-inline'")
return """<!DOCTYPE html>
<!--
Copyright (c) 2013 Intel Corporation.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of works must retain the original copyright notice, this list
of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the original copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this work without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Authors:
Hao, Yunfei <[email protected]>
-->
<html>
<head>
<title>CSP Test: csp_default-src_asterisk_script</title>
<link rel="author" title="Intel" href="http://www.intel.com"/>
<link rel="help" href="http://www.w3.org/TR/2012/CR-CSP-20121115/#default-src"/>
<meta name="flags" content=""/>
<meta name="assert" content="default-src * 'unsafe-inline'"/>
<meta charset="utf-8"/>
<script src="../resources/testharness.js"></script>
<script src="../resources/testharnessreport.js"></script>
<script src="../resources/server.js?pipe=sub"></script>
<script src="support/csp.js"></script>
<script id="test"></script>
<script>
document.getElementById("test").src = "http://" + __SERVER__NAME + ":" + __CORS__PORT + "/tests/csp/support/test.js";
</script>
</head>
<body>
<div id="log"></div>
<script>
var t1 = async_test(document.title + "_allowed_int");
var t2 = async_test(document.title + "_allowed_ext");
function runTest() {
t1.step(function() {
assert_true(typeof X == "number", "attribute defined internal");
}, document.title + "_allowed_int");
t1.done();
t2.step(function() {
assert_true(typeof getVideoURI == "function", "Function getVideoURI is defined");
}, document.title + "_allowed_ext");
t2.done();
}
setTimeout(runTest,1000);
</script>
</body>
</html> """
| pk-sam/crosswalk-test-suite | webapi/tct-csp-w3c-tests/csp-py/csp_default-src_asterisk_script.py | Python | bsd-3-clause | 3,198 |
import datetime
import unittest
from nose.tools import raises
from wextractor.extractors.extractor import Extractor
class TestExtractor(unittest.TestCase):
@raises(Exception)
def test_header_matches_dtypes(self):
'''
Tests that mismatched lengths of headers and dtypes raises
'''
Extractor(
'dummy', header=['one', 'two'], dtypes=[unicode, int, datetime.datetime]
)
def test_extractor_has_fields(self):
'''
Tests that an extractor has the proper fields
'''
test = Extractor('dummy')
self.assertEquals(test.target, 'dummy')
self.assertEquals(test.header, None)
self.assertEquals(test.dtypes, None)
if __name__ == '__main__':
unittest.main()
| codeforamerica/w-drive-extractor | test/unit/extractors/test_extractor.py | Python | mit | 752 |
#!/usr/bin/python3
from subframe import Subframe, SubframeError
class Frame(dict):
"""Parse one frame from sonde."""
def __init__(self, data, frame_prev = None):
"""Parse and decode frame. Input data have structure of byte
stream where input data interleaves with data status, eg.
bytes(data0, status0, data1, status1, ...), nonzero status indicate
(potentaly) broken data.
Frame_prev sould contain previous frame, if set it is used to guess
frame structure if frame is broken to try dig out some data."""
dict.__init__(self)
self._parse(data, frame_prev)
def is_broken(self):
return self._broken
def _parse(self, data, frame_prev):
data = data[2*6:2*240]
data, status = data[::2], data[1::2]
idx = 0
self._sf_len = []
self._broken = False
while data:
try:
subframe = Subframe.parse(data, status)
self[subframe.sf_type] = subframe
sf_len = len(subframe)
except SubframeError:
if frame_prev is None:
return
self._broken = True
if len(frame_prev._sf_len) <= idx:
return
sf_len = frame_prev._sf_len[idx]
data = data[sf_len:]
status = status[sf_len:]
self._sf_len.append(sf_len)
idx += 1
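# De-interleaving sketch (assuming the stream layout described in __init__):
# for raw = bytes((0x10, 0, 0x20, 0, 0x30, 1)), data = raw[::2] recovers the
# payload (0x10, 0x20, 0x30) and status = raw[1::2] recovers (0, 0, 1); the
# nonzero third status byte flags that payload byte as (potentially) broken.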
| pinkavaj/rstt | rstt_cli/frame.py | Python | apache-2.0 | 1,450 |
import pytest
import requests
from cfme.utils.appliance.implementations.ui import navigate_to
pytestmark = [pytest.mark.ignore_stream("5.11")]
@pytest.mark.tier(3)
def test_verify_rss_links(appliance):
"""
Polarion:
assignee: jhenner
initialEstimate: 1/4h
casecomponent: WebUI
"""
view = navigate_to(appliance.server, 'RSS')
for row in view.table.rows():
url = row[3].text
req = requests.get(url, verify=False)
assert 200 <= req.status_code < 400, "The url {} seems malformed".format(repr(url))
| nachandr/cfme_tests | cfme/tests/intelligence/test_rss.py | Python | gpl-2.0 | 567 |
# -*- coding: utf-8 -*-
"""
objetos_trayectorias.py: object declarations for the main
loop_trayectorias
@author: Damián E. Stanganelli
"""
import cv2
import numpy as np
from munkres import Munkres
class Trayectorias(object):
"""
    Object that manages the list of trajectories
trayectorias
trayectoriasArchivadas
numeroDeFotograma
indiceDisponible
antiguedadPermitida
costoAceptable
"""
def __init__(self, mediciones, numeroDeFotograma):
self.trayectorias = []
self.trayectoriasArchivadas = []
self.numeroDeFotograma = numeroDeFotograma
self.indiceDisponible = 1
self.antiguedadPermitida = 8
self.costoAceptable = 20
for medicion in mediciones:
self.nuevaTrayectoria(medicion)
def nuevaTrayectoria(self, medicion):
self.trayectorias.append(Trayectoria(medicion,
self.indiceDisponible,
self.numeroDeFotograma)
)
self.indiceDisponible += 1
def predicciones(self):
predicciones = []
for trayectoria in self.trayectorias:
predicciones.append(trayectoria.prediccion)
return predicciones
def asignar(self, mediciones, numeroDeFotograma):
self.numeroDeFotograma = numeroDeFotograma
munkres = Munkres()
predicciones = self.predicciones()
costos = self.calcularCostos(predicciones, mediciones)
asignaciones = munkres.compute(costos)
indicesAsignados = [0]*len(mediciones)
for fila, columna in asignaciones:
costo = costos[fila][columna]
if costo <= self.costoAceptable:
self.trayectorias[fila].asignar(mediciones[columna],
self.numeroDeFotograma)
indicesAsignados[columna] = self.trayectorias[fila].indice
else:
self.trayectorias[fila].asignarPrediccion()
self.nuevaTrayectoria(mediciones[columna])
self.archivarAntiguas()
return indicesAsignados
def calcularCostos(self, predicciones, mediciones):
costos = []
for prediccion in predicciones:
costosFila = []
for medicion in mediciones:
costosFila.append(int(self.calcularCosto(prediccion,
medicion)))
costos.append(costosFila)
return costos
def calcularCosto(self, prediccion, medicion):
return np.sqrt((prediccion[0]-medicion[0])**2 +
(prediccion[1]-medicion[1])**2)
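    # Assignment sketch (hypothetical values): with predictions (0, 0) and
    # (10, 10) and measurements (9, 9) and (1, 1), calcularCostos yields
    # [[12, 1], [1, 12]], so Munkres pairs each prediction with its nearby
    # measurement for a total cost of 2.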
def archivarAntiguas(self):
j = 0
for i in range(len(self.trayectorias)):
if (self.trayectorias[i-j].ultimoFotograma <
(self.numeroDeFotograma - self.antiguedadPermitida)):
trayectoria = self.trayectorias[i-j]
trayectoria.limpiar()
self.trayectoriasArchivadas.append(trayectoria)
del(self.trayectorias[i-j])
j += 1
def archivarTodas(self):
for trayectoria in self.trayectorias:
trayectoria.limpiar()
self.trayectoriasArchivadas.extend(self.trayectorias)
self.trayectorias = []
def mejorTrayectoria(self):
largos = [len(trayectoria.posiciones) for trayectoria in
self.trayectoriasArchivadas]
return self.trayectoriasArchivadas[largos.index(max(largos))]
def trayectoriaPorIndice(self, indice):
indices = [trayectoria.indice for trayectoria in
self.trayectoriasArchivadas]
return self.trayectoriasArchivadas[indices.index(indice)]
class Trayectoria(object):
"""
    A set of attributes that define the trajectory
    Attributes:
    posiciones: list of assigned positions, either measured or predicted
    filtro: associated Kalman filter
    prediccion: a position predicted by the filter
    indice: identifying index; must be unique
    primerFotograma: frame in which the object was created
    ultimoFotograma: last frame in which a measurement was assigned
"""
def __init__(self, medicion, indiceDisponible, numeroDeFotograma):
self.indice = indiceDisponible
self.posiciones = []
self.inicializarFiltro(medicion)
self.primerFotograma = numeroDeFotograma
self.asignar(medicion, numeroDeFotograma)
print(self.indice, self.primerFotograma)
def inicializarFiltro(self, medicion):
        # Kalman filter: states: 4, measurements: 2, control inputs: 0.
self.filtro = cv2.KalmanFilter(4, 2, 0)
        # Filter matrices
self.filtro.measurementMatrix = np.eye(2, 4, dtype=np.float32)
self.filtro.transitionMatrix = np.float32(np.eye(4) +
np.eye(4, 4, 2))
self.filtro.processNoiseCov = np.eye(4, dtype=np.float32) * 0.03
        # Initial position
self.filtro.statePre = np.array([[medicion[0]],
[medicion[1]],
[0],
[0]],
dtype=np.float32)
def asignar(self, medicion, numeroDeFotograma):
self.filtro.correct(np.array([[np.float32(medicion[0])],
[np.float32(medicion[1])]]))
self.prediccion = self.filtro.predict()
self.posiciones.append([int(self.prediccion[0]),
int(self.prediccion[1])])
self.ultimoFotograma = numeroDeFotograma
def asignarPrediccion(self):
self.filtro.correct(np.array([[np.float32(self.prediccion[0])],
[np.float32(self.prediccion[1])]]))
self.prediccion = self.filtro.predict()
def limpiar(self):
self.filtro = None
self.prediccion = None
class Blob(object):
"""
Conjunto de atributos que definen el blob
Atributos:
area
centroide
contorno
"""
def __init__(self, area, centroide, contorno):
self.area = area
self.centroide = centroide
self.contorno = contorno
class Blobs(object):
"""
Objeto que administra el listado de blobs
Atributos:
blobs
menorAreaPermitida
"""
def __init__(self, frg, menorAreaPermitida=20):
self.menorAreaPermitida = menorAreaPermitida
        # Obtain the contours of the blobs
contornos = cv2.findContours(frg.copy(),
cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)[1]
        # Add the blobs to the list
self.blobs = []
for contorno in contornos:
M = cv2.moments(contorno)
area = M['m00']
if area >= self.menorAreaPermitida:
centroide = (int(M['m10']/area), int(M['m01']/area))
self.blobs.append(Blob(area, centroide, contorno))
def areas(self):
areas = []
for blob in self.blobs:
areas.append(blob.area)
return areas
def centroides(self):
centroides = []
for blob in self.blobs:
centroides.append(blob.centroide)
return centroides
def contornos(self):
contornos = []
for blob in self.blobs:
contornos.append(blob.contorno)
return contornos
    def ordenarPorArea(self):
        # Sort by area, largest first; the key avoids comparing Blob objects
        # when two areas are equal.
        self.blobs = [b for (a, b) in sorted(zip(self.areas(), self.blobs),
                                             key=lambda par: par[0],
                                             reverse=True)]
def tomarMayores(self, cantidad):
self.ordenarPorArea()
self.blobs = self.blobs[:cantidad]
def graficar(self, imagen, indices):
for blob, indice in zip(self.blobs, indices):
cv2.drawContours(imagen,
blob.contorno,
-1,
(0, 0, 255),
2)
if indice != 0:
cv2.putText(imagen,
str(indice),
blob.centroide,
cv2.FONT_HERSHEY_SIMPLEX,
0.5,
(0, 255, 0),
thickness=2)
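# Minimal usage sketch (hedged): `frg` is assumed to be a binary foreground
# mask produced by some background-subtraction step, and `fotogramas` an
# iterable of (numeroDeFotograma, frg) pairs; both names are illustrative
# and not part of this module.
#
#   trayectorias = None
#   for numeroDeFotograma, frg in fotogramas:
#       blobs = Blobs(frg, menorAreaPermitida=20)
#       if trayectorias is None:
#           trayectorias = Trayectorias(blobs.centroides(), numeroDeFotograma)
#       else:
#           indices = trayectorias.asignar(blobs.centroides(), numeroDeFotograma)
#   trayectorias.archivarTodas()
#   mejor = trayectorias.mejorTrayectoria()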
| sebalander/VisionUNQ | visionUNQ/objetos_trayectorias.py | Python | bsd-3-clause | 8,447 |
# Generated by Django 1.9.6 on 2016-05-10 09:46
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('appliances', '0030_bugquery'),
('appliances', '0031_auto_20160506_0848'),
]
operations = [
]
| nachandr/cfme_tests | sprout/appliances/migrations/0032_merge.py | Python | gpl-2.0 | 270 |
# coding: utf8
# textutil.py
# 10/8/2012 jichi
if __name__ == '__main__':
import sys
sys.path.append('..')
import debug
debug.initenv()
import re
from sakurakit import skstr
from unitraits import unichars, jpchars
from windefs import winlocale
import convutil, defs
## Encoding ##
# SHIFT-JIS VS CP932
# http://d.hatena.ne.jp/r_ikeda/20111105/shift_jis
def to_unicode(s, enc, errors='ignore'):
"""
@param enc str not None
@param s str or bytearray or None
@return unicode or u""
"""
if not s:
return u""
enc = winlocale.encoding2py(enc) or enc
return s.decode(enc, errors=errors)
def from_unicode(s, enc, errors='ignore'):
"""
@param enc str not None
  @param s unicode or None
  @return str or ""
"""
if not s:
return u""
enc = winlocale.encoding2py(enc) or enc
return s.encode(enc, errors=errors)
## Helpers ##
__space_re = re.compile(r'\s{2,}')
def remove_repeat_spaces(text):
"""
@param text unicode
@return unicode
"""
return __space_re.sub(' ', text) if text else ''
__repeat_re = re.compile(r'(.+)\1+', re.DOTALL) # http://stackoverflow.com/questions/12468613/regex-to-remove-repeated-character-pattern-in-a-string
def remove_repeat_text(text):
"""
@param text unicode
@return unicode
"""
return __repeat_re.sub(r'\1', text) if text else ''
# http://e-words.jp/p/r-ascii.html
# 0-31, except:
# - x0a
# - x0d
__illegal_re = re.compile(r'[\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0b\x0c\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f]')
def remove_illegal_text(text):
"""
@param text unicode
@return unicode
"""
return __illegal_re.sub('', text) if text else ''
def is_illegal_text(text):
"""
@param text unicode
@return bool
"""
return bool(__illegal_re.search(text))
__beauty_text_re = re.compile(ur'([。?!」】])(?![。!?」]|$)')
def beautify_text(text):
"""
@param text unicode
@return unicode
"""
return __beauty_text_re.sub('\\1\n', text) #.replace(u'\n」', u'」')
__beauty_sub_re = re.compile(ur'】(?!\n)')
def beautify_subtitle(text):
"""
@param text unicode
@return unicode
"""
return __beauty_sub_re.sub('】\n', text)
__normalize_name_re = re.compile(ur"[【】]")
def normalize_name(text):
  """
  @param text unicode
  @return unicode
  """
  return __normalize_name_re.sub('', text)
#def skip_empty_line(text):
# """
# @param text unicode
# @return bool
# """
# return bool(text) and text != '\n'
def normalize_punct(text):
"""
@param text unicode
@return unicode
"""
return text.replace(u"〜", u"~").replace(u"‥", u"…").replace("...", u"…")
__match_kata_hira_punc_re = re.compile(r"[%s]+" % ''.join((jpchars.s_kata, jpchars.s_hira, jpchars.s_punct)))
def match_kata_hira_punc(text):
"""
@param text unicode
@return bool
"""
return bool(__match_kata_hira_punc_re.match(text))
# http://www.sakuradite.com/wiki/zh/VNR/Voice_Settings
#import config
#repair_zunko_text = skstr.multireplacer(
# config.load_yaml_file(config.ZUNKO_YAML_LOCATION)['escape']
#)
def __capitalize_sentence_s(m): # with space
ch = m.group(2)
if ch.isdigit():
return (m.group(1) or '') + ch # do not change
else:
return ' ' + ch.upper()
def __capitalize_sentence_ns(m): # without space
return m.group(1).upper()
#__capitalize_suffix = r"(\s)*(\w)"
__capitalize_period_re = re.compile(r"(?<=\w\.)(\s)*(\w)", re.UNICODE) # space
__capitalize_punct_re = re.compile(r"(?<=[?!])(\s)*(\w)", re.UNICODE) # space
__capitalize_paragraph_re = re.compile(ur"(?:^|(?<=[「」【】]))(\w)", re.UNICODE) # no space
def capitalize_sentence(text):
"""
@param text unicode
@return unicode
"""
text = __capitalize_paragraph_re.sub(__capitalize_sentence_ns, text)
text = __capitalize_punct_re.sub(__capitalize_sentence_s, text)
text = __capitalize_period_re.sub(__capitalize_sentence_s, text)
return text
__space_punct_re = re.compile(r"\s+(?=[%s])" % (jpchars.s_punct + ',.?!'))
def remove_space_before_punct(text):
"""
@param text unicode
@return unicode
"""
return __space_punct_re.sub('', text)
# Example sentence to test for LEC
# ひとまずいつものように今月の雑誌に目を通そう
def __capitalize_html_sentence_s(m): # with space
ch = m.group(3)
if ch.isdigit():
return (m.group(1) or '') + m.group(2) + ch # do not change
else:
return ' ' + m.group(2) + ch.upper()
def __capitalize_html_sentence_ns(m): # without space
return m.group(1) + m.group(2).upper()
__capitalize_html_period_re = re.compile(r"(?<=\w\.)(\s)*(\<[^>]+?\>)(\w)", re.UNICODE) # space
__capitalize_html_punct_re = re.compile(r"(?<=[?!])(\s)*(\<[^>]+?\>)(\w)", re.UNICODE) # space
__capitalize_html_paragraph_re = re.compile(ur"(?:^|(?<=[【】「」]))(\<[^>]+?\>)(\w)", re.UNICODE) # no space
def capitalize_html_sentence(text):
"""
@param text unicode containing html tags
@return unicode
"""
text = capitalize_sentence(text)
if '<' in text and '>' in text:
text = __capitalize_html_paragraph_re.sub(__capitalize_html_sentence_ns, text)
text = __capitalize_html_punct_re.sub(__capitalize_html_sentence_s, text)
text = __capitalize_html_period_re.sub(__capitalize_html_sentence_s, text)
return text
__html_tag_re = re.compile(r'<[^>]*>')
def remove_html_tags(text):
"""
@param text unicode
@return unicode
"""
return __html_tag_re.sub('', text.replace('<br/>', '\n')) if '<' in text else text
__html_alphabet_re1 = re.compile(r'>[^<>]+<')
__html_alphabet_re2 = re.compile(r'>[^<>]+$')
__html_alphabet_re3 = re.compile(r'^[^<>]+<')
def convert_html_alphabet(text, *args, **kwargs):
"""
@param text unicode
@param* to str
@param* fr str currently not used
@return unicode
"""
conv = convutil.toalphabet
if '>' not in text or '<' not in text:
return conv(text, *args, **kwargs)
def repl(m):
return conv(m.group(), *args, **kwargs)
for rx in __html_alphabet_re1, __html_alphabet_re2, __html_alphabet_re3:
text = rx.sub(repl, text)
return text
__re_chars = re.compile(r"[%s]" % re.escape(
skstr.REGEX_SPECIAL_CHARS + r"{}"
))
def mightbe_regex(text):
"""
@param text unicode
@return bool
"""
return bool(__re_chars.search(text))
def validate_regex(text):
"""
@param text unicode
@return bool
"""
#text = text.replace('[^', '[\\')
return not text or skstr.checkpair(text)
def validate_macro(text):
"""
@param text unicode
@return bool
"""
return not text or skstr.checkpair(text, pair=('{','}'))
_rx_term_role = re.compile(r'^[a-yA-Y0-9_,]+$')
def validate_term_role(text):
"""
@param text unicode
@return bool
"""
return not text or bool(_rx_term_role.match(text))
_s_punct = unichars.s_ascii_punct + jpchars.s_punct
def ispunct(ch):
"""
@param text unicode
@return bool
"""
return ch in _s_punct
_re_punct_space = re.compile(r' +(?=[%s])' % re.escape('-' + _s_punct) )
def remove_punct_space(text):
"""
@param text unicode
@return text
"""
return _re_punct_space.sub('', text) if ' ' in text else text
_re_html_punct_space = re.compile(r' +(?=<[^>]+>[%s])' % re.escape('-' + _s_punct) )
def remove_html_punct_space(text):
"""
@param text unicode
@return text
"""
if ' ' in text:
text = _re_punct_space.sub('', text)
if '<' in text:
text = _re_html_punct_space.sub('', text)
return text
if __name__ == '__main__':
t = u"かたがな"
print match_kata_hira_punc(t)
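  # A few extra illustrative checks (sample strings are arbitrary):
  print beautify_text(u"「おはよう。」今日はいい天気?そうだね!")
  print capitalize_sentence(u"hello. how are you? fine!")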
# EOF
| Dangetsu/vnr | Frameworks/Sakura/py/apps/reader/utilities/textutil.py | Python | gpl-3.0 | 7,550 |
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class authenticationvserver_authenticationpolicy_binding(base_resource) :
""" Binding class showing the authenticationpolicy that can be bound to authenticationvserver.
"""
def __init__(self) :
self._policy = ""
self._priority = 0
self._acttype = 0
self._nextfactor = ""
self._gotopriorityexpression = ""
self._name = ""
self._secondary = False
self._groupextraction = False
self.___count = 0
@property
def priority(self) :
"""The priority, if any, of the vpn vserver policy.
"""
try :
return self._priority
except Exception as e:
raise e
@priority.setter
def priority(self, priority) :
"""The priority, if any, of the vpn vserver policy.
"""
try :
self._priority = priority
except Exception as e:
raise e
@property
def name(self) :
"""Name of the authentication virtual server to which to bind the policy.<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
"""Name of the authentication virtual server to which to bind the policy.<br/>Minimum length = 1
"""
try :
self._name = name
except Exception as e:
raise e
@property
def nextfactor(self) :
"""On success invoke label.
"""
try :
return self._nextfactor
except Exception as e:
raise e
@nextfactor.setter
def nextfactor(self, nextfactor) :
"""On success invoke label.
"""
try :
self._nextfactor = nextfactor
except Exception as e:
raise e
@property
def gotopriorityexpression(self) :
"""Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE.
"""
try :
return self._gotopriorityexpression
except Exception as e:
raise e
@gotopriorityexpression.setter
def gotopriorityexpression(self, gotopriorityexpression) :
"""Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE.
"""
try :
self._gotopriorityexpression = gotopriorityexpression
except Exception as e:
raise e
@property
def secondary(self) :
"""Applicable only while bindind classic authentication policy as advance authentication policy use nFactor.
"""
try :
return self._secondary
except Exception as e:
raise e
@secondary.setter
def secondary(self, secondary) :
"""Applicable only while bindind classic authentication policy as advance authentication policy use nFactor
"""
try :
self._secondary = secondary
except Exception as e:
raise e
@property
def policy(self) :
"""The name of the policy, if any, bound to the authentication vserver.
"""
try :
return self._policy
except Exception as e:
raise e
@policy.setter
def policy(self, policy) :
"""The name of the policy, if any, bound to the authentication vserver.
"""
try :
self._policy = policy
except Exception as e:
raise e
@property
def groupextraction(self) :
"""Applicable only while bindind classic authentication policy as advance authentication policy use nFactor.
"""
try :
return self._groupextraction
except Exception as e:
raise e
@groupextraction.setter
def groupextraction(self, groupextraction) :
"""Applicable only while bindind classic authentication policy as advance authentication policy use nFactor
"""
try :
self._groupextraction = groupextraction
except Exception as e:
raise e
@property
def acttype(self) :
try :
return self._acttype
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(authenticationvserver_authenticationpolicy_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.authenticationvserver_authenticationpolicy_binding
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
if (self.name) :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
try :
if resource and type(resource) is not list :
updateresource = authenticationvserver_authenticationpolicy_binding()
updateresource.name = resource.name
updateresource.policy = resource.policy
updateresource.secondary = resource.secondary
updateresource.groupextraction = resource.groupextraction
updateresource.nextfactor = resource.nextfactor
updateresource.gotopriorityexpression = resource.gotopriorityexpression
return updateresource.update_resource(client)
else :
if resource and len(resource) > 0 :
updateresources = [authenticationvserver_authenticationpolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].name = resource[i].name
updateresources[i].policy = resource[i].policy
updateresources[i].secondary = resource[i].secondary
updateresources[i].groupextraction = resource[i].groupextraction
updateresources[i].nextfactor = resource[i].nextfactor
updateresources[i].gotopriorityexpression = resource[i].gotopriorityexpression
return cls.update_bulk_request(client, updateresources)
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
try :
if resource and type(resource) is not list :
deleteresource = authenticationvserver_authenticationpolicy_binding()
deleteresource.name = resource.name
deleteresource.policy = resource.policy
deleteresource.secondary = resource.secondary
deleteresource.groupextraction = resource.groupextraction
return deleteresource.delete_resource(client)
else :
if resource and len(resource) > 0 :
deleteresources = [authenticationvserver_authenticationpolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i].name
deleteresources[i].policy = resource[i].policy
deleteresources[i].secondary = resource[i].secondary
deleteresources[i].groupextraction = resource[i].groupextraction
return cls.delete_bulk_request(client, deleteresources)
except Exception as e :
raise e
@classmethod
def get(cls, service, name) :
""" Use this API to fetch authenticationvserver_authenticationpolicy_binding resources.
"""
try :
obj = authenticationvserver_authenticationpolicy_binding()
obj.name = name
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, name, filter_) :
""" Use this API to fetch filtered set of authenticationvserver_authenticationpolicy_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = authenticationvserver_authenticationpolicy_binding()
obj.name = name
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, name) :
""" Use this API to count authenticationvserver_authenticationpolicy_binding resources configued on NetScaler.
"""
try :
obj = authenticationvserver_authenticationpolicy_binding()
obj.name = name
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, name, filter_) :
""" Use this API to count the filtered set of authenticationvserver_authenticationpolicy_binding resources.
        Filter string should be in JSON format, e.g. "port:80,servicetype:HTTP".
"""
try :
obj = authenticationvserver_authenticationpolicy_binding()
obj.name = name
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class authenticationvserver_authenticationpolicy_binding_response(base_response) :
def __init__(self, length=1) :
self.authenticationvserver_authenticationpolicy_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.authenticationvserver_authenticationpolicy_binding = [authenticationvserver_authenticationpolicy_binding() for _ in range(length)]
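# Hedged usage sketch: the NetScaler address, credentials and object names
# below are placeholders, not values from this module.
#
#   from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service
#
#   client = nitro_service("192.0.2.10", "http")
#   client.login("nsroot", "nsroot")
#   binding = authenticationvserver_authenticationpolicy_binding()
#   binding.name = "auth_vs1"        # authentication vserver
#   binding.policy = "ldap_pol1"     # policy to bind
#   binding.priority = 100
#   authenticationvserver_authenticationpolicy_binding.add(client, binding)
#   client.logout()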
| mahabs/nitro | nssrc/com/citrix/netscaler/nitro/resource/config/authentication/authenticationvserver_authenticationpolicy_binding.py | Python | apache-2.0 | 9,955 |
import speech_recognition as sr
import os
from .utils import ShellParser
class Parser(ShellParser):
"""
Extract text (i.e. speech) from an audio file, using SpeechRecognition.
    Since SpeechRecognition expects a .wav file with 1 channel, the audio
    file has to be converted via sox if it is not already compliant.
Note: for testing, use -
http://www2.research.att.com/~ttsweb/tts/demo.php,
with Rich (US English) for best results
"""
def extract(self, filename, **kwargs):
speech = ''
# convert to wav, if not already .wav
base, ext = os.path.splitext(filename)
if ext != '.wav':
temp_filename = self.convert_to_wav(filename)
try:
speech = self.extract(temp_filename, **kwargs)
finally: # make sure temp_file is deleted
os.remove(temp_filename)
else:
r = sr.Recognizer()
with sr.WavFile(filename) as source:
audio = r.record(source)
try:
speech = r.recognize(audio)
except LookupError: # audio is not understandable
speech = ''
# add a newline, to make output cleaner
speech += '\n'
return speech
def convert_to_wav(self, filename):
"""
Uses sox cmdline tool, to convert audio file to .wav
Note: for testing, use -
http://www.text2speech.org/,
with American Male 2 for best results
"""
command = (
'sox -G -c 1 "%(filename)s" {0}'
)
temp_filename = '{0}.wav'.format(self.temp_filename())
self.run(command.format(temp_filename) % locals())
return temp_filename
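# Hedged usage sketch (the file name is illustrative; in normal use textract
# dispatches to this parser via textract.process("speech.mp3")):
#
#   parser = Parser()
#   text = parser.extract("speech.mp3")  # non-.wav input is converted via sox first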
| Nift/textract | textract/parsers/audio.py | Python | mit | 1,736 |
import io
import json
import os
from pathlib import Path
from random import randint, random
from textwrap import dedent
from typing import List, Tuple
import numpy as np
import pandas as pd
import pytest
from ecl.util.util import BoolVector
from numpy.testing import assert_almost_equal, assert_array_equal
from ert_shared.libres_facade import LibresFacade
from ert_shared.storage import extraction
from res.enkf import ErtRunContext
from res.enkf.enkf_main import EnKFMain
from res.enkf.res_config import ResConfig
@pytest.mark.parametrize(
"x_axis, expected",
[
([1, 2, 3, 4], ["1", "2", "3", "4"]),
(["a", "b", "c"], ["a", "b", "c"]),
(
[pd.Timestamp(x, unit="d") for x in range(4)],
[
"1970-01-01T00:00:00",
"1970-01-02T00:00:00",
"1970-01-03T00:00:00",
"1970-01-04T00:00:00",
],
),
],
)
def test_prepare_x_axis(x_axis, expected):
assert expected == extraction._prepare_x_axis(x_axis)
class ErtConfigBuilder:
def __init__(self):
self.ensemble_size = 1
self._priors = {}
self._obs = []
self.job_script = None
def add_general_observation(self, observation_name, response_name, data):
"""Add GENERAL_OBSERVATION
        The `data` parameter is a pandas DataFrame with two columns: the first
        column holds the values and the second the errors. The frame's index
        holds the observation indices, which link the observations to the
        responses.
"""
self._obs.append((observation_name, response_name, data.copy()))
def add_prior(self, name, entry):
assert name not in self._priors
self._priors[name] = entry
return self
def build(self, path=None):
if path is None:
path = Path.cwd()
self._build_ert(path)
self._build_job(path)
self._build_observations(path)
self._build_priors(path)
config = ResConfig(str(path / "test.ert"))
enkfmain = EnKFMain(config)
# The C code doesn't do resource counting correctly, so we need to hook
# ResConfig to EnKFMain because otherwise ResConfig will be deleted and
# EnKFMain will use a dangling pointer.
enkfmain.__config = config
return LibresFacade(enkfmain)
def _build_ert(self, path):
f = (path / "test.ert").open("w")
# Default
f.write(
"JOBNAME poly_%d\n"
"QUEUE_SYSTEM LOCAL\n"
"QUEUE_OPTION LOCAL MAX_RUNNING 50\n"
f"NUM_REALIZATIONS {self.ensemble_size}\n"
)
def _build_job(self, path):
f = (path / "test.ert").open("a")
f.write("INSTALL_JOB job JOB\n" "SIMULATION_JOB job\n")
if self.job_script is None:
            # `true` is an executable that should exist on the PATH of any normal
            # distro, so it is reasonable to rely on it instead of hard-coding a path
(path / "JOB").write_text("EXECUTABLE true\n")
else:
(path / "JOB").write_text(f"EXECUTABLE {path}/script\n")
(path / "script").write_text(self.job_script)
def _build_observations(self, path):
"""
Creates a TIME_MAP and OBS_CONFIG entry in the ERT config. The TIME_MAP is
required for ERT to load the OBS_CONFIG.
        Creates an 'obs_config.txt' file into which the generated observations are written.
"""
if not self._obs:
return
(path / "time_map").write_text("1/10/2006\n")
with (path / "test.ert").open("a") as f:
f.write("OBS_CONFIG obs_config.txt\n")
f.write("TIME_MAP time_map\n")
f.write(
"GEN_DATA RES RESULT_FILE:poly_%d.out REPORT_STEPS:0 INPUT_FORMAT:ASCII\n"
)
with (path / "obs_config.txt").open("w") as f:
for obs_name, resp_name, data in self._obs:
indices = ",".join(str(index) for index in data.index.tolist())
f.write(
f"GENERAL_OBSERVATION {obs_name} {{\n"
f" DATA = {resp_name};\n"
f" INDEX_LIST = {indices};\n"
f" RESTART = 0;\n"
f" OBS_FILE = {obs_name}.txt;\n"
"};\n"
)
with (path / f"{obs_name}.txt").open("w") as fobs:
data.to_csv(fobs, sep=" ", header=False, index=False)
def _build_priors(self, path):
if not self._priors:
return
with (path / "test.ert").open("a") as f:
f.write("GEN_KW COEFFS coeffs.json.in coeffs.json coeffs_priors\n")
with (path / "coeffs.json.in").open("w") as f:
f.write("{\n")
f.write(",\n".join(f' "{name}": <{name}>' for name in self._priors))
f.write("\n}\n")
with (path / "coeffs_priors").open("w") as f:
for name, entry in self._priors.items():
f.write(f"{name} {entry}\n")
@pytest.fixture(autouse=True)
def _chdir_tmp_path(monkeypatch, tmp_path):
"""
All tests in this file must be run in a clean directory
"""
monkeypatch.chdir(tmp_path)
def test_empty_ensemble(client):
ert = ErtConfigBuilder().build()
extraction.post_ensemble_data(ert, -1)
id = client.fetch_experiment()
# Name is "default"
for ens in client.get(f"/experiments/{id}/ensembles").json():
assert (
client.get(f"/ensembles/{ens['id']}/userdata").json()["name"] == "default"
)
# No priors exist
assert client.get(f"/experiments/{id}").json()["priors"] == {}
def test_empty_ensemble_with_name(client):
name = _rand_name()
# Create case with given name
ert = ErtConfigBuilder().build()
ert.select_or_create_new_case(name)
# Post initial ensemble
extraction.post_ensemble_data(ert, -1)
# Compare results
id = client.fetch_experiment()
for ens in client.get(f"/experiments/{id}/ensembles").json():
assert client.get(f"/ensembles/{ens['id']}/userdata").json()["name"] == name
def test_priors(client):
priors = _make_priors()
# Add priors to ERT config
builder = ErtConfigBuilder()
for name, conf, _ in priors:
builder.add_prior(name, conf)
ert = builder.build()
# Start ERT
_create_runpath(ert)
# Post initial ensemble
extraction.post_ensemble_data(ert, -1)
# Compare results
id = client.fetch_experiment()
actual_priors = client.get(f"/experiments/{id}").json()["priors"]
assert len(priors) == len(actual_priors)
for name, _, resp in priors:
assert actual_priors[f"COEFFS:{name}"] == resp
def test_parameters(client):
priors = _make_priors()
# Add priors to ERT config
builder = ErtConfigBuilder()
builder.ensemble_size = 10
for name, conf, _ in priors:
builder.add_prior(name, conf)
ert = builder.build()
# Start ERT
_create_runpath(ert)
# Post initial ensemble
extraction.post_ensemble_data(ert, -1)
# Get ensemble_id
experiment_id = client.fetch_experiment()
ensembles = client.get(f"/experiments/{experiment_id}/ensembles").json()
ensemble_id = ensembles[0]["id"]
# Compare parameters (+ 2 due to the two log10_ coeffs)
parameters = client.get(f"/ensembles/{ensemble_id}/parameters").json()
assert len(parameters) == len(priors) + 2
for name, _, prior in priors:
assert f"COEFFS:{name}" in parameters
if prior["function"] in ("lognormal", "loguniform"):
assert f"LOG10_COEFFS:{name}" in parameters
# Compare records (+ 2 due to the two log10_ coeffs)
records = client.get(f"/ensembles/{ensemble_id}/records").json()
assert len(records) == len(priors) + 2
for name, _, prior in priors:
assert f"COEFFS:{name}" in records
if prior["function"] in ("lognormal", "loguniform"):
assert f"LOG10_COEFFS:{name}" in records
parameters_df = _get_parameters()
assert len(parameters_df) == builder.ensemble_size
for col in parameters_df:
record_data = client.get(
f"/ensembles/{ensemble_id}/records/COEFFS:{col}",
headers={"accept": "application/x-parquet"},
).content
stream = io.BytesIO(record_data)
df = pd.read_parquet(stream)
# ERT produces a low-quality version
assert_almost_equal(parameters_df[col].values, df.values.flatten(), decimal=4)
def test_observations(client):
data = pd.DataFrame([[1, 0.1], [2, 0.2], [3, 0.3], [4, 0.4]], index=[2, 4, 6, 8])
builder = ErtConfigBuilder()
builder.add_general_observation("OBS", "RES", data)
ert = builder.build()
# Post ensemble
extraction.post_ensemble_data(ert, builder.ensemble_size)
# Experiment should have 1 observation
experiment_id = client.fetch_experiment()
observations = client.get(f"/experiments/{experiment_id}/observations").json()
assert len(observations) == 1
# Validate data
obs = observations[0]
assert obs["name"] == "OBS"
assert obs["values"] == data[0].tolist()
assert obs["errors"] == data[1].tolist()
assert obs["x_axis"] == data.index.astype(str).tolist()
assert obs["transformation"] is None
def test_observation_transformation(client):
data = pd.DataFrame([[1, 0.1], [2, 0.2], [3, 0.3], [4, 0.4]], index=[0, 1, 2, 3])
builder = ErtConfigBuilder()
builder.ensemble_size = 5
builder.add_general_observation("OBS", "RES", data)
builder.job_script = dedent(
"""\
#!/usr/bin/python3
import re
from pathlib import Path
# Obtain realization index by looking at the name of current directory
real = int(re.match("^realization([0-9]+)$", Path.cwd().name)[1])
outlier = 1000 + real # Large number, ERT disables
active_a = 1 + real
active_b = 2 + real
small_var = 3 # Small variation between responses, ERT disables
output = [outlier, active_a, active_b, small_var]
with open("poly_0.out", "w") as f:
f.write("\\n".join(str(x) for x in output))
"""
)
ert = builder.build()
# Post first ensemble
parent_ensemble_id = extraction.post_ensemble_data(ert, builder.ensemble_size)
# Create runpath and run ERT
run_context = _create_runpath(ert)
_evaluate_ensemble(ert, run_context)
_run_update(ert, run_context)
# Post second ensemble
update_id = extraction.post_update_data(ert, parent_ensemble_id, "boruvka")
child_ensemble_id = extraction.post_ensemble_data(
ert, builder.ensemble_size, update_id
)
# Ensemble should have 1 observation with transformation
observations = client.get(f"/ensembles/{child_ensemble_id}/observations").json()
assert len(observations) == 1
# Validate data
obs = observations[0]
assert obs["name"] == "OBS"
assert obs["values"] == data[0].tolist()
assert obs["errors"] == data[1].tolist()
trans = obs["transformation"]
assert trans["name"] == "OBS"
assert trans["active"] == [False, True, True, False]
assert trans["scale"] == [1.0] * 4
assert trans["observation_id"] == obs["id"]
def test_post_ensemble_results(client):
data = pd.DataFrame([[1, 0.1], [2, 0.2], [3, 0.3], [4, 0.4]], index=[2, 4, 6, 8])
response_name = "RES"
# Add priors to ERT config
builder = ErtConfigBuilder()
builder.ensemble_size = 2
builder.add_general_observation("OBS", response_name, data)
data = [0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9]
df = pd.DataFrame(data)
# Create job
script = dedent(
f"""\
#!/usr/bin/python3
if __name__ == "__main__":
output = {str(data)}
with open("poly_0.out", "w") as f:
f.write("\\n".join(map(str, output)))
"""
)
builder.job_script = script
ert = builder.build()
# Create runpath and run ERT
run_context = _create_runpath(ert)
_evaluate_ensemble(ert, run_context)
# Post initial ensemble
ensemble_id = extraction.post_ensemble_data(ert, builder.ensemble_size)
# Post ensemble results
extraction.post_ensemble_results(ert, ensemble_id)
# Retrieve response data
data = client.get(f"/ensembles/{ensemble_id}/responses/{response_name}/data")
stream = io.BytesIO(data.content)
response_df = pd.read_csv(stream, index_col=0, float_precision="round_trip")
for realization in range(0, builder.ensemble_size):
assert_array_equal(response_df.loc[realization].values, df.values.flatten())
def test_post_update_data(client):
data = pd.DataFrame(np.random.rand(4, 2), index=[2, 4, 6, 8])
builder = ErtConfigBuilder()
builder.add_general_observation("OBS", "RES", data)
ert = builder.build()
# Post two ensembles
parent_ensemble_id = extraction.post_ensemble_data(ert, builder.ensemble_size)
update_id = extraction.post_update_data(ert, parent_ensemble_id, "boruvka")
child_ensemble_id = extraction.post_ensemble_data(
ert, builder.ensemble_size, update_id
)
# Experiment should have two ensembles
experiment_id = client.fetch_experiment()
ensembles = client.get(f"/experiments/{experiment_id}/ensembles").json()
assert len(ensembles) == 2
# Parent ensemble should have a child
assert ensembles[0]["child_ensemble_ids"] == [child_ensemble_id]
assert ensembles[0]["parent_ensemble_id"] is None
# Child ensemble should have a parent
assert ensembles[1]["child_ensemble_ids"] == []
assert ensembles[1]["parent_ensemble_id"] == parent_ensemble_id
def _make_priors() -> List[Tuple[str, str, dict]]:
def normal():
# trans_normal @ libres/enkf/trans_func.cpp
#
# Well defined for all real values of a and b
a, b = random(), random()
return (f"NORMAL {a} {b}", dict(function="normal", mean=a, std=b))
def lognormal():
# trans_lognormal @ libres/enkf/trans_func.cpp
#
# Well defined for all real values of a and b
a, b = random(), random()
return (
f"LOGNORMAL {a} {b}",
{"function": "lognormal", "mean": a, "std": b},
)
def truncnormal():
# trans_truncated_normal @ libres/enkf/trans_func.cpp
#
# Well defined for all real values of a, b, c and d
a, b, c, d = [random() for _ in range(4)]
return (
f"TRUNCATED_NORMAL {a} {b} {c} {d}",
{
"function": "ert_truncnormal",
"mean": a,
"std": b,
"min": c,
"max": d,
},
)
def uniform():
# trans_unif @ libres/enkf/trans_func.cpp
#
# Well defined for all real values of a and b
a, b = random(), random()
return (f"UNIFORM {a} {b}", {"function": "uniform", "min": a, "max": b})
def loguniform():
# trans_logunif @ libres/enkf/trans_func.cpp
#
# Well defined for strictly positive a, b due to log()
a, b = random() + 1, random() + 1 # +1 to avoid zero
return (
f"LOGUNIF {a} {b}",
{"function": "loguniform", "min": a, "max": b},
)
def const():
a = random()
return (f"CONST {a}", {"function": "const", "value": a})
def duniform():
# trans_dunif @ libres/enkf/trans_func.cpp
#
# Well defined for all real values of b and c, integer values >= 2 of
# bins (due to division by [bins - 1])
bins = randint(2, 100)
b, c = random(), random()
return (
f"DUNIF {bins} {b} {c}",
{"function": "ert_duniform", "bins": bins, "min": b, "max": c},
)
def erf():
# trans_errf @ libres/enkf/trans_func.cpp
#
# Well defined for all real values of a, b, c, non-zero real values of d
# (width) due to division by zero.
a, b, c, d = [random() + 1 for _ in range(4)] # +1 to all to avoid zero
return (
f"ERRF {a} {b} {c} {d}",
{"function": "ert_erf", "min": a, "max": b, "skewness": c, "width": d},
)
def derf():
# trans_derrf @ libres/enkf/trans_func.cpp
#
# Well defined for all real values of a, b, c, non-zero real values of d
# (width) due to division by zero, integer values >= 2 of bins due to
# division by (bins - 1)
bins = randint(2, 100)
a, b, c, d = [random() + 1 for _ in range(4)] # +1 to all to avoid zero
return (
f"DERRF {bins} {a} {b} {c} {d}",
{
"function": "ert_derf",
"bins": bins,
"min": a,
"max": b,
"skewness": c,
"width": d,
},
)
return [
(_rand_name(), *p())
for p in (
normal,
lognormal,
truncnormal,
uniform,
loguniform,
const,
duniform,
erf,
derf,
)
]
def _rand_name():
import random
import string
return "".join(random.choice(string.ascii_lowercase) for _ in range(8))
def _create_runpath(ert: LibresFacade, iteration: int = 0) -> ErtRunContext:
"""
Instantiate an ERT runpath. This will create the parameter coefficients.
"""
enkf_main = ert._enkf_main
result_fs = ert.get_current_fs()
target_fs = ert._enkf_main.getEnkfFsManager().getFileSystem("iter")
model_config = enkf_main.getModelConfig()
runpath_fmt = model_config.getRunpathFormat()
jobname_fmt = model_config.getJobnameFormat()
subst_list = enkf_main.getDataKW()
run_context = ErtRunContext.ensemble_smoother(
result_fs,
target_fs,
BoolVector(default_value=True, initial_size=ert.get_ensemble_size()),
runpath_fmt,
jobname_fmt,
subst_list,
iteration,
)
enkf_main.getEnkfSimulationRunner().createRunPath(run_context)
return run_context
def _evaluate_ensemble(ert: LibresFacade, run_context: ErtRunContext):
"""
Launch ensemble experiment with the created config
"""
queue_config = ert.get_queue_config()
_job_queue = queue_config.create_job_queue()
ert._enkf_main.getEnkfSimulationRunner().runSimpleStep(_job_queue, run_context)
def _run_update(ert: LibresFacade, run_context: ErtRunContext):
es_update = ert._enkf_main.getESUpdate()
assert es_update.smootherUpdate(run_context)
def _get_parameters() -> pd.DataFrame:
params_json = [
json.loads(path.read_text())
for path in sorted(Path.cwd().glob("simulations/realization*/coeffs.json"))
]
return pd.DataFrame(params_json)
| joakim-hove/ert | tests/ert_tests/storage/test_extraction.py | Python | gpl-3.0 | 18,961 |
from lib.hachoir_core.field import (MissingField, BasicFieldSet, Field, ParserError,
createRawField, createNullField, createPaddingField, FakeArray)
from lib.hachoir_core.dict import Dict, UniqKeyError
from lib.hachoir_core.error import HACHOIR_ERRORS
from lib.hachoir_core.tools import lowerBound
import lib.hachoir_core.config as config
class GenericFieldSet(BasicFieldSet):
"""
    Ordered list of fields. Use operator [] to access fields using their
    name (field names are unique in a field set, but not in the whole
    document).
    Class attributes:
    - endian: Byte order (L{BIG_ENDIAN} or L{LITTLE_ENDIAN}). Optional if the
      field set has a parent ;
    - static_size: (optional) Size of FieldSet in bits. This attribute should
      be used in parsers of constant size.
    Instance attributes/methods:
    - _fields: Ordered dictionary of all fields, which may be incomplete
      because it is fed lazily when a field is requested ;
    - stream: Input stream used to feed field values
    - root: The root of all field sets ;
    - __len__(): Number of fields, may need to create the field set ;
    - __getitem__(): Get a field by its name or its path.
    And attributes inherited from Field class:
    - parent: Parent field (may be None if it's the root) ;
    - name: Field name (unique in parent field set) ;
    - value: The field set ;
    - address: Field address (in bits) relative to parent ;
    - description: A string describing the content (can be None) ;
    - size: Size of field set in bits, may need to create the field set.
    Event handling:
    - "connectEvent": Connect a handler to an event ;
    - "raiseEvent": Raise an event.
    To implement a new field set, you need to:
    - create a class which inherits from FieldSet ;
    - write a createFields() method using lines like:
      yield Class(self, "name", ...) ;
    - and maybe set the endian and static_size class attributes.
"""
_current_size = 0
def __init__(self, parent, name, stream, description=None, size=None):
"""
Constructor
@param parent: Parent field set, None for root parser
@param name: Name of the field, have to be unique in parent. If it ends
with "[]", end will be replaced with "[new_id]" (eg. "raw[]"
becomes "raw[0]", next will be "raw[1]", and then "raw[2]", etc.)
@type name: str
@param stream: Input stream from which data are read
@type stream: L{InputStream}
@param description: Optional string description
@type description: str|None
@param size: Size in bits. If it's None, size will be computed. You
can also set size with class attribute static_size
"""
BasicFieldSet.__init__(self, parent, name, stream, description, size)
self._fields = Dict()
self._field_generator = self.createFields()
self._array_cache = {}
self.__is_feeding = False
def array(self, key):
try:
return self._array_cache[key]
except KeyError:
array = FakeArray(self, key)
self._array_cache[key] = array
return self._array_cache[key]
def reset(self):
"""
Reset a field set:
* clear fields ;
* restart field generator ;
* set current size to zero ;
* clear field array count.
But keep: name, value, description and size.
"""
BasicFieldSet.reset(self)
self._fields = Dict()
self._field_generator = self.createFields()
self._current_size = 0
self._array_cache = {}
def __str__(self):
return '<%s path=%s, current_size=%s, current length=%s>' % \
(self.__class__.__name__, self.path, self._current_size, len(self._fields))
def __len__(self):
"""
Returns number of fields, may need to create all fields
if it's not done yet.
"""
if self._field_generator is not None:
self._feedAll()
return len(self._fields)
def _getCurrentLength(self):
return len(self._fields)
current_length = property(_getCurrentLength)
def _getSize(self):
if self._size is None:
self._feedAll()
return self._size
size = property(_getSize, doc="Size in bits, may create all fields to get size")
def _getCurrentSize(self):
assert not(self.done)
return self._current_size
current_size = property(_getCurrentSize)
eof = property(lambda self: self._checkSize(self._current_size + 1, True) < 0)
def _checkSize(self, size, strict):
field = self
while field._size is None:
if not field._parent:
assert self.stream.size is None
if not strict:
return None
if self.stream.sizeGe(size):
return 0
break
size += field._address
field = field._parent
return field._size - size
autofix = property(lambda self: self.root.autofix)
def _addField(self, field):
"""
Add a field to the field set:
* add it into _fields
* update _current_size
May raise a StopIteration() on error
"""
if not issubclass(field.__class__, Field):
raise ParserError("Field type (%s) is not a subclass of 'Field'!"
% field.__class__.__name__)
assert isinstance(field._name, str)
if field._name.endswith("[]"):
self.setUniqueFieldName(field)
if config.debug:
self.info("[+] DBG: _addField(%s)" % field.name)
# required for the msoffice parser
if field._address != self._current_size:
self.warning("Fix address of %s to %s (was %s)" %
(field.path, self._current_size, field._address))
field._address = self._current_size
ask_stop = False
# Compute field size and check that there is enough place for it
self.__is_feeding = True
try:
field_size = field.size
except HACHOIR_ERRORS, err:
if field.is_field_set and field.current_length and field.eof:
self.warning("Error when getting size of '%s': %s" % (field.name, err))
field._stopFeeding()
ask_stop = True
else:
self.warning("Error when getting size of '%s': delete it" % field.name)
self.__is_feeding = False
raise
self.__is_feeding = False
# No more place?
dsize = self._checkSize(field._address + field.size, False)
if (dsize is not None and dsize < 0) or (field.is_field_set and field.size <= 0):
if self.autofix and self._current_size:
self._fixFieldSize(field, field.size + dsize)
else:
raise ParserError("Field %s is too large!" % field.path)
self._current_size += field.size
try:
self._fields.append(field._name, field)
except UniqKeyError, err:
self.warning("Duplicate field name " + unicode(err))
field._name += "[]"
self.setUniqueFieldName(field)
self._fields.append(field._name, field)
if ask_stop:
raise StopIteration()
def _fixFieldSize(self, field, new_size):
if new_size > 0:
if field.is_field_set and 0 < field.size:
field._truncate(new_size)
return
# Don't add the field <=> delete item
if self._size is None:
self._size = self._current_size + new_size
self.warning("[Autofix] Delete '%s' (too large)" % field.path)
raise StopIteration()
def _getField(self, name, const):
field = Field._getField(self, name, const)
if field is None:
if name in self._fields:
field = self._fields[name]
elif self._field_generator is not None and not const:
field = self._feedUntil(name)
return field
def getField(self, key, const=True):
if isinstance(key, (int, long)):
if key < 0:
raise KeyError("Key must be positive!")
if not const:
self.readFirstFields(key+1)
if len(self._fields.values) <= key:
raise MissingField(self, key)
return self._fields.values[key]
return Field.getField(self, key, const)
def _truncate(self, size):
assert size > 0
if size < self._current_size:
self._size = size
while True:
field = self._fields.values[-1]
if field._address < size:
break
del self._fields[-1]
self._current_size = field._address
size -= field._address
if size < field._size:
if field.is_field_set:
field._truncate(size)
else:
del self._fields[-1]
field = createRawField(self, size, "raw[]")
self._fields.append(field._name, field)
self._current_size = self._size
else:
assert size < self._size or self._size is None
self._size = size
if self._size == self._current_size:
self._field_generator = None
def _deleteField(self, index):
field = self._fields.values[index]
size = field.size
self._current_size -= size
del self._fields[index]
return field
def _fixLastField(self):
"""
Try to fix last field when we know current field set size.
Returns new added field if any, or None.
"""
assert self._size is not None
# Stop parser
message = ["stop parser"]
self._field_generator = None
# If last field is too big, delete it
while self._size < self._current_size:
field = self._deleteField(len(self._fields)-1)
message.append("delete field %s" % field.path)
assert self._current_size <= self._size
        # If the current field size is smaller: add a raw field
size = self._size - self._current_size
if size:
field = createRawField(self, size, "raw[]")
message.append("add padding")
self._current_size += field.size
self._fields.append(field._name, field)
else:
field = None
message = ", ".join(message)
self.warning("[Autofix] Fix parser error: " + message)
assert self._current_size == self._size
return field
def _stopFeeding(self):
new_field = None
if self._size is None:
if self._parent:
self._size = self._current_size
elif self._size != self._current_size:
if self.autofix:
new_field = self._fixLastField()
else:
raise ParserError("Invalid parser \"%s\" size!" % self.path)
self._field_generator = None
return new_field
def _fixFeedError(self, exception):
"""
Try to fix a feeding error. Returns False if error can't be fixed,
otherwise returns new field if any, or None.
"""
if self._size is None or not self.autofix:
return False
self.warning(unicode(exception))
return self._fixLastField()
def _feedUntil(self, field_name):
"""
Return the field if it was found, None else
"""
if self.__is_feeding \
or (self._field_generator and self._field_generator.gi_running):
self.warning("Unable to get %s (and generator is already running)"
% field_name)
return None
try:
while True:
field = self._field_generator.next()
self._addField(field)
if field.name == field_name:
return field
except HACHOIR_ERRORS, err:
if self._fixFeedError(err) is False:
raise
except StopIteration:
self._stopFeeding()
return None
def readMoreFields(self, number):
"""
        Read up to 'number' more fields, or do nothing if parsing is done.
Returns number of new added fields.
"""
if self._field_generator is None:
return 0
oldlen = len(self._fields)
try:
for index in xrange(number):
self._addField( self._field_generator.next() )
except HACHOIR_ERRORS, err:
if self._fixFeedError(err) is False:
raise
except StopIteration:
self._stopFeeding()
return len(self._fields) - oldlen
def _feedAll(self):
if self._field_generator is None:
return
try:
while True:
field = self._field_generator.next()
self._addField(field)
except HACHOIR_ERRORS, err:
if self._fixFeedError(err) is False:
raise
except StopIteration:
self._stopFeeding()
def __iter__(self):
"""
Create a generator to iterate on each field, may create new
fields when needed
"""
try:
done = 0
while True:
if done == len(self._fields):
if self._field_generator is None:
break
self._addField( self._field_generator.next() )
for field in self._fields.values[done:]:
yield field
done += 1
except HACHOIR_ERRORS, err:
field = self._fixFeedError(err)
if isinstance(field, Field):
yield field
elif hasattr(field, '__iter__'):
for f in field:
yield f
elif field is False:
raise
except StopIteration:
field = self._stopFeeding()
if isinstance(field, Field):
yield field
elif hasattr(field, '__iter__'):
for f in field:
yield f
def _isDone(self):
return (self._field_generator is None)
done = property(_isDone, doc="Boolean to know if parsing is done or not")
#
# FieldSet_SeekUtility
#
def seekBit(self, address, name="padding[]",
description=None, relative=True, null=False):
"""
Create a field to seek to specified address,
or None if it's not needed.
May raise an (ParserError) exception if address is invalid.
"""
if relative:
nbits = address - self._current_size
else:
nbits = address - (self.absolute_address + self._current_size)
if nbits < 0:
raise ParserError("Seek error, unable to go back!")
if 0 < nbits:
if null:
return createNullField(self, nbits, name, description)
else:
return createPaddingField(self, nbits, name, description)
else:
return None
def seekByte(self, address, name="padding[]", description=None, relative=True, null=False):
"""
Same as seekBit(), but with address in byte.
"""
return self.seekBit(address * 8, name, description, relative, null=null)
#
# RandomAccessFieldSet
#
def replaceField(self, name, new_fields):
# TODO: Check in self and not self.field
# Problem is that "generator is already executing"
if name not in self._fields:
raise ParserError("Unable to replace %s: field doesn't exist!" % name)
assert 1 <= len(new_fields)
old_field = self[name]
total_size = sum( (field.size for field in new_fields) )
if old_field.size != total_size:
raise ParserError("Unable to replace %s: "
"new field(s) hasn't same size (%u bits instead of %u bits)!"
% (name, total_size, old_field.size))
field = new_fields[0]
if field._name.endswith("[]"):
self.setUniqueFieldName(field)
field._address = old_field.address
if field.name != name and field.name in self._fields:
raise ParserError(
"Unable to replace %s: name \"%s\" is already used!"
% (name, field.name))
self._fields.replace(name, field.name, field)
self.raiseEvent("field-replaced", old_field, field)
if 1 < len(new_fields):
index = self._fields.index(new_fields[0].name)+1
address = field.address + field.size
for field in new_fields[1:]:
if field._name.endswith("[]"):
self.setUniqueFieldName(field)
field._address = address
if field.name in self._fields:
raise ParserError(
"Unable to replace %s: name \"%s\" is already used!"
% (name, field.name))
self._fields.insert(index, field.name, field)
self.raiseEvent("field-inserted", index, field)
index += 1
address += field.size
def getFieldByAddress(self, address, feed=True):
"""
Only search in existing fields
"""
if feed and self._field_generator is not None:
self._feedAll()
if address < self._current_size:
i = lowerBound(self._fields.values, lambda x: x.address + x.size <= address)
if i is not None:
return self._fields.values[i]
return None
def writeFieldsIn(self, old_field, address, new_fields):
"""
Can only write in existing fields (address < self._current_size)
"""
# Check size
total_size = sum( field.size for field in new_fields )
if old_field.size < total_size:
raise ParserError( \
"Unable to write fields at address %s " \
"(too big)!" % (address))
# Need padding before?
replace = []
size = address - old_field.address
assert 0 <= size
if 0 < size:
padding = createPaddingField(self, size)
padding._address = old_field.address
replace.append(padding)
# Set fields address
for field in new_fields:
field._address = address
address += field.size
replace.append(field)
# Need padding after?
size = (old_field.address + old_field.size) - address
assert 0 <= size
if 0 < size:
padding = createPaddingField(self, size)
padding._address = address
replace.append(padding)
self.replaceField(old_field.name, replace)
def nextFieldAddress(self):
return self._current_size
def getFieldIndex(self, field):
return self._fields.index(field._name)
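# Hedged sketch of a concrete field set, following the recipe in the class
# docstring above. UInt8 and String are standard hachoir core field classes;
# the four-byte layout parsed here is purely illustrative.
#
#   from lib.hachoir_core.field import FieldSet, UInt8, String
#
#   class Header(FieldSet):
#       static_size = (1 + 3) * 8  # constant size, in bits
#       def createFields(self):
#           yield UInt8(self, "version")
#           yield String(self, "magic", 3, charset="ASCII")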
| Branlala/docker-sickbeardfr | sickbeard/lib/hachoir_core/field/generic_field_set.py | Python | mit | 19,195 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django_markdown.models
class Migration(migrations.Migration):
dependencies = [
('blog', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='post',
name='content',
field=django_markdown.models.MarkdownField(),
preserve_default=True,
),
]
| Ratina/ratina_portal | blog/migrations/0002_auto_20150209_0254.py | Python | gpl-3.0 | 459 |
import json
import netrc
import tempfile
from click.testing import CliRunner
import pytest
import responses
from ghutil.api.client import ACCEPT
from ghutil.cli.__main__ import cli
@pytest.fixture
def echo_headers():
with responses.RequestsMock() as rsps:
rsps.add_callback(
responses.GET,
"https://api.github.com/echo-headers",
callback=lambda r: (200, {}, json.dumps(dict(r.headers))),
content_type="application/json",
)
yield
@pytest.mark.usefixtures("echo_headers")
@pytest.mark.parametrize(
"config,accept_header",
[
("", ACCEPT),
("[api]", ACCEPT),
("[api]\naccept =\n", ACCEPT),
("[api]\naccept = \n", ACCEPT),
("[api]\naccept = " + ACCEPT, ACCEPT + "," + ACCEPT),
(
"[api]\naccept = application/vnd.github.batman-preview+json\n",
ACCEPT + ",application/vnd.github.batman-preview+json",
),
(
"[api]\n"
"accept = application/vnd.github.batman-preview+json\n"
" text/plain\n",
ACCEPT + ",application/vnd.github.batman-preview+json,text/plain",
),
(
"[api]\n"
"accept =\n"
" application/vnd.github.batman-preview+json\n"
" text/plain\n",
ACCEPT + ",application/vnd.github.batman-preview+json,text/plain",
),
(
"[api]\n"
"accept =\n"
" application/vnd.github.batman-preview+json,\n"
" text/plain,\n",
ACCEPT + ",application/vnd.github.batman-preview+json,text/plain",
),
(
"[api]\n"
"accept =\n"
" application/vnd.github.batman-preview+json\n"
"\n"
" text/plain\n",
ACCEPT + ",application/vnd.github.batman-preview+json,text/plain",
),
(
"[api]\n"
"accept =\n"
" application/vnd.github.batman-preview+json\n"
" \n"
" text/plain\n",
ACCEPT + ",application/vnd.github.batman-preview+json,text/plain",
),
(
"[api]\n"
"accept =\n"
" application/vnd.github.batman-preview+json\n"
" text/plain, application/octet-stream\n",
ACCEPT + ",application/vnd.github.batman-preview+json,text/plain,"
" application/octet-stream",
),
("[api]\nappend-accept = false", ACCEPT),
("[api]\naccept =\nappend-accept = false", None),
("[api]\naccept = text/plain\nappend-accept = false", "text/plain"),
("[api]\nappend-accept = true", ACCEPT),
("[api]\naccept =\nappend-accept = true", ACCEPT),
("[api]\naccept = text/plain\nappend-accept = true", ACCEPT + ",text/plain"),
],
)
def test_accept(config, accept_header):
with tempfile.NamedTemporaryFile(mode="w+") as cfg:
cfg.write(config)
cfg.flush()
r = CliRunner().invoke(cli, ["-c", cfg.name, "request", "/echo-headers"])
assert r.exit_code == 0
headers = json.loads(r.output)
assert headers.get("Accept") == accept_header
@pytest.mark.usefixtures("echo_headers")
@pytest.mark.parametrize(
"config,auth_header",
[
("", None),
(
"[api.auth]\ntoken = legitimateoauthtoken\n",
"token legitimateoauthtoken",
),
(
"[api.auth]\nusername = l.user\npassword = hunter2\n",
None,
),
(
"[api.auth]\n"
"token = legitimateoauthtoken\n"
"username = l.user\n"
"password = hunter2\n",
"token legitimateoauthtoken",
),
],
)
def test_auth(monkeypatch, config, auth_header):
# Keep `requests` from using the local user's ~/.netrc file; this is needed
# not only for the empty config case but also for the tests that set the
# "Authorization" header directly (see
# <https://github.com/requests/requests/issues/3929>)
monkeypatch.delattr(netrc, "netrc")
with tempfile.NamedTemporaryFile(mode="w+") as cfg:
cfg.write(config)
cfg.flush()
r = CliRunner().invoke(cli, ["-c", cfg.name, "request", "/echo-headers"])
assert r.exit_code == 0
headers = json.loads(r.output)
assert headers.get("Authorization") == auth_header
| jwodder/ghutil | test/test_api_config.py | Python | mit | 4,464 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Grid Dynamics
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
import os
from oslo.config import cfg
from webob import exc
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova import compute
from nova import exception
from nova.openstack.common.gettextutils import _
from nova import utils
ALIAS = "os-fping"
authorize = extensions.extension_authorizer('compute', 'v3:' + ALIAS)
authorize_all_tenants = extensions.extension_authorizer(
'compute', 'v3:' + ALIAS + ':all_tenants')
CONF = cfg.CONF
CONF.import_opt('fping_path', 'nova.api.openstack.compute.contrib.fping')
class FpingController(object):
def __init__(self, network_api=None):
self.compute_api = compute.API()
self.last_call = {}
def check_fping(self):
if not os.access(CONF.fping_path, os.X_OK):
raise exc.HTTPServiceUnavailable(
explanation=_("fping utility is not found."))
@staticmethod
def fping(ips):
fping_ret = utils.execute(CONF.fping_path, *ips,
check_exit_code=False)
if not fping_ret:
return set()
alive_ips = set()
for line in fping_ret[0].split("\n"):
ip = line.split(" ", 1)[0]
if "alive" in line:
alive_ips.add(ip)
return alive_ips
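    # Example: fping emits one status line per address, e.g.
    #
    #   10.0.0.1 is alive
    #   10.0.0.2 is unreachable
    #
    # for which fping(["10.0.0.1", "10.0.0.2"]) would return
    # set(["10.0.0.1"]) (addresses here are hypothetical).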
@staticmethod
def _get_instance_ips(context, instance):
ret = []
for network in common.get_networks_for_instance(
context, instance).values():
all_ips = itertools.chain(network["ips"], network["floating_ips"])
ret += [ip["address"] for ip in all_ips]
return ret
def index(self, req):
context = req.environ["nova.context"]
search_opts = dict(deleted=False)
if "all_tenants" in req.GET:
authorize_all_tenants(context)
else:
authorize(context)
if context.project_id:
search_opts["project_id"] = context.project_id
else:
search_opts["user_id"] = context.user_id
self.check_fping()
include = req.GET.get("include", None)
if include:
include = set(include.split(","))
exclude = set()
else:
include = None
exclude = req.GET.get("exclude", None)
if exclude:
exclude = set(exclude.split(","))
else:
exclude = set()
instance_list = self.compute_api.get_all(
context, search_opts=search_opts)
ip_list = []
instance_ips = {}
instance_projects = {}
for instance in instance_list:
uuid = instance["uuid"]
if uuid in exclude or (include is not None and
uuid not in include):
continue
ips = [str(ip) for ip in self._get_instance_ips(context, instance)]
instance_ips[uuid] = ips
instance_projects[uuid] = instance["project_id"]
ip_list += ips
alive_ips = self.fping(ip_list)
res = []
for instance_uuid, ips in instance_ips.iteritems():
res.append({
"id": instance_uuid,
"project_id": instance_projects[instance_uuid],
"alive": bool(set(ips) & alive_ips),
})
return {"servers": res}
def show(self, req, id):
try:
context = req.environ["nova.context"]
authorize(context)
self.check_fping()
instance = self.compute_api.get(context, id)
ips = [str(ip) for ip in self._get_instance_ips(context, instance)]
alive_ips = self.fping(ips)
return {
"server": {
"id": instance["uuid"],
"project_id": instance["project_id"],
"alive": bool(set(ips) & alive_ips),
}
}
except exception.NotFound:
raise exc.HTTPNotFound()
class Fping(extensions.V3APIExtensionBase):
"""Fping Management Extension."""
name = "Fping"
alias = ALIAS
namespace = "http://docs.openstack.org/compute/ext/fping/api/v3"
version = 1
def get_resources(self):
resources = [
extensions.ResourceExtension(ALIAS, FpingController())]
return resources
def get_controller_extensions(self):
return []
| Brocade-OpenSource/OpenStack-DNRM-Nova | nova/api/openstack/compute/plugins/v3/fping.py | Python | apache-2.0 | 5,098 |
# Copyright (C) 2005 Martin v. Löwis
# Licensed to PSF under a Contributor Agreement.
from _msi import *
import fnmatch
import os
import re
import string
import sys
AMD64 = "AMD64" in sys.version
# Keep msilib.Win64 around to preserve backwards compatibility.
Win64 = AMD64
# Partially taken from Wine
datasizemask= 0x00ff
type_valid= 0x0100
type_localizable= 0x0200
typemask= 0x0c00
type_long= 0x0000
type_short= 0x0400
type_string= 0x0c00
type_binary= 0x0800
type_nullable= 0x1000
type_key= 0x2000
# XXX temporary, localizable?
knownbits = datasizemask | type_valid | type_localizable | \
typemask | type_nullable | type_key
class Table:
def __init__(self, name):
self.name = name
self.fields = []
def add_field(self, index, name, type):
self.fields.append((index,name,type))
def sql(self):
fields = []
keys = []
self.fields.sort()
fields = [None]*len(self.fields)
for index, name, type in self.fields:
index -= 1
unk = type & ~knownbits
if unk:
print("%s.%s unknown bits %x" % (self.name, name, unk))
size = type & datasizemask
dtype = type & typemask
if dtype == type_string:
if size:
tname="CHAR(%d)" % size
else:
tname="CHAR"
elif dtype == type_short:
assert size==2
tname = "SHORT"
elif dtype == type_long:
assert size==4
tname="LONG"
elif dtype == type_binary:
assert size==0
tname="OBJECT"
else:
tname="unknown"
print("%s.%sunknown integer type %d" % (self.name, name, size))
if type & type_nullable:
flags = ""
else:
flags = " NOT NULL"
if type & type_localizable:
flags += " LOCALIZABLE"
fields[index] = "`%s` %s%s" % (name, tname, flags)
if type & type_key:
keys.append("`%s`" % name)
fields = ", ".join(fields)
keys = ", ".join(keys)
return "CREATE TABLE %s (%s PRIMARY KEY %s)" % (self.name, fields, keys)
def create(self, db):
v = db.OpenView(self.sql())
v.Execute(None)
v.Close()
class _Unspecified:pass
def change_sequence(seq, action, seqno=_Unspecified, cond = _Unspecified):
"Change the sequence number of an action in a sequence list"
for i in range(len(seq)):
if seq[i][0] == action:
if cond is _Unspecified:
cond = seq[i][1]
if seqno is _Unspecified:
seqno = seq[i][2]
seq[i] = (action, cond, seqno)
return
raise ValueError("Action not found in sequence")
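# Example: given a hypothetical sequence list of (action, condition, seqno)
# tuples, change_sequence() rewrites the matching entry in place:
#
#   seq = [("CostInitialize", None, 800), ("FileCost", None, 900)]
#   change_sequence(seq, "FileCost", seqno=950)
#   # seq[1] is now ("FileCost", None, 950)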
def add_data(db, table, values):
v = db.OpenView("SELECT * FROM `%s`" % table)
count = v.GetColumnInfo(MSICOLINFO_NAMES).GetFieldCount()
r = CreateRecord(count)
for value in values:
assert len(value) == count, value
for i in range(count):
field = value[i]
if isinstance(field, int):
r.SetInteger(i+1,field)
elif isinstance(field, str):
r.SetString(i+1,field)
elif field is None:
pass
elif isinstance(field, Binary):
r.SetStream(i+1, field.name)
else:
raise TypeError("Unsupported type %s" % field.__class__.__name__)
try:
v.Modify(MSIMODIFY_INSERT, r)
except Exception as e:
raise MSIError("Could not insert "+repr(values)+" into "+table)
r.ClearData()
v.Close()
def add_stream(db, name, path):
v = db.OpenView("INSERT INTO _Streams (Name, Data) VALUES ('%s', ?)" % name)
r = CreateRecord(1)
r.SetStream(1, path)
v.Execute(r)
v.Close()
def init_database(name, schema,
ProductName, ProductCode, ProductVersion,
Manufacturer):
try:
os.unlink(name)
except OSError:
pass
ProductCode = ProductCode.upper()
# Create the database
db = OpenDatabase(name, MSIDBOPEN_CREATE)
# Create the tables
for t in schema.tables:
t.create(db)
# Fill the validation table
add_data(db, "_Validation", schema._Validation_records)
    # Initialize the summary information, allowing at most 20 properties
si = db.GetSummaryInformation(20)
si.SetProperty(PID_TITLE, "Installation Database")
si.SetProperty(PID_SUBJECT, ProductName)
si.SetProperty(PID_AUTHOR, Manufacturer)
if AMD64:
si.SetProperty(PID_TEMPLATE, "x64;1033")
else:
si.SetProperty(PID_TEMPLATE, "Intel;1033")
si.SetProperty(PID_REVNUMBER, gen_uuid())
si.SetProperty(PID_WORDCOUNT, 2) # long file names, compressed, original media
si.SetProperty(PID_PAGECOUNT, 200)
si.SetProperty(PID_APPNAME, "Python MSI Library")
# XXX more properties
si.Persist()
add_data(db, "Property", [
("ProductName", ProductName),
("ProductCode", ProductCode),
("ProductVersion", ProductVersion),
("Manufacturer", Manufacturer),
("ProductLanguage", "1033")])
db.Commit()
return db
def add_tables(db, module):
for table in module.tables:
add_data(db, table, getattr(module, table))
def make_id(str):
identifier_chars = string.ascii_letters + string.digits + "._"
str = "".join([c if c in identifier_chars else "_" for c in str])
if str[0] in (string.digits + "."):
str = "_" + str
assert re.match("^[A-Za-z_][A-Za-z0-9_.]*$", str), "FILE"+str
return str
def gen_uuid():
return "{"+UuidCreate().upper()+"}"
class CAB:
def __init__(self, name):
self.name = name
self.files = []
self.filenames = set()
self.index = 0
def gen_id(self, file):
logical = _logical = make_id(file)
pos = 1
while logical in self.filenames:
logical = "%s.%d" % (_logical, pos)
pos += 1
self.filenames.add(logical)
return logical
def append(self, full, file, logical):
if os.path.isdir(full):
return
if not logical:
logical = self.gen_id(file)
self.index += 1
self.files.append((full, logical))
return self.index, logical
def commit(self, db):
from tempfile import mktemp
filename = mktemp()
FCICreate(filename, self.files)
add_data(db, "Media",
[(1, self.index, None, "#"+self.name, None, None)])
add_stream(db, self.name, filename)
os.unlink(filename)
db.Commit()
_directories = set()
class Directory:
def __init__(self, db, cab, basedir, physical, _logical, default, componentflags=None):
"""Create a new directory in the Directory table. There is a current component
at each point in time for the directory, which is either explicitly created
through start_component, or implicitly when files are added for the first
time. Files are added into the current component, and into the cab file.
To create a directory, a base directory object needs to be specified (can be
None), the path to the physical directory, and a logical directory name.
Default specifies the DefaultDir slot in the directory table. componentflags
specifies the default flags that new components get."""
index = 1
_logical = make_id(_logical)
logical = _logical
while logical in _directories:
logical = "%s%d" % (_logical, index)
index += 1
_directories.add(logical)
self.db = db
self.cab = cab
self.basedir = basedir
self.physical = physical
self.logical = logical
self.component = None
self.short_names = set()
self.ids = set()
self.keyfiles = {}
self.componentflags = componentflags
if basedir:
self.absolute = os.path.join(basedir.absolute, physical)
blogical = basedir.logical
else:
self.absolute = physical
blogical = None
add_data(db, "Directory", [(logical, blogical, default)])
def start_component(self, component = None, feature = None, flags = None, keyfile = None, uuid=None):
"""Add an entry to the Component table, and make this component the current for this
directory. If no component name is given, the directory name is used. If no feature
is given, the current feature is used. If no flags are given, the directory's default
flags are used. If no keyfile is given, the KeyPath is left null in the Component
table."""
if flags is None:
flags = self.componentflags
if uuid is None:
uuid = gen_uuid()
else:
uuid = uuid.upper()
if component is None:
component = self.logical
self.component = component
if AMD64:
flags |= 256
if keyfile:
keyid = self.cab.gen_id(keyfile)
self.keyfiles[keyfile] = keyid
else:
keyid = None
add_data(self.db, "Component",
[(component, uuid, self.logical, flags, None, keyid)])
if feature is None:
feature = current_feature
add_data(self.db, "FeatureComponents",
[(feature.id, component)])
def make_short(self, file):
oldfile = file
file = file.replace('+', '_')
file = ''.join(c for c in file if not c in r' "/\[]:;=,')
parts = file.split(".")
if len(parts) > 1:
prefix = "".join(parts[:-1]).upper()
suffix = parts[-1].upper()
if not prefix:
prefix = suffix
suffix = None
else:
prefix = file.upper()
suffix = None
if len(parts) < 3 and len(prefix) <= 8 and file == oldfile and (
not suffix or len(suffix) <= 3):
if suffix:
file = prefix+"."+suffix
else:
file = prefix
else:
file = None
if file is None or file in self.short_names:
prefix = prefix[:6]
if suffix:
suffix = suffix[:3]
pos = 1
while 1:
if suffix:
file = "%s~%d.%s" % (prefix, pos, suffix)
else:
file = "%s~%d" % (prefix, pos)
if file not in self.short_names: break
pos += 1
assert pos < 10000
if pos in (10, 100, 1000):
prefix = prefix[:-1]
self.short_names.add(file)
assert not re.search(r'[\?|><:/*"+,;=\[\]]', file) # restrictions on short names
return file
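    # Examples of the 8.3 short-name mangling above (assuming no prior
    # collisions in self.short_names):
    #
    #   make_short("readme.txt")     -> "README.TXT"
    #   make_short("example.python") -> "EXAMPL~1.PYT"  (suffix too long)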
def add_file(self, file, src=None, version=None, language=None):
"""Add a file to the current component of the directory, starting a new one
if there is no current component. By default, the file name in the source
and the file table will be identical. If the src file is specified, it is
interpreted relative to the current directory. Optionally, a version and a
language can be specified for the entry in the File table."""
if not self.component:
self.start_component(self.logical, current_feature, 0)
if not src:
# Allow relative paths for file if src is not specified
src = file
file = os.path.basename(file)
absolute = os.path.join(self.absolute, src)
assert not re.search(r'[\?|><:/*]"', file) # restrictions on long names
if file in self.keyfiles:
logical = self.keyfiles[file]
else:
logical = None
sequence, logical = self.cab.append(absolute, file, logical)
assert logical not in self.ids
self.ids.add(logical)
short = self.make_short(file)
full = "%s|%s" % (short, file)
filesize = os.stat(absolute).st_size
# constants.msidbFileAttributesVital
# Compressed omitted, since it is the database default
# could add r/o, system, hidden
attributes = 512
add_data(self.db, "File",
[(logical, self.component, full, filesize, version,
language, attributes, sequence)])
#if not version:
# # Add hash if the file is not versioned
# filehash = FileHash(absolute, 0)
# add_data(self.db, "MsiFileHash",
# [(logical, 0, filehash.IntegerData(1),
# filehash.IntegerData(2), filehash.IntegerData(3),
# filehash.IntegerData(4))])
# Automatically remove .pyc files on uninstall (2)
# XXX: adding so many RemoveFile entries makes installer unbelievably
# slow. So instead, we have to use wildcard remove entries
if file.endswith(".py"):
add_data(self.db, "RemoveFile",
[(logical+"c", self.component, "%sC|%sc" % (short, file),
self.logical, 2),
(logical+"o", self.component, "%sO|%so" % (short, file),
self.logical, 2)])
return logical
def glob(self, pattern, exclude = None):
"""Add a list of files to the current component as specified in the
glob pattern. Individual files can be excluded in the exclude list."""
try:
files = os.listdir(self.absolute)
except OSError:
return []
if pattern[:1] != '.':
files = (f for f in files if f[0] != '.')
files = fnmatch.filter(files, pattern)
for f in files:
if exclude and f in exclude: continue
self.add_file(f)
return files
def remove_pyc(self):
"Remove .pyc files on uninstall"
add_data(self.db, "RemoveFile",
[(self.component+"c", self.component, "*.pyc", self.logical, 2)])
class Binary:
def __init__(self, fname):
self.name = fname
def __repr__(self):
return 'msilib.Binary(os.path.join(dirname,"%s"))' % self.name
class Feature:
def __init__(self, db, id, title, desc, display, level = 1,
parent=None, directory = None, attributes=0):
self.id = id
if parent:
parent = parent.id
add_data(db, "Feature",
[(id, parent, title, desc, display,
level, directory, attributes)])
def set_current(self):
global current_feature
current_feature = self
class Control:
def __init__(self, dlg, name):
self.dlg = dlg
self.name = name
def event(self, event, argument, condition = "1", ordering = None):
add_data(self.dlg.db, "ControlEvent",
[(self.dlg.name, self.name, event, argument,
condition, ordering)])
def mapping(self, event, attribute):
add_data(self.dlg.db, "EventMapping",
[(self.dlg.name, self.name, event, attribute)])
def condition(self, action, condition):
add_data(self.dlg.db, "ControlCondition",
[(self.dlg.name, self.name, action, condition)])
class RadioButtonGroup(Control):
def __init__(self, dlg, name, property):
self.dlg = dlg
self.name = name
self.property = property
self.index = 1
def add(self, name, x, y, w, h, text, value = None):
if value is None:
value = name
add_data(self.dlg.db, "RadioButton",
[(self.property, self.index, value,
x, y, w, h, text, None)])
self.index += 1
class Dialog:
def __init__(self, db, name, x, y, w, h, attr, title, first, default, cancel):
self.db = db
self.name = name
self.x, self.y, self.w, self.h = x,y,w,h
add_data(db, "Dialog", [(name, x,y,w,h,attr,title,first,default,cancel)])
def control(self, name, type, x, y, w, h, attr, prop, text, next, help):
add_data(self.db, "Control",
[(self.name, name, type, x, y, w, h, attr, prop, text, next, help)])
return Control(self, name)
def text(self, name, x, y, w, h, attr, text):
return self.control(name, "Text", x, y, w, h, attr, None,
text, None, None)
def bitmap(self, name, x, y, w, h, text):
return self.control(name, "Bitmap", x, y, w, h, 1, None, text, None, None)
def line(self, name, x, y, w, h):
return self.control(name, "Line", x, y, w, h, 1, None, None, None, None)
def pushbutton(self, name, x, y, w, h, attr, text, next):
return self.control(name, "PushButton", x, y, w, h, attr, None, text, next, None)
def radiogroup(self, name, x, y, w, h, attr, prop, text, next):
add_data(self.db, "Control",
[(self.name, name, "RadioButtonGroup",
x, y, w, h, attr, prop, text, next, None)])
return RadioButtonGroup(self, name, prop)
def checkbox(self, name, x, y, w, h, attr, prop, text, next):
return self.control(name, "CheckBox", x, y, w, h, attr, prop, text, next, None)
| xyuanmu/XX-Net | python3.8.2/Lib/msilib/__init__.py | Python | bsd-2-clause | 17,596 |
from django.contrib.auth.models import User
from drf_haystack.serializers import HaystackSerializer
from rest_framework import serializers
from apiv2.models import *
from apiv2.search_indexes import QuestionIndex
class EducationLevelSerializer(serializers.ModelSerializer):
class Meta:
model = EducationLevel
fields = ('id', 'name', 'description')
class SubjectSerializer(serializers.ModelSerializer):
class Meta:
model = Subject
fields = ('id', 'name', 'description', 'education_level')
# depth = 1
class TopicSerializer(serializers.ModelSerializer):
class Meta:
model = Topic
fields = ('id', 'name', 'subject')
# depth = 1
class ConceptSerializer(serializers.ModelSerializer):
class Meta:
model = Concept
fields = ('id', 'name', 'topic')
# depth = 1
class SubconceptSerializer(serializers.ModelSerializer):
class Meta:
model = Subconcept
fields = ('id', 'name', 'concept')
# depth = 1
class KeyPointSerializer(serializers.ModelSerializer):
class Meta:
model = KeyPoint
fields = ('id', 'name', 'type', 'content', 'concept')
# depth = 1
class KeywordSerializer(serializers.ModelSerializer):
class Meta:
model = Keyword
fields = ('id', 'name', 'content')
# depth = 1
class PapersetSerializer(serializers.ModelSerializer):
class Meta:
model = Paperset
fields = ('id', 'name', 'subject')
# depth = 1
class PaperSerializer(serializers.ModelSerializer):
class Meta:
model = Paper
fields = ('id', 'year', 'month', 'number', 'no_of_question',
'subject', 'paperset')
# depth = 1
class QuestionSerializer(serializers.ModelSerializer):
class Meta:
model = Question
fields = ('id', 'content', 'content_cleaned_text', 'concept',
'formula_set', 'formula_categories',
'is_sample', 'subconcept', 'difficulty_level', 'marks',
'keypoints', 'keywords',
'paper', 'source', 'used_for', 'response_type',
'question_type', 'paper')
# depth = 1
class TestQuestionSerializer(serializers.ModelSerializer):
class Meta:
model = TestQuestion
fields = ('id', 'category', 'content', 'concept', 'is_sample',
'subconcept', 'difficulty_level', 'marks',
'paper', 'source', 'response_type',
'question_type', 'paper')
# depth = 1
class SolutionSerializer(serializers.ModelSerializer):
    class Meta:
model = Solution
fields = ('id', 'question', 'content')
# depth = 1
class FormulaSerializer(serializers.ModelSerializer):
# questions = QuestionSerializer(many=True, read_only=True)
class Meta:
model = Formula
fields = ('id', 'content', 'categories', 'status', 'inorder_term',
'sorted_term', 'structure_term', 'constant_term',
'variable_term', 'questions', 'concept')
# depth = 1
class FormulaCategorySerializer(serializers.ModelSerializer):
class Meta:
model = FormulaCategory
fields = ('name',)
# depth = 1
class FormulaIndexSerializer(serializers.ModelSerializer):
class Meta:
model = FormulaIndex
fields = ('term_index', 'formulas', 'df')
# depth = 1
class SearchResultSerializer(serializers.Serializer):
rel_formula = FormulaSerializer(read_only=True)
question = QuestionSerializer(read_only=True)
class TestFormulaCategorySerializer(serializers.ModelSerializer):
class Meta:
model = TestFormulaCategory
fields = ('name',)
class TestFormulaIndexSerializer(serializers.ModelSerializer):
class Meta:
model = TestFormulaIndex
fields = ('term_index', 'docsids', 'df')
class TestFormulaSerializer(serializers.ModelSerializer):
class Meta:
model = TestFormula
fields = ('id', 'content', 'categories', 'status', 'inorder_term',
'sorted_term', 'structure_term', 'constant_term',
'variable_term', 'questions')
class AnswerPartSerializer(serializers.ModelSerializer):
class Meta:
model = Question
fields = (
'id', 'part_name', 'part_content', 'part_respone_type',
'subpart_name_1', 'subpart_content_1', 'respone_type_1',
'subpart_name_2', 'subpart_content_2', 'respone_type_2',
'subpart_name_3', 'subpart_content_3', 'respone_type_3',
'subpart_name_4', 'subpart_content_4', 'respone_type_4',
'question')
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('id', 'username', 'password', 'email')
class QuestionHaystackSerializer(HaystackSerializer):
class Meta:
# The `index_classes` attribute is a list of which search indexes
# we want to include in the search.
index_classes = [QuestionIndex]
# The `fields` contains all the fields we want to include.
# NOTE: Make sure you don't confuse these with model attributes. These
# fields belong to the search index!
fields = ('id', 'content', 'content_cleaned_text', 'concept', 'formula_set',
'is_sample', 'subconcept', 'difficulty_level', 'marks',
'keypoints', 'keywords',
'paper', 'source', 'used_for', 'response_type',
'question_type', 'paper')
| deka108/mathqa-server | apiv2/serializers.py | Python | apache-2.0 | 5,579 |
import numpy
import time
import pyglet
import pyglet.graphics as gl
import noise
from config import SECTOR_SIZE
cb_v = numpy.array([
[-1,+1,-1, -1,+1,+1, +1,+1,+1, +1,+1,-1], # top
[-1,-1,-1, +1,-1,-1, +1,-1,+1, -1,-1,+1], # bottom
[-1,-1,-1, -1,-1,+1, -1,+1,+1, -1,+1,-1], # left
[+1,-1,+1, +1,-1,-1, +1,+1,-1, +1,+1,+1], # right
[-1,-1,+1, +1,-1,+1, +1,+1,+1, -1,+1,+1], # front
[+1,-1,-1, -1,-1,-1, -1,+1,-1, +1,+1,-1], # back
],dtype = numpy.float32)
c = 1
cb_v_half = numpy.array([
[-1,+0,-1, -1,+0,+1, +1,+0,+1, +1,+0,-1], # top
[-1,-1,-1, +1,-1,-1, +1,-1,+1, -1,-1,+1], # bottom
[-c,-1,-1, -c,-1,+1, -c,+1,+1, -c,+1,-1], # left
[+c,-1,+1, +c,-1,-1, +c,+1,-1, +c,+1,+1], # right
[-1,-1,+c, +1,-1,+c, +1,+1,+c, -1,+1,+c], # front
[+1,-1,-c, -1,-1,-c, -1,+1,-c, +1,+1,-c], # back
],dtype = numpy.float32)
c = 14.0/16
cb_v_cake = numpy.array([
[-1,+0,-1, -1,+0,+1, +1,+0,+1, +1,+0,-1], # top
[-1,-1,-1, +1,-1,-1, +1,-1,+1, -1,-1,+1], # bottom
[-c,-1,-1, -c,-1,+1, -c,+1,+1, -c,+1,-1], # left
[+c,-1,+1, +c,-1,-1, +c,+1,-1, +c,+1,+1], # right
[-1,-1,+c, +1,-1,+c, +1,+1,+c, -1,+1,+c], # front
[+1,-1,-c, -1,-1,-c, -1,+1,-c, +1,+1,-c], # back
],dtype = numpy.float32)
de_v = numpy.array([
[0]*12,
[0]*12,
[-1,-1,+1, +1,-1,-1, +1,+1,-1, -1,+1,+1],
[+1,-1,-1, -1,-1,+1, -1,+1,+1, +1,+1,-1],
[-1,-1,-1, +1,-1,+1, +1,+1,+1, -1,+1,-1],
[+1,-1,+1, -1,-1,-1, -1,+1,-1, +1,+1,+1],
],dtype = numpy.float32)
def cube_v(pos,n):
return n*cb_v+numpy.tile(pos,4)
def cube_v2(pos,n):
return (n*cb_v)+numpy.tile(pos,4)[:,numpy.newaxis,:]
def deco_v(pos,n):
return n*de_v+numpy.tile(pos,4)
def deco_v2(pos,n):
return (n*de_v)+numpy.tile(pos,4)[:,numpy.newaxis,:]
def tex_coord(x, y, n=4):
""" Return the bounding vertices of the texture square.
"""
m = 1.0 / n
dx = x * m
dy = y * m
return [dx, dy, dx + m, dy, dx + m, dy + m, dx, dy + m]
def tex_coords(*sides): #top, bottom,
""" Return a list of the texture squares for the top, bottom and side.
"""
# top = tex_coord(*top)
# bottom = tex_coord(*bottom)
result = []
# result.append(top)
# result.append(bottom)
i=6
for s in sides:
result.append(tex_coord(*s))
i-=1
while i>=0:
result.append(tex_coord(*sides[-1]))
i-=1
return result
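# Example: with the default 4x4 atlas, tex_coord(0, 0) returns the corners of
# the lower-left tile, [0.0, 0.0, 0.25, 0.0, 0.25, 0.25, 0.0, 0.25],
# i.e. (x1,y1, x2,y2, x3,y3, x4,y4) in texture space.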
FACES = [
( 0, 1, 0), #up
( 0,-1, 0), #down
(-1, 0, 0), #left
( 1, 0, 0), #right
( 0, 0, 1), #forward
( 0, 0,-1), #back
]
def normalize(position):
""" Accepts `position` of arbitrary precision and returns the block
containing that position.
Parameters
----------
position : tuple of len 3
Returns
-------
block_position : tuple of ints of len 3
"""
x, y, z = position
x, y, z = (int(round(x)), int(round(y)), int(round(z)))
return (x, y, z)
def sectorize(position):
""" Returns a tuple representing the sector for the given `position`.
Parameters
----------
position : tuple of len 3
Returns
-------
sector : tuple of len 3
"""
x, y, z = normalize(position)
x, y, z = x / SECTOR_SIZE, y / SECTOR_SIZE, z / SECTOR_SIZE
return (x*SECTOR_SIZE, 0, z*SECTOR_SIZE)
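# Example (assuming SECTOR_SIZE = 16): normalize((17.3, 5.0, 33.8)) gives the
# containing block (17, 5, 34), and sectorize of the same position yields the
# sector origin (16, 0, 32).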
## monkey patch IndirectArrayRegion.__setitem__ to make it a bit quicker for numpy arrays
orig_indirect_array_region_setitem = pyglet.graphics.vertexbuffer.IndirectArrayRegion.__setitem__
def numpy__setitem__(self, index, value):
if isinstance(value, numpy.ndarray) and isinstance(index, slice) \
and index.start is None and index.stop is None and index.step is None:
arr = numpy.ctypeslib.as_array(self.region.array)
for i in range(self.count):
arr[i::self.stride] = value[i::self.count]
return
orig_indirect_array_region_setitem(self, index, value)
pyglet.graphics.vertexbuffer.IndirectArrayRegion.__setitem__ = numpy__setitem__
class LineDrawGroup(pyglet.graphics.Group):
def __init__(self, thickness = 1, parent=None):
pyglet.graphics.Group.__init__(self, parent)
self.thickness = thickness
def set_state(self):
gl.glLineWidth(self.thickness)
gl.glColor3d(0, 0, 0)
gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_LINE)
def unset_state(self):
gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_FILL)
gl.glLineWidth(1)
class DrawTranslateGroup(pyglet.graphics.Group):
def __init__(self, translate = (0,0,0), parent=None):
pyglet.graphics.Group.__init__(self, parent)
self.translate = translate
def set_state(self):
gl.glPushMatrix()
gl.glTranslatef(*self.translate)
def unset_state(self):
gl.glPopMatrix()
class InventoryGroup(pyglet.graphics.Group):
def __init__(self, parent=None):
pyglet.graphics.Group.__init__(self, parent)
def set_state(self):
gl.glPushMatrix()
gl.glTranslatef(0, 0, -64)
gl.glRotatef(45, 1, 0, 0)
gl.glRotatef(45, 0, 1, 0)
def unset_state(self):
gl.glPopMatrix()
class InventoryOutlineGroup(pyglet.graphics.Group):
def __init__(self, parent=None):
pyglet.graphics.Group.__init__(self, parent)
def set_state(self):
gl.glPushMatrix()
gl.glTranslatef(0, 0, -60)
gl.glRotatef(45, 1, 0, 0)
gl.glRotatef(45, 0, 1, 0)
def unset_state(self):
gl.glPopMatrix()
| spillz/minepy | util.py | Python | gpl-3.0 | 5,629 |
from flask import Flask, request, abort
import json
import ndb_util
from model import User
from google.appengine.api import users
from google.appengine.ext import ndb
from google.appengine.api import app_identity
from google.appengine.api import mail
from flask_restful import Resource
from google.appengine.runtime import apiproxy_errors
base_url = 'http://demolisherapp.appspot.com/'
app_name = 'DemolisherApp'
new_user_message = '<p> Create a new account at {app_name} today</p>' \
'<form action="{path}" method="get">'\
'<input type="submit" value="Sign Up">' \
'</form>'
user_message = '<p>Join the team and start demoing today</p>' \
'<form action="{path}" method="get">' \
'<input type="submit" value="Join">' \
'</form>'
class InviteUserToOrg(Resource):
def get(self, org_id, user_email):
print "we in here"
client_id = users.get_current_user().user_id()
user_email = str(user_email)
print user_email
org_id = str(org_id)
if org_id != client_id:
abort(401)
org_key = ndb.Key('Organization', org_id)
org = org_key.get()
sender = '[email protected]'
ender = '{}@appspot.gserviceaccount.com'.format(
app_identity.get_application_id())
subject = 'Welcome to the ' + org.name + ' Team!'
body = '<h3>{org_name} has invited you to join their team</h3>' \
'<hr>'
query = User.query()
query = query.filter(User.email == user_email)
query_results = query.fetch()
if len(query_results) == 0:
add_new_user_path = base_url + 'signup?referral=' + org_id
print add_new_user_path
body = body + new_user_message.format(path=add_new_user_path, app_name=app_name)
else:
user = query_results[0]
user_id = query_results[0].key.id()
add_user_path = base_url + 'signup?referral=' + org_id
print add_user_path
body = body + user_message.format(path=add_user_path)
response = mail.send_mail(sender=sender, to=user_email, subject=subject, body="", html=body)
return response
class AddUserToOrg(Resource):
def get(self, org_id, user_id):
client_id = users.get_current_user().user_id()
if client_id != user_id:
abort(401)
org_key = ndb.Key('Organization', org_id)
org = org_key.get()
user_key = ndb.Key('User', user_id)
user = user_key.get()
if user_key in org.workers or org_key in user.works_for_organizations:
abort(403)
user.add_organization(org_key)
org.add_worker(user_key)
return user.to_json()
class RemoveUserFromOrg(Resource):
def delete(self, org_id, user_id):
client_id = users.get_current_user().user_id()
if client_id != org_id:
abort(401)
org_key = ndb.Key('Organization', org_id)
org = org_key.get()
user_key = ndb.Key('User', user_id)
user = user_key.get()
user.delete_organization(org_key)
org.remove_worker(user_key)
return 'OK'
class GetAllWorkersForOrg(Resource):
def get(self, org_id):
client_id = users.get_current_user().user_id()
if client_id != org_id:
abort(401)
org_key = ndb.Key('Organization', org_id)
org = org_key.get()
workers_entities = ndb.get_multi(org.workers)
workers_json = []
for entity in workers_entities:
workers_json.append(entity.to_json())
return {"workers" : workers_json}
class GetAllOrgsForWorker(Resource):
def get(self, user_id):
client_id = users.get_current_user().user_id()
if client_id != user_id:
abort(401)
user_key = ndb.Key('User', user_id)
user = user_key.get()
orgs_entities = ndb.get_multi(user.works_for_organizations)
orgs_json = []
for entity in orgs_entities:
orgs_json.append(entity.to_json())
return {'organizations': orgs_json}
| jtovar2/demo_app | backend/resources/org_user_relationships.py | Python | mit | 4,196 |
import functools
import httplib as http
import itertools
from operator import itemgetter
from dateutil.parser import parse as parse_date
from django.core.exceptions import ValidationError
from django.db.models import Q
from django.utils import timezone
from flask import request, redirect
import pytz
from framework.database import get_or_http_error, autoload
from framework.exceptions import HTTPError
from framework.status import push_status_message
from osf.utils.sanitize import strip_html
from osf.utils.permissions import ADMIN
from osf.utils.functional import rapply
from osf.models import NodeLog, RegistrationSchema, DraftRegistration, Sanction
from website.exceptions import NodeStateError
from website.project.decorators import (
must_be_valid_project,
must_have_permission,
http_error_if_disk_saving_mode
)
from website import language, settings
from website.ember_osf_web.decorators import ember_flag_is_active
from website.prereg import utils as prereg_utils
from website.project import utils as project_utils
from website.project.metadata.schemas import LATEST_SCHEMA_VERSION, METASCHEMA_ORDERING
from website.project.metadata.utils import serialize_meta_schema, serialize_draft_registration
from website.project.utils import serialize_node
get_schema_or_fail = lambda query: get_or_http_error(RegistrationSchema, query)
autoload_draft = functools.partial(autoload, DraftRegistration, 'draft_id', 'draft')
def must_be_branched_from_node(func):
@autoload_draft
@must_be_valid_project
@functools.wraps(func)
def wrapper(*args, **kwargs):
node = kwargs['node']
draft = kwargs['draft']
if draft.deleted:
raise HTTPError(http.GONE)
if not draft.branched_from._id == node._id:
raise HTTPError(
http.BAD_REQUEST,
data={
'message_short': 'Not a draft of this node',
'message_long': 'This draft registration is not created from the given node.'
}
)
return func(*args, **kwargs)
return wrapper
def validate_embargo_end_date(end_date_string, node):
"""
Our reviewers have a window of time in which to review a draft reg. submission.
If an embargo end_date that is within that window is at risk of causing
validation errors down the line if the draft is approved and registered.
The draft registration approval window is always greater than the time span
for disallowed embargo end dates.
:raises: HTTPError if end_date is less than the approval window or greater than the
max embargo end date
"""
end_date = parse_date(end_date_string, ignoretz=True).replace(tzinfo=pytz.utc)
today = timezone.now()
if (end_date - today) <= settings.DRAFT_REGISTRATION_APPROVAL_PERIOD:
raise HTTPError(http.BAD_REQUEST, data={
'message_short': 'Invalid embargo end date',
'message_long': 'Embargo end date for this submission must be at least {0} days in the future.'.format(settings.DRAFT_REGISTRATION_APPROVAL_PERIOD)
})
elif not node._is_embargo_date_valid(end_date):
max_end_date = today + settings.DRAFT_REGISTRATION_APPROVAL_PERIOD
raise HTTPError(http.BAD_REQUEST, data={
'message_short': 'Invalid embargo end date',
            'message_long': 'Embargo end date must be on or before {0}.'.format(max_end_date.isoformat())
})
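# Worked example (with a hypothetical DRAFT_REGISTRATION_APPROVAL_PERIOD of
# 10 days): an embargo ending 7 days from now fails the first check above,
# while a date outside the node's allowed embargo range fails the second;
# only end dates beyond the approval window and within the node's limit pass.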
def validate_registration_choice(registration_choice):
if registration_choice not in ('embargo', 'immediate'):
raise HTTPError(
http.BAD_REQUEST,
data={
'message_short': "Invalid 'registrationChoice'",
'message_long': "Values for 'registrationChoice' must be either 'embargo' or 'immediate'."
}
)
def check_draft_state(draft):
registered_and_deleted = draft.registered_node and draft.registered_node.is_deleted
if draft.registered_node and not registered_and_deleted:
raise HTTPError(http.FORBIDDEN, data={
'message_short': 'This draft has already been registered',
'message_long': 'This draft has already been registered and cannot be modified.'
})
if draft.is_pending_review:
raise HTTPError(http.FORBIDDEN, data={
'message_short': 'This draft is pending review',
'message_long': 'This draft is pending review and cannot be modified.'
})
if draft.requires_approval and draft.is_approved and (not registered_and_deleted):
raise HTTPError(http.FORBIDDEN, data={
'message_short': 'This draft has already been approved',
'message_long': 'This draft has already been approved and cannot be modified.'
})
@must_have_permission(ADMIN)
@must_be_branched_from_node
def submit_draft_for_review(auth, node, draft, *args, **kwargs):
"""Submit for approvals and/or notifications
:return: serialized registration
:rtype: dict
:raises: HTTPError if embargo end date is invalid
"""
data = request.get_json()
meta = {}
registration_choice = data.get('registrationChoice', 'immediate')
validate_registration_choice(registration_choice)
if registration_choice == 'embargo':
# Initiate embargo
end_date_string = data['embargoEndDate']
validate_embargo_end_date(end_date_string, node)
meta['embargo_end_date'] = end_date_string
meta['registration_choice'] = registration_choice
if draft.registered_node and not draft.registered_node.is_deleted:
raise HTTPError(http.BAD_REQUEST, data=dict(message_long='This draft has already been registered, if you wish to '
'register it again or submit it for review please create '
'a new draft.'))
# Don't allow resubmission unless submission was rejected
if draft.approval and draft.approval.state != Sanction.REJECTED:
raise HTTPError(http.CONFLICT, data=dict(message_long='Cannot resubmit previously submitted draft.'))
draft.submit_for_review(
initiated_by=auth.user,
meta=meta,
save=True
)
if prereg_utils.get_prereg_schema() == draft.registration_schema:
node.add_log(
action=NodeLog.PREREG_REGISTRATION_INITIATED,
params={'node': node._primary_key},
auth=auth,
save=False
)
node.save()
push_status_message(language.AFTER_SUBMIT_FOR_REVIEW,
kind='info',
trust=False)
return {
'status': 'initiated',
'urls': {
'registrations': node.web_url_for('node_registrations')
}
}, http.ACCEPTED
@must_have_permission(ADMIN)
@must_be_branched_from_node
def draft_before_register_page(auth, node, draft, *args, **kwargs):
"""Allow the user to select an embargo period and confirm registration
:return: serialized Node + DraftRegistration
:rtype: dict
"""
ret = serialize_node(node, auth, primary=True)
ret['draft'] = serialize_draft_registration(draft, auth)
return ret
@must_have_permission(ADMIN)
@must_be_branched_from_node
@http_error_if_disk_saving_mode
def register_draft_registration(auth, node, draft, *args, **kwargs):
"""Initiate a registration from a draft registration
:return: success message; url to registrations page
:rtype: dict
"""
data = request.get_json()
registration_choice = data.get('registrationChoice', 'immediate')
validate_registration_choice(registration_choice)
# Don't allow resubmission unless submission was rejected
if draft.approval and draft.approval.state != Sanction.REJECTED:
raise HTTPError(http.CONFLICT, data=dict(message_long='Cannot resubmit previously submitted draft.'))
register = draft.register(auth)
draft.save()
if registration_choice == 'embargo':
# Initiate embargo
embargo_end_date = parse_date(data['embargoEndDate'], ignoretz=True).replace(tzinfo=pytz.utc)
try:
register.embargo_registration(auth.user, embargo_end_date)
except ValidationError as err:
raise HTTPError(http.BAD_REQUEST, data=dict(message_long=err.message))
else:
try:
register.require_approval(auth.user)
except NodeStateError as err:
raise HTTPError(http.BAD_REQUEST, data=dict(message_long=err.message))
register.save()
push_status_message(language.AFTER_REGISTER_ARCHIVING,
kind='info',
trust=False)
return {
'status': 'initiated',
'urls': {
'registrations': node.web_url_for('node_registrations')
}
}, http.ACCEPTED
@must_have_permission(ADMIN)
@must_be_branched_from_node
def get_draft_registration(auth, node, draft, *args, **kwargs):
"""Return a single draft registration
:return: serialized draft registration
:rtype: dict
"""
return serialize_draft_registration(draft, auth), http.OK
@must_have_permission(ADMIN)
@must_be_valid_project
def get_draft_registrations(auth, node, *args, **kwargs):
"""List draft registrations for a node
:return: serialized draft registrations
:rtype: dict
"""
#'updated': '2016-08-03T14:24:12Z'
    count = int(request.args.get('count', 100))
drafts = itertools.islice(node.draft_registrations_active, 0, count)
serialized_drafts = [serialize_draft_registration(d, auth) for d in drafts]
sorted_serialized_drafts = sorted(serialized_drafts, key=itemgetter('updated'), reverse=True)
return {
'drafts': sorted_serialized_drafts
}, http.OK
@must_have_permission(ADMIN)
@must_be_valid_project
@ember_flag_is_active('ember_create_draft_registration_page')
def new_draft_registration(auth, node, *args, **kwargs):
"""Create a new draft registration for the node
:return: Redirect to the new draft's edit page
:rtype: flask.redirect
:raises: HTTPError
"""
if node.is_registration:
raise HTTPError(http.FORBIDDEN, data={
'message_short': "Can't create draft",
'message_long': 'Creating draft registrations on registered projects is not allowed.'
})
data = request.values
schema_name = data.get('schema_name')
if not schema_name:
raise HTTPError(
http.BAD_REQUEST,
data={
'message_short': 'Must specify a schema_name',
'message_long': 'Please specify a schema_name'
}
)
schema_version = data.get('schema_version', 2)
meta_schema = get_schema_or_fail(Q(name=schema_name, schema_version=int(schema_version)))
draft = DraftRegistration.create_from_node(
node,
user=auth.user,
schema=meta_schema,
data={}
)
return redirect(node.web_url_for('edit_draft_registration_page', draft_id=draft._id))
@must_have_permission(ADMIN)
@ember_flag_is_active('ember_edit_draft_registration_page')
@must_be_branched_from_node
def edit_draft_registration_page(auth, node, draft, **kwargs):
"""Draft registration editor
:return: serialized DraftRegistration
:rtype: dict
"""
check_draft_state(draft)
ret = project_utils.serialize_node(node, auth, primary=True)
ret['draft'] = serialize_draft_registration(draft, auth)
return ret
@must_have_permission(ADMIN)
@must_be_branched_from_node
def update_draft_registration(auth, node, draft, *args, **kwargs):
"""Update an existing draft registration
:return: serialized draft registration
:rtype: dict
:raises: HTTPError
"""
check_draft_state(draft)
data = request.get_json()
schema_data = data.get('schema_data', {})
schema_data = rapply(schema_data, strip_html)
schema_name = data.get('schema_name')
schema_version = data.get('schema_version', 1)
if schema_name:
meta_schema = get_schema_or_fail(Q(name=schema_name, schema_version=schema_version))
existing_schema = draft.registration_schema
if (existing_schema.name, existing_schema.schema_version) != (meta_schema.name, meta_schema.schema_version):
draft.registration_schema = meta_schema
draft.update_metadata(schema_data)
draft.save()
return serialize_draft_registration(draft, auth), http.OK
@must_have_permission(ADMIN)
@must_be_branched_from_node
def delete_draft_registration(auth, node, draft, *args, **kwargs):
"""Permanently delete a draft registration
:return: None
:rtype: NoneType
"""
if draft.registered_node and not draft.registered_node.is_deleted:
raise HTTPError(
http.FORBIDDEN,
data={
'message_short': 'Can\'t delete draft',
'message_long': 'This draft has already been registered and cannot be deleted.'
}
)
draft.deleted = timezone.now()
draft.save(update_fields=['deleted'])
return None, http.NO_CONTENT
def get_metaschemas(*args, **kwargs):
"""
List metaschemas with which a draft registration may be created. Only fetch the newest version for each schema.
:return: serialized metaschemas
:rtype: dict
"""
    count = int(request.args.get('count', 100))
include = request.args.get('include', 'latest')
meta_schemas = RegistrationSchema.objects.filter(active=True)
if include == 'latest':
        meta_schemas = meta_schemas.filter(schema_version=LATEST_SCHEMA_VERSION)
meta_schemas = sorted(meta_schemas, key=lambda x: METASCHEMA_ORDERING.index(x.name))
return {
'meta_schemas': [
serialize_meta_schema(ms) for ms in meta_schemas[:count]
]
}, http.OK
| erinspace/osf.io | website/project/views/drafts.py | Python | apache-2.0 | 13,819 |
"""Demo EmulatedDevice and EmulatedTile objects for testing and demonstration purposes."""
from .demo_device import DemoEmulatedDevice
from .demo_reference import DemoReferenceDevice
from .demo_proxy import DemoTileProxy
__all__ = ['DemoEmulatedDevice', 'DemoReferenceDevice', 'DemoTileProxy']
| iotile/coretools | iotileemulate/iotile/emulate/demo/__init__.py | Python | gpl-3.0 | 296 |
import os
def get_heroku_caches():
try:
os.environ['MEMCACHE_SERVERS'] = os.environ['MEMCACHIER_SERVERS']
os.environ['MEMCACHE_USERNAME'] = os.environ['MEMCACHIER_USERNAME']
os.environ['MEMCACHE_PASSWORD'] = os.environ['MEMCACHIER_PASSWORD']
        return {
            'default': {
                'BACKEND': 'django_pylibmc.memcached.PyLibMCCache',
                'LOCATION': os.environ['MEMCACHIER_SERVERS'],
                'TIMEOUT': 500,
                'BINARY': True,
            }
        }
    except KeyError:
return {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache'
}
    }
| thisismess/heroku-django-helpers | heroku_django_helpers/helpers.py | Python | mit | 531 |
# -*- coding: utf-8 -*-
# Flask-Diamond (c) Ian Dennis Miller
from flask_security import SQLAlchemyUserDatastore
from flask_security import Security
from flask import current_app
security = Security()
def init_accounts(self, user=None, role=None, *args, **kwargs):
"""
Initialize Security for application.
:param kwargs: parameters that will be passed through to Flask-Security
:type kwargs: dict
:returns: None
A number of common User account operations are provided by `Flask-
Security <http://pythonhosted.org/Flask-Security/>`_. This function is
responsible for associating User models in the database with the
Security object.
    If you need to override a Flask-Security form (as is the case when
    implementing CAPTCHA), you must use super() from within your
    application and provide any arguments destined for Flask-Security.
>>> result = self.super("accounts", user=User, role=Role,
>>> confirm_register_form=ExtendedRegisterForm)
"""
# import database
from .. import db
if not user or not role:
from ..models.user import User as user
from ..models.role import Role as role
# create datastore
user_datastore = SQLAlchemyUserDatastore(db, user, role)
setattr(Security, "user_datastore", user_datastore)
security.init_app(self.app, datastore=user_datastore, *args, **kwargs)
| diamond-org/flask-diamond | flask_diamond/facets/accounts.py | Python | mit | 1,403 |
from datetime import datetime
from ss.log import Log
from ss.config import Config
from ss.jobs.job import Job
from ss.jobs.sync_job import SyncJob
from ss.jobs.backup_job import BackupJob
from ss.jobs.zfs_status_job import ZfsStatusJob
class DummyJob(Job):
def __init__(self,config,lastExecConfig,name):
super(DummyJob, self).__init__(config,lastExecConfig,name)
def execute(self):
Log.log(Log.INFO,"dummy job executed successfully")
return True
class Scheduler(object):
    '''
    Reads job definitions from the configuration, determines which jobs are
    due, and executes them while tracking success and failure counts.
    '''
def _instanciateJob(self,config,jobType,jobName):
if jobType == "dummy":
return DummyJob(config,self.lastExecConfig,jobName)
elif jobType == "sync":
return SyncJob(config,self.lastExecConfig,jobName)
elif jobType == "backup":
return BackupJob(config,self.lastExecConfig,jobName)
elif jobType == "zfs-status":
return ZfsStatusJob(config,self.lastExecConfig,jobName)
else:
raise Exception("Unknown job type '" + jobType + "'")
def __init__(self,config,lastExecConfigPath,tools):
        '''
        Load the last-execution state and collect the jobs from config that
        are due to run now.
        '''
self.lastExecConfig=Config(lastExecConfigPath,False)
self.jobs = []
self.tools = tools
self.numExecuted=0
self.numExistent=0
self.numFailed=0
now=datetime.now()
for e in config.sections():
if e.startswith(Job.JOB_PREFIX):
jobName = e[len(Job.JOB_PREFIX):]
jobType = config.getRequired(e,Job.JOB_TYPE_KEY)
job = self._instanciateJob(config, jobType, jobName)
self.numExistent+=1
if job.isDue(now):
self.jobs.append(job)
def execute(self,dryRun=False):
numJobs=len(self.jobs)
Log.log(Log.INFO, str(numJobs) + " of " + str(self.numExistent) + " jobs are due")
if dryRun:
Log.log(Log.INFO,"dry run! No jobs executed")
return
# Execute the jobs
for j in self.jobs:
Log.separator()
self.numExecuted+=1
Log.log(Log.INFO,"Started executing job " + str(self.numExecuted) + " of " + str(numJobs) + ": " + j._name)
result=j.run()
if not result:
self.numFailed+=1
if result:
Log.log(Log.INFO,"Job " + str(self.numExecuted) + " executed successfully")
else:
Log.log(Log.INFO,"Job " + str(self.numExecuted) + " failed")
if numJobs > 0:
Log.separator()
# Write updated last exec config
self.lastExecConfig.writeBackToFile() | JFinis/serverscript | src/ss/jobs/scheduler.py | Python | mit | 2,822 |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Graphmap(MakefilePackage):
"""A highly sensitive and accurate mapper for long, error-prone reads"""
homepage = "https://github.com/isovic/graphmap"
url = "https://github.com/isovic/graphmap/archive/v0.3.0.tar.gz"
version('0.3.0', git='https://github.com/isovic/graphmap.git', commit='eb8c75d68b03be95464318afa69b645a59f8f6b7')
def edit(self, spec, prefix):
mkdirp(prefix.bin)
makefile = FileFilter('Makefile')
makefile.filter('/usr/bin/graphmap', prefix.bin.graphmap)
def build(self, spec, prefix):
make('modules')
make()
| TheTimmy/spack | var/spack/repos/builtin/packages/graphmap/package.py | Python | lgpl-2.1 | 1,857 |
"""
Class that preprocesses the data
Siddharth Sigtia
Feb,2014
C4DM
"""
import numpy
import cPickle as pickle
import sklearn.preprocessing as preprocessing
import os
import tables as T
from utils import *
class PreProcessor():
    def __init__(self, dataset_dir):
self.dataset_dir = dataset_dir
self.feat_dir = os.path.join(self.dataset_dir,'features')
self.list_dir = os.path.join(self.dataset_dir,'lists')
self.h5_filename = os.path.join(self.feat_dir,'feats.h5')
self.ground_truth = pickle.load(open(os.path.join(self.list_dir,'ground_truth.pickle'),'r'))
self.load_data()
def load_h5(self,):
print 'Loading data from %s'%(self.h5_filename)
with T.openFile(self.h5_filename,'r') as f:
feats = f.root.x.read()
filenames = f.root.filenames.read()
return feats,filenames
def load_data(self,):
features,filenames = self.load_h5()
self.initial_shape = features.shape[1:]
self.n_per_example = numpy.prod(features.shape[1:-1])
self.n_features = features.shape[-1]
self.features,self.filenames = self.flatten_data(features,filenames)
self.preprocess(self.features)
self.filedict = self.build_file_idx_dict()
def flatten_data(self,data,targets):
flat_data = data.view() #Check if reshape is more efficient
flat_data.shape = (-1,self.n_features)
flat_targets = targets.repeat(self.n_per_example)
return flat_data,flat_targets
def preprocess(self,data,scale=True,new_sigma=None,new_mean=None):
        print 'Preprocessing data...'
if scale:
self.scaler = preprocessing.StandardScaler().fit(data)
self.scaler.transform(data)
if new_sigma:
data /= new_sigma
if new_mean:
data += new_mean
def unflatten_data(self,flat_data,flat_targets):
new_shape = (-1,) + self.initial_shape
        data = flat_data.reshape(new_shape)
targets = flat_targets[::self.n_per_example]
return data,targets
def build_file_idx_dict(self,):
keys = list(set(self.filenames))
file_dict = dict([(key,[]) for key in keys])
for i,filename in enumerate(self.filenames):
file_dict[filename].append(i)
for k in keys:
file_dict[k] = numpy.array(file_dict[k])
return file_dict
def prepare_subdataset(self,split='train',unflatten=False,randomize=False):
ids = []
for filename in self.lists[split]:
id = self.filedict[filename]
ids.extend(id)
data = self.features[ids]
files = self.filenames[ids]
targets = self.make_targets(files)
if unflatten:
data,targets = self.unflatten_data(data,targets)
if randomize:
data,targets = self.randomize(data,targets)
self.data[split] = data
self.targets[split] = targets
def prepare_fold(self,train_list_file,valid_list_file,test_list_file):
self.lists = {}
self.data = {}
self.targets = {}
[train_list,valid_list,test_list] = self.get_fold_lists(train_list_file,valid_list_file,test_list_file)
self.lists['train'] = train_list
self.lists['valid'] = valid_list
self.lists['test'] = test_list
self.prepare_subdataset('train')
self.prepare_subdataset('valid')
self.prepare_subdataset('test')
def make_targets(self,filenames):
targets = []
for filename in filenames:
targets.append(self.ground_truth[filename])
return numpy.array(targets)
def get_fold_lists(self, train_list_file,valid_list_file,test_list_file):
return [self.parse_list(train_list_file),
self.parse_list(valid_list_file),
self.parse_list(test_list_file)]
def parse_list(self, list_file):
if list_file is not None:
return list(set([line.strip().split('\t')[0] for line in open(list_file,'r').readlines()]))
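# Minimal usage sketch; the dataset path and list files are hypothetical:
#
#   pp = PreProcessor('/data/genres')
#   pp.prepare_fold('lists/train.txt', 'lists/valid.txt', 'lists/test.txt')
#   x_train, y_train = pp.data['train'], pp.targets['train']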
| daleloogn/mython | preprocessing.py | Python | gpl-2.0 | 4,046 |
# coding=utf-8
# Copyright 2006-2009 Scott Horowitz <[email protected]>
# Copyright 2009-2014 Jonathan Ballet <[email protected]>
#
# This file is part of Sonata.
#
# Sonata is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sonata is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sonata. If not, see <http://www.gnu.org/licenses/>.
import gettext
import os
from gi.repository import Gtk, GdkPixbuf
from sonata import misc, ui
class About:
def __init__(self, parent_window, config, version, licensetext):
self.parent_window = parent_window
self.config = config
self.version = version
self.license = licensetext
self.about_dialog = None
self.shortcuts_dialog = None
def about_close(self, _event, _data=None):
if _data == Gtk.ResponseType.DELETE_EVENT or \
_data == Gtk.ResponseType.CANCEL:
self.about_dialog.hide()
return True
def shortcuts_close(self, _event, _data=None):
if _data == Gtk.ResponseType.DELETE_EVENT or \
_data == Gtk.ResponseType.CANCEL:
self.shortcuts_dialog.hide()
return True
def shortcuts_close_click_cb(self, _button):
self.shortcuts_dialog.hide()
def _init_shortcuts_dialog(self):
# define the shortcuts and their descriptions
# these are all gettextable
# Keep them here (not as XML) as they're more convenient this way
mainshortcuts = \
[["F1", _("About Sonata")],
["F5", _("Preferences")],
["F11", _("Fullscreen Artwork Mode")],
["Alt-[1-5]", _("Switch to [1st-5th] tab")],
["Alt-C", _("Connect to MPD")],
["Alt-D", _("Disconnect from MPD")],
["Alt-R", _("Randomize current playlist")],
["Alt-Down", _("Expand player")],
["Alt-Left", _("Switch to previous tab")],
["Alt-Right", _("Switch to next tab")],
["Alt-Up", _("Collapse player")],
["Ctrl-H", _("Search library")],
["Ctrl-Q", _("Quit")],
["Ctrl-Shift-U", _("Update entire library")],
["Menu", _("Display popup menu")],
["Escape", _("Minimize to system tray (if enabled)")]]
playbackshortcuts = \
[["Ctrl-Left", _("Previous track")],
["Ctrl-Right", _("Next track")],
["Ctrl-P", _("Play/Pause")],
["Ctrl-S", _("Stop")],
["Ctrl-Minus", _("Lower the volume")],
["Ctrl-Plus", _("Raise the volume")]]
currentshortcuts = \
[["Enter/Space", _("Play selected song")],
["Delete", _("Remove selected song(s)")],
["Ctrl-I", _("Center currently playing song")],
["Ctrl-T", _("Edit selected song's tags")],
["Ctrl-Shift-S", _("Save to new playlist")],
["Ctrl-Delete", _("Clear list")],
["Alt-R", _("Randomize list")]]
libraryshortcuts = \
[["Enter/Space", _("Add selected song(s) or enter directory")],
["Backspace", _("Go to parent directory")],
["Ctrl-D", _("Add selected item(s)")],
["Ctrl-R", _("Replace with selected item(s)")],
["Ctrl-T", _("Edit selected song's tags")],
["Ctrl-Shift-D", _("Add selected item(s) and play")],
["Ctrl-Shift-R", _("Replace with selected item(s) and play")],
["Ctrl-U", _("Update selected item(s)/path(s)")]]
playlistshortcuts = \
[["Enter/Space", _("Add selected playlist(s)")],
["Delete", _("Remove selected playlist(s)")],
["Ctrl-D", _("Add selected playlist(s)")],
["Ctrl-R", _("Replace with selected playlist(s)")],
["Ctrl-Shift-D", _("Add selected playlist(s) and play")],
["Ctrl-Shift-R", _(('Replace with selected '
'playlist(s) and play'))]]
streamshortcuts = \
[["Enter/Space", _("Add selected stream(s)")],
["Delete", _("Remove selected stream(s)")],
["Ctrl-D", _("Add selected stream(s)")],
["Ctrl-R", _("Replace with selected stream(s)")],
["Ctrl-Shift-D", _("Add selected stream(s) and play")],
["Ctrl-Shift-R", _(('Replace with selected '
'stream(s) and play'))]]
infoshortcuts = \
[["Ctrl-T", _("Edit playing song's tags")]]
# define the main array- this adds headings to each section of
# shortcuts that will be displayed
shortcuts = [[_("Main Shortcuts"), mainshortcuts],
[_("Playback Shortcuts"), playbackshortcuts],
[_("Current Shortcuts"), currentshortcuts],
[_("Library Shortcuts"), libraryshortcuts],
[_("Playlist Shortcuts"), playlistshortcuts],
[_("Stream Shortcuts"), streamshortcuts],
[_("Info Shortcuts"), infoshortcuts]]
self.shortcuts_dialog = self.builder.get_object('shortcuts_dialog')
self.shortcuts_dialog.connect('response', self.shortcuts_close)
self.shortcuts_dialog.connect('delete_event', self.shortcuts_close)
shortcuts_close_button = self.builder.get_object(
'shortcuts_dialog_closebutton')
shortcuts_close_button.connect('clicked', self.shortcuts_close_click_cb)
# each pair is a [ heading, shortcutlist ]
vbox = self.builder.get_object('shortcuts_dialog_content_box')
for heading, shortcutlist in shortcuts:
titlelabel = Gtk.Label(heading, xalign=0)
titlelabel.get_style_context().add_class('heading')
vbox.pack_start(titlelabel, False, False, 2)
# print the items of [ shortcut, desc ]
for shortcut, desc in shortcutlist:
tmphbox = Gtk.HBox()
tmplabel = Gtk.Label('{}:'.format(shortcut), xalign=0)
tmplabel.get_style_context().add_class('shortcut')
tmpdesc = Gtk.Label(desc, xalign=0, wrap=False)
tmphbox.pack_start(tmplabel, False, False, 2)
tmphbox.pack_start(tmpdesc, True, True, 2)
vbox.pack_start(tmphbox, False, False, 2)
vbox.pack_start(Gtk.Label(" "), False, False, 2)
def about_shortcuts(self, _button):
if not self.shortcuts_dialog:
self._init_shortcuts_dialog()
self.shortcuts_dialog.show_all()
self.shortcuts_dialog.run()
def statstext(self, stats):
song_count = int(stats['songs'])
song_text = ngettext('{count} song.', '{count} songs.',
song_count).format(count=song_count)
album_count = int(stats['albums'])
album_text = ngettext('{count} album.', '{count} albums.',
album_count).format(count=album_count)
artist_count = int(stats['artists'])
artist_text = ngettext('{count} artist.', '{count} artists.',
artist_count).format(count=artist_count)
try:
db_playtime = float(stats['db_playtime'])
hours = int(misc.convert_time(db_playtime).split(':')[-3])
        except Exception:
hours = 0
if hours >= 24:
days = round(hours / 24, 1)
time_text = ngettext('{count} day of bliss.',
'{count} days of bliss.',
days).format(count=days)
else:
time_text = ngettext('{count} hour of bliss.',
'{count} hours of bliss.',
hours).format(count=hours)
parts = (song_text, album_text, artist_text, time_text)
live_parts = [part for part in parts if part is not None]
return '\n'.join(live_parts)
def about_load(self, stats):
self.builder = ui.builder('about')
self.provider = ui.css_provider('about')
self.about_dialog = self.builder.get_object('about_dialog')
try:
self.about_dialog.set_transient_for(self.parent_window)
        except Exception:
pass
self.about_dialog.set_version(self.version)
if stats:
self.about_dialog.set_copyright(self.statstext(stats))
context = self.about_dialog.get_style_context()
logo_icon = Gtk.IconFactory.lookup_default('sonata-large')
logo_pb = logo_icon.render_icon_pixbuf(context, -1)
self.about_dialog.set_logo(logo_pb)
# Add button to show keybindings:
children = self.about_dialog.action_area.get_children()[-1]
self.about_dialog.action_area.reorder_child(children, -2)
# Connect to callbacks
self.about_dialog.connect('response', self.about_close)
self.about_dialog.connect('delete_event', self.about_close)
shortcut_button = self.builder.get_object('shortcut_button')
shortcut_button.connect('clicked', self.about_shortcuts)
self.about_dialog.show_all()
| deprint/sonata | sonata/about.py | Python | gpl-3.0 | 9,748 |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for contrib.layers.python.ops.loss_ops."""
# pylint: disable=unused-import,g-bad-import-order
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
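# Semantics under test, sketched from the cases below (illustrative only):
# each op flattens all but the first (batch) dimension and reduces over it.
#
#   tf.contrib.layers.reduce_batch_sum([[1., 2.], [-1., -2.]])   # -> [3., -3.]
#   tf.contrib.layers.reduce_batch_mean([[1., 2.], [-1., -2.]])  # -> [1.5, -1.5]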
class ReduceBatchSumTest(tf.test.TestCase):
def testDimensionNone(self):
with self.test_session():
input_array = np.array([
[1.0, 2.0],
[-1.0, -2.0]
], dtype=np.float32)
placeholder_vec = tf.placeholder(tf.float32, name="placeholder_vec")
expected_result = np.array([3.0, -3.0])
actual_result = tf.contrib.layers.reduce_batch_sum(placeholder_vec)
self.assertEqual(actual_result.get_shape().as_list(), [None])
self.assertAllClose(expected_result, actual_result.eval(feed_dict={
placeholder_vec: input_array
}))
def testDimension0(self):
with self.test_session():
input_vec = tf.constant(2.0)
with self.assertRaises(ValueError):
tf.contrib.layers.reduce_batch_sum(input_vec)
def testDimension1(self):
with self.test_session():
input_vec = tf.constant([1.0, 2.0])
expected_result = np.array([1.0, 2.0])
actual_result = tf.contrib.layers.reduce_batch_sum(input_vec)
self.assertAllClose(expected_result, actual_result.eval())
def testDimension2(self):
with self.test_session():
input_vec = tf.constant([
[1.0, 2.0],
[-1.0, -2.0]
])
expected_result = np.array([3.0, -3.0])
actual_result = tf.contrib.layers.reduce_batch_sum(input_vec)
self.assertAllClose(expected_result, actual_result.eval())
def testReturnShape(self):
with self.test_session():
input_vec = tf.constant([
[1.0, 2.0],
[-1.0, -2.0]
])
expected_result = np.array([3.0, -3.0])
actual_result = tf.contrib.layers.reduce_batch_sum(input_vec)
self.assertShapeEqual(expected_result, actual_result)
def testDimensionN(self):
with self.test_session():
input_vec = tf.constant([
[
[1.0, 2.0],
[3.0, 4.0]
],
[
[5.0, 6.0],
[7.0, 8.0]
]
])
expected_result = np.array([10.0, 26.0])
actual_result = tf.contrib.layers.reduce_batch_sum(input_vec)
self.assertAllClose(expected_result, actual_result.eval())
class ReduceBatchMeanTest(tf.test.TestCase):
def testDimensionNone(self):
with self.test_session():
input_array = np.array([
[1.0, 2.0],
[-1.0, -2.0]
], dtype=np.float32)
placeholder_vec = tf.placeholder(tf.float32, name="placeholder_vec")
expected_result = np.array([1.5, -1.5])
actual_result = tf.contrib.layers.reduce_batch_mean(placeholder_vec)
self.assertEqual(actual_result.get_shape().as_list(), [None])
self.assertAllClose(expected_result, actual_result.eval(feed_dict={
placeholder_vec: input_array
}))
def testDimension0(self):
with self.test_session():
input_vec = tf.constant(2.0)
with self.assertRaises(ValueError):
tf.contrib.layers.reduce_batch_mean(input_vec)
def testDimension1(self):
with self.test_session():
input_vec = tf.constant([1.0, 2.0])
expected_result = np.array([1.0, 2.0])
actual_result = tf.contrib.layers.reduce_batch_mean(input_vec)
self.assertAllClose(expected_result, actual_result.eval())
def testDimension2(self):
with self.test_session():
input_vec = tf.constant([
[1.0, 2.0],
[-1.0, -2.0]
])
expected_result = np.array([1.5, -1.5])
actual_result = tf.contrib.layers.reduce_batch_mean(input_vec)
self.assertAllClose(expected_result, actual_result.eval())
def testReturnShape(self):
with self.test_session():
input_vec = tf.constant([
[1.0, 2.0],
[-1.0, -2.0]
])
expected_result = np.array([3.0, -3.0])
actual_result = tf.contrib.layers.reduce_batch_mean(input_vec)
self.assertShapeEqual(expected_result, actual_result)
def testDimensionN(self):
with self.test_session():
input_vec = tf.constant([
[
[1.0, 2.0],
[3.0, 4.0]
],
[
[5.0, 6.0],
[7.0, 8.0]
]
])
expected_result = np.array([2.5, 6.5])
actual_result = tf.contrib.layers.reduce_batch_mean(input_vec)
self.assertAllClose(expected_result, actual_result.eval())
class AbsoluteLossTest(tf.test.TestCase):
def _getTestVectors(self):
target = tf.constant([1.0, 0.0, 3.0, 2.0], shape=[2, 2], name="target")
predicted = tf.constant([1.1, -0.2, 3.3, 1.6], shape=[2, 2],
name="predicted")
expected_loss = np.array([0.1, 0.2, 0.3, 0.4]).reshape(2, 2)
return target, predicted, expected_loss
def testAbsoluteLoss(self):
with self.test_session():
target, predicted, expected_loss = self._getTestVectors()
result = tf.contrib.layers.absolute_loss(predicted, target)
self.assertAllClose(expected_loss, result.eval())
def testAbsoluteLossReturnShape(self):
with self.test_session():
target, predicted, expected_loss = self._getTestVectors()
result = tf.contrib.layers.absolute_loss(predicted, target)
self.assertShapeEqual(expected_loss, result)
def testInvalidShapesValueError(self):
with self.test_session():
target = tf.constant([1.0, 0.0, 3.0, 2.0], shape=[2, 2], name="target")
incompatible_shape = tf.constant([0.0, 1.1], shape=[2],
name="incompatible_shape")
with self.assertRaises(ValueError):
tf.contrib.layers.absolute_loss(incompatible_shape, target)
def testAbsoluteLossGradient(self):
with self.test_session():
target, predicted, _ = self._getTestVectors()
result = tf.contrib.layers.absolute_loss(predicted, target)
x_shape = [2, 2]
err = tf.test.compute_gradient_error(target, x_shape, result, x_shape)
err_tolerance = 1e-4
self.assertLess(err, err_tolerance)
class SquaredLossTest(tf.test.TestCase):
def _getTestVectors(self):
target = tf.constant([1.0, 0.0, 3.0, 2.0], shape=[2, 2], name="target")
predicted = tf.constant([1.1, -0.2, 3.3, 1.6], shape=[2, 2],
name="predicted")
expected_loss = np.array([0.01, 0.04, 0.09, 0.16]).reshape(2, 2)
return target, predicted, expected_loss
def testSquaredLoss(self):
with self.test_session():
target, predicted, expected_loss = self._getTestVectors()
result = tf.contrib.layers.squared_loss(predicted, target)
self.assertAllClose(expected_loss, result.eval())
def testSquaredLossReturnShape(self):
with self.test_session():
target, predicted, expected_loss = self._getTestVectors()
result = tf.contrib.layers.squared_loss(predicted, target)
self.assertShapeEqual(expected_loss, result)
def testInvalidShapesValueError(self):
with self.test_session():
target = tf.constant([1.0, 0.0, 3.0, 2.0], shape=[2, 2], name="target")
incompatible_shape = tf.constant([0.0, 1.1], shape=[2],
name="incompatible_shape")
with self.assertRaises(ValueError):
tf.contrib.layers.squared_loss(incompatible_shape, target)
def testSquaredLossGradient(self):
with self.test_session():
target, predicted, _ = self._getTestVectors()
result = tf.contrib.layers.squared_loss(predicted, target)
x_shape = [2, 2]
err = tf.test.compute_gradient_error(target, x_shape, result, x_shape)
err_tolerance = 1e-3
self.assertLess(err, err_tolerance)
class SumSquaredLossTest(tf.test.TestCase):
def _getTestVectors(self):
target = tf.constant([[0.0, 1.0],
[3.0, 2.0]],
shape=[2, 2],
name="target")
predicted = tf.constant([[3.0, -2.0],
[1.0, 2.0]],
shape=[2, 2],
name="predicted")
expected_loss = np.array([9.0, 2.0])
return target, predicted, expected_loss
def testSumSquaredLoss(self):
with self.test_session():
target, predicted, expected_loss = self._getTestVectors()
result = tf.contrib.layers.sum_squared_loss(predicted, target)
self.assertAllClose(expected_loss, result.eval())
def testSumSquaredLossReturnShape(self):
with self.test_session():
target, predicted, expected_loss = self._getTestVectors()
result = tf.contrib.layers.sum_squared_loss(predicted, target)
self.assertShapeEqual(expected_loss, result)
def testInvalidShapesValueError(self):
with self.test_session():
target = tf.constant([1.0, 0.0, 3.0, 2.0], shape=[2, 2], name="target")
incompatible_shape = tf.constant([0.0, 1.1], shape=[2],
name="incompatible_shape")
with self.assertRaises(ValueError):
tf.contrib.layers.sum_squared_loss(incompatible_shape, target)
def testSumSquaredLossGradient(self):
with self.test_session():
target, predicted, _ = self._getTestVectors()
result = tf.contrib.layers.sum_squared_loss(predicted, target)
x_shape = [2, 2]
result_shape = [2]
err = tf.test.compute_gradient_error(target, x_shape,
result, result_shape)
err_tolerance = 1e-3
self.assertLess(err, err_tolerance)
class MeanAbsoluteLossTest(tf.test.TestCase):
def _getTestVectors(self):
target = tf.constant([[0.0, 1.0, 2.0],
[3.0, 2.0, 4.0]],
shape=[2, 3],
name="target")
predicted = tf.constant([[3.0, -3.0, 0.0],
[1.0, 2.0, 0.0]],
shape=[2, 3],
name="predicted")
expected_loss = np.array([3.0, 2.0])
return target, predicted, expected_loss
def testMeanAbsoluteLoss(self):
with self.test_session():
target, predicted, expected_loss = self._getTestVectors()
result = tf.contrib.layers.mean_absolute_loss(predicted, target)
self.assertAllClose(expected_loss, result.eval())
def testMeanAbsoluteLossReturnShape(self):
with self.test_session():
target, predicted, expected_loss = self._getTestVectors()
result = tf.contrib.layers.mean_absolute_loss(predicted, target)
self.assertShapeEqual(expected_loss, result)
def testInvalidShapesValueError(self):
with self.test_session():
target = tf.constant([1.0, 0.0, 3.0, 2.0], shape=[2, 2], name="target")
incompatible_shape = tf.constant([0.0, 1.1], shape=[2],
name="incompatible_shape")
with self.assertRaises(ValueError):
tf.contrib.layers.mean_absolute_loss(incompatible_shape, target)
def testMeanAbsoluteLossGradient(self):
with self.test_session():
target, predicted, _ = self._getTestVectors()
result = tf.contrib.layers.mean_absolute_loss(predicted, target)
x_shape = [2, 3]
result_shape = [2]
err = tf.test.compute_gradient_error(target, x_shape,
result, result_shape)
err_tolerance = 1e-3
self.assertLess(err, err_tolerance)
class MeanSquaredLossTest(tf.test.TestCase):
def _getTestVectors(self):
target = tf.constant([[0.0, 1.0, 2.0],
[3.0, 2.0, 4.0]],
shape=[2, 3],
name="target")
predicted = tf.constant([[3.0, -3.0, 0.0],
[1.0, 2.0, 0.0]],
shape=[2, 3],
name="predicted")
expected_loss = np.array([9.666667, 6.666667])
return target, predicted, expected_loss
def testMeanSquaredLoss(self):
with self.test_session():
target, predicted, expected_loss = self._getTestVectors()
result = tf.contrib.layers.mean_squared_loss(predicted, target)
self.assertAllClose(expected_loss, result.eval())
def testMeanSquaredLossReturnShape(self):
with self.test_session():
target, predicted, expected_loss = self._getTestVectors()
result = tf.contrib.layers.mean_squared_loss(predicted, target)
self.assertShapeEqual(expected_loss, result)
def testInvalidShapesValueError(self):
with self.test_session():
target = tf.constant([1.0, 0.0, 3.0, 2.0], shape=[2, 2], name="target")
incompatible_shape = tf.constant([0.0, 1.1], shape=[2],
name="incompatible_shape")
with self.assertRaises(ValueError):
tf.contrib.layers.mean_squared_loss(incompatible_shape, target)
def testMeanSquaredLossGradient(self):
with self.test_session():
target, predicted, _ = self._getTestVectors()
result = tf.contrib.layers.mean_squared_loss(predicted, target)
x_shape = [2, 3]
result_shape = [2]
err = tf.test.compute_gradient_error(target, x_shape,
result, result_shape)
err_tolerance = 1e-3
self.assertLess(err, err_tolerance)
class RootMeanSquaredLossTest(tf.test.TestCase):
def _getTestVectors(self):
target = tf.constant([[0.0, 1.0, 2.0],
[3.0, 2.0, 4.0]],
shape=[2, 3],
name="target")
predicted = tf.constant([[3.0, -3.0, 0.0],
[1.0, 2.0, 0.0]],
shape=[2, 3],
name="predicted")
expected_loss = np.array([3.109126, 2.5819889])
return target, predicted, expected_loss
def testRootMeanSquaredLoss(self):
with self.test_session():
target, predicted, expected_loss = self._getTestVectors()
result = tf.contrib.layers.root_mean_squared_loss(predicted, target)
self.assertAllClose(expected_loss, result.eval())
def testRootMeanSquaredLossReturnShape(self):
with self.test_session():
target, predicted, expected_loss = self._getTestVectors()
result = tf.contrib.layers.root_mean_squared_loss(predicted, target)
self.assertShapeEqual(expected_loss, result)
def testInvalidShapesValueError(self):
with self.test_session():
target = tf.constant([1.0, 0.0, 3.0, 2.0], shape=[2, 2], name="target")
incompatible_shape = tf.constant([0.0, 1.1], shape=[2],
name="incompatible_shape")
with self.assertRaises(ValueError):
tf.contrib.layers.root_mean_squared_loss(incompatible_shape, target)
def testRootMeanSquaredLossGradient(self):
with self.test_session():
target, predicted, _ = self._getTestVectors()
result = tf.contrib.layers.root_mean_squared_loss(predicted, target)
x_shape = [2, 3]
result_shape = [2]
err = tf.test.compute_gradient_error(target, x_shape,
result, result_shape)
err_tolerance = 1e-3
self.assertLess(err, err_tolerance)
if __name__ == "__main__":
tf.test.main()
| martinbede/second-sight | tensorflow/contrib/layers/python/ops/loss_ops_test.py | Python | apache-2.0 | 15,936 |
import numpy as np
import sonnet as snt
import tensorflow as tf
from tensorflow.contrib.distributions import Bernoulli, NormalWithSoftplusScale
from modules import SpatialTransformer, ParametrisedGaussian
class AIRCell(snt.RNNCore):
"""RNN cell that implements the core features of Attend, Infer, Repeat, as described here:
https://arxiv.org/abs/1603.08575
"""
_n_transform_param = 4
def __init__(self, img_size, crop_size, n_appearance,
transition, input_encoder, glimpse_encoder, glimpse_decoder, transform_estimator, steps_predictor,
discrete_steps=True, canvas_init=None, explore_eps=None, debug=False):
"""Creates the cell
:param img_size: int tuple, size of the image
:param crop_size: int tuple, size of the attention glimpse
:param n_appearance: number of latent units describing the "what"
:param transition: an RNN cell for maintaining the internal hidden state
:param input_encoder: callable, encodes the original input image before passing it into the transition
:param glimpse_encoder: callable, encodes the glimpse into latent representation
:param glimpse_decoder: callable, decodes the glimpse from latent representation
        :param transform_estimator: callable, transforms the hidden state into parameters for the spatial transformer
:param steps_predictor: callable, predicts whether to take a step
:param discrete_steps: boolean, steps are samples from a Bernoulli distribution if True; if False, all steps are
taken and are weighted by the step probability
:param canvas_init: float or None, initial value for the reconstructed image. If None, the canvas is black. If
float, the canvas starts with a given value, which is trainable.
        :param explore_eps: float or None; if float, it has to be in (0., 0.5); step probability is clipped between
            `explore_eps` and (1 - `explore_eps`)
:param debug: boolean, adds checks for NaNs in the inputs to distributions
"""
super(AIRCell, self).__init__(self.__class__.__name__)
self._img_size = img_size
self._n_pix = np.prod(self._img_size)
self._crop_size = crop_size
self._n_appearance = n_appearance
self._transition = transition
self._n_hidden = self._transition.output_size[0]
self._sample_presence = discrete_steps
self._explore_eps = explore_eps
self._debug = debug
with self._enter_variable_scope():
self._canvas = tf.zeros(self._img_size, dtype=tf.float32)
if canvas_init is not None:
self._canvas_value = tf.get_variable('canvas_value', dtype=tf.float32, initializer=canvas_init)
self._canvas += self._canvas_value
transform_constraints = snt.AffineWarpConstraints.no_shear_2d()
self._spatial_transformer = SpatialTransformer(img_size, crop_size, transform_constraints)
self._inverse_transformer = SpatialTransformer(img_size, crop_size, transform_constraints, inverse=True)
self._transform_estimator = transform_estimator(self._n_transform_param)
self._input_encoder = input_encoder()
self._glimpse_encoder = glimpse_encoder()
self._glimpse_decoder = glimpse_decoder(crop_size)
self._what_distrib = ParametrisedGaussian(n_appearance, scale_offset=0.5,
validate_args=self._debug, allow_nan_stats=not self._debug)
self._steps_predictor = steps_predictor()
@property
def state_size(self):
return [
np.prod(self._img_size), # image
np.prod(self._img_size), # canvas
self._n_appearance, # what
self._n_transform_param, # where
self._transition.state_size, # hidden state of the rnn
1, # presence
]
@property
def output_size(self):
return [
np.prod(self._img_size), # canvas
np.prod(self._crop_size), # glimpse
self._n_appearance, # what code
self._n_appearance, # what loc
self._n_appearance, # what scale
self._n_transform_param, # where code
self._n_transform_param, # where loc
self._n_transform_param, # where scale
1, # presence prob
1 # presence
]
@property
def output_names(self):
return 'canvas glimpse what what_loc what_scale where where_loc where_scale presence_prob presence'.split()
def initial_state(self, img):
batch_size = img.get_shape().as_list()[0]
hidden_state = self._transition.initial_state(batch_size, tf.float32, trainable=True)
where_code = tf.zeros([1, self._n_transform_param], dtype=tf.float32, name='where_init')
what_code = tf.zeros([1, self._n_appearance], dtype=tf.float32, name='what_init')
flat_canvas = tf.reshape(self._canvas, (1, self._n_pix))
where_code, what_code, flat_canvas = (tf.tile(i, (batch_size, 1)) for i in (where_code, what_code, flat_canvas))
flat_img = tf.reshape(img, (batch_size, self._n_pix))
init_presence = tf.ones((batch_size, 1), dtype=tf.float32)
return [flat_img, flat_canvas,
what_code, where_code, hidden_state, init_presence]
def _build(self, inpt, state):
"""Input is unused; it's only to force a maximum number of steps"""
img_flat, canvas_flat, what_code, where_code, hidden_state, presence = state
img_inpt = img_flat
img = tf.reshape(img_inpt, (-1,) + tuple(self._img_size))
inpt_encoding = self._input_encoder(img)
with tf.variable_scope('rnn_inpt'):
hidden_output, hidden_state = self._transition(inpt_encoding, hidden_state)
where_param = self._transform_estimator(hidden_output)
where_distrib = NormalWithSoftplusScale(*where_param,
validate_args=self._debug, allow_nan_stats=not self._debug)
where_loc, where_scale = where_distrib.loc, where_distrib.scale
where_code = where_distrib.sample()
cropped = self._spatial_transformer(img, where_code)
with tf.variable_scope('presence'):
presence_prob = self._steps_predictor(hidden_output)
if self._explore_eps is not None:
presence_prob = self._explore_eps / 2 + (1 - self._explore_eps) * presence_prob
if self._sample_presence:
presence_distrib = Bernoulli(probs=presence_prob, dtype=tf.float32,
validate_args=self._debug, allow_nan_stats=not self._debug)
new_presence = presence_distrib.sample()
presence *= new_presence
else:
presence = presence_prob
what_params = self._glimpse_encoder(cropped)
what_distrib = self._what_distrib(what_params)
what_loc, what_scale = what_distrib.loc, what_distrib.scale
what_code = what_distrib.sample()
decoded = self._glimpse_decoder(what_code)
inversed = self._inverse_transformer(decoded, where_code)
with tf.variable_scope('rnn_outputs'):
inversed_flat = tf.reshape(inversed, (-1, self._n_pix))
canvas_flat += presence * inversed_flat
decoded_flat = tf.reshape(decoded, (-1, np.prod(self._crop_size)))
output = [canvas_flat, decoded_flat, what_code, what_loc, what_scale, where_code, where_loc, where_scale,
presence_prob, presence]
state = [img_flat, canvas_flat,
what_code, where_code, hidden_state, presence]
        return output, state
| akosiorek/attend_infer_repeat | attend_infer_repeat/cell.py | Python | gpl-3.0 | 7842
# -*- coding: utf-8 -*-
'''
Created on Jul 18, 2013
@author: Carl, Aaron
'''
import MySQLdb
| MoneyBack/MoneyBack | mb/dal.py | Python | gpl-2.0 | 96 |
from cfme.common.provider import DefaultEndpoint, DefaultEndpointForm
from . import InfraProvider
from wrapanapi.virtualcenter import VMWareSystem
class VirtualCenterEndpoint(DefaultEndpoint):
pass
class VirtualCenterEndpointForm(DefaultEndpointForm):
pass
class VMwareProvider(InfraProvider):
type_name = "virtualcenter"
mgmt_class = VMWareSystem
db_types = ["Vmware::InfraManager"]
endpoints_form = VirtualCenterEndpointForm
discover_dict = {"vmware": True}
def __init__(self, name=None, endpoints=None, key=None, zone=None, hostname=None,
ip_address=None, start_ip=None, end_ip=None, provider_data=None, appliance=None):
super(VMwareProvider, self).__init__(
name=name, endpoints=endpoints, zone=zone, key=key, provider_data=provider_data,
appliance=appliance)
self.hostname = hostname
self.start_ip = start_ip
self.end_ip = end_ip
if ip_address:
self.ip_address = ip_address
def deployment_helper(self, deploy_args):
""" Used in utils.virtual_machines """
# Called within a dictionary update. Since we want to remove key/value pairs, return the
# entire dictionary
deploy_args.pop('username', None)
deploy_args.pop('password', None)
if "allowed_datastores" not in deploy_args and "allowed_datastores" in self.data:
deploy_args['allowed_datastores'] = self.data['allowed_datastores']
return deploy_args
@classmethod
def from_config(cls, prov_config, prov_key, appliance=None):
endpoint = VirtualCenterEndpoint(**prov_config['endpoints']['default'])
if prov_config.get('discovery_range'):
start_ip = prov_config['discovery_range']['start']
end_ip = prov_config['discovery_range']['end']
else:
start_ip = end_ip = prov_config.get('ipaddress')
return cls(name=prov_config['name'],
endpoints={endpoint.name: endpoint},
zone=prov_config['server_zone'],
key=prov_key,
start_ip=start_ip,
end_ip=end_ip,
appliance=appliance)
@property
def view_value_mapping(self):
return {'name': self.name,
'prov_type': 'VMware vCenter'
}
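# Usage sketch (hypothetical yaml-style provider entry; the keys mirror the
# ones read by from_config above):
#
#   prov_config = {
#       'name': 'vsphere-env',
#       'endpoints': {'default': {...}},   # VirtualCenterEndpoint kwargs
#       'server_zone': 'default',
#       'discovery_range': {'start': '10.0.0.1', 'end': '10.0.0.254'},
#   }
#   provider = VMwareProvider.from_config(prov_config, 'vsphere-env-key')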
| jteehan/cfme_tests | cfme/infrastructure/provider/virtualcenter.py | Python | gpl-2.0 | 2,366 |
import time
import unittest
from mox import MoxTestBase
from slimta.edge import Edge, EdgeServer
class TestEdge(MoxTestBase, unittest.TestCase):
def test_handoff(self):
self.mox.StubOutWithMock(time, 'time')
env = self.mox.CreateMockAnything()
queue = self.mox.CreateMockAnything()
time.time().AndReturn(12345)
queue.enqueue(env).AndReturn('asdf')
self.mox.ReplayAll()
edge = Edge(queue, 'test.example.com')
self.assertEqual('asdf', edge.handoff(env))
self.assertEqual('test.example.com', env.receiver)
self.assertEqual(12345, env.timestamp)
def test_handoff_error(self):
env = self.mox.CreateMockAnything()
queue = self.mox.CreateMockAnything()
queue.enqueue(env).AndRaise(RuntimeError)
self.mox.ReplayAll()
edge = Edge(queue)
with self.assertRaises(RuntimeError):
edge.handoff(env)
def test_kill(self):
queue = self.mox.CreateMockAnything()
self.mox.ReplayAll()
edge = Edge(queue)
edge.kill()
class TestEdgeServer(MoxTestBase, unittest.TestCase):
def test_edge_interface(self):
edge = EdgeServer(('127.0.0.1', 0), None)
with self.assertRaises(NotImplementedError):
edge.handle(None, None)
def test_handle(self):
queue = self.mox.CreateMockAnything()
sock = self.mox.CreateMockAnything()
edge = EdgeServer(('127.0.0.1', 0), queue)
self.mox.StubOutWithMock(edge, 'handle')
sock.fileno().AndReturn(15)
edge.handle(sock, 'test address')
self.mox.ReplayAll()
try:
edge.server.pre_start()
except AttributeError:
edge.server.init_socket()
edge._handle(sock, 'test address')
def test_handle_error(self):
queue = self.mox.CreateMockAnything()
sock = self.mox.CreateMockAnything()
edge = EdgeServer(('127.0.0.1', 0), queue)
self.mox.StubOutWithMock(edge, 'handle')
sock.fileno().AndReturn(15)
edge.handle(sock, 5).AndRaise(RuntimeError)
self.mox.ReplayAll()
try:
edge.server.pre_start()
except AttributeError:
edge.server.init_socket()
with self.assertRaises(RuntimeError):
edge._handle(sock, 5)
def test_kill(self):
edge = EdgeServer(('127.0.0.1', 0), None)
self.mox.StubOutWithMock(edge.server, 'stop')
edge.server.stop()
self.mox.ReplayAll()
edge.kill()
def test_run(self):
edge = EdgeServer(('127.0.0.1', 0), None)
self.mox.StubOutWithMock(edge.server, 'start')
self.mox.StubOutWithMock(edge.server, 'serve_forever')
edge.server.start()
edge.server.serve_forever()
self.mox.ReplayAll()
edge._run()
# vim:et:fdm=marker:sts=4:sw=4:ts=4
| slimta/python-slimta | test/test_slimta_edge.py | Python | mit | 2,894 |
__author__ = 'albmin'
# Allows for package support (no module-level methods are needed here).
| albmin/json_mapper | __init__.py | Python | mit | 104
# -*- coding: utf-8 -*-
# This file is part of DigitalPedalBoard python program.
# Copyright (C) 2013 Copyright (C) 2013 Daniel Ken Fujimori Killner,
# Gabriel Moura Vieira Martinez, Rafael Alves de Araujo Sena,
# Ricardo Boccoli Gallego, Danilo de Jesus da Silva Bellini.
#
# DigitalPedalBoard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
File with some important unrelated classes
"""
import wx
from threading import Event, Thread
from player import *
# Based on http://stackoverflow.com/questions/4709087/wxslider-with-floating-point-values
class FloatSlider(wx.Slider):
"""
Class that modifies the Slider class of wx and makes it accept float (instead of only integers)
"""
def __init__(self, parent, id=-1, value=0.00, min_val=None, max_val=None, res=1e-4,
size=wx.DefaultSize, style=wx.SL_HORIZONTAL,
name='floatslider', texto=None):
self._value = value
self.defaultValue = float(value)
self._min = min_val
self._max = max_val
self.texto = texto
self._res = res
ival, imin, imax = [round(v/res) for v in (value, min_val, max_val)]
self._islider = super(FloatSlider, self)
self._islider.__init__(
parent, id, ival, imin, imax, size=size, style=style, name=name
)
self.Bind(wx.EVT_SCROLL, self._OnScroll)
def _OnScroll(self, event):
ival = self._islider.GetValue()
imin = self._islider.GetMin()
imax = self._islider.GetMax()
if ival == imin:
self._value = self._min
elif ival == imax:
self._value = self._max
else:
self._value = ival * self._res
self.texto.SetValue(str(self._value))
event.Skip()
def GetValue(self):
return self._value
def GetMin(self):
return self._min
def GetMax(self):
return self._max
def GetRes(self):
return self._res
def SetValue(self, value):
self._islider.SetValue(round(value/self._res))
self._value = value
def SetMin(self, minval):
self._islider.SetMin(round(minval/self._res))
self._min = minval
def SetMax(self, maxval):
self._islider.SetMax(round(maxval/self._res))
self._max = maxval
def SetRes(self, res):
self._islider.SetRange(round(self._min/res), round(self._max/res))
self._islider.SetValue(round(self._value/res))
self._res = res
def SetRange(self, minval, maxval):
self._islider.SetRange(round(minval/self._res), round(maxval/self._res))
self._min = minval
self._max = maxval
    def UpdateValue(self, e):
        valor = e.GetEventObject().GetValue()
        try:
            valor = float(valor)
        except (TypeError, ValueError):
            # Empty or non-numeric input falls back to the default value.
            valor = self.defaultValue
        e.GetEventObject().SetValue(float(valor))
        self.SetValue(valor)
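# Usage sketch (assumes an existing wx parent panel and a wx.TextCtrl used as
# the read-out; the names are illustrative):
#
#   texto = wx.TextCtrl(panel, style=wx.TE_PROCESS_ENTER)
#   slider = FloatSlider(panel, value=0.5, min_val=0.0, max_val=1.0,
#                        res=0.01, texto=texto)
#   texto.Bind(wx.EVT_TEXT_ENTER, slider.UpdateValue)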
class DataGen(object):
"""
Class that generates a list of stream's values
Used to generate a graph.
"""
def __init__(self, window):
self.window = window
def next(self):
if self.window.player is not None:
retorno = self.window.player.last_input_output()
return retorno
class MyThread(Thread):
"""
Graph Thread
"""
def __init__(self, time, func, window):
Thread.__init__(self)
self.stopped = Event()
self.func = func
self.time = time
self.window = window
self._parar = False
def run(self):
while not self.stopped.wait(self.time/1000.0):
if self._parar:
return
self.func(None)
def stop(self):
self._parar = True
    def __del__(self):
        self._parar = True
def start(self):
if self._parar:
self.__init__(self.time, self.func, self.window)
self._parar = False
Thread.start(self)
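# Usage sketch (hypothetical window callback; the interval is in milliseconds):
#
#   thread = MyThread(100, window.draw_plot, window)
#   thread.start()
#   ...
#   thread.stop()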
| RicardoBoccoliGallego/DigitalPedalBoard | lib.py | Python | gpl-3.0 | 4,588 |
import json
import urllib
import dateutil.parser
from collections import OrderedDict
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
from matplotlib.transforms import blended_transform_factory
cache = '_pr_cache.txt'
# Obtain release dates using, e.g.,
#
# git log v0.4 -n 1 --format='%ai'
#
# The first two releases are commented out.
# This was in the era before PRs.
#
releases = OrderedDict([
#('0.1', u'2009-10-07 13:52:19 +0200'),
#('0.2', u'2009-11-12 14:48:45 +0200'),
#('0.3', u'2011-10-10 03:28:47 -0700'),
('0.4', u'2011-12-03 14:31:32 -0800'),
('0.5', u'2012-02-26 21:00:51 -0800'),
('0.6', u'2012-06-24 21:37:05 -0700'),
('0.7', u'2012-09-29 18:08:49 -0700'),
('0.8', u'2013-03-04 20:46:09 +0100')])
month_duration = 24
def fetch_PRs(user='scikit-image', repo='scikit-image', state='open'):
params = {'state': state,
'per_page': 100,
'page': 1}
data = []
page_data = True
while page_data:
config = {'user': user,
'repo': repo,
'params': urllib.urlencode(params)}
fetch_status = ('Fetching page %(page)d (state=%(state)s)' % params +
' from %(user)s/%(repo)s...' % config)
print(fetch_status)
f = urllib.urlopen(
'https://api.github.com/repos/%(user)s/%(repo)s/pulls?%(params)s'
% config
)
params['page'] += 1
page_data = json.loads(f.read())
if 'message' in page_data and page_data['message'] == "Not Found":
page_data = []
print('Warning: Repo not found (%(user)s/%(repo)s)' % config)
else:
data.extend(page_data)
return data
def seconds_from_epoch(dates):
seconds = [(dt - epoch).total_seconds() for dt in dates]
return seconds
def get_month_bins(dates):
now = datetime.now(tz=dates[0].tzinfo)
this_month = datetime(year=now.year, month=now.month, day=1,
tzinfo=dates[0].tzinfo)
bins = [this_month - relativedelta(months=i)
for i in reversed(range(-1, month_duration))]
return seconds_from_epoch(bins)
def date_formatter(value, _):
dt = epoch + timedelta(seconds=value)
return dt.strftime('%Y/%m')
for r in releases:
releases[r] = dateutil.parser.parse(releases[r])
try:
PRs = json.loads(open(cache, 'r').read())
print('Loaded PRs from cache...')
except IOError:
PRs = fetch_PRs(user='stefanv', repo='scikits.image', state='closed')
PRs.extend(fetch_PRs(state='open'))
PRs.extend(fetch_PRs(state='closed'))
cf = open(cache, 'w')
cf.write(json.dumps(PRs))
cf.flush()
nrs = [pr['number'] for pr in PRs]
print('Processing %d pull requests...' % len(nrs))
dates = [dateutil.parser.parse(pr['created_at']) for pr in PRs]
epoch = datetime(2009, 1, 1, tzinfo=dates[0].tzinfo)
dates_f = seconds_from_epoch(dates)
bins = get_month_bins(dates)
fig, ax = plt.subplots(figsize=(7, 5))
n, bins, _ = ax.hist(dates_f, bins=bins, color='blue', alpha=0.6)
ax.xaxis.set_major_formatter(FuncFormatter(date_formatter))
ax.set_xticks(bins[2:-1:3]) # Date label every 3 months.
labels = ax.get_xticklabels()
for l in labels:
l.set_rotation(40)
l.set_size(10)
mixed_transform = blended_transform_factory(ax.transData, ax.transAxes)
for version, date in releases.items():
date = seconds_from_epoch([date])[0]
ax.axvline(date, color='black', linestyle=':', label=version)
ax.text(date, 1, version, color='r', va='bottom', ha='center',
transform=mixed_transform)
ax.set_title('Pull request activity').set_y(1.05)
ax.set_xlabel('Date')
ax.set_ylabel('PRs per month', color='blue')
fig.subplots_adjust(top=0.875, bottom=0.225)
cumulative = np.cumsum(n)
cumulative += len(dates) - cumulative[-1]
ax2 = ax.twinx()
ax2.plot(bins[1:], cumulative, color='black', linewidth=2)
ax2.set_ylabel('Total PRs', color='black')
fig.savefig('PRs.png')
plt.show()
| ClinicalGraphics/scikit-image | doc/tools/plot_pr.py | Python | bsd-3-clause | 4,128 |
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class FactorList(ListResource):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact [email protected]. """
def __init__(self, version, service_sid, identity):
"""
Initialize the FactorList
:param Version version: Version that contains the resource
:param service_sid: Service Sid.
:param identity: Unique external identifier of the Entity
:returns: twilio.rest.verify.v2.service.entity.factor.FactorList
:rtype: twilio.rest.verify.v2.service.entity.factor.FactorList
"""
super(FactorList, self).__init__(version)
# Path Solution
self._solution = {'service_sid': service_sid, 'identity': identity, }
self._uri = '/Services/{service_sid}/Entities/{identity}/Factors'.format(**self._solution)
def create(self, binding, friendly_name, factor_type, config,
twilio_sandbox_mode=values.unset, authorization=values.unset):
"""
Create the FactorInstance
:param unicode binding: A unique binding for this Factor as a json string
:param unicode friendly_name: The friendly name of this Factor
:param FactorInstance.FactorTypes factor_type: The Type of this Factor
:param unicode config: The config for this Factor as a json string
:param unicode twilio_sandbox_mode: The Twilio-Sandbox-Mode HTTP request header
:param unicode authorization: The Authorization HTTP request header
:returns: The created FactorInstance
:rtype: twilio.rest.verify.v2.service.entity.factor.FactorInstance
"""
data = values.of({
'Binding': binding,
'FriendlyName': friendly_name,
'FactorType': factor_type,
'Config': config,
})
headers = values.of({'Twilio-Sandbox-Mode': twilio_sandbox_mode, 'Authorization': authorization, })
payload = self._version.create(method='POST', uri=self._uri, data=data, headers=headers, )
return FactorInstance(
self._version,
payload,
service_sid=self._solution['service_sid'],
identity=self._solution['identity'],
)
def stream(self, twilio_sandbox_mode=values.unset, limit=None, page_size=None):
"""
Streams FactorInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param unicode twilio_sandbox_mode: The Twilio-Sandbox-Mode HTTP request header
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.verify.v2.service.entity.factor.FactorInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(twilio_sandbox_mode=twilio_sandbox_mode, page_size=limits['page_size'], )
return self._version.stream(page, limits['limit'])
def list(self, twilio_sandbox_mode=values.unset, limit=None, page_size=None):
"""
Lists FactorInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param unicode twilio_sandbox_mode: The Twilio-Sandbox-Mode HTTP request header
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.verify.v2.service.entity.factor.FactorInstance]
"""
return list(self.stream(twilio_sandbox_mode=twilio_sandbox_mode, limit=limit, page_size=page_size, ))
def page(self, twilio_sandbox_mode=values.unset, page_token=values.unset,
page_number=values.unset, page_size=values.unset):
"""
Retrieve a single page of FactorInstance records from the API.
Request is executed immediately
:param unicode twilio_sandbox_mode: The Twilio-Sandbox-Mode HTTP request header
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of FactorInstance
:rtype: twilio.rest.verify.v2.service.entity.factor.FactorPage
"""
data = values.of({'PageToken': page_token, 'Page': page_number, 'PageSize': page_size, })
headers = values.of({'Twilio-Sandbox-Mode': twilio_sandbox_mode, })
response = self._version.page(method='GET', uri=self._uri, params=data, headers=headers, )
return FactorPage(self._version, response, self._solution)
def get_page(self, target_url):
"""
Retrieve a specific page of FactorInstance records from the API.
Request is executed immediately
:param str target_url: API-generated URL for the requested results page
:returns: Page of FactorInstance
:rtype: twilio.rest.verify.v2.service.entity.factor.FactorPage
"""
response = self._version.domain.twilio.request(
'GET',
target_url,
)
return FactorPage(self._version, response, self._solution)
def get(self, sid):
"""
Constructs a FactorContext
:param sid: A string that uniquely identifies this Factor.
:returns: twilio.rest.verify.v2.service.entity.factor.FactorContext
:rtype: twilio.rest.verify.v2.service.entity.factor.FactorContext
"""
return FactorContext(
self._version,
service_sid=self._solution['service_sid'],
identity=self._solution['identity'],
sid=sid,
)
def __call__(self, sid):
"""
Constructs a FactorContext
:param sid: A string that uniquely identifies this Factor.
:returns: twilio.rest.verify.v2.service.entity.factor.FactorContext
:rtype: twilio.rest.verify.v2.service.entity.factor.FactorContext
"""
return FactorContext(
self._version,
service_sid=self._solution['service_sid'],
identity=self._solution['identity'],
sid=sid,
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Verify.V2.FactorList>'
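# Usage sketch (hypothetical sids/identity; reached from a twilio.rest.Client):
#
#   factors = client.verify.v2.services('VAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') \
#                   .entities('user-identity').factors.list(limit=20)
#   for factor in factors:
#       print(factor.sid, factor.status)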
class FactorPage(Page):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact [email protected]. """
def __init__(self, version, response, solution):
"""
Initialize the FactorPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:param service_sid: Service Sid.
:param identity: Unique external identifier of the Entity
:returns: twilio.rest.verify.v2.service.entity.factor.FactorPage
:rtype: twilio.rest.verify.v2.service.entity.factor.FactorPage
"""
super(FactorPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of FactorInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.verify.v2.service.entity.factor.FactorInstance
:rtype: twilio.rest.verify.v2.service.entity.factor.FactorInstance
"""
return FactorInstance(
self._version,
payload,
service_sid=self._solution['service_sid'],
identity=self._solution['identity'],
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Verify.V2.FactorPage>'
class FactorContext(InstanceContext):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact [email protected]. """
def __init__(self, version, service_sid, identity, sid):
"""
Initialize the FactorContext
:param Version version: Version that contains the resource
:param service_sid: Service Sid.
:param identity: Unique external identifier of the Entity
:param sid: A string that uniquely identifies this Factor.
:returns: twilio.rest.verify.v2.service.entity.factor.FactorContext
:rtype: twilio.rest.verify.v2.service.entity.factor.FactorContext
"""
super(FactorContext, self).__init__(version)
# Path Solution
self._solution = {'service_sid': service_sid, 'identity': identity, 'sid': sid, }
self._uri = '/Services/{service_sid}/Entities/{identity}/Factors/{sid}'.format(**self._solution)
def delete(self, twilio_sandbox_mode=values.unset):
"""
Deletes the FactorInstance
:param unicode twilio_sandbox_mode: The Twilio-Sandbox-Mode HTTP request header
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
headers = values.of({'Twilio-Sandbox-Mode': twilio_sandbox_mode, })
return self._version.delete(method='DELETE', uri=self._uri, headers=headers, )
def fetch(self, twilio_sandbox_mode=values.unset):
"""
Fetch the FactorInstance
:param unicode twilio_sandbox_mode: The Twilio-Sandbox-Mode HTTP request header
:returns: The fetched FactorInstance
:rtype: twilio.rest.verify.v2.service.entity.factor.FactorInstance
"""
headers = values.of({'Twilio-Sandbox-Mode': twilio_sandbox_mode, })
payload = self._version.fetch(method='GET', uri=self._uri, headers=headers, )
return FactorInstance(
self._version,
payload,
service_sid=self._solution['service_sid'],
identity=self._solution['identity'],
sid=self._solution['sid'],
)
def update(self, auth_payload=values.unset, friendly_name=values.unset,
config=values.unset, twilio_sandbox_mode=values.unset):
"""
Update the FactorInstance
:param unicode auth_payload: Optional payload to verify the Factor for the first time
:param unicode friendly_name: The friendly name of this Factor
:param unicode config: The config for this Factor as a json string
:param unicode twilio_sandbox_mode: The Twilio-Sandbox-Mode HTTP request header
:returns: The updated FactorInstance
:rtype: twilio.rest.verify.v2.service.entity.factor.FactorInstance
"""
data = values.of({'AuthPayload': auth_payload, 'FriendlyName': friendly_name, 'Config': config, })
headers = values.of({'Twilio-Sandbox-Mode': twilio_sandbox_mode, })
payload = self._version.update(method='POST', uri=self._uri, data=data, headers=headers, )
return FactorInstance(
self._version,
payload,
service_sid=self._solution['service_sid'],
identity=self._solution['identity'],
sid=self._solution['sid'],
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Verify.V2.FactorContext {}>'.format(context)
class FactorInstance(InstanceResource):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact [email protected]. """
class FactorStatuses(object):
UNVERIFIED = "unverified"
VERIFIED = "verified"
class FactorTypes(object):
PUSH = "push"
def __init__(self, version, payload, service_sid, identity, sid=None):
"""
Initialize the FactorInstance
:returns: twilio.rest.verify.v2.service.entity.factor.FactorInstance
:rtype: twilio.rest.verify.v2.service.entity.factor.FactorInstance
"""
super(FactorInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'sid': payload.get('sid'),
'account_sid': payload.get('account_sid'),
'service_sid': payload.get('service_sid'),
'entity_sid': payload.get('entity_sid'),
'identity': payload.get('identity'),
'date_created': deserialize.iso8601_datetime(payload.get('date_created')),
'date_updated': deserialize.iso8601_datetime(payload.get('date_updated')),
'friendly_name': payload.get('friendly_name'),
'status': payload.get('status'),
'factor_type': payload.get('factor_type'),
'config': payload.get('config'),
'url': payload.get('url'),
}
# Context
self._context = None
self._solution = {
'service_sid': service_sid,
'identity': identity,
'sid': sid or self._properties['sid'],
}
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: FactorContext for this FactorInstance
:rtype: twilio.rest.verify.v2.service.entity.factor.FactorContext
"""
if self._context is None:
self._context = FactorContext(
self._version,
service_sid=self._solution['service_sid'],
identity=self._solution['identity'],
sid=self._solution['sid'],
)
return self._context
@property
def sid(self):
"""
:returns: A string that uniquely identifies this Factor.
:rtype: unicode
"""
return self._properties['sid']
@property
def account_sid(self):
"""
:returns: Account Sid.
:rtype: unicode
"""
return self._properties['account_sid']
@property
def service_sid(self):
"""
:returns: Service Sid.
:rtype: unicode
"""
return self._properties['service_sid']
@property
def entity_sid(self):
"""
:returns: Entity Sid.
:rtype: unicode
"""
return self._properties['entity_sid']
@property
def identity(self):
"""
:returns: Unique external identifier of the Entity
:rtype: unicode
"""
return self._properties['identity']
@property
def date_created(self):
"""
:returns: The date this Factor was created
:rtype: datetime
"""
return self._properties['date_created']
@property
def date_updated(self):
"""
:returns: The date this Factor was updated
:rtype: datetime
"""
return self._properties['date_updated']
@property
def friendly_name(self):
"""
:returns: A human readable description of this resource.
:rtype: unicode
"""
return self._properties['friendly_name']
@property
def status(self):
"""
:returns: The Status of this Factor
:rtype: FactorInstance.FactorStatuses
"""
return self._properties['status']
@property
def factor_type(self):
"""
:returns: The Type of this Factor
:rtype: FactorInstance.FactorTypes
"""
return self._properties['factor_type']
@property
def config(self):
"""
:returns: The config
:rtype: dict
"""
return self._properties['config']
@property
def url(self):
"""
:returns: The URL of this resource.
:rtype: unicode
"""
return self._properties['url']
def delete(self, twilio_sandbox_mode=values.unset):
"""
Deletes the FactorInstance
:param unicode twilio_sandbox_mode: The Twilio-Sandbox-Mode HTTP request header
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._proxy.delete(twilio_sandbox_mode=twilio_sandbox_mode, )
def fetch(self, twilio_sandbox_mode=values.unset):
"""
Fetch the FactorInstance
:param unicode twilio_sandbox_mode: The Twilio-Sandbox-Mode HTTP request header
:returns: The fetched FactorInstance
:rtype: twilio.rest.verify.v2.service.entity.factor.FactorInstance
"""
return self._proxy.fetch(twilio_sandbox_mode=twilio_sandbox_mode, )
def update(self, auth_payload=values.unset, friendly_name=values.unset,
config=values.unset, twilio_sandbox_mode=values.unset):
"""
Update the FactorInstance
:param unicode auth_payload: Optional payload to verify the Factor for the first time
:param unicode friendly_name: The friendly name of this Factor
:param unicode config: The config for this Factor as a json string
:param unicode twilio_sandbox_mode: The Twilio-Sandbox-Mode HTTP request header
:returns: The updated FactorInstance
:rtype: twilio.rest.verify.v2.service.entity.factor.FactorInstance
"""
return self._proxy.update(
auth_payload=auth_payload,
friendly_name=friendly_name,
config=config,
twilio_sandbox_mode=twilio_sandbox_mode,
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Verify.V2.FactorInstance {}>'.format(context)
| Vagab0nd/SiCKRAGE | lib3/twilio/rest/verify/v2/service/entity/factor.py | Python | gpl-3.0 | 19,771 |
# -*- coding: utf-8 -*-
from module.plugins.internal.SimpleHoster import SimpleHoster
class DataportCz(SimpleHoster):
__name__ = "DataportCz"
__type__ = "hoster"
__version__ = "0.46"
__status__ = "testing"
__pattern__ = r'http://(?:www\.)?dataport\.cz/file/(.+)'
__config__ = [("activated", "bool", "Activated", True),
("use_premium", "bool", "Use premium account if available", True),
("fallback", "bool",
"Fallback to free download if premium fails", True),
("chk_filesize", "bool", "Check file size", True),
("max_wait", "int", "Reconnect if waiting time is greater than minutes", 10)]
__description__ = """Dataport.cz hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("zoidberg", "[email protected]")]
NAME_PATTERN = r'<span itemprop="name">(?P<N>.+?)</span>'
SIZE_PATTERN = r'<td class="fil">Velikost</td>\s*<td>(?P<S>[^<]+)</td>'
OFFLINE_PATTERN = r'<h2>Soubor nebyl nalezen</h2>'
CAPTCHA_PATTERN = r'<section id="captcha_bg">\s*<img src="(.*?)"'
FREE_SLOTS_PATTERN = ur'Počet volných slotů: <span class="darkblue">(\d+)</span><br />'
def handle_free(self, pyfile):
captchas = {
'1': "jkeG",
'2': "hMJQ",
'3': "vmEK",
'4': "ePQM",
'5': "blBd"}
action, inputs = self.parse_html_form('free_download_form')
self.log_debug(action, inputs)
if not action or not inputs:
self.error(_("free_download_form"))
if "captchaId" in inputs and inputs['captchaId'] in captchas:
inputs['captchaCode'] = captchas[inputs['captchaId']]
else:
self.error(_("Captcha not found"))
self.download("http://www.dataport.cz%s" % action, post=inputs)
check = self.scan_download({'captcha': 'alert("\u0160patn\u011b opsan\u00fd k\u00f3d z obr\u00e1zu");',
'slot': 'alert("Je n\u00e1m l\u00edto, ale moment\u00e1ln\u011b nejsou'})
if check == "captcha":
self.retry_captcha()
elif check == "slot":
self.log_debug("No free slots - wait 60s and retry")
self.retry(wait=60)
| rlindner81/pyload | module/plugins/hoster/DataportCz.py | Python | gpl-3.0 | 2,266 |
"""Packaging settings."""
from codecs import open
from os.path import abspath, dirname, join
from subprocess import call
from setuptools import Command, find_packages, setup
import motey
this_dir = abspath(dirname(__file__))
with open(join(this_dir, 'README.rst'), encoding='utf-8') as file:
long_description = file.read()
class RunTests(Command):
"""Run all tests."""
description = 'run tests'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
"""Run all tests!"""
        import sys
        import pytest
        # Run the discovered test suite and propagate its exit status.
        sys.exit(pytest.main([]))
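# Registered via cmdclass below, so the suite runs with: python setup.py test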
setup(
name = motey.__appname__,
version = motey.__version__,
description = 'A fog node prototype.',
long_description = long_description,
url = motey.__url__,
author = motey.__author__,
author_email = motey.__email__,
license = motey.__licence__,
classifiers = [
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Topic :: Utilities',
'Topic :: Internet of Things',
        'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Operating System :: Linux',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
keywords = ['cli', 'IoT', 'Fog Node'],
packages = find_packages(exclude=['motey-docker-image', 'docs', 'resources', 'samples', 'scripts', 'tests*', 'webclient']),
include_package_data=True,
zip_safe=False,
install_requires = [
'aiofiles==0.3.1',
'alabaster==0.7.10',
'appdirs==1.4.3',
'Babel==2.4.0',
'click==6.7',
'daemonize==2.4.7',
'dependency-injector==3.4.5',
'docker==2.2.1',
'docker-pycreds==0.2.1',
'docopt==0.6.2',
'docutils==0.13.1',
'Flask==1.0.2',
'Flask-Cors==3.0.2',
'future==0.16.0',
'gevent==1.2.1',
'greenlet==0.4.12',
'httptools==0.0.9',
'imagesize==0.7.1',
'itsdangerous==0.24',
'Jinja2==2.9.6',
'jsonschema==2.6.0',
'lockfile==0.12.2',
'Logbook==1.0.0',
'MarkupSafe==1.0',
'msgpack-python==0.4.8',
'packaging==16.8',
'paho-mqtt==1.2.3',
'psutil==5.2.2',
'pycodestyle==2.3.1',
'Pygments==2.2.0',
'pyparsing==2.2.0',
'python-dateutil==2.6.0',
'pytz==2017.2',
'PyYAML==3.12',
'pyzmq==16.0.2',
'requests==2.22.0',
'Rx==1.5.9',
'sanic==0.5.2',
'six==1.10.0',
'snowballstemmer==1.2.1',
'Sphinx==1.6.2',
'sphinx-rtd-theme==0.2.4',
'sphinxcontrib-websupport==1.0.1',
'tinydb==3.2.3',
'typing==3.6.1',
'ujson==1.35',
'uvloop==0.8.0',
'websocket-client==0.40.0',
'websockets==3.3',
'Werkzeug==0.12.1',
'Yapsy==1.11.223',
'zerorpc==0.6.1'
],
    tests_require = [
        'pycodestyle==2.3.1',
        'pytest',
        'mock',
    ],
entry_points = {
'console_scripts': [
'motey = motey.cli.main:main',
],
},
cmdclass = {'test': RunTests},
)
| Neoklosch/Motey | setup.py | Python | apache-2.0 | 3,323 |
"""
bungee tests
These require an ElasticSearch server running on localhost:9200.
"""
import unittest
from bungee import SearchModel
from pyelasticsearch import ElasticSearch
class BungeeTestCase(unittest.TestCase):
books = [
{ '_id': 'A',
'title': 'Heart of Darkness',
'author': {
'first': 'Joseph',
'last': 'Conrad',
'born': '1857-12-03'
},
'published': '1900-07-01',
'pages': 72
},
{ '_id': 'B',
'title': 'Catch-22',
'author': {
'first': 'Joseph',
'last': 'Heller',
'born': '1923-05-01'},
'published': '1961-11-11',
'pages': 453
},
{ '_id': 'C',
'title': 'Infinite Jest',
'author': {
'first': 'David',
'last': 'Wallace',
'born': '1962-02-21'},
'published': '1996-02-01',
'pages': 515
}
]
multi_field_mapping = {
'book': {
'properties': {
'title': {
'type': 'multi_field',
'fields': {
'title': { 'type': 'string' },
'untouched': {
'include_in_all': False,
'index': 'not_analyzed',
'omit_norms': True,
'index_options': 'docs',
'type': 'string'
}
}
},
'author': {
'properties': {
'first': { 'type': 'string' },
'last': { 'type': 'string' },
'born': { 'type': 'date', 'format': 'YYYY-MM-dd' }
}
},
'year': { 'type': 'date', 'format': 'YYYY-MM-dd' },
'pages': { 'type': 'integer' }
}
}
}
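    # With this mapping, `title` stays analyzed for full-text search while
    # `title.untouched` keeps the raw, not_analyzed string for exact matching
    # (a note on the ES multi_field type used above).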
# Figure out if / how to support this
# nested_mapping = {
# 'book': {
# 'properties': {
# 'title': { 'type': 'string' },
# 'author': { 'type': 'nested' },
# 'year': { 'type': 'date', 'format': 'YYYY-MM-dd' },
# 'pages': { 'type': 'integer' }
# }
# }
# }
def setUp(self):
es_connection = ElasticSearch('http://localhost:9200')
try:
es_connection.delete_index('unit_tests')
        except Exception:
            # The index may not exist yet; ignore.
            pass
es_connection.create_index('unit_tests')
class TestModel(SearchModel):
index_name = 'unit_tests'
self.model = TestModel
def tearDown(self):
try:
self.model.connection.delete_index(self.model.index_name)
self.model.delete_field_mappings()
        except Exception:
            pass
| wan/bungee | bungee/tests/__init__.py | Python | bsd-2-clause | 2,975 |
import requests
import pytest
import json
import demistomock as demisto
bundle_index = 0
submitted_indicators = 0
mocked_get_token_response = """{"access_token": "fababfafbh"}"""
iocs_bundle = [
{
"id": "bundle--f00374ec-429c-40cb-b7bb-61f920814775",
"objects": [
{
"created": "2017-01-20T00:00:00.000Z",
"definition": {"tlp": "amber"},
"definition_type": "tlp",
"id": "marking-definition--f88d31f6-486f-44da-b317-01333bde0b82",
"type": "marking-definition",
},
{
"created": "2019-12-26T00:00:00Z",
"definition": {"statement": "Copyright Sixgill 2020. All rights reserved."},
"definition_type": "statement",
"id": "marking-definition--41eaaf7c-0bc0-4c56-abdf-d89a7f096ac4",
"type": "marking-definition",
},
{
"created": "2020-09-06T20:33:33.538Z",
"external_references": [{"external_id": "CVE-2020-15392", "source_name": "cve"}],
"id": "cveevent--a26f4710-0d64-4a76-ae27-6ac038e7536b",
"modified": "2020-09-06T20:33:33.538Z",
"object_marking_refs": [
"marking-definition--41eaaf7c-0bc0-4c56-abdf-d89a7f096ac4",
"marking-definition--f88d31f6-486f-44da-b317-01333bde0b82",
],
"spec_version": "2.0",
"type": "x-cybersixgill-com-cve-event",
"x_sixgill_info": {
"event": {
"_id": "5f1f17164731b1cef86c8aaf",
"action": "trend",
"description": "Trend of Github commits related to CVE-2020-15392",
"event_datetime": "2020-06-30T00:00Z",
"name": "trend_Github_commits",
"prev_level": "prev_level",
"type": "github_authoring",
},
"nvd": {
"base_score_v3": 5.3,
"base_severity_v3": "MEDIUM",
"link": "https://nvd.nist.gov/vuln/detail/CVE-2020-15392",
"modified": "2020-07-15T16:52Z",
"published": "2020-07-07T14:15Z",
"score_2_0": 5.0,
"severity_2_0": "MEDIUM",
"vector_v2": "AV:N/AC:L/Au:N/C:P/I:N/A:N",
"vector_v3": "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:L/I:N/A:N",
},
"score": {
"current": 0.02,
"highest": {"date": "2020-07-27T00:00Z", "value": 0.02},
"previouslyExploited": 0.07,
},
},
},
{
"created": "2020-08-19T23:08:05.709Z",
"external_references": [{"external_id": "CVE-2020-2021", "source_name": "cve"}],
"id": "cveevent--9c735811-6e08-44d8-a844-75acb10d79b9",
"modified": "2020-08-19T23:08:05.709Z",
"object_marking_refs": [
"marking-definition--41eaaf7c-0bc0-4c56-abdf-d89a7f096ac4",
"marking-definition--f88d31f6-486f-44da-b317-01333bde0b82",
],
"spec_version": "2.0",
"type": "x-cybersixgill-com-cve-event",
"x_sixgill_info": {
"event": {
"_id": "5f3db0ec3ecfe5a6d70b6245",
"action": "trend",
"description": "CVE-2020-2021 is trending on Twitter.",
"event_datetime": "2020-06-30T00:00Z",
"name": "trend_Twitter",
"prev_level": "prev_level",
"type": "dark_mention",
},
"nvd": {
"base_score_v3": 10.0,
"base_severity_v3": "CRITICAL",
"link": "https://nvd.nist.gov/vuln/detail/CVE-2020-2021",
"modified": "2020-07-06T14:39Z",
"published": "2020-06-29T15:15Z",
"score_2_0": 9.3,
"severity_2_0": "HIGH",
"vector_v2": "AV:N/AC:M/Au:N/C:C/I:C/A:C",
"vector_v3": "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:C/C:H/I:H/A:H",
},
"score": {
"current": 9.13,
"highest": {"date": "2020-07-14T00:00Z", "value": 9.25},
"previouslyExploited": 5.32,
},
},
},
{
"created": "2020-08-19T23:08:05.709Z",
"external_references": [{"external_id": "CVE-2020-12828", "source_name": "cve"}],
"id": "cveevent--dffdcd6b-2157-4652-b7eb-4ce4bb9eebc5",
"modified": "2020-08-19T23:08:05.709Z",
"object_marking_refs": [
"marking-definition--41eaaf7c-0bc0-4c56-abdf-d89a7f096ac4",
"marking-definition--f88d31f6-486f-44da-b317-01333bde0b82",
],
"spec_version": "2.0",
"type": "x-cybersixgill-com-cve-event",
"x_sixgill_info": {
"event": {
"_id": "5f3db0ec3ecfe5a6d70b6274",
"action": "trend",
"description": "CVE-2020-12828 is trending on Twitter.",
"event_datetime": "2020-06-30T00:00Z",
"name": "trend_Twitter",
"prev_level": "prev_level",
"type": "dark_mention",
},
"nvd": {
"base_score_v3": 9.8,
"base_severity_v3": "CRITICAL",
"link": "https://nvd.nist.gov/vuln/detail/CVE-2020-12828",
"modified": "2020-06-02T16:55Z",
"published": "2020-05-21T17:15Z",
"score_2_0": 10.0,
"severity_2_0": "HIGH",
"vector_v2": "AV:N/AC:L/Au:N/C:C/I:C/A:C",
"vector_v3": "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H",
},
"score": {
"current": 8.33,
"highest": {"date": "2020-07-25T00:00Z", "value": 8.4},
"previouslyExploited": 5.07,
},
},
},
{
"created": "2020-08-19T23:08:05.709Z",
"external_references": [{"external_id": "CVE-2020-9771", "source_name": "cve"}],
"id": "cveevent--4b86077c-99f6-42ca-8b4d-953411fa17bd",
"modified": "2020-08-19T23:08:05.709Z",
"object_marking_refs": [
"marking-definition--41eaaf7c-0bc0-4c56-abdf-d89a7f096ac4",
"marking-definition--f88d31f6-486f-44da-b317-01333bde0b82",
],
"spec_version": "2.0",
"type": "x-cybersixgill-com-cve-event",
"x_sixgill_info": {
"event": {
"_id": "5f3db0ec3ecfe5a6d70b627c",
"action": "trend",
"description": "CVE-2020-9771 is trending on Twitter.",
"event_datetime": "2020-06-30T00:00Z",
"name": "trend_Twitter",
"prev_level": "prev_level",
"type": "dark_mention",
},
"nvd": {
"base_score_v3": None,
"base_severity_v3": None,
"link": "https://nvd.nist.gov/vuln/detail/CVE-2020-9771",
"modified": None,
"published": None,
"score_2_0": None,
"severity_2_0": None,
"vector_v2": "None",
"vector_v3": "None",
},
"score": {"current": None, "highest": {"date": None, "value": None}, "previouslyExploited": None},
},
},
{
"created": "2020-08-25T17:16:52.536Z",
"external_references": [{"external_id": "CVE-2015-6086", "source_name": "cve"}],
"id": "cveevent--1d6320f1-8b22-48e2-876d-5e31b9d36288",
"modified": "2020-08-25T17:16:52.536Z",
"object_marking_refs": [
"marking-definition--41eaaf7c-0bc0-4c56-abdf-d89a7f096ac4",
"marking-definition--f88d31f6-486f-44da-b317-01333bde0b82",
],
"spec_version": "2.0",
"type": "x-cybersixgill-com-cve-event",
"x_sixgill_info": {
"event": {
"_id": "5f454784ffebcfa91197c9d0",
"action": "modified",
"description": "Sixgill Current score of CVE-2015-6086 changed from Low to None.",
"event_datetime": "2020-06-30T00:00Z",
"level": "None",
"name": "Sixgill_score_level_change",
"prev_level": "prev_level",
"type": "score_level",
},
"nvd": {
"base_score_v3": None,
"base_severity_v3": None,
"link": "https://nvd.nist.gov/vuln/detail/CVE-2015-6086",
"modified": "2018-10-12T22:10Z",
"published": "2015-11-11T12:59Z",
"score_2_0": 4.3,
"severity_2_0": "MEDIUM",
"vector_v2": "AV:N/AC:M/Au:N/C:P/I:N/A:N",
"vector_v3": "None",
},
"score": {
"current": None,
"highest": {"date": "2016-04-14T00:00Z", "value": 7.02},
"previouslyExploited": 1.51,
},
},
},
{
"created": "2020-08-25T17:16:52.536Z",
"external_references": [{"external_id": "CVE-2015-6086", "source_name": "cve"}],
"id": "cveevent--1d6320f1-8b22-48e2-876d-5e31b9d36288",
"modified": "2020-08-25T17:16:52.536Z",
"object_marking_refs": [
"marking-definition--41eaaf7c-0bc0-4c56-abdf-d89a7f096ac4",
"marking-definition--f88d31f6-486f-44da-b317-01333bde0b82",
],
"spec_version": "2.0",
"type": "x-cybersixgill-com-cve-event",
"x_sixgill_info": {
"event": {
"_id": "5f454784ffebcfa91197c9d0",
"action": "modified",
"description": "Sixgill Current score of CVE-2015-6086 changed from Low to None.",
"event_datetime": "2020-06-30T00:00Z",
"level": "None",
"name": "Sixgill_score_level_change",
"prev_level": "prev_level",
"type": "score_level",
},
"nvd": {
"base_score_v3": None,
"base_severity_v3": None,
"link": "https://nvd.nist.gov/vuln/detail/CVE-2015-6086",
"modified": "2018-10-12T22:10Z",
"published": "2015-11-11T12:59Z",
"score_2_0": 4.3,
"severity_2_0": "MEDIUM",
"vector_v2": "AV:N/AC:M/Au:N/C:P/I:N/A:N",
"vector_v3": "None",
},
"score": {
"current": None,
"highest": {"date": "2016-04-14T00:00Z", "value": 7.02},
"previouslyExploited": 1.51,
},
},
},
],
"spec_version": "2.0",
"type": "bundle",
},
{
"id": "bundle--f00374ec-429c-40cb-b7bb-61f920814775",
"objects": [
{
"created": "2017-01-20T00:00:00.000Z",
"definition": {"tlp": "amber"},
"definition_type": "tlp",
"id": "marking-definition--f88d31f6-486f-44da-b317-01333bde0b82",
"type": "marking-definition",
},
{
"created": "2019-12-26T00:00:00Z",
"definition": {"statement": "Copyright Sixgill 2020. All rights reserved."},
"definition_type": "statement",
"id": "marking-definition--41eaaf7c-0bc0-4c56-abdf-d89a7f096ac4",
"type": "marking-definition",
},
],
"spec_version": "2.0",
"type": "bundle",
},
]
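# Indicators the integration is expected to build from the first bundle above.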
expected_ioc_output = [
{
"value": "CVE-2020-15392",
"type": "CVE",
"rawJSON": {
"value": "CVE-2020-15392",
"type": "x-cybersixgill-com-cve-event",
"created": "2020-09-06T20:33:33.538Z",
"external_references": [{"external_id": "CVE-2020-15392", "source_name": "cve"}],
"id": "cveevent--a26f4710-0d64-4a76-ae27-6ac038e7536b",
"modified": "2020-09-06T20:33:33.538Z",
"object_marking_refs": [
"marking-definition--41eaaf7c-0bc0-4c56-abdf-d89a7f096ac4",
"marking-definition--f88d31f6-486f-44da-b317-01333bde0b82",
],
"spec_version": "2.0",
"x_sixgill_info": {
"event": {
"_id": "5f1f17164731b1cef86c8aaf",
"action": "trend",
"description": "Trend of Github commits related to CVE-2020-15392",
"event_datetime": "2020-06-30T00:00Z",
"name": "trend_Github_commits",
"prev_level": "prev_level",
"type": "github_authoring",
},
"nvd": {
"base_score_v3": 5.3,
"base_severity_v3": "MEDIUM",
"link": "https://nvd.nist.gov/vuln/detail/CVE-2020-15392",
"modified": "2020-07-15T16:52Z",
"published": "2020-07-07T14:15Z",
"score_2_0": 5.0,
"severity_2_0": "MEDIUM",
"vector_v2": "AV:N/AC:L/Au:N/C:P/I:N/A:N",
"vector_v3": "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:L/I:N/A:N",
},
"score": {
"current": 0.02,
"highest": {"date": "2020-07-27T00:00Z", "value": 0.02},
"previouslyExploited": 0.07,
},
},
},
"score": 3,
"fields": {
"description": """Description: Trend of Github commits related to CVE-2020-15392
Created: 2020-09-06T20:33:33.538Z
Modified: 2020-09-06T20:33:33.538Z
External id: CVE-2020-15392
Sixgill DVE score - current: 0.02
Sixgill DVE score - highest ever date: 2020-07-27T00:00Z
Sixgill DVE score - highest ever: 0.02
Sixgill - Previously exploited probability: 0.07
Event Name: trend_Github_commits
Event Type: github_authoring
Event Action: trend
Previous level: prev_level
Event Description: Trend of Github commits related to CVE-2020-15392
Event Datetime: 2020-06-30T00:00Z
CVSS 3.1 score: 5.3
CVSS 3.1 severity: MEDIUM
NVD Link: https://nvd.nist.gov/vuln/detail/CVE-2020-15392
NVD - last modified date: 2020-07-15T16:52Z
NVD - publication date: 2020-07-07T14:15Z
CVSS 2.0 score: 5.0
CVSS 2.0 severity: MEDIUM
NVD Vector - V2.0: AV:N/AC:L/Au:N/C:P/I:N/A:N
NVD Vector - V3.1: CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:L/I:N/A:N
""",
"creationdate": "2020-09-06T20:33:33.538Z",
"modified": "2020-09-06T20:33:33.538Z",
"externalid": "CVE-2020-15392",
"sixgilldvescorecurrent": 0.02,
"sixgilldvescorehighesteverdate": "2020-07-27T00:00Z",
"sixgilldvescorehighestever": 0.02,
"sixgillpreviouslyexploitedprobability": 0.07,
"eventname": "trend_Github_commits",
"eventtype": "github_authoring",
"eventaction": "trend",
"previouslevel": "prev_level",
"eventdescription": "Trend of Github commits related to CVE-2020-15392",
"eventdatetime": "2020-06-30T00:00Z",
"cvss31score": 5.3,
"cvss31severity": "MEDIUM",
"nvdlink": "https://nvd.nist.gov/vuln/detail/CVE-2020-15392",
"nvdlastmodifieddate": "2020-07-15T16:52Z",
"nvdpublicationdate": "2020-07-07T14:15Z",
"cvss20score": 5.0,
"cvss20severity": "MEDIUM",
"nvdvectorv20": "AV:N/AC:L/Au:N/C:P/I:N/A:N",
"nvdvectorv31": "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:L/I:N/A:N",
},
},
{
"value": "CVE-2020-2021",
"type": "CVE",
"rawJSON": {
"value": "CVE-2020-2021",
"type": "x-cybersixgill-com-cve-event",
"created": "2020-08-19T23:08:05.709Z",
"external_references": [{"external_id": "CVE-2020-2021", "source_name": "cve"}],
"id": "cveevent--9c735811-6e08-44d8-a844-75acb10d79b9",
"modified": "2020-08-19T23:08:05.709Z",
"object_marking_refs": [
"marking-definition--41eaaf7c-0bc0-4c56-abdf-d89a7f096ac4",
"marking-definition--f88d31f6-486f-44da-b317-01333bde0b82",
],
"spec_version": "2.0",
"x_sixgill_info": {
"event": {
"_id": "5f3db0ec3ecfe5a6d70b6245",
"action": "trend",
"description": "CVE-2020-2021 is trending on Twitter.",
"event_datetime": "2020-06-30T00:00Z",
"name": "trend_Twitter",
"prev_level": "prev_level",
"type": "dark_mention",
},
"nvd": {
"base_score_v3": 10,
"base_severity_v3": "CRITICAL",
"link": "https://nvd.nist.gov/vuln/detail/CVE-2020-2021",
"modified": "2020-07-06T14:39Z",
"published": "2020-06-29T15:15Z",
"score_2_0": 9.3,
"severity_2_0": "HIGH",
"vector_v2": "AV:N/AC:M/Au:N/C:C/I:C/A:C",
"vector_v3": "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:C/C:H/I:H/A:H",
},
"score": {
"current": 9.13,
"highest": {"date": "2020-07-14T00:00Z", "value": 9.25},
"previouslyExploited": 5.32,
},
},
},
"score": 3,
"fields": {
"description": """Description: CVE-2020-2021 is trending on Twitter.
Created: 2020-08-19T23:08:05.709Z
Modified: 2020-08-19T23:08:05.709Z
External id: CVE-2020-2021
Sixgill DVE score - current: 9.13
Sixgill DVE score - highest ever date: 2020-07-14T00:00Z
Sixgill DVE score - highest ever: 9.25
Sixgill - Previously exploited probability: 5.32
Event Name: trend_Twitter
Event Type: dark_mention
Event Action: trend
Previous level: prev_level
Event Description: CVE-2020-2021 is trending on Twitter.
Event Datetime: 2020-06-30T00:00Z
CVSS 3.1 score: 10.0
CVSS 3.1 severity: CRITICAL
NVD Link: https://nvd.nist.gov/vuln/detail/CVE-2020-2021
NVD - last modified date: 2020-07-06T14:39Z
NVD - publication date: 2020-06-29T15:15Z
CVSS 2.0 score: 9.3
CVSS 2.0 severity: HIGH
NVD Vector - V2.0: AV:N/AC:M/Au:N/C:C/I:C/A:C
NVD Vector - V3.1: CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:C/C:H/I:H/A:H
""",
"creationdate": "2020-08-19T23:08:05.709Z",
"modified": "2020-08-19T23:08:05.709Z",
"externalid": "CVE-2020-2021",
"sixgilldvescorecurrent": 9.13,
"sixgilldvescorehighesteverdate": "2020-07-14T00:00Z",
"sixgilldvescorehighestever": 9.25,
"sixgillpreviouslyexploitedprobability": 5.32,
"eventname": "trend_Twitter",
"eventtype": "dark_mention",
"eventaction": "trend",
"previouslevel": "prev_level",
"eventdescription": "CVE-2020-2021 is trending on Twitter.",
"eventdatetime": "2020-06-30T00:00Z",
"cvss31score": 10.0,
"cvss31severity": "CRITICAL",
"nvdlink": "https://nvd.nist.gov/vuln/detail/CVE-2020-2021",
"nvdlastmodifieddate": "2020-07-06T14:39Z",
"nvdpublicationdate": "2020-06-29T15:15Z",
"cvss20score": 9.3,
"cvss20severity": "HIGH",
"nvdvectorv20": "AV:N/AC:M/Au:N/C:C/I:C/A:C",
"nvdvectorv31": "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:C/C:H/I:H/A:H",
},
},
{
"value": "CVE-2020-12828",
"type": "CVE",
"rawJSON": {
"value": "CVE-2020-12828",
"type": "x-cybersixgill-com-cve-event",
"created": "2020-08-19T23:08:05.709Z",
"external_references": [{"external_id": "CVE-2020-12828", "source_name": "cve"}],
"id": "cveevent--dffdcd6b-2157-4652-b7eb-4ce4bb9eebc5",
"modified": "2020-08-19T23:08:05.709Z",
"object_marking_refs": [
"marking-definition--41eaaf7c-0bc0-4c56-abdf-d89a7f096ac4",
"marking-definition--f88d31f6-486f-44da-b317-01333bde0b82",
],
"spec_version": "2.0",
"x_sixgill_info": {
"event": {
"_id": "5f3db0ec3ecfe5a6d70b6274",
"action": "trend",
"description": "CVE-2020-12828 is trending on Twitter.",
"event_datetime": "2020-06-30T00:00Z",
"name": "trend_Twitter",
"prev_level": "prev_level",
"type": "dark_mention",
},
"nvd": {
"base_score_v3": 9.8,
"base_severity_v3": "CRITICAL",
"link": "https://nvd.nist.gov/vuln/detail/CVE-2020-12828",
"modified": "2020-06-02T16:55Z",
"published": "2020-05-21T17:15Z",
"score_2_0": 10.0,
"severity_2_0": "HIGH",
"vector_v2": "AV:N/AC:L/Au:N/C:C/I:C/A:C",
"vector_v3": "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H",
},
"score": {
"current": 8.33,
"highest": {"date": "2020-07-25T00:00Z", "value": 8.4},
"previouslyExploited": 5.07,
},
},
},
"score": 3,
"fields": {
"description": """Description: CVE-2020-12828 is trending on Twitter.
Created: 2020-08-19T23:08:05.709Z
Modified: 2020-08-19T23:08:05.709Z
External id: CVE-2020-12828
Sixgill DVE score - current: 8.33
Sixgill DVE score - highest ever date: 2020-07-25T00:00Z
Sixgill DVE score - highest ever: 8.4
Sixgill - Previously exploited probability: 5.07
Event Name: trend_Twitter
Event Type: dark_mention
Event Action: trend
Previous level: prev_level
Event Description: CVE-2020-12828 is trending on Twitter.
Event Datetime: 2020-06-30T00:00Z
CVSS 3.1 score: 9.8
CVSS 3.1 severity: CRITICAL
NVD Link: https://nvd.nist.gov/vuln/detail/CVE-2020-12828
NVD - last modified date: 2020-06-02T16:55Z
NVD - publication date: 2020-05-21T17:15Z
CVSS 2.0 score: 10.0
CVSS 2.0 severity: HIGH
NVD Vector - V2.0: AV:N/AC:L/Au:N/C:C/I:C/A:C
NVD Vector - V3.1: CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H
""",
"creationdate": "2020-08-19T23:08:05.709Z",
"modified": "2020-08-19T23:08:05.709Z",
"externalid": "CVE-2020-12828",
"sixgilldvescorecurrent": 8.33,
"sixgilldvescorehighesteverdate": "2020-07-25T00:00Z",
"sixgilldvescorehighestever": 8.4,
"sixgillpreviouslyexploitedprobability": 5.07,
"eventname": "trend_Twitter",
"eventtype": "dark_mention",
"eventaction": "trend",
"previouslevel": "prev_level",
"eventdescription": "CVE-2020-12828 is trending on Twitter.",
"eventdatetime": "2020-06-30T00:00Z",
"cvss31score": 9.8,
"cvss31severity": "CRITICAL",
"nvdlink": "https://nvd.nist.gov/vuln/detail/CVE-2020-12828",
"nvdlastmodifieddate": "2020-06-02T16:55Z",
"nvdpublicationdate": "2020-05-21T17:15Z",
"cvss20score": 10.0,
"cvss20severity": "HIGH",
"nvdvectorv20": "AV:N/AC:L/Au:N/C:C/I:C/A:C",
"nvdvectorv31": "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H",
},
},
{
"value": "CVE-2020-9771",
"type": "CVE",
"rawJSON": {
"value": "CVE-2020-9771",
"type": "x-cybersixgill-com-cve-event",
"created": "2020-08-19T23:08:05.709Z",
"external_references": [{"external_id": "CVE-2020-9771", "source_name": "cve"}],
"id": "cveevent--4b86077c-99f6-42ca-8b4d-953411fa17bd",
"modified": "2020-08-19T23:08:05.709Z",
"object_marking_refs": [
"marking-definition--41eaaf7c-0bc0-4c56-abdf-d89a7f096ac4",
"marking-definition--f88d31f6-486f-44da-b317-01333bde0b82",
],
"spec_version": "2.0",
"x_sixgill_info": {
"event": {
"_id": "5f3db0ec3ecfe5a6d70b627c",
"action": "trend",
"description": "CVE-2020-9771 is trending on Twitter.",
"event_datetime": "2020-06-30T00:00Z",
"name": "trend_Twitter",
"prev_level": "prev_level",
"type": "dark_mention",
},
"nvd": {
"base_score_v3": None,
"base_severity_v3": None,
"link": "https://nvd.nist.gov/vuln/detail/CVE-2020-9771",
"modified": None,
"published": None,
"score_2_0": None,
"severity_2_0": None,
"vector_v2": "None",
"vector_v3": "None",
},
"score": {"current": None, "highest": {"date": None, "value": None}, "previouslyExploited": None},
},
},
"score": 3,
"fields": {
"description": """Description: CVE-2020-9771 is trending on Twitter.
Created: 2020-08-19T23:08:05.709Z
Modified: 2020-08-19T23:08:05.709Z
External id: CVE-2020-9771
Sixgill DVE score - current: None
Sixgill DVE score - highest ever date: None
Sixgill DVE score - highest ever: None
Sixgill - Previously exploited probability: None
Event Name: trend_Twitter
Event Type: dark_mention
Event Action: trend
Previous level: prev_level
Event Description: CVE-2020-9771 is trending on Twitter.
Event Datetime: 2020-06-30T00:00Z
CVSS 3.1 score: None
CVSS 3.1 severity: None
NVD Link: https://nvd.nist.gov/vuln/detail/CVE-2020-9771
NVD - last modified date: None
NVD - publication date: None
CVSS 2.0 score: None
CVSS 2.0 severity: None
NVD Vector - V2.0: None
NVD Vector - V3.1: None
""",
"creationdate": "2020-08-19T23:08:05.709Z",
"modified": "2020-08-19T23:08:05.709Z",
"externalid": "CVE-2020-9771",
"sixgilldvescorecurrent": None,
"sixgilldvescorehighesteverdate": None,
"sixgilldvescorehighestever": None,
"sixgillpreviouslyexploitedprobability": None,
"eventname": "trend_Twitter",
"eventtype": "dark_mention",
"eventaction": "trend",
"previouslevel": "prev_level",
"eventdescription": "CVE-2020-9771 is trending on Twitter.",
"eventdatetime": "2020-06-30T00:00Z",
"cvss31score": None,
"cvss31severity": None,
"nvdlink": "https://nvd.nist.gov/vuln/detail/CVE-2020-9771",
"nvdlastmodifieddate": None,
"nvdpublicationdate": None,
"cvss20score": None,
"cvss20severity": None,
"nvdvectorv20": "None",
"nvdvectorv31": "None",
},
},
{
"value": "CVE-2015-6086",
"type": "CVE",
"rawJSON": {
"value": "CVE-2015-6086",
"type": "x-cybersixgill-com-cve-event",
"created": "2020-08-25T17:16:52.536Z",
"external_references": [{"external_id": "CVE-2015-6086", "source_name": "cve"}],
"id": "cveevent--1d6320f1-8b22-48e2-876d-5e31b9d36288",
"modified": "2020-08-25T17:16:52.536Z",
"object_marking_refs": [
"marking-definition--41eaaf7c-0bc0-4c56-abdf-d89a7f096ac4",
"marking-definition--f88d31f6-486f-44da-b317-01333bde0b82",
],
"spec_version": "2.0",
"x_sixgill_info": {
"event": {
"_id": "5f454784ffebcfa91197c9d0",
"action": "modified",
"description": "Sixgill Current score of CVE-2015-6086 changed from Low to None.",
"event_datetime": "2020-06-30T00:00Z",
"level": "None",
"name": "Sixgill_score_level_change",
"prev_level": "prev_level",
"type": "score_level",
},
"nvd": {
"base_score_v3": None,
"base_severity_v3": None,
"link": "https://nvd.nist.gov/vuln/detail/CVE-2015-6086",
"modified": "2018-10-12T22:10Z",
"published": "2015-11-11T12:59Z",
"score_2_0": 4.3,
"severity_2_0": "MEDIUM",
"vector_v2": "AV:N/AC:M/Au:N/C:P/I:N/A:N",
"vector_v3": "None",
},
"score": {
"current": None,
"highest": {"date": "2016-04-14T00:00Z", "value": 7.02},
"previouslyExploited": 1.51,
},
},
},
"score": 3,
"fields": {
"description": """Description: Sixgill Current score of CVE-2015-6086 changed from Low to None.
Created: 2020-08-25T17:16:52.536Z
Modified: 2020-08-25T17:16:52.536Z
External id: CVE-2015-6086
Sixgill DVE score - current: None
Sixgill DVE score - highest ever date: 2016-04-14T00:00Z
Sixgill DVE score - highest ever: 7.02
Sixgill - Previously exploited probability: 1.51
Event Name: Sixgill_score_level_change
Event Type: score_level
Event Action: modified
Previous level: prev_level
Event Description: Sixgill Current score of CVE-2015-6086 changed from Low to None.
Event Datetime: 2020-06-30T00:00Z
CVSS 3.1 score: None
CVSS 3.1 severity: None
NVD Link: https://nvd.nist.gov/vuln/detail/CVE-2015-6086
NVD - last modified date: 2018-10-12T22:10Z
NVD - publication date: 2015-11-11T12:59Z
CVSS 2.0 score: 4.3
CVSS 2.0 severity: MEDIUM
NVD Vector - V2.0: AV:N/AC:M/Au:N/C:P/I:N/A:N
NVD Vector - V3.1: None
""",
"creationdate": "2020-08-25T17:16:52.536Z",
"modified": "2020-08-25T17:16:52.536Z",
"externalid": "CVE-2015-6086",
"sixgilldvescorecurrent": None,
"sixgilldvescorehighesteverdate": "2016-04-14T00:00Z",
"sixgilldvescorehighestever": 7.02,
"sixgillpreviouslyexploitedprobability": 1.51,
"eventname": "Sixgill_score_level_change",
"eventtype": "score_level",
"eventaction": "modified",
"previouslevel": "prev_level",
"eventdescription": "Sixgill Current score of CVE-2015-6086 changed from Low to None.",
"eventdatetime": "2020-06-30T00:00Z",
"cvss31score": None,
"cvss31severity": None,
"nvdlink": "https://nvd.nist.gov/vuln/detail/CVE-2015-6086",
"nvdlastmodifieddate": "2018-10-12T22:10Z",
"nvdpublicationdate": "2015-11-11T12:59Z",
"cvss20score": 4.3,
"cvss20severity": "MEDIUM",
"nvdvectorv20": "AV:N/AC:M/Au:N/C:P/I:N/A:N",
"nvdvectorv31": "None",
},
},
{
"value": "CVE-2015-6086",
"type": "CVE",
"rawJSON": {
"value": "CVE-2015-6086",
"type": "x-cybersixgill-com-cve-event",
"created": "2020-08-25T17:16:52.536Z",
"external_references": [{"external_id": "CVE-2015-6086", "source_name": "cve"}],
"id": "cveevent--1d6320f1-8b22-48e2-876d-5e31b9d36288",
"modified": "2020-08-25T17:16:52.536Z",
"object_marking_refs": [
"marking-definition--41eaaf7c-0bc0-4c56-abdf-d89a7f096ac4",
"marking-definition--f88d31f6-486f-44da-b317-01333bde0b82",
],
"spec_version": "2.0",
"x_sixgill_info": {
"event": {
"_id": "5f454784ffebcfa91197c9d0",
"action": "modified",
"description": "Sixgill Current score of CVE-2015-6086 changed from Low to None.",
"event_datetime": "2020-06-30T00:00Z",
"level": "None",
"name": "Sixgill_score_level_change",
"prev_level": "prev_level",
"type": "score_level",
},
"nvd": {
"base_score_v3": None,
"base_severity_v3": None,
"link": "https://nvd.nist.gov/vuln/detail/CVE-2015-6086",
"modified": "2018-10-12T22:10Z",
"published": "2015-11-11T12:59Z",
"score_2_0": 4.3,
"severity_2_0": "MEDIUM",
"vector_v2": "AV:N/AC:M/Au:N/C:P/I:N/A:N",
"vector_v3": "None",
},
"score": {
"current": None,
"highest": {"date": "2016-04-14T00:00Z", "value": 7.02},
"previouslyExploited": 1.51,
},
},
},
"score": 3,
"fields": {
"description": """Description: Sixgill Current score of CVE-2015-6086 changed from Low to None.
Created: 2020-08-25T17:16:52.536Z
Modified: 2020-08-25T17:16:52.536Z
External id: CVE-2015-6086
Sixgill DVE score - current: None
Sixgill DVE score - highest ever date: 2016-04-14T00:00Z
Sixgill DVE score - highest ever: 7.02
Sixgill - Previously exploited probability: 1.51
Event Name: Sixgill_score_level_change
Event Type: score_level
Event Action: modified
Previous level: prev_level
Event Description: Sixgill Current score of CVE-2015-6086 changed from Low to None.
Event Datetime: 2020-06-30T00:00Z
CVSS 3.1 score: None
CVSS 3.1 severity: None
NVD Link: https://nvd.nist.gov/vuln/detail/CVE-2015-6086
NVD - last modified date: 2018-10-12T22:10Z
NVD - publication date: 2015-11-11T12:59Z
CVSS 2.0 score: 4.3
CVSS 2.0 severity: MEDIUM
NVD Vector - V2.0: AV:N/AC:M/Au:N/C:P/I:N/A:N
NVD Vector - V3.1: None
""",
"creationdate": "2020-08-25T17:16:52.536Z",
"modified": "2020-08-25T17:16:52.536Z",
"externalid": "CVE-2015-6086",
"sixgilldvescorecurrent": None,
"sixgilldvescorehighesteverdate": "2016-04-14T00:00Z",
"sixgilldvescorehighestever": 7.02,
"sixgillpreviouslyexploitedprobability": 1.51,
"eventname": "Sixgill_score_level_change",
"eventtype": "score_level",
"eventaction": "modified",
"previouslevel": "prev_level",
"eventdescription": "Sixgill Current score of CVE-2015-6086 changed from Low to None.",
"eventdatetime": "2020-06-30T00:00Z",
"cvss31score": None,
"cvss31severity": None,
"nvdlink": "https://nvd.nist.gov/vuln/detail/CVE-2015-6086",
"nvdlastmodifieddate": "2018-10-12T22:10Z",
"nvdpublicationdate": "2015-11-11T12:59Z",
"cvss20score": 4.3,
"cvss20severity": "MEDIUM",
"nvdvectorv20": "AV:N/AC:M/Au:N/C:P/I:N/A:N",
"nvdvectorv31": "None",
},
},
]
class MockedResponse(object):
def __init__(
self,
status_code,
text,
reason=None,
url=None,
method=None,
):
self.status_code = status_code
self.text = text
self.reason = reason
self.url = url
self.request = requests.Request("GET")
        self.ok = self.status_code == 200
def json(self):
return json.loads(self.text)
def init_params():
return {"client_id": "WRONG_CLIENT_ID_TEST", "client_secret": "CLIENT_SECRET_TEST"}
def mocked_request(*args, **kwargs):
global bundle_index
global submitted_indicators
request = kwargs.get("request", {})
end_point = request.path_url
method = request.method
response_dict = {
"POST": {
"/auth/token": MockedResponse(200, mocked_get_token_response),
"/dvefeed/ioc/ack": MockedResponse(200, str(submitted_indicators)),
},
"GET": {"/dvefeed/ioc?limit=1000": MockedResponse(200, json.dumps(iocs_bundle[bundle_index]))},
}
    responses_for_method = response_dict.get(method)
    response = responses_for_method.get(end_point)
if method == "GET" and end_point == "/dvefeed/ioc?limit=1000":
submitted_indicators = len(iocs_bundle[bundle_index].get("objects")) - 2
bundle_index += 1
return response
def test_test_module_command_raise_exception(mocker):
mocker.patch.object(demisto, "params", return_value=init_params())
mocker.patch("requests.sessions.Session.send", return_value=MockedResponse(400, "error"))
from CybersixgillDVEFeed import module_command_test
with pytest.raises(Exception):
module_command_test()
def test_test_module_command(mocker):
mocker.patch.object(demisto, "params", return_value=init_params())
mocker.patch("requests.sessions.Session.send", return_value=MockedResponse(200, "ok"))
from CybersixgillDVEFeed import module_command_test
module_command_test()
def test_fetch_indicators_command(mocker):
global bundle_index
global submitted_indicators
mocker.patch.object(demisto, "params", return_value=init_params())
mocker.patch("requests.sessions.Session.send", new=mocked_request)
from CybersixgillDVEFeed import fetch_indicators_command
from sixgill.sixgill_feed_client import SixgillFeedClient
from sixgill.sixgill_constants import FeedStream
client = SixgillFeedClient(
demisto.params()["client_id"],
demisto.params()["client_secret"],
"some_channel",
FeedStream.DVEFEED,
demisto,
1000,
)
output = fetch_indicators_command(client)
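    # Reset the module-level mock state so later tests start from the first
    # bundle again.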
bundle_index = 0
submitted_indicators = 0
assert output == expected_ioc_output
def test_get_indicators_command(mocker):
global bundle_index
global submitted_indicators
mocker.patch.object(demisto, "params", return_value=init_params())
mocker.patch("requests.sessions.Session.send", new=mocked_request)
from CybersixgillDVEFeed import get_indicators_command
from sixgill.sixgill_feed_client import SixgillFeedClient
from sixgill.sixgill_constants import FeedStream
client = SixgillFeedClient(
demisto.params()["client_id"],
demisto.params()["client_secret"],
"some_channel",
FeedStream.DVEFEED,
demisto,
1000,
)
output = get_indicators_command(client, {"limit": 10})
bundle_index = 0
submitted_indicators = 0
assert output[2] == expected_ioc_output
@pytest.mark.parametrize("tlp_color", ["", None, "AMBER"])
def test_feed_tags_and_tlp_color(mocker, tlp_color):
"""
Given:
- feedTags parameter
When:
- Executing fetch command on feed
Then:
- Validate the tags supplied are added to the tags list in addition to the tags that were there before
"""
global bundle_index
global submitted_indicators
mocker.patch.object(demisto, "params", return_value=init_params())
mocker.patch("requests.sessions.Session.send", new=mocked_request)
from CybersixgillDVEFeed import fetch_indicators_command
from sixgill.sixgill_feed_client import SixgillFeedClient
from sixgill.sixgill_constants import FeedStream
client = SixgillFeedClient(
demisto.params()["client_id"],
demisto.params()["client_secret"],
"some_channel",
FeedStream.DVEFEED,
demisto,
1000,
)
output = fetch_indicators_command(client, tags=["tag1", "tag2"], tlp_color=tlp_color)
assert all(item in output[0]["fields"]["tags"] for item in ["tag1", "tag2"])
if tlp_color:
assert output[0]["fields"]["trafficlightprotocol"] == tlp_color
else:
assert not output[0]["fields"].get("trafficlightprotocol")
bundle_index -= 1
| demisto/content | Packs/Cybersixgill-DVE/Integrations/CybersixgillDVEFeed/CybersixgillDVEFeed_test.py | Python | mit | 41,917 |
"""Views for the branding app. """
import logging
import urllib
from django.conf import settings
from django.core.urlresolvers import reverse
from django.core.cache import cache
from django.views.decorators.cache import cache_control
from django.http import HttpResponse, Http404
from django.utils import translation
from django.shortcuts import redirect
from django.views.decorators.csrf import ensure_csrf_cookie
from django.contrib.staticfiles.storage import staticfiles_storage
from edxmako.shortcuts import render_to_response
import student.views
from student.models import CourseEnrollment
import courseware.views
from microsite_configuration import microsite
from edxmako.shortcuts import marketing_link
from util.cache import cache_if_anonymous
from util.json_request import JsonResponse
import branding.api as branding_api
log = logging.getLogger(__name__)
def get_course_enrollments(user):
"""
    Return the course enrollments for the given user, filtered by the
    microsite's course_org_filter when one is configured.
"""
enrollments = CourseEnrollment.enrollments_for_user(user)
microsite_org = microsite.get_value('course_org_filter')
if microsite_org:
site_enrollments = [
enrollment for enrollment in enrollments if enrollment.course_id.org == microsite_org
]
    else:
        site_enrollments = list(enrollments)
return site_enrollments
@ensure_csrf_cookie
@cache_if_anonymous()
def index(request):
'''
Redirects to main page -- info page if user authenticated, or marketing if not
'''
if request.user.is_authenticated():
# For microsites, only redirect to dashboard if user has
# courses in his/her dashboard. Otherwise UX is a bit cryptic.
# In this case, we want to have the user stay on a course catalog
# page to make it easier to browse for courses (and register)
if microsite.get_value(
'ALWAYS_REDIRECT_HOMEPAGE_TO_DASHBOARD_FOR_AUTHENTICATED_USER',
settings.FEATURES.get('ALWAYS_REDIRECT_HOMEPAGE_TO_DASHBOARD_FOR_AUTHENTICATED_USER', True)):
return redirect(reverse('dashboard'))
if settings.FEATURES.get('AUTH_USE_CERTIFICATES'):
from external_auth.views import ssl_login
# Set next URL to dashboard if it isn't set to avoid
# caching a redirect to / that causes a redirect loop on logout
if not request.GET.get('next'):
req_new = request.GET.copy()
req_new['next'] = reverse('dashboard')
request.GET = req_new
return ssl_login(request)
enable_mktg_site = microsite.get_value(
'ENABLE_MKTG_SITE',
settings.FEATURES.get('ENABLE_MKTG_SITE', False)
)
if enable_mktg_site:
return redirect(settings.MKTG_URLS.get('ROOT'))
domain = request.META.get('HTTP_HOST')
# keep specialized logic for Edge until we can migrate over Edge to fully use
# microsite definitions
if domain and 'edge.edx.org' in domain:
return redirect(reverse("signin_user"))
# we do not expect this case to be reached in cases where
# marketing and edge are enabled
return student.views.index(request, user=request.user)
@ensure_csrf_cookie
@cache_if_anonymous()
def courses(request):
"""
Render the "find courses" page. If the marketing site is enabled, redirect
to that. Otherwise, if subdomain branding is on, this is the university
profile page. Otherwise, it's the edX courseware.views.courses page
"""
enable_mktg_site = microsite.get_value(
'ENABLE_MKTG_SITE',
settings.FEATURES.get('ENABLE_MKTG_SITE', False)
)
if enable_mktg_site:
return redirect(marketing_link('COURSES'), permanent=True)
if not settings.FEATURES.get('COURSES_ARE_BROWSABLE'):
raise Http404
# we do not expect this case to be reached in cases where
# marketing is enabled or the courses are not browsable
return courseware.views.courses(request)
def _footer_static_url(request, name):
"""Construct an absolute URL to a static asset. """
return request.build_absolute_uri(staticfiles_storage.url(name))
def _footer_css_urls(request, package_name):
"""Construct absolute URLs to CSS assets in a package. """
# We need this to work both in local development and in production.
# Unfortunately, in local development we don't run the full asset pipeline,
# so fully processed output files may not exist.
# For this reason, we use the *css package* name(s), rather than the static file name
# to identify the CSS file name(s) to include in the footer.
# We then construct an absolute URI so that external sites (such as the marketing site)
# can locate the assets.
package = settings.PIPELINE_CSS.get(package_name, {})
paths = [package['output_filename']] if not settings.DEBUG else package['source_filenames']
return [
_footer_static_url(request, path)
for path in paths
]
def _render_footer_html(request, show_openedx_logo, include_dependencies):
"""Render the footer as HTML.
Arguments:
show_openedx_logo (bool): If True, include the OpenEdX logo in the rendered HTML.
include_dependencies (bool): If True, include JavaScript and CSS dependencies.
Returns: unicode
"""
bidi = 'rtl' if translation.get_language_bidi() else 'ltr'
version = 'edx' if settings.FEATURES.get('IS_EDX_DOMAIN') else 'openedx'
css_name = settings.FOOTER_CSS[version][bidi]
context = {
'hide_openedx_link': not show_openedx_logo,
'footer_js_url': _footer_static_url(request, 'js/footer-edx.js'),
'footer_css_urls': _footer_css_urls(request, css_name),
'bidi': bidi,
'include_dependencies': include_dependencies,
}
return (
render_to_response("footer-edx-v3.html", context)
if settings.FEATURES.get("IS_EDX_DOMAIN", False)
else render_to_response("footer.html", context)
)
@cache_control(must_revalidate=True, max_age=settings.FOOTER_BROWSER_CACHE_MAX_AGE)
def footer(request):
"""Retrieve the branded footer.
This end-point provides information about the site footer,
allowing for consistent display of the footer across other sites
(for example, on the marketing site and blog).
It can be used in one of two ways:
1) A client renders the footer from a JSON description.
2) A browser loads an HTML representation of the footer
and injects it into the DOM. The HTML includes
CSS and JavaScript links.
In case (2), we assume that the following dependencies
are included on the page:
a) JQuery (same version as used in edx-platform)
b) font-awesome (same version as used in edx-platform)
c) Open Sans web fonts
Example: Retrieving the footer as JSON
GET /api/branding/v1/footer
Accepts: application/json
{
"navigation_links": [
{
"url": "http://example.com/about",
"name": "about",
"title": "About"
},
# ...
],
"social_links": [
{
"url": "http://example.com/social",
"name": "facebook",
"icon-class": "fa-facebook-square",
"title": "Facebook",
"action": "Sign up on Facebook!"
},
# ...
],
"mobile_links": [
{
"url": "http://example.com/android",
"name": "google",
"image": "http://example.com/google.png",
"title": "Google"
},
# ...
],
"legal_links": [
{
"url": "http://example.com/terms-of-service.html",
"name": "terms_of_service",
"title': "Terms of Service"
},
# ...
],
"openedx_link": {
"url": "http://open.edx.org",
"title": "Powered by Open edX",
"image": "http://example.com/openedx.png"
},
"logo_image": "http://example.com/static/images/logo.png",
"copyright": "EdX, Open edX, and the edX and Open edX logos are \
registered trademarks or trademarks of edX Inc."
}
Example: Retrieving the footer as HTML
GET /api/branding/v1/footer
Accepts: text/html
    Example: Including the footer with the "Powered by Open edX" logo
GET /api/branding/v1/footer?show-openedx-logo=1
Accepts: text/html
Example: Retrieving the footer in a particular language
GET /api/branding/v1/footer?language=en
Accepts: text/html
Example: Retrieving the footer with all JS and CSS dependencies (for testing)
GET /api/branding/v1/footer?include-dependencies=1
Accepts: text/html
"""
if not branding_api.is_enabled():
raise Http404
    # Use the Accept header to decide which representation to serve
accepts = request.META.get('HTTP_ACCEPT', '*/*')
    # Show the Open edX logo in the footer
show_openedx_logo = bool(request.GET.get('show-openedx-logo', False))
# Include JS and CSS dependencies
# This is useful for testing the end-point directly.
include_dependencies = bool(request.GET.get('include-dependencies', False))
# Override the language if necessary
language = request.GET.get('language', translation.get_language())
    # Render the footer in the representation requested via the Accept header
if 'text/html' in accepts or '*/*' in accepts:
cache_key = u"branding.footer.{params}.html".format(
params=urllib.urlencode({
'language': language,
'show_openedx_logo': show_openedx_logo,
'include_dependencies': include_dependencies,
})
)
content = cache.get(cache_key)
if content is None:
with translation.override(language):
content = _render_footer_html(request, show_openedx_logo, include_dependencies)
cache.set(cache_key, content, settings.FOOTER_CACHE_TIMEOUT)
return HttpResponse(content, status=200, content_type="text/html; charset=utf-8")
elif 'application/json' in accepts:
cache_key = u"branding.footer.{params}.json".format(
params=urllib.urlencode({
'language': language,
'is_secure': request.is_secure(),
})
)
footer_dict = cache.get(cache_key)
if footer_dict is None:
with translation.override(language):
footer_dict = branding_api.get_footer(
is_secure=request.is_secure(),
language=language,
)
cache.set(cache_key, footer_dict, settings.FOOTER_CACHE_TIMEOUT)
return JsonResponse(footer_dict, 200, content_type="application/json; charset=utf-8")
else:
return HttpResponse(status=406)
| nttks/edx-platform | lms/djangoapps/branding/views.py | Python | agpl-3.0 | 11,288 |
# Copyright (C) 2015 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from django.utils.translation import ngettext_lazy
from horizon import tables
from openstack_dashboard import api
from openstack_dashboard import policy
class AddProtocol(policy.PolicyTargetMixin, tables.LinkAction):
name = "create"
verbose_name = _("Add Protocol")
url = "horizon:identity:identity_providers:protocols:create"
classes = ("ajax-modal",)
icon = "plus"
policy_rules = (("identity", "identity:create_protocol"),)
def get_link_url(self, datum=None):
idp_id = self.table.kwargs['identity_provider_id']
return reverse(self.url, args=(idp_id,))
class RemoveProtocol(policy.PolicyTargetMixin, tables.DeleteAction):
@staticmethod
def action_present(count):
return ngettext_lazy(
"Delete Protocol",
"Delete Protocols",
count
)
@staticmethod
def action_past(count):
return ngettext_lazy(
"Deleted Protocol",
"Deleted Protocols",
count
)
policy_rules = (("identity", "identity:delete_protocol"),)
def delete(self, request, obj_id):
identity_provider = self.table.kwargs['identity_provider_id']
protocol = obj_id
api.keystone.protocol_delete(request, identity_provider, protocol)
class ProtocolsTable(tables.DataTable):
protocol = tables.Column("id",
verbose_name=_("Protocol ID"))
mapping = tables.Column("mapping_id",
verbose_name=_("Mapping ID"))
def get_object_display(self, datum):
return datum.id
class Meta(object):
name = "idp_protocols"
verbose_name = _("Protocols")
table_actions = (AddProtocol, RemoveProtocol)
row_actions = (RemoveProtocol, )
| openstack/horizon | openstack_dashboard/dashboards/identity/identity_providers/protocols/tables.py | Python | apache-2.0 | 2,469 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
from invoke import task
from website import settings
HERE = os.path.dirname(os.path.abspath(__file__))
WHEELHOUSE_PATH = os.environ.get('WHEELHOUSE')
@task()
def manage(ctx, cmd_str):
"""Take command string for manage commands
:param cmd_str: ex. runserver, migrate, "migrate module"
"""
manage_cmd = os.path.join(HERE, '..', 'manage.py')
env = 'DJANGO_SETTINGS_MODULE="admin.base.settings"'
cmd = '{} python {} {}'.format(env, manage_cmd, cmd_str)
ctx.run(cmd, echo=True, pty=True)
@task()
def assets(ctx, dev=False, watch=False):
"""Install and build static assets for admin.
use -d for dev environments
"""
if os.getcwd() != HERE:
os.chdir(HERE)
npm = 'npm install'
if not dev:
npm += ' --production'
ctx.run(npm, echo=True)
bower_install(ctx)
# Always set clean=False to prevent possible mistakes
# on prod
webpack(ctx, clean=False, watch=watch, dev=dev)
@task(aliases=['pack'])
def webpack(ctx, clean=False, watch=False, dev=False):
"""Build static assets with webpack."""
if clean:
clean_assets(ctx)
if os.getcwd() != HERE:
os.chdir(HERE)
webpack_bin = os.path.join(HERE, 'node_modules', 'webpack', 'bin',
'webpack.js')
args = [webpack_bin]
if settings.DEBUG_MODE and dev:
args += ['--colors']
else:
args += ['--progress']
if watch:
args += ['--watch']
config_file = 'webpack.admin.config.js' if dev else 'webpack.prod.config.js'
args += ['--config {0}'.format(config_file)]
command = ' '.join(args)
ctx.run(command, echo=True)
@task
def clean_assets(ctx):
"""Remove built JS files."""
public_path = os.path.join(HERE, 'static', 'public')
js_path = os.path.join(public_path, 'js')
ctx.run('rm -rf {0}'.format(js_path), echo=True)
@task(aliases=['bower'])
def bower_install(ctx):
if os.getcwd() != HERE:
os.chdir(HERE)
bower_bin = os.path.join(HERE, 'node_modules', 'bower', 'bin', 'bower')
ctx.run('{} prune --allow-root'.format(bower_bin), echo=True)
ctx.run('{} install --allow-root'.format(bower_bin), echo=True)
| mluo613/osf.io | admin/tasks.py | Python | apache-2.0 | 2,257 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import cgi
import html
import http.cookies
import os
from _wall import Wall
wall = Wall()
cookie = http.cookies.SimpleCookie(os.environ.get("HTTP_COOKIE"))
session = cookie.get("session")
if session is not None:
session = session.value
user = wall.find_cookie(session)  # Look up the user by the submitted session cookie
form = cgi.FieldStorage()
action = form.getfirst("action", "")
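# Dispatch on the requested action: "publish" stores a new post for an
# authenticated user; "login" signs an existing user in, or registers a new
# account when the login is unknown.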
if action == "publish":
text = form.getfirst("text", "")
text = html.escape(text)
if text and user is not None:
wall.publish(user, text)
elif action == "login":
login = form.getfirst("login", "")
login = html.escape(login)
password = form.getfirst("password", "")
password = html.escape(password)
if wall.find(login, password):
cookie = wall.set_cookie(login)
print('Set-cookie: session={}'.format(cookie))
elif wall.find(login):
        pass  # TODO: a warning about a wrong password should be shown here
else:
wall.register(login, password)
cookie = wall.set_cookie(login)
print('Set-cookie: session={}'.format(cookie))
pattern = '''
<!DOCTYPE HTML>
<html>
<head>
<meta charset="utf-8">
<title>Стена</title>
</head>
<body>
Форма логина и регистрации. При вводе несуществующего имени зарегистрируется новый пользователь.
<form action="/cgi-bin/wall.py">
Логин: <input type="text" name="login">
Пароль: <input type="password" name="password">
<input type="hidden" name="action" value="login">
<input type="submit">
</form>
{posts}
{publish}
</body>
</html>
'''
if user is not None:
pub = '''
<form action="/cgi-bin/wall.py">
<textarea name="text"></textarea>
<input type="hidden" name="action" value="publish">
<input type="submit">
</form>
'''
else:
pub = ''
print('Content-type: text/html\n')
print(pattern.format(posts=wall.html_list(), publish=pub))
| deadsquirrel/helloworld | tw/wall.py | Python | unlicense | 2,103 |
# Copyright (c) 2015 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and#
# limitations under the License.
import os
import yaml
import config as cfg
from generate_load import Prerequisites
from filtering_utils import FilteringUtils
class DataCollector(object):
"""
    Collects data about existing objects on both clusters and returns a dict
    with the info for both the SRC and DST clouds.
Methods description:
- unified_method: method to get resources for each tenant separately in
case the resource is specific for each tenant,
example key-pairs, security-groups, etc.
- nova_collector: method to get nova resources, list of resources can
be obtained in config.py:
config.rollback_params['param_dict']['Nova']
- cinder_collector: method to get cinder resources, list of resources
can be obtained in config.py:
config.rollback_params['param_dict']['Cinder']
- glance_collector: method to get glance resources, list of resources
can be obtained in config.py:
config.rollback_params['param_dict']['Glance']
- neutron_collector: method to get neutron resources, list of resources
can be obtained in config.py:
config.rollback_params['param_dict']['Neutron']
- keystone_collector: method to get keystone resources, list of
resources can be obtained in config.py:
config.rollback_params['param_dict']['Keystone']
"""
def __init__(self, config):
self.cloud_info = None
self.migration_utils = FilteringUtils()
self.main_folder = self.migration_utils.main_folder
self.config = config
def chose_destination_cloud(self, destination):
self.cloud_info = Prerequisites(cloud_prefix=destination,
config=self.config)
def return_to_admin_privileges(self):
self.cloud_info.switch_user(user=self.cloud_info.username,
password=self.cloud_info.password,
tenant=self.cloud_info.tenant)
def form_client_method(self, *arguments):
client = self.cloud_info
for argument in arguments:
client = getattr(client, argument)
return client()
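    # For SRC the collector logs in as each generated tenant user to capture
    # tenant-scoped resources and keys the result by tenant name (plus admin);
    # for DST a single listing under the current credentials is enough.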
def unified_method(self, destination, collected_items, _res, *args):
main_dict = {}
if destination == 'SRC':
for user, key_pair in zip(self.config.users, self.config.keypairs):
self.cloud_info.switch_user(user=user['name'],
password=user['password'],
tenant=user['tenant'])
names_list = self.form_client_method(*args)
instance_list = []
for instance in names_list:
instance_list.append(instance.__dict__['_info'])
main_dict[user['tenant']] = instance_list
self.return_to_admin_privileges()
names_list = self.form_client_method(*args)
instance_list = []
for instance in names_list:
instance_list.append(instance.__dict__['_info'])
main_dict['admin'] = instance_list
collected_items[_res] = main_dict
elif destination == 'DST':
names_list = self.form_client_method(*args)
instance_list = []
for instance in names_list:
instance_list.append(instance.__dict__['_info'])
collected_items[_res] = instance_list
def nova_collector(self, destination, *args):
"""
Nova data collector method.
"""
collected_items = {}
self.chose_destination_cloud(destination)
for arg in args[0]:
if arg == 'servers':
vm_list = []
servers_list = self.cloud_info.novaclient.servers.list(
search_opts={'all_tenants': 1})
for server in servers_list:
vm = server.__dict__['_info']
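                    # Drop the volatile 'updated' timestamp so that dumps
                    # taken at different times stay comparable.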
                    vm.pop(u'updated', None)
vm_list.append(vm)
collected_items[arg] = vm_list
elif arg == 'security_groups':
self.unified_method(destination, collected_items, arg,
'novaclient', arg, 'list')
elif arg == 'flavors':
flavor_list = []
flavors = self.cloud_info.novaclient.flavors.list()
for inst in flavors:
flavor = inst.__dict__
flavor_list.append(flavor['_info'])
collected_items[arg] = flavor_list
elif arg == 'quotas':
quotas = {}
tenant_list = self.cloud_info.keystoneclient.tenants.list()
for tenant in tenant_list:
tenant = tenant.__dict__
quota_list = self.cloud_info.novaclient.quotas.get(
tenant['id'])
quotas[tenant['name']] = quota_list.__dict__['_info']
collected_items[arg] = quotas
elif arg == 'keypairs':
self.unified_method(destination, collected_items, arg,
'novaclient', arg, 'list')
return collected_items
def neutron_collector(self, destination, *args):
"""
Neutron data collector method.
"""
collected_items = {}
self.chose_destination_cloud(destination)
for arg in args[0]:
if arg == 'networks':
networks_list = self.cloud_info.neutronclient.list_networks()
collected_items['networks'] = networks_list['networks']
elif arg == 'subnets':
subnets_list = self.cloud_info.neutronclient.list_subnets()
collected_items['subnets'] = subnets_list['subnets']
elif arg == 'routers':
routers_list = self.cloud_info.neutronclient.list_routers()
collected_items['routers'] = routers_list['routers']
elif arg == 'ports':
ports_list = self.cloud_info.neutronclient.list_ports()
collected_items['ports'] = ports_list['ports']
elif arg == 'quotas':
quotas = {}
tenant_list = self.cloud_info.keystoneclient.tenants.list()
for tenant in tenant_list:
tenant = tenant.__dict__
quota_list = self.cloud_info.neutronclient.show_quota(
tenant['id'])
quotas[tenant['name']] = quota_list
collected_items[arg] = quotas
return collected_items
def keystone_collector(self, destination, *args):
"""
Keystone data collector method.
"""
def optimizer(resource_list):
final_list = []
for resource in resource_list:
final_list.append(resource.__dict__['_info'])
return final_list
collected_items = {}
self.chose_destination_cloud(destination)
for arg in args[0]:
if arg == 'users':
user_list = self.cloud_info.keystoneclient.users.list()
data_list = optimizer(user_list)
collected_items[arg] = data_list
elif arg == 'tenants':
tenant_list = self.cloud_info.keystoneclient.tenants.list()
data_list = optimizer(tenant_list)
collected_items[arg] = data_list
elif arg == 'roles':
role_list = self.cloud_info.keystoneclient.roles.list()
data_list = optimizer(role_list)
collected_items[arg] = data_list
return collected_items
def glance_collector(self, destination, *args):
"""
Glance data collector method.
"""
collected_items = {}
self.chose_destination_cloud(destination)
for arg in args[0]:
if arg == 'images':
image_list = [x.__dict__['_info'] for x in
self.cloud_info.glanceclient.images.list()]
collected_items[arg] = image_list
elif arg == 'members':
members = {}
image_list = [x.__dict__ for x in
self.cloud_info.glanceclient.images.list()]
for image in image_list:
member_list = \
self.cloud_info.glanceclient.image_members.list(
image['id'])
final_list = []
for member in member_list:
final_list.append(member.__dict__['_info'])
members[image['name']] = final_list
collected_items[arg] = members
return collected_items
def cinder_collector(self, destination, *args):
"""
Cinder data collector method.
"""
collected_items = {}
self.chose_destination_cloud(destination)
for arg in args[0]:
if arg == 'volumes':
self.unified_method(destination, collected_items, arg,
'cinderclient', arg, 'list')
elif arg == 'volume_snapshots':
self.unified_method(destination, collected_items, arg,
'cinderclient', arg, 'list')
elif arg == 'quotas':
quotas = {}
tenant_list = self.cloud_info.keystoneclient.tenants.list()
for tenant in tenant_list:
tenant = tenant.__dict__
quota_list = self.cloud_info.cinderclient.quotas.get(
tenant['id'])
quotas[tenant['name']] = quota_list.__dict__['_info']
collected_items[arg] = quotas
return collected_items
def data_collector(self):
all_data = {'SRC': {}, 'DST': {}}
param_dict = self.config.rollback_params['param_dict']
for key in all_data.keys():
for service in param_dict.keys():
if service == 'Nova':
nova_data_list = \
self.nova_collector(key, param_dict[service])
all_data[key][service] = nova_data_list
elif service == 'Keystone':
keystone_data_list = \
self.keystone_collector(key, param_dict[service])
all_data[key][service] = keystone_data_list
elif service == 'Neutron':
neutron_data_list = \
self.neutron_collector(key, param_dict[service])
all_data[key][service] = neutron_data_list
elif service == 'Cinder':
cinder_data_list = \
self.cinder_collector(key, param_dict[service])
all_data[key][service] = cinder_data_list
elif service == 'Glance':
glance_data_list = \
self.glance_collector(key, param_dict[service])
all_data[key][service] = glance_data_list
return all_data
def dump_data(self, file_name=None):
if not file_name:
file_name = self.config.rollback_params['data_file_names']['PRE']
path = 'devlab/tests'
pre_file_path = os.path.join(self.main_folder, path, file_name)
data = self.data_collector()
with open(pre_file_path, "w") as f:
yaml.dump(data, f, default_flow_style=False)
if __name__ == '__main__':
rollback = DataCollector(cfg)
rollback.dump_data()
| japaniel/CloudFerry | devlab/tests/data_collector.py | Python | apache-2.0 | 12,617 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-07 12:02
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('groups', '0008_group_url_import_feed'),
]
operations = [
migrations.AlterField(
model_name='group',
name='url_import_feed',
field=models.BooleanField(default=False, help_text='Öffentliche Beiträge der angegebenen Website automatisch veröffentlichen, wenn technisch möglich', verbose_name='Beiträge von Website übernehmen'),
),
]
| stadtgestalten/stadtgestalten | grouprise/features/groups/migrations/0009_auto_20170607_1402.py | Python | agpl-3.0 | 628 |
# -*- coding:utf-8 -*-
# Copyright 2014, Quixey Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import aliyun.slb.connection as slb
import unittest
from aliyun.slb.model import (
BackendServer,
BackendServerStatus,
HTTPListener,
LoadBalancer,
LoadBalancerStatus,
Listener,
ListenerStatus,
Region,
TCPListener
)
class SlbRegionTest(unittest.TestCase):
def testRegionEqual(self):
r1 = Region('id1')
r2 = Region('id1')
self.assertEqual(r1, r2)
def testRegionNotEqual(self):
r1 = Region('id1')
r2 = Region('id2')
self.assertNotEqual(r1, r2)
def testRegionRepr(self):
r = Region('id')
self.assertTrue(repr(r).startswith('<SLBRegion id at'))
class SlbLoadBalancerStatusTest(unittest.TestCase):
def testLoadBalancerStatusEqual(self):
lbs1 = LoadBalancerStatus('id1', 'name1', 'status1')
lbs2 = LoadBalancerStatus('id1', 'name1', 'status1')
self.assertEqual(lbs1, lbs2)
def testLoadBalancerStatusNotEqual(self):
lb1 = LoadBalancerStatus('id1', 'name1', 'status1')
lb2 = LoadBalancerStatus('id2', 'name2', 'status2')
self.assertNotEqual(lb1, lb2)
def testLoadBalancerStatusIDNotEqual(self):
lb1 = LoadBalancerStatus('id1', 'name1', 'status1')
lb2 = LoadBalancerStatus('id2', 'name1', 'status1')
self.assertNotEqual(lb1, lb2)
def testLoadBalancerStatusNameNotEqual(self):
lb1 = LoadBalancerStatus('id1', 'name1', 'status1')
lb2 = LoadBalancerStatus('id1', 'name2', 'status1')
self.assertNotEqual(lb1, lb2)
def testLoadBalancerStatusStatusNotEqual(self):
lb1 = LoadBalancerStatus('id1', 'name1', 'status1')
lb2 = LoadBalancerStatus('id1', 'name1', 'status2')
self.assertNotEqual(lb1, lb2)
def testLoadBalancerStatusRepr(self):
lb1 = LoadBalancerStatus('id', 'name', 'status')
self.assertTrue(
repr(lb1).startswith('<LoadBalancerStatus id is status at'))
class SlbLoadBalancerTest(unittest.TestCase):
def testNoLoadBalancerId(self):
try:
LoadBalancer(
None,
'region',
'name',
'status',
'ip',
True,
[1, 2],
['bs1', 'bs2']) # BackendServers are not validated
self.fail('Error expected without load balancer id')
except slb.Error as err:
self.assertTrue('requires load_balancer_id' in str(err))
def testLBEqual(self):
lb1 = LoadBalancer(
'id',
'region',
'name',
'status',
'ip',
True,
[1,
2])
lb2 = LoadBalancer(
'id',
'region',
'name',
'status',
'ip',
True,
[1,
2])
self.assertEqual(lb1, lb2)
def testLBNotEqual(self):
lb1 = LoadBalancer(
'id',
'region',
'name',
'status',
'ip',
True,
[1,
2])
lb2 = LoadBalancer(
'id',
'region',
'name2',
'status',
'ip',
True,
[1,
2])
self.assertNotEqual(lb1, lb2)
def testRepr(self):
lb = LoadBalancer(
'id',
'region',
'name',
'status',
'ip',
True,
[1,
2])
self.assertTrue(repr(lb).startswith('<LoadBalancer id (name) at'))
class BackendServerTest(unittest.TestCase):
def testEqual(self):
bs1 = BackendServer('id', 1)
bs2 = BackendServer('id', 1)
self.assertEqual(bs1, bs2)
def testNotEqual(self):
bs1 = BackendServer('id', 1)
bs2 = BackendServer('id2', 1)
self.assertNotEqual(bs1, bs2)
def testRepr(self):
bs = BackendServer('id', 1)
self.assertTrue(repr(bs).startswith(u'<BackendServer id'))
class ListenerStatusTest(unittest.TestCase):
def testEqual(self):
bs1 = BackendServer('id1', 1)
bs2 = BackendServer('id2', 1)
ls1 = ListenerStatus(1, [bs1, bs2])
ls2 = ListenerStatus(1, [bs1, bs2])
self.assertEqual(ls1, ls2)
def testPortNotEqual(self):
bs1 = BackendServer('id1', 1)
bs2 = BackendServer('id2', 1)
ls1 = ListenerStatus(1, [bs1, bs2])
ls2 = ListenerStatus(2, [bs1, bs2])
self.assertNotEqual(ls1, ls2)
def testBackendsNotEqual(self):
bs1 = BackendServer('id1', 1)
bs2 = BackendServer('id2', 1)
bs3 = BackendServer('id3', 1)
bs4 = BackendServer('id4', 1)
ls1 = ListenerStatus(1, [bs1, bs2])
ls2 = ListenerStatus(1, [bs3, bs4])
self.assertNotEqual(ls1, ls2)
def testListenerStatusRepr(self):
ls = ListenerStatus(1, [])
self.assertTrue(repr(ls).startswith(u'<ListenerStatus 1 at '))
class TCPListenerTest(unittest.TestCase):
def testEqual(self):
l1 = TCPListener('id', 1, 1)
l2 = TCPListener('id', 1, 1)
self.assertEqual(l1, l2)
def testNotEqual(self):
l1 = TCPListener('id', 1, 1)
l2 = TCPListener('id', 1, 2)
self.assertNotEqual(l1, l2)
def testRepr(self):
listener = TCPListener('id', 1, 1)
self.assertTrue(repr(listener).startswith(u'<TCPListener on 1 for id'))
class HTTPListenerTest(unittest.TestCase):
def testEqual(self):
l1 = HTTPListener('id', 1, 1)
l2 = HTTPListener('id', 1, 1)
self.assertEqual(l1, l2)
def testNotEqual(self):
l1 = HTTPListener('id', 1, 1)
l2 = HTTPListener('id', 1, 2)
self.assertNotEqual(l1, l2)
def testStickyMismatch(self):
try:
lstn = HTTPListener('id', 1, 1, sticky_session=True)
self.fail("sticky_session mismatches sticky_session_type.")
except slb.Error as e:
self.assertTrue('sticky_session_type must be specified' in str(e))
def testStickyServerCookie(self):
try:
lstn = HTTPListener('id', 1, 1,
sticky_session=True,
sticky_session_type='server')
self.fail(
'cookie must be specified when using '
'sticky_session_type="server"')
except slb.Error as e:
self.assertTrue(
'cookie must be specified when using '
'sticky_session_type' in str(e))
def testRepr(self):
lstn = HTTPListener('id', 1, 1)
self.assertTrue(repr(lstn).startswith(u'<HTTPListener on 1 at '))
class BackendServerStatusTest(unittest.TestCase):
def testEqual(self):
bss1 = BackendServerStatus('id', 's')
bss2 = BackendServerStatus('id', 's')
self.assertEqual(bss1, bss2)
def testNotEqual(self):
bss1 = BackendServerStatus('id1', 's')
bss2 = BackendServerStatus('id2', 's')
self.assertNotEqual(bss1, bss2)
def testRepr(self):
bss = BackendServerStatus('id', 's')
self.assertTrue(
repr(bss).startswith(u'<BackendServerStatus id is s at '))
| quixey/python-aliyun | tests/unit/aliyun/slb/model_test.py | Python | apache-2.0 | 7,883 |
#
# for single user:
# pip install jsonpath-rw jsonpath-rw-ext --user
# for global:
# sudo -H pip install jsonpath-rw jsonpath-rw-ext
#
# OR preferred way through virtualenv
# virtualenv jsonpath
# source jsonpath/bin/activate
# pip install jsonpath-rw jsonpath-rw-ext
#
import json
from jsonpath_rw import jsonpath
#from jsonpath_rw import parse
# override parse for more capabilities
from jsonpath_rw_ext import parse
import jsonpath_rw_ext as jp
# does work, but extension makes it easier to just use 'match' and 'match1'
# so this goes unused now
def showJSONValues(json_data, expr):
jsonpath_expr = parse(expr)
for match in jsonpath_expr.find(json_data):
print(match.value)
return
####### MAIN ##########################################################
# read file from disk
with open("squad.json") as json_file:
    json_data = json.load(json_file)
# show simple attribute, then values from array
print("Squad: {}".format( jp.match1("squadName",json_data) ) )
print("\nMembers:")
for name in jp.match("$.members[*].name",json_data):
print(" {}".format(name))
# get all members, count length of returned list
print("\nCount members in list: {}".format( len(jp.match("$.members[*]",json_data )) ))
# use extensions to provide direct count of number of members in array
print("Count members using len extension: {}".format( jp.match1("$.members.`len`",json_data ) ))
# lookup array element given simple embedded element
lookFor="Madame Uppercut"
print("\nPowers of {}".format(lookFor))
powers = jp.match1("members[?name='" + lookFor + "'].powers",json_data)
for power in powers:
print(" {} has the power of {}".format(lookFor,power))
# find only array items that have element
print("\nAliases?")
memberHasAliases=jp.match("members[?(aliases)]",json_data)
for member in memberHasAliases:
print("{} has aliases: {}".format( member['name'],member['aliases'] ))
# find only array items where embedded structure has matching word
print("\nDoes anyone have an alias that contains 'Red'?")
memberHasAliases=jp.match("members[?(aliases[*]~'.*Red.*')]",json_data)
for member in memberHasAliases:
print("{} has alias that contains 'Red', {}".format( member['name'],member['aliases'] ))
# find nested array items that contain word
print("\nWhich specific aliases contain the word 'Red'?")
for thisalias in jp.match("members[*].aliases[?(@~'.*Red.*')]",json_data):
print(" Alias that contains 'Red': {}".format( thisalias ))
| fabianlee/blogcode | python-jsonpath/SquadTestJSONPath.py | Python | mit | 2,447 |
"""
Example of how to use byte-code execution technique to trace accesses to numpy
arrays.
This file demonstrates two applications of this technique:
* optimize numpy computations for repeated calling
* provide automatic differentiation of procedural code
"""
import __builtin__
import ctypes
import inspect
import logging
import opcode
#import os
import sys
#import trace
import traceback
import types
import numpy as np
import theano
import autodiff
from autodiff.utils import itercode, orderedcallargs, flat_from_doc
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# from theano.tensor.shared_randomstreams import RandomStreams
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
# XXX FIXME This will not do - seed must be exposed.
global_randomstreams = RandomStreams(seed=123)
# Opcode help: http://docs.python.org/library/dis.html
# -- cellget returns the contents of a cell
cellget = ctypes.pythonapi.PyCell_Get
cellget.restype = ctypes.py_object
cellget.argtypes = (ctypes.py_object,)
# -- cellmake creates a cell pointer
cellmake = ctypes.pythonapi.PyCell_New
cellmake.restype = ctypes.py_object
cellmake.argtypes = (ctypes.py_object,)
def istensor(x):
tensortypes = (theano.tensor.TensorConstant,
theano.tensor.TensorVariable)
return isinstance(x, tensortypes)
class Unassigned(object):
"""Unassigned value"""
class LoadUnassigned(Exception):
"""Access to Unassigned value"""
class FrameVM(object):
"""
A Class for evaluating a code block of CPython bytecode,
and tracking accesses to numpy arrays.
"""
def __init__(self, watcher, func):
logger.debug('FrameVM: {0}'.format(func))
self.watcher = watcher
if isinstance(func, autodiff.symbolic.Function):
func = func.pyfn
self.func = func
self.stack = []
self._locals = None
self._myglobals = None
self.code_iter = None
self.print_ops = False
self.print_stack = False
def push(self, item):
if item is Unassigned:
raise LoadUnassigned()
self.stack.append(item)
def pop(self):
return self.stack.pop(-1)
def pushN(self, items):
for item in items:
if item is Unassigned:
raise LoadUnassigned()
self.stack.extend(items)
def popN(self, N):
rval = self.stack[-N:]
self.stack[-N:] = []
return rval
def add_shadow(self, x):
if id(x) in self.watcher.constants:
return
# -- We cannot safely set up shadow variables that are aliased to
# memory that is visible to the running program, unless that
# program can guarantee that all views of that memory are
# immutable. CPython caches small ints (-5 <= i <= 256), so
# we wrap them in a non-cached _int() instance.
if isinstance(x, int):
if type(x) is int and -5 <= x <= 256:
x = np.int_(x)
s_x = self.watcher.shared(np.asarray(x))
elif isinstance(x, float):
s_x = self.watcher.shared(np.asarray(x))
elif getattr(x, 'dtype', None) == bool:
print >> sys.stderr, ('Warning: Theano has no bool, '
'upgrading to int8')
s_x = self.watcher.shared(x.astype('int8'))
elif isinstance(x, (np.ndarray, np.number)):
s_x = self.watcher.shared(x)
else:
return
self.watcher.shadow(x, s_x)
def ensure_shadow(self, x):
# small ints can not be shadowed due to CPython memory caching, so we
# wrap them in non-cached _ints.
if type(x) is int and -5 <= x <= 256:
x = np.int_(x)
if id(x) not in self.watcher:
self.add_shadow(x)
return self.watcher.getvar(x)
def call(self, args, kwargs):
if not isinstance(args, tuple):
raise TypeError('vm.call: args must be tuple', args)
if not isinstance(kwargs, dict):
raise TypeError('vm.call: kwargs must be dict', kwargs)
func = self.func
if isinstance(func, type) and issubclass(func, BaseException):
# XXX not shadowing exception creation, because exceptions
# do not have func_code. Is this OK? can we do better?
return func(*args, **kwargs)
func_code = self.func.func_code
self._myglobals = {}
self._locals = []
for name in func_code.co_names:
#print 'name', name
try:
self._myglobals[name] = func.func_globals[name]
except KeyError:
try:
self._myglobals[name] = __builtin__.__getattribute__(name)
except AttributeError:
#print 'WARNING: name lookup failed', name
pass
# get function arguments
argspec = inspect.getargspec(func)
# match function arguments to passed parameters
callargs = orderedcallargs(func, *args, **kwargs)
# named args => locals
self._locals.extend(callargs[arg] for arg in argspec.args)
# *args => locals
if argspec.varargs:
self._locals.append(callargs[argspec.varargs])
# **kwargs => locals
if argspec.keywords:
self._locals.append(callargs[argspec.keywords])
# other vars => locals
no_unbound_args = len(func_code.co_varnames) - len(self._locals)
self._locals.extend([Unassigned] * no_unbound_args)
# shadow arguments
for val in flat_from_doc(callargs):
if id(val) not in self.watcher:
self.add_shadow(val)
self.code_iter = itercode(func_code.co_code)
jmp = None
while not hasattr(self, 'rval'):
try:
i, op, arg = self.code_iter.send(jmp)
except StopIteration:
break
name = opcode.opname[op]
# method names can't have '+' in them
name = {'SLICE+0': 'SLICE_PLUS_0',
'SLICE+1': 'SLICE_PLUS_1',
'SLICE+2': 'SLICE_PLUS_2',
'SLICE+3': 'SLICE_PLUS_3',
'STORE_SLICE+0': 'STORE_SLICE_PLUS_0',
'STORE_SLICE+1': 'STORE_SLICE_PLUS_1',
'STORE_SLICE+2': 'STORE_SLICE_PLUS_2',
'STORE_SLICE+3': 'STORE_SLICE_PLUS_3',
}.get(name, name)
if self.print_ops:
print 'OP: ', i, name
if self.print_stack:
print self.stack
try:
op_method = getattr(self, 'op_' + name)
except AttributeError:
raise AttributeError('FrameVM does not have a method defined '
'for \'op_{0}\''.format(name))
except:
raise
jmp = op_method(i, op, arg)
return self.rval
def op_BINARY_ADD(self, i, op, arg):
arg2 = self.pop()
arg1 = self.pop()
# No Theano vars allowed on the stack
assert not hasattr(arg1, 'type')
assert not hasattr(arg2, 'type')
r = arg1 + arg2
self.push(r)
if (id(arg1) in self.watcher or id(arg2) in self.watcher):
s1 = self.ensure_shadow(arg1)
s2 = self.ensure_shadow(arg2)
if isinstance(r, np.ndarray):
self.watcher.shadow(r, (s1 + s2).astype(str(r.dtype)))
else:
self.watcher.shadow(r, s1 + s2)
#print 'added sym'
def op_BINARY_DIVIDE(self, i, op, arg):
arg2 = self.pop()
arg1 = self.pop()
assert not hasattr(arg1, 'type')
assert not hasattr(arg2, 'type')
r = arg1 / arg2
self.push(r)
if (id(arg1) in self.watcher or id(arg2) in self.watcher):
s1 = self.ensure_shadow(arg1)
s2 = self.ensure_shadow(arg2)
if isinstance(r, np.ndarray):
self.watcher.shadow(r, (s1 / s2).astype(str(r.dtype)))
else:
self.watcher.shadow(r, s1 / s2)
def op_BINARY_FLOOR_DIVIDE(self, i, op, arg):
arg2 = self.pop()
arg1 = self.pop()
assert not hasattr(arg1, 'type')
assert not hasattr(arg2, 'type')
r = arg1 // arg2
self.push(r)
if (id(arg1) in self.watcher or id(arg2) in self.watcher):
s1 = self.ensure_shadow(arg1)
s2 = self.ensure_shadow(arg2)
if isinstance(r, np.ndarray):
self.watcher.shadow(r, (s1 // s2).astype(str(r.dtype)))
else:
self.watcher.shadow(r, s1 // s2)
def op_BINARY_SUBTRACT(self, i, op, arg):
arg2 = self.pop()
arg1 = self.pop()
assert not hasattr(arg1, 'type')
assert not hasattr(arg2, 'type')
r = arg1 - arg2
self.push(r)
if (id(arg1) in self.watcher or id(arg2) in self.watcher):
s1 = self.ensure_shadow(arg1)
s2 = self.ensure_shadow(arg2)
if isinstance(r, np.ndarray):
self.watcher.shadow(r, (s1 - s2).astype(str(r.dtype)))
else:
self.watcher.shadow(r, s1 - s2)
def op_BINARY_MULTIPLY(self, i, op, arg):
arg2 = self.pop()
arg1 = self.pop()
r = arg1 * arg2
self.push(r)
assert not hasattr(arg1, 'type')
assert not hasattr(arg2, 'type')
if (id(arg1) in self.watcher or id(arg2) in self.watcher):
s1 = self.ensure_shadow(arg1)
s2 = self.ensure_shadow(arg2)
if isinstance(r, np.ndarray):
self.watcher.shadow(r, (s1 * s2).astype(str(r.dtype)))
else:
self.watcher.shadow(r, s1 * s2)
#print 'mul sym', id(r)
def op_BINARY_POWER(self, i, op, arg):
arg2 = self.pop()
arg1 = self.pop()
r = arg1 ** arg2
self.push(r)
if (id(arg1) in self.watcher or id(arg2) in self.watcher):
s1 = self.ensure_shadow(arg1)
s2 = self.ensure_shadow(arg2).astype(s1.dtype)
if isinstance(r, np.ndarray):
self.watcher.shadow(r, (s1 ** s2).astype(str(r.dtype)))
else:
self.watcher.shadow(r, s1 ** s2)
#print 'mul sym', id(r)
def op_BINARY_MODULO(self, i, op, arg):
arg2 = self.pop()
arg1 = self.pop()
r = arg1 % arg2
self.push(r)
if (id(arg1) in self.watcher or id(arg2) in self.watcher):
s1 = self.ensure_shadow(arg1)
s2 = self.ensure_shadow(arg2)
if isinstance(r, np.ndarray):
self.watcher.shadow(r, (s1 % s2).astype(str(r.dtype)))
else:
self.watcher.shadow(r, s1 % s2)
def op_BINARY_SUBSCR(self, i, op, arg):
# Implements TOS = TOS1[TOS].
tos1, tos = self.popN(2)
#print 'tos', tos
#print 'tos1', tos1
rval = tos1[tos]
self.push(rval)
if id(tos) in self.watcher:
s_tos = self.ensure_shadow(tos)
else:
s_tos = tos
if id(tos1) in self.watcher:
s_tos1 = self.ensure_shadow(tos1)
else:
s_tos1 = tos1
if isinstance(tos, np.ndarray) and tos.dtype == bool:
s_rval = s_tos1[s_tos.nonzero()]
else:
s_rval = s_tos1[s_tos]
if id(tos) in self.watcher or id(tos1) in self.watcher:
self.watcher.shadow(rval, s_rval)
def op_BUILD_MAP(self, i, op, arg):
self.push({})
def op_BUILD_SLICE(self, i, op, arg):
if arg == 2:
tos1, tos = self.popN(2)
self.push(slice(tos1, tos))
elif arg == 3:
tos2, tos1, tos = self.popN(3)
self.push(slice(tos2, tos1, tos))
else:
raise NotImplementedError()
def op_BUILD_TUPLE(self, i, op, arg):
if arg:
self.push(tuple(self.popN(arg)))
else:
self.push(())
def op_BUILD_LIST(self, i, op, arg):
if arg:
self.push(list(self.popN(arg)))
else:
self.push([])
def op_CALL_FUNCTION(self, i, op, arg, call_vargs=None, call_kwargs=None):
if call_vargs is None:
# -- these are the things passed with *foo syntax
call_vargs = ()
if call_kwargs is None:
# -- these are the things passed with **foo syntax
call_kwargs = {}
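        # CPython 2.x packs CALL_FUNCTION's oparg into two bytes: the low
        # byte counts positional arguments, the high byte counts keyword
        # arguments (each keyword argument occupies two stack slots).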
n_args = arg & 0xFF
n_kwargs = (arg & 0xFF00) >> 8
#print 'N_ARGS', n_args, n_kwargs, call_vargs
assert not (arg >> 16) # what would this stuff up here mean?
kwargs = dict([(self.stack[-2 * ii], self.stack[-2 * ii + 1])
for ii in range(n_kwargs, 0, -1)])
args = [self.stack[-ii - 2 * n_kwargs] for ii in range(n_args, 0, -1)]
assert all(Unassigned is not ai for ai in args)
# -- pop all args off the stack
if arg:
self.stack = self.stack[:- n_args - 2 * n_kwargs]
# -- pop the function itself off the stack
func = self.pop()
args = args + list(call_vargs)
orig_kwargs_size = len(kwargs)
kwargs.update(call_kwargs)
assert len(kwargs) == orig_kwargs_size + len(call_kwargs)
#print dir(func)
#print func.__self__
all_args = args + kwargs.values()
# -- get symbolic args
if len(call_vargs) > 0:
s_args = [self.watcher.getvar(a) for a in args[:-len(call_vargs)]]
s_args.extend(self.watcher.getvar(a) for a in call_vargs)
s_args = tuple(s_args)
else:
s_args = tuple(self.watcher.getvar(a) for a in args)
s_kwargs = dict([(kw, self.watcher.getvar(val))
for kw, val in kwargs.items()])
if hasattr(func, '__theano_op__'):
# XXX: document that we are assuming func is pure -
# if rval depends on globals or closure this Context is not
# going to know that.
# -- hand control back to Python for duration of func
rval = func(*args, **kwargs)
if any(id(a) in self.watcher for a in all_args):
s_rval = func.__theano_op__(*s_args, **s_kwargs)
self.watcher.shadow(rval, s_rval)
# ================ NumPy and builtin functions
elif ((getattr(func, '__module__', None)
and func.__module__.startswith('numpy'))
or isinstance(func, np.ufunc)
or str(func) == '<built-in function abs>'
or str(func) == '<built-in function max>'
or str(func) == '<built-in function min>'
or str(func) == '<built-in function sum>'):
rval = func(*args, **kwargs)
if any(id(a) in self.watcher for a in all_args):
if func.__name__ == 'sum':
if type(rval) == int:
rval = np.int_(rval)
s_rval = theano.tensor.sum(*s_args, **s_kwargs)
self.watcher.shadow(rval, s_rval)
elif func.__name__ in ('abs', 'absolute'):
self.watcher.shadow(rval, abs(*s_args))
elif func.__name__ == 'max':
assert str(func) == '<built-in function max>'
s_rval = theano.tensor.maximum(*s_args, **s_kwargs)
assert s_rval.ndim == 0 # builtin max can't make vector
self.watcher.shadow(rval, s_rval)
elif func.__name__ == 'min':
assert str(func) == '<built-in function min>'
s_rval = theano.tensor.minimum(*s_args, **s_kwargs)
assert s_rval.ndim == 0 # builtin min can't make vector
self.watcher.shadow(rval, s_rval)
elif func.__name__ == 'reshape':
self.watcher.shadow(
rval, theano.tensor.reshape(*s_args, **s_kwargs))
elif func.__name__ == 'arange':
# tensor.arange takes the dtype of its input but
# numpy.arange does not. Since we are compiling the Theano
# graph, recast the numpy value to match the symbolic dtype
sval = theano.tensor.arange(*s_args, **s_kwargs)
rval = rval.astype(sval.dtype)
elif func.__name__ in theano.tensor.basic._cast_mapping.keys():
# handle cast functions
rval = func(*args, **kwargs)
sval = theano.tensor.cast(*s_args, dtype=func.__name__)
self.watcher.shadow(rval, sval)
                elif func.__name__ in ['bool', 'bool_', 'bool8']:
                    # Theano has no bool type, cast to int8 instead
                    sval = theano.tensor.cast(*s_args, dtype='int8')
                    self.watcher.shadow(rval, sval)
elif func.__name__ in ['ones', 'zeros']:
s_fn = getattr(theano.tensor, func.__name__)
sval = s_fn(*s_args, **s_kwargs).astype(str(rval.dtype))
self.watcher.shadow(rval, sval)
elif func.__name__ == 'identity':
# theano has no identity function, only 'eye'
dtype = s_kwargs.get('dtype', None)
if not dtype and len(s_args) > 1:
dtype = s_args[1]
sval = theano.tensor.eye(s_args[0], dtype=dtype)
self.watcher.shadow(rval, sval)
else:
try:
theano_fn = getattr(theano.tensor, func.__name__)
except:
raise NotImplementedError(func)
# XXX should we do this? since it is not obvious that
# reductions don't take symbolic args, this could lead to
# users compiling functions that are supposed to have axis
# arguments but silently ignore them. Leaving this
# functionality out for now -- Users must call Constant()
# explicitly.
# many Theano reductions do not support symbolic axes
# by checking for it here we don't have to wrap the
# argument in a Constant()
# argspec = orderedargspec(theano_fn, *s_args, **s_kwargs)
# if (istensor(argspec.get('axis', None)) and
# func.__name__ not in ['concatenate']):
# if 'axis' in s_kwargs:
# s_kwargs['axis'] = kwargs['axis']
# else:
# r_axis = args[argspec.args.index('axis')]
# s_args[argspec.args.index('axis')] = r_axis
self.watcher.shadow(rval, theano_fn(*s_args, **s_kwargs))
else:
# no argument was shadowed (e.g. zeros())
self.add_shadow(rval)
# ================ Array methods
elif isinstance(getattr(func, '__self__', None),
(np.ndarray, np.number)):
assert id(func.__self__) in self.watcher
s_self = self.watcher.svars[id(func.__self__)]
if 0:
pass
elif func.__name__ == 'copy':
assert not args
assert not kwargs
rval = func()
self.watcher.shadow(rval, s_self.copy())
elif func.__name__ == 'reshape':
rval = func(*args, **kwargs)
# Theano requires shape to be a tuple
if not isinstance(s_args[0], (list, tuple)):
s_args = (s_args,)
self.watcher.shadow(rval, s_self.reshape(*s_args, **s_kwargs))
elif func.__name__ == 'swapaxes':
rval = func(*args, **kwargs)
axis1, axis2 = args
s_dims = range(s_self.ndim)
s_dims[axis1], s_dims[axis2] = s_dims[axis2], s_dims[axis1]
self.watcher.shadow(rval, s_self.dimshuffle(*s_dims))
elif func.__name__ == 'astype':
rval = func(*args, **kwargs)
if 'dtype' in kwargs:
dtype = kwargs['dtype']
else:
dtype = args[0]
if not isinstance(dtype, str):
# catch numpy dtype objects like np.float32
try:
dtype = dtype.__name__
except:
raise NotImplementedError
if dtype == 'bool':
                    dtype = 'int8'
self.watcher.shadow(rval, s_self.astype(dtype))
elif func.__name__ == 'sort':
# sort is an inplace method
rval = func() # returns None
# shadow the original array; it has been updated inplace
self.watcher.shadow(func.__self__, s_self.sort())
else:
try:
theano_fn = getattr(s_self, func.__name__)
except:
raise NotImplementedError(func)
rval = func(*args, **kwargs)
self.watcher.shadow(rval, theano_fn(*s_args, **s_kwargs))
# ================ built-ins
elif 'built-in' in str(func):
if len(args) == len(kwargs) == 0:
rval = func()
# -- built-in ndarray methods should be caught above, not here.
elif func.__name__ in ('setdefault',):
rval = func(*args, **kwargs)
elif func.__name__ in ('enumerate', 'range', 'xrange', 'zip'):
rval = func(*args, **kwargs)
elif 'method rand of mtrand.RandomState' in str(func):
# build Theano random uniform numbers
rval = func(*args, **kwargs)
self.watcher.shadow(
rval,
global_randomstreams.uniform(
low=0,
high=1,
size=tuple(args),
dtype=str(np.asarray(rval).dtype)))
elif ('method random of mtrand.RandomState' in str(func)
or 'method random_sample of mtrand.RandomState'
in str(func)):
# build Theano random uniform numbers
rval = func(*args, **kwargs)
self.watcher.shadow(
rval,
global_randomstreams.uniform(
low=0,
high=1,
size=autodiff.utils.as_seq(args[0], tuple),
dtype=str(np.asarray(rval).dtype)))
elif 'method uniform of mtrand.RandomState' in str(func):
# build Theano random normal numbers
rval = func(*args, **kwargs)
self.watcher.shadow(
rval,
global_randomstreams.uniform(
*args,
dtype=str(np.asarray(rval).dtype),
**kwargs))
else:
raise NotImplementedError(func)
# ================ Types
elif type(func) == type:
rval = func(*args, **kwargs)
# ================ AutoDiff Functions
elif func is autodiff.functions.constant:
            # make sure the rval will have a valid id, then add it to the
# Context's constants set (so it can be ignored)
rval = func(*args, **kwargs)
if isinstance(rval, int):
rval = np.int_(rval)
elif isinstance(rval, float):
rval = np.float_(rval)
elif isinstance(rval, bool):
rval = np.bool_(rval)
else:
rval = np.asarray(rval)
self.watcher.constants.add(id(rval))
elif func is autodiff.functions.tag:
# make sure the rval is shadowed, then add a new svar with the
# appropriate tag
rval = func(*args, **kwargs)
            tag = kwargs.pop('tag') if 'tag' in kwargs else args[1]
sval = self.ensure_shadow(rval)
self.watcher.svars[tag] = sval
# ================ Everything Else
else:
logger.debug('stepping into %s' % str(func))
vm = FrameVM(self.watcher, func)
rval = vm.call(tuple(args), kwargs)
self.push(rval)
def op_CALL_FUNCTION_VAR(self, i, op, arg):
call_vargs = self.pop()
return self.op_CALL_FUNCTION(i, op, arg, call_vargs=call_vargs)
def op_CALL_FUNCTION_VAR_KW(self, i, op, arg):
call_vargs, call_kwargs = self.popN(2)
rval = self.op_CALL_FUNCTION(i,
op,
arg,
call_vargs=call_vargs,
call_kwargs=call_kwargs)
return rval
def op_COMPARE_OP(self, i, op, arg):
opname = opcode.cmp_op[arg]
right = self.pop()
left = self.pop()
if 0:
pass
elif opname == '==':
self.push(left == right)
elif opname == '!=':
self.push(left != right)
elif opname == '>':
self.push(left > right)
elif opname == '<':
self.push(left < right)
elif opname == '>=':
self.push(left >= right)
elif opname == '<=':
self.push(left <= right)
elif opname == 'is':
self.push(left is right)
elif opname == 'in':
self.push(left in right)
else:
raise NotImplementedError('comparison: %s' % opname)
if any(id(a) in self.watcher for a in [left, right]):
sargs = [self.watcher.getvar(ai) for ai in [left, right]]
tos = self.stack[-1]
if 0:
pass
elif opname == '==':
self.watcher.shadow(tos, theano.tensor.eq(*sargs))
elif opname == '!=':
self.watcher.shadow(tos, theano.tensor.neq(*sargs))
elif opname == '<':
self.watcher.shadow(tos, theano.tensor.lt(*sargs))
elif opname == '>':
self.watcher.shadow(tos, theano.tensor.gt(*sargs))
elif opname == '<=':
self.watcher.shadow(tos, theano.tensor.le(*sargs))
elif opname == '>=':
self.watcher.shadow(tos, theano.tensor.ge(*sargs))
elif opname == 'is':
pass
else:
raise NotImplementedError('Comparison on watched args',
opname)
def op_DUP_TOP(self, i, op, arg):
self.stack.append(self.stack[-1])
def op_DUP_TOPX(self, i, op, arg):
assert arg > 0
self.stack.extend(self.stack[-arg:])
def op_FOR_ITER(self, i, op, arg):
# either push tos.next()
# or pop tos and send (arg)
tos = self.stack[-1]
try:
next = tos.next()
# print 'next', next
self.push(next)
except StopIteration:
self.pop()
return ('rel', arg)
def op_INPLACE_ADD(self, i, op, arg):
tos = self.pop()
tos1 = self.pop()
r = tos1
r += tos
self.push(r)
if (id(tos) in self.watcher or id(tos1) in self.watcher):
s_tos = self.ensure_shadow(tos)
s_tos1 = self.ensure_shadow(tos1)
if isinstance(r, np.ndarray):
self.watcher.shadow(r, (s_tos + s_tos1).astype(str(r.dtype)))
else:
self.watcher.shadow(r, s_tos + s_tos1)
def op_INPLACE_DIVIDE(self, i, op, arg):
tos = self.pop()
tos1 = self.pop()
r = tos1
r /= tos
self.push(r)
if (id(tos) in self.watcher or id(tos1) in self.watcher):
s_tos = self.ensure_shadow(tos)
s_tos1 = self.ensure_shadow(tos1)
            # r was computed as tos1 / tos, so shadow in the same operand order
            if isinstance(r, np.ndarray):
                self.watcher.shadow(r, (s_tos1 / s_tos).astype(str(r.dtype)))
            else:
                self.watcher.shadow(r, s_tos1 / s_tos)
def op_INPLACE_MULTIPLY(self, i, op, arg):
tos = self.pop()
tos1 = self.pop()
r = tos1
r *= tos
self.push(r)
if (id(tos) in self.watcher or id(tos1) in self.watcher):
s_tos = self.ensure_shadow(tos)
s_tos1 = self.ensure_shadow(tos1)
if isinstance(r, np.ndarray):
self.watcher.shadow(r, (s_tos * s_tos1).astype(str(r.dtype)))
else:
self.watcher.shadow(r, s_tos * s_tos1)
def op_INPLACE_SUBTRACT(self, i, op, arg):
tos1, tos = self.popN(2)
r = tos1
r -= tos
self.push(r)
if (id(tos) in self.watcher or id(tos1) in self.watcher):
s_tos = self.ensure_shadow(tos)
s_tos1 = self.ensure_shadow(tos1)
            # r was computed as tos1 - tos, so shadow in the same operand order
            if isinstance(r, np.ndarray):
                self.watcher.shadow(r, (s_tos1 - s_tos).astype(str(r.dtype)))
            else:
                self.watcher.shadow(r, s_tos1 - s_tos)
def op_JUMP_ABSOLUTE(self, i, op, arg):
# print 'sending', arg
return ('abs', arg)
def op_JUMP_FORWARD(self, i, op, arg):
return ('rel', arg)
def op_JUMP_IF_TRUE(self, i, op, arg):
tos = self.stack[-1]
if tos:
return ('rel', arg)
def op_GET_ITER(self, i, op, arg):
# replace tos -> iter(tos)
tos = self.stack[-1]
if id(tos) in self.watcher:
raise NotImplementedError('iterator of watched value')
self.stack[-1] = iter(tos)
def op_LOAD_GLOBAL(self, i, op, arg):
# print 'LOAD_GLOBAL', self.names[arg]
tos = self._myglobals[self.func.func_code.co_names[arg]]
if type(tos) is int and -5 <= tos <= 256:
tos = np.int_(tos)
self.push(tos)
if id(tos) not in self.watcher:
self.add_shadow(self.stack[-1])
def op_LOAD_ATTR(self, i, op, arg):
# print 'LOAD_ATTR', self.names[arg]
attr = self.func.func_code.co_names[arg]
#
# we would like to do
# self.stack[-1] = getattr(TOS, attr)
#
# *EXCEPT* if attr is a property, then it actually represents a
# function call
tos = self.pop()
if isinstance(tos, np.ndarray):
if id(tos) not in self.watcher:
raise NotImplementedError(
'how did this var get here?', (id(tos), tos))
if id(tos) in self.watcher:
s_tos = self.watcher.svars[id(tos)]
if attr == 'shape':
rval = tos.shape
# note this old comment... what does it mean?
# XXX: NOT TRACKING SHAPE CHANGES BECAUSE
# BAD INTERACTION WITH fbncc.__theano_op__
self.watcher.shadow(rval, s_tos.shape)
elif attr == 'T':
rval = tos.T
self.watcher.shadow(rval, s_tos.T)
elif attr == 'imag':
rval = tos.imag
self.watcher.shadow(rval, s_tos.imag)
else:
try:
rval = getattr(tos, attr)
except:
raise NotImplementedError('ndarray attribute %s' % attr)
self.push(rval)
else:
logger.debug('attribute access %s' % attr)
rval = getattr(tos, attr)
self.push(rval)
# if (isinstance(rval, np.ndarray)
# and id(rval) not in self.watcher):
# self.add_shadow(rval)
if id(rval) not in self.watcher:
self.add_shadow(rval)
def op_LOAD_CONST(self, i, op, arg):
tos = self.func.func_code.co_consts[arg]
if type(tos) is int and -5 <= tos <= 256:
tos = np.int_(tos)
self.push(tos)
# if isinstance(tos, float):
# if id(tos) not in self.watcher:
# var = theano.tensor.as_tensor_variable(tos)
# self.watcher.svars[id(tos)] = var
if (isinstance(tos, np.ndarray) and id(tos) not in self.watcher):
raise NotImplementedError()
def op_LOAD_CLOSURE(self, i, op, arg):
co_cellvars = self.func.func_code.co_cellvars
co_freevars = self.func.func_code.co_freevars
co_varnames = self.func.func_code.co_varnames
if arg < len(co_cellvars):
name = co_cellvars[arg]
else:
name = co_freevars[arg - len(co_cellvars)]
thing = self._locals[co_varnames.index(name)]
cell = cellmake(thing)
self.push(cell)
def op_LOAD_DEREF(self, i, op, arg):
# -- this is called to access a variable that appears in multiple
# scopes.
# -- vars *referenced* by nested scopes
co_cellvars = self.func.func_code.co_cellvars
# -- vars read from enclosing scopes
co_freevars = self.func.func_code.co_freevars
# -- all varnames
co_varnames = self.func.func_code.co_varnames
if arg < len(co_cellvars):
# -- normal case
name = co_cellvars[arg]
# -- XXX: Is this really the right thing to do??
thing = self._locals[co_varnames.index(name)]
else:
name = co_freevars[arg - len(co_cellvars)]
closure = self.func.func_closure
assert len(co_freevars) == len(closure)
# print 'LOAD_DEREF (%s:%s)' % (self.func, name)
cell = closure[arg - len(co_cellvars)]
thing = cellget(cell)
self.push(thing)
# if (isinstance(thing, np.ndarray) and id(thing) not in self.watcher):
# self.add_shadow(thing)
if id(thing) not in self.watcher:
self.add_shadow(thing)
def op_LOAD_FAST(self, i, op, arg):
tos = self._locals[arg]
try:
self.push(tos)
except LoadUnassigned:
raise LoadUnassigned(self.func.func_code.co_varnames[arg])
if not isinstance(tos, (int, float)):
if id(tos) not in self.watcher:
self.add_shadow(tos)
def op_MAKE_CLOSURE(self, i, op, arg):
return self.op_MAKE_FUNCTION(i, op, arg, w_closure=True)
def op_MAKE_FUNCTION(self, i, op, arg, w_closure=False):
func_code = self.pop()
if w_closure:
cells = self.pop()
if arg:
argdefs = tuple(self.stack[-arg:])
self.stack[-arg:] = []
else:
argdefs = ()
if w_closure:
fn = types.FunctionType(func_code,
self.func.func_globals,
argdefs=argdefs,
closure=cells,)
else:
fn = types.FunctionType(func_code,
self.func.func_globals,
argdefs=argdefs)
self.push(fn)
def op_POP_BLOCK(self, i, op, arg):
logger.debug('POP_BLOCK, what to do?')
pass
def op_POP_JUMP_IF_FALSE(self, i, op, arg):
#tos = self.stack[-1]
tos = self.pop()
if not tos:
return ('abs', arg)
def op_POP_JUMP_IF_TRUE(self, i, op, arg):
#tos = self.stack[-1]
tos = self.pop()
if tos:
return ('abs', arg)
def op_POP_TOP(self, i, op, arg):
self.pop()
def op_PRINT_ITEM(self, i, op, arg):
thing = self.pop()
if str(thing) == 'PRINT_OPS:True':
self.print_ops = True
if str(thing) == 'PRINT_STACK:True':
self.print_stack = True
print thing,
def op_PRINT_NEWLINE(self, i, op, arg):
print ''
def op_SETUP_LOOP(self, i, op, arg):
logger.debug('SETUP_LOOP, what to do?')
pass
def op_SLICE_PLUS_0(self, i, op, arg):
#Implements TOS = TOS[:].
TOS = self.pop()
new_tos = TOS[:]
self.push(new_tos)
if id(TOS) in self.watcher:
s = self.watcher.getvar(TOS)
s_rval = s[:]
self.watcher.shadow(new_tos, s_rval)
def op_SLICE_PLUS_1(self, i, op, arg):
# TOS = TOS1[TOS:]
TOS1, TOS = self.popN(2)
new_tos = TOS1[TOS:]
self.push(new_tos)
if any(id(t) in self.watcher for t in [TOS, TOS1]):
s = self.watcher.getvar(TOS)
s1 = self.watcher.getvar(TOS1)
s_rval = s1[s:]
self.watcher.shadow(new_tos, s_rval)
def op_SLICE_PLUS_2(self, i, op, arg):
# TOS = TOS1[:TOS]
TOS1, TOS = self.popN(2)
new_tos = TOS1[:TOS]
self.push(new_tos)
if any(id(t) in self.watcher for t in [TOS, TOS1]):
s = self.watcher.getvar(TOS)
s1 = self.watcher.getvar(TOS1)
s_rval = s1[:s]
self.watcher.shadow(new_tos, s_rval)
def op_SLICE_PLUS_3(self, i, op, arg):
# Implements TOS = TOS2[TOS1:TOS]
TOS2, TOS1, TOS = self.popN(3)
new_tos = TOS2[TOS1:TOS]
self.push(new_tos)
if any(id(t) in self.watcher for t in [TOS, TOS1, TOS2]):
s = self.watcher.getvar(TOS)
s1 = self.watcher.getvar(TOS1)
s2 = self.watcher.getvar(TOS2)
s_rval = s2[s1:s]
self.watcher.shadow(new_tos, s_rval)
def op_STORE_ATTR(self, i, op, arg):
# implements TOS.name = TOS1
TOS1, TOS = self.popN(2)
        if id(TOS) in self.watcher:
raise NotImplementedError()
name = self.func.func_code.co_names[arg]
setattr(TOS, name, TOS1)
def op_STORE_SLICE_PLUS_0(self, i, op, arg):
#Implements TOS[:] = TOS1
TOS1, TOS = self.popN(2)
new_tos = TOS
new_tos[:] = TOS1
self.push(new_tos)
if any(id(t) in self.watcher for t in [TOS, TOS1]):
s_tos = self.watcher.getvar(TOS)
s_tos1 = self.watcher.getvar(TOS1)
s_rval = theano.tensor.set_subtensor(s_tos[:], s_tos1)
self.watcher.shadow(new_tos, s_rval)
def op_STORE_SLICE_PLUS_1(self, i, op, arg):
TOS2, TOS1, TOS = self.popN(3)
new_tos = TOS1
new_tos[TOS:] = TOS2
self.push(new_tos)
if any(id(t) in self.watcher for t in [TOS, TOS1, TOS2]):
s_tos = self.watcher.getvar(TOS)
s_tos1 = self.watcher.getvar(TOS1)
s_tos2 = self.watcher.getvar(TOS2)
s_rval = theano.tensor.set_subtensor(s_tos1[s_tos:], s_tos2)
self.watcher.shadow(new_tos, s_rval)
def op_STORE_SLICE_PLUS_2(self, i, op, arg):
# TOS1[:TOS] = TOS2
TOS2, TOS1, TOS = self.popN(3)
new_tos = TOS1
new_tos[:TOS] = TOS2
self.push(new_tos)
if any(id(t) in self.watcher for t in [TOS, TOS1, TOS2]):
s_tos = self.watcher.getvar(TOS)
s_tos1 = self.watcher.getvar(TOS1)
s_tos2 = self.watcher.getvar(TOS2)
s_rval = theano.tensor.set_subtensor(s_tos1[:s_tos], s_tos2)
self.watcher.shadow(new_tos, s_rval)
def op_STORE_SLICE_PLUS_3(self, i, op, arg):
# Implements TOS2[TOS1:TOS] = TOS3
TOS3, TOS2, TOS1, TOS = self.popN(4)
new_tos = TOS2
new_tos[TOS1:TOS] = TOS3
self.push(new_tos)
if any(id(t) in self.watcher for t in [TOS, TOS1, TOS2, TOS3]):
s_tos = self.watcher.getvar(TOS)
s_tos1 = self.watcher.getvar(TOS1)
s_tos2 = self.watcher.getvar(TOS2)
s_tos3 = self.watcher.getvar(TOS3)
s_rval = theano.tensor.set_subtensor(s_tos2[s_tos1:s_tos], s_tos3)
self.watcher.shadow(new_tos, s_rval)
def op_STORE_FAST(self, i, op, arg):
self._locals[arg] = self.pop()
def op_STORE_MAP(self, i, op, arg):
key = self.pop()
val = self.pop()
dct = self.stack[-1]
dct[key] = val
def op_STORE_SUBSCR(self, i, op, arg):
# Implements TOS1[TOS] = TOS2.
tos = self.pop()
tos1 = self.pop()
tos2 = self.pop()
tos1[tos] = tos2
# tos can't be real-valued so there's no gradient through it
if id(tos1) in self.watcher or id(tos2) in self.watcher:
s_tos1 = self.ensure_shadow(tos1)
s_tos2 = self.ensure_shadow(tos2)
new_s_tos1 = theano.tensor.set_subtensor(s_tos1[tos], s_tos2)
self.watcher.svars[id(tos1)] = new_s_tos1
def op_RAISE_VARARGS(self, i, op, arg):
print >> sys.stderr, "Exception in autodiff.Context:"
if 1 <= arg:
exc = self.pop()
else:
exc = None
if 2 <= arg:
param = self.pop()
else:
param = None
if 3 <= arg:
tb = self.pop()
traceback.print_tb(tb, file=sys.stderr)
else:
print >> sys.stderr, "No traceback info available"
if param is not None:
raise param
elif exc is not None:
raise exc()
else:
raise Exception('Completely mysterious exception')
def op_RETURN_VALUE(self, i, op, arg):
self.rval = self.pop()
if id(self.rval) not in self.watcher:
self.add_shadow(self.rval)
def op_ROT_TWO(self, i, op, arg):
a = self.stack[-1]
b = self.stack[-2]
self.stack[-1] = b
self.stack[-2] = a
def op_ROT_THREE(self, i, op, arg):
a = self.stack[-1]
b = self.stack[-2]
c = self.stack[-3]
self.stack[-1] = b
self.stack[-2] = c
self.stack[-3] = a
def op_ROT_FOUR(self, i, op, arg):
a = self.stack[-1]
b = self.stack[-2]
c = self.stack[-3]
d = self.stack[-4]
self.stack[-1] = b
self.stack[-2] = c
self.stack[-3] = d
self.stack[-4] = a
def op_UNARY_NEGATIVE(self, i, op, arg):
arg1 = self.pop()
assert not hasattr(arg1, 'type')
r = -arg1
self.push(r)
if id(arg1) in self.watcher:
s1 = self.ensure_shadow(arg1)
self.watcher.shadow(r, -s1)
def op_UNPACK_SEQUENCE(self, i, op, arg):
tos = self.pop()
self.stack.extend(tos[::-1])
class Context(object):
def __init__(self, device=None, borrowable=(), force_floatX=False):
"""
borrowable : tuple of objects
If an object in this tuple is encountered while tracing the
function, then its symbolic representation will alias that object's
memory location. This means that *inplace* operations on the Python
(likely NumPy) object will affect the symbolic function.
force_floatX : bool
If True, floats and float NumPy ndarrays will be cast to the dtype
specified at theano.config.floatX when forming symbolic shared
variables, if they do not have it already. Objects in `borrowable`
are never cast.
"""
self.svars = {}
self.nogc = [] # ids that must not be reused
# XXX: rethink to avoid actually holding on to all these intermediates.
self.device = device
self.borrowable_ids = [id(b) for b in borrowable]
self.force_floatX = force_floatX
self.constants = set()
def __iter__(self):
return self.svars.__iter__()
def shadow(self, rval, sval, force=True):
assert hasattr(sval, 'type') # assert sval is Theano variable
if force:
self.svars[id(rval)] = sval
else:
self.svars.setdefault(id(rval), sval)
# -- shadow vars have to match dtype and ndim
if isinstance(rval, np.ndarray):
if str(rval.dtype) == 'bool':
assert sval.dtype == 'int8', (rval.dtype, sval.dtype)
elif not self.force_floatX:
assert str(rval.dtype) == sval.dtype, (rval, sval)
assert rval.ndim == sval.ndim, (rval, sval)
# -- assert postcondition
assert sval is self.getvar(rval)
self.nogc.append(rval)
def call(self, fn, args=(), kwargs={}):
vm = FrameVM(self, fn)
return vm.call(args, kwargs)
def shared(self, obj, name=None, borrow=None):
if borrow is None:
borrow = (id(obj) in self.borrowable_ids)
if self.force_floatX and not borrow:
if (isinstance(obj, np.ndarray)
and 'float' in str(obj.dtype)
and str(obj.dtype) != theano.config.floatX):
obj = obj.astype(theano.config.floatX)
# not all objects have shared constructors with a borrow keyword
# for example theano.shared(np.float32(1)) works but
# theano.shared(np.float32(1), borrow=[False|True]) fails
if self.device == 'cpu':
try:
return theano.tensor._shared(obj, borrow=borrow)
except:
return theano.tensor._shared(obj)
else:
try:
return theano.shared(obj, borrow=borrow)
except:
return theano.shared(obj)
def getvar(self, var):
return self.svars.get(id(var), var)
def reset(self):
self.constants.clear()
| sujason/quantitative | autodiff/context.py | Python | mit | 45,855 |
import datetime
from django.conf import settings
from django.db.models import Q
from django.utils import timezone
from django.core.cache import cache
from funfactory.urlresolvers import reverse
from airmozilla.main.models import (
Event,
Channel,
EventHitStats,
most_recent_event
)
from airmozilla.main.views import is_contributor
from airmozilla.search.forms import SearchForm
from airmozilla.staticpages.models import StaticPage
def nav_bar(request):
def get_nav_bar():
items = [
('Home', reverse('main:home'), 'home', ''),
('About', '/about/', '/about', ''),
('Channels', reverse('main:channels'), 'channels', ''),
('Calendar', reverse('main:calendar'), 'calendar', ''),
]
if not request.user.is_staff:
items.append(
('Tag Cloud', reverse('main:tag_cloud'), 'tag_cloud', '')
)
items.append(
('Starred', reverse('starred:home'), 'starred', '')
)
unfinished_events = 0
if request.user.is_active:
unfinished_events = Event.objects.filter(
creator=request.user,
status=Event.STATUS_INITIATED,
upload__isnull=False,
).count()
if settings.USE_NEW_UPLOADER:
items.append(
('New/Upload', reverse('new:home'), 'new', ''),
)
else:
items.append(
('Requests', reverse('suggest:start'), 'suggest', ''),
)
if request.user.is_staff:
items.append(
('Management', reverse('manage:events'), '', ''),
)
if not settings.BROWSERID_DISABLED:
items.append(
('Sign out', '/browserid/logout/', '', 'browserid-logout'),
)
return {'items': items, 'unfinished_events': unfinished_events}
# The reason for making this a closure is because this stuff is not
# needed on every single template render. Only the main pages where
# there is a nav bar at all.
return {'nav_bar': get_nav_bar}
def dev(request):
return {
'DEV': settings.DEV,
'DEBUG': settings.DEBUG,
'BROWSERID_DISABLED': settings.BROWSERID_DISABLED,
}
def search_form(request):
return {'search_form': SearchForm(request.GET)}
def base(request):
def get_feed_data():
feed_privacy = _get_feed_privacy(request.user)
if getattr(request, 'channels', None):
channels = request.channels
else:
channels = Channel.objects.filter(
slug=settings.DEFAULT_CHANNEL_SLUG
)
if settings.DEFAULT_CHANNEL_SLUG in [x.slug for x in channels]:
title = 'Air Mozilla RSS'
url = reverse('main:feed', args=(feed_privacy,))
else:
_channel = channels[0]
title = 'Air Mozilla - %s - RSS' % _channel.name
url = reverse(
'main:channel_feed',
args=(_channel.slug, feed_privacy)
)
return {
'title': title,
'url': url,
}
return {
# used for things like {% if event.attr == Event.ATTR1 %}
'Event': Event,
'get_feed_data': get_feed_data,
}
def sidebar(request):
# none of this is relevant if you're in certain URLs
def get_sidebar():
data = {}
if not getattr(request, 'show_sidebar', True):
return data
# if viewing a specific page is limited by channel, apply that
# filtering here too
if getattr(request, 'channels', None):
channels = request.channels
else:
channels = Channel.objects.filter(
slug=settings.DEFAULT_CHANNEL_SLUG
)
if settings.DEFAULT_CHANNEL_SLUG in [x.slug for x in channels]:
sidebar_channel = settings.DEFAULT_CHANNEL_SLUG
else:
_channel = channels[0]
sidebar_channel = _channel.slug
data['upcoming'] = get_upcoming_events(channels, request.user)
data['featured'] = get_featured_events(channels, request.user)
data['sidebar_top'] = None
data['sidebar_bottom'] = None
sidebar_urls_q = (
Q(url='sidebar_top_%s' % sidebar_channel) |
Q(url='sidebar_bottom_%s' % sidebar_channel) |
Q(url='sidebar_top_*') |
Q(url='sidebar_bottom_*')
)
# to avoid having to do 2 queries, make a combined one
# set it up with an iterator
for page in StaticPage.objects.filter(sidebar_urls_q):
if page.url.startswith('sidebar_top_'):
data['sidebar_top'] = page
elif page.url.startswith('sidebar_bottom_'):
data['sidebar_bottom'] = page
return data
# Make this context processor return a closure so it's explicit
# from the template if you need its data.
return {'get_sidebar': get_sidebar}
def get_upcoming_events(channels, user,
length=settings.UPCOMING_SIDEBAR_COUNT):
"""return a queryset of upcoming events"""
anonymous = True
contributor = False
if user.is_active:
anonymous = False
if is_contributor(user):
contributor = True
cache_key = 'upcoming_events_%s_%s' % (int(anonymous), int(contributor))
cache_key += ','.join(str(x.id) for x in channels)
event = most_recent_event()
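    # folding the latest modification time into the key below invalidates
    # the cached list whenever any event is added or edited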
if event:
cache_key += str(event.modified.microsecond)
upcoming = cache.get(cache_key)
if upcoming is None:
upcoming = _get_upcoming_events(channels, anonymous, contributor)
upcoming = upcoming[:length]
cache.set(cache_key, upcoming, 60 * 60)
return upcoming
def _get_upcoming_events(channels, anonymous, contributor):
"""do the heavy lifting of getting the featured events"""
upcoming = Event.objects.upcoming().order_by('start_time')
upcoming = upcoming.filter(channels__in=channels).distinct()
upcoming = upcoming.select_related('picture')
if anonymous:
upcoming = upcoming.exclude(privacy=Event.PRIVACY_COMPANY)
elif contributor:
upcoming = upcoming.filter(privacy=Event.PRIVACY_PUBLIC)
return upcoming
def get_featured_events(
channels,
user,
length=settings.FEATURED_SIDEBAR_COUNT
):
"""return a list of events that are sorted by their score"""
anonymous = True
contributor = False
if user.is_active:
anonymous = False
if is_contributor(user):
contributor = True
cache_key = 'featured_events_%s_%s' % (int(anonymous), int(contributor))
if channels:
cache_key += ','.join(str(x.id) for x in channels)
event = most_recent_event()
if event:
cache_key += str(event.modified.microsecond)
featured = cache.get(cache_key)
if featured is None:
featured = _get_featured_events(channels, anonymous, contributor)
featured = featured[:length]
cache.set(cache_key, featured, 60 * 60)
# Sadly, in Django when you do a left outer join on a many-to-many
# table you get repeats and you can't fix that by adding a simple
# `distinct` on the first field.
    # In Django, `myqueryset.distinct('id')` requires 'id' to also be a
    # field you order by.
# In pure Postgresql you can do this:
# SELECT
# DISTINCT main_eventhitstats.id as id,
# (some formula) AS score,
# ...
# FROM ...
# INNER JOIN ...
# INNER JOIN ...
# ORDER BY score DESC
# LIMIT 5;
#
# But you can't do that with Django.
# So we have to manually de-dupe. Hopefully we can alleviate this
# problem altogether when we start doing aggregates where you have
# many repeated EventHitStats *per* event and you need to look at
# their total score across multiple vidly shortcodes.
events = []
for each in featured:
if each.event not in events:
events.append(each.event)
return events
def _get_featured_events(channels, anonymous, contributor):
"""do the heavy lifting of getting the featured events"""
now = timezone.now()
yesterday = now - datetime.timedelta(days=1)
# subtract one second to not accidentally tip it
yesterday -= datetime.timedelta(seconds=1)
featured = (
EventHitStats.objects
.filter(
Q(event__status=Event.STATUS_SCHEDULED) |
Q(event__status=Event.STATUS_PROCESSING)
)
.exclude(event__archive_time__isnull=True)
.filter(event__archive_time__lt=yesterday)
.exclude(event__channels__exclude_from_trending=True)
.extra(
select={
# being 'featured' pretends the event has twice as
# many hits as actually does
'score': '(featured::int + 1) * total_hits'
'/ extract(days from (now() - archive_time)) ^ 1.8',
}
)
.select_related('event')
.order_by('-score')
)
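    # The 'score' above is a time-decay ranking: total hits, doubled for
    # featured events by the (featured::int + 1) factor, divided by the
    # archive age in days raised to the power 1.8, so older events need
    # ever more hits to stay featured.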
if channels:
featured = featured.filter(event__channels__in=channels)
if anonymous:
featured = featured.filter(event__privacy=Event.PRIVACY_PUBLIC)
elif contributor:
featured = featured.exclude(event__privacy=Event.PRIVACY_COMPANY)
featured = featured.select_related('event__picture')
return featured
def analytics(request):
    # unless specified, the analytics snippet is included when DEBUG is False
if request.path_info.startswith('/manage/'):
include = False
else:
include = getattr(
settings,
'INCLUDE_ANALYTICS',
not settings.DEBUG
)
return {'include_analytics': include}
def _get_feed_privacy(user):
"""return 'public', 'contributors' or 'company' depending on the user
profile.
Because this is used very frequently and because it's expensive to
pull out the entire user profile every time, we use cache to remember
if the user is a contributor or not (applicable only if logged in)
"""
if user.is_active:
if is_contributor(user):
return 'contributors'
return 'company'
return 'public'
def browserid(request):
# by making this a function, it means we only need to run this
# when ``redirect_next()`` is called
def redirect_next():
next = request.GET.get('next')
if next:
if '://' in next:
return reverse('main:home')
return next
url = request.META['PATH_INFO']
if url in (reverse('main:login'), reverse('main:login_failure')):
# can't have that!
url = reverse('main:home')
return url
return {'redirect_next': redirect_next}
def faux_i18n(request):
"""We don't do I18N but we also don't want to necessarily delete
all the hard work on using `_('English')` in templates because
maybe one day we'll start doing I18N and then it might be good
to keep these annotations in the templates."""
def _(*args, **kwargs):
return args[0]
return {'_': _}
def autocompeter(request):
"""We need to tell the Autocompeter service which groups the current
user should be able to view."""
key = getattr(settings, 'AUTOCOMPETER_KEY', None)
if not key:
return {}
groups = []
if request.user and request.user.is_active:
groups.append(Event.PRIVACY_CONTRIBUTORS)
if not is_contributor(request.user):
groups.append(Event.PRIVACY_COMPANY)
url = getattr(settings, 'AUTOCOMPETER_URL', '')
domain = getattr(settings, 'AUTOCOMPETER_DOMAIN', '')
enabled = getattr(settings, 'AUTOCOMPETER_ENABLED', True)
return {
'include_autocompeter': enabled,
'autocompeter_domain': domain,
'autocompeter_groups': ','.join(groups),
'autocompeter_url': url,
}
| lcamacho/airmozilla | airmozilla/main/context_processors.py | Python | bsd-3-clause | 12,078 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
import oslotest.base
from designate.conf import opts
from designate.conf import worker
class TestConfOpts(oslotest.base.BaseTestCase):
def setUp(self):
super(TestConfOpts, self).setUp()
def test_opts_tupleize(self):
self.assertEqual([('a', 'b')], opts._tupleize({'a': 'b'}))
def test_opts_list(self):
self.assertIsInstance(opts.list_opts(), list)
@mock.patch('pkgutil.iter_modules')
def test_opts_list_module_names(self, mock_iter_modules):
mock_iter_modules.return_value = iter(
[
(None, 'api', False),
(None, 'worker', False),
(None, 'unknown', True),
]
)
self.assertEqual(['api', 'worker'], opts._list_module_names())
def test_opts_import_modules(self):
self.assertEqual([worker], opts._import_modules(['worker']))
@mock.patch('importlib.import_module')
def test_opts_import_invalid_module(self, mock_import_module):
mock_import_module.return_value = None
self.assertRaisesRegex(
Exception,
"The module 'designate.conf.invalid' should have a 'list_opts' "
"function which returns the config options.",
opts._import_modules, ['invalid']
)
| openstack/designate | designate/tests/unit/test_conf.py | Python | apache-2.0 | 1,855 |
## func
##
## Copyright 2007, Red Hat, Inc
## See AUTHORS
##
## This software may be freely redistributed under the terms of the GNU
## general public license.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
##
##
import distutils.sysconfig
import os
import sys
import traceback
import inspect
from gettext import gettext
import fnmatch
_ = gettext
from func import logger
from inspect import isclass
from func.minion.modules import func_module
from func.utils import is_public_valid_method
def module_walker(topdir):
module_files = []
for root, dirs, files in os.walk(topdir):
# we should get here for each subdir
for filename in files:
# ASSUMPTION: all module files will end with .py, .pyc, .pyo
if filename[-3:] == ".py" or filename[-4:] == ".pyc" or filename[-4:] == ".pyo":
# the normpath is important, since we eventually replace /'s with .'s
            # in the module name, and foo..bar doesn't work -akl
module_files.append(os.path.normpath("%s/%s" % (root, filename)))
return module_files
def load_methods(path, main_class, parent_class=None):
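    """Return a dict mapping 'module.method' names to the public methods
    found on every module loaded from `path` (see load_modules below)."""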
log = logger.Logger().logger
methods = {}
modules = load_modules(path, main_class, parent_class=parent_class)
for x in modules.keys():
for method in dir(modules[x]):
if is_public_valid_method(modules[x], method):
methods["%s.%s" % (x,method)]=getattr(modules[x], method)
return methods
def load_modules(path='func/minion/modules/', main_class=func_module.FuncModule,
blacklist=None, parent_class=None, module_list=[]):
log = logger.Logger().logger
python_path = distutils.sysconfig.get_python_lib()
module_file_path = "%s/%s" % (python_path, path)
(mod_path, mod_dir) = os.path.split(os.path.normpath(module_file_path))
mod_dir = "func."+module_file_path[len(python_path+'/func/'):].replace("/",".")
sys.path.insert(0, mod_path)
mods = {}
bad_mods = {}
filenames = module_walker(module_file_path)
# FIXME: this is probably more complicated than it needs to be -akl
for fn in filenames:
# aka, everything after the module_file_path
module_name_part = fn[len(module_file_path):]
dirname, basename = os.path.split(module_name_part)
if basename[:8] == "__init__":
modname = dirname
dirname = ""
elif basename[-3:] == ".py":
modname = basename[:-3]
elif basename[-4:] in [".pyc", ".pyo"]:
modname = basename[:-4]
pathname = modname
if dirname != "":
pathname = "%s/%s" % (dirname, modname)
mod_imp_name = pathname.replace("/", ".")
if module_list: # only do this if we have a module list at all, otherwise everything comes in
matched = False
for match in module_list:
if fnmatch.fnmatch(mod_imp_name, match):
matched = True
if not matched: # if we are not matched against anything in the module_list then skip it
continue
if mods.has_key(mod_imp_name):
# If we've already imported mod_imp_name, don't import it again
continue
# ignore modules that we've already determined aren't valid modules
if bad_mods.has_key(mod_imp_name):
continue
try:
# Auto-detect and load all FuncModules
blip = __import__("%s%s" % ( mod_dir,mod_imp_name), globals(), locals(), [mod_imp_name])
for obj in dir(blip):
attr = getattr(blip, obj)
if isclass(attr) and issubclass(attr, main_class):
log.debug("Loading %s module" % attr)
if parent_class:
mods[mod_imp_name] = attr(parent_class)
else:
mods[mod_imp_name] = attr()
except ImportError, e:
# A module that raises an ImportError is (for now) simply not loaded.
errmsg = _("Import error while loading %s module: %s")
log.warning(errmsg % (mod_imp_name, e))
etype, value, tb = sys.exc_info()
log.warning(traceback.format_exception(etype, value, tb))
bad_mods[mod_imp_name] = True
continue
except:
errmsg = _("Could not load %s module")
log.warning(errmsg % (mod_imp_name))
etype, value, tb = sys.exc_info()
log.warning(traceback.format_exception(etype, value, tb))
bad_mods[mod_imp_name] = True
continue
return mods
if __name__ == "__main__":
module_file_path = "/usr/lib/python2.5/site-packages/func/minion/modules/"
bar = module_walker(module_file_path)
print bar
for f in bar:
print f
print os.path.basename(f)
print os.path.split(f)
g = f[len(module_file_path):]
print g
print os.path.split(g)
print load_modules()
| dockerera/func | func/module_loader.py | Python | gpl-2.0 | 5,193 |
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 10 11:10:38 2015
@author: boland
"""
from obspy import read
import numpy as np
import itertools
import os
import datetime
import sqlite3 as lite
try:
import cPickle as pickle
except:
import pickle
print "Caution, database code may run slow because cPickle failed to import"
#from pysismo.psconfig import MSEED_DIR
multiprocess = False
if multiprocess:
import multiprocessing as mp
t_total0 = datetime.datetime.now()
#SCANNING FUNCTIONS
def paths_sort(path):
"""
Function defined for customised sorting of the abs_paths list
and will be used in conjunction with the sorted() built in python
function in order to produce file paths in chronological order.
"""
base_name = os.path.basename(path)
    stat_name, date = base_name.split('.')[0], base_name.split('.')[1]
try:
date = datetime.datetime.strptime(date, '%Y-%m-%d')
return date, stat_name
except Exception as e:
print(e)
def paths(folder_path, extension, sort=False):
"""
    Return a list of absolute paths (abs_paths) of all files under
    folder_path that carry the given extension; e.g. paths(folder_path,
    'txt') collects every .txt file. The search walks recursively through
    all subfolders.
"""
abs_paths = []
for root, dirs, files in os.walk(folder_path):
for f in files:
fullpath = os.path.join(root, f)
if os.path.splitext(fullpath)[1] == '.{}'.format(extension):
abs_paths.append(fullpath)
if sort:
abs_paths = sorted(abs_paths, key=paths_sort)
return abs_paths
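
# Illustrative call (added): collect all miniSEED files below a data folder,
# sorted chronologically via paths_sort. The folder name is an assumption.
#
# mseed_files = paths('/storage/ABE/borehole_data', 'mseed', sort=True)
# for p in mseed_files[:5]:
#     print p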
# set folder with all waveform files in it. Can be recursive!
#folder_path = 'small_borehole_quakes'
#folder_path = '/storage/ABE/borehole_data'
#folder_path = '/storage/ANT/INPUT/DATA/AGOS-FULL-DATA_LOWSAMPLE'
folder_path = 'small_borehole_quakes'
# set file extension
extensions = ['m', 'mseed', 'miniseed', 'MSEED']
# set desired component e.g. E, N or Z
abs_paths = []
for extension in extensions:
abs_paths.append(paths(folder_path, extension))
#['/home/boland/Dropbox/University/UniMelb/Research/SIMULATIONS/Triggers/chch_earthquake.MSEED']
#flatten the list of absolute paths containing all relevant data files
abs_paths = list(itertools.chain(*abs_paths))
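# Note (added): itertools.chain(*lists) flattens exactly one level, e.g.
# list(itertools.chain(*[[1, 2], [3]])) == [1, 2, 3]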
# initialise timeline dictionary, one key per station!
timeline = {}
if not os.path.exists('tmp'): os.mkdir('tmp')
LENGTH = len(abs_paths)
counter = 0
loop_times = []
try:
for path in abs_paths:
t0 = datetime.datetime.now()
        headers = read(path, headonly=True)
        # Stream.select() returns a new Stream; the original code discarded
        # the result, so the component filter never took effect
        headers = headers.select(component='Z')
        for trace in headers:
            code = '{}.{}'.format(trace.stats.network, trace.stats.station)
            starttime = trace.stats.starttime
            # obspy already computes the true end time of each trace
            endtime = trace.stats.endtime
            timeline.setdefault(code, []).append([starttime, endtime, path])
t1 = datetime.datetime.now()
#print 'time taken to process previous loop: ', t1-t0
loop_times.append((t1-t0).total_seconds())
avg_time = np.average(loop_times)
counter += 1
loops_remaining = LENGTH - counter
print "loops remaining: ", loops_remaining
print "time for previous loop was: ", t1-t0
#time_remaining = avg_time * loops_remaining
#mins_remaining = int(time_remaining / 60)
#seconds_left = time_remaining % 60
#print "estimated processing time remaining: %d mins %0.2f secs" \
#%(mins_remaining, seconds_left)
except Exception as error:
print error
#pool = mp.Pool(4)
#info = pool.map(scan_path, abs_paths)
#pool.close()
#pool.join()
# sort each station's trace list chronologically. Note: np.sort(..., axis=0)
# would sort every column independently, decoupling the start time, end time
# and path within a row, so sort whole rows by start time instead.
for key in timeline.keys():
    timeline[key] = np.asarray(sorted(timeline[key], key=lambda row: row[0]))
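
# Quick illustration (added) of the pitfall fixed above, with made-up rows:
#
# rows = [[3, 30, 'c'], [1, 10, 'a'], [2, 20, 'b']]
# np.sort(np.asarray(rows), axis=0)   # sorts each column alone: rows scrambled
# sorted(rows, key=lambda r: r[0])    # keeps every [start, end, path] row intact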
# =============================================================================
# USING SQL
# =============================================================================
# create database if it doesn't exist already, if it does, stop the programme.
database_name = 'timeline_database.db'
if os.path.exists(database_name):
raise Exception("The SQL database {} already exists, quitting programme."\
.format(database_name))
conn = lite.connect(database_name)
c = conn.cursor()
# Create table called timeline for timeline database
# the table will contain the station ID which is net.stat_name
# the start time and end time for each trace and the absolute file path loc
# the goal is to have one line per trace!
try:
c.execute('''CREATE TABLE timeline
(stat_name, starttime, endtime, file_path)''')
except Exception as error:
print error
for key in timeline.keys():
    try:
        stat_name = key
        for stat_info in timeline[key]:
            start, end, file_path = stat_info
            # parameterised query: sqlite3 does the quoting itself, so a
            # station name or file path containing a quote character cannot
            # break (or inject into) the SQL statement
            c.execute("INSERT INTO timeline VALUES (?, ?, ?, ?)",
                      (stat_name, str(start), str(end), file_path))
        # Save (commit) the changes once per station instead of per row
        conn.commit()
    except Exception as error:
        print error
for row in c.execute('SELECT * FROM timeline ORDER BY stat_name'):
print row
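
# Hedged example query (added): list the files holding traces for one station
# that start inside a window. The station code and dates are made-up
# assumptions; starttime is stored as text, so BETWEEN compares strings,
# which works here only because the timestamps are ISO-8601 formatted.
#
# for row in c.execute("SELECT file_path FROM timeline "
#                      "WHERE stat_name = ? AND starttime BETWEEN ? AND ?",
#                      ('AU.ABE', '2015-01-01', '2015-02-01')):
#     print row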
# =============================================================================
# USING PICKLE
# =============================================================================
#outfile = 'tmp/timeline_database.pickle'
#if not os.path.exists(outfile):
# with open(outfile, 'wb') as f:
# print "\nExporting new timeline database to: " + f.name
# pickle.dump(timeline, f, protocol=2)
#else:
# raise Exception("Could not create new pickle database as one already exists")
#t_total1 = datetime.datetime.now()
#print 'Total time taken to initialise timeline database for {} was: {}'.format(
#          os.path.basename(folder_path), t_total1-t_total0)
| boland1992/seissuite_iran | build/lib/ambient/database/create_database.py | Python | gpl-3.0 | 6,655 |
"""Help getting the etree.
This method is copied as suggested by lmxl.
http://lxml.de/tutorial.html
"""
try:
from lxml import etree
print("running with lxml.etree")
except ImportError:
try:
# Python 2.5
import xml.etree.cElementTree as etree
print("running with cElementTree on Python 2.5+")
except ImportError:
try:
# Python 2.5
import xml.etree.ElementTree as etree
print("running with ElementTree on Python 2.5+")
except ImportError:
try:
# normal cElementTree install
import cElementTree as etree
print("running with cElementTree")
except ImportError:
try:
# normal ElementTree install
import elementtree.ElementTree as etree
print("running with ElementTree")
except ImportError:
print("Failed to import ElementTree from any known place")
| zorion/acm-uva | jsonml/python/xmlhelper.py | Python | gpl-3.0 | 1,020 |