| code (string, lengths 3-1.05M) | repo_name (string, lengths 5-104) | path (string, lengths 4-251) | language (1 class) | license (15 classes) | size (int64, 3-1.05M) |
|---|---|---|---|---|---|
__all__ = ['VAR', 'JVAR']
class Demeter(object):
loves = ''
@property
def hates(self):
return self.loves.upper()
class Variable(object):
attr = 'value'
_attr2 = 'v2'
attr2 = property(lambda self: self._attr2,
lambda self, value: setattr(self, '_attr2', value.upper()))
demeter = Demeter()
@property
def not_settable(self):
return None
VAR = Variable()
try:
import JavaClass
except ImportError:
JVAR = None
else:
JVAR = JavaClass()
| dkentw/robotframework | atest/testdata/variables/extended_assign_vars.py | Python | apache-2.0 | 524 |
# -*- coding: utf-8 -*-
"""Library Tests
.. Note::
    - for safety, the test database is not dropped after the tests run; it must be dropped manually
"""
import unittest
import gzip
import json
import codecs
from mongoUtils.client import muClient
from mongoUtils.configuration import testDbConStr
from mongoUtils import _PATH_TO_DATA
from mongoUtils import importsExports, mapreduce, schema, helpers
from mongoUtils.aggregation import AggrCounts, Aggregation
from mongoUtils.tests.PubSubBench import ps_tests
try:
import xlrd
xlrd_installed = True
except ImportError:
xlrd_installed = False
def setUpStrEr(msg):
return "{}" "check your testDbConStr in configuration.py".format(msg)
class Test(unittest.TestCase):
def setUp(self):
try:
self.client = muClient(testDbConStr, connectTimeoutMS=5000, serverSelectionTimeoutMS=5000)
self.server_info = self.client.server_info()
self.db = self.client.get_default_database()
except Exception as e:
            self.fail(setUpStrEr("test setup Error " + str(e)))
def tearDown(self):
# self.db.drop_collections_startingwith(['muTest_'])
pass
def test_01_importsample(self):
        with gzip.open(_PATH_TO_DATA + "muTest_tweets.json.gz", 'rb') as fin:
reader = codecs.getreader("utf-8")
# read through reader for python3 see
# http://stackoverflow.com/questions/6862770/python-3-let-json-object-accept-bytes-or-let-urlopen-output-strings
tweets_sample = json.load(reader(fin))
self.db.drop_collections_startingwith(['muTest_', 'del_'])
# for i in tweets_sample:
# print i
# self.db.muTest_tweets1000.insert(i)
self.db.muTest_tweets.insert_many(tweets_sample)
self.db.muTest_tweets.create_index([('user.screen_name', 1)], background=True)
cnt = self.db.muTest_tweets.find().count()
        self.assertEqual(cnt, 1000, "only " + str(cnt) + " records written to db instead of 1000")
l = []
for i in self.db.muTest_tweets.find():
i['_id'] = i['user']['id_str']
if not i['_id'] in l:
l.append(i['_id'])
self.db.muTest_tweets_users.insert_one(i['user'])
@unittest.skipIf(xlrd_installed is False, 'pip install xlrd to test importing excel workbooks')
def test_02_imports_xlsx(self):
"""tests imports from xlsx workbooks and sheets"""
res = importsExports.ImportXls(_PATH_TO_DATA + "example_workbook.xlsx", "muTest_weather",
self.db, stats_every=0)()
self.assertEqual(res['rows'], 367, "can't import example_workbook")
res = importsExports.import_workbook(_PATH_TO_DATA + "example_workbook.xlsx", self.db, stats_every=0)
self.assertGreater(res[0]['rows'], 300, "can't import example_workbook")
def test_aggregation(self):
aggr_obj = Aggregation(self.db.muTest_tweets_users, allowDiskUse=True)
aggr_obj.match({'lang': 'en'})
aggr_obj.group({'_id': None, "avg_followers": {"$avg": "$followers_count"}})
res = next(aggr_obj())
self.assertAlmostEqual(res['avg_followers'], 2943.8, 1, "wrong aggregation average")
res = next(AggrCounts(self.db.muTest_tweets_users, "lang", sort={'count': -1})())
self.assertEqual(res['count'], 352, "wrong aggregation count")
def test_mapreduce(self):
res = mapreduce.group_counts(self.db.muTest_tweets_users, 'lang', out={"replace": "muTest_mr"}, verbose=0)
res00 = res[0].find(sort=[('value', -1)])[0]
self.assertAlmostEqual(res00['value'], 352, 1, "wrong map reduce value in replace")
res = mapreduce.group_counts(self.db.muTest_tweets_users, 'lang', out={"inline": 1}, verbose=0)
res00 = sorted(res[0], key=lambda x: x['value'])[-1]
self.assertAlmostEqual(res00['value'], 352, 1, "wrong map reduce value in inline")
res = mapreduce.mr2('Orphans', self.db.muTest_tweets, 'user.screen_name',
col_b=self.db.muTest_tweets_users, col_b_key='screen_name',
col_b_query={'screen_name': {'$ne': 'Albert000G'}}, verbose=0)
self.assertAlmostEqual(res[0].find({'value.b': 0}).count(), 1, "wrong map reduce count")
res = mapreduce.mr2('Join', self.db.muTest_tweets, 'user.screen_name', col_b=self.db.muTest_tweets_users,
col_b_key='screen_name', col_b_query={'screen_name': {'$ne': 'Albert000G'}}, verbose=0)
res = res[0].find_one({'value.b': None})['value']['a']['user']['screen_name']
self.assertEqual(res, 'Albert000G', "wrong aggregation mr2 result")
def test_schema(self):
r = schema.schema(self.db.muTest_tweets_users, meta=True, verbose=0)
fields = r[0][0].find()[0]['value']['fields']
self.assertTrue('_id' in fields, "_id not found in schema")
def test_helpers_coll_range(self):
"""get min max of a collection field"""
res = helpers.coll_range(self.db.muTest_tweets_users, 'id_str')
self.assertEqual(res[1], '999314042', "wrong coll_range results")
def test_helpers_coll_copy(self):
res = helpers.coll_copy(self.db.muTest_tweets, self.db['muTest_tweets_copy'],
create_indexes=True, dropTarget=True, write_options={}, verbose=0)
self.assertEqual(res.count(), self.db.muTest_tweets.count(), "error in coll_copy")
def test_helpers_coll_chunks(self):
"""guarantees that all documents are fetched and no overlaps occur
"""
doc_count = self.db.muTest_tweets.count()
res = helpers.coll_chunks(self.db.muTest_tweets, '_id', 0.3)
out_lst = []
for i in res:
out_lst.extend([i['_id'] for i in self.db.muTest_tweets.find(i[1], projection={})])
out_lst = set(out_lst)
self.assertEqual(len(out_lst), doc_count, "wrong coll_chunks ranges or overlaps occurred")
def test_pubsub(self):
res = ps_tests('speed', testDbConStr)
self.assertGreater(res.msgsPerSecPub, 1000, "message publishing too slow")
self.assertGreater(res.msgsPerSecSub, 1000, "message reading too slow")
res = ps_tests('speedThread', testDbConStr)
self.assertGreater(res.msgsPerSec, 500, "message absorption too slow")
self.assertEqual(res.msgs, 2000, "messages skipped")
if __name__ == "__main__":
unittest.main()
| nickmilon/mongoUtils | mongoUtils/tests/tests.py | Python | apache-2.0 | 6,489 |
#!/usr/bin/env python
#
# Copyright (c) 2012, Dave Eddy <[email protected]>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the project nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import cPickle
try:
import sqlite3
except ImportError:
print "[Warn] sqlite3 not found -- loading sqlite2"
from pysqlite2 import dbapi2 as sqlite3
class DatabaseSession:
"""
A class to access and modify a sqlite database.
"""
def __init__(self, database):
"""
Initialize the database session and create a `variable` table.
"""
self.db_conn = sqlite3.connect(database)
c = self.cursor()
c.execute('''CREATE TABLE IF NOT EXISTS variable
(name text NOT NULL DEFAULT '',
value text NOT NULL DEFAULT ''
)
''')
self.commit()
c.close()
def cursor(self):
"""
Returns a cursor to the database.
"""
return self.db_conn.cursor()
def commit(self):
"""
Commits the database.
"""
self.db_conn.commit()
def table_is_empty(self, table_name):
"""
Returns True if the table is empty.
"""
c = self.cursor()
c.execute("""SELECT 1 FROM %s LIMIT 1""" % table_name)
result = c.fetchone()
c.close()
if not result:
return True
return False
def variable_set(self, var_name, var_value):
"""
Save a variable in the database.
"""
#var_value = self.__convert_specials_to_strings(var_value)
c = self.cursor()
c.execute("""DELETE FROM variable WHERE name = ?""", [var_name])
c.execute("""INSERT INTO variable (name, value) VALUES (?, ?)""", [var_name, str(cPickle.dumps(var_value))])
self.commit()
c.close()
def variable_get(self, var_name, default_value=None):
"""
Retrieve a variable from the database.
"""
try:
c = self.cursor()
c.execute("""SELECT value FROM variable WHERE name = ?""", [var_name])
result = c.fetchone()[0]
c.close()
except:
c.close()
return default_value
return cPickle.loads(str(result))
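

if __name__ == '__main__':
    # Minimal usage sketch (illustrative only): it stores and reloads pickled
    # values through the `variable` table created above. The database path is
    # just an example; any writable location works.
    session = DatabaseSession('/tmp/example_settings.db')
    session.variable_set('volume', 0.75)
    session.variable_set('last_playlist', ['song-a', 'song-b'])
    print session.variable_get('volume')                  # -> 0.75
    print session.variable_get('missing_key', 'default')  # -> 'default'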
| bahamas10/Viridian | AmpacheTools/DatabaseSession.py | Python | bsd-3-clause | 3,664 |
# -*- coding: utf-8 -*-
"""Python library module for LSM6DS33 accelerometer and gyroscope.
This module for the Raspberry Pi computer helps interface the LSM6DS33
accelerometer and gyro. The library makes it easy to read the raw
accelerometer and gyro data through the I²C interface, and it also provides
methods for getting angular velocity and g forces. A brief usage sketch
appears at the end of this module.
The datasheet for the LSM6DS33 is available at
[https://www.pololu.com/file/download/LSM6DS33.pdf?file_id=0J1087]
"""
import math
from i2c import I2C
from time import sleep
from constants import *
class LSM6DS33(I2C):
""" Set up and access LSM6DS33 accelerometer and gyroscope.
"""
# Output registers used by the gyroscope
gyro_registers = [
LSM6DS33_OUTX_L_G, # low byte of X value
LSM6DS33_OUTX_H_G, # high byte of X value
LSM6DS33_OUTY_L_G, # low byte of Y value
LSM6DS33_OUTY_H_G, # high byte of Y value
LSM6DS33_OUTZ_L_G, # low byte of Z value
LSM6DS33_OUTZ_H_G, # high byte of Z value
]
# Output registers used by the accelerometer
accel_registers = [
LSM6DS33_OUTX_L_XL, # low byte of X value
LSM6DS33_OUTX_H_XL, # high byte of X value
LSM6DS33_OUTY_L_XL, # low byte of Y value
LSM6DS33_OUTY_H_XL, # high byte of Y value
LSM6DS33_OUTZ_L_XL, # low byte of Z value
LSM6DS33_OUTZ_H_XL, # high byte of Z value
]
def __init__(self, bus_id=1):
""" Set up I2C connection and initialize some flags and values.
"""
super(LSM6DS33, self).__init__(bus_id)
self.is_accel_enabled = False
self.is_gyro_enabled = False
self.is_gyro_calibrated = False
self.gyro_cal = [0, 0, 0]
self.is_accel_calibrated = False
self.accel_angle_cal = [0, 0]
def __del__(self):
""" Clean up."""
try:
# Power down accelerometer and gyro
            self.write_register(LSM6DS33_ADDR, LSM6DS33_CTRL1_XL, 0x00)
            self.write_register(LSM6DS33_ADDR, LSM6DS33_CTRL2_G, 0x00)
super(LSM6DS33, self).__del__()
print('Destroying')
except:
pass
def enable(self, accelerometer=True, gyroscope=True, calibration=True):
""" Enable and set up the given sensors in the IMU."""
if accelerometer:
# 1.66 kHz (high performance) / +/- 4g
# binary value -> 0b01011000, hex value -> 0x58
self.write_register(LSM6DS33_ADDR, LSM6DS33_CTRL1_XL, 0x58)
self.is_accel_enabled = True
if gyroscope:
# 208 Hz (high performance) / 1000 dps
# binary value -> 0b01011000, hex value -> 0x58
self.write_register(LSM6DS33_ADDR, LSM6DS33_CTRL2_G, 0x58)
self.is_gyro_enabled = True
if calibration:
self.calibrate()
self.is_gyro_calibrated = True
self.is_accel_calibrated = True
def calibrate(self, iterations=2000):
""" Calibrate the gyro's raw values."""
        print('Calibrating Gyro and Accelerometer...')
for i in range(iterations):
gyro_raw = self.get_gyroscope_raw()
accel_angles = self.get_accelerometer_angles()
self.gyro_cal[0] += gyro_raw[0]
self.gyro_cal[1] += gyro_raw[1]
self.gyro_cal[2] += gyro_raw[2]
self.accel_angle_cal[0] += accel_angles[0]
self.accel_angle_cal[1] += accel_angles[1]
sleep(0.004)
self.gyro_cal[0] /= iterations
self.gyro_cal[1] /= iterations
self.gyro_cal[2] /= iterations
self.accel_angle_cal[0] /= iterations
self.accel_angle_cal[1] /= iterations
print('Calibration Done')
def get_gyroscope_raw(self):
""" Return a 3D vector of raw gyro data.
"""
# Check if gyroscope has been enabled
if not self.is_gyro_enabled:
raise(Exception('Gyroscope is not enabled!'))
sensor_data = self.read_3d_sensor(LSM6DS33_ADDR, self.gyro_registers)
# Return the vector
if self.is_gyro_calibrated:
calibrated_gyro_data = sensor_data
calibrated_gyro_data[0] -= self.gyro_cal[0]
calibrated_gyro_data[1] -= self.gyro_cal[1]
calibrated_gyro_data[2] -= self.gyro_cal[2]
return calibrated_gyro_data
else:
return sensor_data
def get_gyro_angular_velocity(self):
""" Return a 3D vector of the angular velocity measured by the gyro
in degrees/second.
"""
# Check if gyroscope has been enabled
if not self.is_gyro_enabled:
raise(Exception('Gyroscope is not enabled!'))
# Check if gyroscope has been calibrated
if not self.is_gyro_calibrated:
raise(Exception('Gyroscope is not calibrated!'))
gyro_data = self.get_gyroscope_raw()
gyro_data[0] = (gyro_data[0] * GYRO_GAIN) / 1000
gyro_data[1] = (gyro_data[1] * GYRO_GAIN) / 1000
gyro_data[2] = (gyro_data[2] * GYRO_GAIN) / 1000
return gyro_data
def get_accelerometer_raw(self):
""" Return a 3D vector of raw accelerometer data.
"""
# Check if accelerometer has been enabled
if not self.is_accel_enabled:
raise(Exception('Accelerometer is not enabled!'))
return self.read_3d_sensor(LSM6DS33_ADDR, self.accel_registers)
def get_accelerometer_g_forces(self):
""" Return a 3D vector of the g forces measured by the accelerometer"""
[x_val, y_val, z_val] = self.get_accelerometer_raw()
x_val = (x_val * ACCEL_CONVERSION_FACTOR) / 1000
y_val = (y_val * ACCEL_CONVERSION_FACTOR) / 1000
z_val = (z_val * ACCEL_CONVERSION_FACTOR) / 1000
return [x_val, y_val, z_val]
def get_accelerometer_angles(self, round_digits=0):
""" Return a 2D vector of roll and pitch angles,
based on accelerometer g forces
"""
# Get raw accelerometer g forces
[acc_xg_force, acc_yg_force, acc_zg_force] = self.get_accelerometer_g_forces()
# Calculate angles
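        # Roll is the rotation about the X axis: atan2(ay, sqrt(ax^2 + az^2)).
        # Pitch is the rotation about the Y axis, negated: -atan2(ax, sqrt(ay^2 + az^2)).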
xz_dist = self._get_dist(acc_xg_force, acc_zg_force)
yz_dist = self._get_dist(acc_yg_force, acc_zg_force)
accel_roll_angle = math.degrees(math.atan2(acc_yg_force, xz_dist))
accel_pitch_angle = -math.degrees(math.atan2(acc_xg_force, yz_dist))
if self.is_accel_calibrated:
accel_roll_angle -= self.accel_angle_cal[0]
accel_pitch_angle -= self.accel_angle_cal[1]
if round_digits != 0:
return [round(accel_roll_angle, round_digits), round(accel_pitch_angle, round_digits)]
else:
return [accel_roll_angle, accel_pitch_angle]
else:
return [accel_roll_angle, accel_pitch_angle]
def _get_dist(self, a, b):
return math.sqrt((a * a) + (b * b))
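

if __name__ == '__main__':
    # Minimal usage sketch (illustrative only): it assumes an LSM6DS33 is wired
    # to I2C bus 1 of a Raspberry Pi and that the i2c/constants modules above
    # are importable. enable() with calibration=True blocks for several seconds
    # while it averages readings.
    imu = LSM6DS33(bus_id=1)
    imu.enable(accelerometer=True, gyroscope=True, calibration=True)
    for _ in range(5):
        print('angular velocity (dps): {}'.format(imu.get_gyro_angular_velocity()))
        print('g forces: {}'.format(imu.get_accelerometer_g_forces()))
        print('roll/pitch (deg): {}'.format(imu.get_accelerometer_angles(round_digits=2)))
        sleep(0.5)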
| SvetoslavKuzmanov/altimu10v5 | altimu10v5/lsm6ds33.py | Python | mit | 6,967 |
"""distutils.cygwinccompiler
Provides the CygwinCCompiler class, a subclass of UnixCCompiler that
handles the Cygwin port of the GNU C compiler to Windows. It also contains
the Mingw32CCompiler class which handles the mingw32 port of GCC (same as
cygwin in no-cygwin mode).
"""
# problems:
#
# * if you use a msvc compiled python version (1.5.2)
# 1. you have to insert a __GNUC__ section in its config.h
#   2. you have to generate an import library for its dll
#      - create a def-file for python??.dll
#      - create an import library using
# dlltool --dllname python15.dll --def python15.def \
# --output-lib libpython15.a
#
# see also http://starship.python.net/crew/kernr/mingw32/Notes.html
#
# * We put export_symbols in a def-file, and don't use
#   --export-all-symbols because it didn't work reliably in some
#   tested configurations. And because other windows compilers also
#   need their symbols specified, this is no serious problem.
#
# tested configurations:
#
# * cygwin gcc 2.91.57/ld 2.9.4/dllwrap 0.2.4 works
# (after patching python's config.h and for C++ some other include files)
# see also http://starship.python.net/crew/kernr/mingw32/Notes.html
# * mingw32 gcc 2.95.2/ld 2.9.4/dllwrap 0.2.4 works
# (ld doesn't support -shared, so we use dllwrap)
# * cygwin gcc 2.95.2/ld 2.10.90/dllwrap 2.10.90 works now
# - its dllwrap doesn't work, there is a bug in binutils 2.10.90
# see also http://sources.redhat.com/ml/cygwin/2000-06/msg01274.html
#   - using gcc -mdll instead of dllwrap doesn't work without -static because
#     it tries to link against dlls instead of their import libraries. (If
#     it finds the dll first.)
#     By specifying -static we force ld to link against the import libraries;
#     this is the windows standard, and the dlls normally do not contain the
#     necessary symbols.
# *** only the version of June 2000 shows these problems
# * cygwin gcc 3.2/ld 2.13.90 works
# (ld supports -shared)
# * mingw gcc 3.2/ld 2.13 works
# (ld supports -shared)
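#
# Typical use (for illustration): distutils selects one of these compiler
# classes when an extension is built with, e.g.
#   python setup.py build_ext --compiler=cygwin
# or
#   python setup.py build_ext --compiler=mingw32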
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: cygwinccompiler.py 37828 2004-11-10 22:23:15Z loewis $"
import os,sys,copy
from distutils.ccompiler import gen_preprocess_options, gen_lib_options
from distutils.unixccompiler import UnixCCompiler
from distutils.file_util import write_file
from distutils.errors import DistutilsExecError, CompileError, UnknownFileError
from distutils import log
class CygwinCCompiler (UnixCCompiler):
compiler_type = 'cygwin'
obj_extension = ".o"
static_lib_extension = ".a"
shared_lib_extension = ".dll"
static_lib_format = "lib%s%s"
shared_lib_format = "%s%s"
exe_extension = ".exe"
def __init__ (self, verbose=0, dry_run=0, force=0):
UnixCCompiler.__init__ (self, verbose, dry_run, force)
(status, details) = check_config_h()
self.debug_print("Python's GCC status: %s (details: %s)" %
(status, details))
if status is not CONFIG_H_OK:
self.warn(
"Python's pyconfig.h doesn't seem to support your compiler. "
"Reason: %s. "
"Compiling may fail because of undefined preprocessor macros."
% details)
self.gcc_version, self.ld_version, self.dllwrap_version = \
get_versions()
self.debug_print(self.compiler_type + ": gcc %s, ld %s, dllwrap %s\n" %
(self.gcc_version,
self.ld_version,
self.dllwrap_version) )
# ld_version >= "2.10.90" and < "2.13" should also be able to use
# gcc -mdll instead of dllwrap
        # Older dllwraps had their own version numbers, newer ones use the
# same as the rest of binutils ( also ld )
# dllwrap 2.10.90 is buggy
if self.ld_version >= "2.10.90":
self.linker_dll = "gcc"
else:
self.linker_dll = "dllwrap"
# ld_version >= "2.13" support -shared so use it instead of
# -mdll -static
if self.ld_version >= "2.13":
shared_option = "-shared"
else:
shared_option = "-mdll -static"
# Hard-code GCC because that's what this is all about.
# XXX optimization, warnings etc. should be customizable.
self.set_executables(compiler='gcc -mcygwin -O -Wall',
compiler_so='gcc -mcygwin -mdll -O -Wall',
compiler_cxx='g++ -mcygwin -O -Wall',
linker_exe='gcc -mcygwin',
linker_so=('%s -mcygwin %s' %
(self.linker_dll, shared_option)))
# cygwin and mingw32 need different sets of libraries
if self.gcc_version == "2.91.57":
            # cygwin shouldn't need msvcrt, but without it the dlls will crash
# (gcc version 2.91.57) -- perhaps something about initialization
self.dll_libraries=["msvcrt"]
self.warn(
"Consider upgrading to a newer version of gcc")
else:
self.dll_libraries=[]
# Include the appropriate MSVC runtime library if Python was built
# with MSVC 7.0 or 7.1.
msc_pos = sys.version.find('MSC v.')
if msc_pos != -1:
msc_ver = sys.version[msc_pos+6:msc_pos+10]
if msc_ver == '1300':
# MSVC 7.0
self.dll_libraries = ['msvcr70']
elif msc_ver == '1310':
# MSVC 7.1
self.dll_libraries = ['msvcr71']
# __init__ ()
def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
if ext == '.rc' or ext == '.res':
# gcc needs '.res' and '.rc' compiled to object files !!!
try:
self.spawn(["windres", "-i", src, "-o", obj])
except DistutilsExecError, msg:
raise CompileError, msg
else: # for other files use the C-compiler
try:
self.spawn(self.compiler_so + cc_args + [src, '-o', obj] +
extra_postargs)
except DistutilsExecError, msg:
raise CompileError, msg
def link (self,
target_desc,
objects,
output_filename,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
export_symbols=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
build_temp=None,
target_lang=None):
# use separate copies, so we can modify the lists
extra_preargs = copy.copy(extra_preargs or [])
libraries = copy.copy(libraries or [])
objects = copy.copy(objects or [])
# Additional libraries
libraries.extend(self.dll_libraries)
# handle export symbols by creating a def-file
# with executables this only works with gcc/ld as linker
if ((export_symbols is not None) and
(target_desc != self.EXECUTABLE or self.linker_dll == "gcc")):
# (The linker doesn't do anything if output is up-to-date.
            # So it would probably be better to check if we really need this,
# but for this we had to insert some unchanged parts of
# UnixCCompiler, and this is not what we want.)
# we want to put some files in the same directory as the
# object files are, build_temp doesn't help much
# where are the object files
temp_dir = os.path.dirname(objects[0])
# name of dll to give the helper files the same base name
(dll_name, dll_extension) = os.path.splitext(
os.path.basename(output_filename))
# generate the filenames for these files
def_file = os.path.join(temp_dir, dll_name + ".def")
lib_file = os.path.join(temp_dir, 'lib' + dll_name + ".a")
# Generate .def file
contents = [
"LIBRARY %s" % os.path.basename(output_filename),
"EXPORTS"]
for sym in export_symbols:
contents.append(sym)
self.execute(write_file, (def_file, contents),
"writing %s" % def_file)
# next add options for def-file and to creating import libraries
# dllwrap uses different options than gcc/ld
if self.linker_dll == "dllwrap":
extra_preargs.extend(["--output-lib", lib_file])
# for dllwrap we have to use a special option
extra_preargs.extend(["--def", def_file])
# we use gcc/ld here and can be sure ld is >= 2.9.10
else:
# doesn't work: bfd_close build\...\libfoo.a: Invalid operation
#extra_preargs.extend(["-Wl,--out-implib,%s" % lib_file])
# for gcc/ld the def-file is specified as any object files
objects.append(def_file)
#end: if ((export_symbols is not None) and
# (target_desc != self.EXECUTABLE or self.linker_dll == "gcc")):
# who wants symbols and a many times larger output file
# should explicitly switch the debug mode on
# otherwise we let dllwrap/ld strip the output file
# (On my machine: 10KB < stripped_file < ??100KB
# unstripped_file = stripped_file + XXX KB
# ( XXX=254 for a typical python extension))
if not debug:
extra_preargs.append("-s")
UnixCCompiler.link(self,
target_desc,
objects,
output_filename,
output_dir,
libraries,
library_dirs,
runtime_library_dirs,
None, # export_symbols, we do this in our def-file
debug,
extra_preargs,
extra_postargs,
build_temp,
target_lang)
# link ()
# -- Miscellaneous methods -----------------------------------------
# overwrite the one from CCompiler to support rc and res-files
def object_filenames (self,
source_filenames,
strip_dir=0,
output_dir=''):
if output_dir is None: output_dir = ''
obj_names = []
for src_name in source_filenames:
# use normcase to make sure '.rc' is really '.rc' and not '.RC'
(base, ext) = os.path.splitext (os.path.normcase(src_name))
if ext not in (self.src_extensions + ['.rc','.res']):
raise UnknownFileError, \
"unknown file type '%s' (from '%s')" % \
(ext, src_name)
if strip_dir:
base = os.path.basename (base)
if ext == '.res' or ext == '.rc':
# these need to be compiled to object files
obj_names.append (os.path.join (output_dir,
base + ext + self.obj_extension))
else:
obj_names.append (os.path.join (output_dir,
base + self.obj_extension))
return obj_names
# object_filenames ()
# class CygwinCCompiler
# the same as cygwin plus some additional parameters
class Mingw32CCompiler (CygwinCCompiler):
compiler_type = 'mingw32'
def __init__ (self,
verbose=0,
dry_run=0,
force=0):
CygwinCCompiler.__init__ (self, verbose, dry_run, force)
# ld_version >= "2.13" support -shared so use it instead of
# -mdll -static
if self.ld_version >= "2.13":
shared_option = "-shared"
else:
shared_option = "-mdll -static"
# A real mingw32 doesn't need to specify a different entry point,
# but cygwin 2.91.57 in no-cygwin-mode needs it.
if self.gcc_version <= "2.91.57":
entry_point = '--entry _DllMain@12'
else:
entry_point = ''
self.set_executables(compiler='gcc -mno-cygwin -O -Wall',
compiler_so='gcc -mno-cygwin -mdll -O -Wall',
compiler_cxx='g++ -mno-cygwin -O -Wall',
linker_exe='gcc -mno-cygwin',
linker_so='%s -mno-cygwin %s %s'
% (self.linker_dll, shared_option,
entry_point))
# Maybe we should also append -mthreads, but then the finished
# dlls need another dll (mingwm10.dll see Mingw32 docs)
# (-mthreads: Support thread-safe exception handling on `Mingw32')
# no additional libraries needed
self.dll_libraries=[]
# Include the appropriate MSVC runtime library if Python was built
# with MSVC 7.0 or 7.1.
msc_pos = sys.version.find('MSC v.')
if msc_pos != -1:
msc_ver = sys.version[msc_pos+6:msc_pos+10]
if msc_ver == '1300':
# MSVC 7.0
self.dll_libraries = ['msvcr70']
elif msc_ver == '1310':
# MSVC 7.1
self.dll_libraries = ['msvcr71']
# __init__ ()
# class Mingw32CCompiler
# Because these compilers aren't configured in Python's pyconfig.h file by
# default, we should at least warn the user if he is using an unmodified
# version.
CONFIG_H_OK = "ok"
CONFIG_H_NOTOK = "not ok"
CONFIG_H_UNCERTAIN = "uncertain"
def check_config_h():
"""Check if the current Python installation (specifically, pyconfig.h)
appears amenable to building extensions with GCC. Returns a tuple
(status, details), where 'status' is one of the following constants:
CONFIG_H_OK
all is well, go ahead and compile
CONFIG_H_NOTOK
doesn't look good
CONFIG_H_UNCERTAIN
not sure -- unable to read pyconfig.h
'details' is a human-readable string explaining the situation.
Note there are two ways to conclude "OK": either 'sys.version' contains
the string "GCC" (implying that this Python was built with GCC), or the
installed "pyconfig.h" contains the string "__GNUC__".
"""
# XXX since this function also checks sys.version, it's not strictly a
# "pyconfig.h" check -- should probably be renamed...
from distutils import sysconfig
import string
# if sys.version contains GCC then python was compiled with
# GCC, and the pyconfig.h file should be OK
if string.find(sys.version,"GCC") >= 0:
return (CONFIG_H_OK, "sys.version mentions 'GCC'")
fn = sysconfig.get_config_h_filename()
try:
        # It would probably be better to read single lines to search.
# But we do this only once, and it is fast enough
f = open(fn)
s = f.read()
f.close()
except IOError, exc:
# if we can't read this file, we cannot say it is wrong
# the compiler will complain later about this file as missing
return (CONFIG_H_UNCERTAIN,
"couldn't read '%s': %s" % (fn, exc.strerror))
else:
# "pyconfig.h" contains an "#ifdef __GNUC__" or something similar
if string.find(s,"__GNUC__") >= 0:
return (CONFIG_H_OK, "'%s' mentions '__GNUC__'" % fn)
else:
return (CONFIG_H_NOTOK, "'%s' does not mention '__GNUC__'" % fn)
def get_versions():
""" Try to find out the versions of gcc, ld and dllwrap.
        If a version cannot be determined, None is returned for it.
"""
from distutils.version import StrictVersion
from distutils.spawn import find_executable
import re
gcc_exe = find_executable('gcc')
if gcc_exe:
out = os.popen(gcc_exe + ' -dumpversion','r')
out_string = out.read()
out.close()
result = re.search('(\d+\.\d+(\.\d+)*)',out_string)
if result:
gcc_version = StrictVersion(result.group(1))
else:
gcc_version = None
else:
gcc_version = None
ld_exe = find_executable('ld')
if ld_exe:
out = os.popen(ld_exe + ' -v','r')
out_string = out.read()
out.close()
result = re.search('(\d+\.\d+(\.\d+)*)',out_string)
if result:
ld_version = StrictVersion(result.group(1))
else:
ld_version = None
else:
ld_version = None
dllwrap_exe = find_executable('dllwrap')
if dllwrap_exe:
out = os.popen(dllwrap_exe + ' --version','r')
out_string = out.read()
out.close()
result = re.search(' (\d+\.\d+(\.\d+)*)',out_string)
if result:
dllwrap_version = StrictVersion(result.group(1))
else:
dllwrap_version = None
else:
dllwrap_version = None
return (gcc_version, ld_version, dllwrap_version)
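

if __name__ == '__main__':
    # Small illustrative self-check (an assumption for demonstration, not part
    # of distutils proper): report what check_config_h() and get_versions()
    # find on this system.
    chk_status, chk_details = check_config_h()
    print "pyconfig.h status: %s (%s)" % (chk_status, chk_details)
    print "gcc/ld/dllwrap versions: %s, %s, %s" % get_versions()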
| xbmc/atv2 | xbmc/lib/libPython/Python/Lib/distutils/cygwinccompiler.py | Python | gpl-2.0 | 17,279 |
"""
Tests for the logic in input type mako templates.
"""
from __future__ import absolute_import
import json
import unittest
from collections import OrderedDict
from lxml import etree
from mako import exceptions
from six.moves import range
from capa.inputtypes import Status
from capa.tests.helpers import capa_render_template
from openedx.core.djangolib.markup import HTML
from xmodule.stringify import stringify_children
class TemplateError(Exception):
"""
Error occurred while rendering a Mako template.
"""
pass
class TemplateTestCase(unittest.TestCase):
"""
Utilities for testing templates.
"""
# Subclasses override this to specify the file name of the template
# to be loaded from capa/templates.
# The template name should include the .html extension:
# for example: choicegroup.html
TEMPLATE_NAME = None
DESCRIBEDBY = 'aria-describedby="desc-1 desc-2"'
DESCRIPTIONS = OrderedDict(
[
('desc-1', 'description text 1'),
('desc-2', '<em>description</em> <mark>text</mark> 2')
]
)
DESCRIPTION_IDS = ' '.join(list(DESCRIPTIONS.keys()))
RESPONSE_DATA = {
'label': 'question text 101',
'descriptions': DESCRIPTIONS
}
def setUp(self):
"""
Initialize the context.
"""
super(TemplateTestCase, self).setUp()
self.context = {}
def render_to_xml(self, context_dict):
"""
Render the template using the `context_dict` dict.
Returns an `etree` XML element.
"""
# add dummy STATIC_URL to template context
context_dict.setdefault("STATIC_URL", "/dummy-static/")
try:
xml_str = capa_render_template(self.TEMPLATE_NAME, context_dict)
except:
raise TemplateError(exceptions.text_error_template().render())
# Attempt to construct an XML tree from the template
# This makes it easy to use XPath to make assertions, rather
# than dealing with a string.
# We modify the string slightly by wrapping it in <test>
# tags, to ensure it has one root element.
try:
xml = etree.fromstring("<test>" + xml_str + "</test>")
except Exception as exc:
raise TemplateError("Could not parse XML from '{0}': {1}".format(
xml_str, str(exc)))
else:
return xml
def assert_has_xpath(self, xml_root, xpath, context_dict, exact_num=1):
"""
Asserts that the xml tree has an element satisfying `xpath`.
`xml_root` is an etree XML element
`xpath` is an XPath string, such as `'/foo/bar'`
`context` is used to print a debugging message
`exact_num` is the exact number of matches to expect.
"""
message = ("XML does not have %d match(es) for xpath '%s'\nXML: %s\nContext: %s"
% (exact_num, str(xpath), etree.tostring(xml_root), str(context_dict)))
self.assertEqual(len(xml_root.xpath(xpath)), exact_num, msg=message)
def assert_no_xpath(self, xml_root, xpath, context_dict):
"""
Asserts that the xml tree does NOT have an element
satisfying `xpath`.
`xml_root` is an etree XML element
`xpath` is an XPath string, such as `'/foo/bar'`
`context` is used to print a debugging message
"""
self.assert_has_xpath(xml_root, xpath, context_dict, exact_num=0)
def assert_has_text(self, xml_root, xpath, text, exact=True):
"""
Find the element at `xpath` in `xml_root` and assert
that its text is `text`.
`xml_root` is an etree XML element
`xpath` is an XPath string, such as `'/foo/bar'`
`text` is the expected text that the element should contain
If multiple elements are found, checks the first one.
If no elements are found, the assertion fails.
"""
element_list = xml_root.xpath(xpath)
self.assertGreater(len(element_list), 0, "Could not find element at '%s'\n%s" %
(str(xpath), etree.tostring(xml_root)))
if exact:
self.assertEqual(text, element_list[0].text.strip())
else:
self.assertIn(text, element_list[0].text.strip())
def assert_description(self, describedby_xpaths):
"""
Verify that descriptions information is correct.
Arguments:
describedby_xpaths (list): list of xpaths to check aria-describedby attribute
"""
xml = self.render_to_xml(self.context)
# Verify that each description <p> tag has correct id, text and order
descriptions = OrderedDict(
(tag.get('id'), stringify_children(tag)) for tag in xml.xpath('//p[@class="question-description"]')
)
self.assertEqual(self.DESCRIPTIONS, descriptions)
# for each xpath verify that description_ids are set correctly
for describedby_xpath in describedby_xpaths:
describedbys = xml.xpath(describedby_xpath)
# aria-describedby attributes must have ids
self.assertTrue(describedbys)
for describedby in describedbys:
self.assertEqual(describedby, self.DESCRIPTION_IDS)
def assert_describedby_attribute(self, describedby_xpaths):
"""
Verify that an element has no aria-describedby attribute if there are no descriptions.
Arguments:
describedby_xpaths (list): list of xpaths to check aria-describedby attribute
"""
self.context['describedby_html'] = ''
xml = self.render_to_xml(self.context)
# for each xpath verify that description_ids are set correctly
for describedby_xpath in describedby_xpaths:
describedbys = xml.xpath(describedby_xpath)
self.assertFalse(describedbys)
def assert_status(self, status_div=False, status_class=False):
"""
Verify status information.
Arguments:
status_div (bool): check presence of status div
status_class (bool): check presence of status class
"""
cases = [
('correct', 'correct'),
('unsubmitted', 'unanswered'),
('submitted', 'submitted'),
('incorrect', 'incorrect'),
('incomplete', 'incorrect')
]
for context_status, div_class in cases:
self.context['status'] = Status(context_status)
xml = self.render_to_xml(self.context)
# Expect that we get a <div> with correct class
if status_div:
xpath = "//div[normalize-space(@class)='%s']" % div_class
self.assert_has_xpath(xml, xpath, self.context)
# Expect that we get a <span> with class="status"
# (used to by CSS to draw the green check / red x)
self.assert_has_text(
xml,
"//span[@class='status {}']/span[@class='sr']".format(
div_class if status_class else ''
),
self.context['status'].display_name
)
def assert_label(self, xpath=None, aria_label=False):
"""
Verify label is rendered correctly.
Arguments:
xpath (str): xpath expression for label element
aria_label (bool): check aria-label attribute value
"""
labels = [
{
'actual': "You see, but you do not observe. The distinction is clear.",
'expected': "You see, but you do not observe. The distinction is clear.",
},
{
'actual': "I choose to have <mark>faith</mark> because without that, I have <em>nothing</em>.",
'expected': "I choose to have faith because without that, I have nothing.",
}
]
response_data = {
'response_data': {
'descriptions': {},
'label': ''
}
}
self.context.update(response_data)
for label in labels:
self.context['response_data']['label'] = label['actual']
xml = self.render_to_xml(self.context)
if aria_label:
self.assert_has_xpath(xml, "//*[@aria-label='%s']" % label['expected'], self.context)
else:
element_list = xml.xpath(xpath)
self.assertEqual(len(element_list), 1)
self.assertEqual(stringify_children(element_list[0]), label['actual'])
class ChoiceGroupTemplateTest(TemplateTestCase):
"""
Test mako template for `<choicegroup>` input.
"""
TEMPLATE_NAME = 'choicegroup.html'
def setUp(self):
super(ChoiceGroupTemplateTest, self).setUp()
choices = [('1', 'choice 1'), ('2', 'choice 2'), ('3', 'choice 3')]
self.context = {
'id': '1',
'choices': choices,
'status': Status('correct'),
'input_type': 'checkbox',
'name_array_suffix': '1',
'value': '3',
'response_data': self.RESPONSE_DATA,
'describedby_html': HTML(self.DESCRIBEDBY),
}
def test_problem_marked_correct(self):
"""
Test conditions under which the entire problem
(not a particular option) is marked correct.
"""
self.context['status'] = Status('correct')
self.context['input_type'] = 'checkbox'
self.context['value'] = ['1', '2']
# Should mark the entire problem correct
xml = self.render_to_xml(self.context)
xpath = "//div[@class='indicator-container']/span[@class='status correct']"
self.assert_has_xpath(xml, xpath, self.context)
# Should NOT mark individual options
self.assert_no_xpath(xml, "//label[@class='choicegroup_incorrect']",
self.context)
self.assert_no_xpath(xml, "//label[@class='choicegroup_correct']",
self.context)
def test_problem_marked_incorrect(self):
"""
Test all conditions under which the entire problem
(not a particular option) is marked incorrect.
"""
conditions = [
{'status': Status('incorrect'), 'input_type': 'checkbox', 'value': []},
{'status': Status('incorrect'), 'input_type': 'checkbox', 'value': ['2']},
{'status': Status('incorrect'), 'input_type': 'checkbox', 'value': ['2', '3']},
{'status': Status('incomplete'), 'input_type': 'checkbox', 'value': []},
{'status': Status('incomplete'), 'input_type': 'checkbox', 'value': ['2']},
{'status': Status('incomplete'), 'input_type': 'checkbox', 'value': ['2', '3']}]
for test_conditions in conditions:
self.context.update(test_conditions)
xml = self.render_to_xml(self.context)
xpath = "//div[@class='indicator-container']/span[@class='status incorrect']"
self.assert_has_xpath(xml, xpath, self.context)
# Should NOT mark individual options
self.assert_no_xpath(xml,
"//label[@class='choicegroup_incorrect']",
self.context)
self.assert_no_xpath(xml,
"//label[@class='choicegroup_correct']",
self.context)
def test_problem_marked_unsubmitted(self):
"""
Test all conditions under which the entire problem
(not a particular option) is marked unanswered.
"""
conditions = [
{'status': Status('unsubmitted'), 'input_type': 'radio', 'value': ''},
{'status': Status('unsubmitted'), 'input_type': 'radio', 'value': []},
{'status': Status('unsubmitted'), 'input_type': 'checkbox', 'value': []},
{'input_type': 'radio', 'value': ''},
{'input_type': 'radio', 'value': []},
{'input_type': 'checkbox', 'value': []},
{'input_type': 'checkbox', 'value': ['1']},
{'input_type': 'checkbox', 'value': ['1', '2']}]
self.context['status'] = Status('unanswered')
for test_conditions in conditions:
self.context.update(test_conditions)
xml = self.render_to_xml(self.context)
xpath = "//div[@class='indicator-container']/span[@class='status unanswered']"
self.assert_has_xpath(xml, xpath, self.context)
# Should NOT mark individual options
self.assert_no_xpath(xml,
"//label[@class='choicegroup_incorrect']",
self.context)
self.assert_no_xpath(xml,
"//label[@class='choicegroup_correct']",
self.context)
def test_option_marked_correct(self):
"""
Test conditions under which a particular option
and the entire problem is marked correct.
"""
conditions = [
{'input_type': 'radio', 'value': '2'},
{'input_type': 'radio', 'value': ['2']}]
self.context['status'] = Status('correct')
for test_conditions in conditions:
self.context.update(test_conditions)
xml = self.render_to_xml(self.context)
xpath = "//label[contains(@class, 'choicegroup_correct')]"
self.assert_has_xpath(xml, xpath, self.context)
# Should also mark the whole problem
xpath = "//div[@class='indicator-container']/span[@class='status correct']"
self.assert_has_xpath(xml, xpath, self.context)
def test_option_marked_incorrect(self):
"""
Test conditions under which a particular option
and the entire problem is marked incorrect.
"""
conditions = [
{'input_type': 'radio', 'value': '2'},
{'input_type': 'radio', 'value': ['2']}]
self.context['status'] = Status('incorrect')
for test_conditions in conditions:
self.context.update(test_conditions)
xml = self.render_to_xml(self.context)
xpath = "//label[contains(@class, 'choicegroup_incorrect')]"
self.assert_has_xpath(xml, xpath, self.context)
# Should also mark the whole problem
xpath = "//div[@class='indicator-container']/span[@class='status incorrect']"
self.assert_has_xpath(xml, xpath, self.context)
def test_never_show_correctness(self):
"""
Test conditions under which we tell the template to
NOT show correct/incorrect, but instead show a message.
This is used, for example, by the Justice course to ask
questions without specifying a correct answer. When
the student responds, the problem displays "Thank you
for your response"
"""
conditions = [
{'input_type': 'radio', 'status': Status('correct'), 'value': ''},
{'input_type': 'radio', 'status': Status('correct'), 'value': '2'},
{'input_type': 'radio', 'status': Status('correct'), 'value': ['2']},
{'input_type': 'radio', 'status': Status('incorrect'), 'value': '2'},
{'input_type': 'radio', 'status': Status('incorrect'), 'value': []},
{'input_type': 'radio', 'status': Status('incorrect'), 'value': ['2']},
{'input_type': 'checkbox', 'status': Status('correct'), 'value': []},
{'input_type': 'checkbox', 'status': Status('correct'), 'value': ['2']},
{'input_type': 'checkbox', 'status': Status('incorrect'), 'value': []},
{'input_type': 'checkbox', 'status': Status('incorrect'), 'value': ['2']}]
self.context['show_correctness'] = 'never'
self.context['submitted_message'] = 'Test message'
for test_conditions in conditions:
self.context.update(test_conditions)
xml = self.render_to_xml(self.context)
# Should NOT mark the entire problem correct/incorrect
xpath = "//div[@class='indicator-container']/span[@class='status correct']"
self.assert_no_xpath(xml, xpath, self.context)
xpath = "//div[@class='indicator-container']/span[@class='status incorrect']"
self.assert_no_xpath(xml, xpath, self.context)
# Should NOT mark individual options
self.assert_no_xpath(xml,
"//label[@class='choicegroup_incorrect']",
self.context)
self.assert_no_xpath(xml,
"//label[@class='choicegroup_correct']",
self.context)
# Expect to see the message
self.assert_has_text(xml, "//div[@class='capa_alert']",
self.context['submitted_message'])
def test_no_message_before_submission(self):
"""
Ensure that we don't show the `submitted_message`
before submitting.
"""
conditions = [
{'input_type': 'radio', 'status': Status('unsubmitted'), 'value': ''},
{'input_type': 'radio', 'status': Status('unsubmitted'), 'value': []},
{'input_type': 'checkbox', 'status': Status('unsubmitted'), 'value': []},
# These tests expose bug #365
# When the bug is fixed, uncomment these cases.
#{'input_type': 'radio', 'status': 'unsubmitted', 'value': '2'},
#{'input_type': 'radio', 'status': 'unsubmitted', 'value': ['2']},
#{'input_type': 'radio', 'status': 'unsubmitted', 'value': '2'},
#{'input_type': 'radio', 'status': 'unsubmitted', 'value': ['2']},
#{'input_type': 'checkbox', 'status': 'unsubmitted', 'value': ['2']},
#{'input_type': 'checkbox', 'status': 'unsubmitted', 'value': ['2']}]
]
self.context['show_correctness'] = 'never'
self.context['submitted_message'] = 'Test message'
for test_conditions in conditions:
self.context.update(test_conditions)
xml = self.render_to_xml(self.context)
# Expect that we do NOT see the message yet
self.assert_no_xpath(xml, "//div[@class='capa_alert']", self.context)
def test_label(self):
"""
Verify label element value rendering.
"""
self.assert_label(xpath="//legend")
def test_description(self):
"""
Test that correct description information is set on desired elements.
"""
xpaths = ['//fieldset/@aria-describedby', '//label/@aria-describedby']
self.assert_description(xpaths)
self.assert_describedby_attribute(xpaths)
def test_status(self):
"""
Verify status information.
"""
self.assert_status(status_class=True)
class TextlineTemplateTest(TemplateTestCase):
"""
Test mako template for `<textline>` input.
"""
TEMPLATE_NAME = 'textline.html'
def setUp(self):
super(TextlineTemplateTest, self).setUp()
self.context = {
'id': '1',
'status': Status('correct'),
'value': '3',
'preprocessor': None,
'trailing_text': None,
'response_data': self.RESPONSE_DATA,
'describedby_html': HTML(self.DESCRIBEDBY),
}
def test_section_class(self):
cases = [({}, ' capa_inputtype textline'),
({'do_math': True}, 'text-input-dynamath capa_inputtype textline'),
({'inline': True}, ' capa_inputtype inline textline'),
({'do_math': True, 'inline': True}, 'text-input-dynamath capa_inputtype inline textline'), ]
for (context, css_class) in cases:
base_context = self.context.copy()
base_context.update(context)
xml = self.render_to_xml(base_context)
xpath = "//div[@class='%s']" % css_class
self.assert_has_xpath(xml, xpath, self.context)
def test_status(self):
"""
Verify status information.
"""
self.assert_status(status_class=True)
def test_label(self):
"""
Verify label element value rendering.
"""
self.assert_label(xpath="//label[@class='problem-group-label']")
def test_hidden(self):
self.context['hidden'] = True
xml = self.render_to_xml(self.context)
xpath = "//div[@style='display:none;']"
self.assert_has_xpath(xml, xpath, self.context)
xpath = "//input[@style='display:none;']"
self.assert_has_xpath(xml, xpath, self.context)
def test_do_math(self):
self.context['do_math'] = True
xml = self.render_to_xml(self.context)
xpath = "//input[@class='math']"
self.assert_has_xpath(xml, xpath, self.context)
xpath = "//div[@class='equation']"
self.assert_has_xpath(xml, xpath, self.context)
xpath = "//textarea[@id='input_1_dynamath']"
self.assert_has_xpath(xml, xpath, self.context)
def test_size(self):
self.context['size'] = '20'
xml = self.render_to_xml(self.context)
xpath = "//input[@size='20']"
self.assert_has_xpath(xml, xpath, self.context)
def test_preprocessor(self):
self.context['preprocessor'] = {'class_name': 'test_class',
'script_src': 'test_script'}
xml = self.render_to_xml(self.context)
xpath = "//div[contains(@class, 'text-input-dynamath_data') and @data-preprocessor='test_class']"
self.assert_has_xpath(xml, xpath, self.context)
xpath = "//div[@class='script_placeholder' and @data-src='test_script']"
self.assert_has_xpath(xml, xpath, self.context)
def test_do_inline_and_preprocessor(self):
self.context['preprocessor'] = {'class_name': 'test_class',
'script_src': 'test_script'}
self.context['inline'] = True
xml = self.render_to_xml(self.context)
xpath = "//div[contains(@class, 'text-input-dynamath_data inline') and @data-preprocessor='test_class']"
self.assert_has_xpath(xml, xpath, self.context)
def test_do_inline(self):
cases = [('correct', 'correct'),
('unsubmitted', 'unanswered'),
('incorrect', 'incorrect'),
('incomplete', 'incorrect')]
self.context['inline'] = True
for (context_status, div_class) in cases:
self.context['status'] = Status(context_status)
xml = self.render_to_xml(self.context)
# Expect that we get a <div> with correct class
xpath = "//div[@class='%s inline']" % div_class
self.assert_has_xpath(xml, xpath, self.context)
def test_message(self):
self.context['msg'] = "Test message"
xml = self.render_to_xml(self.context)
xpath = "//span[@class='message']"
self.assert_has_text(xml, xpath, self.context['msg'])
def test_description(self):
"""
Test that correct description information is set on desired elements.
"""
xpaths = ['//input/@aria-describedby']
self.assert_description(xpaths)
self.assert_describedby_attribute(xpaths)
class FormulaEquationInputTemplateTest(TemplateTestCase):
"""
    Test mako template for `<formulaequationinput>`s.
"""
TEMPLATE_NAME = 'formulaequationinput.html'
def setUp(self):
super(FormulaEquationInputTemplateTest, self).setUp()
self.context = {
'id': 2,
'value': 'PREFILLED_VALUE',
'status': Status('unsubmitted'),
'previewer': 'file.js',
'reported_status': 'REPORTED_STATUS',
'trailing_text': None,
'response_data': self.RESPONSE_DATA,
'describedby_html': HTML(self.DESCRIBEDBY),
}
def test_no_size(self):
xml = self.render_to_xml(self.context)
self.assert_no_xpath(xml, "//input[@size]", self.context)
def test_size(self):
self.context['size'] = '40'
xml = self.render_to_xml(self.context)
self.assert_has_xpath(xml, "//input[@size='40']", self.context)
def test_description(self):
"""
Test that correct description information is set on desired elements.
"""
xpaths = ['//input/@aria-describedby']
self.assert_description(xpaths)
self.assert_describedby_attribute(xpaths)
def test_status(self):
"""
Verify status information.
"""
self.assert_status(status_class=True)
def test_label(self):
"""
Verify label element value rendering.
"""
self.assert_label(xpath="//label[@class='problem-group-label']")
class AnnotationInputTemplateTest(TemplateTestCase):
"""
Test mako template for `<annotationinput>` input.
"""
TEMPLATE_NAME = 'annotationinput.html'
def setUp(self):
super(AnnotationInputTemplateTest, self).setUp()
self.context = {
'id': 2,
'value': '<p>Test value</p>',
'title': '<h1>This is a title</h1>',
'text': '<p><b>This</b> is a test.</p>',
'comment': '<p>This is a test comment</p>',
'comment_prompt': '<p>This is a test comment prompt</p>',
'comment_value': '<p>This is the value of a test comment</p>',
'tag_prompt': '<p>This is a tag prompt</p>',
'options': [],
'has_options_value': False,
'debug': False,
'status': Status('unsubmitted'),
'return_to_annotation': False,
'msg': '<p>This is a test message</p>',
}
def test_return_to_annotation(self):
"""
Test link for `Return to Annotation` appears if and only if
the flag is set.
"""
xpath = "//a[@class='annotation-return']"
# If return_to_annotation set, then show the link
self.context['return_to_annotation'] = True
xml = self.render_to_xml(self.context)
self.assert_has_xpath(xml, xpath, self.context)
# Otherwise, do not show the links
self.context['return_to_annotation'] = False
xml = self.render_to_xml(self.context)
self.assert_no_xpath(xml, xpath, self.context)
def test_option_selection(self):
"""
Test that selected options are selected.
"""
# Create options 0-4 and select option 2
self.context['options_value'] = [2]
self.context['options'] = [
{'id': id_num,
'choice': 'correct',
'description': '<p>Unescaped <b>HTML {0}</b></p>'.format(id_num)}
for id_num in range(5)]
xml = self.render_to_xml(self.context)
# Expect that each option description is visible
# with unescaped HTML.
# Since the HTML is unescaped, we can traverse the XML tree
for id_num in range(5):
xpath = "//span[@data-id='{0}']/p/b".format(id_num)
self.assert_has_text(xml, xpath, 'HTML {0}'.format(id_num), exact=False)
# Expect that the correct option is selected
xpath = "//span[contains(@class,'selected')]/p/b"
self.assert_has_text(xml, xpath, 'HTML 2', exact=False)
def test_submission_status(self):
"""
Test that the submission status displays correctly.
"""
# Test cases of `(input_status, expected_css_class)` tuples
test_cases = [('unsubmitted', 'unanswered'),
('incomplete', 'incorrect'),
('incorrect', 'incorrect')]
for (input_status, expected_css_class) in test_cases:
self.context['status'] = Status(input_status)
xml = self.render_to_xml(self.context)
xpath = "//span[@class='status {0}']".format(expected_css_class)
self.assert_has_xpath(xml, xpath, self.context)
# If individual options are being marked, then expect
# just the option to be marked incorrect, not the whole problem
self.context['has_options_value'] = True
self.context['status'] = Status('incorrect')
xpath = "//span[@class='incorrect']"
xml = self.render_to_xml(self.context)
self.assert_no_xpath(xml, xpath, self.context)
def test_display_html_comment(self):
"""
Test that HTML comment and comment prompt render.
"""
self.context['comment'] = "<p>Unescaped <b>comment HTML</b></p>"
self.context['comment_prompt'] = "<p>Prompt <b>prompt HTML</b></p>"
self.context['text'] = "<p>Unescaped <b>text</b></p>"
xml = self.render_to_xml(self.context)
# Because the HTML is unescaped, we should be able to
# descend to the <b> tag
xpath = "//div[@class='block']/p/b"
self.assert_has_text(xml, xpath, 'prompt HTML')
xpath = "//div[@class='block block-comment']/p/b"
self.assert_has_text(xml, xpath, 'comment HTML')
xpath = "//div[@class='block block-highlight']/p/b"
self.assert_has_text(xml, xpath, 'text')
def test_display_html_tag_prompt(self):
"""
Test that HTML tag prompts render.
"""
self.context['tag_prompt'] = "<p>Unescaped <b>HTML</b></p>"
xml = self.render_to_xml(self.context)
# Because the HTML is unescaped, we should be able to
# descend to the <b> tag
xpath = "//div[@class='block']/p/b"
self.assert_has_text(xml, xpath, 'HTML')
class MathStringTemplateTest(TemplateTestCase):
"""
Test mako template for `<mathstring>` input.
"""
TEMPLATE_NAME = 'mathstring.html'
def setUp(self):
super(MathStringTemplateTest, self).setUp()
self.context = {'isinline': False, 'mathstr': '', 'tail': ''}
def test_math_string_inline(self):
self.context['isinline'] = True
self.context['mathstr'] = 'y = ax^2 + bx + c'
xml = self.render_to_xml(self.context)
xpath = "//section[@class='math-string']/span[1]"
self.assert_has_text(xml, xpath,
'[mathjaxinline]y = ax^2 + bx + c[/mathjaxinline]')
def test_math_string_not_inline(self):
self.context['isinline'] = False
self.context['mathstr'] = 'y = ax^2 + bx + c'
xml = self.render_to_xml(self.context)
xpath = "//section[@class='math-string']/span[1]"
self.assert_has_text(xml, xpath,
'[mathjax]y = ax^2 + bx + c[/mathjax]')
def test_tail_html(self):
self.context['tail'] = "<p>This is some <b>tail</b> <em>HTML</em></p>"
xml = self.render_to_xml(self.context)
# HTML from `tail` should NOT be escaped.
# We should be able to traverse it as part of the XML tree
xpath = "//section[@class='math-string']/span[2]/p/b"
self.assert_has_text(xml, xpath, 'tail')
xpath = "//section[@class='math-string']/span[2]/p/em"
self.assert_has_text(xml, xpath, 'HTML')
class OptionInputTemplateTest(TemplateTestCase):
"""
Test mako template for `<optioninput>` input.
"""
TEMPLATE_NAME = 'optioninput.html'
def setUp(self):
super(OptionInputTemplateTest, self).setUp()
self.context = {
'id': 2,
'options': [],
'status': Status('unsubmitted'),
'value': 0,
'default_option_text': 'Select an option',
'response_data': self.RESPONSE_DATA,
'describedby_html': HTML(self.DESCRIBEDBY),
}
def test_select_options(self):
# Create options 0-4, and select option 2
self.context['options'] = [(id_num, 'Option {0}'.format(id_num))
for id_num in range(5)]
self.context['value'] = 2
xml = self.render_to_xml(self.context)
# Should have a dummy default
xpath = "//option[@value='option_2_dummy_default']"
self.assert_has_xpath(xml, xpath, self.context)
for id_num in range(5):
xpath = "//option[@value='{0}']".format(id_num)
self.assert_has_text(xml, xpath, 'Option {0}'.format(id_num))
# Should have the correct option selected
xpath = "//option[@selected='true']"
self.assert_has_text(xml, xpath, 'Option 2')
def test_status(self):
"""
Verify status information.
"""
self.assert_status(status_class=True)
def test_label(self):
"""
Verify label element value rendering.
"""
self.assert_label(xpath="//label[@class='problem-group-label']")
def test_description(self):
"""
Test that correct description information is set on desired elements.
"""
xpaths = ['//select/@aria-describedby']
self.assert_description(xpaths)
self.assert_describedby_attribute(xpaths)
class DragAndDropTemplateTest(TemplateTestCase):
"""
Test mako template for `<draganddropinput>` input.
"""
TEMPLATE_NAME = 'drag_and_drop_input.html'
def setUp(self):
super(DragAndDropTemplateTest, self).setUp()
self.context = {'id': 2,
'drag_and_drop_json': '',
'value': 0,
'status': Status('unsubmitted'),
'msg': ''}
def test_status(self):
# Test cases, where each tuple represents
# `(input_status, expected_css_class, expected_text)`
test_cases = [('unsubmitted', 'unanswered', 'unanswered'),
('correct', 'correct', 'correct'),
('incorrect', 'incorrect', 'incorrect'),
('incomplete', 'incorrect', 'incomplete')]
for (input_status, expected_css_class, expected_text) in test_cases:
self.context['status'] = Status(input_status)
xml = self.render_to_xml(self.context)
# Expect a <div> with the status
xpath = "//div[@class='{0}']".format(expected_css_class)
self.assert_has_xpath(xml, xpath, self.context)
# Expect a <span> with the status
xpath = "//span[@class='status {0}']/span[@class='sr']".format(expected_css_class)
self.assert_has_text(xml, xpath, expected_text, exact=False)
def test_drag_and_drop_json_html(self):
json_with_html = json.dumps({'test': '<p>Unescaped <b>HTML</b></p>'})
self.context['drag_and_drop_json'] = json_with_html
xml = self.render_to_xml(self.context)
# Assert that the JSON-encoded string was inserted without
# escaping the HTML. We should be able to traverse the XML tree.
xpath = "//div[@class='drag_and_drop_problem_json']/p/b"
self.assert_has_text(xml, xpath, 'HTML')
class ChoiceTextGroupTemplateTest(TemplateTestCase):
"""Test mako template for `<choicetextgroup>` input"""
TEMPLATE_NAME = 'choicetext.html'
VALUE_DICT = {'1_choiceinput_0bc': '1_choiceinput_0bc', '1_choiceinput_0_textinput_0': '0',
'1_choiceinput_1_textinput_0': '0'}
EMPTY_DICT = {'1_choiceinput_0_textinput_0': '',
'1_choiceinput_1_textinput_0': ''}
BOTH_CHOICE_CHECKBOX = {'1_choiceinput_0bc': 'choiceinput_0',
'1_choiceinput_1bc': 'choiceinput_1',
'1_choiceinput_0_textinput_0': '0',
'1_choiceinput_1_textinput_0': '0'}
WRONG_CHOICE_CHECKBOX = {'1_choiceinput_1bc': 'choiceinput_1',
'1_choiceinput_0_textinput_0': '0',
'1_choiceinput_1_textinput_0': '0'}
def setUp(self):
super(ChoiceTextGroupTemplateTest, self).setUp()
choices = [
(
'1_choiceinput_0bc',
[
{'tail_text': '', 'type': 'text', 'value': '', 'contents': ''},
{'tail_text': '', 'type': 'textinput', 'value': '', 'contents': 'choiceinput_0_textinput_0'},
]
),
(
'1_choiceinput_1bc',
[
{'tail_text': '', 'type': 'text', 'value': '', 'contents': ''},
{'tail_text': '', 'type': 'textinput', 'value': '', 'contents': 'choiceinput_1_textinput_0'},
]
)
]
self.context = {
'id': '1',
'choices': choices,
'status': Status('correct'),
'input_type': 'radio',
'value': self.VALUE_DICT,
'response_data': self.RESPONSE_DATA
}
def test_grouping_tag(self):
"""
Tests whether we are using a section or a label to wrap choice elements.
        Section is used for checkbox, so inputting text does not deselect the checkbox.
"""
input_tags = ('radio', 'checkbox')
self.context['status'] = Status('correct')
xpath = "//section[@id='forinput1_choiceinput_0bc']"
self.context['value'] = {}
for input_type in input_tags:
self.context['input_type'] = input_type
xml = self.render_to_xml(self.context)
self.assert_has_xpath(xml, xpath, self.context)
def test_problem_marked_correct(self):
"""Test conditions under which the entire problem
(not a particular option) is marked correct"""
self.context['status'] = Status('correct')
self.context['input_type'] = 'checkbox'
self.context['value'] = self.VALUE_DICT
# Should mark the entire problem correct
xml = self.render_to_xml(self.context)
xpath = "//div[@class='indicator-container']/span[@class='status correct']"
self.assert_has_xpath(xml, xpath, self.context)
# Should NOT mark individual options
self.assert_no_xpath(xml, "//label[@class='choicetextgroup_incorrect']",
self.context)
self.assert_no_xpath(xml, "//label[@class='choicetextgroup_correct']",
self.context)
def test_problem_marked_incorrect(self):
"""Test all conditions under which the entire problem
(not a particular option) is marked incorrect"""
grouping_tags = {'radio': 'label', 'checkbox': 'section'}
conditions = [
{'status': Status('incorrect'), 'input_type': 'radio', 'value': {}},
{'status': Status('incorrect'), 'input_type': 'checkbox', 'value': self.WRONG_CHOICE_CHECKBOX},
{'status': Status('incorrect'), 'input_type': 'checkbox', 'value': self.BOTH_CHOICE_CHECKBOX},
{'status': Status('incorrect'), 'input_type': 'checkbox', 'value': self.VALUE_DICT},
{'status': Status('incomplete'), 'input_type': 'radio', 'value': {}},
{'status': Status('incomplete'), 'input_type': 'checkbox', 'value': self.WRONG_CHOICE_CHECKBOX},
{'status': Status('incomplete'), 'input_type': 'checkbox', 'value': self.BOTH_CHOICE_CHECKBOX},
{'status': Status('incomplete'), 'input_type': 'checkbox', 'value': self.VALUE_DICT}]
for test_conditions in conditions:
self.context.update(test_conditions)
xml = self.render_to_xml(self.context)
xpath = "//div[@class='indicator-container']/span[@class='status incorrect']"
self.assert_has_xpath(xml, xpath, self.context)
# Should NOT mark individual options
grouping_tag = grouping_tags[test_conditions['input_type']]
self.assert_no_xpath(xml,
"//{0}[@class='choicetextgroup_incorrect']".format(grouping_tag),
self.context)
self.assert_no_xpath(xml,
"//{0}[@class='choicetextgroup_correct']".format(grouping_tag),
self.context)
def test_problem_marked_unsubmitted(self):
"""Test all conditions under which the entire problem
(not a particular option) is marked unanswered"""
grouping_tags = {'radio': 'label', 'checkbox': 'section'}
conditions = [
{'status': Status('unsubmitted'), 'input_type': 'radio', 'value': {}},
{'status': Status('unsubmitted'), 'input_type': 'radio', 'value': self.EMPTY_DICT},
{'status': Status('unsubmitted'), 'input_type': 'checkbox', 'value': {}},
{'status': Status('unsubmitted'), 'input_type': 'checkbox', 'value': self.EMPTY_DICT},
{'status': Status('unsubmitted'), 'input_type': 'checkbox', 'value': self.VALUE_DICT},
{'status': Status('unsubmitted'), 'input_type': 'checkbox', 'value': self.BOTH_CHOICE_CHECKBOX},
]
self.context['status'] = Status('unanswered')
for test_conditions in conditions:
self.context.update(test_conditions)
xml = self.render_to_xml(self.context)
xpath = "//div[@class='indicator-container']/span[@class='status unanswered']"
self.assert_has_xpath(xml, xpath, self.context)
# Should NOT mark individual options
grouping_tag = grouping_tags[test_conditions['input_type']]
self.assert_no_xpath(xml,
"//{0}[@class='choicetextgroup_incorrect']".format(grouping_tag),
self.context)
self.assert_no_xpath(xml,
"//{0}[@class='choicetextgroup_correct']".format(grouping_tag),
self.context)
def test_option_marked_correct(self):
"""Test conditions under which a particular option
(not the entire problem) is marked correct."""
conditions = [
{'input_type': 'radio', 'value': self.VALUE_DICT}]
self.context['status'] = Status('correct')
for test_conditions in conditions:
self.context.update(test_conditions)
xml = self.render_to_xml(self.context)
xpath = "//section[@id='forinput1_choiceinput_0bc' and\
@class='choicetextgroup_correct']"
self.assert_has_xpath(xml, xpath, self.context)
# Should NOT mark the whole problem
xpath = "//div[@class='indicator-container']/span"
self.assert_no_xpath(xml, xpath, self.context)
def test_option_marked_incorrect(self):
"""Test conditions under which a particular option
(not the entire problem) is marked incorrect."""
conditions = [
{'input_type': 'radio', 'value': self.VALUE_DICT}]
self.context['status'] = Status('incorrect')
for test_conditions in conditions:
self.context.update(test_conditions)
xml = self.render_to_xml(self.context)
xpath = "//section[@id='forinput1_choiceinput_0bc' and\
@class='choicetextgroup_incorrect']"
self.assert_has_xpath(xml, xpath, self.context)
# Should NOT mark the whole problem
xpath = "//div[@class='indicator-container']/span"
self.assert_no_xpath(xml, xpath, self.context)
def test_aria_label(self):
"""
Verify aria-label attribute rendering.
"""
self.assert_label(aria_label=True)
class ChemicalEquationTemplateTest(TemplateTestCase):
"""Test mako template for `<chemicalequationinput>` input"""
TEMPLATE_NAME = 'chemicalequationinput.html'
def setUp(self):
super(ChemicalEquationTemplateTest, self).setUp()
self.context = {
'id': '1',
'status': Status('correct'),
'previewer': 'dummy.js',
'value': '101',
}
def test_aria_label(self):
"""
Verify aria-label attribute rendering.
"""
self.assert_label(aria_label=True)
class SchematicInputTemplateTest(TemplateTestCase):
"""Test mako template for `<schematic>` input"""
TEMPLATE_NAME = 'schematicinput.html'
def setUp(self):
super(SchematicInputTemplateTest, self).setUp()
self.context = {
'id': '1',
'status': Status('correct'),
'previewer': 'dummy.js',
'value': '101',
'STATIC_URL': '/dummy-static/',
'msg': '',
'initial_value': 'two large batteries',
'width': '100',
'height': '100',
'parts': 'resistors, capacitors, and flowers',
'setup_script': '/dummy-static/js/capa/schematicinput.js',
'analyses': 'fast, slow, and pink',
'submit_analyses': 'maybe',
}
def test_aria_label(self):
"""
Verify aria-label attribute rendering.
"""
self.assert_label(aria_label=True)
class CodeinputTemplateTest(TemplateTestCase):
"""
Test mako template for `<textbox>` input
"""
TEMPLATE_NAME = 'codeinput.html'
def setUp(self):
super(CodeinputTemplateTest, self).setUp()
self.context = {
'id': '1',
'status': Status('correct'),
'mode': 'parrot',
'linenumbers': 'false',
'rows': '37',
'cols': '11',
'tabsize': '7',
'hidden': '',
'msg': '',
'value': 'print "good evening"',
'aria_label': 'python editor',
'code_mirror_exit_message': 'Press ESC then TAB or click outside of the code editor to exit',
'response_data': self.RESPONSE_DATA,
'describedby': HTML(self.DESCRIBEDBY),
}
def test_label(self):
"""
Verify question label is rendered correctly.
"""
self.assert_label(xpath="//label[@class='problem-group-label']")
def test_editor_exit_message(self):
"""
Verify that editor exit message is rendered.
"""
xml = self.render_to_xml(self.context)
self.assert_has_text(xml, '//span[@id="cm-editor-exit-message-1"]', self.context['code_mirror_exit_message'])
| ESOedX/edx-platform | common/lib/capa/capa/tests/test_input_templates.py | Python | agpl-3.0 | 46,422 |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, sys
sys.path.insert(0, os.getcwd())
import json
import platform
import subprocess
import sys
from glob import glob
VERSION = "v0.7"
import logging
logging.basicConfig(level=logging.INFO, format="[%(asctime)s %(filename)s:%(lineno)d %(levelname)s] %(message)s")
from code.common.system_list import system_list
def is_xavier():
return platform.processor() == "aarch64"
def check_mig_enabled(gpuid):
p = subprocess.Popen("nvidia-smi mig -lgi -i {gpu}".format(gpu=gpuid), universal_newlines=True, shell=True, stdout=subprocess.PIPE)
for line in p.stdout:
if "No MIG-enabled devices found" in line:
return False
return True
def get_mig_compute_instances(gpuid):
p = subprocess.Popen("nvidia-smi -L", universal_newlines=True, shell=True, stdout=subprocess.PIPE)
instances = []
for line in p.stdout:
toks = line.split()
if len(toks) == 6 and toks[1] != "MIG":
instances.append(toks[5].replace(')',''))
return instances
def get_system_id():
arch = platform.processor()
if is_xavier():
        # The only officially supported aarch64 platform is Jetson Xavier
with open("/sys/firmware/devicetree/base/model") as product_f:
product_name = product_f.read()
if "jetson" in product_name.lower():
if "AGX" in product_name:
return "AGX_Xavier"
elif "NX" in product_name:
return "Xavier_NX"
else:
raise RuntimeError("Unrecognized aarch64 device. Only AGX Xavier and Xavier NX are supported.")
if check_mig_enabled(0):
compute_instances = get_mig_compute_instances(0)
number_of_instances = len(compute_instances)
logging.info("Found {:} compute instances".format(number_of_instances))
if number_of_instances == 7: # There is only a single configuration that can provide 7 instances
return "A100-SXM4x1-MIG_1x1g.5gb"
        elif number_of_instances in (1, 2, 3):  # compare the instance count, not the list itself
raise RuntimeError("Repo only supports 1x1g.5gb configuration")
else:
raise RuntimeError("Unknown MIG configuration.")
try:
import pycuda.driver
import pycuda.autoinit
name = pycuda.driver.Device(0).name()
count_actual = pycuda.driver.Device.count()
except:
nvidia_smi_out = run_command("nvidia-smi -L", get_output=True, tee=False)
# Strip empty lines
tmp = [ line for line in nvidia_smi_out if len(line) > 0 ]
count_actual = len(tmp)
if count_actual == 0:
raise RuntimeError("nvidia-smi did not detect any GPUs:\n{:}".format(nvidia_smi_out))
# Format: GPU #: <name> (UUID: <uuid>)
name = tmp[0].split("(")[0].split(": ")[1].strip()
system_id, matched, closest = ("", "", -1000)
for system in system_list:
if system[1] not in name:
continue
# Match exact name with higher priority than partial name
if matched == name and system[1] != name:
continue
closer = (abs(count_actual - system[2]) < abs(count_actual - closest))
if closer or (matched != name and system[1] == name):
system_id, matched, closest = system
if closest == -1000:
raise RuntimeError("Cannot find valid configs for {:d}x {:}. Please pass in config path using --configs=<PATH>.".format(count_actual, name))
elif closest != count_actual:
logging.warn("Cannot find valid configs for {:d}x {:}. Using {:d}x {:} configs instead.".format(count_actual, name, closest, name))
return system_id
class BENCHMARKS:
# Official names for benchmarks
ResNet50 = "resnet50"
SSDResNet34 = "ssd-resnet34"
SSDMobileNet = "ssd-mobilenet"
RNNT = "rnnt"
DLRM = "dlrm"
BERT = "bert"
UNET = "3d-unet"
ALL = [ResNet50, SSDResNet34, SSDMobileNet, BERT, DLRM, UNET, RNNT]
HIGH_ACC_ENABLED = { BERT, DLRM, UNET }
# Whatever we might call it
alias_map = {
"resnet": ResNet50,
"Resnet": ResNet50,
"ResNet": ResNet50,
"resnet50": ResNet50,
"Resnet50": ResNet50,
"ResNet50": ResNet50,
"SSDResNet34": SSDResNet34,
"SSD-ResNet34": SSDResNet34,
"ssd-resnet34": SSDResNet34,
"ssd-large": SSDResNet34,
"SSDMobileNet": SSDMobileNet,
"SSD-MobileNet": SSDMobileNet,
"ssd-mobilenet": SSDMobileNet,
"ssd-small": SSDMobileNet,
"RNNT": RNNT,
"RNN-T": RNNT,
"rnnt": RNNT,
"rnn-t": RNNT,
"DLRM": DLRM,
"dlrm": DLRM,
"BERT": BERT,
"bert": BERT,
"UNET": UNET,
"Unet": UNET,
"unet": UNET,
"3d-unet": UNET,
"3DUnet": UNET,
"3D-Unet": UNET
}
def alias(name):
if not name in BENCHMARKS.alias_map:
raise ValueError("Unknown benchmark: {:}".format(name))
return BENCHMARKS.alias_map[name]
class SCENARIOS:
# Official names for scenarios
SingleStream = "SingleStream"
MultiStream = "MultiStream"
Offline = "Offline"
Server = "Server"
ALL = [SingleStream, MultiStream, Offline, Server]
# Whatever we might call it
alias_map = {
"SingleStream": SingleStream,
"Singlestream": SingleStream,
"singlestream": SingleStream,
"single_stream": SingleStream,
"single-stream": SingleStream,
"Single-Stream": SingleStream,
"MultiStream": MultiStream,
"Multistream": MultiStream,
"multistream": MultiStream,
"multi_stream": MultiStream,
"multi-stream": MultiStream,
"Multi-Stream": MultiStream,
"Offline": Offline,
"offline": Offline,
"Server": Server,
"server": Server
}
def alias(name):
if not name in SCENARIOS.alias_map:
raise ValueError("Unknown scenario: {:}".format(name))
return SCENARIOS.alias_map[name]
def run_command(cmd, get_output=False, tee=True, custom_env=None):
"""
Runs a command.
Args:
cmd (str): The command to run.
get_output (bool): If true, run_command will return the stdout output. Default: False.
tee (bool): If true, captures output (if get_output is true) as well as prints output to stdout. Otherwise, does
not print to stdout.
"""
logging.info("Running command: {:}".format(cmd))
if not get_output:
return subprocess.check_call(cmd, shell=True)
else:
output = []
if custom_env is not None:
logging.info("Overriding Environment")
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True, env=custom_env)
else:
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
for line in iter(p.stdout.readline, b""):
line = line.decode("utf-8")
if tee:
sys.stdout.write(line)
sys.stdout.flush()
output.append(line.rstrip("\n"))
ret = p.wait()
if ret == 0:
return output
else:
raise subprocess.CalledProcessError(ret, cmd)
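# Usage sketch (mirroring the nvidia-smi call in get_system_id above):
#   output_lines = run_command("nvidia-smi -L", get_output=True, tee=False)
# returns the captured stdout as a list of lines when the command exits with 0,
# and raises subprocess.CalledProcessError otherwise.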
def args_to_string(d, blacklist=[], delimit=True, double_delimit=False):
flags = []
for flag in d:
# Skip unset
if d[flag] is None:
continue
# Skip blacklisted
if flag in blacklist:
continue
if type(d[flag]) is bool:
if d[flag] is True:
flags.append("--{:}=true".format(flag))
elif d[flag] is False:
flags.append("--{:}=false".format(flag))
elif type(d[flag]) in [int, float] or not delimit:
flags.append("--{:}={:}".format(flag, d[flag]))
else:
if double_delimit:
flags.append("--{:}=\\\"{:}\\\"".format(flag, d[flag]))
else:
flags.append("--{:}=\"{:}\"".format(flag, d[flag]))
return " ".join(flags)
def flags_bool_to_int(d):
for flag in d:
if type(d[flag]) is bool:
if d[flag]:
d[flag] = 1
else:
d[flag] = 0
return d
def dict_get(d, key, default=None):
val = d.get(key, default)
return default if val is None else val
def find_config_files(benchmarks, scenarios):
config_file_candidates = ["configs/{:}/{:}/config.json".format(benchmark, scenario)
for scenario in scenarios
for benchmark in benchmarks
]
# Only return existing files
config_file_candidates = [i for i in config_file_candidates if os.path.exists(i)]
return ",".join(config_file_candidates)
def load_configs(config_files):
configs = []
for config in config_files.split(","):
file_locs = glob(config)
if len(file_locs) == 0:
raise ValueError("Config file {:} cannot be found.".format(config))
for file_loc in file_locs:
with open(file_loc) as f:
logging.info("Parsing config file {:} ...".format(file_loc))
configs.append(json.load(f))
return configs
| mlperf/inference_results_v0.7 | closed/Gigabyte/code/common/__init__.py | Python | apache-2.0 | 9,856 |
"""
Django settings for TangoWithDjango project.
Generated by 'django-admin startproject' using Django 1.9.10.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
#Dynamic path for the templates folder
TEMPLATE_DIR = os.path.join(BASE_DIR, 'templates')
#Dynamic path for static resources
STATIC_DIR = os.path.join(BASE_DIR, 'static')
#Dynamic path for media
MEDIA_DIR = os.path.join(BASE_DIR, 'media')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'nzfmpx*buqx_(3_2hb6uhniu8hnuz&+f8t&hxl6(#!+1s-k*b@'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rango',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'TangoWithDjango.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [TEMPLATE_DIR, ],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.media'
],
},
},
]
WSGI_APPLICATION = 'TangoWithDjango.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [STATIC_DIR, ]
#Media
MEDIA_ROOT = MEDIA_DIR
MEDIA_URL = '/media/'
#redirect url
LOGIN_URL = '/rango/login/'
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
| GrimFanrango/TangoWithDjango | TangoWithDjango/settings.py | Python | agpl-3.0 | 3,703 |
import collections
from supriya import CalculationRate
from supriya.ugens.MultiOutUGen import MultiOutUGen
class Balance2(MultiOutUGen):
"""
A stereo signal balancer.
::
>>> left = supriya.ugens.WhiteNoise.ar()
>>> right = supriya.ugens.SinOsc.ar()
>>> balance_2 = supriya.ugens.Balance2.ar(
... left=left,
... level=1,
... position=0,
... right=right,
... )
>>> balance_2
UGenArray({2})
"""
### CLASS VARIABLES ###
__documentation_section__ = "Spatialization UGens"
_default_channel_count = 2
_has_settable_channel_count = False
_ordered_input_names = collections.OrderedDict(
[("left", None), ("right", None), ("position", 0.0), ("level", 1.0)]
)
_valid_calculation_rates = (CalculationRate.AUDIO, CalculationRate.CONTROL)
| Pulgama/supriya | supriya/ugens/Balance2.py | Python | mit | 889 |
import json
import multiprocessing.dummy as multiprocessing
from datetime import datetime
import requests
import yaml
import time
import sys
data = {}
bad_sites = {}
slack_icon_mapping = {
    "OK": ":innocent:",
    "internal_error": ":interrobang:",
    "timeout": ":timer_clock:",
    "bad_status": ":skull_and_crossbones:"
}
def slack(url, channel, username, type, message):
print(username, type, message)
requests.post(url, json={"channel": channel, "icon_emoji": slack_icon_mapping[type], "username": "Monitoring: %s"%username, "text": message})
def cycle_actions(actions, username, type, msg):
global data
for action in actions:
if data["triggers"][action]["type"] == "slack":
print("Sending slack %s"%msg)
slack(data["triggers"][action]["url"], data["triggers"][action]["channel"], username, type, msg)
def on_error(reason, type, item):
global data
global bad_sites
msg = "<%s|%s> is unavailable!\n%s"%(item["url"], item["name"], reason)
now = datetime.now()
if "ignore_hours" in item and now.hour in item["ignore_hours"]:
print("Ignoring errors for %s because they occured within range of ignore_hours"%item['name'])
return
if item["url"] not in bad_sites:
bad_sites[item["url"]]= {"origin":now, "last_alarm":now}
else:
downtime = datetime.now()-bad_sites[item["url"]]["origin"]
msg += "\nHas been down for %s"%pretty_date(downtime)
time_since_alarm = now-bad_sites[item["url"]]["last_alarm"]
frequency = item.get("trigger_frequency", data["trigger_frequency"])
if time_since_alarm.seconds == 0 or time_since_alarm.seconds > frequency:
cycle_actions(item["on_error"], item['name'], type, msg)
bad_sites[item["url"]]["last_alarm"] = now
def ping(key):
global data, bad_sites
item = data["monitors"][key]
while True:
try:
res = requests.get(item["url"], timeout=3)
print("%s response: %s"%(item["url"], res.status_code))
if res.status_code == 200:
# check if we are back to normal
if item["url"] in bad_sites:
print("Back to normal")
downtime = datetime.now()-bad_sites[item["url"]]["origin"]
msg = "%s is back (after %s of downtime)"%(item["name"], pretty_date(downtime))
cycle_actions(item["on_error"], item['name'], "OK", msg)
del bad_sites[item["url"]]
else:
on_error("Status code %s"%res.status_code, "bad_status", item)
except requests.exceptions.Timeout as e:
on_error("Timed out", "timeout", item)
except Exception as e:
print("Exception ",e)
on_error("Unknown Internal Error", "internal_error", item)
except KeyboardInterrupt:
print("keyboard interrupt")
return
interval = item.get("interval", data["interval"])
time.sleep(interval)
def run():
global data, bad_sites
data = yaml.load(open("./config.yml", "r+").read())
pool = multiprocessing.Pool(len(data["monitors"]))
while True:
try:
pool.map_async(ping, data["monitors"]).get(9999999) # for keyboard interrupt
except KeyboardInterrupt:
print("Exiting")
sys.exit()
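# A config.yml sketch inferred from the lookups in this module (keys follow the code
# above; the values are placeholders, not a documented schema):
#
#   interval: 60
#   trigger_frequency: 300
#   triggers:
#     team_slack:
#       type: slack
#       url: https://hooks.slack.com/services/T000/B000/XXXX
#       channel: "#alerts"
#   monitors:
#     example_site:
#       name: Example
#       url: https://example.com
#       on_error:
#         - team_slack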
def pretty_date(delta):
d = delta.days
h, s = divmod(delta.seconds, 3600)
m, s = divmod(s, 60)
labels = ['day', 'hour', 'minute', 'second']
dhms = ['%s %s%s' % (i, lbl, 's' if i != 1 else '') for i, lbl in zip([d, h, m, s], labels)]
for start in range(len(dhms)):
if not dhms[start].startswith('0'):
break
for end in range(len(dhms)-1, -1, -1):
if not dhms[end].startswith('0'):
break
return ', '.join(dhms[start:end+1])
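# For example, pretty_date(timedelta(days=1, seconds=3670)) yields
# "1 day, 1 hour, 1 minute, 10 seconds"; leading and trailing zero-valued units are
# trimmed, so pretty_date(timedelta(seconds=90)) yields "1 minute, 30 seconds".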
if __name__ == "__main__":
print("Starting monitoring")
run()
| k2xl/downtime_monitor | run.py | Python | apache-2.0 | 3,932 |
class Base(object):
def meth(self):
pass
class Derived1(Base):
def meth(self):
return super().meth()
class Derived2(Derived1):
def meth(self):
return super().meth()
class Derived3(Derived1):
pass
class Derived4(Derived3, Derived2):
def meth(self):
return super().meth()
class Derived5(Derived1):
def meth(self):
return super().meth()
class Derived6(Derived5, Derived2):
def meth(self):
return super().meth()
| github/codeql | python/ql/test/3/library-tests/PointsTo/inheritance/test.py | Python | mit | 496 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('defcdb', '0029_remove_dc_finds_small_finds_type_category'),
]
operations = [
migrations.AddField(
model_name='dc_finds_small_finds_type',
name='category',
field=models.ForeignKey(blank=True, to='defcdb.DC_finds_small_finds_category', null=True),
),
]
| acdh-oeaw/defc-app | defcdb/migrations/0030_dc_finds_small_finds_type_category.py | Python | mit | 495 |
# -*- coding: UTF-8 -*-
'''
Created on Dec 10, 2009
@author: barthelemy
'''
from __future__ import unicode_literals, absolute_import
from decimal import Decimal
import gc
from multiprocessing import Process
import os
from socket import AF_INET, SOCK_STREAM, socket
import subprocess
from threading import Thread
import time
from traceback import print_exc
import unittest
from py4j.compat import range, isbytearray, bytearray2, long
from py4j.finalizer import ThreadSafeFinalizer
from py4j.java_gateway import JavaGateway, JavaMember, get_field, get_method, \
GatewayClient, set_field, java_import, JavaObject, is_instance_of
from py4j.protocol import *
SERVER_PORT = 25333
TEST_PORT = 25332
PY4J_JAVA_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'../../../../py4j-java/bin')
def start_echo_server():
subprocess.call(["java", "-cp", PY4J_JAVA_PATH, "py4j.EchoServer"])
def start_echo_server_process():
# XXX DO NOT FORGET TO KILL THE PROCESS IF THE TEST DOES NOT SUCCEED
p = Process(target=start_echo_server)
p.start()
return p
def start_example_server():
subprocess.call(["java", "-Xmx512m", "-cp", PY4J_JAVA_PATH,
"py4j.examples.ExampleApplication"])
def start_example_app_process():
# XXX DO NOT FORGET TO KILL THE PROCESS IF THE TEST DOES NOT SUCCEED
p = Process(target=start_example_server)
p.start()
return p
def get_socket():
testSocket = socket(AF_INET, SOCK_STREAM)
testSocket.connect(('127.0.0.1', TEST_PORT))
return testSocket
def safe_shutdown(instance):
try:
instance.gateway.shutdown()
except Exception:
print_exc()
class TestConnection(object):
"""Connection that does nothing. Useful for testing."""
counter = -1
def __init__(self, return_message='yro'):
self.address = '127.0.0.1'
self.port = 1234
self.return_message = return_message
self.is_connected = True
def start(self):
pass
def stop(self):
pass
def send_command(self, command):
TestConnection.counter += 1
if not command.startswith('m\nd\n'):
self.last_message = command
return self.return_message + str(TestConnection.counter)
class ProtocolTest(unittest.TestCase):
def tearDown(self):
# Safety check in case there was an exception...
safe_shutdown(self)
def testEscape(self):
self.assertEqual("Hello\t\rWorld\n\\", unescape_new_line(
escape_new_line("Hello\t\rWorld\n\\")))
self.assertEqual("Hello\t\rWorld\n\\", unescape_new_line(
escape_new_line("Hello\t\rWorld\n\\")))
def testProtocolSend(self):
testConnection = TestConnection()
self.gateway = JavaGateway(testConnection, False)
e = self.gateway.getExample()
self.assertEqual('c\nt\ngetExample\ne\n', testConnection.last_message)
e.method1(1, True, 'Hello\nWorld', e, None, 1.5)
self.assertEqual(
'c\no0\nmethod1\ni1\nbTrue\nsHello\\nWorld\nro0\nn\nd1.5\ne\n',
testConnection.last_message)
del(e)
def testProtocolReceive(self):
p = start_echo_server_process()
time.sleep(1)
try:
testSocket = get_socket()
testSocket.sendall('yo\n'.encode('utf-8'))
testSocket.sendall('yro0\n'.encode('utf-8'))
testSocket.sendall('yo\n'.encode('utf-8'))
testSocket.sendall('ysHello World\n'.encode('utf-8'))
            # No extra exchange (method3) because it is already cached.
testSocket.sendall('yi123\n'.encode('utf-8'))
testSocket.sendall('yd1.25\n'.encode('utf-8'))
testSocket.sendall('yo\n'.encode('utf-8'))
testSocket.sendall('yn\n'.encode('utf-8'))
testSocket.sendall('yo\n'.encode('utf-8'))
testSocket.sendall('ybTrue\n'.encode('utf-8'))
testSocket.sendall('yo\n'.encode('utf-8'))
testSocket.sendall('yL123\n'.encode('utf-8'))
testSocket.close()
time.sleep(1)
self.gateway = JavaGateway(auto_field=True)
ex = self.gateway.getNewExample()
self.assertEqual('Hello World', ex.method3(1, True))
self.assertEqual(123, ex.method3())
self.assertAlmostEqual(1.25, ex.method3())
self.assertTrue(ex.method2() is None)
self.assertTrue(ex.method4())
self.assertEqual(long(123), ex.method8())
self.gateway.shutdown()
except Exception:
print_exc()
self.fail('Problem occurred')
p.join()
class IntegrationTest(unittest.TestCase):
def setUp(self):
self.p = start_echo_server_process()
# This is to ensure that the server is started before connecting to it!
time.sleep(1)
def tearDown(self):
# Safety check in case there was an exception...
safe_shutdown(self)
self.p.join()
def testIntegration(self):
try:
testSocket = get_socket()
testSocket.sendall('yo\n'.encode('utf-8'))
testSocket.sendall('yro0\n'.encode('utf-8'))
testSocket.sendall('yo\n'.encode('utf-8'))
testSocket.sendall('ysHello World\n'.encode('utf-8'))
testSocket.sendall('yro1\n'.encode('utf-8'))
testSocket.sendall('yo\n'.encode('utf-8'))
testSocket.sendall('ysHello World2\n'.encode('utf-8'))
testSocket.close()
time.sleep(1)
self.gateway = JavaGateway(auto_field=True)
ex = self.gateway.getNewExample()
response = ex.method3(1, True)
self.assertEqual('Hello World', response)
ex2 = self.gateway.entry_point.getNewExample()
response = ex2.method3(1, True)
self.assertEqual('Hello World2', response)
self.gateway.shutdown()
except Exception:
self.fail('Problem occurred')
def testException(self):
try:
testSocket = get_socket()
testSocket.sendall('yo\n'.encode('utf-8'))
testSocket.sendall('yro0\n'.encode('utf-8'))
testSocket.sendall('yo\n'.encode('utf-8'))
testSocket.sendall(b'x\n')
testSocket.close()
time.sleep(1)
self.gateway = JavaGateway(auto_field=True)
ex = self.gateway.getNewExample()
self.assertRaises(Py4JError, lambda: ex.method3(1, True))
self.gateway.shutdown()
except Exception:
self.fail('Problem occurred')
class CloseTest(unittest.TestCase):
def testNoCallbackServer(self):
# Test that the program can continue to move on and that no close
# is required.
JavaGateway()
self.assertTrue(True)
def testCallbackServer(self):
# A close is required to stop the thread.
gateway = JavaGateway(start_callback_server=True)
gateway.close()
self.assertTrue(True)
time.sleep(1)
class MethodTest(unittest.TestCase):
def setUp(self):
self.p = start_example_app_process()
# This is to ensure that the server is started before connecting to it!
time.sleep(1)
self.gateway = JavaGateway()
def tearDown(self):
safe_shutdown(self)
self.p.join()
def testNoneArg(self):
ex = self.gateway.getNewExample()
try:
ex.method2(None)
ex2 = ex.method4(None)
self.assertEquals(ex2.getField1(), 3)
self.assertEquals(2, ex.method7(None))
except Exception:
print_exc()
self.fail()
def testUnicode(self):
sb = self.gateway.jvm.java.lang.StringBuffer()
sb.append('\r\n\tHello\r\n\t')
self.assertEqual('\r\n\tHello\r\n\t', sb.toString())
def testEscape(self):
sb = self.gateway.jvm.java.lang.StringBuffer()
sb.append('\r\n\tHello\r\n\t')
self.assertEqual('\r\n\tHello\r\n\t', sb.toString())
class FieldTest(unittest.TestCase):
def setUp(self):
self.p = start_example_app_process()
# This is to ensure that the server is started before connecting to it!
time.sleep(1)
def tearDown(self):
safe_shutdown(self)
self.p.join()
def testAutoField(self):
self.gateway = JavaGateway(auto_field=True)
ex = self.gateway.getNewExample()
self.assertEqual(ex.field10, 10)
self.assertEqual(ex.field11, long(11))
sb = ex.field20
sb.append('Hello')
self.assertEqual('Hello', sb.toString())
self.assertTrue(ex.field21 == None)
def testNoField(self):
self.gateway = JavaGateway(auto_field=True)
ex = self.gateway.getNewExample()
member = ex.field50
self.assertTrue(isinstance(member, JavaMember))
def testNoAutoField(self):
self.gateway = JavaGateway(auto_field=False)
ex = self.gateway.getNewExample()
self.assertTrue(isinstance(ex.field10, JavaMember))
self.assertTrue(isinstance(ex.field50, JavaMember))
self.assertEqual(10, get_field(ex, 'field10'))
# This field does not exist
self.assertRaises(Exception, get_field, ex, 'field50')
# With auto field = True
ex._auto_field = True
sb = ex.field20
sb.append('Hello')
self.assertEqual('Hello', sb.toString())
def testSetField(self):
self.gateway = JavaGateway(auto_field=False)
ex = self.gateway.getNewExample()
set_field(ex, 'field10', 2334)
self.assertEquals(get_field(ex, 'field10'), 2334)
sb = self.gateway.jvm.java.lang.StringBuffer('Hello World!')
set_field(ex, 'field21', sb)
self.assertEquals(get_field(ex, 'field21').toString(), 'Hello World!')
self.assertRaises(Exception, set_field, ex, 'field1', 123)
def testGetMethod(self):
# This is necessary if a field hides a method...
self.gateway = JavaGateway()
ex = self.gateway.getNewExample()
self.assertEqual(1, get_method(ex, 'method1')())
class UtilityTest(unittest.TestCase):
def setUp(self):
self.p = start_example_app_process()
# This is to ensure that the server is started before connecting to it!
time.sleep(1)
self.gateway = JavaGateway()
def tearDown(self):
safe_shutdown(self)
self.p.join()
def testIsInstance(self):
a_list = self.gateway.jvm.java.util.ArrayList()
a_map = self.gateway.jvm.java.util.HashMap()
# FQN
self.assertTrue(is_instance_of(self.gateway, a_list, "java.util.List"))
self.assertFalse(is_instance_of(self.gateway, a_list, "java.lang.String"))
# JavaClass
self.assertTrue(is_instance_of(self.gateway, a_list,
self.gateway.jvm.java.util.List))
self.assertFalse(is_instance_of(self.gateway, a_list,
self.gateway.jvm.java.lang.String))
# JavaObject
self.assertTrue(is_instance_of(self.gateway, a_list, a_list))
self.assertFalse(is_instance_of(self.gateway, a_list, a_map))
class MemoryManagementTest(unittest.TestCase):
def setUp(self):
self.p = start_example_app_process()
# This is to ensure that the server is started before connecting to it!
time.sleep(1)
def tearDown(self):
safe_shutdown(self)
self.p.join()
gc.collect()
def testNoAttach(self):
self.gateway = JavaGateway()
gateway2 = JavaGateway()
sb = self.gateway.jvm.java.lang.StringBuffer()
sb.append('Hello World')
self.gateway.shutdown()
self.assertRaises(Exception, lambda : sb.append('Python'))
self.assertRaises(Exception,
lambda : gateway2.jvm.java.lang.StringBuffer())
def testDetach(self):
self.gateway = JavaGateway()
gc.collect()
finalizers_size_start = len(ThreadSafeFinalizer.finalizers)
sb = self.gateway.jvm.java.lang.StringBuffer()
sb.append('Hello World')
self.gateway.detach(sb)
sb2 = self.gateway.jvm.java.lang.StringBuffer()
sb2.append('Hello World')
sb2._detach()
gc.collect()
self.assertEqual(len(ThreadSafeFinalizer.finalizers) -
finalizers_size_start, 0)
self.gateway.shutdown()
class TypeConversionTest(unittest.TestCase):
def setUp(self):
self.p = start_example_app_process()
# This is to ensure that the server is started before connecting to it!
time.sleep(1)
self.gateway = JavaGateway()
def tearDown(self):
safe_shutdown(self)
self.p.join()
def testLongInt(self):
ex = self.gateway.getNewExample()
self.assertEqual(1, ex.method7(1234))
self.assertEqual(4, ex.method7(2147483648))
self.assertEqual(4, ex.method7(long(2147483648)))
self.assertEqual(long(4), ex.method8(3))
self.assertEqual(4, ex.method8(3))
self.assertEqual(long(4), ex.method8(long(3)))
self.assertEqual(long(4), ex.method9(long(3)))
def testBigDecimal(self):
ex = self.gateway.getNewExample()
self.assertEqual(Decimal("2147483.647"), ex.method10(2147483647, 3))
self.assertEqual(Decimal("-13.456"), ex.method10(Decimal("-14.456")))
class UnicodeTest(unittest.TestCase):
def setUp(self):
self.p = start_example_app_process()
# This is to ensure that the server is started before connecting to it!
time.sleep(1)
self.gateway = JavaGateway()
def tearDown(self):
safe_shutdown(self)
self.p.join()
#def testUtfMethod(self):
#ex = self.gateway.jvm.py4j.examples.UTFExample()
## Only works for Python 3
#self.assertEqual(2, ex.strangeMéthod())
def testUnicodeString(self):
# NOTE: this is unicode because of import future unicode literal...
ex = self.gateway.jvm.py4j.examples.UTFExample()
s1 = 'allo'
s2 = 'alloé'
array1 = ex.getUtfValue(s1)
array2 = ex.getUtfValue(s2)
self.assertEqual(len(s1), len(array1))
self.assertEqual(len(s2), len(array2))
self.assertEqual(ord(s1[0]), array1[0])
self.assertEqual(ord(s2[4]), array2[4])
class ByteTest(unittest.TestCase):
def setUp(self):
self.p = start_example_app_process()
# This is to ensure that the server is started before connecting to it!
time.sleep(1)
self.gateway = JavaGateway()
def tearDown(self):
safe_shutdown(self)
self.p.join()
def testJavaByteConversion(self):
ex = self.gateway.jvm.py4j.examples.UTFExample()
ba = bytearray([0, 1, 127, 128, 255, 216, 1, 220])
self.assertEqual(0, ex.getPositiveByteValue(ba[0]))
self.assertEqual(1, ex.getPositiveByteValue(ba[1]))
self.assertEqual(127, ex.getPositiveByteValue(ba[2]))
self.assertEqual(128, ex.getPositiveByteValue(ba[3]))
self.assertEqual(255, ex.getPositiveByteValue(ba[4]))
self.assertEqual(216, ex.getPositiveByteValue(ba[5]))
self.assertEqual(0, ex.getJavaByteValue(ba[0]))
self.assertEqual(1, ex.getJavaByteValue(ba[1]))
self.assertEqual(127, ex.getJavaByteValue(ba[2]))
self.assertEqual(-128, ex.getJavaByteValue(ba[3]))
self.assertEqual(-1, ex.getJavaByteValue(ba[4]))
def testProtocolConversion(self):
#b1 = tobytestr('abc\n')
b2 = bytearray([1, 2, 3, 255, 0, 128, 127])
#encoded1 = encode_bytearray(b1)
encoded2 = encode_bytearray(b2)
#self.assertEqual(b1, decode_bytearray(encoded1))
self.assertEqual(b2, decode_bytearray(encoded2))
def testBytesType(self):
ex = self.gateway.jvm.py4j.examples.UTFExample()
int_list = [0, 1, 10, 127, 128, 255]
ba1 = bytearray(int_list)
# Same for Python2, bytes for Python 3
ba2 = bytearray2(int_list)
a1 = ex.getBytesValue(ba1)
a2 = ex.getBytesValue(ba2)
for i1, i2 in zip(a1, int_list):
self.assertEqual(i1, i2)
for i1, i2 in zip(a2, int_list):
self.assertEqual(i1, i2)
def testBytesType2(self):
ex = self.gateway.jvm.py4j.examples.UTFExample()
int_list = [0, 1, 10, 127, 255, 128]
a1 = ex.getBytesValue()
# Python 2: bytearray (because str is too easy to confuse with normal
# strings)
# Python 3: bytes (because bytes is closer to the byte[] representation
# in Java)
self.assertTrue(isbytearray(a1) or ispython3bytestr(a1))
for i1, i2 in zip(a1, int_list):
self.assertEqual(i1, i2)
def testLargeByteArray(self):
# Regression test for #109, an error when passing large byte arrays.
self.gateway.jvm.java.nio.ByteBuffer.wrap(bytearray(range(255)))
class ExceptionTest(unittest.TestCase):
def setUp(self):
self.p = start_example_app_process()
# This is to ensure that the server is started before connecting to it!
time.sleep(1)
self.gateway = JavaGateway()
def tearDown(self):
safe_shutdown(self)
self.p.join()
def testJavaError(self):
try:
self.gateway.jvm.Integer.valueOf('allo')
except Py4JJavaError as e:
self.assertEqual('java.lang.NumberFormatException',
e.java_exception.getClass().getName())
except Exception:
self.fail()
def testJavaConstructorError(self):
try:
self.gateway.jvm.Integer('allo')
except Py4JJavaError as e:
self.assertEqual('java.lang.NumberFormatException',
e.java_exception.getClass().getName())
except Exception:
self.fail()
def doError(self):
id = ''
try:
self.gateway.jvm.Integer.valueOf('allo')
except Py4JJavaError as e:
id = e.java_exception._target_id
return id
def testJavaErrorGC(self):
id = self.doError()
java_object = JavaObject(id, self.gateway._gateway_client)
try:
# Should fail because it should have been garbage collected...
java_object.getCause()
self.fail()
except Py4JError:
self.assertTrue(True)
def testReflectionError(self):
try:
self.gateway.jvm.Integer.valueOf2('allo')
except Py4JJavaError:
self.fail()
except Py4JNetworkError:
self.fail()
except Py4JError:
self.assertTrue(True)
def testStrError(self):
try:
self.gateway.jvm.Integer.valueOf('allo')
except Py4JJavaError as e:
self.assertTrue(str(e).startswith(
'An error occurred while calling z:java.lang.Integer.valueOf.'
'\n: java.lang.NumberFormatException:'))
except Exception:
self.fail()
class JVMTest(unittest.TestCase):
def setUp(self):
self.p = start_example_app_process()
# This is to ensure that the server is started before connecting to it!
time.sleep(1)
self.gateway = JavaGateway()
def tearDown(self):
safe_shutdown(self)
self.p.join()
def testConstructors(self):
jvm = self.gateway.jvm
sb = jvm.java.lang.StringBuffer('hello')
sb.append('hello world')
sb.append(1)
self.assertEqual(sb.toString(), 'hellohello world1')
l1 = jvm.java.util.ArrayList()
l1.append('hello world')
l1.append(1)
self.assertEqual(2, len(l1))
self.assertEqual('hello world', l1[0])
l2 = ['hello world', 1]
self.assertEqual(str(l2), str(l1))
def testStaticMethods(self):
System = self.gateway.jvm.java.lang.System
self.assertTrue(System.currentTimeMillis() > 0)
self.assertEqual('123', self.gateway.jvm.java.lang.String.valueOf(123))
def testStaticFields(self):
Short = self.gateway.jvm.java.lang.Short
self.assertEqual(-32768, Short.MIN_VALUE)
System = self.gateway.jvm.java.lang.System
self.assertFalse(System.out.checkError())
def testDefaultImports(self):
self.assertTrue(self.gateway.jvm.System.currentTimeMillis() > 0)
self.assertEqual('123', self.gateway.jvm.String.valueOf(123))
def testNone(self):
ex = self.gateway.entry_point.getNewExample()
ex.method4(None)
def testJVMView(self):
newView = self.gateway.new_jvm_view('myjvm')
time = newView.System.currentTimeMillis()
self.assertTrue(time > 0)
time = newView.java.lang.System.currentTimeMillis()
self.assertTrue(time > 0)
def testImport(self):
newView = self.gateway.new_jvm_view('myjvm')
java_import(self.gateway.jvm, 'java.util.*')
java_import(self.gateway.jvm, 'java.io.File')
self.assertTrue(self.gateway.jvm.ArrayList() is not None)
self.assertTrue(self.gateway.jvm.File('hello.txt') is not None)
self.assertRaises(Exception, lambda : newView.File('test.txt'))
java_import(newView, 'java.util.HashSet')
self.assertTrue(newView.HashSet() is not None)
def testEnum(self):
self.assertEqual('FOO', str(self.gateway.jvm.py4j.examples.Enum2.FOO))
def testInnerClass(self):
self.assertEqual('FOO',
str(self.gateway.jvm.py4j.examples.EnumExample.MyEnum.FOO))
self.assertEqual('HELLO2',
self.gateway.jvm.py4j.examples.EnumExample.InnerClass.MY_CONSTANT2)
class HelpTest(unittest.TestCase):
def setUp(self):
self.p = start_example_app_process()
# This is to ensure that the server is started before connecting to it!
time.sleep(1)
self.gateway = JavaGateway()
def tearDown(self):
safe_shutdown(self)
self.p.join()
def testHelpObject(self):
ex = self.gateway.getNewExample()
help_page = self.gateway.help(ex, short_name=True, display=False)
#print(help_page)
self.assertTrue(len(help_page) > 1)
def testHelpObjectWithPattern(self):
ex = self.gateway.getNewExample()
help_page = self.gateway.help(ex, pattern='m*', short_name=True,
display=False)
#print(help_page)
self.assertTrue(len(help_page) > 1)
def testHelpClass(self):
String = self.gateway.jvm.java.lang.String
help_page = self.gateway.help(String, short_name=False, display=False)
#print(help_page)
self.assertTrue(len(help_page) > 1)
self.assertTrue("String" in help_page)
class Runner(Thread):
def __init__(self, runner_range, gateway):
Thread.__init__(self)
self.range = runner_range
self.gateway = gateway
self.ok = True
def run(self):
ex = self.gateway.getNewExample()
for i in self.range:
try:
l = ex.getList(i)
if len(l) != i:
self.ok = False
break
self.gateway.detach(l)
# gc.collect()
except Exception:
self.ok = False
break
class ThreadTest(unittest.TestCase):
def setUp(self):
self.p = start_example_app_process()
# This is to ensure that the server is started before connecting to it!
time.sleep(1)
gateway_client = GatewayClient()
self.gateway = JavaGateway(gateway_client=gateway_client)
def tearDown(self):
safe_shutdown(self)
self.p.join()
def testStress(self):
# Real stress test!
# runner1 = Runner(xrange(1,10000,2),self.gateway)
# runner2 = Runner(xrange(1000,1000000,10000), self.gateway)
# runner3 = Runner(xrange(1000,1000000,10000), self.gateway)
# Small stress test
runner1 = Runner(range(1, 10000, 1000), self.gateway)
runner2 = Runner(range(1000, 1000000, 100000), self.gateway)
runner3 = Runner(range(1000, 1000000, 100000), self.gateway)
runner1.start()
runner2.start()
runner3.start()
runner1.join()
runner2.join()
runner3.join()
self.assertTrue(runner1.ok)
self.assertTrue(runner2.ok)
self.assertTrue(runner3.ok)
class GatewayLauncherTest(unittest.TestCase):
def tearDown(self):
safe_shutdown(self)
def testDefaults(self):
self.gateway = JavaGateway.launch_gateway()
self.assertTrue(self.gateway.jvm)
def testJavaopts(self):
self.gateway = JavaGateway.launch_gateway(javaopts=["-Xmx64m"])
self.assertTrue(self.gateway.jvm)
if __name__ == "__main__":
unittest.main()
| rex1100/learning-spark | spark-1.3.1-bin-hadoop1/python/build/py4j/tests/java_gateway_test.py | Python | mit | 24,978 |
#!/usr/bin/env python
#
# !!!!!!!!! WARNING !!!!!!!!!!!!!!!
# This Script was bastardized To Read Password From /home/bspaans/.googlecode
#
#
#
# Copyright 2006, 2007 Google Inc. All Rights Reserved.
# Author: [email protected] (David Anderson)
#
# Script for uploading files to a Google Code project.
#
# This is intended to be both a useful script for people who want to
# streamline project uploads and a reference implementation for
# uploading files to Google Code projects.
#
# To upload a file to Google Code, you need to provide a path to the
# file on your local machine, a small summary of what the file is, a
# project name, and a valid account that is a member or owner of that
# project. You can optionally provide a list of labels that apply to
# the file. The file will be uploaded under the same name that it has
# in your local filesystem (that is, the "basename" or last path
# component). Run the script with '--help' to get the exact syntax
# and available options.
#
# Note that the upload script requests that you enter your
# googlecode.com password. This is NOT your Gmail account password!
# This is the password you use on googlecode.com for committing to
# Subversion and uploading files. You can find your password by going
# to http://code.google.com/hosting/settings when logged in with your
# Gmail account. If you have already committed to your project's
# Subversion repository, the script will automatically retrieve your
# credentials from there (unless disabled, see the output of '--help'
# for details).
#
# If you are looking at this script as a reference for implementing
# your own Google Code file uploader, then you should take a look at
# the upload() function, which is the meat of the uploader. You
# basically need to build a multipart/form-data POST request with the
# right fields and send it to https://PROJECT.googlecode.com/files .
# Authenticate the request using HTTP Basic authentication, as is
# shown below.
#
# Licensed under the terms of the Apache Software License 2.0:
# http://www.apache.org/licenses/LICENSE-2.0
#
# Questions, comments, feature requests and patches are most welcome.
# Please direct all of these to the Google Code users group:
# http://groups.google.com/group/google-code-hosting
"""Google Code file uploader script.
"""
__author__ = '[email protected] (David Anderson)'
import http.client
import os.path
import optparse
import getpass
import base64
import sys
def get_svn_config_dir():
pass
def get_svn_auth(project_name, config_dir):
"""Return (username, password) for project_name in config_dir.
!!!!! CHANGED !!!!!!!!"""
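    # The file is expected to hold a single "username:password" line; the [:-1]
    # below strips the trailing newline from the password.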
f = open("/home/bspaans/.googlecode", 'r')
usr_data = f.read().split(":")
f.close()
return (usr_data[0], usr_data[1][:-1])
def upload(file, project_name, user_name, password, summary, labels=None):
"""Upload a file to a Google Code project's file server.
Args:
file: The local path to the file.
project_name: The name of your project on Google Code.
user_name: Your Google account name.
password: The googlecode.com password for your account.
Note that this is NOT your global Google Account password!
summary: A small description for the file.
labels: an optional list of label strings with which to tag the file.
Returns: a tuple:
http_status: 201 if the upload succeeded, something else if an
        error occurred.
http_reason: The human-readable string associated with http_status
file_url: If the upload succeeded, the URL of the file on Google
Code, None otherwise.
"""
# The login is the user part of [email protected]. If the login provided
# is in the full user@domain form, strip it down.
if user_name.endswith('@gmail.com'):
user_name = user_name[:user_name.index('@gmail.com')]
form_fields = [('summary', summary)]
if labels is not None:
form_fields.extend([('label', l.strip()) for l in labels])
content_type, body = encode_upload_request(form_fields, file)
upload_host = '%s.googlecode.com' % project_name
upload_uri = '/files'
  # base64 needs bytes and the header needs text on Python 3, so encode/decode explicitly.
  auth_token = base64.b64encode(('%s:%s' % (user_name, password)).encode('utf-8')).decode('ascii')
headers = {
'Authorization': 'Basic %s' % auth_token,
'User-Agent': 'Googlecode.com uploader v0.9.4',
'Content-Type': content_type,
}
server = http.client.HTTPSConnection(upload_host)
server.request('POST', upload_uri, body, headers)
resp = server.getresponse()
server.close()
if resp.status == 201:
location = resp.getheader('Location', None)
else:
location = None
return resp.status, resp.reason, location
def encode_upload_request(fields, file_path):
"""Encode the given fields and file into a multipart form body.
fields is a sequence of (name, value) pairs. file is the path of
the file to upload. The file will be uploaded to Google Code with
the same file name.
Returns: (content_type, body) ready for httplib.HTTP instance
"""
BOUNDARY = '----------Googlecode_boundary_reindeer_flotilla'
CRLF = '\r\n'
body = []
# Add the metadata about the upload first
for key, value in fields:
body.extend(
['--' + BOUNDARY,
'Content-Disposition: form-data; name="%s"' % key,
'',
value,
])
# Now add the file itself
file_name = os.path.basename(file_path)
  f = open(file_path, 'rb')
  # Decode as latin-1 so the raw bytes survive the str join below and http.client's
  # latin-1 re-encoding of the request body on Python 3.
  file_content = f.read().decode('latin-1')
  f.close()
body.extend(
['--' + BOUNDARY,
'Content-Disposition: form-data; name="filename"; filename="%s"'
% file_name,
# The upload server determines the mime-type, no need to set it.
'Content-Type: application/octet-stream',
'',
file_content,
])
# Finalize the form body
body.extend(['--' + BOUNDARY + '--', ''])
return 'multipart/form-data; boundary=%s' % BOUNDARY, CRLF.join(body)
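# Schematically (placeholders in angle brackets), the generated body looks like:
#   --<BOUNDARY>
#   Content-Disposition: form-data; name="summary"
#
#   <summary text>
#   --<BOUNDARY>
#   Content-Disposition: form-data; name="filename"; filename="<file name>"
#   Content-Type: application/octet-stream
#
#   <file contents>
#   --<BOUNDARY>--
# with CRLF line endings, as multipart/form-data requires.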
def upload_find_auth(file_path, project_name, summary, labels=None,
config_dir=None, user_name=None, tries=1):
"""Find credentials and upload a file to a Google Code project's file server.
file_path, project_name, summary, and labels are passed as-is to upload.
If config_dir is None, try get_svn_config_dir(); if it is 'none', skip
trying the Subversion configuration entirely. If user_name is not None, use
it for the first attempt; prompt for subsequent attempts.
Args:
file_path: The local path to the file.
project_name: The name of your project on Google Code.
summary: A small description for the file.
labels: an optional list of label strings with which to tag the file.
config_dir: Path to Subversion configuration directory, 'none', or None.
user_name: Your Google account name.
tries: How many attempts to make.
"""
if config_dir != 'none':
# Try to load username/password from svn config for first try.
if config_dir is None:
config_dir = get_svn_config_dir()
(svn_username, password) = get_svn_auth(project_name, config_dir)
if user_name is None:
# If username was not supplied by caller, use svn config.
user_name = svn_username
else:
# Just initialize password for the first try.
password = None
while tries > 0:
if user_name is None:
# Read username if not specified or loaded from svn config, or on
# subsequent tries.
sys.stdout.write('Please enter your googlecode.com username: ')
sys.stdout.flush()
user_name = sys.stdin.readline().rstrip()
if password is None:
# Read password if not loaded from svn config, or on subsequent tries.
print('Please enter your googlecode.com password.')
print('** Note that this is NOT your Gmail account password! **')
print('It is the password you use to access Subversion repositories,')
print('and can be found here: http://code.google.com/hosting/settings')
password = getpass.getpass()
status, reason, url = upload(file_path, project_name, user_name, password,
summary, labels)
# Returns 403 Forbidden instead of 401 Unauthorized for bad
# credentials as of 2007-07-17.
if status in [http.client.FORBIDDEN]:
      # Reset for another try.
tries = tries - 1
else:
# We're done.
break
return status, reason, url
def main():
parser = optparse.OptionParser(usage='googlecode-upload.py -s SUMMARY '
'-p PROJECT [options] FILE')
parser.add_option('--config-dir', dest='config_dir', metavar='DIR',
help='read svn auth data from DIR'
' ("none" means not to use svn auth data)')
parser.add_option('-s', '--summary', dest='summary',
help='Short description of the file')
parser.add_option('-p', '--project', dest='project',
help='Google Code project name')
parser.add_option('-u', '--user', dest='user',
help='Your Google Code username')
parser.add_option('-l', '--labels', dest='labels',
help='An optional list of labels to attach to the file')
options, args = parser.parse_args()
if not options.summary:
parser.error('File summary is missing.')
elif not options.project:
parser.error('Project name is missing.')
elif len(args) < 1:
parser.error('File to upload not provided.')
elif len(args) > 1:
parser.error('Only one file may be specified.')
file_path = args[0]
if options.labels:
labels = options.labels.split(',')
else:
labels = None
status, reason, url = upload_find_auth(file_path, options.project,
options.summary, labels,
options.config_dir, options.user)
if url:
print('The file was uploaded successfully.')
print('URL: %s' % url)
return 0
else:
print('An error occurred. Your file was not uploaded.')
print('Google Code upload server said: %s (%s)' % (reason, status))
return 1
if __name__ == '__main__':
sys.exit(main())
| anthonyt/mingus-counterpoint | googlecode_upload.py | Python | gpl-3.0 | 9,994 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
from operator import attrgetter
import re
import openerp
from openerp import SUPERUSER_ID
from openerp.osv import osv, fields
from openerp.tools import ustr
from openerp.tools.translate import _
from openerp import exceptions
_logger = logging.getLogger(__name__)
class res_config_module_installation_mixin(object):
def _install_modules(self, cr, uid, modules, context):
"""Install the requested modules.
return the next action to execute
modules is a list of tuples
(mod_name, browse_record | None)
"""
ir_module = self.pool.get('ir.module.module')
to_install_ids = []
to_install_missing_names = []
for name, module in modules:
if not module:
to_install_missing_names.append(name)
elif module.state == 'uninstalled':
to_install_ids.append(module.id)
result = None
if to_install_ids:
result = ir_module.button_immediate_install(cr, uid, to_install_ids, context=context)
#FIXME: if result is not none, the corresponding todo will be skipped because it was just marked done
if to_install_missing_names:
return {
'type': 'ir.actions.client',
'tag': 'apps',
'params': {'modules': to_install_missing_names},
}
return result
class res_config_configurable(osv.osv_memory):
    ''' Base class for new-style configuration items
    Configuration items should inherit from this class, implement
    the execute method (and optionally the cancel one) and have
    their view inherit from the related res_config_view_base view.
    A minimal example follows this class definition.
    '''
_name = 'res.config'
def _next_action(self, cr, uid, context=None):
Todos = self.pool['ir.actions.todo']
_logger.info('getting next %s', Todos)
active_todos = Todos.browse(cr, uid,
Todos.search(cr, uid, ['&', ('type', '=', 'automatic'), ('state','=','open')]),
context=context)
user_groups = set(map(
lambda g: g.id,
self.pool['res.users'].browse(cr, uid, [uid], context=context)[0].groups_id))
valid_todos_for_user = [
todo for todo in active_todos
if not todo.groups_id or bool(user_groups.intersection((
group.id for group in todo.groups_id)))
]
if valid_todos_for_user:
return valid_todos_for_user[0]
return None
def _next(self, cr, uid, context=None):
_logger.info('getting next operation')
next = self._next_action(cr, uid, context=context)
_logger.info('next action is %s', next)
if next:
res = next.action_launch(context=context)
res['nodestroy'] = False
return res
return {
'type': 'ir.actions.client',
'tag': 'reload',
}
def start(self, cr, uid, ids, context=None):
return self.next(cr, uid, ids, context)
def next(self, cr, uid, ids, context=None):
""" Returns the next todo action to execute (using the default
sort order)
"""
return self._next(cr, uid, context=context)
def execute(self, cr, uid, ids, context=None):
""" Method called when the user clicks on the ``Next`` button.
Execute *must* be overloaded unless ``action_next`` is overloaded
(which is something you generally don't need to do).
If ``execute`` returns an action dictionary, that action is executed
rather than just going to the next configuration item.
"""
raise NotImplementedError(
'Configuration items need to implement execute')
def cancel(self, cr, uid, ids, context=None):
""" Method called when the user click on the ``Skip`` button.
``cancel`` should be overloaded instead of ``action_skip``. As with
``execute``, if it returns an action dictionary that action is
        executed instead of the default (going to the next configuration item).
The default implementation is a NOOP.
``cancel`` is also called by the default implementation of
``action_cancel``.
"""
pass
def action_next(self, cr, uid, ids, context=None):
""" Action handler for the ``next`` event.
Sets the status of the todo the event was sent from to
``done``, calls ``execute`` and -- unless ``execute`` returned
an action dictionary -- executes the action provided by calling
``next``.
"""
next = self.execute(cr, uid, ids, context=context)
if next: return next
return self.next(cr, uid, ids, context=context)
def action_skip(self, cr, uid, ids, context=None):
""" Action handler for the ``skip`` event.
Sets the status of the todo the event was sent from to
``skip``, calls ``cancel`` and -- unless ``cancel`` returned
an action dictionary -- executes the action provided by calling
``next``.
"""
next = self.cancel(cr, uid, ids, context=context)
if next: return next
return self.next(cr, uid, ids, context=context)
def action_cancel(self, cr, uid, ids, context=None):
""" Action handler for the ``cancel`` event. That event isn't
generated by the res.config.view.base inheritable view, the
inherited view has to overload one of the buttons (or add one
more).
Sets the status of the todo the event was sent from to
``cancel``, calls ``cancel`` and -- unless ``cancel`` returned
an action dictionary -- executes the action provided by calling
``next``.
"""
next = self.cancel(cr, uid, ids, context=context)
if next: return next
return self.next(cr, uid, ids, context=context)
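# Illustrative sketch (editor's addition, not part of the original module):
# a minimal res.config item following the contract documented above. The
# model name and column below are assumptions chosen for illustration.
class example_config_item(osv.osv_memory):
    _name = 'example.config'
    _inherit = 'res.config'
    _columns = {
        'confirm': fields.boolean('Confirm this example step'),
    }
    def execute(self, cr, uid, ids, context=None):
        # Do the actual configuration work here; returning nothing simply
        # moves on to the next configuration todo.
        pass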
class res_config_installer(osv.osv_memory, res_config_module_installation_mixin):
""" New-style configuration base specialized for addons selection
and installation.
Basic usage
-----------
Subclasses can simply define a number of _columns as
fields.boolean objects. The keys (column names) should be the
names of the addons to install (when selected). Upon action
execution, selected boolean fields (and those only) will be
interpreted as addons to install, and batch-installed.
Additional addons
-----------------
It is also possible to require the installation of an additional
addon set when a specific preset of addons has been marked for
installation (in the basic usage only, additionals can't depend on
one another).
These additionals are defined through the ``_install_if``
property. This property is a mapping of a collection of addons (by
name) to a collection of addons (by name) [#]_, and if all the *key*
addons are selected for installation, then the *value* ones will
be selected as well. For example::
_install_if = {
('sale','crm'): ['sale_crm'],
}
This will install the ``sale_crm`` addon if and only if both the
``sale`` and ``crm`` addons are selected for installation.
You can define as many additionals as you wish, and additionals
can overlap in key and value. For instance::
_install_if = {
('sale','crm'): ['sale_crm'],
('sale','project'): ['project_mrp'],
}
will install both ``sale_crm`` and ``project_mrp`` if all of
``sale``, ``crm`` and ``project`` are selected for installation.
Hook methods
------------
Subclasses might also need to express dependencies more complex
than that provided by additionals. In this case, it's possible to
define methods of the form ``_if_%(name)s`` where ``name`` is the
name of a boolean field. If the field is selected, then the
corresponding module will be marked for installation *and* the
hook method will be executed.
    Hook methods take the usual set of parameters (cr, uid, ids,
    context) and may return a collection of additional addon names to
    install; returning nothing (or any "falsy" value such as None or an
    empty collection) simply adds no extra addons. A short sketch of an
    installer follows this class definition.
Complete control
----------------
The last hook is to simply overload the ``modules_to_install``
method, which implements all the mechanisms above. This method
takes the usual set of parameters (cr, uid, ids, context) and
returns a ``set`` of addons to install (addons selected by the
above methods minus addons from the *basic* set which are already
installed) [#]_ so an overloader can simply manipulate the ``set``
returned by ``res_config_installer.modules_to_install`` to add or
remove addons.
Skipping the installer
----------------------
Unless it is removed from the view, installers have a *skip*
button which invokes ``action_skip`` (and the ``cancel`` hook from
``res.config``). Hooks and additionals *are not run* when skipping
installation, even for already installed addons.
    Again, set up your hooks accordingly.
.. [#] note that since a mapping key needs to be hashable, it's
possible to use a tuple or a frozenset, but not a list or a
regular set
.. [#] because the already-installed modules are only pruned at
the very end of ``modules_to_install``, additionals and
        hooks depending on them *are guaranteed to execute*. Set up
        your hooks accordingly.
"""
_name = 'res.config.installer'
_inherit = 'res.config'
_install_if = {}
def already_installed(self, cr, uid, context=None):
""" For each module, check if it's already installed and if it
is return its name
:returns: a list of the already installed modules in this
installer
:rtype: [str]
"""
return map(attrgetter('name'),
self._already_installed(cr, uid, context=context))
def _already_installed(self, cr, uid, context=None):
""" For each module (boolean fields in a res.config.installer),
check if it's already installed (either 'to install', 'to upgrade'
        or 'installed') and, if it is, return the module's browse_record.
:returns: a list of all installed modules in this installer
:rtype: [browse_record]
"""
modules = self.pool['ir.module.module']
selectable = [field for field in self._columns
if type(self._columns[field]) is fields.boolean]
return modules.browse(
cr, uid,
modules.search(cr, uid,
[('name','in',selectable),
('state','in',['to install', 'installed', 'to upgrade'])],
context=context),
context=context)
def modules_to_install(self, cr, uid, ids, context=None):
""" selects all modules to install:
* checked boolean fields
* return values of hook methods. Hook methods are of the form
``_if_%(addon_name)s``, and are called if the corresponding
addon is marked for installation. They take the arguments
cr, uid, ids and context, and return an iterable of addon
names
* additionals, additionals are setup through the ``_install_if``
class variable. ``_install_if`` is a dict of {iterable:iterable}
where key and value are iterables of addon names.
If all the addons in the key are selected for installation
(warning: addons added through hooks don't count), then the
addons in the value are added to the set of modules to install
* not already installed
"""
base = set(module_name
for installer in self.read(cr, uid, ids, context=context)
for module_name, to_install in installer.iteritems()
if module_name != 'id'
if type(self._columns[module_name]) is fields.boolean
if to_install)
hooks_results = set()
for module in base:
hook = getattr(self, '_if_%s'% module, None)
if hook:
hooks_results.update(hook(cr, uid, ids, context=None) or set())
additionals = set(
module for requirements, consequences \
in self._install_if.iteritems()
if base.issuperset(requirements)
for module in consequences)
return (base | hooks_results | additionals).difference(
self.already_installed(cr, uid, context))
def default_get(self, cr, uid, fields_list, context=None):
''' If an addon is already installed, check it by default
'''
defaults = super(res_config_installer, self).default_get(
cr, uid, fields_list, context=context)
return dict(defaults,
**dict.fromkeys(
self.already_installed(cr, uid, context=context),
True))
def fields_get(self, cr, uid, fields=None, context=None, write_access=True):
""" If an addon is already installed, set it to readonly as
res.config.installer doesn't handle uninstallations of already
installed addons
"""
fields = super(res_config_installer, self).fields_get(
cr, uid, fields, context, write_access)
for name in self.already_installed(cr, uid, context=context):
if name not in fields:
continue
fields[name].update(
readonly=True,
help= ustr(fields[name].get('help', '')) +
_('\n\nThis addon is already installed on your system'))
return fields
def execute(self, cr, uid, ids, context=None):
to_install = list(self.modules_to_install(
cr, uid, ids, context=context))
_logger.info('Selecting addons %s to install', to_install)
ir_module = self.pool.get('ir.module.module')
modules = []
for name in to_install:
mod_ids = ir_module.search(cr, uid, [('name', '=', name)])
record = ir_module.browse(cr, uid, mod_ids[0], context) if mod_ids else None
modules.append((name, record))
return self._install_modules(cr, uid, modules, context=context)
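# Illustrative sketch (editor's addition): an installer using the boolean
# column convention, an ``_install_if`` additional and an ``_if_<name>``
# hook as documented above. Addon and model names are assumptions.
class example_installer(osv.osv_memory):
    _name = 'example.installer'
    _inherit = 'res.config.installer'
    _columns = {
        'sale': fields.boolean('Sales Management'),
        'crm': fields.boolean('CRM'),
    }
    # install sale_crm if and only if both sale and crm are selected
    _install_if = {
        ('sale', 'crm'): ['sale_crm'],
    }
    def _if_sale(self, cr, uid, ids, context=None):
        # hook: runs when 'sale' is checked; may return extra addon names
        return ['sale_stock']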
class res_config_settings(osv.osv_memory, res_config_module_installation_mixin):
""" Base configuration wizard for application settings. It provides support for setting
default values, assigning groups to employee users, and installing modules.
To make such a 'settings' wizard, define a model like::
class my_config_wizard(osv.osv_memory):
_name = 'my.settings'
_inherit = 'res.config.settings'
_columns = {
'default_foo': fields.type(..., default_model='my.model'),
'group_bar': fields.boolean(..., group='base.group_user', implied_group='my.group'),
'module_baz': fields.boolean(...),
'other_field': fields.type(...),
}
The method ``execute`` provides some support based on a naming convention:
* For a field like 'default_XXX', ``execute`` sets the (global) default value of
the field 'XXX' in the model named by ``default_model`` to the field's value.
* For a boolean field like 'group_XXX', ``execute`` adds/removes 'implied_group'
to/from the implied groups of 'group', depending on the field's value.
By default 'group' is the group Employee. Groups are given by their xml id.
The attribute 'group' may contain several xml ids, separated by commas.
* For a boolean field like 'module_XXX', ``execute`` triggers the immediate
installation of the module named 'XXX' if the field has value ``True``.
* For the other fields, the method ``execute`` invokes all methods with a name
that starts with 'set_'; such methods can be defined to implement the effect
of those fields.
The method ``default_get`` retrieves values that reflect the current status of the
fields like 'default_XXX', 'group_XXX' and 'module_XXX'. It also invokes all methods
with a name that starts with 'get_default_'; such methods can be defined to provide
    current values for other fields. A minimal sketch of such a settings
    wizard follows this class definition.
"""
_name = 'res.config.settings'
def copy(self, cr, uid, id, values, context=None):
raise osv.except_osv(_("Cannot duplicate configuration!"), "")
def _get_classified_fields(self, cr, uid, context=None):
""" return a dictionary with the fields classified by category::
{ 'default': [('default_foo', 'model', 'foo'), ...],
'group': [('group_bar', [browse_group], browse_implied_group), ...],
'module': [('module_baz', browse_module), ...],
'other': ['other_field', ...],
}
"""
ir_model_data = self.pool['ir.model.data']
ir_module = self.pool['ir.module.module']
def ref(xml_id):
mod, xml = xml_id.split('.', 1)
return ir_model_data.get_object(cr, uid, mod, xml, context=context)
defaults, groups, modules, others = [], [], [], []
for name, field in self._columns.items():
if name.startswith('default_') and hasattr(field, 'default_model'):
defaults.append((name, field.default_model, name[8:]))
elif name.startswith('group_') and isinstance(field, fields.boolean) and hasattr(field, 'implied_group'):
field_groups = getattr(field, 'group', 'base.group_user').split(',')
groups.append((name, map(ref, field_groups), ref(field.implied_group)))
elif name.startswith('module_') and isinstance(field, fields.boolean):
mod_ids = ir_module.search(cr, uid, [('name', '=', name[7:])])
record = ir_module.browse(cr, uid, mod_ids[0], context) if mod_ids else None
modules.append((name, record))
else:
others.append(name)
return {'default': defaults, 'group': groups, 'module': modules, 'other': others}
def default_get(self, cr, uid, fields, context=None):
ir_values = self.pool['ir.values']
classified = self._get_classified_fields(cr, uid, context)
res = super(res_config_settings, self).default_get(cr, uid, fields, context)
# defaults: take the corresponding default value they set
for name, model, field in classified['default']:
value = ir_values.get_default(cr, uid, model, field)
if value is not None:
res[name] = value
# groups: which groups are implied by the group Employee
for name, groups, implied_group in classified['group']:
res[name] = all(implied_group in group.implied_ids for group in groups)
# modules: which modules are installed/to install
for name, module in classified['module']:
res[name] = module and module.state in ('installed', 'to install', 'to upgrade')
# other fields: call all methods that start with 'get_default_'
for method in dir(self):
if method.startswith('get_default_'):
res.update(getattr(self, method)(cr, uid, fields, context))
return res
def execute(self, cr, uid, ids, context=None):
if uid != SUPERUSER_ID and not self.pool['res.users'].has_group(cr, uid, 'base.group_erp_manager'):
raise openerp.exceptions.AccessError(_("Only administrators can change the settings"))
ir_values = self.pool['ir.values']
ir_module = self.pool['ir.module.module']
res_groups = self.pool['res.groups']
classified = self._get_classified_fields(cr, uid, context)
config = self.browse(cr, uid, ids[0], context)
# default values fields
for name, model, field in classified['default']:
ir_values.set_default(cr, SUPERUSER_ID, model, field, config[name])
# group fields: modify group / implied groups
for name, groups, implied_group in classified['group']:
gids = map(int, groups)
if config[name]:
res_groups.write(cr, uid, gids, {'implied_ids': [(4, implied_group.id)]}, context=context)
else:
res_groups.write(cr, uid, gids, {'implied_ids': [(3, implied_group.id)]}, context=context)
uids = set()
for group in groups:
uids.update(map(int, group.users))
implied_group.write({'users': [(3, u) for u in uids]})
# other fields: execute all methods that start with 'set_'
for method in dir(self):
if method.startswith('set_'):
getattr(self, method)(cr, uid, ids, context)
# module fields: install/uninstall the selected modules
to_install = []
to_uninstall_ids = []
lm = len('module_')
for name, module in classified['module']:
if config[name]:
to_install.append((name[lm:], module))
else:
if module and module.state in ('installed', 'to upgrade'):
to_uninstall_ids.append(module.id)
if to_uninstall_ids:
ir_module.button_immediate_uninstall(cr, uid, to_uninstall_ids, context=context)
action = self._install_modules(cr, uid, to_install, context=context)
if action:
return action
# After the uninstall/install calls, the self.pool is no longer valid.
# So we reach into the RegistryManager directly.
res_config = openerp.modules.registry.RegistryManager.get(cr.dbname)['res.config']
config = res_config.next(cr, uid, [], context=context) or {}
if config.get('type') not in ('ir.actions.act_window_close',):
return config
# force client-side reload (update user menu and current view)
return {
'type': 'ir.actions.client',
'tag': 'reload',
}
def cancel(self, cr, uid, ids, context=None):
# ignore the current record, and send the action to reopen the view
act_window = self.pool['ir.actions.act_window']
action_ids = act_window.search(cr, uid, [('res_model', '=', self._name)])
if action_ids:
return act_window.read(cr, uid, action_ids[0], [], context=context)
return {}
def name_get(self, cr, uid, ids, context=None):
""" Override name_get method to return an appropriate configuration wizard
name, and not the generated name."""
if not ids:
return []
# name_get may receive int id instead of an id list
if isinstance(ids, (int, long)):
ids = [ids]
act_window = self.pool['ir.actions.act_window']
action_ids = act_window.search(cr, uid, [('res_model', '=', self._name)], context=context)
name = self._name
if action_ids:
name = act_window.read(cr, uid, action_ids[0], ['name'], context=context)['name']
return [(record.id, name) for record in self.browse(cr, uid , ids, context=context)]
def get_option_path(self, cr, uid, menu_xml_id, context=None):
"""
Fetch the path to a specified configuration view and the action id to access it.
:param string menu_xml_id: the xml id of the menuitem where the view is located,
structured as follows: module_name.menuitem_xml_id (e.g.: "base.menu_sale_config")
:return tuple:
- t[0]: string: full path to the menuitem (e.g.: "Settings/Configuration/Sales")
- t[1]: int or long: id of the menuitem's action
"""
module_name, menu_xml_id = menu_xml_id.split('.')
dummy, menu_id = self.pool['ir.model.data'].get_object_reference(cr, uid, module_name, menu_xml_id)
ir_ui_menu = self.pool['ir.ui.menu'].browse(cr, uid, menu_id, context=context)
return (ir_ui_menu.complete_name, ir_ui_menu.action.id)
def get_option_name(self, cr, uid, full_field_name, context=None):
"""
Fetch the human readable name of a specified configuration option.
:param string full_field_name: the full name of the field, structured as follows:
model_name.field_name (e.g.: "sale.config.settings.fetchmail_lead")
:return string: human readable name of the field (e.g.: "Create leads from incoming mails")
"""
model_name, field_name = full_field_name.rsplit('.', 1)
return self.pool[model_name].fields_get(cr, uid, allfields=[field_name], context=context)[field_name]['string']
def get_config_warning(self, cr, msg, context=None):
"""
Helper: return a Warning exception with the given message where the %(field:xxx)s
and/or %(menu:yyy)s are replaced by the human readable field's name and/or menuitem's
full path.
Usage:
------
Just include in your error message %(field:model_name.field_name)s to obtain the human
readable field's name, and/or %(menu:module_name.menuitem_xml_id)s to obtain the menuitem's
full path.
Example of use:
---------------
        from openerp.addons.base.res.res_config import get_config_warning
        raise get_config_warning(cr, _("Error: this action is prohibited. You should check the field %(field:sale.config.settings.fetchmail_lead)s in %(menu:base.menu_sale_config)s."), context=context)
This will return an exception containing the following message:
Error: this action is prohibited. You should check the field Create leads from incoming mails in Settings/Configuration/Sales.
What if there is another substitution in the message already?
-------------------------------------------------------------
You could have a situation where the error message you want to upgrade already contains a substitution. Example:
Cannot find any account journal of %s type for this company.\n\nYou can create one in the menu: \nConfiguration\Journals\Journals.
        What you want to do here is simply to replace the path with %(menu:account.menu_account_config)s, and leave the rest alone.
In order to do that, you can use the double percent (%%) to escape your new substitution, like so:
Cannot find any account journal of %s type for this company.\n\nYou can create one in the %%(menu:account.menu_account_config)s.
"""
res_config_obj = openerp.registry(cr.dbname)['res.config.settings']
regex_path = r'%\(((?:menu|field):[a-z_\.]*)\)s'
# Process the message
# 1/ find the menu and/or field references, put them in a list
references = re.findall(regex_path, msg, flags=re.I)
# 2/ fetch the menu and/or field replacement values (full path and
# human readable field's name) and the action_id if any
values = {}
action_id = None
for item in references:
ref_type, ref = item.split(':')
if ref_type == 'menu':
values[item], action_id = res_config_obj.get_option_path(cr, SUPERUSER_ID, ref, context=context)
elif ref_type == 'field':
values[item] = res_config_obj.get_option_name(cr, SUPERUSER_ID, ref, context=context)
# 3/ substitute and return the result
if (action_id):
return exceptions.RedirectWarning(msg % values, action_id, _('Go to the configuration panel'))
return exceptions.Warning(msg % values)
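# Illustrative sketch (editor's addition): a settings wizard relying on the
# naming conventions documented above. The model names, xml ids and module
# name below are assumptions chosen for illustration only.
class example_config_settings(osv.osv_memory):
    _name = 'example.config.settings'
    _inherit = 'res.config.settings'
    _columns = {
        # sets the global default of 'note' on the model 'example.model'
        'default_note': fields.char('Default note', default_model='example.model'),
        # adds/removes the implied group on the Employee group
        'group_multi_currency': fields.boolean('Allow multi currencies',
            implied_group='base.group_multi_currency'),
        # installs the 'crm' module when checked
        'module_crm': fields.boolean('Install CRM'),
        # handled by the set_/get_default_ methods below
        'other_option': fields.boolean('Other option'),
    }
    def set_other_option(self, cr, uid, ids, context=None):
        # called by execute() because the name starts with 'set_'
        pass
    def get_default_other_option(self, cr, uid, fields, context=None):
        # called by default_get() because the name starts with 'get_default_'
        return {'other_option': False}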
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| jmesteve/saas3 | openerp/addons/base/res/res_config.py | Python | agpl-3.0 | 29,051 |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "Lag1Trend", cycle_length = 5, transform = "Quantization", sigma = 0.0, exog_count = 0, ar_order = 12); | antoinecarme/pyaf | tests/artificial/transf_Quantization/trend_Lag1Trend/cycle_5/ar_12/test_artificial_128_Quantization_Lag1Trend_5_12_0.py | Python | bsd-3-clause | 267 |
import datetime
from poll.models import gettext_db, Response
from rapidsms.apps.base import AppBase
from rapidsms_httprouter.models import Message
from script.models import Script, ScriptProgress, ScriptSession, ScriptStep
from django.conf import settings
from script.utils.incoming import incoming_progress
from unregister.models import Blacklist
from uganda_common.utils import handle_dongle_sms
import logging
logger = logging.getLogger(__name__)
class App (AppBase):
def handle (self, message):
if handle_dongle_sms(message):
return True
if message.text.strip().lower() in [i.lower() for i in getattr(settings, 'OPT_OUT_WORDS', ['quit'])]:
if Blacklist.objects.filter(connection=message.connection).exists():
message.respond('You cannot send Quit to 6200 (EduTrac) more than once.')
return True
else:
if ScriptProgress.objects.filter(connection=message.connection, script__slug='edtrac_autoreg').exists():
# user is attempting to quit before completing registration
message.respond('Your registration is not complete, you can not quit at this point')
return True
Blacklist.objects.create(connection=message.connection)
ScriptProgress.objects.filter(connection=message.connection).delete() # delete all script progress since the user has quit
ScriptSession.objects.filter(connection=message.connection, end_time=None).delete() # the non closed out sessions need to be expunged as well
if (message.connection.contact):
message.connection.contact.active = False
message.connection.contact.save()
message.respond(getattr(settings, 'OPT_OUT_CONFIRMATION', 'Thank you for your contribution to EduTrac. To rejoin the system, send join to 6200'))
return True
elif message.text.strip().lower() in [i.lower() for i in getattr(settings, 'OPT_IN_WORDS', ['join'])]:
if not message.connection.contact:
if ScriptProgress.objects.filter(script__slug='edtrac_autoreg', connection=message.connection).count() == 0:
ScriptProgress.objects.create(script=Script.objects.get(slug="edtrac_autoreg"),\
connection=message.connection)
else:
message.respond("Your registration is not complete yet, you do not need to 'Join' again.")
elif Blacklist.objects.filter(connection=message.connection).exists():
Blacklist.objects.filter(connection=message.connection).delete()
if not ScriptProgress.objects.filter(script__slug='edtrac_autoreg', connection=message.connection).count():
ScriptProgress.objects.create(script=Script.objects.get(slug="edtrac_autoreg"),\
connection=message.connection)
else:
message.respond("You are already in the system and do not need to 'Join' again.")
return True
elif Blacklist.objects.filter(connection=message.connection).count():
return True
# when all else fails, quit!
else:
try:
progress = ScriptProgress.objects.filter(connection=message.connection, time__lte=datetime.datetime.now()).order_by('-time')
response_message_string = {"n":"The answer you have provided is not in the correct format. Use figures like 3 to answer the question",
"t":"The answer you have provided is not in the correct format. Please follow instructions that were given to you"}
if progress.count():
progress = progress[0]
script_last_step = ScriptStep.objects.filter(script=progress.script).order_by('-order')[0]
if progress.step and progress.step.order == script_last_step.order and progress.status == 'C':
return False
else:
response = incoming_progress(message)
if not progress.script.slug == 'edtrac_autoreg':
r = Response.objects.filter(contact__connection=message.connection,date__lte=datetime.datetime.now(),message__text=message.text).latest('date')
if r is not None:
if r.has_errors:
progress.status = ScriptProgress.PENDING
progress.save()
Message.mass_text(response_message_string[r.poll.type], [message.connection])
Message.mass_text(r.poll.question , [message.connection])
if response:
message.respond(gettext_db(response,progress.language))
return True
except ScriptProgress.DoesNotExist:
logger.debug("\nScript Progress object not found for message %s with connection %s" % (message,message.connection))
return False
| unicefuganda/edtrac | edtrac_project/rapidsms_edtrac/education/app.py | Python | bsd-3-clause | 5,200 |
"""Runs the Treadmill application runner."""
from __future__ import absolute_import
import signal
import logging
import os
import click
from .. import appmgr
from .. import utils
from .. import logcontext as lc
from ..appmgr import run as app_run
from ..appmgr import abort as app_abort
_LOGGER = logging.getLogger(__name__)
def init():
"""Top level command handler."""
@click.command()
@click.option('--approot', type=click.Path(exists=True),
envvar='TREADMILL_APPROOT', required=True)
@click.argument('container_dir', type=click.Path(exists=True))
def run(approot, container_dir):
"""Runs container given a container dir."""
# Intercept SIGTERM from s6 supervisor, so that initialization is not
# left in broken state.
with lc.LogContext(_LOGGER, os.path.basename(container_dir),
lc.ContainerAdapter) as log:
terminated = utils.make_signal_flag(signal.SIGTERM)
try:
log.info('run %r %r', approot, container_dir)
app_env = appmgr.AppEnvironment(approot)
watchdog = app_run.create_watchdog(app_env, container_dir)
                # Apply memory limits first thing after start, so that app_run
# does not consume memory from treadmill/core.
app_run.apply_cgroup_limits(app_env, container_dir)
if not terminated:
app_run.run(app_env, container_dir, watchdog, terminated)
# If we reach here, the application was terminated.
except Exception as exc: # pylint: disable=W0703
if not terminated:
log.critical('Failed to start, app will be aborted.',
exc_info=True)
app_abort.flag_aborted(app_env, container_dir, exc)
else:
log.info('Exception while handling term, ignore.',
exc_info=True)
finally:
watchdog.remove()
return run
| toenuff/treadmill | lib/python/treadmill/sproc/run.py | Python | apache-2.0 | 2,089 |
from abc import ABC, abstractmethod
from copy import deepcopy
from tinydb import TinyDB
from tinydb.middlewares import Middleware
iteritems = getattr(dict, 'iteritems', dict.items)
itervalues = getattr(dict, 'itervalues', dict.values)
class Serializer(ABC):
"""
The abstract base class for Serializers.
Allows TinyDB to handle arbitrary objects by running them through a list
    of registered serializers.
    Every serializer has to declare which class it can handle; a concrete
    example follows this class definition.
"""
@property
@abstractmethod
def OBJ_CLASS(self):
        raise NotImplementedError('To be overridden!')
@abstractmethod
def encode(self, obj):
"""
Encode an object.
        :param obj: the object to encode
        :return: the encoded object as a string
:rtype: str
"""
        raise NotImplementedError('To be overridden!')
@abstractmethod
def decode(self, s):
"""
Decode an object.
        :param s: the string to decode
        :type s: str
        :return: the decoded object
"""
        raise NotImplementedError('To be overridden!')
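# Illustrative sketch (editor's addition, not part of the original module):
# a concrete Serializer for datetime objects, showing the contract above.
# The date format string is an assumption, not something the library fixes.
from datetime import datetime
class DateTimeSerializer(Serializer):
    OBJ_CLASS = datetime  # the class of objects this serializer handles
    def encode(self, obj):
        # datetime -> string
        return obj.strftime('%Y-%m-%dT%H:%M:%S')
    def decode(self, s):
        # string -> datetime
        return datetime.strptime(s, '%Y-%m-%dT%H:%M:%S')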
def _enumerate_element(element):
"""
Make an element enumerable.
For dicts: return an iterator over the items (key, value).
For lists/tuples: return an iterator over (index, item)
"""
if isinstance(element, dict):
return iteritems(element)
else:
return enumerate(element)
def _decode_deep(element, serializer, tag):
"""
Recursively decode an element.
Takes into account elements in nested dicts, lists and tuples
"""
for key, value in _enumerate_element(element):
try:
if value.startswith(tag):
encoded = value[len(tag):]
element[key] = serializer.decode(encoded)
except AttributeError:
# Not a string
if isinstance(value, (dict, list, tuple)):
_decode_deep(value, serializer, tag)
def _encode_deep(element, serializer, tag, obj_class):
"""
Recursively encode an element.
Takes into account elements in nested dicts, lists and tuples
"""
for key, value in _enumerate_element(element):
if isinstance(value, obj_class):
encoded = serializer.encode(value)
element[key] = tag + encoded
elif isinstance(value, (dict, list, tuple)):
_encode_deep(value, serializer, tag, obj_class)
def has_encodable(element, obj_class):
"""
Check whether the element in question has an encodable item.
"""
found_encodable = False
for key, value in _enumerate_element(element):
if isinstance(value, (dict, list, tuple)):
found_encodable |= has_encodable(value, obj_class)
else:
found_encodable |= isinstance(value, obj_class)
return found_encodable
class SerializationMiddleware(Middleware):
"""
Provide custom serialization for TinyDB.
This middleware allows users of TinyDB to register custom serializations.
The serialized data will be passed to the wrapped storage and data that
is read from the storage will be deserialized.
"""
def __init__(self, storage_cls=TinyDB.default_storage_class):
super(SerializationMiddleware, self).__init__(storage_cls)
self._serializers = {}
def register_serializer(self, serializer, name):
"""
Register a new Serializer.
When reading from/writing to the underlying storage, TinyDB
will run all objects through the list of registered serializers
allowing each one to handle objects it recognizes.
        .. note:: The name has to be unique within this database instance.
            Re-using the same name will overwrite the old serializer.
            Also, registering a serializer will be reflected in all
            tables when reading/writing them. See the usage sketch at the
            end of this module.
:param serializer: an instance of the serializer
:type serializer: tinydb.serialize.Serializer
"""
self._serializers[name] = serializer
def read(self):
data = self.storage.read()
if data is None:
return None
for serializer_name in self._serializers:
serializer = self._serializers[serializer_name]
tag = '{{{0}}}:'.format(serializer_name) # E.g:'{TinyDate}:'
for table_name in data:
table = data[table_name]
for eid in table:
_decode_deep(data[table_name][eid], serializer, tag)
return data
def write(self, data):
# We only make a copy of the data if any serializer would overwrite
# existing data.
data_copied = False
for serializer_name in self._serializers:
# If no serializers are registered, this code will just look up
# the serializer list and continue. But if there are serializers,
# the inner loop will run very often.
# For that reason, the lookup of the serialized class is pulled
# out into the outer loop:
serializer = self._serializers[serializer_name]
obj_class = serializer.OBJ_CLASS
tag = '{{{0}}}:'.format(serializer_name)
for table_name in data:
table = data[table_name]
for eid in table:
# Before writing, copy data if we haven't already.
if not data_copied and has_encodable(data[table_name][eid],
obj_class):
data = deepcopy(data)
data_copied = True
item = data[table_name][eid]
_encode_deep(item, serializer, tag, obj_class)
self.storage.write(data)
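if __name__ == '__main__':
    # Usage sketch (editor's addition): wire the middleware into TinyDB and
    # register the serializer defined above. The database file name and the
    # stored document are assumptions chosen for illustration.
    serialization = SerializationMiddleware()
    serialization.register_serializer(DateTimeSerializer(), 'TinyDate')
    db = TinyDB('example_db.json', storage=serialization)
    db.insert({'created_at': datetime(2020, 1, 1, 12, 0)})
    print(db.all())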
| msiemens/tinydb-serialization | tinydb_serialization/__init__.py | Python | mit | 5,741 |
import keras
import pickle
import os.path
import numpy as np
# Hyperparameters
TWEET_LEN = 140
MAX_LEN = 200
# Other Constants
TWEET_FILE = "generated_trump_tweets.pickle"
MODEL_FILE = "Models/goodTrump(3-1024).h5"
BATCH_SIZE = 128
# Construct dictionaries to convert from tokens to strings and back
char_list = list("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789 ()#@,.:;-$?!/'\"\n")
num2char = dict(enumerate(char_list, 1))
num2char[0] = "<PAD>"
char2num = dict(zip(num2char.values(), num2char.keys()))
VOCAB_SIZE = len(char_list) + 1
# Load pre-trained model
model = keras.models.load_model(MODEL_FILE)
print("Restored model")
# Load already generated tweets
tweets = []
if os.path.isfile(TWEET_FILE):
tweets = pickle.load(open(TWEET_FILE, "rb"))
def generate_tweets(num_tweets):
# Construct one-hot character array and set initial characters
ix = np.zeros((num_tweets, 1, VOCAB_SIZE))
for a in ix:
a[0, np.random.randint(VOCAB_SIZE)] = 1
while True:
# Get the character-probabilities from the model
iy = model.predict(ix)[:, ix.shape[1]-1, :]
# Select the next characters based on the output of the model
c = np.array([np.random.choice(np.arange(VOCAB_SIZE), p=ps) for ps in iy])
        #c = np.array([np.argmax(ps) for ps in iy])  # greedy alternative: always pick the most likely character
# Break if all tweets are over (if we encounter the <PAD>-character) or have reached MAX_LEN
if np.all(c==0) or ix.shape[1] >= MAX_LEN:
break
# Add new characters to the character array
nx = np.eye(VOCAB_SIZE)[c].reshape(num_tweets, 1, VOCAB_SIZE)
ix = np.concatenate((ix, nx), axis=1)
# Convert the one-hot character array to a list of strings
tweets = ["".join(num2char[n] for n in np.argmax(tweet, axis=1) if n != 0) for tweet in ix]
return tweets
while True:
# Append the generated tweets to tweets array
tweets.extend(generate_tweets(BATCH_SIZE))
# Save the new array to TWEET_FILE
pickle.dump(tweets, open(TWEET_FILE, "wb"))
print("Wrote", len(tweets), "tweets") | DarkGuenther/TrumpBot | gen_trump_tweets.py | Python | mit | 2,081 |
# Carl is free software; you can redistribute it and/or modify it
# under the terms of the Revised BSD License; see LICENSE file for
# more details.
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.datasets import make_classification
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import brier_score_loss
from carl.learning import CalibratedClassifierCV
def check_calibration(method):
    # Adapted from sklearn/tests/test_calibration.py
# Authors: Alexandre Gramfort
# License: BSD 3 clause
n_samples = 100
X, y = make_classification(n_samples=2 * n_samples, n_features=6,
random_state=42)
X -= X.min() # MultinomialNB only allows positive X
# split train and test
X_train, y_train = X[:n_samples], y[:n_samples]
X_test, y_test = X[n_samples:], y[n_samples:]
# Naive-Bayes
clf = MultinomialNB().fit(X_train, y_train)
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
pc_clf = CalibratedClassifierCV(clf, cv=y.size + 1)
assert_raises(ValueError, pc_clf.fit, X, y)
pc_clf = CalibratedClassifierCV(clf, method=method, cv=2)
# Note that this fit overwrites the fit on the entire training set
pc_clf.fit(X_train, y_train)
prob_pos_pc_clf = pc_clf.predict_proba(X_test)[:, 1]
# Check that brier score has improved after calibration
assert_greater(brier_score_loss(y_test, prob_pos_clf),
brier_score_loss(y_test, prob_pos_pc_clf))
# Check invariance against relabeling [0, 1] -> [1, 2]
pc_clf.fit(X_train, y_train + 1)
prob_pos_pc_clf_relabeled = pc_clf.predict_proba(X_test)[:, 1]
assert_array_almost_equal(prob_pos_pc_clf,
prob_pos_pc_clf_relabeled)
# Check invariance against relabeling [0, 1] -> [-1, 1]
pc_clf.fit(X_train, 2 * y_train - 1)
prob_pos_pc_clf_relabeled = pc_clf.predict_proba(X_test)[:, 1]
assert_array_almost_equal(prob_pos_pc_clf,
prob_pos_pc_clf_relabeled)
# Check invariance against relabeling [0, 1] -> [1, 0]
pc_clf.fit(X_train, (y_train + 1) % 2)
prob_pos_pc_clf_relabeled = pc_clf.predict_proba(X_test)[:, 1]
if method == "sigmoid":
assert_array_almost_equal(prob_pos_pc_clf,
1 - prob_pos_pc_clf_relabeled)
else:
# Isotonic calibration is not invariant against relabeling
# but should improve in both cases
assert_greater(brier_score_loss(y_test, prob_pos_clf),
brier_score_loss((y_test + 1) % 2,
prob_pos_pc_clf_relabeled))
def test_calibration():
for method in ["isotonic", "sigmoid", "histogram", "kde",
"interpolated-isotonic"]:
yield check_calibration, method
| diana-hep/carl | tests/learning/test_calibration.py | Python | bsd-3-clause | 2,923 |
"""
Test the 'memory find' command.
"""
from __future__ import print_function
import lldb
from lldbsuite.test.lldbtest import *
import lldbsuite.test.lldbutil as lldbutil
from lldbsuite.test.decorators import *
class MemoryFindTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line number to break inside main().
self.line = line_number('main.cpp', '// break here')
def test_memory_find(self):
"""Test the 'memory find' command."""
self.build()
exe = self.getBuildArtifact("a.out")
self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
# Break in main() after the variables are assigned values.
lldbutil.run_break_set_by_file_and_line(
self, "main.cpp", self.line, num_expected_locations=1, loc_exact=True)
self.runCmd("run", RUN_SUCCEEDED)
# The stop reason of the thread should be breakpoint.
self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
substrs=['stopped', 'stop reason = breakpoint'])
# The breakpoint should have a hit count of 1.
self.expect("breakpoint list -f", BREAKPOINT_HIT_ONCE,
substrs=[' resolved, hit count = 1'])
# Test the memory find commands.
self.expect(
'memory find -s "in const" `stringdata` `stringdata+(int)strlen(stringdata)`',
substrs=[
'data found at location: 0x',
'69 6e 20 63',
'in const'])
self.expect(
'memory find -e "(uint8_t)0x22" `&bytedata[0]` `&bytedata[15]`',
substrs=[
'data found at location: 0x',
'22 33 44 55 66'])
self.expect(
'memory find -e "(uint8_t)0x22" `&bytedata[0]` `&bytedata[2]`',
substrs=['data not found within the range.'])
self.expect('memory find -s "nothere" `stringdata` `stringdata+5`',
substrs=['data not found within the range.'])
self.expect('memory find -s "nothere" `stringdata` `stringdata+10`',
substrs=['data not found within the range.'])
| apple/swift-lldb | packages/Python/lldbsuite/test/functionalities/memory/find/TestMemoryFind.py | Python | apache-2.0 | 2,243 |
# coding: utf-8
import mimetypes
import os
import sys
import traceback
from time import sleep
from urllib.request import urlopen, Request
PARENT_DIR = 'downloaded_imgs'
MY_EMAIL_ADDR = ''
INTERVAL = 0.1
def fetch_and_save(fname):
with open(fname) as f:
for line in f:
fields = line.strip().split('\t')
if len(fields) != 2:
continue
name, img_url = fields
sleep(INTERVAL)
img, ext = download(img_url)
if img is None or ext is None:
print('Skipped ' + img_url)
else:
save(name, img, ext)
print('Fetched ' + img_url)
def download(img_url):
req = Request(img_url, headers={'User-Agent': MY_EMAIL_ADDR})
try:
with urlopen(req, timeout=3) as p:
byte_content = p.read()
content_type = p.getheader('Content-Type')
if not content_type:
return None, None
ext = mimetypes.guess_extension(content_type.split(';')[0])
if not ext:
return None, None
if ext in ('.jpeg', '.jpe'):
ext = '.jpg'
return byte_content, ext
except:
print('Error in downloading ' + img_url)
print(traceback.format_exc())
return None, None
def save(name, byte_content, extension):
dir_to_save = os.path.join(PARENT_DIR, name)
if not os.path.exists(dir_to_save):
os.makedirs(dir_to_save)
new_id = len(os.listdir(dir_to_save)) + 1
    with open(os.path.join(dir_to_save, str(new_id) + extension), mode='wb') as fp:
fp.write(byte_content)
if __name__ == '__main__':
fetch_and_save(sys.argv[1])
| nknytk/face-classifier-cnn | tools/data_collector/dl_imgs.py | Python | mit | 1,730 |
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import timedelta, datetime
from distutils.version import LooseVersion
import sys
import nose
from numpy import nan
from numpy.random import randn
import numpy as np
from pandas.compat import lrange
from pandas import (compat, isnull, notnull, DataFrame, Series,
MultiIndex, date_range, Timestamp, _np_version_under1p11)
import pandas as pd
import pandas.core.common as com
import pandas.core.nanops as nanops
from pandas.util.testing import (assert_almost_equal,
assert_equal,
assert_series_equal,
assert_frame_equal,
assertRaisesRegexp)
import pandas.util.testing as tm
from pandas import _np_version_under1p9
from pandas.tests.frame.common import TestData
class TestDataFrameAnalytics(tm.TestCase, TestData):
_multiprocess_can_split_ = True
# ---------------------------------------------------------------------=
# Correlation and covariance
def test_corr_pearson(self):
tm._skip_if_no_scipy()
self.frame['A'][:5] = nan
self.frame['B'][5:10] = nan
self._check_method('pearson')
def test_corr_kendall(self):
tm._skip_if_no_scipy()
self.frame['A'][:5] = nan
self.frame['B'][5:10] = nan
self._check_method('kendall')
def test_corr_spearman(self):
tm._skip_if_no_scipy()
self.frame['A'][:5] = nan
self.frame['B'][5:10] = nan
self._check_method('spearman')
def _check_method(self, method='pearson', check_minp=False):
if not check_minp:
correls = self.frame.corr(method=method)
exp = self.frame['A'].corr(self.frame['C'], method=method)
assert_almost_equal(correls['A']['C'], exp)
else:
result = self.frame.corr(min_periods=len(self.frame) - 8)
expected = self.frame.corr()
expected.ix['A', 'B'] = expected.ix['B', 'A'] = nan
assert_frame_equal(result, expected)
def test_corr_non_numeric(self):
tm._skip_if_no_scipy()
self.frame['A'][:5] = nan
self.frame['B'][5:10] = nan
# exclude non-numeric types
result = self.mixed_frame.corr()
expected = self.mixed_frame.ix[:, ['A', 'B', 'C', 'D']].corr()
assert_frame_equal(result, expected)
def test_corr_nooverlap(self):
tm._skip_if_no_scipy()
# nothing in common
for meth in ['pearson', 'kendall', 'spearman']:
df = DataFrame({'A': [1, 1.5, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1.5, 1],
'C': [np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan]})
rs = df.corr(meth)
self.assertTrue(isnull(rs.ix['A', 'B']))
self.assertTrue(isnull(rs.ix['B', 'A']))
self.assertEqual(rs.ix['A', 'A'], 1)
self.assertEqual(rs.ix['B', 'B'], 1)
self.assertTrue(isnull(rs.ix['C', 'C']))
def test_corr_constant(self):
tm._skip_if_no_scipy()
# constant --> all NA
for meth in ['pearson', 'spearman']:
df = DataFrame({'A': [1, 1, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1, 1]})
rs = df.corr(meth)
self.assertTrue(isnull(rs.values).all())
def test_corr_int(self):
# dtypes other than float64 #1761
df3 = DataFrame({"a": [1, 2, 3, 4], "b": [1, 2, 3, 4]})
# it works!
df3.cov()
df3.corr()
def test_corr_int_and_boolean(self):
tm._skip_if_no_scipy()
# when dtypes of pandas series are different
# then ndarray will have dtype=object,
# so it need to be properly handled
df = DataFrame({"a": [True, False], "b": [1, 0]})
expected = DataFrame(np.ones((2, 2)), index=[
'a', 'b'], columns=['a', 'b'])
for meth in ['pearson', 'kendall', 'spearman']:
assert_frame_equal(df.corr(meth), expected)
def test_cov(self):
# min_periods no NAs (corner case)
expected = self.frame.cov()
result = self.frame.cov(min_periods=len(self.frame))
assert_frame_equal(expected, result)
result = self.frame.cov(min_periods=len(self.frame) + 1)
self.assertTrue(isnull(result.values).all())
# with NAs
frame = self.frame.copy()
frame['A'][:5] = nan
frame['B'][5:10] = nan
result = self.frame.cov(min_periods=len(self.frame) - 8)
expected = self.frame.cov()
expected.ix['A', 'B'] = np.nan
expected.ix['B', 'A'] = np.nan
# regular
self.frame['A'][:5] = nan
self.frame['B'][:10] = nan
cov = self.frame.cov()
assert_almost_equal(cov['A']['C'],
self.frame['A'].cov(self.frame['C']))
# exclude non-numeric types
result = self.mixed_frame.cov()
expected = self.mixed_frame.ix[:, ['A', 'B', 'C', 'D']].cov()
assert_frame_equal(result, expected)
# Single column frame
df = DataFrame(np.linspace(0.0, 1.0, 10))
result = df.cov()
expected = DataFrame(np.cov(df.values.T).reshape((1, 1)),
index=df.columns, columns=df.columns)
assert_frame_equal(result, expected)
df.ix[0] = np.nan
result = df.cov()
expected = DataFrame(np.cov(df.values[1:].T).reshape((1, 1)),
index=df.columns, columns=df.columns)
assert_frame_equal(result, expected)
def test_corrwith(self):
a = self.tsframe
noise = Series(randn(len(a)), index=a.index)
b = self.tsframe.add(noise, axis=0)
# make sure order does not matter
b = b.reindex(columns=b.columns[::-1], index=b.index[::-1][10:])
del b['B']
colcorr = a.corrwith(b, axis=0)
assert_almost_equal(colcorr['A'], a['A'].corr(b['A']))
rowcorr = a.corrwith(b, axis=1)
assert_series_equal(rowcorr, a.T.corrwith(b.T, axis=0))
dropped = a.corrwith(b, axis=0, drop=True)
assert_almost_equal(dropped['A'], a['A'].corr(b['A']))
self.assertNotIn('B', dropped)
dropped = a.corrwith(b, axis=1, drop=True)
self.assertNotIn(a.index[-1], dropped.index)
# non time-series data
index = ['a', 'b', 'c', 'd', 'e']
columns = ['one', 'two', 'three', 'four']
df1 = DataFrame(randn(5, 4), index=index, columns=columns)
df2 = DataFrame(randn(4, 4), index=index[:4], columns=columns)
correls = df1.corrwith(df2, axis=1)
for row in index[:4]:
assert_almost_equal(correls[row], df1.ix[row].corr(df2.ix[row]))
def test_corrwith_with_objects(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame()
cols = ['A', 'B', 'C', 'D']
df1['obj'] = 'foo'
df2['obj'] = 'bar'
result = df1.corrwith(df2)
expected = df1.ix[:, cols].corrwith(df2.ix[:, cols])
assert_series_equal(result, expected)
result = df1.corrwith(df2, axis=1)
expected = df1.ix[:, cols].corrwith(df2.ix[:, cols], axis=1)
assert_series_equal(result, expected)
def test_corrwith_series(self):
result = self.tsframe.corrwith(self.tsframe['A'])
expected = self.tsframe.apply(self.tsframe['A'].corr)
assert_series_equal(result, expected)
def test_corrwith_matches_corrcoef(self):
df1 = DataFrame(np.arange(10000), columns=['a'])
df2 = DataFrame(np.arange(10000) ** 2, columns=['a'])
c1 = df1.corrwith(df2)['a']
c2 = np.corrcoef(df1['a'], df2['a'])[0][1]
assert_almost_equal(c1, c2)
self.assertTrue(c1 < 1)
def test_bool_describe_in_mixed_frame(self):
df = DataFrame({
'string_data': ['a', 'b', 'c', 'd', 'e'],
'bool_data': [True, True, False, False, False],
'int_data': [10, 20, 30, 40, 50],
})
# Integer data are included in .describe() output,
# Boolean and string data are not.
result = df.describe()
expected = DataFrame({'int_data': [5, 30, df.int_data.std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
assert_frame_equal(result, expected)
# Top value is a boolean value that is False
result = df.describe(include=['bool'])
expected = DataFrame({'bool_data': [5, 2, False, 3]},
index=['count', 'unique', 'top', 'freq'])
assert_frame_equal(result, expected)
def test_reduce_mixed_frame(self):
# GH 6806
df = DataFrame({
'bool_data': [True, True, False, False, False],
'int_data': [10, 20, 30, 40, 50],
'string_data': ['a', 'b', 'c', 'd', 'e'],
})
df.reindex(columns=['bool_data', 'int_data', 'string_data'])
test = df.sum(axis=0)
assert_almost_equal(test.values, [2, 150, 'abcde'])
assert_series_equal(test, df.T.sum(axis=1))
def test_count(self):
f = lambda s: notnull(s).sum()
self._check_stat_op('count', f,
has_skipna=False,
has_numeric_only=True,
check_dtype=False,
check_dates=True)
# corner case
frame = DataFrame()
ct1 = frame.count(1)
tm.assertIsInstance(ct1, Series)
ct2 = frame.count(0)
tm.assertIsInstance(ct2, Series)
# GH #423
df = DataFrame(index=lrange(10))
result = df.count(1)
expected = Series(0, index=df.index)
assert_series_equal(result, expected)
df = DataFrame(columns=lrange(10))
result = df.count(0)
expected = Series(0, index=df.columns)
assert_series_equal(result, expected)
df = DataFrame()
result = df.count()
expected = Series(0, index=[])
assert_series_equal(result, expected)
def test_sum(self):
self._check_stat_op('sum', np.sum, has_numeric_only=True)
# mixed types (with upcasting happening)
self._check_stat_op('sum', np.sum,
frame=self.mixed_float.astype('float32'),
has_numeric_only=True, check_dtype=False,
check_less_precise=True)
def test_stat_operators_attempt_obj_array(self):
data = {
'a': [-0.00049987540199591344, -0.0016467257772919831,
0.00067695870775883013],
'b': [-0, -0, 0.0],
'c': [0.00031111847529610595, 0.0014902627951905339,
-0.00094099200035979691]
}
df1 = DataFrame(data, index=['foo', 'bar', 'baz'],
dtype='O')
methods = ['sum', 'mean', 'prod', 'var', 'std', 'skew', 'min', 'max']
# GH #676
df2 = DataFrame({0: [np.nan, 2], 1: [np.nan, 3],
2: [np.nan, 4]}, dtype=object)
for df in [df1, df2]:
for meth in methods:
self.assertEqual(df.values.dtype, np.object_)
result = getattr(df, meth)(1)
expected = getattr(df.astype('f8'), meth)(1)
if not tm._incompat_bottleneck_version(meth):
assert_series_equal(result, expected)
def test_mean(self):
self._check_stat_op('mean', np.mean, check_dates=True)
def test_product(self):
self._check_stat_op('product', np.prod)
def test_median(self):
def wrapper(x):
if isnull(x).any():
return np.nan
return np.median(x)
self._check_stat_op('median', wrapper, check_dates=True)
def test_min(self):
self._check_stat_op('min', np.min, check_dates=True)
self._check_stat_op('min', np.min, frame=self.intframe)
def test_cummin(self):
self.tsframe.ix[5:10, 0] = nan
self.tsframe.ix[10:15, 1] = nan
self.tsframe.ix[15:, 2] = nan
# axis = 0
cummin = self.tsframe.cummin()
expected = self.tsframe.apply(Series.cummin)
assert_frame_equal(cummin, expected)
# axis = 1
cummin = self.tsframe.cummin(axis=1)
expected = self.tsframe.apply(Series.cummin, axis=1)
assert_frame_equal(cummin, expected)
# it works
df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
result = df.cummin() # noqa
# fix issue
cummin_xs = self.tsframe.cummin(axis=1)
self.assertEqual(np.shape(cummin_xs), np.shape(self.tsframe))
def test_cummax(self):
self.tsframe.ix[5:10, 0] = nan
self.tsframe.ix[10:15, 1] = nan
self.tsframe.ix[15:, 2] = nan
# axis = 0
cummax = self.tsframe.cummax()
expected = self.tsframe.apply(Series.cummax)
assert_frame_equal(cummax, expected)
# axis = 1
cummax = self.tsframe.cummax(axis=1)
expected = self.tsframe.apply(Series.cummax, axis=1)
assert_frame_equal(cummax, expected)
# it works
df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
result = df.cummax() # noqa
# fix issue
cummax_xs = self.tsframe.cummax(axis=1)
self.assertEqual(np.shape(cummax_xs), np.shape(self.tsframe))
def test_max(self):
self._check_stat_op('max', np.max, check_dates=True)
self._check_stat_op('max', np.max, frame=self.intframe)
def test_mad(self):
f = lambda x: np.abs(x - x.mean()).mean()
self._check_stat_op('mad', f)
def test_var_std(self):
alt = lambda x: np.var(x, ddof=1)
self._check_stat_op('var', alt)
alt = lambda x: np.std(x, ddof=1)
self._check_stat_op('std', alt)
result = self.tsframe.std(ddof=4)
expected = self.tsframe.apply(lambda x: x.std(ddof=4))
assert_almost_equal(result, expected)
result = self.tsframe.var(ddof=4)
expected = self.tsframe.apply(lambda x: x.var(ddof=4))
assert_almost_equal(result, expected)
arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
result = nanops.nanvar(arr, axis=0)
self.assertFalse((result < 0).any())
if nanops._USE_BOTTLENECK:
nanops._USE_BOTTLENECK = False
result = nanops.nanvar(arr, axis=0)
self.assertFalse((result < 0).any())
nanops._USE_BOTTLENECK = True
def test_numeric_only_flag(self):
# GH #9201
methods = ['sem', 'var', 'std']
df1 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])
# set one entry to a number in str format
df1.ix[0, 'foo'] = '100'
df2 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])
# set one entry to a non-number str
df2.ix[0, 'foo'] = 'a'
for meth in methods:
result = getattr(df1, meth)(axis=1, numeric_only=True)
expected = getattr(df1[['bar', 'baz']], meth)(axis=1)
assert_series_equal(expected, result)
result = getattr(df2, meth)(axis=1, numeric_only=True)
expected = getattr(df2[['bar', 'baz']], meth)(axis=1)
assert_series_equal(expected, result)
# df1 has all numbers, df2 has a letter inside
self.assertRaises(TypeError, lambda: getattr(df1, meth)
(axis=1, numeric_only=False))
self.assertRaises(TypeError, lambda: getattr(df2, meth)
(axis=1, numeric_only=False))
def test_quantile(self):
from numpy import percentile
q = self.tsframe.quantile(0.1, axis=0)
self.assertEqual(q['A'], percentile(self.tsframe['A'], 10))
q = self.tsframe.quantile(0.9, axis=1)
q = self.intframe.quantile(0.1)
self.assertEqual(q['A'], percentile(self.intframe['A'], 10))
# test degenerate case
q = DataFrame({'x': [], 'y': []}).quantile(0.1, axis=0)
assert(np.isnan(q['x']) and np.isnan(q['y']))
# non-numeric exclusion
df = DataFrame({'col1': ['A', 'A', 'B', 'B'], 'col2': [1, 2, 3, 4]})
rs = df.quantile(0.5)
xp = df.median()
assert_series_equal(rs, xp)
# axis
df = DataFrame({"A": [1, 2, 3], "B": [2, 3, 4]}, index=[1, 2, 3])
result = df.quantile(.5, axis=1)
expected = Series([1.5, 2.5, 3.5], index=[1, 2, 3])
assert_series_equal(result, expected)
result = df.quantile([.5, .75], axis=1)
expected = DataFrame({1: [1.5, 1.75], 2: [2.5, 2.75],
3: [3.5, 3.75]}, index=[0.5, 0.75])
assert_frame_equal(result, expected, check_index_type=True)
# We may want to break API in the future to change this
# so that we exclude non-numeric along the same axis
# See GH #7312
df = DataFrame([[1, 2, 3],
['a', 'b', 4]])
result = df.quantile(.5, axis=1)
expected = Series([3., 4.], index=[0, 1])
assert_series_equal(result, expected)
def test_quantile_axis_parameter(self):
# GH 9543/9544
df = DataFrame({"A": [1, 2, 3], "B": [2, 3, 4]}, index=[1, 2, 3])
result = df.quantile(.5, axis=0)
expected = Series([2., 3.], index=["A", "B"])
assert_series_equal(result, expected)
expected = df.quantile(.5, axis="index")
assert_series_equal(result, expected)
result = df.quantile(.5, axis=1)
expected = Series([1.5, 2.5, 3.5], index=[1, 2, 3])
assert_series_equal(result, expected)
result = df.quantile(.5, axis="columns")
assert_series_equal(result, expected)
self.assertRaises(ValueError, df.quantile, 0.1, axis=-1)
self.assertRaises(ValueError, df.quantile, 0.1, axis="column")
def test_quantile_interpolation(self):
# GH #10174
if _np_version_under1p9:
raise nose.SkipTest("Numpy version under 1.9")
from numpy import percentile
# interpolation = linear (default case)
q = self.tsframe.quantile(0.1, axis=0, interpolation='linear')
self.assertEqual(q['A'], percentile(self.tsframe['A'], 10))
q = self.intframe.quantile(0.1)
self.assertEqual(q['A'], percentile(self.intframe['A'], 10))
# test with and without interpolation keyword
q1 = self.intframe.quantile(0.1)
self.assertEqual(q1['A'], np.percentile(self.intframe['A'], 10))
assert_series_equal(q, q1)
# interpolation method other than default linear
df = DataFrame({"A": [1, 2, 3], "B": [2, 3, 4]}, index=[1, 2, 3])
result = df.quantile(.5, axis=1, interpolation='nearest')
expected = Series([1., 2., 3.], index=[1, 2, 3])
assert_series_equal(result, expected)
# axis
result = df.quantile([.5, .75], axis=1, interpolation='lower')
expected = DataFrame({1: [1., 1.], 2: [2., 2.],
3: [3., 3.]}, index=[0.5, 0.75])
assert_frame_equal(result, expected)
# test degenerate case
df = DataFrame({'x': [], 'y': []})
q = df.quantile(0.1, axis=0, interpolation='higher')
assert(np.isnan(q['x']) and np.isnan(q['y']))
# multi
df = DataFrame([[1, 1, 1], [2, 2, 2], [3, 3, 3]],
columns=['a', 'b', 'c'])
result = df.quantile([.25, .5], interpolation='midpoint')
# https://github.com/numpy/numpy/issues/7163
if _np_version_under1p11:
expected = DataFrame([[1.5, 1.5, 1.5], [2.5, 2.5, 2.5]],
index=[.25, .5], columns=['a', 'b', 'c'])
else:
expected = DataFrame([[1.5, 1.5, 1.5], [2.0, 2.0, 2.0]],
index=[.25, .5], columns=['a', 'b', 'c'])
assert_frame_equal(result, expected)
def test_quantile_interpolation_np_lt_1p9(self):
# GH #10174
if not _np_version_under1p9:
raise nose.SkipTest("Numpy version is greater than 1.9")
from numpy import percentile
# interpolation = linear (default case)
q = self.tsframe.quantile(0.1, axis=0, interpolation='linear')
self.assertEqual(q['A'], percentile(self.tsframe['A'], 10))
q = self.intframe.quantile(0.1)
self.assertEqual(q['A'], percentile(self.intframe['A'], 10))
# test with and without interpolation keyword
q1 = self.intframe.quantile(0.1)
self.assertEqual(q1['A'], np.percentile(self.intframe['A'], 10))
assert_series_equal(q, q1)
# interpolation method other than default linear
expErrMsg = "Interpolation methods other than linear"
df = DataFrame({"A": [1, 2, 3], "B": [2, 3, 4]}, index=[1, 2, 3])
with assertRaisesRegexp(ValueError, expErrMsg):
df.quantile(.5, axis=1, interpolation='nearest')
with assertRaisesRegexp(ValueError, expErrMsg):
df.quantile([.5, .75], axis=1, interpolation='lower')
# test degenerate case
df = DataFrame({'x': [], 'y': []})
with assertRaisesRegexp(ValueError, expErrMsg):
q = df.quantile(0.1, axis=0, interpolation='higher')
# multi
df = DataFrame([[1, 1, 1], [2, 2, 2], [3, 3, 3]],
columns=['a', 'b', 'c'])
with assertRaisesRegexp(ValueError, expErrMsg):
df.quantile([.25, .5], interpolation='midpoint')
def test_quantile_multi(self):
df = DataFrame([[1, 1, 1], [2, 2, 2], [3, 3, 3]],
columns=['a', 'b', 'c'])
result = df.quantile([.25, .5])
expected = DataFrame([[1.5, 1.5, 1.5], [2., 2., 2.]],
index=[.25, .5], columns=['a', 'b', 'c'])
assert_frame_equal(result, expected)
# axis = 1
result = df.quantile([.25, .5], axis=1)
expected = DataFrame([[1.5, 1.5, 1.5], [2., 2., 2.]],
index=[.25, .5], columns=[0, 1, 2])
# empty
result = DataFrame({'x': [], 'y': []}).quantile([0.1, .9], axis=0)
expected = DataFrame({'x': [np.nan, np.nan], 'y': [np.nan, np.nan]},
index=[.1, .9])
assert_frame_equal(result, expected)
def test_quantile_datetime(self):
df = DataFrame({'a': pd.to_datetime(['2010', '2011']), 'b': [0, 5]})
# exclude datetime
result = df.quantile(.5)
expected = Series([2.5], index=['b'])
# datetime
result = df.quantile(.5, numeric_only=False)
expected = Series([Timestamp('2010-07-02 12:00:00'), 2.5],
index=['a', 'b'])
assert_series_equal(result, expected)
# datetime w/ multi
result = df.quantile([.5], numeric_only=False)
expected = DataFrame([[Timestamp('2010-07-02 12:00:00'), 2.5]],
index=[.5], columns=['a', 'b'])
assert_frame_equal(result, expected)
# axis = 1
df['c'] = pd.to_datetime(['2011', '2012'])
result = df[['a', 'c']].quantile(.5, axis=1, numeric_only=False)
expected = Series([Timestamp('2010-07-02 12:00:00'),
Timestamp('2011-07-02 12:00:00')],
index=[0, 1])
assert_series_equal(result, expected)
result = df[['a', 'c']].quantile([.5], axis=1, numeric_only=False)
expected = DataFrame([[Timestamp('2010-07-02 12:00:00'),
Timestamp('2011-07-02 12:00:00')]],
index=[0.5], columns=[0, 1])
assert_frame_equal(result, expected)
def test_quantile_invalid(self):
msg = 'percentiles should all be in the interval \\[0, 1\\]'
for invalid in [-1, 2, [0.5, -1], [0.5, 2]]:
with tm.assertRaisesRegexp(ValueError, msg):
self.tsframe.quantile(invalid)
def test_cumsum(self):
self.tsframe.ix[5:10, 0] = nan
self.tsframe.ix[10:15, 1] = nan
self.tsframe.ix[15:, 2] = nan
# axis = 0
cumsum = self.tsframe.cumsum()
expected = self.tsframe.apply(Series.cumsum)
assert_frame_equal(cumsum, expected)
# axis = 1
cumsum = self.tsframe.cumsum(axis=1)
expected = self.tsframe.apply(Series.cumsum, axis=1)
assert_frame_equal(cumsum, expected)
# works
df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
result = df.cumsum() # noqa
# fix issue
cumsum_xs = self.tsframe.cumsum(axis=1)
self.assertEqual(np.shape(cumsum_xs), np.shape(self.tsframe))
def test_cumprod(self):
self.tsframe.ix[5:10, 0] = nan
self.tsframe.ix[10:15, 1] = nan
self.tsframe.ix[15:, 2] = nan
# axis = 0
cumprod = self.tsframe.cumprod()
expected = self.tsframe.apply(Series.cumprod)
assert_frame_equal(cumprod, expected)
# axis = 1
cumprod = self.tsframe.cumprod(axis=1)
expected = self.tsframe.apply(Series.cumprod, axis=1)
assert_frame_equal(cumprod, expected)
# fix issue
cumprod_xs = self.tsframe.cumprod(axis=1)
self.assertEqual(np.shape(cumprod_xs), np.shape(self.tsframe))
# ints
df = self.tsframe.fillna(0).astype(int)
df.cumprod(0)
df.cumprod(1)
# ints32
df = self.tsframe.fillna(0).astype(np.int32)
df.cumprod(0)
df.cumprod(1)
def test_rank(self):
tm._skip_if_no_scipy()
from scipy.stats import rankdata
self.frame['A'][::2] = np.nan
self.frame['B'][::3] = np.nan
self.frame['C'][::4] = np.nan
self.frame['D'][::5] = np.nan
ranks0 = self.frame.rank()
ranks1 = self.frame.rank(1)
mask = np.isnan(self.frame.values)
fvals = self.frame.fillna(np.inf).values
exp0 = np.apply_along_axis(rankdata, 0, fvals)
exp0[mask] = np.nan
exp1 = np.apply_along_axis(rankdata, 1, fvals)
exp1[mask] = np.nan
assert_almost_equal(ranks0.values, exp0)
assert_almost_equal(ranks1.values, exp1)
# integers
df = DataFrame(np.random.randint(0, 5, size=40).reshape((10, 4)))
result = df.rank()
exp = df.astype(float).rank()
assert_frame_equal(result, exp)
result = df.rank(1)
exp = df.astype(float).rank(1)
assert_frame_equal(result, exp)
def test_rank2(self):
df = DataFrame([[1, 3, 2], [1, 2, 3]])
expected = DataFrame([[1.0, 3.0, 2.0], [1, 2, 3]]) / 3.0
result = df.rank(1, pct=True)
assert_frame_equal(result, expected)
df = DataFrame([[1, 3, 2], [1, 2, 3]])
expected = df.rank(0) / 2.0
result = df.rank(0, pct=True)
assert_frame_equal(result, expected)
df = DataFrame([['b', 'c', 'a'], ['a', 'c', 'b']])
expected = DataFrame([[2.0, 3.0, 1.0], [1, 3, 2]])
result = df.rank(1, numeric_only=False)
assert_frame_equal(result, expected)
expected = DataFrame([[2.0, 1.5, 1.0], [1, 1.5, 2]])
result = df.rank(0, numeric_only=False)
assert_frame_equal(result, expected)
df = DataFrame([['b', np.nan, 'a'], ['a', 'c', 'b']])
expected = DataFrame([[2.0, nan, 1.0], [1.0, 3.0, 2.0]])
result = df.rank(1, numeric_only=False)
assert_frame_equal(result, expected)
expected = DataFrame([[2.0, nan, 1.0], [1.0, 1.0, 2.0]])
result = df.rank(0, numeric_only=False)
assert_frame_equal(result, expected)
# f7u12, this does not work without extensive workaround
data = [[datetime(2001, 1, 5), nan, datetime(2001, 1, 2)],
[datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 1)]]
df = DataFrame(data)
# check the rank
expected = DataFrame([[2., nan, 1.],
[2., 3., 1.]])
result = df.rank(1, numeric_only=False, ascending=True)
assert_frame_equal(result, expected)
expected = DataFrame([[1., nan, 2.],
[2., 1., 3.]])
result = df.rank(1, numeric_only=False, ascending=False)
assert_frame_equal(result, expected)
# mixed-type frames
self.mixed_frame['datetime'] = datetime.now()
self.mixed_frame['timedelta'] = timedelta(days=1, seconds=1)
result = self.mixed_frame.rank(1)
expected = self.mixed_frame.rank(1, numeric_only=True)
assert_frame_equal(result, expected)
df = DataFrame({"a": [1e-20, -5, 1e-20 + 1e-40, 10,
1e60, 1e80, 1e-30]})
exp = DataFrame({"a": [3.5, 1., 3.5, 5., 6., 7., 2.]})
assert_frame_equal(df.rank(), exp)
def test_rank_na_option(self):
tm._skip_if_no_scipy()
from scipy.stats import rankdata
self.frame['A'][::2] = np.nan
self.frame['B'][::3] = np.nan
self.frame['C'][::4] = np.nan
self.frame['D'][::5] = np.nan
# bottom
ranks0 = self.frame.rank(na_option='bottom')
ranks1 = self.frame.rank(1, na_option='bottom')
fvals = self.frame.fillna(np.inf).values
exp0 = np.apply_along_axis(rankdata, 0, fvals)
exp1 = np.apply_along_axis(rankdata, 1, fvals)
assert_almost_equal(ranks0.values, exp0)
assert_almost_equal(ranks1.values, exp1)
# top
ranks0 = self.frame.rank(na_option='top')
ranks1 = self.frame.rank(1, na_option='top')
fval0 = self.frame.fillna((self.frame.min() - 1).to_dict()).values
fval1 = self.frame.T
fval1 = fval1.fillna((fval1.min() - 1).to_dict()).T
fval1 = fval1.fillna(np.inf).values
exp0 = np.apply_along_axis(rankdata, 0, fval0)
exp1 = np.apply_along_axis(rankdata, 1, fval1)
assert_almost_equal(ranks0.values, exp0)
assert_almost_equal(ranks1.values, exp1)
# descending
# bottom
ranks0 = self.frame.rank(na_option='top', ascending=False)
ranks1 = self.frame.rank(1, na_option='top', ascending=False)
fvals = self.frame.fillna(np.inf).values
exp0 = np.apply_along_axis(rankdata, 0, -fvals)
exp1 = np.apply_along_axis(rankdata, 1, -fvals)
assert_almost_equal(ranks0.values, exp0)
assert_almost_equal(ranks1.values, exp1)
# descending
# top
ranks0 = self.frame.rank(na_option='bottom', ascending=False)
ranks1 = self.frame.rank(1, na_option='bottom', ascending=False)
fval0 = self.frame.fillna((self.frame.min() - 1).to_dict()).values
fval1 = self.frame.T
fval1 = fval1.fillna((fval1.min() - 1).to_dict()).T
fval1 = fval1.fillna(np.inf).values
exp0 = np.apply_along_axis(rankdata, 0, -fval0)
exp1 = np.apply_along_axis(rankdata, 1, -fval1)
assert_almost_equal(ranks0.values, exp0)
assert_almost_equal(ranks1.values, exp1)
def test_rank_axis(self):
# check if using axes' names gives the same result
df = pd.DataFrame([[2, 1], [4, 3]])
assert_frame_equal(df.rank(axis=0), df.rank(axis='index'))
assert_frame_equal(df.rank(axis=1), df.rank(axis='columns'))
def test_sem(self):
alt = lambda x: np.std(x, ddof=1) / np.sqrt(len(x))
self._check_stat_op('sem', alt)
result = self.tsframe.sem(ddof=4)
expected = self.tsframe.apply(
lambda x: x.std(ddof=4) / np.sqrt(len(x)))
assert_almost_equal(result, expected)
arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
result = nanops.nansem(arr, axis=0)
self.assertFalse((result < 0).any())
if nanops._USE_BOTTLENECK:
nanops._USE_BOTTLENECK = False
result = nanops.nansem(arr, axis=0)
self.assertFalse((result < 0).any())
nanops._USE_BOTTLENECK = True
def test_skew(self):
tm._skip_if_no_scipy()
from scipy.stats import skew
def alt(x):
if len(x) < 3:
return np.nan
return skew(x, bias=False)
self._check_stat_op('skew', alt)
def test_kurt(self):
tm._skip_if_no_scipy()
from scipy.stats import kurtosis
def alt(x):
if len(x) < 4:
return np.nan
return kurtosis(x, bias=False)
self._check_stat_op('kurt', alt)
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
labels=[[0, 0, 0, 0, 0, 0],
[0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]])
df = DataFrame(np.random.randn(6, 3), index=index)
kurt = df.kurt()
kurt2 = df.kurt(level=0).xs('bar')
assert_series_equal(kurt, kurt2, check_names=False)
self.assertTrue(kurt.name is None)
self.assertEqual(kurt2.name, 'bar')
def _check_stat_op(self, name, alternative, frame=None, has_skipna=True,
has_numeric_only=False, check_dtype=True,
check_dates=False, check_less_precise=False):
if frame is None:
frame = self.frame
# set some NAs
frame.ix[5:10] = np.nan
frame.ix[15:20, -2:] = np.nan
f = getattr(frame, name)
if check_dates:
df = DataFrame({'b': date_range('1/1/2001', periods=2)})
_f = getattr(df, name)
result = _f()
self.assertIsInstance(result, Series)
df['a'] = lrange(len(df))
result = getattr(df, name)()
self.assertIsInstance(result, Series)
self.assertTrue(len(result))
if has_skipna:
def skipna_wrapper(x):
nona = x.dropna()
if len(nona) == 0:
return np.nan
return alternative(nona)
def wrapper(x):
return alternative(x.values)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
assert_series_equal(result0, frame.apply(wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
# HACK: win32
assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False,
check_less_precise=check_less_precise)
else:
skipna_wrapper = alternative
wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
assert_series_equal(result0, frame.apply(skipna_wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
if not tm._incompat_bottleneck_version(name):
assert_series_equal(result1, frame.apply(skipna_wrapper, axis=1),
check_dtype=False,
check_less_precise=check_less_precise)
# check dtypes
if check_dtype:
lcd_dtype = frame.values.dtype
self.assertEqual(lcd_dtype, result0.dtype)
self.assertEqual(lcd_dtype, result1.dtype)
# result = f(axis=1)
# comp = frame.apply(alternative, axis=1).reindex(result.index)
# assert_series_equal(result, comp)
# bad axis
assertRaisesRegexp(ValueError, 'No axis named 2', f, axis=2)
# make sure works on mixed-type frame
getattr(self.mixed_frame, name)(axis=0)
getattr(self.mixed_frame, name)(axis=1)
if has_numeric_only:
getattr(self.mixed_frame, name)(axis=0, numeric_only=True)
getattr(self.mixed_frame, name)(axis=1, numeric_only=True)
getattr(self.frame, name)(axis=0, numeric_only=False)
getattr(self.frame, name)(axis=1, numeric_only=False)
# all NA case
if has_skipna:
all_na = self.frame * np.NaN
r0 = getattr(all_na, name)(axis=0)
r1 = getattr(all_na, name)(axis=1)
if not tm._incompat_bottleneck_version(name):
self.assertTrue(np.isnan(r0).all())
self.assertTrue(np.isnan(r1).all())
def test_mode(self):
df = pd.DataFrame({"A": [12, 12, 11, 12, 19, 11],
"B": [10, 10, 10, np.nan, 3, 4],
"C": [8, 8, 8, 9, 9, 9],
"D": np.arange(6, dtype='int64'),
"E": [8, 8, 1, 1, 3, 3]})
assert_frame_equal(df[["A"]].mode(),
pd.DataFrame({"A": [12]}))
expected = pd.Series([], dtype='int64', name='D').to_frame()
assert_frame_equal(df[["D"]].mode(), expected)
expected = pd.Series([1, 3, 8], dtype='int64', name='E').to_frame()
assert_frame_equal(df[["E"]].mode(), expected)
assert_frame_equal(df[["A", "B"]].mode(),
pd.DataFrame({"A": [12], "B": [10.]}))
assert_frame_equal(df.mode(),
pd.DataFrame({"A": [12, np.nan, np.nan],
"B": [10, np.nan, np.nan],
"C": [8, 9, np.nan],
"D": [np.nan, np.nan, np.nan],
"E": [1, 3, 8]}))
# outputs in sorted order
df["C"] = list(reversed(df["C"]))
com.pprint_thing(df["C"])
com.pprint_thing(df["C"].mode())
a, b = (df[["A", "B", "C"]].mode(),
pd.DataFrame({"A": [12, np.nan],
"B": [10, np.nan],
"C": [8, 9]}))
com.pprint_thing(a)
com.pprint_thing(b)
assert_frame_equal(a, b)
# should work with heterogeneous types
df = pd.DataFrame({"A": np.arange(6, dtype='int64'),
"B": pd.date_range('2011', periods=6),
"C": list('abcdef')})
exp = pd.DataFrame({"A": pd.Series([], dtype=df["A"].dtype),
"B": pd.Series([], dtype=df["B"].dtype),
"C": pd.Series([], dtype=df["C"].dtype)})
assert_frame_equal(df.mode(), exp)
# and also when not empty
df.loc[1, "A"] = 0
df.loc[4, "B"] = df.loc[3, "B"]
df.loc[5, "C"] = 'e'
exp = pd.DataFrame({"A": pd.Series([0], dtype=df["A"].dtype),
"B": pd.Series([df.loc[3, "B"]],
dtype=df["B"].dtype),
"C": pd.Series(['e'], dtype=df["C"].dtype)})
assert_frame_equal(df.mode(), exp)
def test_operators_timedelta64(self):
from datetime import timedelta
df = DataFrame(dict(A=date_range('2012-1-1', periods=3, freq='D'),
B=date_range('2012-1-2', periods=3, freq='D'),
C=Timestamp('20120101') -
timedelta(minutes=5, seconds=5)))
diffs = DataFrame(dict(A=df['A'] - df['C'],
B=df['A'] - df['B']))
# min
result = diffs.min()
self.assertEqual(result[0], diffs.ix[0, 'A'])
self.assertEqual(result[1], diffs.ix[0, 'B'])
result = diffs.min(axis=1)
self.assertTrue((result == diffs.ix[0, 'B']).all())
# max
result = diffs.max()
self.assertEqual(result[0], diffs.ix[2, 'A'])
self.assertEqual(result[1], diffs.ix[2, 'B'])
result = diffs.max(axis=1)
self.assertTrue((result == diffs['A']).all())
# abs
result = diffs.abs()
result2 = abs(diffs)
expected = DataFrame(dict(A=df['A'] - df['C'],
B=df['B'] - df['A']))
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
# mixed frame
mixed = diffs.copy()
mixed['C'] = 'foo'
mixed['D'] = 1
mixed['E'] = 1.
mixed['F'] = Timestamp('20130101')
# results in an object array
from pandas.tseries.timedeltas import (
_coerce_scalar_to_timedelta_type as _coerce)
result = mixed.min()
expected = Series([_coerce(timedelta(seconds=5 * 60 + 5)),
_coerce(timedelta(days=-1)),
'foo', 1, 1.0,
Timestamp('20130101')],
index=mixed.columns)
assert_series_equal(result, expected)
# excludes numeric
result = mixed.min(axis=1)
expected = Series([1, 1, 1.], index=[0, 1, 2])
assert_series_equal(result, expected)
# works when only those columns are selected
result = mixed[['A', 'B']].min(1)
expected = Series([timedelta(days=-1)] * 3)
assert_series_equal(result, expected)
result = mixed[['A', 'B']].min()
expected = Series([timedelta(seconds=5 * 60 + 5),
timedelta(days=-1)], index=['A', 'B'])
assert_series_equal(result, expected)
# GH 3106
df = DataFrame({'time': date_range('20130102', periods=5),
'time2': date_range('20130105', periods=5)})
df['off1'] = df['time2'] - df['time']
self.assertEqual(df['off1'].dtype, 'timedelta64[ns]')
df['off2'] = df['time'] - df['time2']
df._consolidate_inplace()
self.assertTrue(df['off1'].dtype == 'timedelta64[ns]')
self.assertTrue(df['off2'].dtype == 'timedelta64[ns]')
def test_sum_corner(self):
axis0 = self.empty.sum(0)
axis1 = self.empty.sum(1)
tm.assertIsInstance(axis0, Series)
tm.assertIsInstance(axis1, Series)
self.assertEqual(len(axis0), 0)
self.assertEqual(len(axis1), 0)
def test_sum_object(self):
values = self.frame.values.astype(int)
frame = DataFrame(values, index=self.frame.index,
columns=self.frame.columns)
deltas = frame * timedelta(1)
deltas.sum()
def test_sum_bool(self):
# ensure this works, bug report
bools = np.isnan(self.frame)
bools.sum(1)
bools.sum(0)
def test_mean_corner(self):
# unit test when have object data
the_mean = self.mixed_frame.mean(axis=0)
the_sum = self.mixed_frame.sum(axis=0, numeric_only=True)
self.assertTrue(the_sum.index.equals(the_mean.index))
self.assertTrue(len(the_mean.index) < len(self.mixed_frame.columns))
# xs sum mixed type, just want to know it works...
the_mean = self.mixed_frame.mean(axis=1)
the_sum = self.mixed_frame.sum(axis=1, numeric_only=True)
self.assertTrue(the_sum.index.equals(the_mean.index))
# take mean of boolean column
self.frame['bool'] = self.frame['A'] > 0
means = self.frame.mean(0)
self.assertEqual(means['bool'], self.frame['bool'].values.mean())
def test_stats_mixed_type(self):
# don't blow up
self.mixed_frame.std(1)
self.mixed_frame.var(1)
self.mixed_frame.mean(1)
self.mixed_frame.skew(1)
def test_median_corner(self):
def wrapper(x):
if isnull(x).any():
return np.nan
return np.median(x)
self._check_stat_op('median', wrapper, frame=self.intframe,
check_dtype=False, check_dates=True)
# Miscellanea
def test_count_objects(self):
dm = DataFrame(self.mixed_frame._series)
df = DataFrame(self.mixed_frame._series)
assert_series_equal(dm.count(), df.count())
assert_series_equal(dm.count(1), df.count(1))
def test_cumsum_corner(self):
dm = DataFrame(np.arange(20).reshape(4, 5),
index=lrange(4), columns=lrange(5))
# ?(wesm)
result = dm.cumsum() # noqa
def test_sum_bools(self):
df = DataFrame(index=lrange(1), columns=lrange(10))
bools = isnull(df)
self.assertEqual(bools.sum(axis=1)[0], 10)
# Index of max / min
def test_idxmin(self):
frame = self.frame
frame.ix[5:10] = np.nan
frame.ix[15:20, -2:] = np.nan
for skipna in [True, False]:
for axis in [0, 1]:
for df in [frame, self.intframe]:
result = df.idxmin(axis=axis, skipna=skipna)
expected = df.apply(
Series.idxmin, axis=axis, skipna=skipna)
assert_series_equal(result, expected)
self.assertRaises(ValueError, frame.idxmin, axis=2)
def test_idxmax(self):
frame = self.frame
frame.ix[5:10] = np.nan
frame.ix[15:20, -2:] = np.nan
for skipna in [True, False]:
for axis in [0, 1]:
for df in [frame, self.intframe]:
result = df.idxmax(axis=axis, skipna=skipna)
expected = df.apply(
Series.idxmax, axis=axis, skipna=skipna)
assert_series_equal(result, expected)
self.assertRaises(ValueError, frame.idxmax, axis=2)
# ----------------------------------------------------------------------
# Logical reductions
def test_any_all(self):
self._check_bool_op('any', np.any, has_skipna=True, has_bool_only=True)
self._check_bool_op('all', np.all, has_skipna=True, has_bool_only=True)
df = DataFrame(randn(10, 4)) > 0
df.any(1)
df.all(1)
df.any(1, bool_only=True)
df.all(1, bool_only=True)
# skip pathological failure cases
# class CantNonzero(object):
# def __nonzero__(self):
# raise ValueError
# df[4] = CantNonzero()
# it works!
# df.any(1)
# df.all(1)
# df.any(1, bool_only=True)
# df.all(1, bool_only=True)
# df[4][4] = np.nan
# df.any(1)
# df.all(1)
# df.any(1, bool_only=True)
# df.all(1, bool_only=True)
def _check_bool_op(self, name, alternative, frame=None, has_skipna=True,
has_bool_only=False):
if frame is None:
frame = self.frame > 0
# set some NAs
frame = DataFrame(frame.values.astype(object), frame.index,
frame.columns)
frame.ix[5:10] = np.nan
frame.ix[15:20, -2:] = np.nan
f = getattr(frame, name)
if has_skipna:
def skipna_wrapper(x):
nona = x.dropna().values
return alternative(nona)
def wrapper(x):
return alternative(x.values)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
assert_series_equal(result0, frame.apply(wrapper))
assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False) # HACK: win32
else:
skipna_wrapper = alternative
wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
assert_series_equal(result0, frame.apply(skipna_wrapper))
assert_series_equal(result1, frame.apply(skipna_wrapper, axis=1),
check_dtype=False)
# result = f(axis=1)
# comp = frame.apply(alternative, axis=1).reindex(result.index)
# assert_series_equal(result, comp)
# bad axis
self.assertRaises(ValueError, f, axis=2)
# make sure works on mixed-type frame
mixed = self.mixed_frame
mixed['_bool_'] = np.random.randn(len(mixed)) > 0
getattr(mixed, name)(axis=0)
getattr(mixed, name)(axis=1)
class NonzeroFail:
def __nonzero__(self):
raise ValueError
mixed['_nonzero_fail_'] = NonzeroFail()
if has_bool_only:
getattr(mixed, name)(axis=0, bool_only=True)
getattr(mixed, name)(axis=1, bool_only=True)
getattr(frame, name)(axis=0, bool_only=False)
getattr(frame, name)(axis=1, bool_only=False)
# all NA case
if has_skipna:
all_na = frame * np.NaN
r0 = getattr(all_na, name)(axis=0)
r1 = getattr(all_na, name)(axis=1)
if name == 'any':
self.assertFalse(r0.any())
self.assertFalse(r1.any())
else:
self.assertTrue(r0.all())
self.assertTrue(r1.all())
# ----------------------------------------------------------------------
# Top / bottom
def test_nlargest(self):
# GH10393
from string import ascii_lowercase
df = pd.DataFrame({'a': np.random.permutation(10),
'b': list(ascii_lowercase[:10])})
result = df.nlargest(5, 'a')
expected = df.sort_values('a', ascending=False).head(5)
assert_frame_equal(result, expected)
def test_nlargest_multiple_columns(self):
from string import ascii_lowercase
df = pd.DataFrame({'a': np.random.permutation(10),
'b': list(ascii_lowercase[:10]),
'c': np.random.permutation(10).astype('float64')})
result = df.nlargest(5, ['a', 'b'])
expected = df.sort_values(['a', 'b'], ascending=False).head(5)
assert_frame_equal(result, expected)
def test_nsmallest(self):
from string import ascii_lowercase
df = pd.DataFrame({'a': np.random.permutation(10),
'b': list(ascii_lowercase[:10])})
result = df.nsmallest(5, 'a')
expected = df.sort_values('a').head(5)
assert_frame_equal(result, expected)
def test_nsmallest_multiple_columns(self):
from string import ascii_lowercase
df = pd.DataFrame({'a': np.random.permutation(10),
'b': list(ascii_lowercase[:10]),
'c': np.random.permutation(10).astype('float64')})
result = df.nsmallest(5, ['a', 'c'])
expected = df.sort_values(['a', 'c']).head(5)
assert_frame_equal(result, expected)
# ----------------------------------------------------------------------
# Isin
def test_isin(self):
# GH #4211
df = DataFrame({'vals': [1, 2, 3, 4], 'ids': ['a', 'b', 'f', 'n'],
'ids2': ['a', 'n', 'c', 'n']},
index=['foo', 'bar', 'baz', 'qux'])
other = ['a', 'b', 'c']
result = df.isin(other)
expected = DataFrame([df.loc[s].isin(other) for s in df.index])
assert_frame_equal(result, expected)
def test_isin_empty(self):
df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})
result = df.isin([])
expected = pd.DataFrame(False, df.index, df.columns)
assert_frame_equal(result, expected)
def test_isin_dict(self):
df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})
d = {'A': ['a']}
expected = DataFrame(False, df.index, df.columns)
expected.loc[0, 'A'] = True
result = df.isin(d)
assert_frame_equal(result, expected)
# non unique columns
df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})
df.columns = ['A', 'A']
expected = DataFrame(False, df.index, df.columns)
expected.loc[0, 'A'] = True
result = df.isin(d)
assert_frame_equal(result, expected)
def test_isin_with_string_scalar(self):
# GH4763
df = DataFrame({'vals': [1, 2, 3, 4], 'ids': ['a', 'b', 'f', 'n'],
'ids2': ['a', 'n', 'c', 'n']},
index=['foo', 'bar', 'baz', 'qux'])
with tm.assertRaises(TypeError):
df.isin('a')
with tm.assertRaises(TypeError):
df.isin('aaa')
def test_isin_df(self):
df1 = DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]})
df2 = DataFrame({'A': [0, 2, 12, 4], 'B': [2, np.nan, 4, 5]})
expected = DataFrame(False, df1.index, df1.columns)
result = df1.isin(df2)
expected['A'].loc[[1, 3]] = True
expected['B'].loc[[0, 2]] = True
assert_frame_equal(result, expected)
# partial overlapping columns
df2.columns = ['A', 'C']
result = df1.isin(df2)
expected['B'] = False
assert_frame_equal(result, expected)
def test_isin_df_dupe_values(self):
df1 = DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]})
# just cols duped
df2 = DataFrame([[0, 2], [12, 4], [2, np.nan], [4, 5]],
columns=['B', 'B'])
with tm.assertRaises(ValueError):
df1.isin(df2)
# just index duped
df2 = DataFrame([[0, 2], [12, 4], [2, np.nan], [4, 5]],
columns=['A', 'B'], index=[0, 0, 1, 1])
with tm.assertRaises(ValueError):
df1.isin(df2)
# cols and index:
df2.columns = ['B', 'B']
with tm.assertRaises(ValueError):
df1.isin(df2)
def test_isin_dupe_self(self):
other = DataFrame({'A': [1, 0, 1, 0], 'B': [1, 1, 0, 0]})
df = DataFrame([[1, 1], [1, 0], [0, 0]], columns=['A', 'A'])
result = df.isin(other)
expected = DataFrame(False, index=df.index, columns=df.columns)
expected.loc[0] = True
expected.iloc[1, 1] = True
assert_frame_equal(result, expected)
def test_isin_against_series(self):
df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]},
index=['a', 'b', 'c', 'd'])
s = pd.Series([1, 3, 11, 4], index=['a', 'b', 'c', 'd'])
expected = DataFrame(False, index=df.index, columns=df.columns)
expected['A'].loc['a'] = True
expected.loc['d'] = True
result = df.isin(s)
assert_frame_equal(result, expected)
def test_isin_multiIndex(self):
idx = MultiIndex.from_tuples([(0, 'a', 'foo'), (0, 'a', 'bar'),
(0, 'b', 'bar'), (0, 'b', 'baz'),
(2, 'a', 'foo'), (2, 'a', 'bar'),
(2, 'c', 'bar'), (2, 'c', 'baz'),
(1, 'b', 'foo'), (1, 'b', 'bar'),
(1, 'c', 'bar'), (1, 'c', 'baz')])
df1 = DataFrame({'A': np.ones(12),
'B': np.zeros(12)}, index=idx)
df2 = DataFrame({'A': [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
'B': [1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1]})
# against regular index
expected = DataFrame(False, index=df1.index, columns=df1.columns)
result = df1.isin(df2)
assert_frame_equal(result, expected)
df2.index = idx
expected = df2.values.astype(np.bool)
expected[:, 1] = ~expected[:, 1]
expected = DataFrame(expected, columns=['A', 'B'], index=idx)
result = df1.isin(df2)
assert_frame_equal(result, expected)
# ----------------------------------------------------------------------
# Row deduplication
def test_drop_duplicates(self):
df = DataFrame({'AAA': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'bar', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': [1, 1, 2, 2, 2, 2, 1, 2],
'D': lrange(8)})
# single column
result = df.drop_duplicates('AAA')
expected = df[:2]
assert_frame_equal(result, expected)
result = df.drop_duplicates('AAA', keep='last')
expected = df.ix[[6, 7]]
assert_frame_equal(result, expected)
result = df.drop_duplicates('AAA', keep=False)
expected = df.ix[[]]
assert_frame_equal(result, expected)
self.assertEqual(len(result), 0)
# deprecate take_last
with tm.assert_produces_warning(FutureWarning):
result = df.drop_duplicates('AAA', take_last=True)
expected = df.ix[[6, 7]]
assert_frame_equal(result, expected)
# multi column
expected = df.ix[[0, 1, 2, 3]]
result = df.drop_duplicates(np.array(['AAA', 'B']))
assert_frame_equal(result, expected)
result = df.drop_duplicates(['AAA', 'B'])
assert_frame_equal(result, expected)
result = df.drop_duplicates(('AAA', 'B'), keep='last')
expected = df.ix[[0, 5, 6, 7]]
assert_frame_equal(result, expected)
result = df.drop_duplicates(('AAA', 'B'), keep=False)
expected = df.ix[[0]]
assert_frame_equal(result, expected)
# deprecate take_last
with tm.assert_produces_warning(FutureWarning):
result = df.drop_duplicates(('AAA', 'B'), take_last=True)
expected = df.ix[[0, 5, 6, 7]]
assert_frame_equal(result, expected)
# consider everything
df2 = df.ix[:, ['AAA', 'B', 'C']]
result = df2.drop_duplicates()
# in this case only
expected = df2.drop_duplicates(['AAA', 'B'])
assert_frame_equal(result, expected)
result = df2.drop_duplicates(keep='last')
expected = df2.drop_duplicates(['AAA', 'B'], keep='last')
assert_frame_equal(result, expected)
result = df2.drop_duplicates(keep=False)
expected = df2.drop_duplicates(['AAA', 'B'], keep=False)
assert_frame_equal(result, expected)
# deprecate take_last
with tm.assert_produces_warning(FutureWarning):
result = df2.drop_duplicates(take_last=True)
with tm.assert_produces_warning(FutureWarning):
expected = df2.drop_duplicates(['AAA', 'B'], take_last=True)
assert_frame_equal(result, expected)
# integers
result = df.drop_duplicates('C')
expected = df.iloc[[0, 2]]
assert_frame_equal(result, expected)
result = df.drop_duplicates('C', keep='last')
expected = df.iloc[[-2, -1]]
assert_frame_equal(result, expected)
df['E'] = df['C'].astype('int8')
result = df.drop_duplicates('E')
expected = df.iloc[[0, 2]]
assert_frame_equal(result, expected)
result = df.drop_duplicates('E', keep='last')
expected = df.iloc[[-2, -1]]
assert_frame_equal(result, expected)
# GH 11376
df = pd.DataFrame({'x': [7, 6, 3, 3, 4, 8, 0],
'y': [0, 6, 5, 5, 9, 1, 2]})
expected = df.loc[df.index != 3]
assert_frame_equal(df.drop_duplicates(), expected)
df = pd.DataFrame([[1, 0], [0, 2]])
assert_frame_equal(df.drop_duplicates(), df)
df = pd.DataFrame([[-2, 0], [0, -4]])
assert_frame_equal(df.drop_duplicates(), df)
x = np.iinfo(np.int64).max / 3 * 2
df = pd.DataFrame([[-x, x], [0, x + 4]])
assert_frame_equal(df.drop_duplicates(), df)
df = pd.DataFrame([[-x, x], [x, x + 4]])
assert_frame_equal(df.drop_duplicates(), df)
# GH 11864
df = pd.DataFrame([i] * 9 for i in range(16))
df = df.append([[1] + [0] * 8], ignore_index=True)
for keep in ['first', 'last', False]:
assert_equal(df.duplicated(keep=keep).sum(), 0)
def test_drop_duplicates_for_take_all(self):
df = DataFrame({'AAA': ['foo', 'bar', 'baz', 'bar',
'foo', 'bar', 'qux', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': [1, 1, 2, 2, 2, 2, 1, 2],
'D': lrange(8)})
# single column
result = df.drop_duplicates('AAA')
expected = df.iloc[[0, 1, 2, 6]]
assert_frame_equal(result, expected)
result = df.drop_duplicates('AAA', keep='last')
expected = df.iloc[[2, 5, 6, 7]]
assert_frame_equal(result, expected)
result = df.drop_duplicates('AAA', keep=False)
expected = df.iloc[[2, 6]]
assert_frame_equal(result, expected)
# multiple columns
result = df.drop_duplicates(['AAA', 'B'])
expected = df.iloc[[0, 1, 2, 3, 4, 6]]
assert_frame_equal(result, expected)
result = df.drop_duplicates(['AAA', 'B'], keep='last')
expected = df.iloc[[0, 1, 2, 5, 6, 7]]
assert_frame_equal(result, expected)
result = df.drop_duplicates(['AAA', 'B'], keep=False)
expected = df.iloc[[0, 1, 2, 6]]
assert_frame_equal(result, expected)
def test_drop_duplicates_tuple(self):
df = DataFrame({('AA', 'AB'): ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'bar', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': [1, 1, 2, 2, 2, 2, 1, 2],
'D': lrange(8)})
# single column
result = df.drop_duplicates(('AA', 'AB'))
expected = df[:2]
assert_frame_equal(result, expected)
result = df.drop_duplicates(('AA', 'AB'), keep='last')
expected = df.ix[[6, 7]]
assert_frame_equal(result, expected)
result = df.drop_duplicates(('AA', 'AB'), keep=False)
expected = df.ix[[]] # empty df
self.assertEqual(len(result), 0)
assert_frame_equal(result, expected)
# deprecate take_last
with tm.assert_produces_warning(FutureWarning):
result = df.drop_duplicates(('AA', 'AB'), take_last=True)
expected = df.ix[[6, 7]]
assert_frame_equal(result, expected)
# multi column
expected = df.ix[[0, 1, 2, 3]]
result = df.drop_duplicates((('AA', 'AB'), 'B'))
assert_frame_equal(result, expected)
def test_drop_duplicates_NA(self):
# none
df = DataFrame({'A': [None, None, 'foo', 'bar',
'foo', 'bar', 'bar', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': [1.0, np.nan, np.nan, np.nan, 1., 1., 1, 1.],
'D': lrange(8)})
# single column
result = df.drop_duplicates('A')
expected = df.ix[[0, 2, 3]]
assert_frame_equal(result, expected)
result = df.drop_duplicates('A', keep='last')
expected = df.ix[[1, 6, 7]]
assert_frame_equal(result, expected)
result = df.drop_duplicates('A', keep=False)
expected = df.ix[[]] # empty df
assert_frame_equal(result, expected)
self.assertEqual(len(result), 0)
# deprecate take_last
with tm.assert_produces_warning(FutureWarning):
result = df.drop_duplicates('A', take_last=True)
expected = df.ix[[1, 6, 7]]
assert_frame_equal(result, expected)
# multi column
result = df.drop_duplicates(['A', 'B'])
expected = df.ix[[0, 2, 3, 6]]
assert_frame_equal(result, expected)
result = df.drop_duplicates(['A', 'B'], keep='last')
expected = df.ix[[1, 5, 6, 7]]
assert_frame_equal(result, expected)
result = df.drop_duplicates(['A', 'B'], keep=False)
expected = df.ix[[6]]
assert_frame_equal(result, expected)
# deprecate take_last
with tm.assert_produces_warning(FutureWarning):
result = df.drop_duplicates(['A', 'B'], take_last=True)
expected = df.ix[[1, 5, 6, 7]]
assert_frame_equal(result, expected)
# nan
df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'bar', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': [1.0, np.nan, np.nan, np.nan, 1., 1., 1, 1.],
'D': lrange(8)})
# single column
result = df.drop_duplicates('C')
expected = df[:2]
assert_frame_equal(result, expected)
result = df.drop_duplicates('C', keep='last')
expected = df.ix[[3, 7]]
assert_frame_equal(result, expected)
result = df.drop_duplicates('C', keep=False)
expected = df.ix[[]] # empty df
assert_frame_equal(result, expected)
self.assertEqual(len(result), 0)
# deprecate take_last
with tm.assert_produces_warning(FutureWarning):
result = df.drop_duplicates('C', take_last=True)
expected = df.ix[[3, 7]]
assert_frame_equal(result, expected)
# multi column
result = df.drop_duplicates(['C', 'B'])
expected = df.ix[[0, 1, 2, 4]]
assert_frame_equal(result, expected)
result = df.drop_duplicates(['C', 'B'], keep='last')
expected = df.ix[[1, 3, 6, 7]]
assert_frame_equal(result, expected)
result = df.drop_duplicates(['C', 'B'], keep=False)
expected = df.ix[[1]]
assert_frame_equal(result, expected)
# deprecate take_last
with tm.assert_produces_warning(FutureWarning):
result = df.drop_duplicates(['C', 'B'], take_last=True)
expected = df.ix[[1, 3, 6, 7]]
assert_frame_equal(result, expected)
def test_drop_duplicates_NA_for_take_all(self):
# none
df = DataFrame({'A': [None, None, 'foo', 'bar',
'foo', 'baz', 'bar', 'qux'],
'C': [1.0, np.nan, np.nan, np.nan, 1., 2., 3, 1.]})
# single column
result = df.drop_duplicates('A')
expected = df.iloc[[0, 2, 3, 5, 7]]
assert_frame_equal(result, expected)
result = df.drop_duplicates('A', keep='last')
expected = df.iloc[[1, 4, 5, 6, 7]]
assert_frame_equal(result, expected)
result = df.drop_duplicates('A', keep=False)
expected = df.iloc[[5, 7]]
assert_frame_equal(result, expected)
# nan
# single column
result = df.drop_duplicates('C')
expected = df.iloc[[0, 1, 5, 6]]
assert_frame_equal(result, expected)
result = df.drop_duplicates('C', keep='last')
expected = df.iloc[[3, 5, 6, 7]]
assert_frame_equal(result, expected)
result = df.drop_duplicates('C', keep=False)
expected = df.iloc[[5, 6]]
assert_frame_equal(result, expected)
def test_drop_duplicates_inplace(self):
orig = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'bar', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': [1, 1, 2, 2, 2, 2, 1, 2],
'D': lrange(8)})
# single column
df = orig.copy()
df.drop_duplicates('A', inplace=True)
expected = orig[:2]
result = df
assert_frame_equal(result, expected)
df = orig.copy()
df.drop_duplicates('A', keep='last', inplace=True)
expected = orig.ix[[6, 7]]
result = df
assert_frame_equal(result, expected)
df = orig.copy()
df.drop_duplicates('A', keep=False, inplace=True)
expected = orig.ix[[]]
result = df
assert_frame_equal(result, expected)
self.assertEqual(len(df), 0)
# deprecate take_last
df = orig.copy()
with tm.assert_produces_warning(FutureWarning):
df.drop_duplicates('A', take_last=True, inplace=True)
expected = orig.ix[[6, 7]]
result = df
assert_frame_equal(result, expected)
# multi column
df = orig.copy()
df.drop_duplicates(['A', 'B'], inplace=True)
expected = orig.ix[[0, 1, 2, 3]]
result = df
assert_frame_equal(result, expected)
df = orig.copy()
df.drop_duplicates(['A', 'B'], keep='last', inplace=True)
expected = orig.ix[[0, 5, 6, 7]]
result = df
assert_frame_equal(result, expected)
df = orig.copy()
df.drop_duplicates(['A', 'B'], keep=False, inplace=True)
expected = orig.ix[[0]]
result = df
assert_frame_equal(result, expected)
# deprecate take_last
df = orig.copy()
with tm.assert_produces_warning(FutureWarning):
df.drop_duplicates(['A', 'B'], take_last=True, inplace=True)
expected = orig.ix[[0, 5, 6, 7]]
result = df
assert_frame_equal(result, expected)
# consider everything
orig2 = orig.ix[:, ['A', 'B', 'C']].copy()
df2 = orig2.copy()
df2.drop_duplicates(inplace=True)
# in this case only
expected = orig2.drop_duplicates(['A', 'B'])
result = df2
assert_frame_equal(result, expected)
df2 = orig2.copy()
df2.drop_duplicates(keep='last', inplace=True)
expected = orig2.drop_duplicates(['A', 'B'], keep='last')
result = df2
assert_frame_equal(result, expected)
df2 = orig2.copy()
df2.drop_duplicates(keep=False, inplace=True)
expected = orig2.drop_duplicates(['A', 'B'], keep=False)
result = df2
assert_frame_equal(result, expected)
# deprecate take_last
df2 = orig2.copy()
with tm.assert_produces_warning(FutureWarning):
df2.drop_duplicates(take_last=True, inplace=True)
with tm.assert_produces_warning(FutureWarning):
expected = orig2.drop_duplicates(['A', 'B'], take_last=True)
result = df2
assert_frame_equal(result, expected)
# Rounding
def test_round(self):
# GH 2665
# Test that rounding an empty DataFrame does nothing
df = DataFrame()
assert_frame_equal(df, df.round())
# Here's the test frame we'll be working with
df = DataFrame(
{'col1': [1.123, 2.123, 3.123], 'col2': [1.234, 2.234, 3.234]})
# Default round to integer (i.e. decimals=0)
expected_rounded = DataFrame(
{'col1': [1., 2., 3.], 'col2': [1., 2., 3.]})
assert_frame_equal(df.round(), expected_rounded)
# Round with an integer
decimals = 2
expected_rounded = DataFrame(
{'col1': [1.12, 2.12, 3.12], 'col2': [1.23, 2.23, 3.23]})
assert_frame_equal(df.round(decimals), expected_rounded)
# This should also work with np.round (since np.round dispatches to
# df.round)
assert_frame_equal(np.round(df, decimals), expected_rounded)
# Round with a list
round_list = [1, 2]
with self.assertRaises(TypeError):
df.round(round_list)
# Round with a dictionary
expected_rounded = DataFrame(
{'col1': [1.1, 2.1, 3.1], 'col2': [1.23, 2.23, 3.23]})
round_dict = {'col1': 1, 'col2': 2}
assert_frame_equal(df.round(round_dict), expected_rounded)
# Incomplete dict
expected_partially_rounded = DataFrame(
{'col1': [1.123, 2.123, 3.123], 'col2': [1.2, 2.2, 3.2]})
partial_round_dict = {'col2': 1}
assert_frame_equal(
df.round(partial_round_dict), expected_partially_rounded)
# Dict with unknown elements
wrong_round_dict = {'col3': 2, 'col2': 1}
assert_frame_equal(
df.round(wrong_round_dict), expected_partially_rounded)
# float input to `decimals`
non_int_round_dict = {'col1': 1, 'col2': 0.5}
with self.assertRaises(TypeError):
df.round(non_int_round_dict)
# String input
non_int_round_dict = {'col1': 1, 'col2': 'foo'}
with self.assertRaises(TypeError):
df.round(non_int_round_dict)
non_int_round_Series = Series(non_int_round_dict)
with self.assertRaises(TypeError):
df.round(non_int_round_Series)
# List input
non_int_round_dict = {'col1': 1, 'col2': [1, 2]}
with self.assertRaises(TypeError):
df.round(non_int_round_dict)
non_int_round_Series = Series(non_int_round_dict)
with self.assertRaises(TypeError):
df.round(non_int_round_Series)
# Non integer Series inputs
non_int_round_Series = Series(non_int_round_dict)
with self.assertRaises(TypeError):
df.round(non_int_round_Series)
non_int_round_Series = Series(non_int_round_dict)
with self.assertRaises(TypeError):
df.round(non_int_round_Series)
# Negative numbers
negative_round_dict = {'col1': -1, 'col2': -2}
big_df = df * 100
expected_neg_rounded = DataFrame(
{'col1': [110., 210, 310], 'col2': [100., 200, 300]})
assert_frame_equal(
big_df.round(negative_round_dict), expected_neg_rounded)
# nan in Series round
nan_round_Series = Series({'col1': nan, 'col2': 1})
# TODO(wesm): unused?
expected_nan_round = DataFrame({ # noqa
'col1': [1.123, 2.123, 3.123],
'col2': [1.2, 2.2, 3.2]})
if sys.version < LooseVersion('2.7'):
# Rounding with decimal is a ValueError in Python < 2.7
with self.assertRaises(ValueError):
df.round(nan_round_Series)
else:
with self.assertRaises(TypeError):
df.round(nan_round_Series)
# Make sure this doesn't break existing Series.round
assert_series_equal(df['col1'].round(1), expected_rounded['col1'])
# named columns
# GH 11986
decimals = 2
expected_rounded = DataFrame(
{'col1': [1.12, 2.12, 3.12], 'col2': [1.23, 2.23, 3.23]})
df.columns.name = "cols"
expected_rounded.columns.name = "cols"
assert_frame_equal(df.round(decimals), expected_rounded)
# interaction of named columns & series
assert_series_equal(df['col1'].round(decimals),
expected_rounded['col1'])
assert_series_equal(df.round(decimals)['col1'],
expected_rounded['col1'])
def test_round_mixed_type(self):
# GH11885
df = DataFrame({'col1': [1.1, 2.2, 3.3, 4.4],
'col2': ['1', 'a', 'c', 'f'],
'col3': date_range('20111111', periods=4)})
round_0 = DataFrame({'col1': [1., 2., 3., 4.],
'col2': ['1', 'a', 'c', 'f'],
'col3': date_range('20111111', periods=4)})
assert_frame_equal(df.round(), round_0)
assert_frame_equal(df.round(1), df)
assert_frame_equal(df.round({'col1': 1}), df)
assert_frame_equal(df.round({'col1': 0}), round_0)
assert_frame_equal(df.round({'col1': 0, 'col2': 1}), round_0)
assert_frame_equal(df.round({'col3': 1}), df)
def test_round_issue(self):
# GH11611
df = pd.DataFrame(np.random.random([3, 3]), columns=['A', 'B', 'C'],
index=['first', 'second', 'third'])
dfs = pd.concat((df, df), axis=1)
rounded = dfs.round()
self.assertTrue(rounded.index.equals(dfs.index))
decimals = pd.Series([1, 0, 2], index=['A', 'B', 'A'])
self.assertRaises(ValueError, df.round, decimals)
def test_built_in_round(self):
if not compat.PY3:
raise nose.SkipTest("build in round cannot be overriden "
"prior to Python 3")
# GH11763
# Here's the test frame we'll be working with
df = DataFrame(
{'col1': [1.123, 2.123, 3.123], 'col2': [1.234, 2.234, 3.234]})
# Default round to integer (i.e. decimals=0)
expected_rounded = DataFrame(
{'col1': [1., 2., 3.], 'col2': [1., 2., 3.]})
assert_frame_equal(round(df), expected_rounded)
# Clip
def test_clip(self):
median = self.frame.median().median()
capped = self.frame.clip_upper(median)
self.assertFalse((capped.values > median).any())
floored = self.frame.clip_lower(median)
self.assertFalse((floored.values < median).any())
double = self.frame.clip(upper=median, lower=median)
self.assertFalse((double.values != median).any())
def test_dataframe_clip(self):
# GH #2747
df = DataFrame(np.random.randn(1000, 2))
for lb, ub in [(-1, 1), (1, -1)]:
clipped_df = df.clip(lb, ub)
lb, ub = min(lb, ub), max(ub, lb)
lb_mask = df.values <= lb
ub_mask = df.values >= ub
mask = ~lb_mask & ~ub_mask
self.assertTrue((clipped_df.values[lb_mask] == lb).all())
self.assertTrue((clipped_df.values[ub_mask] == ub).all())
self.assertTrue((clipped_df.values[mask] ==
df.values[mask]).all())
def test_clip_against_series(self):
# GH #6966
df = DataFrame(np.random.randn(1000, 2))
lb = Series(np.random.randn(1000))
ub = lb + 1
clipped_df = df.clip(lb, ub, axis=0)
for i in range(2):
lb_mask = df.iloc[:, i] <= lb
ub_mask = df.iloc[:, i] >= ub
mask = ~lb_mask & ~ub_mask
result = clipped_df.loc[lb_mask, i]
assert_series_equal(result, lb[lb_mask], check_names=False)
self.assertEqual(result.name, i)
result = clipped_df.loc[ub_mask, i]
assert_series_equal(result, ub[ub_mask], check_names=False)
self.assertEqual(result.name, i)
assert_series_equal(clipped_df.loc[mask, i], df.loc[mask, i])
def test_clip_against_frame(self):
df = DataFrame(np.random.randn(1000, 2))
lb = DataFrame(np.random.randn(1000, 2))
ub = lb + 1
clipped_df = df.clip(lb, ub)
lb_mask = df <= lb
ub_mask = df >= ub
mask = ~lb_mask & ~ub_mask
assert_frame_equal(clipped_df[lb_mask], lb[lb_mask])
assert_frame_equal(clipped_df[ub_mask], ub[ub_mask])
assert_frame_equal(clipped_df[mask], df[mask])
# Matrix-like
def test_dot(self):
a = DataFrame(np.random.randn(3, 4), index=['a', 'b', 'c'],
columns=['p', 'q', 'r', 's'])
b = DataFrame(np.random.randn(4, 2), index=['p', 'q', 'r', 's'],
columns=['one', 'two'])
result = a.dot(b)
expected = DataFrame(np.dot(a.values, b.values),
index=['a', 'b', 'c'],
columns=['one', 'two'])
# Check alignment
b1 = b.reindex(index=reversed(b.index))
result = a.dot(b)
assert_frame_equal(result, expected)
# Check series argument
result = a.dot(b['one'])
assert_series_equal(result, expected['one'], check_names=False)
self.assertTrue(result.name is None)
result = a.dot(b1['one'])
assert_series_equal(result, expected['one'], check_names=False)
self.assertTrue(result.name is None)
# can pass correct-length arrays
row = a.ix[0].values
result = a.dot(row)
exp = a.dot(a.ix[0])
assert_series_equal(result, exp)
with assertRaisesRegexp(ValueError, 'Dot product shape mismatch'):
a.dot(row[:-1])
a = np.random.rand(1, 5)
b = np.random.rand(5, 1)
A = DataFrame(a)
# TODO(wesm): unused
B = DataFrame(b) # noqa
# it works
result = A.dot(b)
# unaligned
df = DataFrame(randn(3, 4), index=[1, 2, 3], columns=lrange(4))
df2 = DataFrame(randn(5, 3), index=lrange(5), columns=[1, 2, 3])
assertRaisesRegexp(ValueError, 'aligned', df.dot, df2)
| pjryan126/solid-start-careers | store/api/zillow/venv/lib/python2.7/site-packages/pandas/tests/frame/test_analytics.py | Python | gpl-2.0 | 80,870 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
piStrings description
Supports language text strings for different subsystems:
-- bottle with 'template' and {{string}} replacement
-- JS/HTML replacement
'lang' (e.g. DE or EN) controls which language string is returned; if asked
with an unknown code, the default (EN) string is returned.
"piStrings.json" Details:
It holds objects like 'piSchedule' with JSON noted "stringName:stringValue",
to be used with the bottle function 'templateSet(template, rv)', which
replaces {{stringName}} in the passed 'template' with the 'stringValue'.
'template' name is like 'piEdit' of 'piEdit.tpl'
'rv' is the JSON data with "stringName:stringValue" grouped by supported locale
'''
# /cVersion/06-01-21_1743/
import json
lang = ""
xS = ""
piStringsJSON = "piStrings.json"
if xS == "":
xfileJSON = open(piStringsJSON, 'r')
xS = json.loads(xfileJSON.read())
def getLocale():
global lang
try:
x = xS['piPrefs'][lang]
except:
lang = 'EN'
xS['piPrefs'][lang] = 'EN'
return lang
def piString():
#---------------------------------
global lang, xS
def get(n):
getLocale()
if n is None:
return ""
else:
strings = n.split(".")
try:
return xS[strings[0]][lang][strings[1]]
except:
return xS[strings[0]]['EN'][strings[1]]
return get
def getAllLocales():
global lang, xS
try:
x = xS['locales']
except:
x = 'locals return error'
return x
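# --- Usage sketch (added for illustration; the dotted key mentioned below is an
# --- assumed example, the real piStrings.json entries may differ) ---
if __name__ == '__main__':
    _ = piString()                  # returns the 'get' closure
    print(getLocale())              # resolved locale, falls back to 'EN'
    print(getAllLocales())          # contents of the 'locales' object
    # a string is looked up as "object.stringName", e.g. _("piEdit.piTitle"),
    # which resolves xS['piEdit'][lang]['piTitle'] and falls back to the EN entry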
| neandr/piSchedule | piSchedule751/piStrings.py | Python | mit | 1,601 |
from __future__ import unicode_literals
import frappe
import frappe.defaults
from frappe.utils import cstr, cint, flt, comma_or, nowdate
from frappe import _ ,msgprint
from erpnext.stock.utils import get_incoming_rate
from erpnext.stock.stock_ledger import get_previous_sle
from erpnext.controllers.queries import get_match_cond
#Check whether the SUM of qty across all serials equals the qty of the item specified
def validate_serial_qty(doc,method):
for d in doc.get('mtn_details'):
if doc.purpose in ['Manufacture','Repack']:
if d.custom_serial_no and not d.target_batch and not d.qty_per_drum_bag and d.t_warehouse:
sr_no=(d.custom_serial_no).splitlines()
sr=''
for s in sr_no:
if sr:
sr+=','+'\''+s+'\''
else:
sr='\''+s+'\''
qty=frappe.db.sql("""select SUM(qty) from `tabSerial No`
where name in (%s)"""%(sr),as_list=1)
if not d.qty==qty[0][0]:
frappe.throw(_("Row {0} : Quantity in Serial No {1} must equal to Quantity for Item {2}").format(d.idx,d.custom_serial_no,d.item_code))
#Check whether Quality Checking is done for the serials in the serials field
def validate_serial_qc(doc,method):
for d in doc.get('mtn_details'):
if doc.purpose in ['Manufacture','Repack'] and d.s_warehouse:
qc_req=frappe.db.get_value('Item',{"item_code":d.item_code},'inspection_required')
ca_req=frappe.db.get_value('Item',{"item_code":d.item_code},'chemical_analysis')
psd_req=frappe.db.get_value('Item',{"item_code":d.item_code},'psd_analysis')
sa_req=frappe.db.get_value('Item',{"item_code":d.item_code},'ssa')
if d.custom_serial_no:
sr_no=(d.custom_serial_no).splitlines() or (d.custom_serial_no).split('\n')
for sr in sr_no:
check_qc_done(ca_req,sa_req,psd_req,sr,d.item_code)
def check_qc_done(qc,sa,psd,sr,item_code):
qc_status=frappe.db.get_value('Serial No',{"item_code":item_code,"name":sr},'qc_status')
sa_status=frappe.db.get_value('Serial No',{"item_code":item_code,"name":sr},'sa_analysis')
psd_status=frappe.db.get_value('Serial No',{"item_code":item_code,"name":sr},'psd_status')
if qc=='Yes' and qc_status=='':
frappe.throw(_("QC Required for Serial {0} ").format(sr))
elif sa=='Yes' and sa_status=='':
frappe.throw(_("Surface Anaysis Required for Serial {0} ").format(sr))
elif psd=='Yes' and psd_status=='':
frappe.throw(_("PSD Anaysis Required for Serial {0} ").format(sr))
#Check that a serial no is given and that it is a valid serial no for the item
def validate_serial_no(d):
if not d.custom_serial_no and frappe.db.get_value('Item',d.item_code,'serial_no')=='Yes':
frappe.throw(_("Row {0}: Enter serial no for Item {1}").format(d.idx,d.item_code))
elif d.custom_serial_no:
sr_no=(d.custom_serial_no).splitlines() or (d.custom_serial_no).split('\n')
for s in sr_no:
if not frappe.db.get_value('Serial No',s,'name'):
frappe.throw(_("Row {0}: Serial no {1} does not exist").format(d.idx,s))
elif not frappe.db.get_value('Serial No',s,'item_code')==d.item_code:
frappe.throw(_("Row {0}: Please select the Serial No regarding to Item Code {1}").format(d.idx,d.item_code))
#Check whether serial no is specified in delivery note
def validate_serial_no_dn(doc,method):
for d in doc.get('delivery_note_details'):
if not d.custom_serial_no:
if frappe.db.get_value('Item',d.item_code,'serial_no')=='Yes':
frappe.throw(_("Please select serial no at row {0}").format(d.idx))
#Check that the serial nos in the delivery note are valid and cover the requested qty
def validate_serial_no_qty(doc,method):
sum_qty=0.0
sr=[]
for d in doc.get('delivery_note_details'):
if frappe.db.get_value('Item',d.item_code,'serial_no')=='Yes' and d.custom_serial_no:
serial_no=(d.custom_serial_no).splitlines() or (d.custom_serial_no).split('\n')
for sr_no in serial_no:
qty=frappe.db.get_value("Serial No",{'name':sr_no,'qc_status':'Accepted','status':'Available','item_code':d.item_code,'serial_no_warehouse':d.warehouse},'qty')
if qty:
sum_qty=flt(sum_qty)+flt(qty)
else:
frappe.throw(_("Please select valid serial no at row {0}").format(d.idx))
sr.append(sr_no)
if flt(d.qty) > flt(sum_qty):
frappe.throw(_("Negative stock error: {0} qty available in serial no {1}").format((flt(sum_qty)-flt(d.qty)),','.join(sr)))
#Check whether QC is done for the serials in the delivery note
def validate_qc_status(doc,method):
for d in doc.get('delivery_note_details'):
if d.custom_serial_no:#change it to custom_serial_no
sr_n=(d.custom_serial_no).splitlines()
for sr in sr_n:
ca_req=frappe.db.get_value('Item',{"item_code":d.item_code},'chemical_analysis')
psd_req=frappe.db.get_value('Item',{"item_code":d.item_code},'psd_analysis')
sa_req=frappe.db.get_value('Item',{"item_code":d.item_code},'ssa')
qc_status=frappe.db.get_value('Serial No',{"item_code":d.item_code,"name":sr},'qc_status')
sa_status=frappe.db.get_value('Serial No',{"item_code":d.item_code,"name":sr},'sa_analysis')
psd_status=frappe.db.get_value('Serial No',{"item_code":d.item_code,"name":sr},'psd_status')
if ca_req=='Yes' and qc_status!='Accepted':
frappe.throw(_("QC Not Accpeted for Serial {0} ").format(sr))
elif psd_req=='Yes' and psd_status!='Accepted':
frappe.throw(_("PSD Anaysis Not Accpeted for Serial {0} ").format(sr))
elif sa_req=='Yes' and sa_status!='Accepted':
frappe.throw(_("SA Anaysis Not Accpeted for Serial {0} ").format(sr))
def update_serial_no(doc,method): #Rohit_sw
for d in doc.get('delivery_note_details'):
if d.custom_serial_no:
serial_no=(d.custom_serial_no).splitlines()
qty=cstr(d.qty)
for sr_no in serial_no:
if cint(qty) > 0:
qty=flt(qty) - flt(frappe.db.get_value('Serial No',sr_no,'qty'))
make_serialgl_dn(d,sr_no,frappe.db.get_value('Serial No',sr_no,'qty'),doc)
frappe.db.sql("update `tabSerial No` set qty=0.0,status='Delivered' where name='%s'"%(sr_no))
if (cint(0)-cint(qty))>0:
amend_serial_no(d,sr_no,qty)
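# Worked example (assumed figures): delivering qty 25 against serials
# SRA001 (10), SRB001 (10) and SRC001 (10) records a Serial Stock row for each
# serial, marks all three qty 0 / status 'Delivered', and, because the last
# serial over-covers the demand by 5, amend_serial_no() creates 'SRC001-1'
# holding those 5 remaining units as Available.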
def make_serialgl_dn(d,serial_no,qty,doc):
bi=frappe.new_doc('Serial Stock')
bi.document=doc.name
bi.item_code=d.item_code
bi.serial_no=serial_no
bi.qty=cstr(qty)
bi.warehouse=d.warehouse
bi.parent=serial_no
bi.parentfield='serial_stock'
bi.parenttype='Serial No'
bi.save(ignore_permissions=True)
def amend_serial_no(d,serial_no,qty):
sr_no=frappe.new_doc("Serial No")
	amend_qty=(cint(frappe.db.get_value('Serial No',serial_no,'amend_qty')) or 0) + 1
sr_no.serial_no=serial_no.split('-')[0] + '-' + cstr(amend_qty)
sr_no.amend_qty=amend_qty
sr_no.make_from=serial_no
sr_no.status="Available"
sr_no.item_code=d.item_code
sr_no.item_name=d.item_name
sr_no.qty=cstr(flt(0.0)-flt(qty))
sr_no.serial_no_warehouse=d.warehouse
sr_no.item_group=d.item_group
	sr_no.description=d.description
sr_no.qc_status='Accepted'
sr_no.save(ignore_permissions=True)
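# Naming note (sketch): the amended serial takes the text before the first '-'
# of the source serial plus the incremented amend counter, e.g. 'SRC001' ->
# 'SRC001-1'; it is saved as Available with qc_status 'Accepted' so the
# leftover quantity can still be delivered later.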
def update_serialgl_dn(doc,method):
	for d in doc.get('delivery_note_details'):
		if d.custom_serial_no:
			serial_no=(d.custom_serial_no).splitlines()
			for sr_no in serial_no:
				# reset per serial so each Serial No is restored with its own booked qty only
				qty=0
				serial_no_qty=frappe.db.sql("select ifnull(qty,0) from `tabSerial Stock` where parent='%s' and document='%s'"%(sr_no,doc.name),as_list=1)
				if serial_no_qty:
					qty=qty+cint(serial_no_qty[0][0])
amend_qty=frappe.db.get_value('Serial No',{'make_from':sr_no},'qty') or 0
qty = qty + amend_qty
frappe.db.sql("update `tabSerial No` set qty=%s,status='Available' where name='%s'"%(qty,sr_no))
frappe.db.sql("delete from `tabSerial Stock` where parent='%s' and document='%s'"%(sr_no,doc.name))
frappe.db.sql("delete from `tabSerial No` where make_from='%s'"%(sr_no))
#Handle serial no generation/validation based on the Stock Entry purpose
def generate_serial_no_fg(doc,method):
previous_source_batch=''
#source_batch_no=''
for d in doc.get('mtn_details'):
if doc.purpose in ['Manufacture','Repack','Material Receipt']:
if d.t_warehouse and d.qty_per_drum_bag:
generate_serial_no_per_drum(d,doc)
elif d.t_warehouse and not d.qty_per_drum_bag:
generate_serial_no_and_batch(d,previous_source_batch,doc)
validate_serial_no(d)
elif d.t_warehouse:
validate_serial_no(d)
if d.source_batch:
previous_source_batch=d.source_batch
elif doc.purpose in ['Material Issue','Purchase Return']:
validate_serial_no(d)
issue_serial_no(d,'Not Available',0)
elif doc.purpose in ['Sales Return']:
validate_serial_no(d)
elif doc.purpose in ['Material Transfer']:
validate_serial_no(d)
qty=validate_qty_in_serial(d)
update_serial_no_warehouse_qty(qty,d,doc)
if d.t_warehouse and d.target_batch and doc.purpose in ['Manufacture','Repack']:
update_batch_status("Yes",d.target_batch)
def update_serial_no_warehouse_qty(qty,d,doc):
sr_no=(d.custom_serial_no).splitlines()
qty_temp=cint(d.qty)
for sr in sr_no:
serial_qty=frappe.db.get_value("Serial No",sr,"qty")
if qty_temp > cint(serial_qty):
sn=frappe.get_doc("Serial No",sr)
sn.update({"serial_no_warehouse":d.t_warehouse})
sn.save(ignore_permissions=True)
qty_temp -= cint(serial_qty)
elif qty_temp < cint(serial_qty):
sn=frappe.get_doc("Serial No",sr)
sn.update({"qty":qty_temp,"serial_no_warehouse":d.t_warehouse})
sn.save(ignore_permissions=True)
rem_qty=cint(serial_qty)-qty_temp
amend_serial_no_mt(sr,rem_qty,serial_qty,sn.name,d,qty_temp,doc)
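# Worked example (assumed figures): transferring qty 15 drawn from SRA001 (10)
# and SRB001 (10) moves SRA001 whole to the target warehouse, trims SRB001 to
# qty 5 and moves it too, while amend_serial_no_mt() leaves the remaining 5
# units behind as 'SRB001-1' in the source warehouse and logs a 'Serial QTY
# Maintain' row so the split can be undone when the entry is cancelled.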
def update_serial_no_mt_cancel(doc,method):
for d in doc.get('mtn_details'):
if d.custom_serial_no and doc.purpose=='Material Transfer':
serials=get_serials_from_field(d)
update_amended_serials(serials,doc,d)
def update_amended_serials(serials,doc,d):
for sn in serials:
change_serial_details(sn,doc,d)
def change_serial_details(sn,doc,d):
amended_qty=frappe.db.sql("""select qty,amended_serial_no from `tabSerial QTY Maintain` where parent='%s' and document='%s'"""%(sn,doc.name))
srn=frappe.get_doc("Serial No",sn)
if amended_qty:
qty=srn.qty+flt(amended_qty[0][0])
else:
qty=srn.qty+0
srn.update({"qty":qty,"serial_no_warehouse":d.s_warehouse})
srn.save(ignore_permissions=True)
if amended_qty:
sr=frappe.get_doc("Serial No",amended_qty[0][1])
qty=sr.qty-flt(amended_qty[0][0])
sr.update({"qty":qty})
sr.save(ignore_permissions=True)
def get_serials_from_field(d):
serials=(d.custom_serial_no).splitlines()
return serials
def amend_serial_no_mt(sr,rem_qty,serial_qty,name,d,qty_temp,doc):
parent=(name).split('-') or name
idx=frappe.db.sql("""select ifnull(max(idx),0)
from `tabSerial QTY Maintain`
where parent='%s'"""%(parent[0]),as_list=1)
if idx:
idx=idx[0][0]
else:
idx=0
name=create_new_serial_no(idx,sr,rem_qty,d,parent[0])
update_maintain_serial(name,sr,serial_qty,rem_qty,idx,doc,parent[0])
def create_new_serial_no(idx,sr,rem_qty,d,parent):
sn=frappe.new_doc('Serial No')
sn.serial_no=parent+'-'+cstr(idx+1)
sn.serial_no_warehouse=d.s_warehouse
sn.item_code=d.item_code
sn.status='Available'
sn.qty=rem_qty
sn.finished_good='No'
sn.save(ignore_permissions=True)
return sn.name
def update_maintain_serial(name,sr,serial_qty,qty_temp,idx,doc,parent):
sqm=frappe.new_doc("Serial QTY Maintain")
sqm.amended_serial_no=name
sqm.idx=cint(idx)+1
sqm.qty=qty_temp
sqm.document=doc.name
sqm.parent_serial=parent
sqm.parent=sr
sqm.parenttype='Serial No'
sqm.parentfield='serial_qty_maintain'
sqm.save(ignore_permissions=True)
def validate_qty_in_serial(d):
if d.custom_serial_no:
serials=get_serials_list(d.custom_serial_no)
qty=get_qty_for_serials(serials)
if not d.qty <= qty:
frappe.throw(_("Quantity Should be less than or Equal to {0}").format(qty))
else:
return qty
def get_serials_list(serials):
sr_no=(serials).splitlines()
sr=''
for s in sr_no:
if sr:
sr+=','+'\''+s+'\''
else:
sr='\''+s+'\''
return sr
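# Illustrative example: get_serials_list("SR-001\nSR-002") returns the string
# "'SR-001','SR-002'", ready to be embedded in an SQL IN (...) clause.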
def get_qty_for_serials(serials):
qty=frappe.db.sql("""select ifnull(SUM(qty),0)
from `tabSerial No`
where name in (%s) """%(serials),as_list=1)
if qty:
return qty[0][0]
else:
return 0
#Automatically generate serials based on qty and qty per drum
def generate_serial_no_per_drum(d,doc):
series=frappe.db.get_value('Serial No',{'name':d.serial_no_link,'status':'Available','item_code':d.item_code},'naming_series')
if series:
validate_serial_no(d)
serial_no = (d.custom_serial_no).splitlines()
for sn in serial_no:
if sn:
frappe.db.sql("""update `tabSerial No`
set finished_good='Yes',qty='%s',serial_no_warehouse='%s'
where name='%s'"""%(d.qty_per_drum_bag, d.t_warehouse,sn))
qty=cint(d.qty) - cint(d.qty_per_drum_bag)
serial_no_name=d.serial_no_link + '\n'
while cint(qty) > 0:
qty_for_negative=cint(qty)
qty = cint(qty) - cint(d.qty_per_drum_bag)
if cint(qty) < 0:
name=create_serial_no(d,series,qty_for_negative)
else:
name=create_serial_no(d,series,d.qty_per_drum_bag)
serial_no_name+= name + '\n'
d.custom_serial_no=serial_no_name
frappe.db.sql("""update `tabStock Entry Detail`
set custom_serial_no='%s'
where parent='%s'
and item_code='%s'"""%(serial_no_name,doc.name,d.item_code))
#Create a new serial no for the current item and mark its status as Available
def create_serial_no(d,series,qty):
sr_no=frappe.new_doc('Serial No')
sr_no.naming_series=series
sr_no.item_code=d.item_code
sr_no.qty=cstr(qty)
sr_no.status="Available"
sr_no.item_name=d.item_name
sr_no.is_repacked='Yes'
sr_no.serial_no_warehouse=d.t_warehouse
sr_no.item_group=frappe.db.get_value("Item",{"item_code":d.item_code},'item_group')
sr_no.description=d.description
sr_no.finished_good='Yes'
sr_no.save(ignore_permissions=True)
return sr_no.name
#create target batch no based on series of source batch no
def create_target_batch(d,previous_source_batch):
t_batch_no=get_batch_id(previous_source_batch)
if t_batch_no:
batch=frappe.new_doc('Batch')
batch.batch_id=t_batch_no
batch.item=d.item_code
batch.warehouse=d.t_warehouse
batch.creation='Auto'
batch.save(ignore_permissions=True)
d.target_batch=batch.name
return d.target_batch
def get_batch_id(batch_no):
import re
batch_no=re.sub(r'\d+(?=[^\d]*$)', lambda m: str(int(m.group())+1).zfill(len(m.group())), batch_no)
return batch_no
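# Illustrative example: get_batch_id increments the last run of digits while keeping
# the zero padding, e.g. 'BATCH-0009' -> 'BATCH-0010'.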
#Automatically generate batch and serial no on submission; these serials become the source serials in the next process
def generate_serial_no_and_batch(d,previous_source_batch,doc):
target_batch=d.target_batch#new anand
if previous_source_batch:
target_batch=create_target_batch(d,previous_source_batch)
elif not previous_source_batch and not d.target_batch:
validate_serial_no(d)
sr_no=frappe.new_doc('Serial No')
sr_no.serial_no=target_batch
sr_no.item_code=d.item_code
sr_no.qty=cstr(d.qty)
sr_no.status="Available"
sr_no.item_name=d.item_name
sr_no.serial_no_warehouse=d.t_warehouse
sr_no.item_group=frappe.db.get_value("Item",{"item_code":d.item_code},'item_group')
sr_no.description=d.description
sr_no.batch_no=d.target_batch
sr_no.finished_good='Yes'
sr_no.save(ignore_permissions=True)
d.custom_serial_no=d.target_batch
frappe.db.sql("update `tabStock Entry Detail` set custom_serial_no='%s' where parent='%s' and item_code='%s'"%(d.custom_serial_no,doc.name,d.item_code))
def issue_serial_no(d,status,qty):
if d.custom_serial_no:
sr_no=(d.custom_serial_no).splitlines() or (d.custom_serial_no).split('\n')
for s in sr_no:
frappe.db.sql(""" update `tabSerial No` set status='%s' and
serial_no_warehouse='%s' and qty=%s where name='%s'
"""%(status, d.s_warehouse or d.t_warehouse, cint(qty), s))
#Update Serial Warehouse in serial no on material transfer
def update_serial_no_warehouse(doc,method):
if doc.purpose=='Material Transfer':
for item in doc.get("mtn_details"):
validate_serial_no(item)
if item.custom_serial_no:
sr_no=(item.custom_serial_no).splitlines()
for sr in sr_no:
frappe.db.sql("""update `tabSerial No` set serial_no_warehouse='%s' where name='%s'"""%(item.t_warehouse,sr))
#update qty to serial no on use
def update_qty(doc,method):
for d in doc.get('mtn_details'):
if d.s_warehouse and d.custom_serial_no and doc.purpose in ['Manufacture','Repack','Material Receipt']:
sr_no=(d.custom_serial_no).split('\n')
qty=cint(round(d.qty))
for s in sr_no:
if s:
serial_qty=frappe.db.get_value('Serial No',s,'qty')
if qty >= serial_qty:
qty= cint(qty) - cint(serial_qty)
frappe.db.sql("update `tabSerial No` set qty=qty-%s where name='%s'"%(cint(serial_qty),s))
make_serialgl(d,s,serial_qty,doc)
elif qty > 0:
frappe.db.sql("update `tabSerial No` set qty=qty-%s where name='%s'"%((cint(qty)),s))
make_serialgl(d,s,qty,doc)
qty= cint(qty) - cint(serial_qty)
#keep track of serials used in stock entry
def make_serialgl(d,serial_no,qty,doc):
#change Serial Maintain to Serial Stock
bi=frappe.new_doc('Serial Stock')
bi.document=doc.name
bi.item_code=d.item_code
bi.serial_no=serial_no
bi.qty=cstr(qty)
bi.warehouse=d.s_warehouse or d.t_warehouse
bi.parent=serial_no
bi.parentfield='serial_stock'
bi.parenttype='Serial No'
bi.save(ignore_permissions=True)
#Update Warehouse with serial
def update_serial_in_warehouse(doc,method):
for d in doc.get('mtn_details'):
if d.t_warehouse and d.custom_serial_no and frappe.db.get_value('Warehouse',d.t_warehouse,'is_flowbin')=='Yes':
sr_no=(d.custom_serial_no).splitlines() or (d.custom_serial_no).split('\n')
for s in sr_no:
frappe.db.sql("""update tabWarehouse
set serial_no='%s' where name='%s'"""%(s,d.t_warehouse))
#get source serial grade and attach it to target serial
def update_target_serial_grade(doc,method):
if doc.purpose in ['Manufacture','Repack']:
grade=''
for d in doc.get('mtn_details'):
if d.s_warehouse and d.custom_serial_no:
grade=d.grade
elif d.t_warehouse and d.custom_serial_no:
sr_no=(d.custom_serial_no).splitlines() or (d.custom_serial_no).split('\n')
if sr_no:
for sr in sr_no:
frappe.db.sql("""update `tabSerial No`
set grade='%s' where name='%s'"""%(grade,sr))
grade=''
#track of serials
def update_serialgl(doc,method):
for d in doc.get('mtn_details'):
if doc.purpose in ['Manufacture','Repack','Material Receipt']:
if d.custom_serial_no and d.s_warehouse:
serial_no=(d.custom_serial_no).split('\n')
for sr_no in serial_no:
qty=0
#change Serial Maintain to Serial Stock
serial_no_qty=frappe.db.sql("""select qty from `tabSerial Stock`
where parent='%s' and document='%s'"""%(sr_no,doc.name),as_list=1)
if serial_no_qty:
frappe.db.sql("""update `tabSerial No`
set qty=qty+%s,status='Available'
where name='%s'"""%(serial_no_qty[0][0],sr_no))
#change Serial Maintain to Serial Stock
frappe.db.sql("""delete from `tabSerial Stock`
where parent='%s' and document='%s'"""%(sr_no,doc.name))
else:
if d.custom_serial_no:
serial_no=(d.custom_serial_no).split('\n')
for sr_no in serial_no:
frappe.db.sql("""delete from `tabSerial No`
where name='%s'"""%(sr_no))
#update batch status on use
def update_batch_status(status,target_batch):
frappe.db.sql("""update `tabBatch`
set used='%s' where name='%s'"""%(status,target_batch))
def get_serial_no_dn(doctype,txt,searchfield,start,page_len,filters):
doc=filters['doc']
cond=get_conditions(doc)
frappe.errprint(cond)
if cond:
return frappe.db.sql("""select name from `tabSerial No` %s and status='Available' and item_code='%s'"""%(cond,doc['item_code']),debug=1) or [['']]
else:
return [['']]
def get_conditions(doc):
con=''
qc=frappe.db.sql("""select chemical_analysis,psd_analysis,ssa from `tabItem`
where item_code='%s'"""%(doc['item_code']),as_list=1)
if qc[0][0]=='Yes' and qc[0][1]=='Yes' and qc[0][2]=='Yes':
con="where qc_status='Accepted' and sa_analysis='Accepted' and psd_status='Accepted'"
elif qc[0][0]=='Yes' and qc[0][1]=='Yes':
con="where qc_status='Accepted' and psd_status='Accepted'"
elif qc[0][0]=='Yes' and qc[0][2]=='Yes':
con="where qc_status='Accepted' and sa_analysis='Accepted'"
elif qc[0][1]=='Yes' and qc[0][2]=='Yes':
con="where sa_analysis='Accepted' and psd_status='Accepted'"
elif qc[0][0]=='Yes':
con="where qc_status='Accepted'"
elif qc[0][1]=='Yes':
con="where psd_status='Accepted'"
elif qc[0][2]=='Yes':
con="where sa_analysis='Accepted'"
return con
#return query to get serials
def get_serial_no(doctype,txt,searchfield,start,page_len,filters):
doc=filters['doc']
	if doc['t_warehouse'] and doc['purpose'] in ['Manufacture', 'Repack'] and doc['qty_per_drum_bag']:
return frappe.db.sql("""select name from `tabSerial No` where item_code='%s'
and ifnull(qty, 0) = 0
and status='Available' and finished_good='No' and
serial_no_warehouse='%s'"""%(doc['item_code'],doc['t_warehouse']),debug=1)
elif doc['purpose']=='Sales Return':
return frappe.db.sql("""select name from `tabSerial No` where item_code='%s'
and status='Delivered'"""%(doc['item_code']))
else:
return frappe.db.sql("""select name from `tabSerial No` where item_code='%s'
and ifnull(qty,0)<>0
and status='Available' and serial_no_warehouse='%s'"""%(doc['item_code'],doc['s_warehouse'] or doc['t_warehouse']))
def get_serial_from(doctype,txt,searchfield,start,page_len,filters):
return frappe.db.sql("""select name,item_name,status from `tabSerial No`
where item_code='%(item_code)s'
and ifnull(qc_status,'')=''
and status='Available'"""%{'item_code':filters['item_code']})
def get_serial_from_psd(doctype,txt,searchfield,start,page_len,filters):
return frappe.db.sql("""select name,item_name,status from `tabSerial No`
where item_code='%(item_code)s'
and ifnull(psd_status,'')=''
and status='Available'"""%{'item_code':filters['item_code']})
def get_serial_from_sa(doctype,txt,searchfield,start,page_len,filters):
return frappe.db.sql("""select name,item_name,status from `tabSerial No`
where item_code='%(item_code)s'
and ifnull(sa_analysis,'')=''
and status='Available'"""%{'item_code':filters['item_code']})
def get_source_batch(doctype,txt,searchfield,start,page_len,filters):
return frappe.db.sql("""select name from `tabBatch`
where warehouse='%s'
and name in(select name from `tabSerial No`
where qty!=0)"""%(filters.get('warehouse')))
#Method called when a Purchase Receipt is created
def generate_serial_no(doc,method):
for d in doc.get('purchase_receipt_details'):
if not d.sr_no:
frappe.throw(_("Select Serial No and Click on Add for Item: ").format(d.item_code))
elif d.sr_no and d.qty_per_drum_bag:
series=frappe.db.get_value('Serial No',{'name':d.custom_serial_no,'status':'Available','item_code':d.item_code},'naming_series')
if series and d.qty_per_drum_bag:
frappe.errprint([series, d.qty_per_drum_bag])
frappe.db.sql("update `tabSerial No` set qty='%s',serial_no_warehouse='%s' where name='%s'"%(d.qty_per_drum_bag, d.warehouse,d.sr_no))
qty=cint(d.qty) - cint(d.qty_per_drum_bag)
serial_no_name=d.custom_serial_no + '\n'
while cint(qty) > 0:
qty_for_negative=cint(qty)
qty = cint(qty) - cint(d.qty_per_drum_bag)
if cint(qty) < 0:
name=create_serial_no_pr(d,series,qty_for_negative)
else:
name=create_serial_no_pr(d,series,d.qty_per_drum_bag)
serial_no_name+= name + '\n'
frappe.db.sql("update `tabPurchase Receipt Item` set sr_no='%s' where parent='%s' and item_code='%s'"%(serial_no_name,doc.name,d.item_code))
d.sr_no=serial_no_name
elif d.sr_no and not d.qty_per_drum_bag:
frappe.throw(_("Enter Quantity per Drum/Bag for Item {0}").format(d.item_code))
def create_serial_no_pr(d,series,qty):
sr_no=frappe.new_doc('Serial No')
sr_no.naming_series=series
sr_no.item_code=d.item_code
sr_no.qty=cstr(qty)
sr_no.status="Available"
sr_no.item_name=d.item_name
sr_no.is_repacked='Yes'
sr_no.serial_no_warehouse=d.warehouse
sr_no.item_group=d.item_group
sr_no.description=d.description
sr_no.finished_good='No'
sr_no.save(ignore_permissions=True)
return sr_no.name
def delete_serial_no(doc,method):
for d in doc.get('purchase_receipt_details'):
if d.sr_no:
sr_no=(d.sr_no).split('\n')
for s in sr_no:
frappe.db.sql("delete from `tabSerial No` where name='%s'"%(s))
def check_range(doc,method):
parm=[]
for d in doc.get("item_specification_details"):
if d.min_value and d.max_value:
if not flt(d.min_value) <= flt(d.max_value):
msgprint(_("Min value should be less than max for Inspection parameters"),raise_exception=1)
elif not d.min_value and not d.max_value:
msgprint(_("Min and Max value can not be blank Inspection Parameter"),raise_exception=1)
if d.specification in parm:
msgprint(_("Duplicate parameter {0} found at row {1}").format(d.specification,d.idx),raise_exception=1)
parm.append(d.specification)
@frappe.whitelist()
def make_quality_checking(mtn_details):
mtn_details=eval(mtn_details)
msg=''
for d in mtn_details:
if d.get('parenttype')=='Purchase Receipt' and d.get('sr_no'):
serial_no = (d.get('sr_no')).splitlines() or (d.get('sr_no')).split('\n')
msg=assign_checking(serial_no,d.get('item_code'))
elif d.get('parenttype')=='Stock Entry' and d.get('custom_serial_no') and d.get('t_warehouse'):
serial_no = (d.get('custom_serial_no')).splitlines() or (d.get('custom_serial_no')).split('\n')
msg=assign_checking(serial_no,d.get('item_code'))
if msg:
frappe.msgprint(msg)
@frappe.whitelist()
def assign_checking(sr_no,item_code):
msg='This serial no is already assigned'
quality_checker=frappe.db.sql("select distinct parent from `tabUserRole` where role in('Quality Checker','System Manager')",as_list=1)
if quality_checker:
for checker in quality_checker:
count = 0
for s in sr_no:
if not frappe.db.get_value('ToDo',{'serial_no':s,'owner':checker[0]},'name'):
to_do=frappe.new_doc('ToDo')
to_do.reference_type='Quality Checking'
to_do.role='Quality Checker'
to_do.owner=checker[0]
to_do.assigned_by=frappe.session.user
to_do.description='Do QC for Serial No %s'%(s)
to_do.status='Open'
to_do.priority='Medium'
to_do.serial_no=s
to_do.item_code=item_code
to_do.save()
count+=1
if count!=0:
msg="Assign {0} serial no to Quality Checker".format(count)
return msg
| suyashphadtare/vestasi-erp-1 | erpnext/stock/custom_methods.py | Python | agpl-3.0 | 26,194 |
from timit_dataset import TimitPhoneData
import itertools
import numpy as np
from pylearn2.datasets import DenseDesignMatrix
from pylearn2.models.mlp import MLP, Sigmoid, Softmax, VectorSpace
from pylearn2.termination_criteria import EpochCounter
from pylearn2.training_algorithms.sgd import SGD
from pylearn2.training_algorithms import learning_rule
from pylearn2.train import Train
from pylearn2.train_extensions import best_params
print "Loading training dataset"
train = TimitPhoneData('/home/jfsantos/data/TIMIT/', framelen=160, overlap=80, start=0, stop=100)
print "Loading validation dataset"
valid = TimitPhoneData('/home/jfsantos/data/TIMIT/', framelen=160, overlap=80, start=2500, stop=2520)
print "Loading test dataset"
test = TimitPhoneData('/home/jfsantos/data/TIMIT/', framelen=160, overlap=80, start=4000, stop=4050)
print "Finished loading datasets"
x0 = VectorSpace(160)
s0 = Sigmoid(layer_name='h0', dim=500, sparse_init=100)
s1 = Sigmoid(layer_name='h1', dim=500, sparse_init=100)
y0 = Softmax(layer_name='y', sparse_init=10, n_classes=61)
mdl = MLP(layers=[s0, s1, y0], nvis=160, input_space=x0)
trainer = SGD(batch_size=1024, learning_rate = .01, init_momentum = .5,
monitoring_dataset = {'train' : train, 'valid': valid,
'test' : test}, termination_criterion =
EpochCounter(max_epochs=50))
watcher = best_params.MonitorBasedSaveBest(
channel_name='valid_y_misclass',
save_path='phonerec_mlp_2sig_softmax.pkl')
experiment = Train(dataset=train,
model=mdl,
algorithm=trainer, extensions = [watcher])
experiment.main_loop()
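# A minimal sketch (assumes pylearn2.utils.serial is available) for reloading the
# best model that MonitorBasedSaveBest wrote to disk:
# from pylearn2.utils import serial
# best_mdl = serial.load('phonerec_mlp_2sig_softmax.pkl')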
| jfsantos/ift6266h14 | old/exp_phone_rec.py | Python | mit | 1,681 |
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2015-2016 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import copy
import functools
import glob
import os
from snapcraft import file_utils
from snapcraft.internal import common
from ._base import Base
class Local(Base):
def __init__(self, *args, copy_function=file_utils.link_or_copy, **kwargs):
super().__init__(*args, **kwargs)
self.source_abspath = os.path.abspath(self.source)
self.copy_function = copy_function
self._ignore = functools.partial(_ignore, self.source_abspath, os.getcwd())
def pull(self):
file_utils.link_or_copy_tree(
self.source_abspath,
self.source_dir,
ignore=self._ignore,
copy_function=self.copy_function,
)
def _check(self, target):
try:
target_mtime = os.lstat(target).st_mtime
except FileNotFoundError:
return False
self._updated_files = set()
self._updated_directories = set()
for (root, directories, files) in os.walk(self.source_abspath, topdown=True):
ignored = set(self._ignore(root, directories + files, check=True))
if ignored:
# Prune our search appropriately given an ignore list, i.e.
# don't walk into directories that are ignored.
directories[:] = [d for d in directories if d not in ignored]
for file_name in set(files) - ignored:
path = os.path.join(root, file_name)
if os.lstat(path).st_mtime >= target_mtime:
self._updated_files.add(os.path.relpath(path, self.source))
for directory in directories:
path = os.path.join(root, directory)
if os.lstat(path).st_mtime >= target_mtime:
# Don't descend into this directory-- we'll just copy it
# entirely.
directories.remove(directory)
# os.walk will include symlinks to directories here, but we
# want to treat those as files
relpath = os.path.relpath(path, self.source)
if os.path.islink(path):
self._updated_files.add(relpath)
else:
self._updated_directories.add(relpath)
return len(self._updated_files) > 0 or len(self._updated_directories) > 0
def _update(self):
# First, copy the directories
for directory in self._updated_directories:
file_utils.link_or_copy_tree(
os.path.join(self.source, directory),
os.path.join(self.source_dir, directory),
ignore=self._ignore,
copy_function=self.copy_function,
)
# Now, copy files
for file_path in self._updated_files:
self.copy_function(
os.path.join(self.source, file_path),
os.path.join(self.source_dir, file_path),
)
def _ignore(source, current_directory, directory, files, check=False):
if directory == source or directory == current_directory:
ignored = copy.copy(common.SNAPCRAFT_FILES)
if check:
# TODO: We hardcode the snap directory here, but we really need
# to ignore the directory where snapcraft.yaml is hosted.
ignored.extend(["snap", "snapcraft.yaml", ".snapcraft.yaml"])
snaps = glob.glob(os.path.join(directory, "*.snap"))
if snaps:
snaps = [os.path.basename(s) for s in snaps]
ignored += snaps
return ignored
else:
return []
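# Rough illustration (exact entries depend on common.SNAPCRAFT_FILES): when called
# for the project root (or the current working directory) with check=True, _ignore()
# returns the snapcraft bookkeeping entries plus 'snap', 'snapcraft.yaml',
# '.snapcraft.yaml' and any '*.snap' files found in that directory; for any other
# directory it returns [].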
| snapcore/snapcraft | snapcraft/internal/sources/_local.py | Python | gpl-3.0 | 4,275 |
from .alarm_state import AlarmState
from .envisalink_base_client import EnvisalinkClient
from .honeywell_client import HoneywellClient
from .dsc_client import DSCClient
from .alarm_panel import EnvisalinkAlarmPanel
| jnimmo/pyenvisalink | pyenvisalink/__init__.py | Python | mit | 215 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Big Switch Networks, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mandeep Dhami, Big Switch Networks, Inc.
# @author: Sumit Naiksatam, [email protected], Big Switch Networks, Inc.
"""
Quantum REST Proxy Plug-in for Big Switch and FloodLight Controllers
QuantumRestProxy provides a generic quantum plugin that translates all plugin
function calls to equivalent authenticated REST calls to a set of redundant
external network controllers. It also keeps persistent store for all quantum
state to allow for re-sync of the external controller(s), if required.
The local state on the plugin also allows for local response and fast-fail
semantics where it can be determined based on the local persistent store.
Network controller specific code is decoupled from this plugin and expected
to reside on the controller itself (via the REST interface).
This allows for:
- independent authentication and redundancy schemes between quantum and the
network controller
- independent upgrade/development cycles between quantum and the controller
as it limits the proxy code upgrade requirement to quantum release cycle
and the controller specific code upgrade requirement to controller code
- ability to sync the controller with quantum for independent recovery/reset
External REST API used by proxy is the same API as defined for quantum (JSON
subset) with some additional parameters (gateway on network-create and macaddr
on port-attach) on an additional PUT to do a bulk dump of all persistent data.
"""
import base64
import copy
import httplib
import json
import socket
from oslo.config import cfg
from quantum.api.rpc.agentnotifiers import dhcp_rpc_agent_api
from quantum.common import constants as const
from quantum.common import exceptions
from quantum.common import rpc as q_rpc
from quantum.common import topics
from quantum.common import utils
from quantum import context as qcontext
from quantum.db import api as db
from quantum.db import db_base_plugin_v2
from quantum.db import dhcp_rpc_base
from quantum.db import l3_db
from quantum.extensions import l3
from quantum.extensions import portbindings
from quantum.openstack.common import lockutils
from quantum.openstack.common import log as logging
from quantum.openstack.common import rpc
from quantum.plugins.bigswitch.version import version_string_with_vcs
from quantum import policy
LOG = logging.getLogger(__name__)
restproxy_opts = [
cfg.StrOpt('servers', default='localhost:8800',
help=_("A comma separated list of servers and port numbers "
"to proxy request to.")),
cfg.StrOpt('server_auth', default='username:password', secret=True,
help=_("Server authentication")),
cfg.BoolOpt('server_ssl', default=False,
help=_("Use SSL to connect")),
cfg.BoolOpt('sync_data', default=False,
help=_("Sync data on connect")),
cfg.IntOpt('server_timeout', default=10,
help=_("Maximum number of seconds to wait for proxy request "
"to connect and complete.")),
cfg.StrOpt('quantum_id', default='Quantum-' + utils.get_hostname(),
help=_("User defined identifier for this Quantum deployment")),
cfg.BoolOpt('add_meta_server_route', default=True,
help=_("Flag to decide if a route to the metadata server "
"should be injected into the VM")),
]
cfg.CONF.register_opts(restproxy_opts, "RESTPROXY")
# The following are used to invoke the API on the external controller
NET_RESOURCE_PATH = "/tenants/%s/networks"
PORT_RESOURCE_PATH = "/tenants/%s/networks/%s/ports"
ROUTER_RESOURCE_PATH = "/tenants/%s/routers"
ROUTER_INTF_OP_PATH = "/tenants/%s/routers/%s/interfaces"
NETWORKS_PATH = "/tenants/%s/networks/%s"
PORTS_PATH = "/tenants/%s/networks/%s/ports/%s"
ATTACHMENT_PATH = "/tenants/%s/networks/%s/ports/%s/attachment"
ROUTERS_PATH = "/tenants/%s/routers/%s"
ROUTER_INTF_PATH = "/tenants/%s/routers/%s/interfaces/%s"
SUCCESS_CODES = range(200, 207)
FAILURE_CODES = [0, 301, 302, 303, 400, 401, 403, 404, 500, 501, 502, 503,
504, 505]
SYNTAX_ERROR_MESSAGE = 'Syntax error in server config file, aborting plugin'
BASE_URI = '/networkService/v1.1'
ORCHESTRATION_SERVICE_ID = 'Quantum v2.0'
METADATA_SERVER_IP = '169.254.169.254'
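# Illustrative expansion (tenant/network ids are placeholders): PORT_RESOURCE_PATH
# % ('tenant-1', 'net-1') yields '/tenants/tenant-1/networks/net-1/ports', which
# ServerProxy.rest_call appends to BASE_URI when building the request URI.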
class RemoteRestError(exceptions.QuantumException):
def __init__(self, message):
if message is None:
message = "None"
self.message = _("Error in REST call to remote network "
"controller") + ": " + message
super(RemoteRestError, self).__init__()
class ServerProxy(object):
"""REST server proxy to a network controller."""
def __init__(self, server, port, ssl, auth, quantum_id, timeout,
base_uri, name):
self.server = server
self.port = port
self.ssl = ssl
self.base_uri = base_uri
self.timeout = timeout
self.name = name
self.success_codes = SUCCESS_CODES
self.auth = None
self.quantum_id = quantum_id
if auth:
self.auth = 'Basic ' + base64.encodestring(auth).strip()
@lockutils.synchronized('rest_call', 'bsn-', external=True)
def rest_call(self, action, resource, data, headers):
uri = self.base_uri + resource
body = json.dumps(data)
if not headers:
headers = {}
headers['Content-type'] = 'application/json'
headers['Accept'] = 'application/json'
headers['QuantumProxy-Agent'] = self.name
headers['Instance-ID'] = self.quantum_id
headers['Orchestration-Service-ID'] = ORCHESTRATION_SERVICE_ID
if self.auth:
headers['Authorization'] = self.auth
LOG.debug(_("ServerProxy: server=%(server)s, port=%(port)d, "
"ssl=%(ssl)r, action=%(action)s"),
{'server': self.server, 'port': self.port, 'ssl': self.ssl,
'action': action})
LOG.debug(_("ServerProxy: resource=%(resource)s, data=%(data)r, "
"headers=%(headers)r"), locals())
conn = None
if self.ssl:
conn = httplib.HTTPSConnection(
self.server, self.port, timeout=self.timeout)
if conn is None:
LOG.error(_('ServerProxy: Could not establish HTTPS '
'connection'))
return 0, None, None, None
else:
conn = httplib.HTTPConnection(
self.server, self.port, timeout=self.timeout)
if conn is None:
LOG.error(_('ServerProxy: Could not establish HTTP '
'connection'))
return 0, None, None, None
try:
conn.request(action, uri, body, headers)
response = conn.getresponse()
respstr = response.read()
respdata = respstr
if response.status in self.success_codes:
try:
respdata = json.loads(respstr)
except ValueError:
# response was not JSON, ignore the exception
pass
ret = (response.status, response.reason, respstr, respdata)
except (socket.timeout, socket.error) as e:
LOG.error(_('ServerProxy: %(action)s failure, %(e)r'), locals())
ret = 0, None, None, None
conn.close()
LOG.debug(_("ServerProxy: status=%(status)d, reason=%(reason)r, "
"ret=%(ret)s, data=%(data)r"), {'status': ret[0],
'reason': ret[1],
'ret': ret[2],
'data': ret[3]})
return ret
class ServerPool(object):
def __init__(self, servers, ssl, auth, quantum_id, timeout=10,
base_uri='/quantum/v1.0', name='QuantumRestProxy'):
self.base_uri = base_uri
self.timeout = timeout
self.name = name
self.auth = auth
self.ssl = ssl
self.quantum_id = quantum_id
self.servers = []
for server_port in servers:
self.servers.append(self.server_proxy_for(*server_port))
def server_proxy_for(self, server, port):
return ServerProxy(server, port, self.ssl, self.auth, self.quantum_id,
self.timeout, self.base_uri, self.name)
def server_failure(self, resp):
"""Define failure codes as required.
Note: We assume 301-303 is a failure, and try the next server in
the server pool.
"""
return resp[0] in FAILURE_CODES
def action_success(self, resp):
"""Defining success codes as required.
Note: We assume any valid 2xx as being successful response.
"""
return resp[0] in SUCCESS_CODES
def rest_call(self, action, resource, data, headers):
failed_servers = []
while self.servers:
active_server = self.servers[0]
ret = active_server.rest_call(action, resource, data, headers)
if not self.server_failure(ret):
self.servers.extend(failed_servers)
return ret
else:
LOG.error(_('ServerProxy: %(action)s failure for servers: '
'%(server)r'),
{'action': action,
'server': (active_server.server,
active_server.port)})
failed_servers.append(self.servers.pop(0))
# All servers failed, reset server list and try again next time
LOG.error(_('ServerProxy: %(action)s failure for all servers: '
'%(server)r'),
{'action': action,
'server': tuple((s.server,
s.port) for s in failed_servers)})
self.servers.extend(failed_servers)
return (0, None, None, None)
def get(self, resource, data='', headers=None):
return self.rest_call('GET', resource, data, headers)
def put(self, resource, data, headers=None):
return self.rest_call('PUT', resource, data, headers)
def post(self, resource, data, headers=None):
return self.rest_call('POST', resource, data, headers)
def delete(self, resource, data='', headers=None):
return self.rest_call('DELETE', resource, data, headers)
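# Illustrative usage sketch (host and credentials are placeholders, not part of this
# module): a pool is built from (server, port) tuples and every call returns a
# (status, reason, body_string, parsed_body) tuple.
#
#   pool = ServerPool([('localhost', 8800)], ssl=False, auth='user:pass',
#                     quantum_id='Quantum-example', base_uri=BASE_URI)
#   status, reason, body, data = pool.get('/health')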
class RpcProxy(dhcp_rpc_base.DhcpRpcCallbackMixin):
RPC_API_VERSION = '1.0'
def create_rpc_dispatcher(self):
return q_rpc.PluginRpcDispatcher([self])
class QuantumRestProxyV2(db_base_plugin_v2.QuantumDbPluginV2,
l3_db.L3_NAT_db_mixin):
supported_extension_aliases = ["router", "binding"]
binding_view = "extension:port_binding:view"
binding_set = "extension:port_binding:set"
def __init__(self):
LOG.info(_('QuantumRestProxy: Starting plugin. Version=%s'),
version_string_with_vcs())
# init DB, proxy's persistent store defaults to in-memory sql-lite DB
db.configure_db()
# 'servers' is the list of network controller REST end-points
        # (used in order specified till one succeeds, and it is sticky
# till next failure). Use 'server_auth' to encode api-key
servers = cfg.CONF.RESTPROXY.servers
server_auth = cfg.CONF.RESTPROXY.server_auth
server_ssl = cfg.CONF.RESTPROXY.server_ssl
sync_data = cfg.CONF.RESTPROXY.sync_data
timeout = cfg.CONF.RESTPROXY.server_timeout
quantum_id = cfg.CONF.RESTPROXY.quantum_id
self.add_meta_server_route = cfg.CONF.RESTPROXY.add_meta_server_route
# validate config
assert servers is not None, 'Servers not defined. Aborting plugin'
servers = tuple(s.rsplit(':', 1) for s in servers.split(','))
servers = tuple((server, int(port)) for server, port in servers)
assert all(len(s) == 2 for s in servers), SYNTAX_ERROR_MESSAGE
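        # Illustrative: a setting such as servers="host1:8800,host2:8800" parses
        # into (('host1', 8800), ('host2', 8800)) here; each entry must be
        # host:port or the assertion above aborts the plugin.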
# init network ctrl connections
self.servers = ServerPool(servers, server_ssl, server_auth, quantum_id,
timeout, BASE_URI)
# init dhcp support
self.topic = topics.PLUGIN
self.conn = rpc.create_connection(new=True)
self.callbacks = RpcProxy()
self.dispatcher = self.callbacks.create_rpc_dispatcher()
self.conn.create_consumer(self.topic, self.dispatcher,
fanout=False)
# Consume from all consumers in a thread
self.conn.consume_in_thread()
if sync_data:
self._send_all_data()
self._dhcp_agent_notifier = dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
LOG.debug(_("QuantumRestProxyV2: initialization done"))
def create_network(self, context, network):
"""Create a network, which represents an L2 network segment which
can have a set of subnets and ports associated with it.
:param context: quantum api request context
:param network: dictionary describing the network
:returns: a sequence of mappings with the following signature:
{
"id": UUID representing the network.
"name": Human-readable name identifying the network.
"tenant_id": Owner of network. NOTE: only admin user can specify
a tenant_id other than its own.
"admin_state_up": Sets admin state of network.
if down, network does not forward packets.
"status": Indicates whether network is currently operational
(values are "ACTIVE", "DOWN", "BUILD", and "ERROR")
"subnets": Subnets associated with this network.
}
:raises: RemoteRestError
"""
LOG.debug(_("QuantumRestProxyV2: create_network() called"))
self._warn_on_state_status(network['network'])
# Validate args
tenant_id = self._get_tenant_id_for_create(context, network["network"])
session = context.session
with session.begin(subtransactions=True):
# create network in DB
new_net = super(QuantumRestProxyV2, self).create_network(context,
network)
self._process_l3_create(context, network['network'], new_net['id'])
self._extend_network_dict_l3(context, new_net)
# create network on the network controller
try:
resource = NET_RESOURCE_PATH % tenant_id
mapped_network = self._get_mapped_network_with_subnets(new_net)
data = {
"network": mapped_network
}
ret = self.servers.post(resource, data)
if not self.servers.action_success(ret):
raise RemoteRestError(ret[2])
except RemoteRestError as e:
LOG.error(_("QuantumRestProxyV2:Unable to create remote "
"network: %s"), e.message)
super(QuantumRestProxyV2, self).delete_network(context,
new_net['id'])
raise
# return created network
return new_net
def update_network(self, context, net_id, network):
"""Updates the properties of a particular Virtual Network.
:param context: quantum api request context
:param net_id: uuid of the network to update
:param network: dictionary describing the updates
:returns: a sequence of mappings with the following signature:
{
"id": UUID representing the network.
"name": Human-readable name identifying the network.
"tenant_id": Owner of network. NOTE: only admin user can
specify a tenant_id other than its own.
"admin_state_up": Sets admin state of network.
if down, network does not forward packets.
"status": Indicates whether network is currently operational
(values are "ACTIVE", "DOWN", "BUILD", and "ERROR")
"subnets": Subnets associated with this network.
}
:raises: exceptions.NetworkNotFound
:raises: RemoteRestError
"""
LOG.debug(_("QuantumRestProxyV2.update_network() called"))
self._warn_on_state_status(network['network'])
session = context.session
with session.begin(subtransactions=True):
orig_net = super(QuantumRestProxyV2, self).get_network(context,
net_id)
new_net = super(QuantumRestProxyV2, self).update_network(context,
net_id,
network)
self._process_l3_update(context, network['network'], net_id)
self._extend_network_dict_l3(context, new_net)
# update network on network controller
try:
self._send_update_network(new_net)
except RemoteRestError as e:
LOG.error(_("QuantumRestProxyV2: Unable to update remote "
"network: %s"), e.message)
# reset network to original state
            super(QuantumRestProxyV2, self).update_network(context, net_id,
orig_net)
raise
# return updated network
return new_net
def delete_network(self, context, net_id):
"""Delete a network.
:param context: quantum api request context
:param id: UUID representing the network to delete.
:returns: None
:raises: exceptions.NetworkInUse
:raises: exceptions.NetworkNotFound
:raises: RemoteRestError
"""
LOG.debug(_("QuantumRestProxyV2: delete_network() called"))
# Validate args
orig_net = super(QuantumRestProxyV2, self).get_network(context, net_id)
tenant_id = orig_net["tenant_id"]
filter = {'network_id': [net_id]}
ports = self.get_ports(context, filters=filter)
# check if there are any tenant owned ports in-use
auto_delete_port_owners = db_base_plugin_v2.AUTO_DELETE_PORT_OWNERS
only_auto_del = all(p['device_owner'] in auto_delete_port_owners
for p in ports)
if not only_auto_del:
raise exceptions.NetworkInUse(net_id=net_id)
# delete from network ctrl. Remote error on delete is ignored
try:
resource = NETWORKS_PATH % (tenant_id, net_id)
ret = self.servers.delete(resource)
if not self.servers.action_success(ret):
raise RemoteRestError(ret[2])
ret_val = super(QuantumRestProxyV2, self).delete_network(context,
net_id)
return ret_val
except RemoteRestError as e:
LOG.error(_("QuantumRestProxyV2: Unable to update remote "
"network: %s"), e.message)
raise
def create_port(self, context, port):
"""Create a port, which is a connection point of a device
(e.g., a VM NIC) to attach to a L2 Quantum network.
:param context: quantum api request context
:param port: dictionary describing the port
:returns:
{
"id": uuid represeting the port.
"network_id": uuid of network.
"tenant_id": tenant_id
"mac_address": mac address to use on this port.
"admin_state_up": Sets admin state of port. if down, port
does not forward packets.
"status": dicates whether port is currently operational
(limit values to "ACTIVE", "DOWN", "BUILD", and "ERROR")
"fixed_ips": list of subnet ID"s and IP addresses to be used on
this port
"device_id": identifies the device (e.g., virtual server) using
this port.
}
:raises: exceptions.NetworkNotFound
:raises: exceptions.StateInvalid
:raises: RemoteRestError
"""
LOG.debug(_("QuantumRestProxyV2: create_port() called"))
# Update DB
port["port"]["admin_state_up"] = False
new_port = super(QuantumRestProxyV2, self).create_port(context, port)
net = super(QuantumRestProxyV2,
self).get_network(context, new_port["network_id"])
if self.add_meta_server_route:
if new_port['device_owner'] == 'network:dhcp':
destination = METADATA_SERVER_IP + '/32'
self._add_host_route(context, destination, new_port)
        # create on network ctrl
try:
resource = PORT_RESOURCE_PATH % (net["tenant_id"], net["id"])
mapped_port = self._map_state_and_status(new_port)
data = {
"port": mapped_port
}
ret = self.servers.post(resource, data)
if not self.servers.action_success(ret):
raise RemoteRestError(ret[2])
# connect device to network, if present
device_id = port["port"].get("device_id")
if device_id:
self._plug_interface(context,
net["tenant_id"], net["id"],
new_port["id"], device_id)
except RemoteRestError as e:
LOG.error(_("QuantumRestProxyV2: Unable to create remote port: "
"%s"), e.message)
super(QuantumRestProxyV2, self).delete_port(context,
new_port["id"])
raise
# Set port state up and return that port
port_update = {"port": {"admin_state_up": True}}
new_port = super(QuantumRestProxyV2, self).update_port(context,
new_port["id"],
port_update)
return self._extend_port_dict_binding(context, new_port)
def get_port(self, context, id, fields=None):
with context.session.begin(subtransactions=True):
port = super(QuantumRestProxyV2, self).get_port(context, id,
fields)
self._extend_port_dict_binding(context, port)
return self._fields(port, fields)
def get_ports(self, context, filters=None, fields=None):
with context.session.begin(subtransactions=True):
ports = super(QuantumRestProxyV2, self).get_ports(context, filters,
fields)
for port in ports:
self._extend_port_dict_binding(context, port)
return [self._fields(port, fields) for port in ports]
def update_port(self, context, port_id, port):
"""Update values of a port.
:param context: quantum api request context
:param id: UUID representing the port to update.
:param port: dictionary with keys indicating fields to update.
:returns: a mapping sequence with the following signature:
{
"id": uuid represeting the port.
"network_id": uuid of network.
"tenant_id": tenant_id
"mac_address": mac address to use on this port.
"admin_state_up": sets admin state of port. if down, port
does not forward packets.
"status": dicates whether port is currently operational
(limit values to "ACTIVE", "DOWN", "BUILD", and "ERROR")
"fixed_ips": list of subnet ID's and IP addresses to be used on
this port
"device_id": identifies the device (e.g., virtual server) using
this port.
}
:raises: exceptions.StateInvalid
:raises: exceptions.PortNotFound
:raises: RemoteRestError
"""
LOG.debug(_("QuantumRestProxyV2: update_port() called"))
self._warn_on_state_status(port['port'])
# Validate Args
orig_port = super(QuantumRestProxyV2, self).get_port(context, port_id)
# Update DB
new_port = super(QuantumRestProxyV2, self).update_port(context,
port_id, port)
        # update on network ctrl
try:
resource = PORTS_PATH % (orig_port["tenant_id"],
orig_port["network_id"], port_id)
mapped_port = self._map_state_and_status(new_port)
data = {"port": mapped_port}
ret = self.servers.put(resource, data)
if not self.servers.action_success(ret):
raise RemoteRestError(ret[2])
if new_port.get("device_id") != orig_port.get("device_id"):
if orig_port.get("device_id"):
self._unplug_interface(context, orig_port["tenant_id"],
orig_port["network_id"],
orig_port["id"])
device_id = new_port.get("device_id")
if device_id:
self._plug_interface(context, new_port["tenant_id"],
new_port["network_id"],
new_port["id"], device_id)
except RemoteRestError as e:
LOG.error(_("QuantumRestProxyV2: Unable to create remote port: "
"%s"), e.message)
# reset port to original state
super(QuantumRestProxyV2, self).update_port(context, port_id,
orig_port)
raise
# return new_port
return self._extend_port_dict_binding(context, new_port)
def delete_port(self, context, port_id, l3_port_check=True):
"""Delete a port.
:param context: quantum api request context
:param id: UUID representing the port to delete.
:raises: exceptions.PortInUse
:raises: exceptions.PortNotFound
:raises: exceptions.NetworkNotFound
:raises: RemoteRestError
"""
LOG.debug(_("QuantumRestProxyV2: delete_port() called"))
# if needed, check to see if this is a port owned by
# and l3-router. If so, we should prevent deletion.
if l3_port_check:
self.prevent_l3_port_deletion(context, port_id)
self.disassociate_floatingips(context, port_id)
super(QuantumRestProxyV2, self).delete_port(context, port_id)
def _delete_port(self, context, port_id):
# Delete from DB
port = super(QuantumRestProxyV2, self).get_port(context, port_id)
# delete from network ctrl. Remote error on delete is ignored
try:
resource = PORTS_PATH % (port["tenant_id"], port["network_id"],
port_id)
ret = self.servers.delete(resource)
if not self.servers.action_success(ret):
raise RemoteRestError(ret[2])
if port.get("device_id"):
self._unplug_interface(context, port["tenant_id"],
port["network_id"], port["id"])
ret_val = super(QuantumRestProxyV2, self)._delete_port(context,
port_id)
return ret_val
except RemoteRestError as e:
LOG.error(_("QuantumRestProxyV2: Unable to update remote port: "
"%s"), e.message)
raise
def _plug_interface(self, context, tenant_id, net_id, port_id,
remote_interface_id):
"""Attaches a remote interface to the specified port on the
specified Virtual Network.
:returns: None
:raises: exceptions.NetworkNotFound
:raises: exceptions.PortNotFound
:raises: RemoteRestError
"""
LOG.debug(_("QuantumRestProxyV2: _plug_interface() called"))
# update attachment on network controller
try:
port = super(QuantumRestProxyV2, self).get_port(context, port_id)
mac = port["mac_address"]
if mac is not None:
resource = ATTACHMENT_PATH % (tenant_id, net_id, port_id)
data = {"attachment":
{"id": remote_interface_id,
"mac": mac,
}
}
ret = self.servers.put(resource, data)
if not self.servers.action_success(ret):
raise RemoteRestError(ret[2])
except RemoteRestError as e:
LOG.error(_("QuantumRestProxyV2:Unable to update remote network: "
"%s"), e.message)
raise
def _unplug_interface(self, context, tenant_id, net_id, port_id):
"""Detaches a remote interface from the specified port on the
network controller
:returns: None
:raises: RemoteRestError
"""
LOG.debug(_("QuantumRestProxyV2: _unplug_interface() called"))
# delete from network ctrl. Remote error on delete is ignored
try:
resource = ATTACHMENT_PATH % (tenant_id, net_id, port_id)
ret = self.servers.delete(resource)
if not self.servers.action_success(ret):
raise RemoteRestError(ret[2])
except RemoteRestError as e:
LOG.error(_("QuantumRestProxyV2: Unable to update remote port: "
"%s"), e.message)
def create_subnet(self, context, subnet):
LOG.debug(_("QuantumRestProxyV2: create_subnet() called"))
self._warn_on_state_status(subnet['subnet'])
# create subnet in DB
new_subnet = super(QuantumRestProxyV2, self).create_subnet(context,
subnet)
net_id = new_subnet['network_id']
orig_net = super(QuantumRestProxyV2, self).get_network(context,
net_id)
# update network on network controller
try:
self._send_update_network(orig_net)
except RemoteRestError as e:
# rollback creation of subnet
super(QuantumRestProxyV2, self).delete_subnet(context,
                                                          new_subnet['id'])
raise
return new_subnet
def update_subnet(self, context, id, subnet):
LOG.debug(_("QuantumRestProxyV2: update_subnet() called"))
self._warn_on_state_status(subnet['subnet'])
orig_subnet = super(QuantumRestProxyV2, self)._get_subnet(context, id)
# update subnet in DB
new_subnet = super(QuantumRestProxyV2, self).update_subnet(context, id,
subnet)
net_id = new_subnet['network_id']
orig_net = super(QuantumRestProxyV2, self).get_network(context,
net_id)
# update network on network controller
try:
self._send_update_network(orig_net)
except RemoteRestError as e:
            # roll back the subnet update
super(QuantumRestProxyV2, self).update_subnet(context, id,
orig_subnet)
raise
return new_subnet
def delete_subnet(self, context, id):
LOG.debug(_("QuantumRestProxyV2: delete_subnet() called"))
orig_subnet = super(QuantumRestProxyV2, self).get_subnet(context, id)
net_id = orig_subnet['network_id']
# delete subnet in DB
super(QuantumRestProxyV2, self).delete_subnet(context, id)
orig_net = super(QuantumRestProxyV2, self).get_network(context,
net_id)
# update network on network controller
try:
self._send_update_network(orig_net)
except RemoteRestError as e:
# TODO (Sumit): rollback deletion of subnet
raise
def create_router(self, context, router):
LOG.debug(_("QuantumRestProxyV2: create_router() called"))
self._warn_on_state_status(router['router'])
tenant_id = self._get_tenant_id_for_create(context, router["router"])
# create router in DB
new_router = super(QuantumRestProxyV2, self).create_router(context,
router)
# create router on the network controller
try:
resource = ROUTER_RESOURCE_PATH % tenant_id
mapped_router = self._map_state_and_status(new_router)
data = {
"router": mapped_router
}
ret = self.servers.post(resource, data)
if not self.servers.action_success(ret):
raise RemoteRestError(ret[2])
except RemoteRestError as e:
LOG.error(_("QuantumRestProxyV2: Unable to create remote router: "
"%s"), e.message)
super(QuantumRestProxyV2, self).delete_router(context,
new_router['id'])
raise
# return created router
return new_router
def update_router(self, context, router_id, router):
LOG.debug(_("QuantumRestProxyV2.update_router() called"))
self._warn_on_state_status(router['router'])
orig_router = super(QuantumRestProxyV2, self).get_router(context,
router_id)
tenant_id = orig_router["tenant_id"]
new_router = super(QuantumRestProxyV2, self).update_router(context,
router_id,
router)
# update router on network controller
try:
resource = ROUTERS_PATH % (tenant_id, router_id)
mapped_router = self._map_state_and_status(new_router)
data = {
"router": mapped_router
}
ret = self.servers.put(resource, data)
if not self.servers.action_success(ret):
raise RemoteRestError(ret[2])
except RemoteRestError as e:
LOG.error(_("QuantumRestProxyV2: Unable to update remote router: "
"%s"), e.message)
# reset router to original state
super(QuantumRestProxyV2, self).update_router(context,
router_id,
orig_router)
raise
# return updated router
return new_router
def delete_router(self, context, router_id):
LOG.debug(_("QuantumRestProxyV2: delete_router() called"))
with context.session.begin(subtransactions=True):
orig_router = self._get_router(context, router_id)
tenant_id = orig_router["tenant_id"]
# Ensure that the router is not used
router_filter = {'router_id': [router_id]}
fips = self.get_floatingips_count(context.elevated(),
filters=router_filter)
if fips:
raise l3.RouterInUse(router_id=router_id)
device_owner = l3_db.DEVICE_OWNER_ROUTER_INTF
device_filter = {'device_id': [router_id],
'device_owner': [device_owner]}
ports = self.get_ports_count(context.elevated(),
filters=device_filter)
if ports:
raise l3.RouterInUse(router_id=router_id)
# delete from network ctrl. Remote error on delete is ignored
try:
resource = ROUTERS_PATH % (tenant_id, router_id)
ret = self.servers.delete(resource)
if not self.servers.action_success(ret):
raise RemoteRestError(ret[2])
ret_val = super(QuantumRestProxyV2, self).delete_router(context,
router_id)
return ret_val
except RemoteRestError as e:
LOG.error(_("QuantumRestProxyV2: Unable to delete remote router: "
"%s"), e.message)
raise
def add_router_interface(self, context, router_id, interface_info):
LOG.debug(_("QuantumRestProxyV2: add_router_interface() called"))
# Validate args
router = self._get_router(context, router_id)
tenant_id = router['tenant_id']
# create interface in DB
new_interface_info = super(QuantumRestProxyV2,
self).add_router_interface(context,
router_id,
interface_info)
port = self._get_port(context, new_interface_info['port_id'])
net_id = port['network_id']
subnet_id = new_interface_info['subnet_id']
# we will use the port's network id as interface's id
interface_id = net_id
intf_details = self._get_router_intf_details(context,
interface_id,
subnet_id)
# create interface on the network controller
try:
resource = ROUTER_INTF_OP_PATH % (tenant_id, router_id)
data = {"interface": intf_details}
ret = self.servers.post(resource, data)
if not self.servers.action_success(ret):
raise RemoteRestError(ret[2])
except RemoteRestError as e:
LOG.error(_("QuantumRestProxyV2: Unable to create interface: "
"%s"), e.message)
super(QuantumRestProxyV2,
self).remove_router_interface(context, router_id,
interface_info)
raise
return new_interface_info
def remove_router_interface(self, context, router_id, interface_info):
LOG.debug(_("QuantumRestProxyV2: remove_router_interface() called"))
# Validate args
router = self._get_router(context, router_id)
tenant_id = router['tenant_id']
# we will first get the interface identifier before deleting in the DB
if not interface_info:
msg = "Either subnet_id or port_id must be specified"
raise exceptions.BadRequest(resource='router', msg=msg)
if 'port_id' in interface_info:
port = self._get_port(context, interface_info['port_id'])
interface_id = port['network_id']
elif 'subnet_id' in interface_info:
subnet = self._get_subnet(context, interface_info['subnet_id'])
interface_id = subnet['network_id']
else:
msg = "Either subnet_id or port_id must be specified"
raise exceptions.BadRequest(resource='router', msg=msg)
# remove router in DB
del_intf_info = super(QuantumRestProxyV2,
self).remove_router_interface(context,
router_id,
interface_info)
# create router on the network controller
try:
resource = ROUTER_INTF_PATH % (tenant_id, router_id, interface_id)
ret = self.servers.delete(resource)
if not self.servers.action_success(ret):
raise RemoteRestError(ret[2])
except RemoteRestError as e:
LOG.error(_("QuantumRestProxyV2:Unable to delete remote intf: "
"%s"), e.message)
raise
# return new interface
return del_intf_info
def create_floatingip(self, context, floatingip):
LOG.debug(_("QuantumRestProxyV2: create_floatingip() called"))
# create floatingip in DB
new_fl_ip = super(QuantumRestProxyV2,
self).create_floatingip(context, floatingip)
net_id = new_fl_ip['floating_network_id']
orig_net = super(QuantumRestProxyV2, self).get_network(context,
net_id)
# create floatingip on the network controller
try:
self._send_update_network(orig_net)
except RemoteRestError as e:
LOG.error(_("QuantumRestProxyV2: Unable to create remote "
"floatin IP: %s"), e.message)
super(QuantumRestProxyV2, self).delete_floatingip(context,
                                                              new_fl_ip['id'])
raise
# return created floating IP
return new_fl_ip
def update_floatingip(self, context, id, floatingip):
LOG.debug(_("QuantumRestProxyV2: update_floatingip() called"))
orig_fl_ip = super(QuantumRestProxyV2, self).get_floatingip(context,
id)
# update floatingip in DB
new_fl_ip = super(QuantumRestProxyV2,
self).update_floatingip(context, id, floatingip)
net_id = new_fl_ip['floating_network_id']
orig_net = super(QuantumRestProxyV2, self).get_network(context,
net_id)
# update network on network controller
try:
self._send_update_network(orig_net)
except RemoteRestError as e:
            # roll back the floating IP update
super(QuantumRestProxyV2, self).update_floatingip(context, id,
orig_fl_ip)
raise
return new_fl_ip
def delete_floatingip(self, context, id):
LOG.debug(_("QuantumRestProxyV2: delete_floatingip() called"))
orig_fl_ip = super(QuantumRestProxyV2, self).get_floatingip(context,
id)
# delete floating IP in DB
net_id = orig_fl_ip['floating_network_id']
super(QuantumRestProxyV2, self).delete_floatingip(context, id)
orig_net = super(QuantumRestProxyV2, self).get_network(context,
net_id)
# update network on network controller
try:
self._send_update_network(orig_net)
except RemoteRestError as e:
# TODO(Sumit): rollback deletion of floating IP
raise
def _send_all_data(self):
"""Pushes all data to network ctrl (networks/ports, ports/attachments)
        to give the controller an option to re-sync its persistent store
with quantum's current view of that data.
"""
admin_context = qcontext.get_admin_context()
networks = []
routers = []
all_networks = super(QuantumRestProxyV2,
self).get_networks(admin_context) or []
for net in all_networks:
mapped_network = self._get_mapped_network_with_subnets(net)
net_fl_ips = self._get_network_with_floatingips(mapped_network)
ports = []
net_filter = {'network_id': [net.get('id')]}
net_ports = super(QuantumRestProxyV2,
self).get_ports(admin_context,
filters=net_filter) or []
for port in net_ports:
mapped_port = self._map_state_and_status(port)
mapped_port['attachment'] = {
'id': port.get('device_id'),
'mac': port.get('mac_address'),
}
ports.append(mapped_port)
net_fl_ips['ports'] = ports
networks.append(net_fl_ips)
all_routers = super(QuantumRestProxyV2,
self).get_routers(admin_context) or []
for router in all_routers:
interfaces = []
mapped_router = self._map_state_and_status(router)
router_filter = {
'device_owner': ["network:router_interface"],
'device_id': [router.get('id')]
}
router_ports = super(QuantumRestProxyV2,
self).get_ports(admin_context,
filters=router_filter) or []
for port in router_ports:
net_id = port.get('network_id')
subnet_id = port['fixed_ips'][0]['subnet_id']
intf_details = self._get_router_intf_details(admin_context,
net_id,
subnet_id)
interfaces.append(intf_details)
mapped_router['interfaces'] = interfaces
routers.append(mapped_router)
try:
resource = '/topology'
data = {
'networks': networks,
'routers': routers,
}
ret = self.servers.put(resource, data)
if not self.servers.action_success(ret):
raise RemoteRestError(ret[2])
return ret
except RemoteRestError as e:
LOG.error(_('QuantumRestProxy: Unable to update remote '
'topology: %s'), e.message)
raise
def _add_host_route(self, context, destination, port):
subnet = {}
for fixed_ip in port['fixed_ips']:
subnet_id = fixed_ip['subnet_id']
nexthop = fixed_ip['ip_address']
subnet['host_routes'] = [{'destination': destination,
'nexthop': nexthop}]
updated_subnet = self.update_subnet(context,
subnet_id,
{'subnet': subnet})
payload = {'subnet': updated_subnet}
self._dhcp_agent_notifier.notify(context, payload,
'subnet.update.end')
LOG.debug("Adding host route: ")
LOG.debug("destination:%s nexthop:%s" % (destination,
nexthop))
def _get_network_with_floatingips(self, network):
admin_context = qcontext.get_admin_context()
net_id = network['id']
net_filter = {'floating_network_id': [net_id]}
fl_ips = super(QuantumRestProxyV2,
self).get_floatingips(admin_context,
filters=net_filter) or []
network['floatingips'] = fl_ips
return network
def _get_all_subnets_json_for_network(self, net_id):
admin_context = qcontext.get_admin_context()
subnets = self._get_subnets_by_network(admin_context,
net_id)
subnets_details = []
if subnets:
for subnet in subnets:
subnet_dict = self._make_subnet_dict(subnet)
mapped_subnet = self._map_state_and_status(subnet_dict)
subnets_details.append(mapped_subnet)
return subnets_details
def _get_mapped_network_with_subnets(self, network):
admin_context = qcontext.get_admin_context()
network = self._map_state_and_status(network)
subnets = self._get_all_subnets_json_for_network(network['id'])
network['subnets'] = subnets
for subnet in (subnets or []):
if subnet['gateway_ip']:
# FIX: For backward compatibility with wire protocol
network['gateway'] = subnet['gateway_ip']
break
else:
network['gateway'] = ''
network[l3.EXTERNAL] = self._network_is_external(admin_context,
network['id'])
return network
def _send_update_network(self, network):
net_id = network['id']
tenant_id = network['tenant_id']
# update network on network controller
try:
resource = NETWORKS_PATH % (tenant_id, net_id)
mapped_network = self._get_mapped_network_with_subnets(network)
net_fl_ips = self._get_network_with_floatingips(mapped_network)
data = {
"network": net_fl_ips,
}
ret = self.servers.put(resource, data)
if not self.servers.action_success(ret):
raise RemoteRestError(ret[2])
except RemoteRestError as e:
LOG.error(_("QuantumRestProxyV2: Unable to update remote "
"network: %s"), e.message)
raise
def _map_state_and_status(self, resource):
resource = copy.copy(resource)
resource['state'] = ('UP' if resource.pop('admin_state_up',
True) else 'DOWN')
if 'status' in resource:
del resource['status']
return resource
def _warn_on_state_status(self, resource):
if resource.get('admin_state_up', True) is False:
LOG.warning(_("Setting admin_state_up=False is not supported"
" in this plugin version. Ignoring setting for "
"resource: %s"), resource)
if 'status' in resource:
            if resource['status'] != const.NET_STATUS_ACTIVE:
LOG.warning(_("Operational status is internally set by the"
" plugin. Ignoring setting status=%s."),
resource['status'])
def _get_router_intf_details(self, context, intf_id, subnet_id):
# we will use the network id as interface's id
net_id = intf_id
network = super(QuantumRestProxyV2, self).get_network(context,
net_id)
subnet = super(QuantumRestProxyV2, self).get_subnet(context,
subnet_id)
mapped_network = self._get_mapped_network_with_subnets(network)
mapped_subnet = self._map_state_and_status(subnet)
data = {
'id': intf_id,
"network": mapped_network,
"subnet": mapped_subnet
}
return data
def _check_view_auth(self, context, resource, action):
return policy.check(context, action, resource)
def _enforce_set_auth(self, context, resource, action):
policy.enforce(context, action, resource)
def _extend_port_dict_binding(self, context, port):
if self._check_view_auth(context, port, self.binding_view):
port[portbindings.VIF_TYPE] = portbindings.VIF_TYPE_OVS
port[portbindings.CAPABILITIES] = {
portbindings.CAP_PORT_FILTER:
'security-group' in self.supported_extension_aliases}
return port
| kaiweifan/vse-lbaas-plugin-poc | quantum/plugins/bigswitch/plugin.py | Python | apache-2.0 | 52,432 |
from django.conf.urls import include, url
from django.contrib import admin
from django.contrib.auth.views import login
urlpatterns = [
# Examples:
# url(r'^$', 'moodle.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
    # Django admin
url(r'^admin/', include(admin.site.urls)),
url(r'^accounts/login', login, kwargs={'template_name': 'campus/login.html'}),
    # Home
url(r'^', include('app.campus.urls')),
    # User
url(r'^usuario/', include('app.campus.urls')),
    # Courses
url(r'^curso/', include('app.curso.urls'))
]
| anderojas1/moodle | moodle/moodle/urls.py | Python | gpl-2.0 | 589 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Flash'
db.create_table('cmsplugin_flash', (
('cmsplugin_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['cms.CMSPlugin'], unique=True, primary_key=True)),
('file', self.gf('django.db.models.fields.files.FileField')(max_length=100)),
('width', self.gf('django.db.models.fields.CharField')(max_length=6)),
('height', self.gf('django.db.models.fields.CharField')(max_length=6)),
))
db.send_create_signal('flash', ['Flash'])
def backwards(self, orm):
# Deleting model 'Flash'
db.delete_table('cmsplugin_flash')
models = {
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
'flash.flash': {
'Meta': {'object_name': 'Flash', 'db_table': "'cmsplugin_flash'", '_ormbases': ['cms.CMSPlugin']},
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'height': ('django.db.models.fields.CharField', [], {'max_length': '6'}),
'width': ('django.db.models.fields.CharField', [], {'max_length': '6'})
}
}
complete_apps = ['flash'] | mpetyx/palmdrop | venv/lib/python2.7/site-packages/cms/plugins/flash/migrations/0001_initial.py | Python | apache-2.0 | 3,244 |
# Copyright 2020 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dataclasses
import enum
import logging
from typing import Any, Dict, List, Optional
from googleapiclient import discovery
import googleapiclient.errors
# TODO(sergiitk): replace with tenacity
import retrying
from framework.infrastructure import gcp
logger = logging.getLogger(__name__)
class ComputeV1(gcp.api.GcpProjectApiResource):
# TODO(sergiitk): move someplace better
_WAIT_FOR_BACKEND_SEC = 60 * 10
_WAIT_FOR_OPERATION_SEC = 60 * 5
@dataclasses.dataclass(frozen=True)
class GcpResource:
name: str
url: str
@dataclasses.dataclass(frozen=True)
class ZonalGcpResource(GcpResource):
zone: str
def __init__(self,
api_manager: gcp.api.GcpApiManager,
project: str,
version: str = 'v1'):
super().__init__(api_manager.compute(version), project)
class HealthCheckProtocol(enum.Enum):
TCP = enum.auto()
GRPC = enum.auto()
class BackendServiceProtocol(enum.Enum):
HTTP2 = enum.auto()
GRPC = enum.auto()
def create_health_check(self,
name: str,
protocol: HealthCheckProtocol,
*,
port: Optional[int] = None) -> GcpResource:
if protocol is self.HealthCheckProtocol.TCP:
health_check_field = 'tcpHealthCheck'
elif protocol is self.HealthCheckProtocol.GRPC:
health_check_field = 'grpcHealthCheck'
else:
raise TypeError(f'Unexpected Health Check protocol: {protocol}')
health_check_settings = {}
if port is None:
health_check_settings['portSpecification'] = 'USE_SERVING_PORT'
else:
health_check_settings['portSpecification'] = 'USE_FIXED_PORT'
health_check_settings['port'] = port
return self._insert_resource(
self.api.healthChecks(), {
'name': name,
'type': protocol.name,
health_check_field: health_check_settings,
})
def delete_health_check(self, name):
self._delete_resource(self.api.healthChecks(), 'healthCheck', name)
def create_firewall_rule(self, name: str, network_url: str,
source_ranges: List[str],
ports: List[str]) -> Optional[GcpResource]:
try:
return self._insert_resource(
self.api.firewalls(), {
"allowed": [{
"IPProtocol": "tcp",
"ports": ports
}],
"direction": "INGRESS",
"name": name,
"network": network_url,
"priority": 1000,
"sourceRanges": source_ranges,
"targetTags": ["allow-health-checks"]
})
except googleapiclient.errors.HttpError as http_error:
# TODO(lidiz) use status_code() when we upgrade googleapiclient
if http_error.resp.status == 409:
logger.debug('Firewall rule %s already existed', name)
return
else:
raise
def delete_firewall_rule(self, name):
self._delete_resource(self.api.firewalls(), 'firewall', name)
def create_backend_service_traffic_director(
self,
name: str,
health_check: GcpResource,
affinity_header: str = None,
protocol: Optional[BackendServiceProtocol] = None,
subset_size: Optional[int] = None) -> GcpResource:
if not isinstance(protocol, self.BackendServiceProtocol):
raise TypeError(f'Unexpected Backend Service protocol: {protocol}')
body = {
'name': name,
'loadBalancingScheme': 'INTERNAL_SELF_MANAGED', # Traffic Director
'healthChecks': [health_check.url],
'protocol': protocol.name,
}
# If affinity header is specified, config the backend service to support
# affinity, and set affinity header to the one given.
if affinity_header:
body['sessionAffinity'] = 'HEADER_FIELD'
body['localityLbPolicy'] = 'RING_HASH'
body['consistentHash'] = {
'httpHeaderName': affinity_header,
}
if subset_size:
body['subsetting'] = {
'policy': 'CONSISTENT_HASH_SUBSETTING',
'subsetSize': subset_size
}
return self._insert_resource(self.api.backendServices(), body)
def get_backend_service_traffic_director(self, name: str) -> GcpResource:
return self._get_resource(self.api.backendServices(),
backendService=name)
def patch_backend_service(self, backend_service, body, **kwargs):
self._patch_resource(collection=self.api.backendServices(),
backendService=backend_service.name,
body=body,
**kwargs)
def backend_service_patch_backends(
self,
backend_service,
backends,
max_rate_per_endpoint: Optional[int] = None):
if max_rate_per_endpoint is None:
max_rate_per_endpoint = 5
backend_list = [{
'group': backend.url,
'balancingMode': 'RATE',
'maxRatePerEndpoint': max_rate_per_endpoint
} for backend in backends]
self._patch_resource(collection=self.api.backendServices(),
body={'backends': backend_list},
backendService=backend_service.name)
def backend_service_remove_all_backends(self, backend_service):
self._patch_resource(collection=self.api.backendServices(),
body={'backends': []},
backendService=backend_service.name)
def delete_backend_service(self, name):
self._delete_resource(self.api.backendServices(), 'backendService',
name)
def create_url_map(
self,
name: str,
matcher_name: str,
src_hosts,
dst_default_backend_service: GcpResource,
dst_host_rule_match_backend_service: Optional[GcpResource] = None,
) -> GcpResource:
if dst_host_rule_match_backend_service is None:
dst_host_rule_match_backend_service = dst_default_backend_service
return self._insert_resource(
self.api.urlMaps(), {
'name':
name,
'defaultService':
dst_default_backend_service.url,
'hostRules': [{
'hosts': src_hosts,
'pathMatcher': matcher_name,
}],
'pathMatchers': [{
'name': matcher_name,
'defaultService': dst_host_rule_match_backend_service.url,
}],
})
def create_url_map_with_content(self, url_map_body: Any) -> GcpResource:
return self._insert_resource(self.api.urlMaps(), url_map_body)
def patch_url_map(self, url_map: GcpResource, body, **kwargs):
self._patch_resource(collection=self.api.urlMaps(),
urlMap=url_map.name,
body=body,
**kwargs)
def delete_url_map(self, name):
self._delete_resource(self.api.urlMaps(), 'urlMap', name)
def create_target_grpc_proxy(
self,
name: str,
url_map: GcpResource,
) -> GcpResource:
return self._insert_resource(self.api.targetGrpcProxies(), {
'name': name,
'url_map': url_map.url,
'validate_for_proxyless': True,
})
def delete_target_grpc_proxy(self, name):
self._delete_resource(self.api.targetGrpcProxies(), 'targetGrpcProxy',
name)
def create_target_http_proxy(
self,
name: str,
url_map: GcpResource,
) -> GcpResource:
return self._insert_resource(self.api.targetHttpProxies(), {
'name': name,
'url_map': url_map.url,
})
def delete_target_http_proxy(self, name):
self._delete_resource(self.api.targetHttpProxies(), 'targetHttpProxy',
name)
def create_forwarding_rule(
self,
name: str,
src_port: int,
target_proxy: GcpResource,
network_url: str,
) -> GcpResource:
return self._insert_resource(
self.api.globalForwardingRules(),
{
'name': name,
'loadBalancingScheme':
'INTERNAL_SELF_MANAGED', # Traffic Director
'portRange': src_port,
'IPAddress': '0.0.0.0',
'network': network_url,
'target': target_proxy.url,
})
def exists_forwarding_rule(self, src_port) -> bool:
# TODO(sergiitk): Better approach for confirming the port is available.
        # It's possible a rule allocates an actual port range, e.g. 8000-9000,
# and this wouldn't catch it. For now, we assume there's no
# port ranges used in the project.
filter_str = (f'(portRange eq "{src_port}-{src_port}") '
f'(IPAddress eq "0.0.0.0")'
f'(loadBalancingScheme eq "INTERNAL_SELF_MANAGED")')
return self._exists_resource(self.api.globalForwardingRules(),
filter=filter_str)
def delete_forwarding_rule(self, name):
self._delete_resource(self.api.globalForwardingRules(),
'forwardingRule', name)
@staticmethod
def _network_endpoint_group_not_ready(neg):
return not neg or neg.get('size', 0) == 0
def wait_for_network_endpoint_group(self, name, zone):
@retrying.retry(retry_on_result=self._network_endpoint_group_not_ready,
stop_max_delay=60 * 1000,
wait_fixed=2 * 1000)
def _wait_for_network_endpoint_group_ready():
try:
neg = self.get_network_endpoint_group(name, zone)
logger.debug(
'Waiting for endpoints: NEG %s in zone %s, '
'current count %s', neg['name'], zone, neg.get('size'))
except googleapiclient.errors.HttpError as error:
# noinspection PyProtectedMember
reason = error._get_reason()
logger.debug('Retrying NEG load, got %s, details %s',
error.resp.status, reason)
raise
return neg
network_endpoint_group = _wait_for_network_endpoint_group_ready()
# TODO(sergiitk): dataclass
return self.ZonalGcpResource(network_endpoint_group['name'],
network_endpoint_group['selfLink'], zone)
def get_network_endpoint_group(self, name, zone):
neg = self.api.networkEndpointGroups().get(project=self.project,
networkEndpointGroup=name,
zone=zone).execute()
# TODO(sergiitk): dataclass
return neg
def wait_for_backends_healthy_status(
self,
backend_service,
backends,
timeout_sec=_WAIT_FOR_BACKEND_SEC,
wait_sec=4,
):
pending = set(backends)
@retrying.retry(retry_on_result=lambda result: not result,
stop_max_delay=timeout_sec * 1000,
wait_fixed=wait_sec * 1000)
def _retry_backends_health():
for backend in pending:
result = self.get_backend_service_backend_health(
backend_service, backend)
if 'healthStatus' not in result:
logger.debug('Waiting for instances: backend %s, zone %s',
backend.name, backend.zone)
continue
backend_healthy = True
for instance in result['healthStatus']:
logger.debug(
'Backend %s in zone %s: instance %s:%s health: %s',
backend.name, backend.zone, instance['ipAddress'],
instance['port'], instance['healthState'])
if instance['healthState'] != 'HEALTHY':
backend_healthy = False
if backend_healthy:
logger.info('Backend %s in zone %s reported healthy',
backend.name, backend.zone)
pending.remove(backend)
return not pending
_retry_backends_health()
def get_backend_service_backend_health(self, backend_service, backend):
return self.api.backendServices().getHealth(
project=self.project,
backendService=backend_service.name,
body={
"group": backend.url
}).execute()
def _get_resource(self, collection: discovery.Resource,
**kwargs) -> GcpResource:
resp = collection.get(project=self.project, **kwargs).execute()
logger.info('Loaded compute resource:\n%s',
self.resource_pretty_format(resp))
return self.GcpResource(resp['name'], resp['selfLink'])
def _exists_resource(self, collection: discovery.Resource,
filter: str) -> bool:
resp = collection.list(
project=self.project, filter=filter,
maxResults=1).execute(num_retries=self._GCP_API_RETRIES)
if 'kind' not in resp:
# TODO(sergiitk): better error
raise ValueError('List response "kind" is missing')
return 'items' in resp and resp['items']
def _insert_resource(self, collection: discovery.Resource,
body: Dict[str, Any]) -> GcpResource:
logger.info('Creating compute resource:\n%s',
self.resource_pretty_format(body))
resp = self._execute(collection.insert(project=self.project, body=body))
return self.GcpResource(body['name'], resp['targetLink'])
def _patch_resource(self, collection, body, **kwargs):
logger.info('Patching compute resource:\n%s',
self.resource_pretty_format(body))
self._execute(
collection.patch(project=self.project, body=body, **kwargs))
def _delete_resource(self, collection: discovery.Resource,
resource_type: str, resource_name: str) -> bool:
try:
params = {"project": self.project, resource_type: resource_name}
self._execute(collection.delete(**params))
return True
except googleapiclient.errors.HttpError as error:
if error.resp and error.resp.status == 404:
logger.info(
'Resource %s "%s" not deleted since it does not exist',
resource_type, resource_name)
else:
logger.warning('Failed to delete %s "%s", %r', resource_type,
resource_name, error)
return False
@staticmethod
def _operation_status_done(operation):
return 'status' in operation and operation['status'] == 'DONE'
def _execute(self,
request,
*,
test_success_fn=None,
timeout_sec=_WAIT_FOR_OPERATION_SEC):
operation = request.execute(num_retries=self._GCP_API_RETRIES)
logger.debug('Response %s', operation)
# TODO(sergiitk) try using wait() here
# https://googleapis.github.io/google-api-python-client/docs/dyn/compute_v1.globalOperations.html#wait
operation_request = self.api.globalOperations().get(
project=self.project, operation=operation['name'])
if test_success_fn is None:
test_success_fn = self._operation_status_done
logger.debug('Waiting for global operation %s, timeout %s sec',
operation['name'], timeout_sec)
response = self.wait_for_operation(operation_request=operation_request,
test_success_fn=test_success_fn,
timeout_sec=timeout_sec)
if 'error' in response:
logger.debug('Waiting for global operation failed, response: %r',
response)
raise Exception(f'Operation {operation["name"]} did not complete '
f'within {timeout_sec}s, error={response["error"]}')
return response
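# Usage sketch (illustrative only, not part of the test driver): how the
# ComputeV1 wrapper above might be driven to create the Traffic Director
# resources it manages. The GcpApiManager construction and every name/value
# here are assumptions made for the example.
#
#     api_manager = gcp.api.GcpApiManager()
#     compute = ComputeV1(api_manager, project='example-project')
#     hc = compute.create_health_check('example-hc',
#                                      compute.HealthCheckProtocol.TCP,
#                                      port=8080)
#     bs = compute.create_backend_service_traffic_director(
#         'example-bs', health_check=hc,
#         protocol=compute.BackendServiceProtocol.GRPC)
#     url_map = compute.create_url_map('example-url-map', 'example-matcher',
#                                      ['example-host:50051'], bs)
#     proxy = compute.create_target_grpc_proxy('example-proxy', url_map)
#     compute.create_forwarding_rule('example-rule', 50051, proxy,
#                                    network_url='global/networks/default')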
| ejona86/grpc | tools/run_tests/xds_k8s_test_driver/framework/infrastructure/gcp/compute.py | Python | apache-2.0 | 17,539 |
# -*- coding: utf-8 -*-
"""Example that shows how to implement QSR makers.
:Author: Christan Dondrup <[email protected]>, Yiannis Gatsoulis <[email protected]>
:Organization: University of Lincoln
:Date: 10 September 2014
:Version: 0.1
:Status: Development
:Copyright: STRANDS default
:Notes: future extension to handle polygons; to do that, use matplotlib.path.Path.contains_points,
    although you might want to have a read of the following as well...
http://matplotlib.1069221.n5.nabble.com/How-to-properly-use-path-Path-contains-point-td40718.html
"""
from __future__ import print_function, division
from qsrlib_qsrs.qsr_qtc_simplified_abstractclass import QSR_QTC_Simplified_Abstractclass
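# Illustrative note (not part of the original module): the polygon extension
# mentioned in the docstring above would build on matplotlib's Path API, e.g.:
#
#     from matplotlib.path import Path
#     polygon = Path([(0., 0.), (4., 0.), (4., 3.), (0., 3.)])
#     polygon.contains_points([(1., 1.), (5., 5.)])  # -> array([ True, False])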
class QSR_QTC_B_Simplified(QSR_QTC_Simplified_Abstractclass):
"""Make default QSRs and provide an example for others"""
def __init__(self):
super(QSR_QTC_B_Simplified, self).__init__()
self._unique_id = "qtcbs"
self.qtc_type = "b"
self._all_possible_relations = tuple(self.return_all_possible_state_combinations()[0])
def qtc_to_output_format(self, qtc):
"""Overwrite this for the different QTC veriants to select only the parts
from the QTCC tuple that you would like to return.
Example for QTCB: return qtc[0:2]
:param qtc: The full QTCC tuple [q1,q2,q4,q5]
:return: "q1,q2" of {"qtcbs": "q1,q2"} if future is True
"""
return self._format_qsr(self.create_qtc_string(qtc[0:2]))
| yianni/rtd-dbg | qsr_lib/src/qsrlib_qsrs/qsr_qtc_b_simplified.py | Python | mit | 1,495 |
"""
log handler
"""
import os
import logging
from config import ROOT_PATH
class Logger(object):
def __init__(self, log_name, log_file=None, log_format=None, log_level=None):
self._log_name = log_name
self._log_file = log_file
self._log_format = log_format
self._log_level = log_level
if not self._log_file:
self._log_file = os.path.join(ROOT_PATH,
'logs',
self._log_name + '.log')
if not self._log_format:
self._log_format = '[%(asctime)s] $%(levelname)s (%(filename)s:%(lineno)d) %(message)s'
if not self._log_level:
self._log_level = logging.INFO
self._logger = logging.getLogger(self._log_name)
handler = logging.FileHandler(self._log_file)
formatter = logging.Formatter(self._log_format)
handler.setFormatter(formatter)
self._logger.addHandler(handler)
self._logger.setLevel(self._log_level)
def log(self, msg):
if self._logger is not None:
self._logger.log(self._log_level, msg)
def get_logger(self):
return self._logger
class LogFilter(logging.Filter):
"""
Filters (lets through) all messages with level < LEVEL
"""
def __init__(self, level):
self.level = level
def filter(self, record):
return record.levelno < self.level
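# Usage sketch (illustrative only; assumes config.ROOT_PATH points at a
# directory containing a writable 'logs' subdirectory):
#
#     import sys
#     logger = Logger('worker')        # logs to <ROOT_PATH>/logs/worker.log
#     logger.log('job started')
#
#     # LogFilter can be attached to a handler to let through only records
#     # below a given level, e.g. INFO and lower to stdout:
#     handler = logging.StreamHandler(sys.stdout)
#     handler.addFilter(LogFilter(logging.WARNING))
#     logger.get_logger().addHandler(handler)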
| Jackeriss/Top15 | app/log_kit.py | Python | mit | 1,436 |
#!/usr/bin/env python
# Copyright (c) 2013 Calin Crisan
# This file is part of motionEye.
#
# motionEye is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import argparse
import logging
import os.path
import pipes
import sys
# make sure motioneye is on python path
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import settings
_LOG_FILE = 'motioneye.log'
def find_command(command):
if command == 'relayevent':
relayevent_sh = os.path.join(os.path.dirname(__file__), 'scripts/relayevent.sh')
cmd = relayevent_sh + ' "%s"' % (settings.config_file or '')
else:
cmd = __file__
cmd = sys.executable + ' ' + cmd
cmd = cmd.replace('-b', '') # remove server-specific options
cmd += ' %s ' % command
cmd += ' '.join([pipes.quote(arg) for arg in sys.argv[2:]
if arg not in ['-b']])
return cmd
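# For example (illustrative): find_command('sendmail') yields roughly
# '<python executable> /path/to/meyectl.py sendmail <quoted extra args>',
# while find_command('relayevent') points at scripts/relayevent.sh with the
# config file path as its first argument.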
def load_settings():
# parse common command line arguments
config_file = None
debug = False
for i in xrange(1, len(sys.argv)):
arg = sys.argv[i]
next_arg = i < len(sys.argv) - 1 and sys.argv[i + 1]
if arg == '-c':
config_file = next_arg
elif arg == '-d':
debug = True
conf_path_given = [False]
run_path_given = [False]
log_path_given = [False]
media_path_given = [False]
# parse the config file, if given
def parse_conf_line(line):
line = line.strip()
if not line or line.startswith('#'):
return
parts = line.split(' ', 1)
if len(parts) != 2:
raise Exception('invalid configuration line: %s' % line)
name, value = parts
upper_name = name.upper().replace('-', '_')
if hasattr(settings, upper_name):
curr_value = getattr(settings, upper_name)
if upper_name == 'LOG_LEVEL':
if value == 'quiet':
value = 100
else:
value = getattr(logging, value.upper(), logging.DEBUG)
elif value.lower() == 'true':
value = True
elif value.lower() == 'false':
value = False
elif isinstance(curr_value, int):
value = int(value)
elif isinstance(curr_value, float):
value = float(value)
if upper_name == 'CONF_PATH':
conf_path_given[0] = True
elif upper_name == 'RUN_PATH':
run_path_given[0] = True
elif upper_name == 'LOG_PATH':
log_path_given[0] = True
elif upper_name == 'MEDIA_PATH':
media_path_given[0] = True
setattr(settings, upper_name, value)
else:
logging.warn('unknown configuration option: %s' % name)
if config_file:
try:
with open(config_file) as f:
for line in f:
parse_conf_line(line)
except Exception as e:
logging.fatal('failed to read settings from "%s": %s' % (config_file, e))
sys.exit(-1)
# use the config file directory as base dir
# if not specified otherwise in the config file
base_dir = os.path.dirname(config_file)
settings.config_file = config_file
if not conf_path_given[0]:
settings.CONF_PATH = base_dir
if not run_path_given[0]:
settings.RUN_PATH = base_dir
if not log_path_given[0]:
settings.LOG_PATH = base_dir
if not media_path_given[0]:
settings.MEDIA_PATH = base_dir
else:
logging.info('no configuration file given, using built-in defaults')
if debug:
settings.LOG_LEVEL = logging.DEBUG
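# Illustrative example (not shipped with motionEye): given the parsing rules in
# load_settings() above, a config file passed with -c could contain lines like
# the following; option names are lower-case with dashes and are mapped onto
# the upper-case attributes of the settings module (names and values here are
# made up for illustration):
#
#     # motioneye.conf
#     conf-path /etc/motioneye        # string setting, used as-is
#     log-level info                  # mapped through logging.INFO
#     port 8765                       # coerced to int to match the default's type
#     enable-foo true                 # 'true'/'false' become booleans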
def configure_logging(cmd, log_to_file=False):
if log_to_file or cmd != 'motioneye':
fmt = '%(asctime)s: [{cmd}] %(levelname)8s: %(message)s'.format(cmd=cmd)
else:
        fmt = '%(levelname)8s: %(message)s'
for h in logging.getLogger().handlers:
logging.getLogger().removeHandler(h)
try:
if log_to_file:
log_file = os.path.join(settings.LOG_PATH, _LOG_FILE)
else:
log_file = None
logging.basicConfig(filename=log_file, level=settings.LOG_LEVEL,
format=fmt, datefmt='%Y-%m-%d %H:%M:%S')
except Exception as e:
sys.stderr.write('failed to configure logging: %s\n' % e)
sys.exit(-1)
logging.getLogger('tornado').setLevel(logging.WARN)
logging.getLogger('oauth2client').setLevel(logging.WARN)
def configure_tornado():
from tornado.httpclient import AsyncHTTPClient
AsyncHTTPClient.configure('tornado.curl_httpclient.CurlAsyncHTTPClient', max_clients=16)
def make_arg_parser(command=None):
if command:
usage = description = epilog = None
else:
usage = '%(prog)s [command] [-c CONFIG_FILE] [-d] [-h] [-l] [-v] [command options...]\n\n'
description = 'available commands:\n'
description += ' startserver\n'
description += ' stopserver\n'
description += ' sendmail\n'
description += ' sendtelegram\n'
description += ' webhook\n'
description += ' shell\n\n'
epilog = 'type "%(prog)s [command] -h" for help on a specific command\n\n'
parser = argparse.ArgumentParser(prog='meyectl%s' % ((' ' + command) if command else ''),
usage=usage, description=description, epilog=epilog,
add_help=False, formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-c', help='use a config file instead of built-in defaults',
type=str, dest='config_file')
parser.add_argument('-d', help='enable debugging, overriding log level from config file',
action='store_true', dest='debug')
parser.add_argument('-h', help='print this help and exit',
action='help', default=argparse.SUPPRESS)
parser.add_argument('-l', help='log to file instead of standard error',
action='store_true', dest='log_to_file')
parser.add_argument('-v', help='print program version and exit',
action='version', default=argparse.SUPPRESS)
return parser
def print_usage_and_exit(code):
parser = make_arg_parser()
parser.print_help(sys.stderr)
sys.exit(code)
def print_version_and_exit():
import motioneye
sys.stderr.write('motionEye %s\n' % motioneye.VERSION)
sys.exit()
def main():
for a in sys.argv:
if a == '-v':
print_version_and_exit()
if len(sys.argv) < 2 or sys.argv[1] == '-h':
print_usage_and_exit(0)
load_settings()
command = sys.argv[1]
arg_parser = make_arg_parser(command)
if command in ('startserver', 'stopserver'):
import server
server.main(arg_parser, sys.argv[2:], command[:-6])
elif command == 'sendmail':
import sendmail
sendmail.main(arg_parser, sys.argv[2:])
elif command == 'sendtelegram':
import sendtelegram
sendtelegram.main(arg_parser, sys.argv[2:])
elif command == 'webhook':
import webhook
webhook.main(arg_parser, sys.argv[2:])
elif command == 'shell':
import shell
shell.main(arg_parser, sys.argv[2:])
else:
sys.stderr.write('unknown command "%s"\n\n' % command)
print_usage_and_exit(-1)
if __name__ == '__main__':
main()
| ccrisan/motioneye | motioneye/meyectl.py | Python | gpl-3.0 | 8,206 |
import unittest
import numpy
import chainer
from chainer import basic_math
from chainer import cuda
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
class BinaryOpTestBase(object):
def make_data(self):
raise NotImplementedError()
def setUp(self):
self.x1, self.x2, self.gy = self.make_data()
def check_forward(self, op, x1_data, x2_data):
x1 = chainer.Variable(x1_data)
x2 = chainer.Variable(x2_data)
y = op(x1, x2)
gradient_check.assert_allclose(op(self.x1, self.x2), y.data)
def forward_cpu(self, op):
self.check_forward(op, self.x1, self.x2)
@condition.retry(3)
def test_add_forward_cpu(self):
self.forward_cpu(lambda x, y: x + y)
@condition.retry(3)
def test_sub_forward_cpu(self):
self.forward_cpu(lambda x, y: x - y)
@condition.retry(3)
def test_mul_forward_cpu(self):
self.forward_cpu(lambda x, y: x * y)
@condition.retry(3)
def test_div_forward_cpu(self):
self.forward_cpu(lambda x, y: x / y)
@condition.retry(3)
def test_pow_forward_cpu(self):
self.forward_cpu(lambda x, y: x ** y)
@condition.retry(3)
def test_radd_forward_cpu(self):
self.forward_cpu(lambda x, y: y.__radd__(x))
@condition.retry(3)
def test_rsub_forward_cpu(self):
self.forward_cpu(lambda x, y: y.__rsub__(x))
@condition.retry(3)
def test_rmul_forward_cpu(self):
self.forward_cpu(lambda x, y: y.__rmul__(x))
@condition.retry(3)
def test_rdiv_forward_cpu(self):
self.forward_cpu(lambda x, y: y.__rtruediv__(x))
@condition.retry(3)
def test_rpow_forward_cpu(self):
self.forward_cpu(lambda x, y: y.__rpow__(x))
def forward_gpu(self, op):
self.check_forward(op, cuda.to_gpu(self.x1), cuda.to_gpu(self.x2))
@attr.gpu
@condition.retry(3)
def test_add_forward_gpu(self):
self.forward_gpu(lambda x, y: x + y)
@attr.gpu
@condition.retry(3)
def test_sub_forward_gpu(self):
self.forward_gpu(lambda x, y: x - y)
@attr.gpu
@condition.retry(3)
def test_mul_forward_gpu(self):
self.forward_gpu(lambda x, y: x * y)
@attr.gpu
@condition.retry(3)
def test_div_forward_gpu(self):
self.forward_gpu(lambda x, y: x / y)
@attr.gpu
@condition.retry(3)
def test_pow_forward_gpu(self):
self.forward_gpu(lambda x, y: x ** y)
@attr.gpu
@condition.retry(3)
def test_radd_forward_gpu(self):
self.forward_gpu(lambda x, y: y.__radd__(x))
@attr.gpu
@condition.retry(3)
def test_rsub_forward_gpu(self):
self.forward_gpu(lambda x, y: y.__rsub__(x))
@attr.gpu
@condition.retry(3)
def test_rmul_forward_gpu(self):
self.forward_gpu(lambda x, y: y.__rmul__(x))
@attr.gpu
@condition.retry(3)
def test_rdiv_forward_gpu(self):
self.forward_gpu(lambda x, y: y.__rtruediv__(x))
@attr.gpu
@condition.retry(3)
def test_rpow_forward_gpu(self):
self.forward_gpu(lambda x, y: y.__rpow__(x))
@attr.gpu
def test_add_constant_allocation(self):
x = 0
y = chainer.Variable(cuda.cupy.ones((1,)))
z = y + x
self.assertEqual(1, z.data.get()[0])
def check_backward(self, op, x1_data, x2_data, y_grad, atol):
gradient_check.check_backward(op, (x1_data, x2_data), y_grad,
atol=atol)
def backward_cpu(self, op, atol=1e-5):
self.check_backward(op, self.x1, self.x2, self.gy, atol)
@condition.retry(3)
def test_add_backward_cpu(self):
self.backward_cpu(lambda x, y: x + y)
@condition.retry(3)
def test_sub_backward_cpu(self):
self.backward_cpu(lambda x, y: x - y)
@condition.retry(3)
def test_mul_backward_cpu(self):
self.backward_cpu(lambda x, y: x * y)
@condition.retry(3)
def test_div_backward_cpu(self):
self.backward_cpu(lambda x, y: x / y)
@condition.retry(3)
def test_pow_backward_cpu(self):
self.backward_cpu(lambda x, y: x ** y, atol=1e-4)
def backward_gpu(self, op, atol=1e-5):
self.check_backward(
op, cuda.to_gpu(self.x1), cuda.to_gpu(self.x2),
cuda.to_gpu(self.gy), atol)
@attr.gpu
@condition.retry(3)
def test_add_backward_gpu(self):
self.backward_gpu(lambda x, y: x + y)
@attr.gpu
@condition.retry(3)
def test_sub_backward_gpu(self):
self.backward_gpu(lambda x, y: x - y)
@attr.gpu
@condition.retry(3)
def test_mul_backward_gpu(self):
self.backward_gpu(lambda x, y: x * y)
@attr.gpu
@condition.retry(3)
def test_div_backward_gpu(self):
self.backward_gpu(lambda x, y: x / y)
@attr.gpu
@condition.retry(3)
def test_pow_backward_gpu(self):
self.backward_gpu(lambda x, y: x ** y, atol=1e-4)
class TestBinaryOpSimple(BinaryOpTestBase, unittest.TestCase):
def make_data(self):
x1 = numpy.random.uniform(.5, 1, (3, 2)).astype(numpy.float32)
x2 = numpy.random.uniform(.5, 1, (3, 2)).astype(numpy.float32)
gy = numpy.random.uniform(-1, 1, (3, 2)).astype(numpy.float32)
return x1, x2, gy
class TestBinaryOpZeroDimension(BinaryOpTestBase, unittest.TestCase):
def make_data(self):
x1 = numpy.random.uniform(.5, 1, ()).astype(numpy.float32)
x2 = numpy.random.uniform(.5, 1, ()).astype(numpy.float32)
gy = numpy.random.uniform(-1, 1, ()).astype(numpy.float32)
return x1, x2, gy
class TestBinaryOpConstant(unittest.TestCase):
def _test_constant_one(self, func, lhs, rhs, gpu=False):
if gpu:
lhs = cuda.to_gpu(lhs)
x = chainer.Variable(lhs)
y = func(x, rhs)
self.assertEqual(y.data.dtype, numpy.float32)
y.backward()
self.assertEqual(x.grad.dtype, numpy.float32)
def _test_constant(self, func):
x_data = numpy.array(1, numpy.float32)
self._test_constant_one(func, x_data, 1)
self._test_constant_one(func, x_data, 1.0)
self._test_constant_one(func, x_data, numpy.int64(1))
self._test_constant_one(func, x_data, numpy.float64(1.0))
def _test_constant_gpu(self, func):
x_data = numpy.array(1, numpy.float32)
self._test_constant_one(func, x_data, 1, True)
self._test_constant_one(func, x_data, 1.0, True)
self._test_constant_one(func, x_data, numpy.int64(1), True)
self._test_constant_one(func, x_data, numpy.float64(1), True)
def _test_constant_array_one(self, func, lhs, rhs):
x = chainer.Variable(lhs)
y = func(x, rhs)
self.assertEqual(y.data.dtype, numpy.float32)
y.grad = numpy.ones_like(y.data, numpy.float32)
y.backward()
self.assertEqual(x.grad.dtype, numpy.float32)
def _test_constant_array(self, func):
x_data = numpy.array([1.0, 2.0], numpy.float32)
self._test_constant_array_one(
func, x_data, numpy.array([3.0, 4.0], numpy.int32))
self._test_constant_array_one(
func, x_data, numpy.array([3.0, 4.0], numpy.int64))
self._test_constant_array_one(
func, x_data, numpy.array([3.0, 4.0], numpy.float32))
self._test_constant_array_one(
func, x_data, numpy.array([3.0, 4.0], numpy.float64))
with self.assertRaises(ValueError):
self._test_constant_array_one(func, x_data, [3.0, 4.0])
with self.assertRaises(ValueError):
self._test_constant_array_one(func, x_data, (3.0, 4.0))
with self.assertRaises(ValueError):
self._test_constant_array_one(func, x_data, [3.0, 4.0, 5.0])
with self.assertRaises(ValueError):
self._test_constant_array_one(func, x_data, (3.0, 4.0, 5.0))
with self.assertRaises(ValueError):
self._test_constant_array_one(
func, x_data, numpy.array([3.0, 4.0, 5.0], numpy.float32))
def _test_constant_array_gpu_one(self, func, lhs, rhs):
x = chainer.Variable(cuda.to_gpu(lhs))
y = func(x, rhs)
self.assertEqual(y.data.dtype, numpy.float32)
y.grad = chainer.cuda.cupy.ones_like(y.data).astype(numpy.float32)
y.backward()
self.assertEqual(x.grad.dtype, numpy.float32)
def _test_constant_array_gpu(self, func, exception=TypeError):
x_data = numpy.array([1.0, 2.0], numpy.float32)
self._test_constant_array_gpu_one(
func, x_data, cuda.to_gpu(numpy.array([3.0, 4.0], numpy.int32)))
self._test_constant_array_gpu_one(
func, x_data, cuda.to_gpu(numpy.array([3.0, 4.0], numpy.int64)))
self._test_constant_array_gpu_one(
func, x_data, cuda.to_gpu(numpy.array([3.0, 4.0], numpy.float32)))
self._test_constant_array_gpu_one(
func, x_data, cuda.to_gpu(numpy.array([3.0, 4.0], numpy.float64)))
with self.assertRaises(exception):
self._test_constant_array_one(
func, x_data, cuda.to_gpu(
numpy.array([3.0, 4.0, 5.0], numpy.float32)))
def test_add_constant(self):
self._test_constant(lambda x, y: x + y)
@attr.gpu
def test_add_constant_gpu(self):
self._test_constant_gpu(lambda x, y: x + y)
def test_add_constant_array(self):
self._test_constant_array(lambda x, y: x + y)
@attr.gpu
def test_add_constant_array_gpu(self):
self._test_constant_array_gpu(lambda x, y: x + y)
def test_radd_constant(self):
self._test_constant(lambda x, y: y + x)
@attr.gpu
def test_radd_constant_gpu(self):
self._test_constant_gpu(lambda x, y: y + x)
def test_radd_constant_array(self):
self._test_constant_array(lambda x, y: y + x)
@attr.gpu
def test_radd_constant_array_gpu(self):
self._test_constant_array_gpu(lambda x, y: y + x)
def test_sub_constant(self):
self._test_constant(lambda x, y: x - y)
@attr.gpu
def test_sub_constant_gpu(self):
self._test_constant_gpu(lambda x, y: x - y)
def test_sub_constant_array(self):
self._test_constant_array(lambda x, y: x - y)
@attr.gpu
def test_sub_constant_array_gpu(self):
self._test_constant_array_gpu(lambda x, y: x - y)
def test_rsub_constant(self):
self._test_constant(lambda x, y: y - x)
@attr.gpu
def test_rsub_constant_gpu(self):
self._test_constant_gpu(lambda x, y: y - x)
def test_rsub_constant_array(self):
self._test_constant_array(lambda x, y: y - x)
@attr.gpu
def test_rsub_constant_array_gpu(self):
self._test_constant_array_gpu(lambda x, y: y - x)
def test_mul_constant(self):
self._test_constant(lambda x, y: x * y)
@attr.gpu
def test_mul_constant_gpu(self):
self._test_constant_gpu(lambda x, y: x * y)
def test_mul_constant_array(self):
self._test_constant_array(lambda x, y: x * y)
@attr.gpu
def test_mul_constant_array_gpu(self):
        self._test_constant_array_gpu(lambda x, y: x * y, exception=Exception)
def test_rmul_constant(self):
self._test_constant(lambda x, y: y * x)
@attr.gpu
def test_rmul_constant_gpu(self):
self._test_constant_gpu(lambda x, y: y * x)
def test_rmul_constant_array(self):
self._test_constant_array(lambda x, y: y * x)
@attr.gpu
def test_rmul_constant_array_gpu(self):
# _test_constant_array_one throws pycuda._pvt_struct.error
self._test_constant_array_gpu(lambda x, y: y * x, exception=Exception)
def test_div_constant(self):
self._test_constant(lambda x, y: x / y)
@attr.gpu
def test_div_constant_gpu(self):
self._test_constant_gpu(lambda x, y: x / y)
def test_div_constant_array(self):
self._test_constant_array(lambda x, y: x / y)
@attr.gpu
def test_div_constant_array_gpu(self):
# _test_constant_array_one throws pycuda._pvt_struct.error
self._test_constant_array_gpu(lambda x, y: x / y, exception=Exception)
def test_rdiv_constant(self):
self._test_constant(lambda x, y: y / x)
@attr.gpu
def test_rdiv_constant_gpu(self):
self._test_constant_gpu(lambda x, y: y / x)
def test_rdiv_constant_array(self):
self._test_constant_array(lambda x, y: y / x)
@attr.gpu
def test_rdiv_constant_array_gpu(self):
self._test_constant_array_gpu(lambda x, y: y / x)
def test_pow_constant(self):
self._test_constant(lambda x, y: x ** y)
@attr.gpu
def test_pow_constant_gpu(self):
self._test_constant_gpu(lambda x, y: x ** y)
def test_pow_constant_array(self):
self._test_constant_array(lambda x, y: x ** y)
@attr.gpu
def test_pow_constant_array_gpu(self):
self._test_constant_array_gpu(lambda x, y: x ** y, exception=TypeError)
def test_rpow_constant(self):
self._test_constant(lambda x, y: y ** x)
@attr.gpu
def test_rpow_constant_gpu(self):
self._test_constant_gpu(lambda x, y: y ** x)
def test_rpow_constant_array(self):
self._test_constant_array(lambda x, y: y ** x)
@attr.gpu
def test_rpow_constant_array_gpu(self):
# _test_constant_array_one throws pycuda._pvt_struct.error
self._test_constant_array_gpu(lambda x, y: y ** x, exception=Exception)
class VariableConstantOpTestBase(object):
    def make_data(self):
raise NotImplementedError()
def setUp(self):
self.x, self.gy, self.value = self.make_data()
def check_forward(self, op, x_data):
x = chainer.Variable(x_data)
y = op(x, self.value)
gradient_check.assert_allclose(
op(self.x, self.value), y.data, atol=1e-7, rtol=1e-7)
def forward_cpu(self, op):
self.check_forward(op, self.x)
@condition.retry(3)
def test_add_forward_cpu(self):
self.forward_cpu(lambda x, y: x + y)
@condition.retry(3)
def test_radd_forward_cpu(self):
self.forward_cpu(lambda x, y: y + x)
@condition.retry(3)
def test_sub_forward_cpu(self):
self.forward_cpu(lambda x, y: x - y)
@condition.retry(3)
def test_rsub_forward_cpu(self):
self.forward_cpu(lambda x, y: y - x)
@condition.retry(3)
def test_mul_forward_cpu(self):
self.forward_cpu(lambda x, y: x * y)
@condition.retry(3)
def test_rmul_forward_cpu(self):
self.forward_cpu(lambda x, y: y * x)
@condition.retry(3)
def test_div_forward_cpu(self):
self.forward_cpu(lambda x, y: x / y)
@condition.retry(3)
def test_rdiv_forward_cpu(self):
self.forward_cpu(lambda x, y: y / x)
@condition.retry(3)
def test_pow_forward_cpu(self):
self.forward_cpu(lambda x, y: x ** y)
@condition.retry(3)
def test_rpow_forward_cpu(self):
self.forward_cpu(lambda x, y: y ** x)
def forward_gpu(self, op):
self.check_forward(op, cuda.to_gpu(self.x))
@attr.gpu
@condition.retry(3)
def test_add_forward_gpu(self):
self.forward_gpu(lambda x, y: x + y)
@attr.gpu
@condition.retry(3)
def test_radd_forward_gpu(self):
self.forward_gpu(lambda x, y: y + x)
@attr.gpu
@condition.retry(3)
def test_sub_forward_gpu(self):
self.forward_gpu(lambda x, y: x - y)
@attr.gpu
@condition.retry(3)
def test_rsub_forward_gpu(self):
self.forward_gpu(lambda x, y: y - x)
@attr.gpu
@condition.retry(3)
def test_mul_forward_gpu(self):
self.forward_gpu(lambda x, y: x * y)
@attr.gpu
@condition.retry(3)
def test_rmul_forward_gpu(self):
self.forward_gpu(lambda x, y: y * x)
@attr.gpu
@condition.retry(3)
def test_div_forward_gpu(self):
self.forward_gpu(lambda x, y: x / y)
@attr.gpu
@condition.retry(3)
def test_rdiv_forward_gpu(self):
self.forward_gpu(lambda x, y: y / x)
@attr.gpu
@condition.retry(3)
def test_pow_forward_gpu(self):
self.forward_gpu(lambda x, y: x ** y)
@attr.gpu
@condition.retry(3)
def test_rpow_forward_gpu(self):
self.forward_gpu(lambda x, y: y ** x)
def check_backward(self, op, x_data, y_grad):
gradient_check.check_backward(lambda x: op(x, self.value),
x_data, y_grad)
def backward_cpu(self, op):
self.check_backward(op, self.x, self.gy)
@condition.retry(3)
def test_add_backward_cpu(self):
self.backward_cpu(lambda x, y: x + y)
@condition.retry(3)
def test_radd_backward_cpu(self):
self.backward_cpu(lambda x, y: y + x)
@condition.retry(3)
def test_sub_backward_cpu(self):
self.backward_cpu(lambda x, y: x - y)
@condition.retry(3)
def test_rsub_backward_cpu(self):
self.backward_cpu(lambda x, y: y - x)
@condition.retry(3)
def test_mul_backward_cpu(self):
self.backward_cpu(lambda x, y: x * y)
@condition.retry(3)
def test_rmul_backward_cpu(self):
self.backward_cpu(lambda x, y: y * x)
@condition.retry(3)
def test_div_backward_cpu(self):
self.backward_cpu(lambda x, y: x / y)
@condition.retry(3)
def test_rdiv_backward_cpu(self):
self.backward_cpu(lambda x, y: y / x)
@condition.retry(3)
def test_pow_backward_cpu(self):
self.backward_cpu(lambda x, y: x ** y)
@condition.retry(3)
def test_rpow_backward_cpu(self):
self.backward_cpu(lambda x, y: y ** x)
def backward_gpu(self, op):
self.check_backward(op, cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
@attr.gpu
@condition.retry(3)
def test_add_backward_gpu(self):
self.backward_gpu(lambda x, y: x + y)
@attr.gpu
@condition.retry(3)
def test_radd_backward_gpu(self):
self.backward_gpu(lambda x, y: y + x)
@attr.gpu
@condition.retry(3)
def test_sub_backward_gpu(self):
self.backward_gpu(lambda x, y: x - y)
@attr.gpu
@condition.retry(3)
def test_rsub_backward_gpu(self):
self.backward_gpu(lambda x, y: y - x)
@attr.gpu
@condition.retry(3)
def test_mul_backward_gpu(self):
self.backward_gpu(lambda x, y: x * y)
@attr.gpu
@condition.retry(3)
def test_rmul_backward_gpu(self):
self.backward_gpu(lambda x, y: y * x)
@attr.gpu
@condition.retry(3)
def test_div_backward_gpu(self):
self.backward_gpu(lambda x, y: x / y)
@attr.gpu
@condition.retry(3)
def test_rdiv_backward_gpu(self):
self.backward_gpu(lambda x, y: y / x)
@attr.gpu
@condition.retry(3)
def test_pow_backward_gpu(self):
self.backward_gpu(lambda x, y: x ** y)
@attr.gpu
@condition.retry(3)
def test_rpow_backward_gpu(self):
self.backward_gpu(lambda x, y: y ** x)
class TestVariableConstantOpSimple(VariableConstantOpTestBase,
unittest.TestCase):
def make_data(self):
x = numpy.random.uniform(.5, 1, (3, 2)).astype(numpy.float32)
gy = numpy.random.uniform(-1, 1, (3, 2)).astype(numpy.float32)
value = .5
return x, gy, value
class TestVariableConstantOpZeroDimension(VariableConstantOpTestBase,
unittest.TestCase):
def make_data(self):
x = numpy.random.uniform(.5, 1, ()).astype(numpy.float32)
gy = numpy.random.uniform(-1, 1, ()).astype(numpy.float32)
value = .5
return x, gy, value
class TestVariableConstantArrayOp(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(.5, 1, (3, 2)).astype(numpy.float32)
self.gy = numpy.random.uniform(-1, 1, (3, 2)).astype(numpy.float32)
self.value = numpy.random.uniform(-1, 1, (3, 2)).astype(numpy.float32)
def check_forward(self, op, x_data, gpu, positive):
value = self.value
if positive:
value = numpy.abs(value)
v = value
if gpu:
v = cuda.to_gpu(v)
x = chainer.Variable(x_data)
y = op(x, v)
gradient_check.assert_allclose(
op(self.x, value), y.data, atol=1e-6, rtol=1e-6)
def forward_cpu(self, op, positive=False):
self.check_forward(op, self.x, False, positive)
@condition.retry(3)
def test_add_forward_cpu(self):
self.forward_cpu(lambda x, y: x + y)
@condition.retry(3)
def test_radd_forward_cpu(self):
self.forward_cpu(lambda x, y: y + x)
@condition.retry(3)
def test_sub_forward_cpu(self):
self.forward_cpu(lambda x, y: x - y)
@condition.retry(3)
def test_rsub_forward_cpu(self):
self.forward_cpu(lambda x, y: y - x)
@condition.retry(3)
def test_mul_forward_cpu(self):
self.forward_cpu(lambda x, y: x * y)
@condition.retry(3)
def test_rmul_forward_cpu(self):
self.forward_cpu(lambda x, y: y * x)
@condition.retry(3)
def test_div_forward_cpu(self):
self.forward_cpu(lambda x, y: x / y)
@condition.retry(3)
def test_rdiv_forward_cpu(self):
self.forward_cpu(lambda x, y: y / x)
@condition.retry(3)
def test_pow_forward_cpu(self):
self.forward_cpu(lambda x, y: x ** y)
@condition.retry(3)
def test_rpow_forward_cpu(self):
self.forward_cpu(lambda x, y: y ** x, positive=True)
def forward_gpu(self, op, positive=False):
self.check_forward(op, cuda.to_gpu(self.x), True, positive)
@attr.gpu
@condition.retry(3)
def test_add_forward_gpu(self):
self.forward_gpu(lambda x, y: x + y)
@attr.gpu
@condition.retry(3)
def test_radd_forward_gpu(self):
self.forward_gpu(lambda x, y: y + x)
@attr.gpu
@condition.retry(3)
def test_sub_forward_gpu(self):
self.forward_gpu(lambda x, y: x - y)
@attr.gpu
@condition.retry(3)
def test_rsub_forward_gpu(self):
self.forward_gpu(lambda x, y: y - x)
@attr.gpu
@condition.retry(3)
def test_mul_forward_gpu(self):
self.forward_gpu(lambda x, y: x * y)
@attr.gpu
@condition.retry(3)
def test_rmul_forward_gpu(self):
self.forward_gpu(lambda x, y: y * x)
@attr.gpu
@condition.retry(3)
def test_div_forward_gpu(self):
self.forward_gpu(lambda x, y: x / y)
@attr.gpu
@condition.retry(3)
def test_rdiv_forward_gpu(self):
self.forward_gpu(lambda x, y: y / x)
@attr.gpu
@condition.retry(3)
def test_pow_forward_gpu(self):
self.forward_gpu(lambda x, y: x ** y)
@attr.gpu
@condition.retry(3)
def test_rpow_forward_gpu(self):
self.forward_gpu(lambda x, y: y ** x, positive=True)
def check_backward(self, op, x_data, y_grad, gpu, positive):
value = self.value
if positive:
value = numpy.abs(value)
if gpu:
value = cuda.to_gpu(value)
gradient_check.check_backward(lambda x: op(x, value), x_data, y_grad,
atol=1e-4, rtol=1e-4)
def backward_cpu(self, op, positive=False):
self.check_backward(op, self.x, self.gy, False, positive)
@condition.retry(3)
def test_add_backward_cpu(self):
self.backward_cpu(lambda x, y: x + y)
@condition.retry(3)
def test_radd_backward_cpu(self):
self.backward_cpu(lambda x, y: y + x)
@condition.retry(3)
def test_sub_backward_cpu(self):
self.backward_cpu(lambda x, y: x - y)
@condition.retry(3)
def test_rsub_backward_cpu(self):
self.backward_cpu(lambda x, y: y - x)
@condition.retry(3)
def test_mul_backward_cpu(self):
self.backward_cpu(lambda x, y: x * y)
@condition.retry(3)
def test_rmul_backward_cpu(self):
self.backward_cpu(lambda x, y: y * x)
@condition.retry(3)
def test_div_backward_cpu(self):
self.backward_cpu(lambda x, y: x / y)
@condition.retry(3)
def test_rdiv_backward_cpu(self):
self.backward_cpu(lambda x, y: y / x)
@condition.retry(3)
def test_pow_backward_cpu(self):
self.backward_cpu(lambda x, y: x ** y)
@condition.retry(3)
def test_rpow_backward_cpu(self):
self.backward_cpu(lambda x, y: y ** x, positive=True)
def backward_gpu(self, op, positive=False):
self.check_backward(
op, cuda.to_gpu(self.x), cuda.to_gpu(self.gy), True, positive)
@attr.gpu
@condition.retry(3)
def test_add_backward_gpu(self):
self.backward_gpu(lambda x, y: x + y)
@attr.gpu
@condition.retry(3)
def test_radd_backward_gpu(self):
self.backward_gpu(lambda x, y: y + x)
@attr.gpu
@condition.retry(3)
def test_sub_backward_gpu(self):
self.backward_gpu(lambda x, y: x - y)
@attr.gpu
@condition.retry(3)
def test_mul_backward_gpu(self):
self.backward_gpu(lambda x, y: x * y)
@attr.gpu
@condition.retry(3)
def test_rmul_backward_gpu(self):
self.backward_gpu(lambda x, y: y * x)
@attr.gpu
@condition.retry(3)
def test_div_backward_gpu(self):
self.backward_gpu(lambda x, y: x / y)
@attr.gpu
@condition.retry(3)
def test_rdiv_backward_gpu(self):
self.backward_gpu(lambda x, y: y / x)
@attr.gpu
@condition.retry(3)
def test_pow_backward_gpu(self):
self.backward_gpu(lambda x, y: x ** y)
@attr.gpu
@condition.retry(3)
def test_rpow_backward_gpu(self):
self.backward_gpu(lambda x, y: y ** x, positive=True)
class UnaryFunctionsTestBase(object):
def make_data(self):
raise NotImplementedError()
def setUp(self):
self.x, self.gy = self.make_data()
def check_forward(self, op, op_np, x_data):
x = chainer.Variable(x_data)
y = op(x)
gradient_check.assert_allclose(
op_np(self.x), y.data, atol=1e-7, rtol=1e-7)
def forward_cpu(self, op, op_np):
self.check_forward(op, op_np, self.x)
@condition.retry(3)
def test_neg_forward_cpu(self):
self.forward_cpu(lambda x: -x, lambda x: -x)
@condition.retry(3)
def test_abs_forward_cpu(self):
self.forward_cpu(lambda x: abs(x), lambda x: abs(x))
def forward_gpu(self, op, op_np):
self.check_forward(op, op_np, cuda.to_gpu(self.x))
@attr.gpu
@condition.retry(3)
def test_neg_forward_gpu(self):
self.forward_gpu(lambda x: -x, lambda x: -x)
@attr.gpu
@condition.retry(3)
def test_abs_forward_gpu(self):
self.forward_gpu(lambda x: abs(x), lambda x: abs(x))
def check_backward(self, op, x_data, y_grad):
gradient_check.check_backward(op, x_data, y_grad)
def backward_cpu(self, op):
self.check_backward(op, self.x, self.gy)
@condition.retry(3)
def test_neg_backward_cpu(self):
self.backward_cpu(lambda x: -x)
@condition.retry(3)
def test_abs_backward_cpu(self):
self.backward_cpu(lambda x: abs(x))
def backward_gpu(self, op):
self.check_backward(op, cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
@attr.gpu
@condition.retry(3)
def test_neg_backward_gpu(self):
self.backward_gpu(lambda x: -x)
@attr.gpu
@condition.retry(3)
def test_abs_backward_gpu(self):
self.backward_gpu(lambda x: abs(x))
class TestUnaryFunctionsSimple(UnaryFunctionsTestBase, unittest.TestCase):
def make_data(self):
x = numpy.random.uniform(.5, 1, (3, 2)).astype(numpy.float32)
gy = numpy.random.uniform(-1, 1, (3, 2)).astype(numpy.float32)
return x, gy
class TestUnaryFunctionsZeroDimension(UnaryFunctionsTestBase,
unittest.TestCase):
def make_data(self):
x = numpy.random.uniform(.5, 1, ()).astype(numpy.float32)
gy = numpy.random.uniform(-1, 1, ()).astype(numpy.float32)
return x, gy
class TestNegativePow(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 0, (3, 2)).astype(numpy.float32)
self.gy = numpy.random.uniform(-1, 1, (3, 2)).astype(numpy.float32)
def check_backward(self, x_data, y_grad):
gradient_check.check_backward(
lambda x: x ** 2, x_data, y_grad, atol=1e-4, rtol=1e-4)
def test_cpu(self):
self.check_backward(self.x, self.gy)
@attr.gpu
def test_gpu(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
class TestNotSupportOperation(unittest.TestCase):
def setUp(self):
self.x = chainer.Variable(numpy.zeros(10))
self.y = chainer.Variable(numpy.zeros(10))
def test_lt(self):
with self.assertRaises(NotImplementedError):
self.x < self.y
def test_le(self):
with self.assertRaises(NotImplementedError):
self.x <= self.y
def test_eq(self):
with self.assertRaises(NotImplementedError):
self.x == self.y
def test_ne(self):
with self.assertRaises(NotImplementedError):
self.x != self.y
def test_gt(self):
with self.assertRaises(NotImplementedError):
self.x > self.y
def test_ge(self):
with self.assertRaises(NotImplementedError):
self.x >= self.y
def test_nonzero(self):
with self.assertRaises(NotImplementedError):
if self.x:
pass
class ConvertValueToStringTest(unittest.TestCase):
def _check_scalar(self, value, string):
self.assertEqual(basic_math._convert_value_to_string(value), string)
def test_integer_positive(self):
self._check_scalar(2, '2')
def test_integer_zero(self):
self._check_scalar(0, '0')
def test_integer_negative(self):
self._check_scalar(-2, '(-2)')
def test_float_positive(self):
self._check_scalar(2.0, '2.0')
def test_float_zero(self):
self._check_scalar(0.0, '0.0')
def test_float_negative(self):
self._check_scalar(-2.0, '(-2.0)')
def test_numpy_scalar(self):
self._check_scalar(numpy.float32(2), '2.0')
def _check_array(self, value, string):
self.assertEqual(basic_math._convert_value_to_string(value), string)
value = chainer.Variable(value)
self.assertEqual(basic_math._convert_value_to_string(value), string)
def test_array_cpu(self):
self._check_array(numpy.array([1, 2]), 'constant array')
@attr.gpu
def test_array_gpu(self):
self._check_array(cuda.ndarray([1, 2]), 'constant array')
class TestLabel(unittest.TestCase):
def test_neg(self):
self.assertEqual(basic_math.Neg().label, '__neg__')
def test_absolute(self):
self.assertEqual(basic_math.Absolute().label, '|_|')
def test_add(self):
self.assertEqual(basic_math.Add().label, '_ + _')
def test_add_constant(self):
self.assertEqual(basic_math.AddConstant(2.0).label, '_ + 2.0')
def test_sub(self):
self.assertEqual(basic_math.Sub().label, '_ - _')
def test_sub_from_constant(self):
self.assertEqual(basic_math.SubFromConstant(2.0).label, '2.0 - _')
def test_mul(self):
self.assertEqual(basic_math.Mul().label, '_ * _')
def test_mul_constant(self):
self.assertEqual(basic_math.MulConstant(2.0).label, '_ * 2.0')
def test_div(self):
self.assertEqual(basic_math.Div().label, '_ / _')
def test_div_from_constant(self):
self.assertEqual(basic_math.DivFromConstant(2.0).label, '_ / 2.0')
def test_pow_var_var(self):
self.assertEqual(basic_math.PowVarVar().label, '_ ** _')
def test_pow_var_const(self):
self.assertEqual(basic_math.PowVarConst(2.0).label, '_ ** 2.0')
def test_pow_const_var(self):
self.assertEqual(basic_math.PowConstVar(2.0).label, '2.0 ** _')
testing.run_module(__name__, __file__)
| sinhrks/chainer | tests/chainer_tests/functions_tests/math_tests/test_basic_math.py | Python | mit | 32,057 |
"""View flow data as Sankey diagrams."""
__version__ = '2.1.0-dev'
from .dataset import Dataset
from .partition import Partition, Group
from .sankey_definition import SankeyDefinition, ProcessGroup, Waypoint, Bundle, Elsewhere
from .view_graph import view_graph
from .results_graph import results_graph
from .augment_view_graph import elsewhere_bundles, augment
from .hierarchy import Hierarchy
from .sankey_data import SankeyData, SankeyLink, SankeyNode
from .color_scales import CategoricalScale, QuantitativeScale
from .weave import weave
__all__ = ['Dataset', 'Partition', 'Group', 'SankeyDefinition', 'ProcessGroup',
'Waypoint', 'Bundle', 'Elsewhere', 'view_graph', 'results_graph',
'elsewhere_bundles', 'augment', 'Hierarchy', 'weave', 'SankeyData',
'SankeyLink', 'SankeyNode', 'CategoricalScale', 'QuantitativeScale']
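# Minimal usage sketch (illustrative; the flows table and the grouping into
# process groups are assumptions made for the example):
#
#     from floweaver import Dataset, ProcessGroup, Bundle, SankeyDefinition, weave
#
#     dataset = Dataset(flows)   # a pandas DataFrame with source/target/value
#     nodes = {'farms': ProcessGroup(['farm1', 'farm2']),
#              'customers': ProcessGroup(['james', 'mary'])}
#     bundles = [Bundle('farms', 'customers')]
#     ordering = [['farms'], ['customers']]
#     sdd = SankeyDefinition(nodes, bundles, ordering)
#     sankey_data = weave(sdd, dataset)   # -> SankeyData ready for rendering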
| ricklupton/sankeyview | floweaver/__init__.py | Python | mit | 861 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2013-2015 Marcos Organizador de Negocios SRL http://marcos.do
# Write by Eneldo Serrata ([email protected])
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import models | MarcosCommunity/odoo | marcos_addons/marcos_ncf/dgii_ventas/__init__.py | Python | agpl-3.0 | 1,016 |
a = 1 / 0
| tonyxty/quickfix.py | tests/errors/div_by_zero.py | Python | mit | 10 |
# This file is part of MyPaint.
# Copyright (C) 2007 by Martin Renold <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import os, sys
from os.path import join
import gtk, gobject
gdk = gtk.gdk
from lib import brush, helpers, mypaintlib
import filehandling, keyboard, brushmanager, windowing, document, layout
import colorhistory, brushmodifier
class Application: # singleton
"""
This class serves as a global container for everything that needs
to be shared in the GUI. Its constructor is the last part of the
initialization, called by main.py or by the testing scripts.
"""
def __init__(self, datapath, confpath, filenames):
self.confpath = confpath
self.datapath = datapath
# create config directory, and subdirs where the user might drop files
for d in ['', 'backgrounds', 'brushes']:
d = os.path.join(self.confpath, d)
if not os.path.isdir(d):
os.mkdir(d)
print 'Created', d
self.ui_manager = gtk.UIManager()
# if we are not installed, use the icons from the source
theme = gtk.icon_theme_get_default()
themedir_src = join(self.datapath, 'desktop/icons')
theme.prepend_search_path(themedir_src)
if not theme.has_icon('mypaint'):
print 'Warning: Where have all my icons gone?'
print 'Theme search path:', theme.get_search_path()
gtk.window_set_default_icon_name('mypaint')
gdk.set_program_class('MyPaint')
self.pixmaps = PixbufDirectory(join(self.datapath, 'pixmaps'))
self.cursor_color_picker = gdk.Cursor(gdk.display_get_default(), self.pixmaps.cursor_color_picker, 1, 30)
# unmanaged main brush; always the same instance (we can attach settings_observers)
# this brush is where temporary changes (color, size...) happen
self.brush = brush.BrushInfo()
self.brush.load_defaults()
self.preferences = {}
self.load_settings()
self.brushmanager = brushmanager.BrushManager(join(datapath, 'brushes'), join(confpath, 'brushes'), self)
self.kbm = keyboard.KeyboardManager()
self.filehandler = filehandling.FileHandler(self)
self.brushmodifier = brushmodifier.BrushModifier(self)
self.doc = document.Document(self)
self.brush.set_color_hsv((0, 0, 0))
self.init_brush_adjustments()
self.ch = colorhistory.ColorHistory(self)
self.layout_manager = layout.LayoutManager(
app=self,
prefs=self.preferences["layout.window_positions"],
factory=windowing.window_factory,
factory_opts=[self] )
self.drawWindow = self.layout_manager.get_widget_by_role("main-window")
self.layout_manager.show_all()
self.kbm.start_listening()
self.filehandler.doc = self.doc
self.filehandler.filename = None
gtk.accel_map_load(join(self.confpath, 'accelmap.conf'))
# Load the background settings window.
# FIXME: this line shouldn't be needed, but we need to load this up
# front to get any non-default background that the user has configured
# from the preferences.
self.layout_manager.get_subwindow_by_role("backgroundWindow")
# And the brush settings window, or things like eraser mode will break.
# FIXME: brush_adjustments should not be dependent on this
self.layout_manager.get_subwindow_by_role("brushSettingsWindow")
def at_application_start(*trash):
self.brushmanager.select_initial_brush()
if filenames:
# Open only the first file, no matter how many has been specified
# If the file does not exist just set it as the file to save to
fn = filenames[0].replace('file:///', '/') # some filebrowsers do this (should only happen with outdated mypaint.desktop)
if not os.path.exists(fn):
self.filehandler.filename = fn
else:
self.filehandler.open_file(fn)
self.apply_settings()
if not self.pressure_devices:
print 'No pressure sensitive devices found.'
self.drawWindow.present()
gobject.idle_add(at_application_start)
def save_settings(self):
"""Saves the current settings to persistent storage."""
def save_config():
settingspath = join(self.confpath, 'settings.json')
jsonstr = helpers.json_dumps(self.preferences)
f = open(settingspath, 'w')
f.write(jsonstr)
f.close()
self.brushmanager.save_brushes_for_devices()
save_config()
def apply_settings(self):
"""Applies the current settings."""
self.update_input_mapping()
self.update_input_devices()
prefs_win = self.layout_manager.get_widget_by_role('preferencesWindow')
prefs_win.update_ui()
def load_settings(self):
'''Loads the settings from persistent storage. Uses defaults if
not explicitly configured'''
def get_legacy_config():
dummyobj = {}
tmpdict = {}
settingspath = join(self.confpath, 'settings.conf')
if os.path.exists(settingspath):
exec open(settingspath) in dummyobj
tmpdict['saving.scrap_prefix'] = dummyobj['save_scrap_prefix']
tmpdict['input.device_mode'] = dummyobj['input_devices_mode']
tmpdict['input.global_pressure_mapping'] = dummyobj['global_pressure_mapping']
return tmpdict
def get_json_config():
settingspath = join(self.confpath, 'settings.json')
jsonstr = open(settingspath).read()
try:
return helpers.json_loads(jsonstr)
except Exception, e:
print "settings.json: %s" % (str(e),)
print "warning: failed to load settings.json, using defaults"
return {}
if sys.platform == 'win32':
import glib
scrappre = join(glib.get_user_special_dir(glib.USER_DIRECTORY_DOCUMENTS).decode('utf-8'),'MyPaint','scrap')
else:
scrappre = '~/MyPaint/scrap'
DEFAULT_CONFIG = {
'saving.scrap_prefix': scrappre,
'input.device_mode': 'screen',
'input.global_pressure_mapping': [(0.0, 1.0), (1.0, 0.0)],
'view.default_zoom': 1.0,
'view.high_quality_zoom': True,
'saving.default_format': 'openraster',
'brushmanager.selected_brush' : None,
'brushmanager.selected_groups' : [],
"input.button1_shift_action": 'straight_line',
"input.button1_ctrl_action": 'ColorPickerPopup',
"input.button2_action": 'pan_canvas',
"input.button2_shift_action": 'rotate_canvas',
"input.button2_ctrl_action": 'zoom_canvas',
"input.button3_action": 'ColorHistoryPopup',
"input.button3_shift_action": 'no_action',
"input.button3_ctrl_action": 'no_action',
# Default window positions.
# See gui.layout.set_window_initial_position for the meanings
# of the common x, y, w, and h settings
"layout.window_positions": {
# Main window default size. Sidebar width is saved here
'main-window': dict(sbwidth=270, x=64, y=32, w=-74, h=-96),
# Tool windows. These can be undocked (floating=True) or set
# initially hidden (hidden=True), or be given an initial sidebar
# index (sbindex=<int>) or height in the sidebar (sbheight=<int>)
# Non-hidden entries determine the default set of tools.
'colorSamplerWindow': dict(sbindex=1, floating=False, hidden=False,
x=-200, y=128,
w=200, h=300, sbheight=300),
'colorSelectionWindow': dict(sbindex=0, floating=True, hidden=True,
x=-128, y=64,
w=200, h=250, sbheight=250),
'brushSelectionWindow': dict(sbindex=2, floating=True,
x=-128, y=-128,
w=250, h=350, sbheight=350),
'layersWindow': dict(sbindex=3, floating=True,
x=128, y=-128,
w=200, h=200, sbheight=200),
# Non-tool subwindows. These cannot be docked, and are all
# intially hidden.
'brushSettingsWindow': dict(x=-460, y=-128, w=300, h=300),
'backgroundWindow': dict(),
'inputTestWindow': dict(),
'frameWindow': dict(),
'preferencesWindow': dict(),
},
}
window_pos = DEFAULT_CONFIG["layout.window_positions"]
self.window_names = window_pos.keys()
self.preferences = DEFAULT_CONFIG
try:
user_config = get_json_config()
except IOError:
user_config = get_legacy_config()
user_window_pos = user_config.get("layout.window_positions", {})
# note: .update() replaces the window position dict, but we want to update it
self.preferences.update(user_config)
# update window_pos, and drop window names that don't exist any more
# (we need to drop them because otherwise we will try to show a non-existing window)
for role in self.window_names:
if role in user_window_pos:
window_pos[role] = user_window_pos[role]
self.preferences["layout.window_positions"] = window_pos
def init_brush_adjustments(self):
"""Initializes all the brush adjustments for the current brush"""
self.brush_adjustment = {}
from brushlib import brushsettings
for i, s in enumerate(brushsettings.settings_visible):
adj = gtk.Adjustment(value=s.default, lower=s.min, upper=s.max, step_incr=0.01, page_incr=0.1)
self.brush_adjustment[s.cname] = adj
def update_input_mapping(self):
p = self.preferences['input.global_pressure_mapping']
if len(p) == 2 and abs(p[0][1]-1.0)+abs(p[1][1]-0.0) < 0.0001:
# 1:1 mapping (mapping disabled)
self.doc.tdw.pressure_mapping = None
else:
# TODO: maybe replace this stupid mapping by a hard<-->soft slider?
m = mypaintlib.Mapping(1)
m.set_n(0, len(p))
for i, (x, y) in enumerate(p):
m.set_point(0, i, x, 1.0-y)
def mapping(pressure):
return m.calculate_single_input(pressure)
self.doc.tdw.pressure_mapping = mapping
def update_input_devices(self):
# init extended input devices
self.pressure_devices = []
for device in gdk.devices_list():
#print device.name, device.source
#if device.source in [gdk.SOURCE_PEN, gdk.SOURCE_ERASER]:
            # The above condition is True sometimes for a normal USB
# Mouse. https://gna.org/bugs/?11215
# In fact, GTK also just guesses this value from device.name.
last_word = device.name.split()[-1].lower()
if last_word == 'pad':
# Setting the intuos3 pad into "screen mode" causes
# glitches when you press a pad-button in mid-stroke,
# and it's not a pointer device anyway. But it reports
# axes almost identical to the pen and eraser.
#
# device.name is usually something like "wacom intuos3 6x8 pad" or just "pad"
print 'Ignoring "%s" (probably wacom keypad device)' % device.name
continue
if last_word == 'cursor':
# this is a "normal" mouse and does not work in screen mode
print 'Ignoring "%s" (probably wacom mouse device)' % device.name
continue
for use, val_min, val_max in device.axes:
# Some mice have a third "pressure" axis, but without
# minimum or maximum. https://gna.org/bugs/?14029
if use == gdk.AXIS_PRESSURE and val_min != val_max:
if 'mouse' in device.name.lower():
# Real fix for the above bug https://gna.org/bugs/?14029
print 'Ignoring "%s" (probably a mouse, but it reports extra axes)' % device.name
continue
self.pressure_devices.append(device.name)
modesetting = self.preferences['input.device_mode']
mode = getattr(gdk, 'MODE_' + modesetting.upper())
if device.mode != mode:
print 'Setting %s mode for "%s"' % (modesetting, device.name)
device.set_mode(mode)
break
def save_gui_config(self):
gtk.accel_map_save(join(self.confpath, 'accelmap.conf'))
self.save_settings()
def message_dialog(self, text, type=gtk.MESSAGE_INFO, flags=0):
"""utility function to show a message/information dialog"""
d = gtk.MessageDialog(self.drawWindow, flags=flags, buttons=gtk.BUTTONS_OK, type=type)
d.set_markup(text)
d.run()
d.destroy()
def pick_color_at_pointer(self, widget, size=3):
'''Grab screen color at cursor (average of size x size rectangle)'''
# inspired by gtkcolorsel.c function grab_color_at_mouse()
screen = widget.get_screen()
colormap = screen.get_system_colormap()
root = screen.get_root_window()
screen_w, screen_h = screen.get_width(), screen.get_height()
display = widget.get_display()
screen_trash, x_root, y_root, modifiermask_trash = display.get_pointer()
image = None
x = x_root-size/2
y = y_root-size/2
if x < 0: x = 0
if y < 0: y = 0
if x+size > screen_w: x = screen_w-size
if y+size > screen_h: y = screen_h-size
image = root.get_image(x, y, size, size)
color_total = (0, 0, 0)
for x, y in helpers.iter_rect(0, 0, size, size):
pixel = image.get_pixel(x, y)
color = colormap.query_color(pixel)
color = [color.red, color.green, color.blue]
color_total = (color_total[0]+color[0], color_total[1]+color[1], color_total[2]+color[2])
N = size*size
color_total = (color_total[0]/N, color_total[1]/N, color_total[2]/N)
color_rgb = [ch/65535. for ch in color_total]
self.brush.set_color_rgb(color_rgb)
class PixbufDirectory:
def __init__(self, dirname):
self.dirname = dirname
self.cache = {}
def __getattr__(self, name):
if name not in self.cache:
try:
pixbuf = gdk.pixbuf_new_from_file(join(self.dirname, name + '.png'))
except gobject.GError, e:
raise AttributeError, str(e)
self.cache[name] = pixbuf
return self.cache[name]
| benosteen/mypaint | gui/application.py | Python | gpl-2.0 | 15,563 |
# -*- coding: utf-8 -*-
##
# This file is part of Testerman, a test automation system.
# Copyright (c) 2010 Sebastien Lefevre and other contributors
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
##
##
# A cisco-like command-line shell, using
# "inline" value tree representations,
# based on a syntax that could be described with ASN.1 primitives
# (sequence, choice, integer, string)
#
# Enables support for command completion,
# command argument parsing (value tree), and command
# execution.
#
#
# Low-level input handling, however, is based on readline or other adapters.
#
#
# Usage:
# - create a CommandContext and register one or multiple commands
# in it. These commands have a syntax tree based on a SequenceNode.
# - ...
#
#
##
import sys
import shlex
import traceback
import logging
import StringIO
def getBacktrace():
backtrace = StringIO.StringIO()
traceback.print_exc(None, backtrace)
ret = backtrace.getvalue()
backtrace.close()
return ret
##
# Tools/output pretty printers
##
def formatTable(headers = [], rows = [], order = None, notAvailableLabel = "(n/a)"):
"""
Pretty format the list of dict (rows) according to the header list (headers)
Header names not found in the dict are not displayed, and
only header names found in the dict are displayed.
Header is a list of either simple string (name) or tuple (name, label, [formatter]).
If it is a tuple, label is used to display the header, and name
to look for the element in the dicts.
The optional formatter is a function that will take the value to format as single arg.
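    An illustrative usage sketch follows this function definition.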
"""
def formatRow(cols, widths):
"""
Formatting helper: row pretty print.
"""
line = " %s%s " % (cols[0], (widths[0]-len(cols[0]))*' ')
for i in range(1, len(cols)):
line = line + "| %s%s " % (cols[i], (widths[i]-len(cols[i]))*' ')
return line
def expand(header):
"""
returns the name, label, and formatter for a header entry.
"""
if isinstance(header, tuple):
if len(header) == 2:
return header[0], header[1], lambda x: x
elif len(header) == 3:
return header
else:
raise Exception("Invalid header")
else:
return header, header, lambda x:x
headers = map(expand, headers)
# First, we initialize the widths for each column
colLabels = []
widths = []
for name, label, _ in headers:
widths.append(len(label))
colLabels.append(label)
if order:
rows.sort(lambda x, y: cmp(x.get(order), y.get(order)))
lines = [ ]
for entry in rows:
i = 0
line = []
for name, label, formatter in headers:
if entry.has_key(name):
e = str(formatter(entry[name]))
else:
e = notAvailableLabel
if len(e) > widths[i]: widths[i] = len(e)
line.append(e)
i += 1
lines.append(line)
# Then we can display them
res = formatRow(colLabels, widths)
res += "\n"
res += '-'*len(res) + "\n"
for line in lines:
res += formatRow(line, widths) + "\n"
return res
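# Illustrative sketch (added for documentation; not part of the original
# module): a minimal formatTable() call using the header conventions described
# in the docstring above. The column names and rows are hypothetical.
def _example_format_table():
    headers = ['name', ('size', 'Size (kB)', lambda v: v // 1024)]
    rows = [{'name': 'a.log', 'size': 4096}, {'name': 'b.log'}]
    # 'b.log' has no 'size' key, so it is rendered with the notAvailableLabel.
    return formatTable(headers, rows)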
def formatForm(header = [], values = {}):
"""
Pretty format the dict according to the header list, as a form.
Header names not found in the dict are not displayed, and
only header names found in the dict are displayed.
Header is a list of either simple string (name) or tuple (name, label).
If it is a tuple, label is used to display the header, and name
to look for the element in the dict of values.
Support multiline values.
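    An illustrative usage sketch follows this function definition.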
"""
# First, we compute the max width for the label column
labelWidth = 0
for h in header:
try:
name, label = h
except:
label = h
labelWidth = max(labelWidth, len(label))
labelWidth += 1 # includes the automatically added ':'
lines = [ ]
for h in header:
try:
name, label = h
except:
name = h
label = h
value = ""
if values.has_key(name):
value = str(values[name])
# Support for multilines
value = value.split('\n')
lines.append((" %%-%ss %%s" % labelWidth) % (label+":", value[0]))
for v in value[1:]:
lines.append((" %%-%ss %%s" % labelWidth) % ("", v))
return "\n".join(lines)
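# Illustrative sketch (added for documentation; not part of the original
# module): formatForm() with one (name, label) tuple and one plain string
# header; the values, including the multiline one, are hypothetical.
def _example_format_form():
    header = [('login', 'Login'), 'Full name']
    values = {'login': 'jdoe', 'Full name': 'John Doe\nSecond line'}
    return formatForm(header, values)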
##
# Some usual exceptions
##
class ParsingException(Exception):
"""
An exception forward mechanism enables
to reconstruct the path to the node that raises
the exception on the fly.
"""
def __init__(self, error = ""):
self._error = error
self._forwarded = False
def forwardAs(self, name):
if not self._forwarded:
# first node naming
self._error = "%s: %s" % (name, self._error)
self._forwarded = True
else:
# Continue to construct the node path
self._error = "%s.%s" % (name, self._error)
raise self
def __str__(self):
return self._error
class InvalidSyntax(ParsingException):
pass
class UnexpectedToken(ParsingException):
pass
class MissingToken(ParsingException):
pass
class ShellExit(Exception):
pass
##
# Supported command syntax trees
##
class SyntaxNode:
"""
Syntax tree node base class.
A syntax node enables to define syntax trees
for command arguments.
The name and description of the node are properties of the
association between the node and its parent.
This way, a SyntaxNode with child could be considered
as a fully resuable type, registered into different
context with different description and naming.
"""
# type description - to override in each subclass
_typeName = "<undefined>"
def __init__(self):
pass
def __str__(self):
return "%s" % (self._typeName)
# To implement in sub-classes
def suggestNextTokens(self, tokens):
"""
Returns a list of possible next tokens to continue
the current tokenized line.
Returns it as a list of (token, description), followed by a flag
indicating whether this node requires one of these
completion suggestions, or if the branch is already
completed (all mandatory tokens are already provided), followed
by the remaining tokens (exactly as in parse())
(True = one of these suggestions is required. False: I'm ok with
what I have now)
This requires a value tree computation. May raise a syntax
error/unexpected token exception if needed.
"""
return [], False, tokens
def parse(self, tokens):
"""
Generates a value tree by parsing the tokenized line (list of strings).
The first token of this list corresponds to this syntax node.
The syntax node is guaranteed to accept it as a valid value
as isValid() is called on it prior to be passed to a getValue().
The Testerman usual struct representation convention is used:
dict for "sequence"-like parameters (normal command parameters),
tuple (name, value) for choices.
Value lists are currently not supported.
@raises InvalidSyntax in case of a syntax error
@raises UnexpectedToken in case of an invalid continuation
@raises MissingToken in case of a missing mandatory continuation
@type tokens: list of strings
@param tokens: tokenized line, starting at the value to be parsed
by this syntax node (and the next one, if any)
@rtype: tuple (object, list of strings)
@returns: the parsed value as a value tree, and
the list of unconsummed tokens.
"""
raise InvalidSyntax()
##
# Primitive syntax nodes
##
class StringNode(SyntaxNode):
"""
Simple string node.
"""
_typeName = "<string>"
def __init__(self, description = "a string value"):
SyntaxNode.__init__(self)
self._description = description
def parse(self, tokens):
if not tokens:
raise MissingToken("missing value")
return (tokens[0], tokens[1:])
def suggestNextTokens(self, tokens):
if not tokens:
# Suggest the user to enter a string value
return [ (None,self._description) ], True, tokens
else:
# no suggestion
return [], False, tokens[1:]
class IntegerNode(SyntaxNode):
"""
Simple integer node.
"""
_typeName = "<integer>"
def __init__(self, description = "an integer value"):
SyntaxNode.__init__(self)
self._description = description
def parse(self, tokens):
if not tokens:
raise MissingToken("missing value")
try:
int(tokens[0])
except:
raise InvalidSyntax("integer value expected")
return (int(tokens[0]), tokens[1:])
def suggestNextTokens(self, tokens):
if not tokens:
# Suggest to enter an integer value
return [ (None, self._description) ], True, tokens
else:
try:
int(tokens[0])
except:
raise InvalidSyntax("integer value expected")
return [], False, tokens[1:]
class BooleanNode(SyntaxNode):
"""
Simple boolean node (true/false)
"""
_typeName = "<boolean>"
def __init__(self, trueDescription = "", falseDescription = ""):
SyntaxNode.__init__(self)
self._trueDescription = trueDescription
self._falseDescription = falseDescription
def parse(self, tokens):
ret = False
if not tokens:
raise MissingToken("missing value")
if tokens[0].lower() in [ 'true', 't', '1' ]:
ret = True
elif tokens[0].lower() in[ 'false', 'f', '0' ]:
ret = False
else:
            raise InvalidSyntax("boolean value expected")
return (ret, tokens[1:])
def suggestNextTokens(self, tokens):
if not tokens:
            # Suggest to enter a boolean value
return [ ('true', self._trueDescription), ('false', self._falseDescription) ], True, tokens
else:
if not tokens[0].lower() in [ 'true', 't', '1', 'false', 'f', '0']:
raise InvalidSyntax("boolean value expected (true/false/t/f/0/1)")
return [], False, tokens[1:]
class NullNode(SyntaxNode):
"""
'constant'-like. Useful in choices.
"""
_typeName = "<null>"
def __init__(self):
SyntaxNode.__init__(self)
def parse(self, tokens):
return (None, tokens)
def suggestNextTokens(self, tokens):
# nothing to suggest
return [], False, tokens
##
# More advanced syntax nodes
##
class SequenceNode(SyntaxNode):
"""
Contains named values.
When valuated, returns a dict.
MyType ::= SEQUENCE {
field1 INTEGER optional, -- this is field1
field2 String -- optional field2
}
would be declared as:
MyType = SequenceNode()
MyType.addField("field1", "this is field1", IntegerNode(), True)
MyType.addField("field2", "optional field2", StringNode())
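    A short parse() usage sketch follows this class definition.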
"""
_typeName = "<sequence>"
def __init__(self):
SyntaxNode.__init__(self)
self._fields = {}
def addField(self, fieldName, description, syntaxNode, optional = False):
"""
Declares a new field in the sequence.
"""
self._fields[fieldName] = (description, syntaxNode, optional)
return self
def parse(self, tokens):
"""
Returns a dict {fieldName: value}
All mandatory fields must be filled.
If not, an exception is raised.
"""
ret = {}
parsedFields = []
nextTokens = tokens
while nextTokens:
fieldName = nextTokens[0]
if not fieldName in self._fields:
break
if fieldName in parsedFields:
raise UnexpectedToken("duplicated field %s" % (fieldName))
parsedFields.append(fieldName)
try:
v, nextTokens = self._fields[fieldName][1].parse(nextTokens[1:])
except ParsingException, e:
e.forwardAs(fieldName)
ret[fieldName] = v
# Check if we have all mandatory fields
for fieldName, (description, node, optional) in self._fields.items():
if not optional and not fieldName in parsedFields:
raise MissingToken("missing mandatory field %s (%s)" % (fieldName, description))
return (ret, nextTokens)
def suggestNextTokens(self, tokens):
suggestions = []
completionRequired = False
parsedFields = []
nextTokens = tokens
# Parse
while nextTokens:
fieldName = nextTokens[0]
if not fieldName in self._fields:
break
if fieldName in parsedFields:
raise UnexpectedToken("duplicated field %s" % (fieldName))
parsedFields.append(fieldName)
# Check if the current field wants to complete something or not
try:
suggestions, completionRequired, nextTokens = self._fields[fieldName][1].suggestNextTokens(nextTokens[1:])
except ParsingException, e:
e.forwardAs(fieldName)
if completionRequired:
if nextTokens:
# well, this field could have consumed other tokens,
# but it did not: missing a token somewhere
raise MissingToken("field %s misses a value" % (fieldName))
else:
# OK, first suggest to complete this token
break
# otherwise, just continue with the next possible field
# Now, let's analyse our current state
if not nextTokens:
# The line ends here - we can propose some suggestions to continue it
if completionRequired:
return suggestions, completionRequired, nextTokens
else:
# in this case, we may complete with:
# optional tokens for the last started field branch
# and any non-entered field names in the current sequence
for fieldName, (description, node, optional) in self._fields.items():
if not fieldName in parsedFields:
# we prefix the description with a * for mandatory fields
suggestions.append((fieldName, (not optional and "*" or "") + description))
if not optional:
# At least one non-optional field: completion required.
completionRequired = True
return suggestions, completionRequired, nextTokens
else:
# The line has not been consumed completely - just check that
# we have all our mandatory fields
for fieldName, (description, node, optional) in self._fields.items():
if not optional and not fieldName in parsedFields:
raise MissingToken("missing mandatory field %s (%s)" % (fieldName, description))
# OK, we have everything, and nothing can't be completed at
# our level (i.e. in this node branch) anymore
return [], False, nextTokens
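# Illustrative sketch (added for documentation; not part of the original
# module): parsing a tokenized line with the MyType sequence declared in the
# class docstring above. The tokens are hypothetical.
def _example_sequence_parse():
    my_type = SequenceNode()
    my_type.addField("field1", "this is field1", IntegerNode(), True)
    my_type.addField("field2", "optional field2", StringNode())
    value, remaining = my_type.parse(["field2", "hello", "field1", "42"])
    # value == {"field2": "hello", "field1": 42} and remaining == []
    return value, remaining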
class SequenceOfNode(SyntaxNode):
"""
Equivalent of a ASN.1 SEQUENCE OF.
MyType ::= SEQUENCE OF <other SyntaxNode>
translates into:
MyType = SequenceOfNode(OtherSyntaxNode)
"""
_typeName = "<sequenceOf>"
def __init__(self, itemSyntaxNode):
SyntaxNode.__init__(self)
self.itemSyntaxNode = itemSyntaxNode
def parse(self, tokens):
"""
Parse tokens as itemSyntaxNode until no longer possible (i.e. error).
Note: a way to start a new item when the item is a SEQUENCE is to provide
a new value for a field of the sequence.
For instance for a SEQUENCE (name String, value String, description String optional),
you can use:
name name1 value v1 name name2 value v2 description d2 value v3 name3
The duplicate field will end the SEQUENCE syntax node parsing.
"""
remainingTokens = tokens
res = []
logging.debug("parsing sequenceOf")
while remainingTokens:
try:
logging.debug(remainingTokens)
val, remainingTokens = self.itemSyntaxNode.parse(remainingTokens)
res.append(val)
logging.debug("Captured: " + str(val))
except Exception, e:
logging.debug("Exception " + str(e))
break
return (res, remainingTokens)
def suggestNextTokens(self, tokens):
remainingTokens = tokens
# Suggest to add an element to the sequence of.
itemSuggestions, _, _ = self.itemSyntaxNode.suggestNextTokens([])
suggestions = []
incomplete = False
# check the existing sequence up to where we have an issue.
# Either we consume exactly all our tokens, or we have an exception before.
# So normally this is not an infinite loop.
while remainingTokens:
try:
logging.debug("sequenceOf suggestions with remaining tokens " + str(remainingTokens))
suggestions, incomplete, remainingTokens = self.itemSyntaxNode.suggestNextTokens(remainingTokens)
except Exception, e:
logging.debug("sequenceOf suggestions: " + str(e))
break
if incomplete:
return suggestions, incomplete, remainingTokens
# Complete with a new itemSyntaxNode if the current started node is also OK (no exception, no incomplete)
# But only if we don't have remainingTokens behind us - letting the parent manage them in that case,
# and not allowing further completion at our level.
if not remainingTokens:
return suggestions + itemSuggestions, False, remainingTokens
else:
return [], False, remainingTokens
class ChoiceNode(SyntaxNode):
"""
Equivalent of a ASN.1 CHOICE:
MyType ::= CHOICE {
choice1 INTEGER, -- this is choice 1
choice2 String -- this is choice 2
}
translates into:
MyType = ChoiceNode()
MyType.addChoice("choice1", "this is choice 1", IntegerNode())
MyType.addChoice("choice2", "this is choice 2", StringNode())
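    A short parse() usage sketch follows this class definition.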
"""
_typeName = "<choice>"
def __init__(self):
SyntaxNode.__init__(self)
self._choices = {}
def addChoice(self, name, description, syntaxNode = None):
if not syntaxNode:
syntaxNode = NullNode()
self._choices[name] = (description, syntaxNode)
return self # enable cascading multiple addChoice()
def addChoices(self, choices):
for choiceName, choiceDescription, syntaxNode in choices:
self.addChoice(choiceName, choiceDescription, syntaxNode)
return self
def parse(self, tokens):
"""
For a choice, returns a tuple (choiceName, value)
"""
if not tokens:
raise MissingToken("missing choice name")
# Check that we have one of or choice names
choiceName = tokens[0]
if choiceName in self._choices:
try:
v, remaining = self._choices[choiceName][1].parse(tokens[1:])
except ParsingException, e:
e.forwardAs(choiceName)
return ( (choiceName, v), remaining )
else:
raise InvalidSyntax("invalid choice name (%s)" % (choiceName))
def suggestNextTokens(self, tokens):
if not tokens:
# Suggest one choice
suggestions = []
for choiceName, (choiceDescription, node) in self._choices.items():
suggestions.append((choiceName, choiceDescription))
return suggestions, True, tokens
else:
# Delegate to the selected choice
choiceName = tokens[0]
if choiceName in self._choices:
try:
return self._choices[choiceName][1].suggestNextTokens(tokens[1:])
except ParsingException, e:
e.forwardAs(choiceName)
else:
raise InvalidSyntax("invalid choice name (%s)" % (choiceName))
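# Illustrative sketch (added for documentation; not part of the original
# module): a ChoiceNode parse() returns a (choiceName, value) tuple, as
# described in the class docstring above. The tokens are hypothetical.
def _example_choice_parse():
    my_type = ChoiceNode()
    my_type.addChoice("choice1", "this is choice 1", IntegerNode())
    my_type.addChoice("choice2", "this is choice 2", StringNode())
    value, remaining = my_type.parse(["choice1", "7"])
    # value == ("choice1", 7) and remaining == []
    return value, remaining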
class EnumNode(SyntaxNode):
"""
A string node that only accepts pre-defined values.
Enables to create choice-like based on string values instead
of the tuple value (choiceName, nodeValue).
"""
_typeName = "<enum-string>"
def __init__(self):
SyntaxNode.__init__(self)
self._possibleValues = {}
def addChoice(self, name, description):
self._possibleValues[name] = description
return self
def parse(self, tokens):
if not tokens:
raise MissingToken("missing value")
if not tokens[0] in self._possibleValues:
raise InvalidSyntax("invalid value (%s)" % tokens[0])
return (tokens[0], tokens[1:])
def suggestNextTokens(self, tokens):
if not tokens:
# Suggest the user to enter a string value
return self._possibleValues.items(), True, tokens
else:
if not tokens[0] in self._possibleValues:
raise InvalidSyntax("invalid value (%s)" % tokens[0])
# no suggestion
return [], False, tokens[1:]
class PositionalSequenceNode(SyntaxNode):
"""
Similar to a sequence node, but arguments
are not named and must be provided in the correct order.
When valuated, returns a dict.
All arguments must be provided - no optional fields support.
MyType ::= SEQUENCE {
field1 INTEGER, -- this is field1
field2 String, -- another field2
}
would be declared as:
MyType = PositionalSequenceNode()
MyType.addField("field1", "this is field1", IntegerNode())
MyType.addField("field2", "another field2", StringNode())
"""
_typeName = "<positional-sequence>"
def __init__(self):
SyntaxNode.__init__(self)
self._fields = []
def addField(self, fieldName, description, syntaxNode):
"""
Declares a new field in the sequence.
"""
self._fields.append((fieldName, description, syntaxNode))
return self
def parse(self, tokens):
"""
Returns a dict {fieldName: value}
All fields must be filled.
If not, an exception is raised.
"""
ret = {}
nextTokens = tokens
nextField = 0
while nextTokens:
fieldName, fieldDescription, fieldSyntaxNode = self._fields[nextField]
try:
v, nextTokens = fieldSyntaxNode.parse(nextTokens)
except ParsingException, e:
e.forwardAs(fieldName)
ret[fieldName] = v
nextField += 1
# Check if we have all mandatory fields
if nextField < len(self._fields):
raise MissingToken("missing mandatory positional argument %s (%s)" % (nextField, self._fields[nextField][1]))
return (ret, nextTokens)
def suggestNextTokens(self, tokens):
suggestions = []
completionRequired = False
nextTokens = tokens
nextField = 0
# Parse
while nextTokens:
fieldName, fieldDescription, fieldSyntaxNode = self._fields[nextField]
try:
suggestions, completionRequired, nextTokens = fieldSyntaxNode.suggestNextTokens(nextTokens)
except ParsingException, e:
e.forwardAs(fieldName)
if completionRequired:
if nextTokens:
# well, this field could have consumed other tokens,
# but it did not: missing a token somewhere
raise MissingToken("positional argument %s (%s) misses a value" % (nextField, fieldDescription))
else:
# OK, first suggest to complete this token
break
# otherwise, just continue with the next possible field
nextField += 1
# Now, let's analyse our current state
if not nextTokens:
# The line ends here - we can propose some suggestions to continue it
if completionRequired:
return suggestions, completionRequired, nextTokens
else:
# Just check that we have our count of positional args
if nextField < len(self._fields):
# Suggest what the next positional arg suggests
return self._fields[nextField][2].suggestNextTokens(nextTokens)
else:
# Everything is OK
return [], False, nextTokens
else:
# The line has not been consumed completely - just check that
# we have all our mandatory fields
if nextField < len(self._fields):
raise MissingToken("missing mandatory positional argument %s (%s)" % (nextField, self._fields[nextField][1]))
# OK, we have everything, and nothing can't be completed at
# our level (i.e. in this node branch) anymore
return [], False, nextTokens
##
# Command Context
#
# A special container to register commands whose exec line
# is usually a sequence node.
#
# A context is created and managed by a ContextManager,
# enabling sub-context navigation.
##
class CommandContext(ChoiceNode):
"""
A special container to register commands whose exec line is
usually a sequence node.
A context must be registered into a ContextManager to
be reachable. Such a registration enables context navigation.
Once created, use ContextManager.registerContext(name, description, context, parent = None)
to register the context.
Child contexts are automatically registered into the same context manager
(regardless they were added before or after this context registration).
Alternatively, you may directly get a prepared registered context with:
ContextManager.createContext(contextName, description, parent = ...)
or ContextManager.createRootContext(contextName, description)
"""
def __init__(self):
ChoiceNode.__init__(self)
self.__commands = {}
# Automatically injected by the context manager (upon registration)
self._parentContext = None
self._contextManager = None
self._name = None
# Traversed by the context manager (upon registration)
self._contexts = {}
##
# "Private" methods
##
def __str__(self):
return self._name
def _execute(self, tokens):
"""
Parse and execute the command provided by the tokenized line.
"""
if not tokens:
return 0
value, remainingTokens = self.parse(tokens)
if remainingTokens:
raise UnexpectedToken("unexpected token at: %s" % (remainingTokens[0]))
# Let's execute the command
(commandName, args) = value
if isinstance(args, dict):
# If a parameter was named like something-like-this,
# it is renamed to something_like_this
kwargs = {}
for k, v in args.items():
kwargs[k.replace('-', '_')] = v
return self.__commands[commandName](**kwargs)
elif args is None:
return self.__commands[commandName]()
else:
return self.__commands[commandName](args)
def _getSuggestions(self, tokens):
if not tokens:
return []
# The last token is either empty or the beginning of a token.
# We first retrieve all the suggestions for this last token,
# then we try to match it against the non-empty last token, if any.
suggestions, completionRequired, remainingTokens = self.suggestNextTokens(tokens[:-1])
if remainingTokens:
raise UnexpectedToken("unexpected token at: %s" % (remainingTokens[0]))
if not tokens[-1]:
# Command completion
# We don't have the beginning of a token.
# Simply suggest pure continuations
return completionRequired, suggestions
else:
# Word/token completion
# Try to match the beginning of the token with a possible continuation
tokenToComplete = tokens[-1]
adjustedSuggestions = [ x for x in suggestions if x[0].startswith(tokenToComplete) ]
if not adjustedSuggestions:
raise InvalidSyntax("unrecognized command: %s" % tokenToComplete)
return True, adjustedSuggestions
def _getFormattedSuggestions(self, tokens):
"""
Format the suggestions "a la cisco":
token description
token description
...
"""
ret = ""
        # _getSuggestions() returns (completionRequired, suggestions) once the
        # line has tokens; keep only the suggestion list here.
        completionRequired, suggestions = self._getSuggestions(tokens) or (False, [])
if not suggestions:
ret = "(no suggestion)"
else:
maxTokenLength = max([ len(x[0]) for x in suggestions])
format = " %%%ss %%s\n" % maxTokenLength
for token, description in suggestions:
ret += format % (token, description)
return ret
##
# "Public" methods that may be used in subclasses
##
def getContextName(self):
if self._parentContext:
return self._parentContext.getContextName() + '/' + self._name
else:
return self._name
def error(self, txt):
self._contextManager.write("%% error: %s\n" % txt)
def out(self, txt):
self._contextManager.write(txt)
def notify(self, txt = ""):
self._contextManager.write(txt + "\n")
def printTable(self, headers, rows, order = None, notAvailableLabel = "(n/a)"):
self.notify(formatTable(headers, rows, order, notAvailableLabel))
def printForm(self, headers, rows):
self.notify(formatForm(headers, rows))
def addCommand(self, commandName, description, syntaxNode, callback):
"""
Register a new command into the context.
The callback is a function that will take arguments according
to the node field names.
Node is the syntaxNode representing the command arguments.
"""
self.addChoice(commandName, description, syntaxNode)
self.__commands[commandName] = callback
return self
def addContext(self, name, description, context):
"""
Add a child context.
It won't be necessary to register this child context
into the context manager.
"""
self._contexts[name] = (name, description, context)
# Already registered into a context manager ? declare the child context into it
if self._contextManager:
self._contextManager.registerContext(name, description, context, self)
##
# Context Manager
##
class ContextManager:
"""
This context manager manages the CommandContext tree
and forwards the completion/execution requests to the current
active context.
Usage:
cm = ContextManager()
root = cm.createRootContext()
Raise ShellExit exception when exit is called from the root context.
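    A short usage sketch follows this class definition.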
"""
def __init__(self):
self._currentContext = None
self._registeredContexts = {} # contexts, by complete name/path. Used for direct access.
self._debug = False
def setDebug(self, debug):
self._debug = debug
def isDebug(self):
return self._debug
# Factory-oriented method
def createRootContext(self, contextName, description = None):
"""
Returns a pre-registered root context.
"""
context = self.createContext(contextName, description, None)
self.setCurrentContext(context)
return context
# Factory-oriented method
def createContext(self, contextName, description, parentContext):
"""
Returns a pre-registered child context.
"""
context = CommandContext()
return self.registerContext(contextName, description, context, parentContext)
# Registration-oriented method
def registerRootContext(self, contextName, description, context):
ret = self.registerContext(contextName, description, context, None)
self.setCurrentContext(context)
return ret
# Registration-oriented method
def registerContext(self, contextName, description, context, parentContext):
"""
Registers a context as root (parentContext = None) or as an existing
context child.
Automatically adds the navigation commands to navigate from/to this context.
Performs some injections so that each context (and sub-contexts) are aware
of their context manager.
"""
# Some injections
context._parentContext = parentContext
context._contextManager = self
context._name = contextName
if parentContext:
# Register a way to navigate to the context
parentContext.addCommand(contextName, "go to " + description + " context", NullNode(), lambda: self.setCurrentContext(context))
# Register a way to exit the context
context.addCommand("exit", "exit to parent context", NullNode(), self.goUp)
else:
context.addCommand("exit", "exit " + description, NullNode(), self.goUp)
# Registration for direct access
self._registeredContexts[context.getContextName()] = context
# Now, register the child contexts, if any
for n, d, c in context._contexts.values():
self.registerContext(n, d, c, context)
return context
def getCurrentContextName(self):
return self._currentContext.getContextName()
def setCurrentContext(self, context):
self._currentContext = context
def goTo(self, contextPath):
"""
Directly go to the context identified by the contextPath (path/to/context)
"""
context = self._registeredContexts.get(contextPath)
if not context:
raise Exception("Unknown context: %s" % contextPath)
else:
self.setCurrentContext(context)
def goUp(self):
if self._currentContext._parentContext:
self.setCurrentContext(self._currentContext._parentContext)
else:
raise ShellExit()
def execute(self, tokens):
"""
Forward the execution signal to the current context.
Return a status code that is suitable for a shell-based exec.
"""
ret = self._currentContext._execute(tokens)
if not isinstance(ret, int):
return 0
elif ret < 0:
return -ret
return ret
def getFormattedSuggestions(self, tokens):
"""
Forward the signal to the current context
"""
self._currentContext._getFormattedSuggestions(tokens)
def getSuggestions(self, tokens):
"""
Forward the signal to the current context
"""
return self._currentContext._getSuggestions(tokens)
def write(self, txt):
"""
To be reimplemented in inherited adapters
"""
sys.stdout.write(txt)
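# Illustrative sketch (added for documentation; not part of the original
# module): registering a root context and one command into a ContextManager.
# The "show" command and its "name" field are hypothetical.
def _example_context_manager():
    cm = ContextManager()
    root = cm.createRootContext('root', 'example root context')
    syntax = SequenceNode()
    syntax.addField('name', 'name of the object to show', StringNode())
    root.addCommand('show', 'show an object', syntax,
                    lambda name: cm.write(name + '\n'))
    cm.execute(['show', 'name', 'something'])  # writes "something" to stdout
    return cm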
################################################################################
# Adapter Classes
################################################################################
# These adapters enable to bind the context manager
# with an input/output manager.
#
# Could be a telnet layer, or raw input, readline, cmd.Cmd...
import cmd
import readline
class CmdContextManagerAdapter(ContextManager):
"""
This is an adapter class to glue the Context Manager
logic to a cmd.Cmd access interface.
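    A short usage sketch follows this class definition.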
"""
class MyCmd(cmd.Cmd):
def __init__(self, contextManager):
cmd.Cmd.__init__(self)
self._contextManager = contextManager
readline.set_completer_delims(" ")
readline.parse_and_bind('"?": possible-completions')
def emptyline(self):
"""
Do not repeat the last command on empty line
"""
pass
def completedefault(self, text, line, begidx, endidx):
"""
Overrides the cmd.Cmd implementation.
Completes the words after the first one.
"""
return self.completenames(line)
def completenames(self, text, *ignored):
"""
Overrides the cmd.Cmd implementation.
Normally, we should only return a list of tokens to complete the current text.
Actually, we also display the possible completion options, if any, directly here,
as we display them "a la cisco", with an associated description (which is not
the case of the standard way of readline displaying completion suggestions).
In this case, we just "simulate" that there are no possible completions so
that readline do not display them its own way.
This avoids hooking the rl_completion_display_matches_hook via set_completion_display_matches (Python 2.6+)
or ctypes/cdll manipulation in 2.5.
"""
ret = self._getSuggestions(text)
if isinstance(ret, basestring):
# a single suggestion was returned. Complete with it.
return [ ret + ' ' ] # the trailing space enables the next completion attempt to focus on next token, not current token completion
elif isinstance(ret, list):
# multiple possibilities. Display them.
self.showCompletionSuggestions(ret)
# And do not complete anything
return []
else:
# Error during completion. Do not complete anything.
return []
def _getSuggestions(self, text, *ignored):
"""
Returns a list of possible tokens or continuations, (list)
or a single token (completion), (string)
or None (error). (None)
I'm not a big fan of dynamic return types, but it makes things easier here.
"""
tokens = self.tokenize(text)
try:
completionRequired, ret = self._contextManager.getSuggestions(tokens)
except ParsingException, e:
self.stdout.write("\n%% error: %s\n" % str(e))
self.redisplay()
return None
# If we have a required completion, let's check if we have a single one.
if completionRequired:
if len(ret) == 1:
# Single suggestion. A real one or a help suggestion ?
token, description = ret[0]
if token is not None:
# If we have only one suggestion, autocomplete with it.
return token
else:
# We have a None suggestion, i.e. we can't complete for the user (int/string value, ...)
# Display it as a suggestion.
return [ ("<value>", description) ]
else:
# We have multiple suggestions, display them.
return ret
else:
# We may have multiple, optional completions, and the <CR> option, too
# Display them
return ret + [( '<CR>', '')]
def tokenize(self, line):
"""
Tokenize a command line (simple version)
"""
try:
ret = shlex.split(line)
if not line or line.endswith(' '):
return ret + [ '' ] # the trailing '' is here to force a continuation
else:
return ret # will force a word completion
except Exception, e:
return []
def onecmd(self, line):
# Support for ^Z
line = line.strip()
if not line:
return
if line == "EOF":
self.stdout.write("\n")
self._contextManager.goUp()
return
try:
self._contextManager.execute(self.tokenize(line))
except ShellExit:
raise
except Exception, e:
if self._contextManager.isDebug():
self.stdout.write(getBacktrace() + "\n")
self.stdout.write("%% error: %s\n" % str(e))
def showCompletionSuggestions(self, suggestions):
"""
Displays the completion suggestions "a la Cisco".
"""
suggestions.sort()
maxTokenLength = max([ len(x[0]) for x in suggestions])
fmt = " %%-%ss %%s\n" % maxTokenLength
self.stdout.write("\n")
for token, description in suggestions:
self.stdout.write(fmt % (token, description))
self.redisplay()
def redisplay(self):
# a readline.redisplay() is not enough: for readline, nothing
# has changed and it won't redisplay the prompt> line
# Instead, we should call rl_forced_update_display,
# but this is not exported through the Python wrapper.
# readline.redisplay()
self.stdout.write(self.prompt + readline.get_line_buffer())
def __init__(self, intro):
ContextManager.__init__(self)
self._cmd = self.MyCmd(self)
self._cmd.intro = intro
def run(self):
self._cmd.cmdloop()
def setCurrentContext(self, context):
"""
Overriden so that the cmd.prompt is updated
when changing contexts
"""
self._currentContext = context
self._cmd.prompt = self.getCurrentContextName() + "> "
def write(self, txt):
self._cmd.stdout.write(txt)
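# Illustrative sketch (added for documentation; not part of the original
# module): running an interactive shell through the cmd.Cmd adapter. The intro
# banner and context names are hypothetical.
def _example_run_shell():
    cm = CmdContextManagerAdapter("Example structured shell")
    root = CommandContext()
    cm.registerRootContext("root", "example root context", root)
    cm.run()  # blocks, reading commands until the root context is exited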
| seblefevre/testerman | admin/StructuredInteractiveShell.py | Python | gpl-2.0 | 36,549 |
#!/usr/bin/python
"""
Program that parses Avocado results. It reads the results.json
file as input; the default directory is ~/avocado/job-results/latest/,
but a different path can be given with -f/--filepath.
"""
__version__ = '1.1'
import os
import json
import argparse
from avocado.core import data_dir
def parse_result(resultfile):
with open(resultfile, 'r') as f:
data = json.load(f)
max_width = 0
for i in data['tests']:
        max_width = max(max_width, len(i['test']))
nice_results(max_width, "Test", "Status", "Seconds", "Info")
nice_results(max_width, "----", "------", "-------", "----")
for i in data['tests']:
if i['fail_reason'] == 'None':
nice_results(max_width, i['test'], i['status'], round(i['time']), '')
else:
nice_results(max_width, i['test'], i['status'], round(i['time']),
i['fail_reason'])
def nice_results(max_width, casename, status, seconds, reason=''):
# TODO: how to make the results look more beautiful
print_format = '%%-%ds %%-8s %%-10s %%-8s' % max_width
print print_format % (casename, status, seconds, reason)
if __name__ == '__main__':
# Default to use the directory latest
statusfile = 'results.json'
log_dir = data_dir.get_logs_dir()
result_file = os.path.join(log_dir, "latest/", statusfile)
parser_a = argparse.ArgumentParser(description="show avocado results.")
parser_a.add_argument('-f', '--filepath',
help='path of the results file results.json',
dest='filepath', action='store')
arguments = parser_a.parse_args()
if arguments.filepath is None:
parse_result(result_file)
elif os.path.exists(os.path.join(arguments.filepath, statusfile)):
parse_result(os.path.join(arguments.filepath, statusfile))
else:
        print "Input file path is wrong, please check it"
| PyLearner/myworks | scan_result_avocado.py | Python | apache-2.0 | 1,919 |
'''
test Ip4Protocol
'''
import pytest
import sys
sys.path.insert(0, '..')
sys.path.insert(0, '.')
from jabs import ilf
KNOWN_PROTOS = [('icmp', 1),
('tcp', 6),
('udp', 17),
('rdp', 27),
('rsvp', 46),
('gre', 47),
('esp', 50),
('ah', 51),
('encap', 98),
('eigrp', 88),
('ospfigp', 89),
('vrrp', 112),
]
def test_init():
'initial object has 256 entries in all 3 hashes'
ipp = ilf.Ip4Protocol()
assert len(ipp._num_toname) == 256
assert len(ipp._num_todesc) == 256
assert len(ipp._name_tonum) == 256
def test_known():
'test some known name/protocol number translations'
ipp = ilf.Ip4Protocol()
for name, proto in KNOWN_PROTOS:
assert name == ipp.getnamebyproto(proto)
assert proto == ipp.getprotobyname(name)
def test_roundtrip():
'check roundtrip translation proto>name>proto'
ipp = ilf.Ip4Protocol()
PROT = ilf.numbers.IP4PROTOCOLS
for proto, (name, desc) in PROT.items():
assert name == ipp.getnamebyproto(proto)
assert proto == ipp.getprotobyname(name)
def test_name_tonum():
'name, acquired by number, maps back to same protocol number'
ipp = ilf.Ip4Protocol()
for num in range(255):
name = ipp.getnamebyproto(num)
assert num == ipp.getprotobyname(name)
def test_raise_valueerror():
'invalid protocol numbers raise ValueError'
ipp = ilf.Ip4Protocol()
with pytest.raises(ValueError):
ipp.getnamebyproto(256)
with pytest.raises(ValueError):
ipp.getnamebyproto(-1)
| hertogp/jabs | test/test_ilf_core_protocols.py | Python | mit | 1,713 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2012 Therp BV (<http://therp.nl>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import re
from openerp.osv import orm, fields # pylint: disable=W0402
from openerp import SUPERUSER_ID
from openerp.tools.translate import _
import logging
_logger = logging.getLogger(__name__)
try:
import ldap
from ldap.filter import filter_format
except ImportError:
_logger.debug('Cannot import ldap')
class CompanyLDAP(orm.Model):
_inherit = 'res.company.ldap'
_columns = {
'no_deactivate_user_ids': fields.many2many(
'res.users', 'res_company_ldap_no_deactivate_user_rel',
'ldap_id', 'user_id',
'Users never to deactivate',
help='List users who never should be deactivated by'
' the deactivation wizard'),
'deactivate_unknown_users': fields.boolean(
'Deactivate unknown users'),
}
_defaults = {
'no_deactivate_user_ids': [(6, 0, [SUPERUSER_ID])],
'deactivate_unknown_users': False,
}
def action_populate(self, cr, uid, ids, context=None):
"""
Prepopulate the user table from one or more LDAP resources.
Obviously, the option to create users must be toggled in
the LDAP configuration.
        Returns a tuple: the number of users created and the number of
        users deactivated (as far as we can tell).
"""
if isinstance(ids, (int, float)):
ids = [ids]
users_pool = self.pool.get('res.users')
users_no_before = users_pool.search(
cr, uid, [], context=context, count=True)
logger = logging.getLogger('orm.ldap')
logger.debug("action_populate called on res.company.ldap ids %s", ids)
deactivate_unknown = None
known_user_ids = [uid]
for this in self.read(cr, uid, ids,
[
'no_deactivate_user_ids',
'deactivate_unknown_users',
],
context=context, load='_classic_write'):
if deactivate_unknown is None:
deactivate_unknown = True
known_user_ids.extend(this['no_deactivate_user_ids'])
deactivate_unknown &= this['deactivate_unknown_users']
if deactivate_unknown:
logger.debug("will deactivate unknown users")
for conf in self.get_ldap_dicts(cr, ids):
if not conf['create_user']:
continue
attribute_match = re.search(
r'([a-zA-Z_]+)=\%s', conf['ldap_filter'])
if attribute_match:
login_attr = attribute_match.group(1)
else:
raise orm.except_orm(
"No login attribute found",
"Could not extract login attribute from filter %s" %
conf['ldap_filter'])
results = self.get_ldap_entry_dicts(conf)
for result in results:
login = result[1][login_attr][0].lower().strip()
user_id = self.get_or_create_user(
cr, uid, conf, login, result)
if not user_id:
# this happens if the user exists but is active = False
# -> fetch the user again and reactivate it
cr.execute(
"SELECT id FROM res_users "
"WHERE lower(login)=%s",
(login,))
res = cr.fetchone()
if res:
self.pool.get('res.users').write(
cr, SUPERUSER_ID,
res[0],
{'active': True},
context=context
)
else:
raise orm.except_orm(
_('UserError'),
_('Unable to process user with login %s' % login)
)
known_user_ids.append(user_id)
users_no_after = users_pool.search(
cr, uid, [], context=context, count=True)
users_created = users_no_after - users_no_before
deactivated_users_count = 0
if deactivate_unknown:
deactivated_users_count = self.do_deactivate_unknown_users(
cr, uid, ids, known_user_ids, context=context)
logger.debug("%d users created", users_created)
logger.debug("%d users deactivated", deactivated_users_count)
return users_created, deactivated_users_count
def do_deactivate_unknown_users(
self, cr, uid, ids, known_user_ids, context=None):
"""
Deactivate users not found in last populate run
"""
res_users = self.pool.get('res.users')
unknown_user_ids = []
for unknown_user in res_users.read(
cr, uid,
res_users.search(
cr, uid,
[('id', 'not in', known_user_ids)],
context=context),
['login'],
context=context):
present_in_ldap = False
for conf in self.get_ldap_dicts(cr, ids):
present_in_ldap |= bool(self.get_ldap_entry_dicts(
conf, user_name=unknown_user['login']))
if not present_in_ldap:
res_users.write(
cr, uid, unknown_user['id'], {'active': False},
context=context)
unknown_user_ids.append(unknown_user['id'])
return len(unknown_user_ids)
def get_ldap_entry_dicts(self, conf, user_name='*'):
"""
Execute ldap query as defined in conf
        Don't call self.query because it suppresses possible exceptions
"""
ldap_filter = filter_format(conf['ldap_filter'] % user_name, ())
conn = self.connect(conf)
conn.simple_bind_s(conf['ldap_binddn'] or '',
conf['ldap_password'] or '')
results = conn.search_st(conf['ldap_base'], ldap.SCOPE_SUBTREE,
ldap_filter.encode('utf8'), None,
timeout=60)
conn.unbind()
return results
def populate_wizard(self, cr, uid, ids, context=None):
"""
GUI wrapper for the populate method that reports back
the number of users created.
"""
if not ids:
return
if isinstance(ids, (int, float)):
ids = [ids]
wizard_obj = self.pool.get('res.company.ldap.populate_wizard')
res_id = wizard_obj.create(
cr, uid, {'ldap_id': ids[0]}, context=context)
return {
'name': wizard_obj._description,
'view_type': 'form',
'view_mode': 'form',
'res_model': wizard_obj._name,
'domain': [],
'context': context,
'type': 'ir.actions.act_window',
'target': 'new',
'res_id': res_id,
'nodestroy': True,
}
| ddico/server-tools | users_ldap_populate/model/users_ldap.py | Python | agpl-3.0 | 8,007 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2015, Florent Thiery
import os
import subprocess
import shlex
ERRORS = (
'ERROR',
)
def run_cmd(cmd, shell=False):
env = dict(os.environ)
env["LANG"] = "C"
if shell:
args = cmd
else:
args = shlex.split(cmd)
p = subprocess.Popen(args, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, shell=shell)
stdout, stderr = p.communicate()
rc = p.returncode
if rc != 0:
if '|' in cmd:
raise Exception('Command %s failed, maybe shell=True is required' %cmd)
else:
raise Exception('Command %s failed, error: %s' % (cmd, stderr))
elif contains_errors(stderr):
raise Exception('Command %s had an error' %cmd)
return rc, stdout, stderr
def parse_gst_execution_time(stdout):
pattern = 'Execution ended after '
if stdout:
for line in stdout.split('\n'):
if line.startswith(pattern):
t = line.split(pattern)[1]
h, m, s = t.split(':')
return int(h)*3600 + int(m)*60 + float(s)
def get_stdout(cmd, shell=False):
rc, stdout, stderr = run_cmd(cmd, shell=shell)
return stdout
def check_cmd(cmd, shell=False, complain=True):
try:
run_cmd(cmd, shell=shell)
return True, 'no errors'
except Exception as e:
if complain:
print(e)
return False, e
def run_gst_cmd(cmd):
try:
rc, stdout, stderr = run_cmd(cmd, shell=False)
took = parse_gst_execution_time(stdout)
except Exception as e:
print(e)
took = 0
return took
def contains_errors(text):
for e in ERRORS:
if e in text:
return True
| UbiCastTeam/gstreamer-benchmarks | utils/process.py | Python | lgpl-3.0 | 1,765 |
# Copyright 2015 Rackspace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import eventlet
import fixtures
import functools
import logging as pylogging
import platform
import sys
import time
from unittest import mock
from oslo_log import formatters
from oslo_log import log as logging
from oslotest import base
import testtools
from oslo_privsep import capabilities
from oslo_privsep import comm
from oslo_privsep import daemon
from oslo_privsep.tests import testctx
LOG = logging.getLogger(__name__)
def undecorated():
pass
class TestException(Exception):
pass
def get_fake_context(conf_attrs=None, **context_attrs):
conf_attrs = conf_attrs or {}
context = mock.NonCallableMock()
context.conf.user = 42
context.conf.group = 84
context.conf.thread_pool_size = 10
context.conf.capabilities = [
capabilities.CAP_SYS_ADMIN, capabilities.CAP_NET_ADMIN]
context.conf.logger_name = 'oslo_privsep.daemon'
vars(context).update(context_attrs)
vars(context.conf).update(conf_attrs)
return context
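# Illustrative use of the helper above (the logger name is arbitrary): defaults
# set in get_fake_context are kept unless overridden via conf_attrs.
#   ctx = get_fake_context(conf_attrs={'logger_name': 'myapp.privileged'})
#   assert ctx.conf.user == 42 and ctx.conf.logger_name == 'myapp.privileged'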
@testctx.context.entrypoint
def logme(level, msg, exc_info=False):
# We want to make sure we log everything from the priv side for
# the purposes of this test, so force loglevel.
LOG.logger.setLevel(logging.DEBUG)
if exc_info:
try:
raise TestException('with arg')
except TestException:
LOG.log(level, msg, exc_info=True)
else:
LOG.log(level, msg)
class LogRecorder(pylogging.Formatter):
def __init__(self, logs, *args, **kwargs):
if sys.version_info >= (3, 8):
kwargs['validate'] = False
super(LogRecorder, self).__init__(*args, **kwargs)
self.logs = logs
def format(self, record):
self.logs.append(copy.deepcopy(record))
return super(LogRecorder, self).format(record)
@testtools.skipIf(platform.system() != 'Linux',
'works only on Linux platform.')
class LogTest(testctx.TestContextTestCase):
def setUp(self):
super(LogTest, self).setUp()
def test_priv_loglevel(self):
logger = self.useFixture(fixtures.FakeLogger(
level=logging.INFO))
# These write to the log on the priv side
logme(logging.DEBUG, u'test@DEBUG')
logme(logging.WARN, u'test@WARN')
time.sleep(0.1) # Hack to give logging thread a chance to run
# logger.output is the resulting log on the unpriv side.
# This should have been filtered based on (unpriv) loglevel.
self.assertNotIn(u'test@DEBUG', logger.output)
self.assertIn(u'test@WARN', logger.output)
def test_record_data(self):
logs = []
self.useFixture(fixtures.FakeLogger(
level=logging.INFO, format='dummy',
# fixtures.FakeLogger accepts only a formatter
# class/function, not an instance :(
formatter=functools.partial(LogRecorder, logs)))
logme(logging.WARN, u'test with exc', exc_info=True)
time.sleep(0.1) # Hack to give logging thread a chance to run
self.assertEqual(1, len(logs))
record = logs[0]
self.assertIn(u'test with exc', record.getMessage())
self.assertIsNone(record.exc_info)
self.assertIn(u'TestException: with arg', record.exc_text)
self.assertEqual('PrivContext(cfg_section=privsep)',
record.processName)
self.assertIn(u'test_daemon.py', record.exc_text)
self.assertEqual(logging.WARN, record.levelno)
self.assertEqual('logme', record.funcName)
def test_format_record(self):
logs = []
self.useFixture(fixtures.FakeLogger(
level=logging.INFO, format='dummy',
# fixtures.FakeLogger accepts only a formatter
# class/function, not an instance :(
formatter=functools.partial(LogRecorder, logs)))
logme(logging.WARN, u'test with exc', exc_info=True)
time.sleep(0.1) # Hack to give logging thread a chance to run
self.assertEqual(1, len(logs))
record = logs[0]
# Verify the log record can be formatted by ContextFormatter
fake_config = mock.Mock(
logging_default_format_string="NOCTXT: %(message)s")
formatter = formatters.ContextFormatter(config=fake_config)
formatter.format(record)
@testtools.skipIf(platform.system() != 'Linux',
'works only on Linux platform.')
class DaemonTest(base.BaseTestCase):
@mock.patch('os.setuid')
@mock.patch('os.setgid')
@mock.patch('os.setgroups')
@mock.patch('oslo_privsep.capabilities.set_keepcaps')
@mock.patch('oslo_privsep.capabilities.drop_all_caps_except')
def test_drop_privs(self, mock_dropcaps, mock_keepcaps,
mock_setgroups, mock_setgid, mock_setuid):
channel = mock.NonCallableMock()
context = get_fake_context()
d = daemon.Daemon(channel, context)
d._drop_privs()
mock_setuid.assert_called_once_with(42)
mock_setgid.assert_called_once_with(84)
mock_setgroups.assert_called_once_with([])
self.assertCountEqual(
[mock.call(True), mock.call(False)],
mock_keepcaps.mock_calls)
mock_dropcaps.assert_called_once_with(
set((capabilities.CAP_SYS_ADMIN, capabilities.CAP_NET_ADMIN)),
set((capabilities.CAP_SYS_ADMIN, capabilities.CAP_NET_ADMIN)),
[])
@testtools.skipIf(platform.system() != 'Linux',
'works only on Linux platform.')
class WithContextTest(testctx.TestContextTestCase):
def test_unexported(self):
self.assertRaisesRegex(
NameError, 'undecorated not exported',
testctx.context._wrap, undecorated)
class ClientChannelTestCase(base.BaseTestCase):
DICT = {
'string_1': ('tuple_1', b'tuple_2'),
b'byte_1': ['list_1', 'list_2'],
}
EXPECTED = {
'string_1': ('tuple_1', b'tuple_2'),
'byte_1': ['list_1', 'list_2'],
}
def setUp(self):
super(ClientChannelTestCase, self).setUp()
context = get_fake_context()
with mock.patch.object(comm.ClientChannel, '__init__'), \
mock.patch.object(daemon._ClientChannel, 'exchange_ping'):
self.client_channel = daemon._ClientChannel(mock.ANY, context)
@mock.patch.object(daemon.LOG.logger, 'handle')
def test_out_of_band_log_message(self, handle_mock):
message = [comm.Message.LOG, self.DICT]
self.assertEqual(self.client_channel.log, daemon.LOG)
with mock.patch.object(pylogging, 'makeLogRecord') as mock_make_log, \
mock.patch.object(daemon.LOG, 'isEnabledFor',
return_value=True) as mock_enabled:
self.client_channel.out_of_band(message)
mock_make_log.assert_called_once_with(self.EXPECTED)
handle_mock.assert_called_once_with(mock_make_log.return_value)
mock_enabled.assert_called_once_with(
mock_make_log.return_value.levelno)
def test_out_of_band_not_log_message(self):
with mock.patch.object(daemon.LOG, 'warning') as mock_warning:
self.client_channel.out_of_band([comm.Message.PING])
mock_warning.assert_called_once()
@mock.patch.object(daemon.logging, 'getLogger')
@mock.patch.object(pylogging, 'makeLogRecord')
def test_out_of_band_log_message_context_logger(self, make_log_mock,
get_logger_mock):
logger_name = 'os_brick.privileged'
context = get_fake_context(conf_attrs={'logger_name': logger_name})
with mock.patch.object(comm.ClientChannel, '__init__'), \
mock.patch.object(daemon._ClientChannel, 'exchange_ping'):
channel = daemon._ClientChannel(mock.ANY, context)
get_logger_mock.assert_called_once_with(logger_name)
self.assertEqual(get_logger_mock.return_value, channel.log)
message = [comm.Message.LOG, self.DICT]
channel.out_of_band(message)
make_log_mock.assert_called_once_with(self.EXPECTED)
channel.log.isEnabledFor.assert_called_once_with(
make_log_mock.return_value.levelno)
channel.log.logger.handle.assert_called_once_with(
make_log_mock.return_value)
class UnMonkeyPatch(base.BaseTestCase):
def test_un_monkey_patch(self):
self.assertFalse(any(
eventlet.patcher.is_monkey_patched(eventlet_mod_name)
for eventlet_mod_name in daemon.EVENTLET_MODULES))
eventlet.monkey_patch()
self.assertTrue(any(
eventlet.patcher.is_monkey_patched(eventlet_mod_name)
for eventlet_mod_name in daemon.EVENTLET_MODULES))
daemon.un_monkey_patch()
for eventlet_mod_name, func_modules in daemon.EVENTLET_LIBRARIES:
if not eventlet.patcher.is_monkey_patched(eventlet_mod_name):
continue
for name, green_mod in func_modules():
orig_mod = eventlet.patcher.original(name)
patched_mod = sys.modules.get(name)
for attr_name in green_mod.__patched__:
un_monkey_patched_attr = getattr(patched_mod, attr_name,
None)
original_attr = getattr(orig_mod, attr_name, None)
self.assertEqual(un_monkey_patched_attr, original_attr)
| openstack/oslo.privsep | oslo_privsep/tests/test_daemon.py | Python | apache-2.0 | 10,078 |
#!/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2013 Fabio Falcinelli
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from tornado.ioloop import IOLoop
import yaml
import servicemanager
import win32event
import win32service
import win32serviceutil
from wstunnel import winreg
from wstunnel.factory import create_ws_server_endpoint
from svc.registry import get_reg_values
__author__ = 'fabio'
WSTUNNELD_KEY = r"SOFTWARE\wstunneld"
class wstunsrvd(win32serviceutil.ServiceFramework):
"""
The server service class
"""
_svc_name_ = "WSTunnelServerSvc"
_svc_display_name_ = "WebSocket tunnel server service"
_svc_description_ = "This is the server endpoint of the WebSocket tunnel"
def __init__(self, args):
win32serviceutil.ServiceFramework.__init__(self, args)
self.hWaitStop = win32event.CreateEvent(None, 0, 0, None)
#Read configuration from registry
os.chdir(get_reg_values(key=WSTUNNELD_KEY, root_key=winreg.HKEY_LOCAL_MACHINE)["install_dir"])
self.reg_conf = get_reg_values(key=os.path.join(WSTUNNELD_KEY, "server"))
self.srv = None
def SvcStop(self):
"""
Stops the Windows service
"""
self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
win32event.SetEvent(self.hWaitStop)
if self.srv:
self.srv.stop()
IOLoop.instance().stop()
def SvcDoRun(self):
"""
Starts the Windows service
"""
servicemanager.LogMsg(servicemanager.EVENTLOG_INFORMATION_TYPE,
servicemanager.PYS_SERVICE_STARTED,
(self._svc_name_, ''))
with open(self.reg_conf["config"]) as yaml_conf:
self.srv = create_ws_server_endpoint(yaml.load(yaml_conf.read()))
self.srv.start()
IOLoop.instance().start()
def main():
"""
Entry point for the WebSocket server tunnel service endpoint
"""
win32serviceutil.HandleCommandLine(wstunsrvd)
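# Descriptive note (not from the original project): HandleCommandLine wires up
# the usual pywin32 service verbs, so this module is typically invoked as e.g.
#   python wstunsrvd.py install | start | stop | remove | debug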
if __name__ == "__main__":
main() | ffalcinelli/wstunnel | wstunnel/svc/wstunsrvd.py | Python | lgpl-3.0 | 2,656 |
class LazyLoadProxy(object):
# Taken from http://code.activestate.com/recipes/496741-object-proxying/
__slots__ = ["_obj_fn", "__weakref__", "__proxy_storage"]
def __init__(self, fn, storage=None):
object.__setattr__(self, "_obj_fn", fn)
object.__setattr__(self, "__proxy_storage", storage)
def __getattribute__(self, name):
return getattr(object.__getattribute__(self, "_obj_fn")(), name)
def __delattr__(self, name):
delattr(object.__getattribute__(self, "_obj_fn")(), name)
def __setattr__(self, name, value):
setattr(object.__getattribute__(self, "_obj_fn")(), name, value)
def __getitem__(self, index):
return object.__getattribute__(self, "_obj_fn")().__getitem__(index)
def __nonzero__(self):
return bool(object.__getattribute__(self, "_obj_fn")())
def __str__(self):
return str(object.__getattribute__(self, "_obj_fn")())
def __repr__(self):
return repr(object.__getattribute__(self, "_obj_fn")())
def __len__(self):
return len(object.__getattribute__(self, "_obj_fn")())
_special_names = [
'__abs__', '__add__', '__and__', '__call__', '__cmp__', '__coerce__',
'__contains__', '__delitem__', '__delslice__', '__div__', '__divmod__',
'__eq__', '__float__', '__floordiv__', '__ge__', #'__getitem__',
'__getslice__', '__gt__', '__hash__', '__hex__', '__iadd__', '__iand__',
'__idiv__', '__idivmod__', '__ifloordiv__', '__ilshift__', '__imod__',
'__imul__', '__int__', '__invert__', '__ior__', '__ipow__', '__irshift__',
'__isub__', '__iter__', '__itruediv__', '__ixor__', '__le__', #'__len__',
'__long__', '__lshift__', '__lt__', '__mod__', '__mul__', '__ne__',
'__neg__', '__oct__', '__or__', '__pos__', '__pow__', '__radd__',
'__rand__', '__rdiv__', '__rdivmod__', '__reduce__', '__reduce_ex__',
        '__repr__', '__reversed__', '__rfloordiv__', '__rlshift__', '__rmod__',
'__rmul__', '__ror__', '__rpow__', '__rrshift__', '__rshift__', '__rsub__',
'__rtruediv__', '__rxor__', '__setitem__', '__setslice__', '__sub__',
'__truediv__', '__xor__', 'next',
]
@classmethod
def _create_class_proxy(cls, theclass):
"""creates a proxy for the given class"""
def make_method(name):
def method(self, *args, **kw):
return getattr(object.__getattribute__(self, "_obj_fn")(), name)(
*args, **kw)
return method
namespace = {}
for name in cls._special_names:
if hasattr(theclass, name):
namespace[name] = make_method(name)
return type("%s(%s)" % (cls.__name__, theclass.__name__), (cls,), namespace)
def __new__(cls, obj, *args, **kwargs):
"""
creates an proxy instance referencing `obj`. (obj, *args, **kwargs) are
passed to this class' __init__, so deriving classes can define an
__init__ method of their own.
note: _class_proxy_cache is unique per deriving class (each deriving
class must hold its own cache)
"""
try:
cache = cls.__dict__["_class_proxy_cache"]
except KeyError:
cls._class_proxy_cache = cache = {}
try:
theclass = cache[obj.__class__]
except KeyError:
cache[obj.__class__] = theclass = cls._create_class_proxy(obj.__class__)
ins = object.__new__(theclass)
theclass.__init__(ins, obj, *args, **kwargs)
return ins
class Proxy(LazyLoadProxy):
# Taken from http://code.activestate.com/recipes/496741-object-proxying/
def __init__(self, obj):
super(Proxy, self).__init__(lambda: obj)
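# Illustrative usage (names below are made up): the callable given to
# LazyLoadProxy is invoked on every attribute/item access, so resolution of the
# underlying object is deferred until it is actually used.
#   lazy = LazyLoadProxy(lambda: {'loaded': True})
#   lazy['loaded']  # -> True, resolved through __getitem__ above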
| neversun/sailfish-hackernews | pyPackages/python_firebase-noarch/firebase/lazy.py | Python | mit | 3,754 |
# Copyright 2015 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Controller for the build_annotations app.
This controller sits between the django models for cidb tables and the views
that power the app.
Keep non-trivial logic to aggregate data / optimize db access here and test it.
"""
from __future__ import print_function
import collections
from django.db import models
from django.db.models import query
from build_annotations import models as ba_models
# We need to fake out some system modules before importing chromite modules.
from cq_stats import fake_system_modules # pylint: disable=unused-import
from chromite.lib import clactions
class BuildRow(collections.MutableMapping):
"""A database "view" that collects all relevant stats about a build."""
def __init__(self, build_entry, build_stage_entries,
cl_action_entries, failure_entries, annotations,
costly_annotations_qs):
"""Initialize a BuildRow.
Do not use QuerySets as arguments. All query sets must have been evaluated
before creating this object. All data manipulation within this object is
pure python.
All non-trivial computation on this object should be lazy: Defer it to
property getters.
"""
assert not isinstance(build_entry, query.QuerySet)
assert not isinstance(build_stage_entries, query.QuerySet)
assert not isinstance(cl_action_entries, query.QuerySet)
assert not isinstance(failure_entries, query.QuerySet)
self._data = {}
self.build_entry = build_entry
self._build_stage_entries = build_stage_entries
self._cl_action_entries = cl_action_entries
self._failure_entries = failure_entries
# The readonly data is accessible from this object as dict entries.
self['id'] = self.build_entry.id
self['build_number'] = self.build_entry.build_number
self['status'] = self.build_entry.status
self['summary'] = self.build_entry.summary
self['start_time'] = self.build_entry.start_time
if (self.build_entry.finish_time is not None and
self['start_time'] is not None):
self['run_time'] = self.build_entry.finish_time - self['start_time']
else:
self['run_time'] = None
if self['start_time'] is not None:
self['weekday'] = (self['start_time'].date().weekday() != 6)
else:
self['weekday'] = None
self['chromeos_version'] = self.build_entry.full_version
self['chrome_version'] = self.build_entry.chrome_version
self['waterfall'] = self.build_entry.waterfall
self['builder_name'] = self.build_entry.builder_name
failed_stages = [x.name for x in build_stage_entries if
x.status == x.FAIL]
self['failed_stages'] = ', '.join(failed_stages)
self['picked_up_count'] = self._CountCLActions(
ba_models.ClActionTable.PICKED_UP)
self['submitted_count'] = self._CountCLActions(
ba_models.ClActionTable.SUBMITTED)
self['kicked_out_count'] = self._CountCLActions(
ba_models.ClActionTable.KICKED_OUT)
self['annotation_summary'] = self._SummaryAnnotations(annotations)
self._costly_annotations_qs = costly_annotations_qs
def GetAnnotationsQS(self):
"""Return the queryset backing annotations.
Executing this queryset is costly because there is no way to optimize the
query execution.
    Since this is a related_set queryset that was further filtered, each item
    in the queryset causes a db hit.
"""
return self._costly_annotations_qs
def __getitem__(self, *args, **kwargs):
return self._data.__getitem__(*args, **kwargs)
def __iter__(self, *args, **kwargs):
return self._data.__iter__(*args, **kwargs)
def __len__(self, *args, **kwargs):
return self._data.__len__(*args, **kwargs)
def __setitem__(self, *args, **kwargs):
return self._data.__setitem__(*args, **kwargs)
def __delitem__(self, *args, **kwargs):
return self._data.__delitem__(*args, **kwargs)
def _CountCLActions(self, cl_action):
actions = [x for x in self._cl_action_entries if x.action == cl_action]
return len(actions)
def _SummaryAnnotations(self, annotations):
if not annotations:
return ''
result = '%d annotations: ' % len(annotations)
summaries = []
for annotation in annotations:
summary = annotation.failure_category
failure_message = annotation.failure_message
blame_url = annotation.blame_url
if failure_message:
summary += '(%s)' % failure_message[:30]
elif blame_url:
summary += '(%s)' % blame_url[:30]
summaries.append(summary)
result += '; '.join(summaries)
return result
class BuildRowController(object):
"""The 'controller' class that collates stats for builds.
More details here.
Unit-test this class please.
"""
DEFAULT_NUM_BUILDS = 100
def __init__(self):
self._latest_build_id = 0
self._build_rows_map = {}
def GetStructuredBuilds(self, latest_build_id=None,
num_builds=DEFAULT_NUM_BUILDS, extra_filter_q=None):
"""The primary method to obtain stats for builds
Args:
latest_build_id: build_id of the latest build to query.
num_builds: Number of build to query.
extra_filter_q: An optional Q object to filter builds. Use GetQ* methods
provided in this class to form the filter.
Returns:
A list of BuildRow entries for the queried builds.
"""
# If we're not given any latest_build_id, we fetch the latest builds
if latest_build_id is not None:
build_qs = ba_models.BuildTable.objects.filter(id__lte=latest_build_id)
else:
build_qs = ba_models.BuildTable.objects.all()
if extra_filter_q is not None:
build_qs = build_qs.filter(extra_filter_q)
build_qs = build_qs.order_by('-id')
build_qs = build_qs[:num_builds]
# Critical for performance: Prefetch all the join relations we'll need.
build_qs = build_qs.prefetch_related('buildstagetable_set')
build_qs = build_qs.prefetch_related('clactiontable_set')
build_qs = build_qs.prefetch_related(
'buildstagetable_set__failuretable_set')
build_qs = build_qs.prefetch_related('annotationstable_set')
# Now hit the database.
build_entries = [x for x in build_qs]
self._build_rows_map = {}
build_rows = []
for build_entry in build_entries:
build_stage_entries = [x for x in build_entry.buildstagetable_set.all()]
cl_action_entries = [x for x in build_entry.clactiontable_set.all()]
failure_entries = []
for entry in build_stage_entries:
failure_entries += [x for x in entry.failuretable_set.all()]
      # Filter in python; filtering the queryset changes the queryset, and we
      # end up hitting the database again.
annotations = [a for a in build_entry.annotationstable_set.all() if
a.deleted == False]
costly_annotations_qs = build_entry.annotationstable_set.filter(
deleted=False)
build_row = BuildRow(build_entry, build_stage_entries, cl_action_entries,
failure_entries, annotations, costly_annotations_qs)
self._build_rows_map[build_entry.id] = build_row
build_rows.append(build_row)
if build_entries:
self._latest_build_id = build_entries[0].id
return build_rows
def GetHandlingTimeHistogram(self, latest_build_id=None,
num_builds=DEFAULT_NUM_BUILDS,
extra_filter_q=None):
"""Get CL handling time histogram."""
# If we're not given any latest_build_id, we fetch the latest builds
if latest_build_id is not None:
build_qs = ba_models.BuildTable.objects.filter(id__lte=latest_build_id)
else:
build_qs = ba_models.BuildTable.objects.all()
if extra_filter_q is not None:
build_qs = build_qs.filter(extra_filter_q)
build_qs = build_qs.order_by('-id')
build_qs = build_qs[:num_builds]
# Hit the database.
build_entries = list(build_qs)
claction_qs = ba_models.ClActionTable.objects.select_related('build_id')
claction_qs = claction_qs.filter(
build_id__in=set(b.id for b in build_entries))
# Hit the database.
claction_entries = [c for c in claction_qs]
claction_history = clactions.CLActionHistory(
self._JoinBuildTableClActionTable(build_entries, claction_entries))
# Convert times seconds -> minutes.
return {k: v / 60.0
for k, v in claction_history.GetPatchHandlingTimes().iteritems()}
def _JoinBuildTableClActionTable(self, build_entries, claction_entries):
"""Perform the join operation in python.
Args:
build_entries: A list of buildTable entries.
claction_entries: A list of claction_entries.
Returns:
A list fo claction.CLAction objects created by joining the list of builds
and list of claction entries.
"""
claction_entries_by_build_id = {}
for entry in claction_entries:
entries = claction_entries_by_build_id.setdefault(entry.build_id.id, [])
entries.append(entry)
claction_list = []
for build_entry in build_entries:
for claction_entry in claction_entries_by_build_id.get(build_entry.id,
[]):
claction_list.append(clactions.CLAction(
id=claction_entry.id,
build_id=build_entry.id,
action=claction_entry.action,
reason=claction_entry.reason,
build_config=build_entry.build_config,
change_number=claction_entry.change_number,
patch_number=claction_entry.patch_number,
change_source=claction_entry.change_source,
timestamp=claction_entry.timestamp))
return claction_list
############################################################################
# GetQ* methods are intended to be used in nifty search expressions to search
# for builds.
@classmethod
def GetQNoAnnotations(cls):
"""Return a Q for builds with no annotations yet."""
return models.Q(annotationstable__isnull=True)
@classmethod
def GetQRestrictToBuildConfig(cls, build_config):
"""Return a Q for builds with the given build_config."""
return models.Q(build_config=build_config)
@property
def num_builds(self):
return len(self._build_rows_map)
@property
def latest_build_id(self):
return self._latest_build_id
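# Illustrative usage sketch (only names from this module; 'master-paladin' is a
# placeholder build config, not taken from this file):
#   controller = BuildRowController()
#   filter_q = (BuildRowController.GetQNoAnnotations() &
#               BuildRowController.GetQRestrictToBuildConfig('master-paladin'))
#   rows = controller.GetStructuredBuilds(num_builds=50, extra_filter_q=filter_q)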
| guorendong/iridium-browser-ubuntu | third_party/chromite/appengine/cq_stats/build_annotations/build_row_controller.py | Python | bsd-3-clause | 10,530 |
# License: https://github.com/RobFisher/mailshare/blob/master/LICENSE
import imaplib
import settings # TODO make this work: from django.conf import settings
import email
_server = None
def get_server_connection():
"""Logs in to the server if needed, Returns an IMAP4 object."""
global _server
if _server == None:
_server = imaplib.IMAP4_SSL(settings.MAILSHARE_IMAP_HOST)
_server.login(settings.MAILSHARE_IMAP_USER, settings.MAILSHARE_IMAP_PASSWORD)
_server.select(settings.MAILSHARE_IMAP_MAILBOX)
return _server
def fetch_messages(max_messages=10, output_file=None, expunge=False):
"""Return a list of email.message.Message objects representing some messages in the IMAP mailbox.
    max_messages: the maximum number of messages to fetch this call
    output_file: a file to append email data to
    expunge: when True and MAILSHARE_IMAP_ENABLE_EXPUNGE is set, flag fetched
        messages as deleted and expunge them from the mailbox
"""
messages = []
server = get_server_connection()
typ, message_ids = server.search(None, 'ALL')
# message_ids is a list with one item, so it looks like this:
# [b'1 2 3 4 5']
# To get a maximum number of mails we need to split it out into a list of
# ids.
message_ids_to_fetch = message_ids[0].split()[0:max_messages]
for message_id in message_ids_to_fetch:
try:
typ, message_data = server.fetch(message_id, '(RFC822)')
except:
print 'Exception processing message ' + str(message_id)
continue
for part in message_data:
if isinstance(part, tuple):
if output_file != None:
write_part(output_file, part[1])
message = email.message_from_string(part[1])
messages.append(message)
if expunge and settings.MAILSHARE_IMAP_ENABLE_EXPUNGE:
typ, response = server.store(message_id, '+FLAGS', '\\Deleted')
typ, response = server.expunge()
return messages
def write_part(output_file, message_data):
"""Write the email part to the file."""
output_file.write(str(len(message_data))+'\n')
output_file.write(message_data)
output_file.write('\n')
def read_messages(input_file):
"""
    Read all messages from input_file. Reads messages previously written to the
    output_file specified in a call to fetch_messages. This enables replaying messages into the
    database after they have been deleted from the IMAP mailbox.
"""
messages = []
while True:
line = input_file.readline()
if line == '':
break
length = int(line)
message_data = input_file.read(length)
input_file.readline()
message = email.message_from_string(message_data)
messages.append(message)
return messages
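# Illustrative usage (assumes the MAILSHARE_IMAP_* settings are configured; the
# file name is a placeholder):
#   with open('messages.dump', 'a') as out:
#       fetched = fetch_messages(max_messages=5, output_file=out)
#   with open('messages.dump') as saved:
#       replayed = read_messages(saved)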
| RobFisher/mailshare | mailshareapp/poll_imap_email.py | Python | bsd-3-clause | 2,742 |
from django import forms
from django.contrib import messages
from django.core.exceptions import ValidationError
from django.db.models import ObjectDoesNotExist
from django.shortcuts import get_object_or_404, redirect, render_to_response
from django.template import RequestContext
from django.utils.translation import ugettext as _
from django.views import generic
from plata.contact.models import Contact
from plata.discount.models import Discount
from plata.shop.views import Shop
from plata.shop.models import Order
from simple.models import Product
shop = Shop(Contact, Order, Discount)
product_list = generic.ListView.as_view(
queryset=Product.objects.filter(is_active=True),
template_name='product/product_list.html',
)
class OrderItemForm(forms.Form):
quantity = forms.IntegerField(label=_('quantity'), initial=1,
min_value=1, max_value=100)
def product_detail(request, object_id):
product = get_object_or_404(Product.objects.filter(is_active=True), pk=object_id)
if request.method == 'POST':
form = OrderItemForm(request.POST)
if form.is_valid():
order = shop.order_from_request(request, create=True)
try:
order.modify_item(product, form.cleaned_data.get('quantity'))
messages.success(request, _('The cart has been updated.'))
except ValidationError, e:
if e.code == 'order_sealed':
[messages.error(request, msg) for msg in e.messages]
else:
raise
return redirect('plata_shop_cart')
else:
form = OrderItemForm()
return render_to_response('product/product_detail.html', {
'object': product,
'form': form,
}, context_instance=RequestContext(request))
| ixc/plata | examples/simple/views.py | Python | bsd-3-clause | 1,810 |
# Copyright (c) 2013 Yubico AB
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from cryptography.x509 import (
ExtensionNotFound, ObjectIdentifier
)
__all__ = [
'DeviceMatcher',
'FingerprintMatcher',
'ExtensionMatcher',
'DEFAULT_MATCHERS'
]
class DeviceMatcher(object):
selector_type = None
def matches(self, certificate, parameters=None):
raise NotImplementedError
class FingerprintMatcher(DeviceMatcher):
selector_type = 'fingerprint'
def matches(self, certificate, parameters=[]):
fingerprints = [s.lower() for s in parameters]
return certificate.get_fingerprint('sha1').lower() in fingerprints
def get_ext_by_oid(cert, oid):
oid = ObjectIdentifier(oid)
try:
extension = cert.extensions.get_extension_for_oid(oid)
except ExtensionNotFound:
return None
return extension.value.value
class ExtensionMatcher(DeviceMatcher):
selector_type = 'x509Extension'
def matches(self, certificate, parameters={}):
key = parameters.get('key')
match_value = parameters.get('value')
extension_value = get_ext_by_oid(certificate, key)
if extension_value is not None:
if match_value is None or match_value == extension_value:
return True
return False
DEFAULT_MATCHERS = [
FingerprintMatcher(),
ExtensionMatcher()
]
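# Illustrative sketch (the OID below is made up, not a real extension): a
# selector of type 'x509Extension' is evaluated by passing its parameters dict
# straight to ExtensionMatcher.matches.
#   matcher = ExtensionMatcher()
#   matcher.matches(certificate, {'key': '1.3.6.1.4.1.99999.1'})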
| moreati/python-u2flib-server | u2flib_server/attestation/matchers.py | Python | bsd-2-clause | 2,683 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of the Poster project
#
# Copyright (c) 2006-2009 Marco Antonio Islas Cruz
#
# Poster is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Poster is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# @author Marco Antonio Islas Cruz <[email protected]>
# @copyright 2011 Marco Antonio Islas Cruz
# @license http://www.gnu.org/licenses/gpl.txt | markuz/poster | poster_resources/__init__.py | Python | gpl-3.0 | 988 |
__author__ = 'hiroki-m'
| hiroki8080/Kokemomo | test/plugins/blog/__init__.py | Python | mit | 27 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module provides an interface to the previous Pod
API that outputs a kubernetes.client.models.V1Pod.
The advantage is that the full Kubernetes API
is supported and no serialization code needs to be written.
"""
import copy
import uuid
import kubernetes.client.models as k8s
class PodDefaults:
"""
Static defaults for Pods
"""
XCOM_MOUNT_PATH = '/airflow/xcom'
SIDECAR_CONTAINER_NAME = 'airflow-xcom-sidecar'
XCOM_CMD = 'trap "exit 0" INT; while true; do sleep 30; done;'
VOLUME_MOUNT = k8s.V1VolumeMount(
name='xcom',
mount_path=XCOM_MOUNT_PATH
)
VOLUME = k8s.V1Volume(
name='xcom',
empty_dir=k8s.V1EmptyDirVolumeSource()
)
SIDECAR_CONTAINER = k8s.V1Container(
name=SIDECAR_CONTAINER_NAME,
command=['sh', '-c', XCOM_CMD],
image='alpine',
volume_mounts=[VOLUME_MOUNT],
resources=k8s.V1ResourceRequirements(
requests={
"cpu": "1m",
}
),
)
class PodGenerator:
"""
Contains Kubernetes Airflow Worker configuration logic
Represents a kubernetes pod and manages execution of a single pod.
:param image: The docker image
:type image: str
:param envs: A dict containing the environment variables
:type envs: Dict[str, str]
:param cmds: The command to be run on the pod
:type cmds: List[str]
:param image_pull_policy: Specify a policy to cache or always pull an image
:type image_pull_policy: str
:param image_pull_secrets: Any image pull secrets to be given to the pod.
If more than one secret is required, provide a comma separated list:
secret_a,secret_b
:type image_pull_secrets: str
:param affinity: A dict containing a group of affinity scheduling rules
:type affinity: dict
:param hostnetwork: If True enable host networking on the pod
:type hostnetwork: bool
:param tolerations: A list of kubernetes tolerations
:type tolerations: list
:param security_context: A dict containing the security context for the pod
:type security_context: dict
:param configmaps: Any configmap refs to envfrom.
If more than one configmap is required, provide a comma separated list
configmap_a,configmap_b
:type configmaps: List[str]
:param dnspolicy: Specify a dnspolicy for the pod
:type dnspolicy: str
:param pod: The fully specified pod.
:type pod: kubernetes.client.models.V1Pod
"""
def __init__( # pylint: disable=too-many-arguments,too-many-locals
self,
image,
name=None,
namespace=None,
volume_mounts=None,
envs=None,
cmds=None,
args=None,
labels=None,
node_selectors=None,
ports=None,
volumes=None,
image_pull_policy='IfNotPresent',
restart_policy='Never',
image_pull_secrets=None,
init_containers=None,
service_account_name=None,
resources=None,
annotations=None,
affinity=None,
hostnetwork=False,
tolerations=None,
security_context=None,
configmaps=None,
dnspolicy=None,
pod=None,
extract_xcom=False,
):
self.ud_pod = pod
self.pod = k8s.V1Pod()
self.pod.api_version = 'v1'
self.pod.kind = 'Pod'
# Pod Metadata
self.metadata = k8s.V1ObjectMeta()
self.metadata.labels = labels
self.metadata.name = name + "-" + str(uuid.uuid4())[:8] if name else None
self.metadata.namespace = namespace
self.metadata.annotations = annotations
# Pod Container
self.container = k8s.V1Container(name='base')
self.container.image = image
self.container.env = []
if envs:
if isinstance(envs, dict):
for key, val in envs.items():
self.container.env.append(k8s.V1EnvVar(
name=key,
value=val
))
elif isinstance(envs, list):
self.container.env.extend(envs)
configmaps = configmaps or []
self.container.env_from = []
for configmap in configmaps:
self.container.env_from.append(k8s.V1EnvFromSource(
config_map_ref=k8s.V1ConfigMapEnvSource(
name=configmap
)
))
self.container.command = cmds or []
self.container.args = args or []
self.container.image_pull_policy = image_pull_policy
self.container.ports = ports or []
self.container.resources = resources
self.container.volume_mounts = volume_mounts or []
# Pod Spec
self.spec = k8s.V1PodSpec(containers=[])
self.spec.security_context = security_context
self.spec.tolerations = tolerations
self.spec.dns_policy = dnspolicy
self.spec.host_network = hostnetwork
self.spec.affinity = affinity
self.spec.service_account_name = service_account_name
self.spec.init_containers = init_containers
self.spec.volumes = volumes or []
self.spec.node_selector = node_selectors
self.spec.restart_policy = restart_policy
self.spec.image_pull_secrets = []
if image_pull_secrets:
for image_pull_secret in image_pull_secrets.split(','):
self.spec.image_pull_secrets.append(k8s.V1LocalObjectReference(
name=image_pull_secret
))
# Attach sidecar
self.extract_xcom = extract_xcom
def gen_pod(self) -> k8s.V1Pod:
"""Generates pod"""
result = self.ud_pod
if result is None:
result = self.pod
result.spec = self.spec
result.metadata = self.metadata
result.spec.containers = [self.container]
if self.extract_xcom:
result = self.add_sidecar(result)
return result
@staticmethod
def add_sidecar(pod: k8s.V1Pod) -> k8s.V1Pod:
"""Adds sidecar"""
pod_cp = copy.deepcopy(pod)
pod_cp.spec.volumes.insert(0, PodDefaults.VOLUME)
pod_cp.spec.containers[0].volume_mounts.insert(0, PodDefaults.VOLUME_MOUNT)
pod_cp.spec.containers.append(PodDefaults.SIDECAR_CONTAINER)
return pod_cp
@staticmethod
def from_obj(obj) -> k8s.V1Pod:
"""Converts to pod from obj"""
if obj is None:
return k8s.V1Pod()
if isinstance(obj, PodGenerator):
return obj.gen_pod()
if not isinstance(obj, dict):
raise TypeError(
'Cannot convert a non-dictionary or non-PodGenerator '
'object into a KubernetesExecutorConfig')
        # We do not want to extract the constant from ExecutorLoader here because it is just
        # a name in a dictionary rather than an executor selection mechanism, and importing it
        # would cause a cyclic import.
namespaced = obj.get("KubernetesExecutor", {})
resources = namespaced.get('resources')
if resources is None:
requests = {
'cpu': namespaced.get('request_cpu'),
'memory': namespaced.get('request_memory')
}
limits = {
'cpu': namespaced.get('limit_cpu'),
'memory': namespaced.get('limit_memory')
}
all_resources = list(requests.values()) + list(limits.values())
if all(r is None for r in all_resources):
resources = None
else:
resources = k8s.V1ResourceRequirements(
requests=requests,
limits=limits
)
annotations = namespaced.get('annotations', {})
gcp_service_account_key = namespaced.get('gcp_service_account_key', None)
if annotations is not None and gcp_service_account_key is not None:
annotations.update({
'iam.cloud.google.com/service-account': gcp_service_account_key
})
pod_spec_generator = PodGenerator(
image=namespaced.get('image'),
envs=namespaced.get('env'),
cmds=namespaced.get('cmds'),
args=namespaced.get('args'),
labels=namespaced.get('labels'),
node_selectors=namespaced.get('node_selectors'),
name=namespaced.get('name'),
ports=namespaced.get('ports'),
volumes=namespaced.get('volumes'),
volume_mounts=namespaced.get('volume_mounts'),
namespace=namespaced.get('namespace'),
image_pull_policy=namespaced.get('image_pull_policy'),
restart_policy=namespaced.get('restart_policy'),
image_pull_secrets=namespaced.get('image_pull_secrets'),
init_containers=namespaced.get('init_containers'),
service_account_name=namespaced.get('service_account_name'),
resources=resources,
annotations=namespaced.get('annotations'),
affinity=namespaced.get('affinity'),
hostnetwork=namespaced.get('hostnetwork'),
tolerations=namespaced.get('tolerations'),
security_context=namespaced.get('security_context'),
configmaps=namespaced.get('configmaps'),
dnspolicy=namespaced.get('dnspolicy'),
pod=namespaced.get('pod'),
extract_xcom=namespaced.get('extract_xcom'),
)
return pod_spec_generator.gen_pod()
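    # Illustrative shape of the dict consumed by from_obj (keys taken from the
    # lookups above; values are placeholders):
    #   {'KubernetesExecutor': {'image': 'busybox',
    #                           'request_cpu': '100m',
    #                           'limit_memory': '128Mi',
    #                           'annotations': {'example': 'true'}}}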
@staticmethod
def reconcile_pods(base_pod: k8s.V1Pod, client_pod: k8s.V1Pod) -> k8s.V1Pod:
"""
:param base_pod: has the base attributes which are overwritten if they exist
in the client pod and remain if they do not exist in the client_pod
:type base_pod: k8s.V1Pod
:param client_pod: the pod that the client wants to create.
:type client_pod: k8s.V1Pod
:return: the merged pods
This can't be done recursively as certain fields are preserved,
        some overwritten, and some concatenated, e.g. the command
        should be preserved from base, the volumes appended to, and
        the other fields overwritten.
"""
client_pod_cp = copy.deepcopy(client_pod)
def merge_objects(base_obj, client_obj):
for base_key in base_obj.to_dict().keys():
base_val = getattr(base_obj, base_key, None)
if not getattr(client_obj, base_key, None) and base_val:
setattr(client_obj, base_key, base_val)
def extend_object_field(base_obj, client_obj, field_name):
base_obj_field = getattr(base_obj, field_name, None)
client_obj_field = getattr(client_obj, field_name, None)
if not base_obj_field:
return
if not client_obj_field:
setattr(client_obj, field_name, base_obj_field)
return
appended_fields = base_obj_field + client_obj_field
setattr(client_obj, field_name, appended_fields)
# Values at the pod and metadata should be overwritten where they exist,
# but certain values at the spec and container level must be conserved.
base_container = base_pod.spec.containers[0]
client_container = client_pod_cp.spec.containers[0]
extend_object_field(base_container, client_container, 'volume_mounts')
extend_object_field(base_container, client_container, 'env')
extend_object_field(base_container, client_container, 'env_from')
extend_object_field(base_container, client_container, 'ports')
extend_object_field(base_container, client_container, 'volume_devices')
client_container.command = base_container.command
client_container.args = base_container.args
merge_objects(base_pod.spec.containers[0], client_pod_cp.spec.containers[0])
# Just append any additional containers from the base pod
client_pod_cp.spec.containers.extend(base_pod.spec.containers[1:])
merge_objects(base_pod.metadata, client_pod_cp.metadata)
extend_object_field(base_pod.spec, client_pod_cp.spec, 'volumes')
merge_objects(base_pod.spec, client_pod_cp.spec)
merge_objects(base_pod, client_pod_cp)
return client_pod_cp
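# Illustrative usage sketch (all literal values are placeholders): build a pod
# spec and serialize it through gen_pod().
#   pod = PodGenerator(
#       image='busybox',
#       name='example-task',
#       namespace='default',
#       cmds=['sh', '-c', 'echo hello'],
#       labels={'app': 'example'},
#   ).gen_pod()
#   # pod.metadata.name carries a random 8-character suffix, per __init__ above.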
| Fokko/incubator-airflow | airflow/kubernetes/pod_generator.py | Python | apache-2.0 | 13,165 |
import types
from stp_core.loop.eventually import eventually
from plenum.test import waits
from plenum.test.delayers import ppDelay, pDelay
from plenum.test.helper import sdk_send_random_request
from plenum.test.test_node import getNonPrimaryReplicas
def testOrderingWhenPrePrepareNotReceived(looper, txnPoolNodeSet,
sdk_wallet_client, sdk_pool_handle):
"""
    Send commits but delay pre-prepare and prepares such that enough
    commits are received. The request should not be ordered until the
    pre-prepare is received, and ordering should happen exactly once.
"""
delay = 10
non_prim_reps = getNonPrimaryReplicas(txnPoolNodeSet, 0)
slow_rep = non_prim_reps[0]
slow_node = slow_rep.node
slow_node.nodeIbStasher.delay(ppDelay(delay, 0))
slow_node.nodeIbStasher.delay(pDelay(delay, 0))
stash_pp = []
stash_p = []
orig_pp_method = slow_rep._ordering_service.process_preprepare
orig_p_method = slow_rep._ordering_service.process_prepare
def patched_pp(self, msg, sender):
stash_pp.append((msg, sender))
def patched_p(self, msg, sender):
stash_p.append((msg, sender))
slow_rep._ordering_service.process_preprepare = \
types.MethodType(patched_pp, slow_rep)
slow_rep._ordering_service.process_prepare = \
types.MethodType(patched_p, slow_rep)
def chk1():
assert len(slow_rep._ordering_service.commitsWaitingForPrepare) > 0
sdk_send_random_request(looper, sdk_pool_handle, sdk_wallet_client)
timeout = waits.expectedPrePrepareTime(len(txnPoolNodeSet)) + delay
looper.run(eventually(chk1, retryWait=1, timeout=timeout))
for m, s in stash_pp:
orig_pp_method(m, s)
for m, s in stash_p:
orig_p_method(m, s)
def chk2():
assert len(slow_rep._ordering_service.commitsWaitingForPrepare) == 0
assert slow_rep._ordering_service.spylog.count(slow_rep._ordering_service._do_order.__name__) == 1
timeout = waits.expectedOrderingTime(len(non_prim_reps) + 1) + 2 * delay
looper.run(eventually(chk2, retryWait=1, timeout=timeout))
| evernym/plenum | plenum/test/node_request/test_order/test_ordering_when_pre_prepare_not_received.py | Python | apache-2.0 | 2,139 |
import numpy as np
from scipy.ndimage import label
def generate_test_vecs(infile, strelfile, resultfile):
"test label with different structuring element neighborhoods"
def bitimage(l):
return np.array([[c for c in s] for s in l]) == '1'
data = [np.ones((7, 7)),
bitimage(["1110111",
"1100011",
"1010101",
"0001000",
"1010101",
"1100011",
"1110111"]),
bitimage(["1011101",
"0001000",
"1001001",
"1111111",
"1001001",
"0001000",
"1011101"])]
strels = [np.ones((3, 3)),
np.zeros((3, 3)),
bitimage(["010", "111", "010"]),
bitimage(["101", "010", "101"]),
bitimage(["100", "010", "001"]),
bitimage(["000", "111", "000"]),
bitimage(["110", "010", "011"]),
bitimage(["110", "111", "011"])]
strels = strels + [np.flipud(s) for s in strels]
strels = strels + [np.rot90(s) for s in strels]
strels = [np.fromstring(s, dtype=np.int).reshape((3, 3))
for s in set(t.astype(np.int).tostring() for t in strels)]
inputs = np.vstack(data)
results = np.vstack([label(d, s)[0] for d in data for s in strels])
strels = np.vstack(strels)
np.savetxt(infile, inputs, fmt="%d")
np.savetxt(strelfile, strels, fmt="%d")
np.savetxt(resultfile, results, fmt="%d")
generate_test_vecs("label_inputs.txt", "label_strels.txt", "label_results.txt")
| sargas/scipy | scipy/ndimage/utils/generate_label_testvectors.py | Python | bsd-3-clause | 1,677 |
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import re
import socket
import subprocess
import pyroute2
import webob
from octavia.amphorae.backends.agent import api_server
from octavia.amphorae.backends.agent.api_server import util
from octavia.amphorae.backends.utils import network_utils
from octavia.common import constants as consts
from octavia.common import exceptions
class AmphoraInfo(object):
def __init__(self, osutils):
self._osutils = osutils
def compile_amphora_info(self, extend_lvs_driver=None):
extend_body = {}
if extend_lvs_driver:
extend_body = self._get_extend_body_from_lvs_driver(
extend_lvs_driver)
body = {'hostname': socket.gethostname(),
'haproxy_version':
self._get_version_of_installed_package('haproxy'),
'api_version': api_server.VERSION}
if extend_body:
body.update(extend_body)
return webob.Response(json=body)
def compile_amphora_details(self, extend_lvs_driver=None):
haproxy_listener_list = sorted(util.get_listeners())
extend_body = {}
lvs_listener_list = []
if extend_lvs_driver:
lvs_listener_list = util.get_lvs_listeners()
extend_data = self._get_extend_body_from_lvs_driver(
extend_lvs_driver)
lvs_count = self._count_lvs_listener_processes(
extend_lvs_driver,
lvs_listener_list)
extend_body['lvs_listener_process_count'] = lvs_count
extend_body.update(extend_data)
meminfo = self._get_meminfo()
cpu = self._cpu()
st = os.statvfs('/')
body = {'hostname': socket.gethostname(),
'haproxy_version':
self._get_version_of_installed_package('haproxy'),
'api_version': api_server.VERSION,
'networks': self._get_networks(),
'active': True,
'haproxy_count':
self._count_haproxy_processes(haproxy_listener_list),
'cpu': {
'total': cpu['total'],
'user': cpu['user'],
'system': cpu['system'],
'soft_irq': cpu['softirq'], },
'memory': {
'total': meminfo['MemTotal'],
'free': meminfo['MemFree'],
'buffers': meminfo['Buffers'],
'cached': meminfo['Cached'],
'swap_used': meminfo['SwapCached'],
'shared': meminfo['Shmem'],
'slab': meminfo['Slab'], },
'disk': {
'used': (st.f_blocks - st.f_bfree) * st.f_frsize,
'available': st.f_bavail * st.f_frsize},
'load': self._load(),
'topology': consts.TOPOLOGY_SINGLE,
'topology_status': consts.TOPOLOGY_STATUS_OK,
'listeners': sorted(list(
set(haproxy_listener_list + lvs_listener_list)))
if lvs_listener_list else haproxy_listener_list,
'packages': {}}
if extend_body:
body.update(extend_body)
return webob.Response(json=body)
def _get_version_of_installed_package(self, name):
cmd = self._osutils.cmd_get_version_of_installed_package(name)
version = subprocess.check_output(cmd.split())
return version
def _count_haproxy_processes(self, lb_list):
num = 0
for lb_id in lb_list:
if util.is_lb_running(lb_id):
# optional check if it's still running
num += 1
return num
def _count_lvs_listener_processes(self, lvs_driver, listener_list):
num = 0
for listener_id in listener_list:
if lvs_driver.is_listener_running(listener_id):
# optional check if it's still running
num += 1
return num
def _get_extend_body_from_lvs_driver(self, extend_lvs_driver):
extend_info = extend_lvs_driver.get_subscribed_amp_compile_info()
extend_data = {}
for extend in extend_info:
package_version = self._get_version_of_installed_package(extend)
extend_data['%s_version' % extend] = package_version
return extend_data
def _get_meminfo(self):
re_parser = re.compile(r'^(?P<key>\S*):\s*(?P<value>\d*)\s*kB')
result = {}
with open('/proc/meminfo', 'r', encoding='utf-8') as meminfo:
for line in meminfo:
match = re_parser.match(line)
if not match:
continue # skip lines that don't parse
key, value = match.groups(['key', 'value'])
result[key] = int(value)
return result
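    # Example of a /proc/meminfo line handled by the regex above (the number is
    # illustrative):
    #   'MemTotal:        8167848 kB'  ->  result['MemTotal'] == 8167848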
def _cpu(self):
with open('/proc/stat', encoding='utf-8') as f:
cpu = f.readline()
vals = cpu.split(' ')
return {
'user': vals[2],
'nice': vals[3],
'system': vals[4],
'idle': vals[5],
'iowait': vals[6],
'irq': vals[7],
'softirq': vals[8],
'total': sum([int(i) for i in vals[2:]])
}
def _load(self):
with open('/proc/loadavg', encoding='utf-8') as f:
load = f.readline()
vals = load.split(' ')
return vals[:3]
def _get_networks(self):
networks = {}
with pyroute2.NetNS(consts.AMPHORA_NAMESPACE) as netns:
for interface in netns.get_links():
interface_name = None
for item in interface['attrs']:
if (item[0] == 'IFLA_IFNAME' and
not item[1].startswith('eth')):
break
if item[0] == 'IFLA_IFNAME':
interface_name = item[1]
if item[0] == 'IFLA_STATS64':
networks[interface_name] = dict(
network_tx=item[1]['tx_bytes'],
network_rx=item[1]['rx_bytes'])
return networks
def get_interface(self, ip_addr):
try:
interface = network_utils.get_interface_name(
ip_addr, net_ns=consts.AMPHORA_NAMESPACE)
except exceptions.InvalidIPAddress:
return webob.Response(json=dict(message="Invalid IP address"),
status=400)
except exceptions.NotFound:
return webob.Response(
json=dict(message="Error interface not found for IP address"),
status=404)
return webob.Response(json=dict(message='OK', interface=interface),
status=200)
| openstack/octavia | octavia/amphorae/backends/agent/api_server/amphora_info.py | Python | apache-2.0 | 7,469 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import tabs
from horizon.utils import memoized
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.volumes.backups \
import forms as backup_forms
from openstack_dashboard.dashboards.project.volumes.backups \
import tables as backup_tables
from openstack_dashboard.dashboards.project.volumes.backups \
import tabs as backup_tabs
class CreateBackupView(forms.ModalFormView):
form_class = backup_forms.CreateBackupForm
modal_header = _("Create Volume Backup")
template_name = 'project/volumes/backups/create_backup.html'
submit_label = _("Create Volume Backup")
submit_url = "horizon:project:volumes:volumes:create_backup"
success_url = reverse_lazy("horizon:project:volumes:backups_tab")
page_title = _("Create a Volume Backup")
def get_context_data(self, **kwargs):
context = super(CreateBackupView, self).get_context_data(**kwargs)
context['volume_id'] = self.kwargs['volume_id']
args = (self.kwargs['volume_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
return context
def get_initial(self):
return {"volume_id": self.kwargs["volume_id"]}
class BackupDetailView(tabs.TabView):
tab_group_class = backup_tabs.BackupDetailTabs
template_name = 'horizon/common/_detail.html'
page_title = "{{ backup.name|default:backup.id }}"
def get_context_data(self, **kwargs):
context = super(BackupDetailView, self).get_context_data(**kwargs)
backup = self.get_data()
table = backup_tables.BackupsTable(self.request)
context["backup"] = backup
context["url"] = self.get_redirect_url()
context["actions"] = table.render_row_actions(backup)
return context
@memoized.memoized_method
def get_data(self):
try:
backup_id = self.kwargs['backup_id']
backup = api.cinder.volume_backup_get(self.request,
backup_id)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve backup details.'),
redirect=self.get_redirect_url())
return backup
def get_tabs(self, request, *args, **kwargs):
backup = self.get_data()
return self.tab_group_class(request, backup=backup, **kwargs)
@staticmethod
def get_redirect_url():
return reverse('horizon:project:volumes:index')
class RestoreBackupView(forms.ModalFormView):
form_class = backup_forms.RestoreBackupForm
modal_header = _("Restore Volume Backup")
template_name = 'project/volumes/backups/restore_backup.html'
submit_label = _("Restore Backup to Volume")
submit_url = "horizon:project:volumes:backups:restore"
success_url = reverse_lazy('horizon:project:volumes:index')
page_title = _("Restore a Volume Backup")
def get_context_data(self, **kwargs):
context = super(RestoreBackupView, self).get_context_data(**kwargs)
context['backup_id'] = self.kwargs['backup_id']
args = (self.kwargs['backup_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
return context
def get_initial(self):
backup_id = self.kwargs['backup_id']
backup_name = self.request.GET.get('backup_name')
volume_id = self.request.GET.get('volume_id')
return {
'backup_id': backup_id,
'backup_name': backup_name,
'volume_id': volume_id,
}
| ankur-gupta91/horizon-net-ip | openstack_dashboard/dashboards/project/volumes/backups/views.py | Python | apache-2.0 | 4,315 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
GridNearest.py
---------------------
Date : October 2013
Copyright : (C) 2013 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'October 2013'
__copyright__ = '(C) 2013, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from PyQt4.QtGui import *
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.core.parameters import ParameterVector
from processing.core.parameters import ParameterTableField
from processing.core.parameters import ParameterNumber
from processing.core.parameters import ParameterSelection
from processing.core.outputs import OutputRaster
from processing.algs.gdal.GdalUtils import GdalUtils
from processing.tools.system import *
class GridNearest(GdalAlgorithm):
INPUT = 'INPUT'
Z_FIELD = 'Z_FIELD'
RADIUS_1 = 'RADIUS_1'
RADIUS_2 = 'RADIUS_2'
ANGLE = 'ANGLE'
NODATA = 'NODATA'
OUTPUT = 'OUTPUT'
RTYPE = 'RTYPE'
TYPE = ['Byte','Int16','UInt16','UInt32','Int32','Float32','Float64','CInt16','CInt32','CFloat32','CFloat64']
def commandLineName(self):
return "gdalogr:gridnearestneighbor"
def defineCharacteristics(self):
self.name = 'Grid (Nearest neighbor)'
self.group = '[GDAL] Analysis'
self.addParameter(ParameterVector(self.INPUT, 'Input layer',
[ParameterVector.VECTOR_TYPE_POINT]))
self.addParameter(ParameterTableField(self.Z_FIELD, 'Z field',
self.INPUT, ParameterTableField.DATA_TYPE_NUMBER,
True))
self.addParameter(ParameterNumber(self.RADIUS_1, 'Radius 1',
0.0, 99999999.999999, 0.0))
self.addParameter(ParameterNumber(self.RADIUS_2, 'Radius 2',
0.0, 99999999.999999, 0.0))
self.addParameter(ParameterNumber(self.ANGLE, 'Angle',
0.0, 359.0, 0.0))
self.addParameter(ParameterNumber(self.NODATA, 'Nodata',
0.0, 99999999.999999, 0.0))
self.addParameter(ParameterSelection(self.RTYPE, 'Output raster type',
self.TYPE, 5))
self.addOutput(OutputRaster(self.OUTPUT, 'Output file'))
def processAlgorithm(self, progress):
arguments = ['-l']
arguments.append(
os.path.basename(os.path.splitext(
unicode(self.getParameterValue(self.INPUT)))[0]))
fieldName = self.getParameterValue(self.Z_FIELD)
if fieldName is not None and fieldName != '':
arguments.append('-zfield')
arguments.append(fieldName)
params = 'nearest'
params += ':radius1=%s' % self.getParameterValue(self.RADIUS_1)
params += ':radius2=%s' % self.getParameterValue(self.RADIUS_2)
params += ':angle=%s' % self.getParameterValue(self.ANGLE)
params += ':nodata=%s' % self.getParameterValue(self.NODATA)
arguments.append('-a')
arguments.append(params)
arguments.append('-ot')
arguments.append(self.TYPE[self.getParameterValue(self.RTYPE)])
arguments.append(unicode(self.getParameterValue(self.INPUT)))
arguments.append(unicode(self.getOutputValue(self.OUTPUT)))
GdalUtils.runGdal(['gdal_grid',
GdalUtils.escapeAndJoin(arguments)], progress)
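        # Illustrative only (layer and file names are assumptions): for a point layer
        # "wells.shp" with Z field "depth" and the default parameter values, the arguments
        # assembled above correspond roughly to the command line
        #   gdal_grid -l wells -zfield depth -a nearest:radius1=0.0:radius2=0.0:angle=0.0:nodata=0.0 -ot Float32 wells.shp output.tif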
| yordan-desta/QgisIns | python/plugins/processing/algs/gdal/GridNearest.py | Python | gpl-2.0 | 4,205 |
#
# Copyright (C) 2015 tknorris (Derived from Mikey1234's & Lambda's)
#
# This Program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This Program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with XBMC; see the file COPYING. If not, write to
# the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
# http://www.gnu.org/copyleft/gpl.html
#
# This code is a derivative of the YouTube plugin for XBMC and associated works
# released under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 3
import re
import urllib2
import urllib
import urlparse
import log_utils
import xbmc
from constants import USER_AGENT
MAX_TRIES = 3
logger = log_utils.Logger.get_logger(__name__)
logger.disable()
class NoRedirection(urllib2.HTTPErrorProcessor):
def http_response(self, request, response): # @UnusedVariable
logger.log('Stopping Redirect', log_utils.LOGDEBUG)
return response
https_response = http_response
def solve_equation(equation):
try:
offset = 1 if equation[0] == '+' else 0
return int(eval(equation.replace('!+[]', '1').replace('!![]', '1').replace('[]', '0').replace('(', 'str(')[offset:]))
except:
pass
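# Illustrative example (not from the original source): the Cloudflare challenge encodes
# integers with JSFuck-style arithmetic in which '!+[]' and '!![]' evaluate to 1, '[]' to 0,
# and parentheses become string concatenation. Under the substitutions above,
#   solve_equation('+((!+[]+!![]+!![])+(!+[]+!![]))')
# evaluates str(str(1+1+1) + str(1+1)) == '32' and returns 32.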
def solve(url, cj, user_agent=None, wait=True, extra_headers=None):
if extra_headers is None: extra_headers = {}
if user_agent is None: user_agent = USER_AGENT
headers = {'User-Agent': user_agent, 'Referer': url}
if cj is not None:
try: cj.load(ignore_discard=True)
except: pass
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
urllib2.install_opener(opener)
request = urllib2.Request(url)
for key in headers: request.add_header(key, headers[key])
try:
response = urllib2.urlopen(request)
html = response.read()
except urllib2.HTTPError as e:
html = e.read()
tries = 0
while tries < MAX_TRIES:
solver_pattern = 'var (?:s,t,o,p,b,r,e,a,k,i,n,g|t,r,a),f,\s*([^=]+)={"([^"]+)":([^}]+)};.+challenge-form\'\);.*?\n.*?;(.*?);a\.value'
vc_pattern = 'input type="hidden" name="jschl_vc" value="([^"]+)'
pass_pattern = 'input type="hidden" name="pass" value="([^"]+)'
init_match = re.search(solver_pattern, html, re.DOTALL)
vc_match = re.search(vc_pattern, html)
pass_match = re.search(pass_pattern, html)
if not init_match or not vc_match or not pass_match:
logger.log("Couldn't find attribute: init: |%s| vc: |%s| pass: |%s| No cloudflare check?" % (init_match, vc_match, pass_match), log_utils.LOGWARNING)
return False
init_dict, init_var, init_equation, equations = init_match.groups()
vc = vc_match.group(1)
password = pass_match.group(1)
# logger.log("VC is: %s" % (vc), xbmc.LOGDEBUG)
varname = (init_dict, init_var)
result = int(solve_equation(init_equation.rstrip()))
logger.log('Initial value: |%s| Result: |%s|' % (init_equation, result), log_utils.LOGDEBUG)
for equation in equations.split(';'):
equation = equation.rstrip()
if equation[:len('.'.join(varname))] != '.'.join(varname):
logger.log('Equation does not start with varname |%s|' % (equation), log_utils.LOGDEBUG)
else:
equation = equation[len('.'.join(varname)):]
expression = equation[2:]
operator = equation[0]
if operator not in ['+', '-', '*', '/']:
logger.log('Unknown operator: |%s|' % (equation), log_utils.LOGWARNING)
continue
result = int(str(eval(str(result) + operator + str(solve_equation(expression)))))
logger.log('intermediate: %s = %s' % (equation, result), log_utils.LOGDEBUG)
scheme = urlparse.urlparse(url).scheme
domain = urlparse.urlparse(url).hostname
result += len(domain)
logger.log('Final Result: |%s|' % (result), log_utils.LOGDEBUG)
if wait:
logger.log('Sleeping for 5 Seconds', log_utils.LOGDEBUG)
xbmc.sleep(5000)
url = '%s://%s/cdn-cgi/l/chk_jschl?jschl_vc=%s&jschl_answer=%s&pass=%s' % (scheme, domain, vc, result, urllib.quote(password))
logger.log('url: |%s| headers: |%s|' % (url, headers), log_utils.LOGDEBUG)
request = urllib2.Request(url)
for key in headers: request.add_header(key, headers[key])
try:
opener = urllib2.build_opener(NoRedirection)
urllib2.install_opener(opener)
response = urllib2.urlopen(request)
while response.getcode() in [301, 302, 303, 307]:
if cj is not None:
cj.extract_cookies(response, request)
redir_url = response.info().getheader('location')
if not redir_url.startswith('http'):
base_url = '%s://%s' % (scheme, domain)
redir_url = urlparse.urljoin(base_url, redir_url)
request = urllib2.Request(redir_url)
headers.update(extra_headers)
for key in headers: request.add_header(key, headers[key])
if cj is not None:
cj.add_cookie_header(request)
logger.log('redir url: |%s| headers: |%s|' % (redir_url, headers), log_utils.LOGDEBUG)
response = urllib2.urlopen(request)
final = response.read()
if 'cf-browser-verification' in final:
logger.log('CF Failure: html: %s url: %s' % (html, url), log_utils.LOGWARNING)
tries += 1
html = final
else:
break
except urllib2.HTTPError as e:
logger.log('CloudFlare HTTP Error: %s on url: %s' % (e.code, url), log_utils.LOGWARNING)
return False
except urllib2.URLError as e:
logger.log('CloudFlare URLError Error: %s on url: %s' % (e, url), log_utils.LOGWARNING)
return False
if cj is not None:
cj.save()
return final
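# Minimal usage sketch (illustrative; the cookie file name and URL are assumptions):
#   import cookielib
#   cj = cookielib.LWPCookieJar('cf_cookies.lwp')
#   html = solve('http://example.com/', cj)  # page HTML on success, False on failure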
| repotvsupertuga/tvsupertuga.repository | script.vodextende/salts_lib/cloudflare.py | Python | gpl-2.0 | 6,746 |
"""
MySQL database backend for Django.
Requires MySQLdb: http://sourceforge.net/projects/mysql-python
"""
import datetime
import re
import sys
import warnings
try:
import MySQLdb as Database
except ImportError, e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading MySQLdb module: %s" % e)
# We want version (1, 2, 1, 'final', 2) or later. We can't just use
# lexicographic ordering in this check because then (1, 2, 1, 'gamma')
# inadvertently passes the version test.
version = Database.version_info
if (version < (1,2,1) or (version[:3] == (1, 2, 1) and
(len(version) < 5 or version[3] != 'final' or version[4] < 2))):
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("MySQLdb-1.2.1p2 or newer is required; you have %s" % Database.__version__)
from MySQLdb.converters import conversions, Thing2Literal
from MySQLdb.constants import FIELD_TYPE, CLIENT
from django.db import utils
from django.db.backends import *
from django.db.backends.signals import connection_created
from django.db.backends.mysql.client import DatabaseClient
from django.db.backends.mysql.creation import DatabaseCreation
from django.db.backends.mysql.introspection import DatabaseIntrospection
from django.db.backends.mysql.validation import DatabaseValidation
from django.utils.safestring import SafeString, SafeUnicode
from django.utils import timezone
# Raise exceptions for database warnings if DEBUG is on
from django.conf import settings
if settings.DEBUG:
warnings.filterwarnings("error", category=Database.Warning)
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
# It's impossible to import datetime_or_None directly from MySQLdb.times
parse_datetime = conversions[FIELD_TYPE.DATETIME]
def parse_datetime_with_timezone_support(value):
dt = parse_datetime(value)
# Confirm that dt is naive before overwriting its tzinfo.
if dt is not None and settings.USE_TZ and timezone.is_naive(dt):
dt = dt.replace(tzinfo=timezone.utc)
return dt
def adapt_datetime_with_timezone_support(value, conv):
# Equivalent to DateTimeField.get_db_prep_value. Used only by raw SQL.
if settings.USE_TZ:
if timezone.is_naive(value):
warnings.warn(u"SQLite received a naive datetime (%s)"
u" while time zone support is active." % value,
RuntimeWarning)
default_timezone = timezone.get_default_timezone()
value = timezone.make_aware(value, default_timezone)
value = value.astimezone(timezone.utc).replace(tzinfo=None)
return Thing2Literal(value.strftime("%Y-%m-%d %H:%M:%S"), conv)
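# Illustrative example (assumed values): with USE_TZ = True, an aware value such as
# datetime.datetime(2012, 1, 1, 12, 30, tzinfo=timezone.utc) is converted to naive UTC and
# handed to MySQL as the quoted literal '2012-01-01 12:30:00'.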
# MySQLdb-1.2.1 returns TIME columns as timedelta -- they are more like
# timedelta in terms of actual behavior as they are signed and include days --
# and Django expects time, so we still need to override that. We also need to
# add special handling for SafeUnicode and SafeString as MySQLdb's type
# checking is too tight to catch those (see Django ticket #6052).
# Finally, MySQLdb always returns naive datetime objects. However, when
# timezone support is active, Django expects timezone-aware datetime objects.
django_conversions = conversions.copy()
django_conversions.update({
FIELD_TYPE.TIME: util.typecast_time,
FIELD_TYPE.DECIMAL: util.typecast_decimal,
FIELD_TYPE.NEWDECIMAL: util.typecast_decimal,
FIELD_TYPE.DATETIME: parse_datetime_with_timezone_support,
datetime.datetime: adapt_datetime_with_timezone_support,
})
# This should match the numerical portion of the version numbers (we can treat
# versions like 5.0.24 and 5.0.24a as the same). Based on the list of version
# at http://dev.mysql.com/doc/refman/4.1/en/news.html and
# http://dev.mysql.com/doc/refman/5.0/en/news.html .
server_version_re = re.compile(r'(\d{1,2})\.(\d{1,2})\.(\d{1,2})')
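# For example (illustrative version string): server_version_re.match('5.5.40-0ubuntu0.12.04.1')
# yields the groups ('5', '5', '40'), which get_server_version() below turns into (5, 5, 40).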
# MySQLdb-1.2.1 and newer automatically makes use of SHOW WARNINGS on
# MySQL-4.1 and newer, so the MysqlDebugWrapper is unnecessary. Since the
# point is to raise Warnings as exceptions, this can be done with the Python
# warning module, and this is setup when the connection is created, and the
# standard util.CursorDebugWrapper can be used. Also, using sql_mode
# TRADITIONAL will automatically cause most warnings to be treated as errors.
class CursorWrapper(object):
"""
A thin wrapper around MySQLdb's normal cursor class so that we can catch
particular exception instances and reraise them with the right types.
Implemented as a wrapper, rather than a subclass, so that we aren't stuck
to the particular underlying representation returned by Connection.cursor().
"""
codes_for_integrityerror = (1048,)
def __init__(self, cursor):
self.cursor = cursor
def execute(self, query, args=None):
try:
return self.cursor.execute(query, args)
except Database.IntegrityError, e:
raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2]
except Database.OperationalError, e:
# Map some error codes to IntegrityError, since they seem to be
# misclassified and Django would prefer the more logical place.
if e[0] in self.codes_for_integrityerror:
raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2]
raise utils.DatabaseError, utils.DatabaseError(*tuple(e)), sys.exc_info()[2]
except Database.DatabaseError, e:
raise utils.DatabaseError, utils.DatabaseError(*tuple(e)), sys.exc_info()[2]
def executemany(self, query, args):
try:
return self.cursor.executemany(query, args)
except Database.IntegrityError, e:
raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2]
except Database.OperationalError, e:
# Map some error codes to IntegrityError, since they seem to be
# misclassified and Django would prefer the more logical place.
if e[0] in self.codes_for_integrityerror:
raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2]
raise utils.DatabaseError, utils.DatabaseError(*tuple(e)), sys.exc_info()[2]
except Database.DatabaseError, e:
raise utils.DatabaseError, utils.DatabaseError(*tuple(e)), sys.exc_info()[2]
def __getattr__(self, attr):
if attr in self.__dict__:
return self.__dict__[attr]
else:
return getattr(self.cursor, attr)
def __iter__(self):
return iter(self.cursor)
class DatabaseFeatures(BaseDatabaseFeatures):
empty_fetchmany_value = ()
update_can_self_select = False
allows_group_by_pk = True
related_fields_match_type = True
allow_sliced_subqueries = False
has_bulk_insert = True
has_select_for_update = True
has_select_for_update_nowait = False
supports_forward_references = False
supports_long_model_names = False
supports_microsecond_precision = False
supports_regex_backreferencing = False
supports_date_lookup_using_string = False
supports_timezones = False
requires_explicit_null_ordering_when_grouping = True
allows_primary_key_0 = False
def __init__(self, connection):
super(DatabaseFeatures, self).__init__(connection)
self._storage_engine = None
def _mysql_storage_engine(self):
"Internal method used in Django tests. Don't rely on this from your code"
if self._storage_engine is None:
cursor = self.connection.cursor()
cursor.execute('CREATE TABLE INTROSPECT_TEST (X INT)')
# This command is MySQL specific; the second column
# will tell you the default table type of the created
# table. Since all Django's test tables will have the same
# table type, that's enough to evaluate the feature.
cursor.execute("SHOW TABLE STATUS LIKE 'INTROSPECT_TEST'")
result = cursor.fetchone()
cursor.execute('DROP TABLE INTROSPECT_TEST')
self._storage_engine = result[1]
return self._storage_engine
def _can_introspect_foreign_keys(self):
"Confirm support for introspected foreign keys"
return self._mysql_storage_engine() != 'MyISAM'
class DatabaseOperations(BaseDatabaseOperations):
compiler_module = "django.db.backends.mysql.compiler"
def date_extract_sql(self, lookup_type, field_name):
# http://dev.mysql.com/doc/mysql/en/date-and-time-functions.html
if lookup_type == 'week_day':
# DAYOFWEEK() returns an integer, 1-7, Sunday=1.
# Note: WEEKDAY() returns 0-6, Monday=0.
return "DAYOFWEEK(%s)" % field_name
else:
return "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name)
def date_trunc_sql(self, lookup_type, field_name):
fields = ['year', 'month', 'day', 'hour', 'minute', 'second']
format = ('%%Y-', '%%m', '-%%d', ' %%H:', '%%i', ':%%s') # Use double percents to escape.
format_def = ('0000-', '01', '-01', ' 00:', '00', ':00')
try:
i = fields.index(lookup_type) + 1
except ValueError:
sql = field_name
else:
format_str = ''.join([f for f in format[:i]] + [f for f in format_def[i:]])
sql = "CAST(DATE_FORMAT(%s, '%s') AS DATETIME)" % (field_name, format_str)
return sql
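    # Illustrative example: date_trunc_sql('month', 'created') returns
    # "CAST(DATE_FORMAT(created, '%%Y-%%m-01 00:00:00') AS DATETIME)"; the doubled percents
    # survive until the query parameters are interpolated later.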
def date_interval_sql(self, sql, connector, timedelta):
return "(%s %s INTERVAL '%d 0:0:%d:%d' DAY_MICROSECOND)" % (sql, connector,
timedelta.days, timedelta.seconds, timedelta.microseconds)
def drop_foreignkey_sql(self):
return "DROP FOREIGN KEY"
def force_no_ordering(self):
"""
"ORDER BY NULL" prevents MySQL from implicitly ordering by grouped
columns. If no ordering would otherwise be applied, we don't want any
implicit sorting going on.
"""
return ["NULL"]
def fulltext_search_sql(self, field_name):
return 'MATCH (%s) AGAINST (%%s IN BOOLEAN MODE)' % field_name
def last_executed_query(self, cursor, sql, params):
# With MySQLdb, cursor objects have an (undocumented) "_last_executed"
# attribute where the exact query sent to the database is saved.
# See MySQLdb/cursors.py in the source distribution.
return cursor._last_executed
def no_limit_value(self):
# 2**64 - 1, as recommended by the MySQL documentation
return 18446744073709551615L
def quote_name(self, name):
if name.startswith("`") and name.endswith("`"):
return name # Quoting once is enough.
return "`%s`" % name
def random_function_sql(self):
return 'RAND()'
def sql_flush(self, style, tables, sequences):
# NB: The generated SQL below is specific to MySQL
# 'TRUNCATE x;', 'TRUNCATE y;', 'TRUNCATE z;'... style SQL statements
# to clear all tables of all data
if tables:
sql = ['SET FOREIGN_KEY_CHECKS = 0;']
for table in tables:
sql.append('%s %s;' % (style.SQL_KEYWORD('TRUNCATE'), style.SQL_FIELD(self.quote_name(table))))
sql.append('SET FOREIGN_KEY_CHECKS = 1;')
# 'ALTER TABLE table AUTO_INCREMENT = 1;'... style SQL statements
# to reset sequence indices
sql.extend(["%s %s %s %s %s;" % \
(style.SQL_KEYWORD('ALTER'),
style.SQL_KEYWORD('TABLE'),
style.SQL_TABLE(self.quote_name(sequence['table'])),
style.SQL_KEYWORD('AUTO_INCREMENT'),
style.SQL_FIELD('= 1'),
) for sequence in sequences])
return sql
else:
return []
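    # Illustrative output (table names assumed): for tables ['app_a', 'app_b'] and no sequences,
    # this yields ['SET FOREIGN_KEY_CHECKS = 0;', 'TRUNCATE `app_a`;', 'TRUNCATE `app_b`;',
    # 'SET FOREIGN_KEY_CHECKS = 1;'], modulo the style object's SQL colouring.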
def value_to_db_datetime(self, value):
if value is None:
return None
# MySQL doesn't support tz-aware datetimes
if timezone.is_aware(value):
if settings.USE_TZ:
value = value.astimezone(timezone.utc).replace(tzinfo=None)
else:
raise ValueError("MySQL backend does not support timezone-aware datetimes when USE_TZ is False.")
# MySQL doesn't support microseconds
return unicode(value.replace(microsecond=0))
def value_to_db_time(self, value):
if value is None:
return None
# MySQL doesn't support tz-aware times
if timezone.is_aware(value):
raise ValueError("MySQL backend does not support timezone-aware times.")
# MySQL doesn't support microseconds
return unicode(value.replace(microsecond=0))
def year_lookup_bounds(self, value):
# Again, no microseconds
first = '%s-01-01 00:00:00'
second = '%s-12-31 23:59:59.99'
return [first % value, second % value]
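    # Illustrative example: year_lookup_bounds(2012) ->
    # ['2012-01-01 00:00:00', '2012-12-31 23:59:59.99'].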
def max_name_length(self):
return 64
def bulk_insert_sql(self, fields, num_values):
items_sql = "(%s)" % ", ".join(["%s"] * len(fields))
return "VALUES " + ", ".join([items_sql] * num_values)
def savepoint_create_sql(self, sid):
return "SAVEPOINT %s" % sid
def savepoint_commit_sql(self, sid):
return "RELEASE SAVEPOINT %s" % sid
def savepoint_rollback_sql(self, sid):
return "ROLLBACK TO SAVEPOINT %s" % sid
class DatabaseWrapper(BaseDatabaseWrapper):
vendor = 'mysql'
operators = {
'exact': '= %s',
'iexact': 'LIKE %s',
'contains': 'LIKE BINARY %s',
'icontains': 'LIKE %s',
'regex': 'REGEXP BINARY %s',
'iregex': 'REGEXP %s',
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': 'LIKE BINARY %s',
'endswith': 'LIKE BINARY %s',
'istartswith': 'LIKE %s',
'iendswith': 'LIKE %s',
}
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
self.server_version = None
self.features = DatabaseFeatures(self)
self.ops = DatabaseOperations(self)
self.client = DatabaseClient(self)
self.creation = DatabaseCreation(self)
self.introspection = DatabaseIntrospection(self)
self.validation = DatabaseValidation(self)
def _valid_connection(self):
if self.connection is not None:
try:
self.connection.ping()
return True
except DatabaseError:
self.connection.close()
self.connection = None
return False
def _cursor(self):
if not self._valid_connection():
kwargs = {
'conv': django_conversions,
'charset': 'utf8',
'use_unicode': True,
}
settings_dict = self.settings_dict
if settings_dict['USER']:
kwargs['user'] = settings_dict['USER']
if settings_dict['NAME']:
kwargs['db'] = settings_dict['NAME']
if settings_dict['PASSWORD']:
kwargs['passwd'] = settings_dict['PASSWORD']
if settings_dict['HOST'].startswith('/'):
kwargs['unix_socket'] = settings_dict['HOST']
elif settings_dict['HOST']:
kwargs['host'] = settings_dict['HOST']
if settings_dict['PORT']:
kwargs['port'] = int(settings_dict['PORT'])
# We need the number of potentially affected rows after an
# "UPDATE", not the number of changed rows.
kwargs['client_flag'] = CLIENT.FOUND_ROWS
kwargs.update(settings_dict['OPTIONS'])
self.connection = Database.connect(**kwargs)
self.connection.encoders[SafeUnicode] = self.connection.encoders[unicode]
self.connection.encoders[SafeString] = self.connection.encoders[str]
self.features.uses_savepoints = \
self.get_server_version() >= (5, 0, 3)
connection_created.send(sender=self.__class__, connection=self)
cursor = self.connection.cursor()
return CursorWrapper(cursor)
def _rollback(self):
try:
BaseDatabaseWrapper._rollback(self)
except Database.NotSupportedError:
pass
def get_server_version(self):
if not self.server_version:
new_connection = False
if not self._valid_connection():
# Ensure we have a connection with the DB by using a temporary
# cursor
new_connection = True
self.cursor().close()
server_info = self.connection.get_server_info()
if new_connection:
# Make sure we close the connection
self.connection.close()
self.connection = None
m = server_version_re.match(server_info)
if not m:
raise Exception('Unable to determine MySQL version from version string %r' % server_info)
self.server_version = tuple([int(x) for x in m.groups()])
return self.server_version
def disable_constraint_checking(self):
"""
Disables foreign key checks, primarily for use in adding rows with forward references. Always returns True,
to indicate constraint checks need to be re-enabled.
"""
self.cursor().execute('SET foreign_key_checks=0')
return True
def enable_constraint_checking(self):
"""
Re-enable foreign key checks after they have been disabled.
"""
self.cursor().execute('SET foreign_key_checks=1')
def check_constraints(self, table_names=None):
"""
Checks each table name in `table_names` for rows with invalid foreign key references. This method is
intended to be used in conjunction with `disable_constraint_checking()` and `enable_constraint_checking()`, to
determine if rows with invalid references were entered while constraint checks were off.
Raises an IntegrityError on the first invalid foreign key reference encountered (if any) and provides
detailed information about the invalid reference in the error message.
Backends can override this method if they can more directly apply constraint checking (e.g. via "SET CONSTRAINTS
ALL IMMEDIATE")
"""
cursor = self.cursor()
if table_names is None:
table_names = self.introspection.get_table_list(cursor)
for table_name in table_names:
primary_key_column_name = self.introspection.get_primary_key_column(cursor, table_name)
if not primary_key_column_name:
continue
key_columns = self.introspection.get_key_columns(cursor, table_name)
for column_name, referenced_table_name, referenced_column_name in key_columns:
cursor.execute("""
SELECT REFERRING.`%s`, REFERRING.`%s` FROM `%s` as REFERRING
LEFT JOIN `%s` as REFERRED
ON (REFERRING.`%s` = REFERRED.`%s`)
WHERE REFERRING.`%s` IS NOT NULL AND REFERRED.`%s` IS NULL"""
% (primary_key_column_name, column_name, table_name, referenced_table_name,
column_name, referenced_column_name, column_name, referenced_column_name))
for bad_row in cursor.fetchall():
raise utils.IntegrityError("The row in table '%s' with primary key '%s' has an invalid "
"foreign key: %s.%s contains a value '%s' that does not have a corresponding value in %s.%s."
% (table_name, bad_row[0],
table_name, column_name, bad_row[1],
referenced_table_name, referenced_column_name))
| smartfile/django-1.4 | django/db/backends/mysql/base.py | Python | bsd-3-clause | 19,979 |
import logging
import pinylib
log = logging.getLogger(__name__)
def main():
room_name = raw_input('Enter room name: ').strip()
if pinylib.CONFIG.ACCOUNT and pinylib.CONFIG.PASSWORD:
client = pinylib.TinychatRTMPClient(roomname=room_name, account=pinylib.CONFIG.ACCOUNT,
password=pinylib.CONFIG.PASSWORD)
else:
client = pinylib.TinychatRTMPClient(roomname=room_name)
client.nickname = raw_input('Enter nick name (optional): ').strip()
do_login = raw_input('Login? [enter=No] ')
if do_login:
if not client.account:
client.account = raw_input('Account: ').strip()
if not client.password:
client.password = raw_input('Password: ')
is_logged_in = client.login()
while not is_logged_in:
client.account = raw_input('Account: ').strip()
client.password = raw_input('Password: ')
if client.account == '/' or client.password == '/':
main()
break
elif client.account == '//' or client.password == '//':
do_login = False
break
else:
is_logged_in = client.login()
if is_logged_in:
client.console_write(pinylib.COLOR['bright_green'], 'Logged in as: %s' % client.account)
if not do_login:
client.account = ''
client.password = None
status = client.set_rtmp_parameters()
while True:
if status == 1:
client.console_write(pinylib.COLOR['bright_red'], 'Password protected. Enter room password')
client.room_pass = raw_input()
if client.room_pass == '/':
main()
break
else:
status = client.set_rtmp_parameters()
elif status == 2:
client.console_write(pinylib.COLOR['bright_red'], 'The room has been closed.')
main()
break
elif status == 4:
client.console_write(pinylib.COLOR['bright_red'], 'The response returned nothing.')
main()
break
else:
client.console_write(pinylib.COLOR['bright_green'], 'Connect parameters set.')
break
t = pinylib.threading.Thread(target=client.connect)
t.daemon = True
t.start()
while not client.is_connected:
pinylib.time.sleep(2)
while client.is_connected:
chat_msg = raw_input()
if chat_msg.startswith('/'):
msg_parts = chat_msg.split(' ')
cmd = msg_parts[0].lower().strip()
if cmd == '/q':
client.disconnect()
if client.is_green_connected:
client.disconnect(greenroom=True)
elif cmd == '/a':
if len(client.users.signed_in) == 0:
print ('No signed in users in the room.')
else:
for user in client.users.signed_in:
print ('%s:%s' % (user.nick, user.account))
elif cmd == '/u':
for user in client.users.all:
print ('%s: %s' % (user, client.users.all[user].user_level))
elif cmd == '/m':
if len(client.users.mods) == 0:
print ('No moderators in the room.')
else:
for mod in client.users.mods:
print (mod.nick)
elif cmd == '/n':
if len(client.users.norms) == 0:
print ('No normal users in the room.')
else:
for norm in client.users.norms:
print (norm.nick)
elif cmd == '/l':
if len(client.users.lurkers) == 0:
print ('No lurkers in the room.')
else:
for lurker in client.users.lurkers:
print (lurker.nick)
else:
client.send_chat_msg(chat_msg)
if __name__ == '__main__':
if pinylib.CONFIG.DEBUG_TO_FILE:
        formatter = '%(asctime)s : %(levelname)s : %(filename)s : %(lineno)d : %(funcName)s() : %(name)s : %(message)s'
        logging.basicConfig(filename=pinylib.CONFIG.DEBUG_FILE_NAME,
                            level=pinylib.CONFIG.DEBUG_LEVEL, format=formatter)
log.info('Starting pinylib version: %s' % pinylib.__version__)
else:
log.addHandler(logging.NullHandler())
main()
| nortxort/pinylib | sample_client.py | Python | mit | 4,629 |
import six
from django.conf import settings as django_settings
from django.utils.functional import LazyObject
from wysihtml5.conf import defaults as app_settings
class LazySettings(LazyObject):
def _setup(self):
self._wrapped = Settings(app_settings, django_settings)
def update_dict_in_depth(a, b):
"""Updates dict a in depth with values of dict b (not for sequences)"""
for k, v in six.iteritems(b):
if a.get(k, None) and type(v) == dict:
update_dict_in_depth(a[k], v)
else:
a[k] = v
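# Illustrative example (values assumed): with a = {'toolbar': {'bold': True}} and
# b = {'toolbar': {'italic': False}}, update_dict_in_depth(a, b) leaves a as
# {'toolbar': {'bold': True, 'italic': False}} rather than replacing the nested dict wholesale.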
class Settings(object):
def __init__(self, *args):
for item in args:
for attr in dir(item):
if attr == attr.upper():
setattr(self, attr, getattr(item, attr))
def __setattr__(self, name, value):
obj_attr = getattr(self, name, None)
if obj_attr and type(obj_attr) == dict:
update_dict_in_depth(obj_attr, value)
else:
object.__setattr__(self, name, value)
settings = LazySettings()
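# Illustrative usage (the attribute name is hypothetical): any UPPER_CASE constant defined in
# wysihtml5.conf.defaults can be read lazily, with values from django.conf.settings taking
# precedence because they are applied last in Settings.__init__:
#   from wysihtml5.conf import settings
#   settings.SOME_WYSIHTML5_OPTION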
| danirus/django-wysihtml5 | wysihtml5/conf/__init__.py | Python | bsd-2-clause | 1,049 |
from django.conf.urls import url
from dojo.engagement import views
urlpatterns = [
# engagements and calendar
url(r'^calendar$', views.engagement_calendar, name='calendar'),
url(r'^calendar/engagements$', views.engagement_calendar, name='engagement_calendar'),
url(r'^engagement$', views.engagement, name='engagement'),
url(r'^engagement/new$', views.new_engagement, name='new_eng'),
url(r'^engagement/(?P<eid>\d+)$', views.view_engagement,
name='view_engagement'),
url(r'^engagement/(?P<eid>\d+)/ics$', views.engagement_ics,
name='engagement_ics'),
url(r'^engagement/(?P<eid>\d+)/edit$', views.edit_engagement,
name='edit_engagement'),
url(r'^engagement/(?P<eid>\d+)/delete$', views.delete_engagement,
name='delete_engagement'),
url(r'^engagement/(?P<eid>\d+)/add_tests$', views.add_tests,
name='add_tests'),
url(r'^engagement/(?P<eid>\d+)/import_scan_results$',
views.import_scan_results, name='import_scan_results'),
url(r'^engagement/(?P<eid>\d+)/close$', views.close_eng,
name='close_engagement'),
url(r'^engagement/(?P<eid>\d+)/reopen$', views.reopen_eng,
name='reopen_engagement'),
url(r'^engagement/(?P<eid>\d+)/complete_checklist$',
views.complete_checklist, name='complete_checklist'),
url(r'^engagement/(?P<eid>\d+)/upload_risk_acceptance$',
views.upload_risk, name='upload_risk_acceptance$'),
url(r'^engagement/(?P<eid>\d+)/risk_approval/(?P<raid>\d+)$',
views.view_risk, name='view_risk'),
url(r'^engagement/(?P<eid>\d+)/risk_approval/(?P<raid>\d+)/delete$',
views.delete_risk, name='delete_risk'),
url(r'^engagement/(?P<eid>\d+)/risk_approval/(?P<raid>\d+)/download$',
views.download_risk, name='download_risk'),
url(r'^engagement/(?P<eid>\d+)/threatmodel$', views.view_threatmodel,
name='view_threatmodel'),
url(r'^engagement/(?P<eid>\d+)/threatmodel/upload$',
views.upload_threatmodel, name='upload_threatmodel'),
]
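# Illustrative example (not part of the original file): the URL names above can be reversed
# from views or templates, e.g. reverse('view_engagement', args=[42]) resolves to something
# like '/engagement/42', depending on how this module is included in the root URLconf.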
| Prakhash/security-tools | external/django-DefectDojo-1.2.1/dojo/engagement/urls.py | Python | apache-2.0 | 2,035 |
# -*- coding: utf-8 -*-
"""
Tests of the Capa XModule
"""
# pylint: disable=missing-docstring
# pylint: disable=invalid-name
import datetime
import json
import random
import os
import textwrap
import unittest
import ddt
from mock import Mock, patch, DEFAULT
import webob
from webob.multidict import MultiDict
import xmodule
from xmodule.tests import DATA_DIR
from capa import responsetypes
from capa.responsetypes import (StudentInputError, LoncapaProblemError,
ResponseError)
from capa.xqueue_interface import XQueueInterface
from xmodule.capa_module import CapaModule, CapaDescriptor, ComplexEncoder
from opaque_keys.edx.locations import Location
from xblock.field_data import DictFieldData
from xblock.fields import ScopeIds
from . import get_test_system
from pytz import UTC
from capa.correctmap import CorrectMap
from ..capa_base_constants import RANDOMIZATION
class CapaFactory(object):
"""
A helper class to create problem modules with various parameters for testing.
"""
sample_problem_xml = textwrap.dedent("""\
<?xml version="1.0"?>
<problem>
<text>
<p>What is pi, to two decimal places?</p>
</text>
<numericalresponse answer="3.14">
<textline math="1" size="30"/>
</numericalresponse>
</problem>
""")
num = 0
@classmethod
def next_num(cls):
cls.num += 1
return cls.num
@classmethod
def input_key(cls, response_num=2, input_num=1):
"""
Return the input key to use when passing GET parameters
"""
return ("input_" + cls.answer_key(response_num, input_num))
@classmethod
def answer_key(cls, response_num=2, input_num=1):
"""
Return the key stored in the capa problem answer dict
"""
return (
"%s_%d_%d" % (
"-".join(['i4x', 'edX', 'capa_test', 'problem', 'SampleProblem%d' % cls.num]),
response_num,
input_num
)
)
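    # Illustrative example: with cls.num == 1, answer_key() returns
    # "i4x-edX-capa_test-problem-SampleProblem1_2_1" and input_key() prefixes it with "input_".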
@classmethod
def create(cls,
attempts=None,
problem_state=None,
correct=False,
xml=None,
override_get_score=True,
**kwargs
):
"""
All parameters are optional, and are added to the created problem if specified.
Arguments:
graceperiod:
due:
max_attempts:
showanswer:
force_save_button:
rerandomize: all strings, as specified in the policy for the problem
problem_state: a dict to to be serialized into the instance_state of the
module.
attempts: also added to instance state. Will be converted to an int.
"""
location = Location(
"edX",
"capa_test",
"2012_Fall",
"problem",
"SampleProblem{0}".format(cls.next_num()),
None
)
if xml is None:
xml = cls.sample_problem_xml
field_data = {'data': xml}
field_data.update(kwargs)
descriptor = Mock(weight="1")
if problem_state is not None:
field_data.update(problem_state)
if attempts is not None:
# converting to int here because I keep putting "0" and "1" in the tests
# since everything else is a string.
field_data['attempts'] = int(attempts)
system = get_test_system()
system.render_template = Mock(return_value="<div>Test Template HTML</div>")
module = CapaModule(
descriptor,
system,
DictFieldData(field_data),
ScopeIds(None, None, location, location),
)
if override_get_score:
if correct:
# TODO: probably better to actually set the internal state properly, but...
module.get_score = lambda: {'score': 1, 'total': 1}
else:
module.get_score = lambda: {'score': 0, 'total': 1}
return module
class CapaFactoryWithFiles(CapaFactory):
"""
A factory for creating a Capa problem with files attached.
"""
sample_problem_xml = textwrap.dedent("""\
<problem>
<coderesponse queuename="BerkeleyX-cs188x">
<!-- actual filenames here don't matter for server-side tests,
they are only acted upon in the browser. -->
<filesubmission
points="25"
allowed_files="prog1.py prog2.py prog3.py"
required_files="prog1.py prog2.py prog3.py"
/>
<codeparam>
<answer_display>
If you're having trouble with this Project,
please refer to the Lecture Slides and attend office hours.
</answer_display>
<grader_payload>{"project": "p3"}</grader_payload>
</codeparam>
</coderesponse>
<customresponse>
<text>
If you worked with a partner, enter their username or email address. If you
worked alone, enter None.
</text>
<textline points="0" size="40" correct_answer="Your partner's username or 'None'"/>
<answer type="loncapa/python">
correct=['correct']
s = str(submission[0]).strip()
if submission[0] == '':
correct[0] = 'incorrect'
</answer>
</customresponse>
</problem>
""")
@ddt.ddt
class CapaModuleTest(unittest.TestCase):
def setUp(self):
super(CapaModuleTest, self).setUp()
now = datetime.datetime.now(UTC)
day_delta = datetime.timedelta(days=1)
self.yesterday_str = str(now - day_delta)
self.today_str = str(now)
self.tomorrow_str = str(now + day_delta)
# in the capa grace period format, not in time delta format
self.two_day_delta_str = "2 days"
def test_import(self):
module = CapaFactory.create()
self.assertEqual(module.get_score()['score'], 0)
other_module = CapaFactory.create()
        self.assertEqual(other_module.get_score()['score'], 0)
self.assertNotEqual(module.url_name, other_module.url_name,
"Factory should be creating unique names for each problem")
def test_correct(self):
"""
Check that the factory creates correct and incorrect problems properly.
"""
module = CapaFactory.create()
self.assertEqual(module.get_score()['score'], 0)
other_module = CapaFactory.create(correct=True)
self.assertEqual(other_module.get_score()['score'], 1)
def test_get_score(self):
"""
Do 1 test where the internals of get_score are properly set
@jbau Note: this obviously depends on a particular implementation of get_score, but I think this is actually
useful as unit-code coverage for this current implementation. I don't see a layer where LoncapaProblem
is tested directly
"""
from capa.correctmap import CorrectMap
student_answers = {'1_2_1': 'abcd'}
correct_map = CorrectMap(answer_id='1_2_1', correctness="correct", npoints=0.9)
module = CapaFactory.create(correct=True, override_get_score=False)
module.lcp.correct_map = correct_map
module.lcp.student_answers = student_answers
self.assertEqual(module.get_score()['score'], 0.9)
other_correct_map = CorrectMap(answer_id='1_2_1', correctness="incorrect", npoints=0.1)
other_module = CapaFactory.create(correct=False, override_get_score=False)
other_module.lcp.correct_map = other_correct_map
other_module.lcp.student_answers = student_answers
self.assertEqual(other_module.get_score()['score'], 0.1)
def test_showanswer_default(self):
"""
Make sure the show answer logic does the right thing.
"""
# default, no due date, showanswer 'closed', so problem is open, and show_answer
# not visible.
problem = CapaFactory.create()
self.assertFalse(problem.answer_available())
def test_showanswer_attempted(self):
problem = CapaFactory.create(showanswer='attempted')
self.assertFalse(problem.answer_available())
problem.attempts = 1
self.assertTrue(problem.answer_available())
def test_showanswer_closed(self):
# can see after attempts used up, even with due date in the future
used_all_attempts = CapaFactory.create(showanswer='closed',
max_attempts="1",
attempts="1",
due=self.tomorrow_str)
self.assertTrue(used_all_attempts.answer_available())
# can see after due date
after_due_date = CapaFactory.create(showanswer='closed',
max_attempts="1",
attempts="0",
due=self.yesterday_str)
self.assertTrue(after_due_date.answer_available())
# can't see because attempts left
attempts_left_open = CapaFactory.create(showanswer='closed',
max_attempts="1",
attempts="0",
due=self.tomorrow_str)
self.assertFalse(attempts_left_open.answer_available())
# Can't see because grace period hasn't expired
still_in_grace = CapaFactory.create(showanswer='closed',
max_attempts="1",
attempts="0",
due=self.yesterday_str,
graceperiod=self.two_day_delta_str)
self.assertFalse(still_in_grace.answer_available())
def test_showanswer_correct_or_past_due(self):
"""
With showanswer="correct_or_past_due" should show answer after the answer is correct
or after the problem is closed for everyone--e.g. after due date + grace period.
"""
# can see because answer is correct, even with due date in the future
answer_correct = CapaFactory.create(showanswer='correct_or_past_due',
max_attempts="1",
attempts="0",
due=self.tomorrow_str,
correct=True)
self.assertTrue(answer_correct.answer_available())
# can see after due date, even when answer isn't correct
past_due_date = CapaFactory.create(showanswer='correct_or_past_due',
max_attempts="1",
attempts="0",
due=self.yesterday_str)
self.assertTrue(past_due_date.answer_available())
# can also see after due date when answer _is_ correct
past_due_date_correct = CapaFactory.create(showanswer='correct_or_past_due',
max_attempts="1",
attempts="0",
due=self.yesterday_str,
correct=True)
self.assertTrue(past_due_date_correct.answer_available())
# Can't see because grace period hasn't expired and answer isn't correct
still_in_grace = CapaFactory.create(showanswer='correct_or_past_due',
max_attempts="1",
attempts="1",
due=self.yesterday_str,
graceperiod=self.two_day_delta_str)
self.assertFalse(still_in_grace.answer_available())
def test_showanswer_past_due(self):
"""
With showanswer="past_due" should only show answer after the problem is closed
for everyone--e.g. after due date + grace period.
"""
# can't see after attempts used up, even with due date in the future
used_all_attempts = CapaFactory.create(showanswer='past_due',
max_attempts="1",
attempts="1",
due=self.tomorrow_str)
self.assertFalse(used_all_attempts.answer_available())
# can see after due date
past_due_date = CapaFactory.create(showanswer='past_due',
max_attempts="1",
attempts="0",
due=self.yesterday_str)
self.assertTrue(past_due_date.answer_available())
# can't see because attempts left
attempts_left_open = CapaFactory.create(showanswer='past_due',
max_attempts="1",
attempts="0",
due=self.tomorrow_str)
self.assertFalse(attempts_left_open.answer_available())
# Can't see because grace period hasn't expired, even though have no more
# attempts.
still_in_grace = CapaFactory.create(showanswer='past_due',
max_attempts="1",
attempts="1",
due=self.yesterday_str,
graceperiod=self.two_day_delta_str)
self.assertFalse(still_in_grace.answer_available())
def test_showanswer_finished(self):
"""
With showanswer="finished" should show answer after the problem is closed,
or after the answer is correct.
"""
# can see after attempts used up, even with due date in the future
used_all_attempts = CapaFactory.create(showanswer='finished',
max_attempts="1",
attempts="1",
due=self.tomorrow_str)
self.assertTrue(used_all_attempts.answer_available())
# can see after due date
past_due_date = CapaFactory.create(showanswer='finished',
max_attempts="1",
attempts="0",
due=self.yesterday_str)
self.assertTrue(past_due_date.answer_available())
# can't see because attempts left and wrong
attempts_left_open = CapaFactory.create(showanswer='finished',
max_attempts="1",
attempts="0",
due=self.tomorrow_str)
self.assertFalse(attempts_left_open.answer_available())
# _can_ see because attempts left and right
correct_ans = CapaFactory.create(showanswer='finished',
max_attempts="1",
attempts="0",
due=self.tomorrow_str,
correct=True)
self.assertTrue(correct_ans.answer_available())
# Can see even though grace period hasn't expired, because have no more
# attempts.
still_in_grace = CapaFactory.create(showanswer='finished',
max_attempts="1",
attempts="1",
due=self.yesterday_str,
graceperiod=self.two_day_delta_str)
self.assertTrue(still_in_grace.answer_available())
def test_closed(self):
# Attempts < Max attempts --> NOT closed
module = CapaFactory.create(max_attempts="1", attempts="0")
self.assertFalse(module.closed())
# Attempts < Max attempts --> NOT closed
module = CapaFactory.create(max_attempts="2", attempts="1")
self.assertFalse(module.closed())
# Attempts = Max attempts --> closed
module = CapaFactory.create(max_attempts="1", attempts="1")
self.assertTrue(module.closed())
# Attempts > Max attempts --> closed
module = CapaFactory.create(max_attempts="1", attempts="2")
self.assertTrue(module.closed())
# Max attempts = 0 --> closed
module = CapaFactory.create(max_attempts="0", attempts="2")
self.assertTrue(module.closed())
# Past due --> closed
module = CapaFactory.create(max_attempts="1", attempts="0",
due=self.yesterday_str)
self.assertTrue(module.closed())
def test_parse_get_params(self):
# Valid GET param dict
# 'input_5' intentionally left unset,
valid_get_dict = MultiDict({
'input_1': 'test',
'input_1_2': 'test',
'input_1_2_3': 'test',
'input_[]_3': 'test',
'input_4': None,
'input_6': 5
})
result = CapaModule.make_dict_of_responses(valid_get_dict)
# Expect that we get a dict with "input" stripped from key names
# and that we get the same values back
for key in result.keys():
original_key = "input_" + key
self.assertTrue(original_key in valid_get_dict,
"Output dict should have key %s" % original_key)
self.assertEqual(valid_get_dict[original_key], result[key])
# Valid GET param dict with list keys
# Each tuple represents a single parameter in the query string
valid_get_dict = MultiDict((('input_2[]', 'test1'), ('input_2[]', 'test2')))
result = CapaModule.make_dict_of_responses(valid_get_dict)
self.assertTrue('2' in result)
self.assertEqual(['test1', 'test2'], result['2'])
# If we use [] at the end of a key name, we should always
# get a list, even if there's just one value
valid_get_dict = MultiDict({'input_1[]': 'test'})
result = CapaModule.make_dict_of_responses(valid_get_dict)
self.assertEqual(result['1'], ['test'])
# If we have no underscores in the name, then the key is invalid
invalid_get_dict = MultiDict({'input': 'test'})
with self.assertRaises(ValueError):
result = CapaModule.make_dict_of_responses(invalid_get_dict)
# Two equivalent names (one list, one non-list)
# One of the values would overwrite the other, so detect this
# and raise an exception
invalid_get_dict = MultiDict({'input_1[]': 'test 1',
'input_1': 'test 2'})
with self.assertRaises(ValueError):
result = CapaModule.make_dict_of_responses(invalid_get_dict)
def test_check_problem_correct(self):
module = CapaFactory.create(attempts=1)
# Simulate that all answers are marked correct, no matter
# what the input is, by patching CorrectMap.is_correct()
# Also simulate rendering the HTML
# TODO: pep8 thinks the following line has invalid syntax
with patch('capa.correctmap.CorrectMap.is_correct') as mock_is_correct, \
patch('xmodule.capa_module.CapaModule.get_problem_html') as mock_html:
mock_is_correct.return_value = True
mock_html.return_value = "Test HTML"
# Check the problem
get_request_dict = {CapaFactory.input_key(): '3.14'}
result = module.check_problem(get_request_dict)
# Expect that the problem is marked correct
self.assertEqual(result['success'], 'correct')
# Expect that we get the (mocked) HTML
self.assertEqual(result['contents'], 'Test HTML')
# Expect that the number of attempts is incremented by 1
self.assertEqual(module.attempts, 2)
def test_check_problem_incorrect(self):
module = CapaFactory.create(attempts=0)
# Simulate marking the input incorrect
with patch('capa.correctmap.CorrectMap.is_correct') as mock_is_correct:
mock_is_correct.return_value = False
# Check the problem
get_request_dict = {CapaFactory.input_key(): '0'}
result = module.check_problem(get_request_dict)
        # Expect that the problem is marked incorrect
self.assertEqual(result['success'], 'incorrect')
# Expect that the number of attempts is incremented by 1
self.assertEqual(module.attempts, 1)
def test_check_problem_closed(self):
module = CapaFactory.create(attempts=3)
# Problem closed -- cannot submit
# Simulate that CapaModule.closed() always returns True
with patch('xmodule.capa_module.CapaModule.closed') as mock_closed:
mock_closed.return_value = True
with self.assertRaises(xmodule.exceptions.NotFoundError):
get_request_dict = {CapaFactory.input_key(): '3.14'}
module.check_problem(get_request_dict)
# Expect that number of attempts NOT incremented
self.assertEqual(module.attempts, 3)
@ddt.data(
RANDOMIZATION.ALWAYS,
'true'
)
def test_check_problem_resubmitted_with_randomize(self, rerandomize):
# Randomize turned on
module = CapaFactory.create(rerandomize=rerandomize, attempts=0)
# Simulate that the problem is completed
module.done = True
# Expect that we cannot submit
with self.assertRaises(xmodule.exceptions.NotFoundError):
get_request_dict = {CapaFactory.input_key(): '3.14'}
module.check_problem(get_request_dict)
# Expect that number of attempts NOT incremented
self.assertEqual(module.attempts, 0)
@ddt.data(
RANDOMIZATION.NEVER,
'false',
RANDOMIZATION.PER_STUDENT
)
def test_check_problem_resubmitted_no_randomize(self, rerandomize):
# Randomize turned off
module = CapaFactory.create(rerandomize=rerandomize, attempts=0, done=True)
# Expect that we can submit successfully
get_request_dict = {CapaFactory.input_key(): '3.14'}
result = module.check_problem(get_request_dict)
self.assertEqual(result['success'], 'correct')
# Expect that number of attempts IS incremented
self.assertEqual(module.attempts, 1)
def test_check_problem_queued(self):
module = CapaFactory.create(attempts=1)
# Simulate that the problem is queued
multipatch = patch.multiple(
'capa.capa_problem.LoncapaProblem',
is_queued=DEFAULT,
get_recentmost_queuetime=DEFAULT
)
with multipatch as values:
values['is_queued'].return_value = True
values['get_recentmost_queuetime'].return_value = datetime.datetime.now(UTC)
get_request_dict = {CapaFactory.input_key(): '3.14'}
result = module.check_problem(get_request_dict)
# Expect an AJAX alert message in 'success'
self.assertIn('You must wait', result['success'])
# Expect that the number of attempts is NOT incremented
self.assertEqual(module.attempts, 1)
def test_check_problem_with_files(self):
# Check a problem with uploaded files, using the check_problem API.
# pylint: disable=protected-access
# The files we'll be uploading.
fnames = ["prog1.py", "prog2.py", "prog3.py"]
fpaths = [os.path.join(DATA_DIR, "capa", fname) for fname in fnames]
fileobjs = [open(fpath) for fpath in fpaths]
for fileobj in fileobjs:
self.addCleanup(fileobj.close)
module = CapaFactoryWithFiles.create()
# Mock the XQueueInterface.
xqueue_interface = XQueueInterface("http://example.com/xqueue", Mock())
xqueue_interface._http_post = Mock(return_value=(0, "ok"))
module.system.xqueue['interface'] = xqueue_interface
# Create a request dictionary for check_problem.
get_request_dict = {
CapaFactoryWithFiles.input_key(response_num=2): fileobjs,
CapaFactoryWithFiles.input_key(response_num=3): 'None',
}
module.check_problem(get_request_dict)
# _http_post is called like this:
# _http_post(
# 'http://example.com/xqueue/xqueue/submit/',
# {
# 'xqueue_header': '{"lms_key": "df34fb702620d7ae892866ba57572491", "lms_callback_url": "/", "queue_name": "BerkeleyX-cs188x"}',
# 'xqueue_body': '{"student_info": "{\\"anonymous_student_id\\": \\"student\\", \\"submission_time\\": \\"20131117183318\\"}", "grader_payload": "{\\"project\\": \\"p3\\"}", "student_response": ""}',
# },
# files={
# path(u'/home/ned/edx/edx-platform/common/test/data/uploads/asset.html'):
# <open file u'/home/ned/edx/edx-platform/common/test/data/uploads/asset.html', mode 'r' at 0x49c5f60>,
# path(u'/home/ned/edx/edx-platform/common/test/data/uploads/image.jpg'):
# <open file u'/home/ned/edx/edx-platform/common/test/data/uploads/image.jpg', mode 'r' at 0x49c56f0>,
# path(u'/home/ned/edx/edx-platform/common/test/data/uploads/textbook.pdf'):
# <open file u'/home/ned/edx/edx-platform/common/test/data/uploads/textbook.pdf', mode 'r' at 0x49c5a50>,
# },
# )
self.assertEqual(xqueue_interface._http_post.call_count, 1)
_, kwargs = xqueue_interface._http_post.call_args
self.assertItemsEqual(fpaths, kwargs['files'].keys())
for fpath, fileobj in kwargs['files'].iteritems():
self.assertEqual(fpath, fileobj.name)
def test_check_problem_with_files_as_xblock(self):
# Check a problem with uploaded files, using the XBlock API.
# pylint: disable=protected-access
# The files we'll be uploading.
fnames = ["prog1.py", "prog2.py", "prog3.py"]
fpaths = [os.path.join(DATA_DIR, "capa", fname) for fname in fnames]
fileobjs = [open(fpath) for fpath in fpaths]
for fileobj in fileobjs:
self.addCleanup(fileobj.close)
module = CapaFactoryWithFiles.create()
# Mock the XQueueInterface.
xqueue_interface = XQueueInterface("http://example.com/xqueue", Mock())
xqueue_interface._http_post = Mock(return_value=(0, "ok"))
module.system.xqueue['interface'] = xqueue_interface
# Create a webob Request with the files uploaded.
post_data = []
for fname, fileobj in zip(fnames, fileobjs):
post_data.append((CapaFactoryWithFiles.input_key(response_num=2), (fname, fileobj)))
post_data.append((CapaFactoryWithFiles.input_key(response_num=3), 'None'))
request = webob.Request.blank("/some/fake/url", POST=post_data, content_type='multipart/form-data')
module.handle('xmodule_handler', request, 'problem_check')
self.assertEqual(xqueue_interface._http_post.call_count, 1)
_, kwargs = xqueue_interface._http_post.call_args
self.assertItemsEqual(fnames, kwargs['files'].keys())
for fpath, fileobj in kwargs['files'].iteritems():
self.assertEqual(fpath, fileobj.name)
def test_check_problem_error(self):
# Try each exception that capa_module should handle
exception_classes = [StudentInputError,
LoncapaProblemError,
ResponseError]
for exception_class in exception_classes:
# Create the module
module = CapaFactory.create(attempts=1)
# Ensure that the user is NOT staff
module.system.user_is_staff = False
# Simulate answering a problem that raises the exception
with patch('capa.capa_problem.LoncapaProblem.grade_answers') as mock_grade:
mock_grade.side_effect = exception_class('test error')
get_request_dict = {CapaFactory.input_key(): '3.14'}
result = module.check_problem(get_request_dict)
# Expect an AJAX alert message in 'success'
expected_msg = 'Error: test error'
self.assertEqual(expected_msg, result['success'])
# Expect that the number of attempts is NOT incremented
self.assertEqual(module.attempts, 1)
def test_check_problem_other_errors(self):
"""
Test that errors other than the expected kinds give an appropriate message.
See also `test_check_problem_error` for the "expected kinds" or errors.
"""
# Create the module
module = CapaFactory.create(attempts=1)
# Ensure that the user is NOT staff
module.system.user_is_staff = False
# Ensure that DEBUG is on
module.system.DEBUG = True
# Simulate answering a problem that raises the exception
with patch('capa.capa_problem.LoncapaProblem.grade_answers') as mock_grade:
error_msg = u"Superterrible error happened: ☠"
mock_grade.side_effect = Exception(error_msg)
get_request_dict = {CapaFactory.input_key(): '3.14'}
result = module.check_problem(get_request_dict)
# Expect an AJAX alert message in 'success'
self.assertTrue(error_msg in result['success'])
def test_check_problem_error_nonascii(self):
# Try each exception that capa_module should handle
exception_classes = [StudentInputError,
LoncapaProblemError,
ResponseError]
for exception_class in exception_classes:
# Create the module
module = CapaFactory.create(attempts=1)
# Ensure that the user is NOT staff
module.system.user_is_staff = False
# Simulate answering a problem that raises the exception
with patch('capa.capa_problem.LoncapaProblem.grade_answers') as mock_grade:
mock_grade.side_effect = exception_class(u"ȧƈƈḗƞŧḗḓ ŧḗẋŧ ƒǿř ŧḗşŧīƞɠ")
get_request_dict = {CapaFactory.input_key(): '3.14'}
result = module.check_problem(get_request_dict)
# Expect an AJAX alert message in 'success'
expected_msg = u'Error: ȧƈƈḗƞŧḗḓ ŧḗẋŧ ƒǿř ŧḗşŧīƞɠ'
self.assertEqual(expected_msg, result['success'])
# Expect that the number of attempts is NOT incremented
self.assertEqual(module.attempts, 1)
def test_check_problem_error_with_staff_user(self):
# Try each exception that capa module should handle
for exception_class in [StudentInputError,
LoncapaProblemError,
ResponseError]:
# Create the module
module = CapaFactory.create(attempts=1)
# Ensure that the user IS staff
module.system.user_is_staff = True
# Simulate answering a problem that raises an exception
with patch('capa.capa_problem.LoncapaProblem.grade_answers') as mock_grade:
mock_grade.side_effect = exception_class('test error')
get_request_dict = {CapaFactory.input_key(): '3.14'}
result = module.check_problem(get_request_dict)
# Expect an AJAX alert message in 'success'
self.assertTrue('test error' in result['success'])
# We DO include traceback information for staff users
self.assertTrue('Traceback' in result['success'])
# Expect that the number of attempts is NOT incremented
self.assertEqual(module.attempts, 1)
def test_reset_problem(self):
module = CapaFactory.create(done=True)
module.new_lcp = Mock(wraps=module.new_lcp)
module.choose_new_seed = Mock(wraps=module.choose_new_seed)
# Stub out HTML rendering
with patch('xmodule.capa_module.CapaModule.get_problem_html') as mock_html:
mock_html.return_value = "<div>Test HTML</div>"
# Reset the problem
get_request_dict = {}
result = module.reset_problem(get_request_dict)
# Expect that the request was successful
self.assertTrue('success' in result and result['success'])
# Expect that the problem HTML is retrieved
self.assertTrue('html' in result)
self.assertEqual(result['html'], "<div>Test HTML</div>")
# Expect that the problem was reset
module.new_lcp.assert_called_once_with(None)
def test_reset_problem_closed(self):
# pre studio default
module = CapaFactory.create(rerandomize=RANDOMIZATION.ALWAYS)
# Simulate that the problem is closed
with patch('xmodule.capa_module.CapaModule.closed') as mock_closed:
mock_closed.return_value = True
# Try to reset the problem
get_request_dict = {}
result = module.reset_problem(get_request_dict)
# Expect that the problem was NOT reset
self.assertTrue('success' in result and not result['success'])
def test_reset_problem_not_done(self):
# Simulate that the problem is NOT done
module = CapaFactory.create(done=False)
# Try to reset the problem
get_request_dict = {}
result = module.reset_problem(get_request_dict)
# Expect that the problem was NOT reset
self.assertTrue('success' in result and not result['success'])
def test_rescore_problem_correct(self):
module = CapaFactory.create(attempts=1, done=True)
# Simulate that all answers are marked correct, no matter
# what the input is, by patching LoncapaResponse.evaluate_answers()
with patch('capa.responsetypes.LoncapaResponse.evaluate_answers') as mock_evaluate_answers:
mock_evaluate_answers.return_value = CorrectMap(CapaFactory.answer_key(), 'correct')
result = module.rescore_problem()
# Expect that the problem is marked correct
self.assertEqual(result['success'], 'correct')
# Expect that we get no HTML
self.assertFalse('contents' in result)
# Expect that the number of attempts is not incremented
self.assertEqual(module.attempts, 1)
def test_rescore_problem_incorrect(self):
# make sure it also works when attempts have been reset,
# so add this to the test:
module = CapaFactory.create(attempts=0, done=True)
# Simulate that all answers are marked incorrect, no matter
# what the input is, by patching LoncapaResponse.evaluate_answers()
with patch('capa.responsetypes.LoncapaResponse.evaluate_answers') as mock_evaluate_answers:
mock_evaluate_answers.return_value = CorrectMap(CapaFactory.answer_key(), 'incorrect')
result = module.rescore_problem()
# Expect that the problem is marked incorrect
self.assertEqual(result['success'], 'incorrect')
# Expect that the number of attempts is not incremented
self.assertEqual(module.attempts, 0)
def test_rescore_problem_not_done(self):
# Simulate that the problem is NOT done
module = CapaFactory.create(done=False)
# Try to rescore the problem, and get exception
with self.assertRaises(xmodule.exceptions.NotFoundError):
module.rescore_problem()
def test_rescore_problem_not_supported(self):
module = CapaFactory.create(done=True)
# Try to rescore the problem, and get exception
with patch('capa.capa_problem.LoncapaProblem.supports_rescoring') as mock_supports_rescoring:
mock_supports_rescoring.return_value = False
with self.assertRaises(NotImplementedError):
module.rescore_problem()
def _rescore_problem_error_helper(self, exception_class):
"""Helper to allow testing all errors that rescoring might return."""
# Create the module
module = CapaFactory.create(attempts=1, done=True)
# Simulate answering a problem that raises the exception
with patch('capa.capa_problem.LoncapaProblem.rescore_existing_answers') as mock_rescore:
mock_rescore.side_effect = exception_class(u'test error \u03a9')
result = module.rescore_problem()
# Expect an AJAX alert message in 'success'
expected_msg = u'Error: test error \u03a9'
self.assertEqual(result['success'], expected_msg)
# Expect that the number of attempts is NOT incremented
self.assertEqual(module.attempts, 1)
def test_rescore_problem_student_input_error(self):
self._rescore_problem_error_helper(StudentInputError)
def test_rescore_problem_problem_error(self):
self._rescore_problem_error_helper(LoncapaProblemError)
def test_rescore_problem_response_error(self):
self._rescore_problem_error_helper(ResponseError)
def test_save_problem(self):
module = CapaFactory.create(done=False)
# Save the problem
get_request_dict = {CapaFactory.input_key(): '3.14'}
result = module.save_problem(get_request_dict)
# Expect that answers are saved to the problem
expected_answers = {CapaFactory.answer_key(): '3.14'}
self.assertEqual(module.lcp.student_answers, expected_answers)
# Expect that the result is success
self.assertTrue('success' in result and result['success'])
def test_save_problem_closed(self):
module = CapaFactory.create(done=False)
# Simulate that the problem is closed
with patch('xmodule.capa_module.CapaModule.closed') as mock_closed:
mock_closed.return_value = True
# Try to save the problem
get_request_dict = {CapaFactory.input_key(): '3.14'}
result = module.save_problem(get_request_dict)
# Expect that the result is failure
self.assertTrue('success' in result and not result['success'])
@ddt.data(
RANDOMIZATION.ALWAYS,
'true'
)
def test_save_problem_submitted_with_randomize(self, rerandomize):
# Capa XModule treats 'always' and 'true' equivalently
module = CapaFactory.create(rerandomize=rerandomize, done=True)
# Try to save
get_request_dict = {CapaFactory.input_key(): '3.14'}
result = module.save_problem(get_request_dict)
# Expect that we cannot save
self.assertTrue('success' in result and not result['success'])
@ddt.data(
RANDOMIZATION.NEVER,
'false',
RANDOMIZATION.PER_STUDENT
)
def test_save_problem_submitted_no_randomize(self, rerandomize):
# Capa XModule treats 'false' and 'per_student' equivalently
module = CapaFactory.create(rerandomize=rerandomize, done=True)
# Try to save
get_request_dict = {CapaFactory.input_key(): '3.14'}
result = module.save_problem(get_request_dict)
# Expect that we succeed
self.assertTrue('success' in result and result['success'])
def test_check_button_name(self):
# If last attempt, button name changes to "Final Check"
# Just in case, we also check what happens if we have
# more attempts than allowed.
attempts = random.randint(1, 10)
module = CapaFactory.create(attempts=attempts - 1, max_attempts=attempts)
self.assertEqual(module.check_button_name(), "Final Check")
module = CapaFactory.create(attempts=attempts, max_attempts=attempts)
self.assertEqual(module.check_button_name(), "Final Check")
module = CapaFactory.create(attempts=attempts + 1, max_attempts=attempts)
self.assertEqual(module.check_button_name(), "Final Check")
# Otherwise, button name is "Check"
module = CapaFactory.create(attempts=attempts - 2, max_attempts=attempts)
self.assertEqual(module.check_button_name(), "Check")
module = CapaFactory.create(attempts=attempts - 3, max_attempts=attempts)
self.assertEqual(module.check_button_name(), "Check")
# If no limit on attempts, then always show "Check"
module = CapaFactory.create(attempts=attempts - 3)
self.assertEqual(module.check_button_name(), "Check")
module = CapaFactory.create(attempts=0)
self.assertEqual(module.check_button_name(), "Check")
def test_check_button_checking_name(self):
module = CapaFactory.create(attempts=1, max_attempts=10)
self.assertEqual(module.check_button_checking_name(), "Checking...")
module = CapaFactory.create(attempts=10, max_attempts=10)
self.assertEqual(module.check_button_checking_name(), "Checking...")
def test_check_button_name_customization(self):
module = CapaFactory.create(
attempts=1,
max_attempts=10,
text_customization={"custom_check": "Submit", "custom_final_check": "Final Submit"}
)
self.assertEqual(module.check_button_name(), "Submit")
module = CapaFactory.create(attempts=9,
max_attempts=10,
text_customization={"custom_check": "Submit", "custom_final_check": "Final Submit"}
)
self.assertEqual(module.check_button_name(), "Final Submit")
def test_check_button_checking_name_customization(self):
module = CapaFactory.create(
attempts=1,
max_attempts=10,
text_customization={
"custom_check": "Submit",
"custom_final_check": "Final Submit",
"custom_checking": "Checking..."
}
)
self.assertEqual(module.check_button_checking_name(), "Checking...")
module = CapaFactory.create(
attempts=9,
max_attempts=10,
text_customization={
"custom_check": "Submit",
"custom_final_check": "Final Submit",
"custom_checking": "Checking..."
}
)
self.assertEqual(module.check_button_checking_name(), "Checking...")
def test_should_show_check_button(self):
attempts = random.randint(1, 10)
# If we're after the deadline, do NOT show check button
module = CapaFactory.create(due=self.yesterday_str)
self.assertFalse(module.should_show_check_button())
# If user is out of attempts, do NOT show the check button
module = CapaFactory.create(attempts=attempts, max_attempts=attempts)
self.assertFalse(module.should_show_check_button())
# If survey question (max_attempts = 0), do NOT show the check button
module = CapaFactory.create(max_attempts=0)
self.assertFalse(module.should_show_check_button())
# If user submitted a problem but hasn't reset,
# do NOT show the check button
# Note: we can only reset when rerandomize="always" or "true"
module = CapaFactory.create(rerandomize=RANDOMIZATION.ALWAYS, done=True)
self.assertFalse(module.should_show_check_button())
module = CapaFactory.create(rerandomize="true", done=True)
self.assertFalse(module.should_show_check_button())
# Otherwise, DO show the check button
module = CapaFactory.create()
self.assertTrue(module.should_show_check_button())
# If the user has submitted the problem
# and we do NOT have a reset button, then we can show the check button
# Setting rerandomize to "never" or "false" ensures that the reset button
# is not shown
module = CapaFactory.create(rerandomize=RANDOMIZATION.NEVER, done=True)
self.assertTrue(module.should_show_check_button())
module = CapaFactory.create(rerandomize="false", done=True)
self.assertTrue(module.should_show_check_button())
module = CapaFactory.create(rerandomize=RANDOMIZATION.PER_STUDENT, done=True)
self.assertTrue(module.should_show_check_button())
def test_should_show_reset_button(self):
attempts = random.randint(1, 10)
# If we're after the deadline, do NOT show the reset button
module = CapaFactory.create(due=self.yesterday_str, done=True)
self.assertFalse(module.should_show_reset_button())
# If the user is out of attempts, do NOT show the reset button
module = CapaFactory.create(attempts=attempts, max_attempts=attempts, done=True)
self.assertFalse(module.should_show_reset_button())
# pre studio default value, DO show the reset button
module = CapaFactory.create(rerandomize=RANDOMIZATION.ALWAYS, done=True)
self.assertTrue(module.should_show_reset_button())
# If survey question for capa (max_attempts = 0),
# DO show the reset button
module = CapaFactory.create(rerandomize=RANDOMIZATION.ALWAYS, max_attempts=0, done=True)
self.assertTrue(module.should_show_reset_button())
# If the question is not correct
# DO show the reset button
module = CapaFactory.create(rerandomize=RANDOMIZATION.ALWAYS, max_attempts=0, done=True, correct=False)
self.assertTrue(module.should_show_reset_button())
# If the question is correct and randomization is never
# DO not show the reset button
module = CapaFactory.create(rerandomize=RANDOMIZATION.NEVER, max_attempts=0, done=True, correct=True)
self.assertFalse(module.should_show_reset_button())
# If the question is correct and randomization is always
# Show the reset button
module = CapaFactory.create(rerandomize=RANDOMIZATION.ALWAYS, max_attempts=0, done=True, correct=True)
self.assertTrue(module.should_show_reset_button())
# Don't show reset button if randomization is turned on and the question is not done
module = CapaFactory.create(rerandomize=RANDOMIZATION.ALWAYS, show_reset_button=False, done=False)
self.assertFalse(module.should_show_reset_button())
# Show reset button if randomization is turned on and the problem is done
module = CapaFactory.create(rerandomize=RANDOMIZATION.ALWAYS, show_reset_button=False, done=True)
self.assertTrue(module.should_show_reset_button())
def test_should_show_save_button(self):
attempts = random.randint(1, 10)
# If we're after the deadline, do NOT show the save button
module = CapaFactory.create(due=self.yesterday_str, done=True)
self.assertFalse(module.should_show_save_button())
# If the user is out of attempts, do NOT show the save button
module = CapaFactory.create(attempts=attempts, max_attempts=attempts, done=True)
self.assertFalse(module.should_show_save_button())
# If user submitted a problem but hasn't reset, do NOT show the save button
module = CapaFactory.create(rerandomize=RANDOMIZATION.ALWAYS, done=True)
self.assertFalse(module.should_show_save_button())
module = CapaFactory.create(rerandomize="true", done=True)
self.assertFalse(module.should_show_save_button())
# If the user has unlimited attempts and we are not randomizing,
# then do NOT show a save button
# because they can keep using "Check"
module = CapaFactory.create(max_attempts=None, rerandomize=RANDOMIZATION.NEVER, done=False)
self.assertFalse(module.should_show_save_button())
module = CapaFactory.create(max_attempts=None, rerandomize="false", done=True)
self.assertFalse(module.should_show_save_button())
module = CapaFactory.create(max_attempts=None, rerandomize=RANDOMIZATION.PER_STUDENT, done=True)
self.assertFalse(module.should_show_save_button())
# pre-studio default, DO show the save button
module = CapaFactory.create(rerandomize=RANDOMIZATION.ALWAYS, done=False)
self.assertTrue(module.should_show_save_button())
# If we're not randomizing and we have limited attempts, then we can save
module = CapaFactory.create(rerandomize=RANDOMIZATION.NEVER, max_attempts=2, done=True)
self.assertTrue(module.should_show_save_button())
module = CapaFactory.create(rerandomize="false", max_attempts=2, done=True)
self.assertTrue(module.should_show_save_button())
module = CapaFactory.create(rerandomize=RANDOMIZATION.PER_STUDENT, max_attempts=2, done=True)
self.assertTrue(module.should_show_save_button())
# If survey question for capa (max_attempts = 0),
# DO show the save button
module = CapaFactory.create(max_attempts=0, done=False)
self.assertTrue(module.should_show_save_button())
def test_should_show_save_button_force_save_button(self):
# If we're after the deadline, do NOT show the save button
# even though we're forcing a save
module = CapaFactory.create(due=self.yesterday_str,
force_save_button="true",
done=True)
self.assertFalse(module.should_show_save_button())
# If the user is out of attempts, do NOT show the save button
attempts = random.randint(1, 10)
module = CapaFactory.create(attempts=attempts,
max_attempts=attempts,
force_save_button="true",
done=True)
self.assertFalse(module.should_show_save_button())
# Otherwise, if we force the save button,
# then show it even if we would ordinarily
# require a reset first
module = CapaFactory.create(force_save_button="true",
rerandomize=RANDOMIZATION.ALWAYS,
done=True)
self.assertTrue(module.should_show_save_button())
module = CapaFactory.create(force_save_button="true",
rerandomize="true",
done=True)
self.assertTrue(module.should_show_save_button())
def test_no_max_attempts(self):
module = CapaFactory.create(max_attempts='')
html = module.get_problem_html()
self.assertTrue(html is not None)
# assert that we got here without exploding
def test_get_problem_html(self):
module = CapaFactory.create()
# We've tested the show/hide button logic in other tests,
# so here we hard-wire the values
show_check_button = bool(random.randint(0, 1) % 2)
show_reset_button = bool(random.randint(0, 1) % 2)
show_save_button = bool(random.randint(0, 1) % 2)
module.should_show_check_button = Mock(return_value=show_check_button)
module.should_show_reset_button = Mock(return_value=show_reset_button)
module.should_show_save_button = Mock(return_value=show_save_button)
# Mock the system rendering function
module.system.render_template = Mock(return_value="<div>Test Template HTML</div>")
# Patch the capa problem's HTML rendering
with patch('capa.capa_problem.LoncapaProblem.get_html') as mock_html:
mock_html.return_value = "<div>Test Problem HTML</div>"
# Render the problem HTML
html = module.get_problem_html(encapsulate=False)
# Also render the problem encapsulated in a <div>
html_encapsulated = module.get_problem_html(encapsulate=True)
# Expect that we get the rendered template back
self.assertEqual(html, "<div>Test Template HTML</div>")
# Check the rendering context
render_args, _ = module.system.render_template.call_args
self.assertEqual(len(render_args), 2)
template_name = render_args[0]
self.assertEqual(template_name, "problem.html")
context = render_args[1]
self.assertEqual(context['problem']['html'], "<div>Test Problem HTML</div>")
self.assertEqual(bool(context['check_button']), show_check_button)
self.assertEqual(bool(context['reset_button']), show_reset_button)
self.assertEqual(bool(context['save_button']), show_save_button)
# Assert that the encapsulated html contains the original html
self.assertTrue(html in html_encapsulated)
def test_input_state_consistency(self):
module1 = CapaFactory.create()
module2 = CapaFactory.create()
        # Check that the keys in input_state match the keys of the problem's inputs
module1.set_state_from_lcp()
self.assertEqual(module1.lcp.inputs.keys(), module1.input_state.keys())
module2.set_state_from_lcp()
intersection = set(module2.input_state.keys()).intersection(set(module1.input_state.keys()))
self.assertEqual(len(intersection), 0)
def test_get_problem_html_error(self):
"""
In production, when an error occurs with the problem HTML
rendering, a "dummy" problem is created with an error
message to display to the user.
"""
module = CapaFactory.create()
# Save the original problem so we can compare it later
original_problem = module.lcp
# Simulate throwing an exception when the capa problem
# is asked to render itself as HTML
module.lcp.get_html = Mock(side_effect=Exception("Test"))
# Stub out the get_test_system rendering function
module.system.render_template = Mock(return_value="<div>Test Template HTML</div>")
# Turn off DEBUG
module.system.DEBUG = False
# Try to render the module with DEBUG turned off
html = module.get_problem_html()
self.assertTrue(html is not None)
# Check the rendering context
render_args, _ = module.system.render_template.call_args
context = render_args[1]
self.assertTrue("error" in context['problem']['html'])
# Expect that the module has created a new dummy problem with the error
self.assertNotEqual(original_problem, module.lcp)
def test_get_problem_html_error_w_debug(self):
"""
Test the html response when an error occurs with DEBUG on
"""
module = CapaFactory.create()
# Simulate throwing an exception when the capa problem
# is asked to render itself as HTML
error_msg = u"Superterrible error happened: ☠"
module.lcp.get_html = Mock(side_effect=Exception(error_msg))
# Stub out the get_test_system rendering function
module.system.render_template = Mock(return_value="<div>Test Template HTML</div>")
# Make sure DEBUG is on
module.system.DEBUG = True
# Try to render the module with DEBUG turned on
html = module.get_problem_html()
self.assertTrue(html is not None)
# Check the rendering context
render_args, _ = module.system.render_template.call_args
context = render_args[1]
self.assertTrue(error_msg in context['problem']['html'])
@ddt.data(
'false',
'true',
RANDOMIZATION.NEVER,
RANDOMIZATION.PER_STUDENT,
RANDOMIZATION.ALWAYS,
RANDOMIZATION.ONRESET
)
def test_random_seed_no_change(self, rerandomize):
# Run the test for each possible rerandomize value
module = CapaFactory.create(rerandomize=rerandomize)
# Get the seed
# By this point, the module should have persisted the seed
seed = module.seed
self.assertTrue(seed is not None)
# If we're not rerandomizing, the seed is always set
# to the same value (1)
if rerandomize == RANDOMIZATION.NEVER:
self.assertEqual(seed, 1,
msg="Seed should always be 1 when rerandomize='%s'" % rerandomize)
# Check the problem
get_request_dict = {CapaFactory.input_key(): '3.14'}
module.check_problem(get_request_dict)
# Expect that the seed is the same
self.assertEqual(seed, module.seed)
# Save the problem
module.save_problem(get_request_dict)
# Expect that the seed is the same
self.assertEqual(seed, module.seed)
@ddt.data(
'false',
'true',
RANDOMIZATION.NEVER,
RANDOMIZATION.PER_STUDENT,
RANDOMIZATION.ALWAYS,
RANDOMIZATION.ONRESET
)
def test_random_seed_with_reset(self, rerandomize):
"""
Run the test for each possible rerandomize value
"""
def _reset_and_get_seed(module):
"""
Reset the XModule and return the module's seed
"""
# Simulate submitting an attempt
            # We need to do this, or reset_problem() will fail because it
            # won't re-randomize until the problem has been submitted.
module.done = True
# Reset the problem
module.reset_problem({})
# Return the seed
return module.seed
def _retry_and_check(num_tries, test_func):
'''
Returns True if *test_func* was successful
(returned True) within *num_tries* attempts
*test_func* must be a function
of the form test_func() -> bool
'''
success = False
for i in range(num_tries):
if test_func() is True:
success = True
break
return success
module = CapaFactory.create(rerandomize=rerandomize, done=True)
# Get the seed
# By this point, the module should have persisted the seed
seed = module.seed
self.assertTrue(seed is not None)
# We do NOT want the seed to reset if rerandomize
# is set to 'never' -- it should still be 1
# The seed also stays the same if we're randomizing
# 'per_student': the same student should see the same problem
if rerandomize in [RANDOMIZATION.NEVER,
'false',
RANDOMIZATION.PER_STUDENT]:
self.assertEqual(seed, _reset_and_get_seed(module))
# Otherwise, we expect the seed to change
# to another valid seed
else:
# Since there's a small chance we might get the
# same seed again, give it 5 chances
# to generate a different seed
success = _retry_and_check(5, lambda: _reset_and_get_seed(module) != seed)
self.assertTrue(module.seed is not None)
msg = 'Could not get a new seed from reset after 5 tries'
self.assertTrue(success, msg)
@ddt.data(
'false',
'true',
RANDOMIZATION.NEVER,
RANDOMIZATION.PER_STUDENT,
RANDOMIZATION.ALWAYS,
RANDOMIZATION.ONRESET
)
def test_random_seed_with_reset_question_unsubmitted(self, rerandomize):
"""
Run the test for each possible rerandomize value
"""
def _reset_and_get_seed(module):
"""
Reset the XModule and return the module's seed
"""
# Reset the problem
# By default, the problem is instantiated as unsubmitted
module.reset_problem({})
# Return the seed
return module.seed
module = CapaFactory.create(rerandomize=rerandomize, done=False)
# Get the seed
# By this point, the module should have persisted the seed
seed = module.seed
self.assertTrue(seed is not None)
        # The seed should never change because the student has not submitted the problem yet
self.assertEqual(seed, _reset_and_get_seed(module))
@ddt.data(
RANDOMIZATION.ALWAYS,
RANDOMIZATION.PER_STUDENT,
'true',
RANDOMIZATION.ONRESET
)
def test_random_seed_bins(self, rerandomize):
# Assert that we are limiting the number of possible seeds.
# Get a bunch of seeds, they should all be in 0-999.
i = 200
while i > 0:
module = CapaFactory.create(rerandomize=rerandomize)
assert 0 <= module.seed < 1000
i -= 1
@patch('xmodule.capa_base.log')
@patch('xmodule.capa_base.Progress')
def test_get_progress_error(self, mock_progress, mock_log):
"""
Check that an exception given in `Progress` produces a `log.exception` call.
"""
error_types = [TypeError, ValueError]
for error_type in error_types:
mock_progress.side_effect = error_type
module = CapaFactory.create()
self.assertIsNone(module.get_progress())
mock_log.exception.assert_called_once_with('Got bad progress')
mock_log.reset_mock()
@patch('xmodule.capa_base.Progress')
def test_get_progress_no_error_if_weight_zero(self, mock_progress):
"""
Check that if the weight is 0 get_progress does not try to create a Progress object.
"""
mock_progress.return_value = True
module = CapaFactory.create()
module.weight = 0
progress = module.get_progress()
self.assertIsNone(progress)
self.assertFalse(mock_progress.called)
@patch('xmodule.capa_base.Progress')
def test_get_progress_calculate_progress_fraction(self, mock_progress):
"""
Check that score and total are calculated correctly for the progress fraction.
"""
module = CapaFactory.create()
module.weight = 1
module.get_progress()
mock_progress.assert_called_with(0, 1)
other_module = CapaFactory.create(correct=True)
other_module.weight = 1
other_module.get_progress()
mock_progress.assert_called_with(1, 1)
def test_get_html(self):
"""
Check that get_html() calls get_progress() with no arguments.
"""
module = CapaFactory.create()
module.get_progress = Mock(wraps=module.get_progress)
module.get_html()
module.get_progress.assert_called_once_with()
def test_get_problem(self):
"""
Check that get_problem() returns the expected dictionary.
"""
module = CapaFactory.create()
self.assertEquals(module.get_problem("data"), {'html': module.get_problem_html(encapsulate=False)})
# Standard question with shuffle="true" used by a few tests
common_shuffle_xml = textwrap.dedent("""
<problem>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" shuffle="true">
<choice correct="false">Apple</choice>
<choice correct="false">Banana</choice>
<choice correct="false">Chocolate</choice>
<choice correct ="true">Donut</choice>
</choicegroup>
</multiplechoiceresponse>
</problem>
""")
def test_check_unmask(self):
"""
Check that shuffle unmasking is plumbed through: when check_problem is called,
unmasked names should appear in the track_function event_info.
"""
module = CapaFactory.create(xml=self.common_shuffle_xml)
with patch.object(module.runtime, 'track_function') as mock_track_function:
get_request_dict = {CapaFactory.input_key(): 'choice_3'} # the correct choice
module.check_problem(get_request_dict)
mock_call = mock_track_function.mock_calls[0]
event_info = mock_call[1][1]
self.assertEqual(event_info['answers'][CapaFactory.answer_key()], 'choice_3')
# 'permutation' key added to record how problem was shown
self.assertEquals(event_info['permutation'][CapaFactory.answer_key()],
('shuffle', ['choice_3', 'choice_1', 'choice_2', 'choice_0']))
self.assertEquals(event_info['success'], 'correct')
@unittest.skip("masking temporarily disabled")
def test_save_unmask(self):
"""On problem save, unmasked data should appear on track_function."""
module = CapaFactory.create(xml=self.common_shuffle_xml)
with patch.object(module.runtime, 'track_function') as mock_track_function:
get_request_dict = {CapaFactory.input_key(): 'mask_0'}
module.save_problem(get_request_dict)
mock_call = mock_track_function.mock_calls[0]
event_info = mock_call[1][1]
self.assertEquals(event_info['answers'][CapaFactory.answer_key()], 'choice_2')
self.assertIsNotNone(event_info['permutation'][CapaFactory.answer_key()])
@unittest.skip("masking temporarily disabled")
def test_reset_unmask(self):
"""On problem reset, unmask names should appear track_function."""
module = CapaFactory.create(xml=self.common_shuffle_xml)
get_request_dict = {CapaFactory.input_key(): 'mask_0'}
module.check_problem(get_request_dict)
# On reset, 'old_state' should use unmasked names
with patch.object(module.runtime, 'track_function') as mock_track_function:
module.reset_problem(None)
mock_call = mock_track_function.mock_calls[0]
event_info = mock_call[1][1]
self.assertEquals(mock_call[1][0], 'reset_problem')
self.assertEquals(event_info['old_state']['student_answers'][CapaFactory.answer_key()], 'choice_2')
self.assertIsNotNone(event_info['permutation'][CapaFactory.answer_key()])
@unittest.skip("masking temporarily disabled")
def test_rescore_unmask(self):
"""On problem rescore, unmasked names should appear on track_function."""
module = CapaFactory.create(xml=self.common_shuffle_xml)
get_request_dict = {CapaFactory.input_key(): 'mask_0'}
module.check_problem(get_request_dict)
# On rescore, state/student_answers should use unmasked names
with patch.object(module.runtime, 'track_function') as mock_track_function:
module.rescore_problem()
mock_call = mock_track_function.mock_calls[0]
event_info = mock_call[1][1]
self.assertEquals(mock_call[1][0], 'problem_rescore')
self.assertEquals(event_info['state']['student_answers'][CapaFactory.answer_key()], 'choice_2')
self.assertIsNotNone(event_info['permutation'][CapaFactory.answer_key()])
def test_check_unmask_answerpool(self):
"""Check answer-pool question track_function uses unmasked names"""
xml = textwrap.dedent("""
<problem>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" answer-pool="4">
<choice correct="false">Apple</choice>
<choice correct="false">Banana</choice>
<choice correct="false">Chocolate</choice>
<choice correct ="true">Donut</choice>
</choicegroup>
</multiplechoiceresponse>
</problem>
""")
module = CapaFactory.create(xml=xml)
with patch.object(module.runtime, 'track_function') as mock_track_function:
get_request_dict = {CapaFactory.input_key(): 'choice_2'} # mask_X form when masking enabled
module.check_problem(get_request_dict)
mock_call = mock_track_function.mock_calls[0]
event_info = mock_call[1][1]
self.assertEqual(event_info['answers'][CapaFactory.answer_key()], 'choice_2')
# 'permutation' key added to record how problem was shown
self.assertEquals(event_info['permutation'][CapaFactory.answer_key()],
('answerpool', ['choice_1', 'choice_3', 'choice_2', 'choice_0']))
self.assertEquals(event_info['success'], 'incorrect')
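# Tests for CapaDescriptor below: mainly that problem_types reports the response
# tags actually present in the problem XML.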
@ddt.ddt
class CapaDescriptorTest(unittest.TestCase):
def _create_descriptor(self, xml):
""" Creates a CapaDescriptor to run test against """
descriptor = CapaDescriptor(get_test_system(), scope_ids=1)
descriptor.data = xml
return descriptor
@ddt.data(*responsetypes.registry.registered_tags())
def test_all_response_types(self, response_tag):
""" Tests that every registered response tag is correctly returned """
xml = "<problem><{response_tag}></{response_tag}></problem>".format(response_tag=response_tag)
descriptor = self._create_descriptor(xml)
self.assertEquals(descriptor.problem_types, {response_tag})
def test_response_types_ignores_non_response_tags(self):
xml = textwrap.dedent("""
<problem>
<p>Label</p>
<div>Some comment</div>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" answer-pool="4">
<choice correct="false">Apple</choice>
<choice correct="false">Banana</choice>
<choice correct="false">Chocolate</choice>
<choice correct ="true">Donut</choice>
</choicegroup>
</multiplechoiceresponse>
</problem>
""")
descriptor = self._create_descriptor(xml)
self.assertEquals(descriptor.problem_types, {"multiplechoiceresponse"})
def test_response_types_multiple_tags(self):
xml = textwrap.dedent("""
<problem>
<p>Label</p>
<div>Some comment</div>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" answer-pool="1">
<choice correct ="true">Donut</choice>
</choicegroup>
</multiplechoiceresponse>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice" answer-pool="1">
<choice correct ="true">Buggy</choice>
</choicegroup>
</multiplechoiceresponse>
<optionresponse>
<optioninput label="Option" options="('1','2')" correct="2"></optioninput>
</optionresponse>
</problem>
""")
descriptor = self._create_descriptor(xml)
self.assertEquals(descriptor.problem_types, {"multiplechoiceresponse", "optionresponse"})
class ComplexEncoderTest(unittest.TestCase):
def test_default(self):
"""
Check that complex numbers can be encoded into JSON.
"""
complex_num = 1 - 1j
expected_str = '1-1*j'
json_str = json.dumps(complex_num, cls=ComplexEncoder)
self.assertEqual(expected_str, json_str[1:-1]) # ignore quotes
class TestProblemCheckTracking(unittest.TestCase):
"""
Ensure correct tracking information is included in events emitted during problem checks.
"""
def setUp(self):
super(TestProblemCheckTracking, self).setUp()
self.maxDiff = None
def test_choice_answer_text(self):
xml = """\
<problem display_name="Multiple Choice Questions">
<p>What color is the open ocean on a sunny day?</p>
<optionresponse>
<optioninput options="('yellow','blue','green')" correct="blue" label="What color is the open ocean on a sunny day?"/>
</optionresponse>
<p>Which piece of furniture is built for sitting?</p>
<multiplechoiceresponse>
<choicegroup type="MultipleChoice">
<choice correct="false"><text>a table</text></choice>
<choice correct="false"><text>a desk</text></choice>
<choice correct="true"><text>a chair</text></choice>
<choice correct="false"><text>a bookshelf</text></choice>
</choicegroup>
</multiplechoiceresponse>
<p>Which of the following are musical instruments?</p>
<choiceresponse>
<checkboxgroup direction="vertical" label="Which of the following are musical instruments?">
<choice correct="true">a piano</choice>
<choice correct="false">a tree</choice>
<choice correct="true">a guitar</choice>
<choice correct="false">a window</choice>
</checkboxgroup>
</choiceresponse>
</problem>
"""
# Whitespace screws up comparisons
xml = ''.join(line.strip() for line in xml.split('\n'))
factory = self.capa_factory_for_problem_xml(xml)
module = factory.create()
answer_input_dict = {
factory.input_key(2): 'blue',
factory.input_key(3): 'choice_0',
factory.input_key(4): ['choice_0', 'choice_1'],
}
event = self.get_event_for_answers(module, answer_input_dict)
self.assertEquals(event['submission'], {
factory.answer_key(2): {
'question': 'What color is the open ocean on a sunny day?',
'answer': 'blue',
'response_type': 'optionresponse',
'input_type': 'optioninput',
'correct': True,
'variant': '',
},
factory.answer_key(3): {
'question': '',
'answer': u'<text>a table</text>',
'response_type': 'multiplechoiceresponse',
'input_type': 'choicegroup',
'correct': False,
'variant': '',
},
factory.answer_key(4): {
'question': 'Which of the following are musical instruments?',
'answer': [u'a piano', u'a tree'],
'response_type': 'choiceresponse',
'input_type': 'checkboxgroup',
'correct': False,
'variant': '',
},
})
def capa_factory_for_problem_xml(self, xml):
class CustomCapaFactory(CapaFactory):
"""
A factory for creating a Capa problem with arbitrary xml.
"""
sample_problem_xml = textwrap.dedent(xml)
return CustomCapaFactory
def get_event_for_answers(self, module, answer_input_dict):
with patch.object(module.runtime, 'track_function') as mock_track_function:
module.check_problem(answer_input_dict)
self.assertEquals(len(mock_track_function.mock_calls), 1)
mock_call = mock_track_function.mock_calls[0]
event = mock_call[1][1]
return event
def test_numerical_textline(self):
factory = CapaFactory
module = factory.create()
answer_input_dict = {
factory.input_key(2): '3.14'
}
event = self.get_event_for_answers(module, answer_input_dict)
self.assertEquals(event['submission'], {
factory.answer_key(2): {
'question': '',
'answer': '3.14',
'response_type': 'numericalresponse',
'input_type': 'textline',
'correct': True,
'variant': '',
}
})
def test_multiple_inputs(self):
factory = self.capa_factory_for_problem_xml("""\
<problem display_name="Multiple Inputs">
<p>Choose the correct color</p>
<optionresponse>
<p>What color is the sky?</p>
<optioninput options="('yellow','blue','green')" correct="blue"/>
<p>What color are pine needles?</p>
<optioninput options="('yellow','blue','green')" correct="green"/>
</optionresponse>
</problem>
""")
module = factory.create()
answer_input_dict = {
factory.input_key(2, 1): 'blue',
factory.input_key(2, 2): 'yellow',
}
event = self.get_event_for_answers(module, answer_input_dict)
self.assertEquals(event['submission'], {
factory.answer_key(2, 1): {
'question': '',
'answer': 'blue',
'response_type': 'optionresponse',
'input_type': 'optioninput',
'correct': True,
'variant': '',
},
factory.answer_key(2, 2): {
'question': '',
'answer': 'yellow',
'response_type': 'optionresponse',
'input_type': 'optioninput',
'correct': False,
'variant': '',
},
})
def test_rerandomized_inputs(self):
factory = CapaFactory
module = factory.create(rerandomize=RANDOMIZATION.ALWAYS)
answer_input_dict = {
factory.input_key(2): '3.14'
}
event = self.get_event_for_answers(module, answer_input_dict)
self.assertEquals(event['submission'], {
factory.answer_key(2): {
'question': '',
'answer': '3.14',
'response_type': 'numericalresponse',
'input_type': 'textline',
'correct': True,
'variant': module.seed,
}
})
def test_file_inputs(self):
fnames = ["prog1.py", "prog2.py", "prog3.py"]
fpaths = [os.path.join(DATA_DIR, "capa", fname) for fname in fnames]
fileobjs = [open(fpath) for fpath in fpaths]
for fileobj in fileobjs:
self.addCleanup(fileobj.close)
factory = CapaFactoryWithFiles
module = factory.create()
# Mock the XQueueInterface.
xqueue_interface = XQueueInterface("http://example.com/xqueue", Mock())
xqueue_interface._http_post = Mock(return_value=(0, "ok")) # pylint: disable=protected-access
module.system.xqueue['interface'] = xqueue_interface
answer_input_dict = {
CapaFactoryWithFiles.input_key(response_num=2): fileobjs,
CapaFactoryWithFiles.input_key(response_num=3): 'None',
}
event = self.get_event_for_answers(module, answer_input_dict)
self.assertEquals(event['submission'], {
factory.answer_key(2): {
'question': '',
'answer': fpaths,
'response_type': 'coderesponse',
'input_type': 'filesubmission',
'correct': False,
'variant': '',
},
factory.answer_key(3): {
'answer': 'None',
'correct': True,
'question': '',
'response_type': 'customresponse',
'input_type': 'textline',
'variant': ''
}
})
def test_get_answer_with_jump_to_id_urls(self):
"""
Make sure replace_jump_to_id_urls() is called in get_answer.
"""
problem_xml = textwrap.dedent("""
<problem>
<p>What is 1+4?</p>
<numericalresponse answer="5">
<formulaequationinput />
</numericalresponse>
<solution>
<div class="detailed-solution">
<p>Explanation</p>
<a href="/jump_to_id/c0f8d54964bc44a4a1deb8ecce561ecd">here's the same link to the hint page.</a>
</div>
</solution>
</problem>
""")
data = dict()
problem = CapaFactory.create(showanswer='always', xml=problem_xml)
problem.runtime.replace_jump_to_id_urls = Mock()
problem.get_answer(data)
self.assertTrue(problem.runtime.replace_jump_to_id_urls.called)
| motion2015/a3 | common/lib/xmodule/xmodule/tests/test_capa_module.py | Python | agpl-3.0 | 80,522 |
# Import the necessary modules.
import sys
sys.path.append('/usr/local/lib/python2.7/dist-packages/')
import freenect
import cv2
import numpy as np
#function to get RGB image from kinect
def get_video():
array,_ = freenect.sync_get_video()
array = cv2.cvtColor(array,cv2.COLOR_RGB2BGR)
return array
#function to get depth image from kinect
def get_depth():
array,_ = freenect.sync_get_depth()
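    # sync_get_depth() returns 11-bit depth values; the uint8 cast below keeps
    # only the low byte so the result can be shown as an ordinary 8-bit image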
array = array.astype(np.uint8)
return array
if __name__ == "__main__":
while 1:
#get a frame from RGB camera
frame = get_video()
#get a frame from depth sensor
depth = get_depth()
#display RGB image
cv2.imshow('RGB image',frame)
#display depth image
cv2.imshow('Depth image',depth)
# quit program when 'esc' key is pressed
k = cv2.waitKey(5) & 0xFF
if k == 27:
break
cv2.destroyAllWindows()
| miltonsarria/dsp-python | images/kinect_test.py | Python | mit | 922 |
# (c) Nelen & Schuurmans. GPL licensed, see LICENSE.rst.
from django.conf.urls.defaults import include
from django.conf.urls.defaults import patterns
from django.conf.urls.defaults import url
from django.contrib import admin
from lizard_ui.urls import debugmode_urlpatterns
from lizard_box import views
admin.autodiscover()
urlpatterns = patterns(
'',
# url(r'^ui/', include('lizard_ui.urls')),
# url(r'^map/', include('lizard_map.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^layout/(?P<slug>[^/]+)/',
views.LayoutView.as_view(),
name="lizard_box_layoutview"),
url(r'^box/(?P<name>[^/]+)/',
views.BoxView.as_view(),
name="lizard_box_boxview"),
# url(r'^something_else/$',
# views.SomeClassBasedView.as_view(),
# name='name_it_too'),
)
urlpatterns += debugmode_urlpatterns()
| lizardsystem/lizard-box | lizard_box/urls.py | Python | gpl-3.0 | 871 |
"""Tests for the BitField class."""
import unittest
import bitfield
__author__ = 'Brian Landers <[email protected]>'
class BitFieldTest(unittest.TestCase):
"""Tests for the BitField class."""
def setUp(self):
self.bits = bitfield.BitField(36)
def test_constructor(self):
for i in xrange(0, 36):
self.assertFalse(self.bits.test(i))
def test_constructor_args(self):
with self.assertRaises(ValueError):
_ = bitfield.BitField(0)
with self.assertRaises(ValueError):
_ = bitfield.BitField(-1)
def test_set(self):
for i in xrange(0, 36):
self.assertFalse(self.bits.test(i))
self.bits.set(17)
for i in xrange(0, 17):
self.assertFalse(self.bits.test(i))
self.assertTrue(self.bits.test(17))
for i in xrange(18, 36):
self.assertFalse(self.bits.test(i))
def test_set_args(self):
with self.assertRaises(ValueError):
self.bits.set(-1)
with self.assertRaises(ValueError):
self.bits.set(36)
def test_clear(self):
self.bits.set(17)
self.assertTrue(self.bits.test(17))
self.bits.clear(17)
self.assertFalse(self.bits.test(17))
def test_clear_args(self):
with self.assertRaises(ValueError):
self.bits.clear(-1)
with self.assertRaises(ValueError):
self.bits.clear(36)
def test_toggle(self):
self.assertFalse(self.bits.test(17))
self.bits.toggle(17)
self.assertTrue(self.bits.test(17))
self.bits.toggle(17)
self.assertFalse(self.bits.test(17))
def test_toggle_args(self):
with self.assertRaises(ValueError):
self.bits.toggle(-1)
with self.assertRaises(ValueError):
self.bits.toggle(36)
if __name__ == '__main__':
unittest.main()
| Packetslave/bitfield | bitfield_test.py | Python | apache-2.0 | 1,913 |
import pytest
from pytorch_lightning import Trainer
from tests.helpers import BoringModel
@pytest.mark.parametrize(
["min_epochs", "max_epochs", "min_steps", "max_steps"],
[
(None, 3, None, None),
(None, None, None, 20),
(None, 3, None, 20),
(None, None, 10, 20),
(1, 3, None, None),
(1, None, None, 20),
(None, 3, 10, None),
],
)
def test_min_max_steps_epochs(tmpdir, min_epochs, max_epochs, min_steps, max_steps):
"""
Tests that max_steps can be used without max_epochs
"""
model = BoringModel()
trainer = Trainer(
default_root_dir=tmpdir,
min_epochs=min_epochs,
max_epochs=max_epochs,
min_steps=min_steps,
max_steps=max_steps,
weights_summary=None,
)
trainer.fit(model)
# check training stopped at max_epochs or max_steps
if trainer.max_steps and not trainer.max_epochs:
assert trainer.global_step == trainer.max_steps
| williamFalcon/pytorch-lightning | tests/trainer/flags/test_min_max_epochs.py | Python | apache-2.0 | 987 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#Language Detection based on unicode range
# Copyright 2008 Santhosh Thottingal <[email protected]>
# http://www.smc.org.in
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import string
def detect_lang(text):
"""
Detect the language of the given text using the unicode range.
This function can take a chunk of text and return a dictionary
containing word-language key-value pairs.
"""
words=text.split(" ")
word_count=len(words)
word_iter=0
word=""
result_dict=dict()
while word_iter < word_count:
word=words[word_iter]
if(word):
orig_word = word
            # strip punctuation before inspecting the letters
for punct in string.punctuation:
word = word.replace(punct," ")
length = len(word)
index = 0
            # scan left to right, skipping non-alphabetic characters; detection stops at the first matching range
while index < length:
letter=word[index]
if not letter.isalpha():
index=index+1
continue
if ((ord(letter) >= 0x0D00) & (ord(letter) <=0x0D7F)):
result_dict[orig_word]= "ml_IN"
break
if ((ord(letter) >= 0x0980) & (ord(letter) <= 0x09FF)):
result_dict[orig_word]= "bn_IN"
break
if ((ord(letter) >= 0x0900) & (ord(letter) <= 0x097F)):
result_dict[orig_word]= "hi_IN"
break
if ((ord(letter) >=0x0A80) & (ord(letter) <= 0x0AFF)):
result_dict[orig_word]= "gu_IN"
break
if ((ord(letter) >= 0x0A00) & (ord(letter) <=0x0A7F)):
result_dict[orig_word]= "pa_IN"
break
if ((ord(letter) >= 0x0C80) & (ord(letter) <=0x0CFF)):
result_dict[orig_word]= "kn_IN"
break
if ((ord(letter) >= 0x0B00) & (ord(letter) <= 0x0B7F)):
result_dict[orig_word]= "or_IN"
break
if ((ord(letter) >= 0x0B80) & (ord(letter) <= 0x0BFF)):
result_dict[orig_word]= "ta_IN"
break
if ((ord(letter) >= 0x0C00) & (ord(letter) <= 0x0C7F)):
result_dict[orig_word]= "te_IN"
break
if ((letter <= u'z')): #this is fallback case.
result_dict[orig_word]= "en_US"
break
index=index+1
word_iter=word_iter+1
return result_dict
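# Minimal usage sketch (the sample words are illustrative only):
#   detect_lang(u"hello \u0d2e\u0d32\u0d2f\u0d3e\u0d33\u0d02")
#   # -> {u'hello': 'en_US', u'\u0d2e\u0d32\u0d2f\u0d3e\u0d33\u0d02': 'ml_IN'}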
| smc/silpa | src/silpa/utils/langdetect.py | Python | agpl-3.0 | 3,409 |
import pygame.mixer as pm
from pygame_play_tone import Note
from time import sleep
#Default volume for Notes
DEFAULT_VOLUME=0.2
# Notes that can be called on, where C4 is middle C
C0 = 16.35
C0_SHARP = 17.32
D0 = 18.35
D0_SHARP = 19.45
E0 = 20.6
F0 = 21.83
F0_SHARP = 23.12
G0 = 24.5
G0_SHARP = 25.96
A0 = 27.5
A0_SHARP = 29.14
B0 = 30.87
C1 = 32.7
C1_SHARP = 34.65
D1 = 36.71
D1_SHARP = 38.89
E1 = 41.2
F1 = 43.65
F1_SHARP = 46.25
G1 = 49
G1_SHARP = 51.91
A1 = 55
A1_SHARP = 58.27
B1 = 61.74
C2 = 65.41
C2_SHARP = 69.3
D2 = 73.42
D2_SHARP = 77.78
E2 = 82.41
F2 = 87.31
F2_SHARP = 92.5
G2 = 98
G2_SHARP = 103.83
A2 = 110
A2_SHARP = 116.54
B2 = 123.47
C3 = 130.81
C3_SHARP = 138.59
D3 = 146.83
D3_SHARP = 155.56
E3 = 164.81
F3 = 174.61
F3_SHARP = 185
G3 = 196
G3_SHARP = 207.65
A3 = 220
A3_SHARP = 233.08
B3 = 246.94
C4 = 261.63
C4_SHARP = 277.18
D4 = 293.66
D4_SHARP = 311.13
E4 = 329.63
F4 = 349.23
F4_SHARP = 369.99
G4 = 392
G4_SHARP = 415.3
A4 = 440
A4_SHARP = 466.16
B4 = 493.88
C5 = 523.25
C5_SHARP = 554.37
D5 = 587.33
D5_SHARP = 622.25
E5 = 659.25
F5 = 698.46
F5_SHARP = 739.99
G5 = 783.99
G5_SHARP = 830.61
A5 = 880
A5_SHARP = 932.33
B5 = 987.77
C6 = 1046.5
C6_SHARP = 1108.73
D6 = 1174.66
D6_SHARP = 1244.51
E6 = 1318.51
F6 = 1396.91
F6_SHARP = 1479.98
G6 = 1567.98
G6_SHARP = 1661.22
A6 = 1760
A6_SHARP = 1864.66
B6 = 1975.53
def prepPlaying():
''' Initializes environment to play pygame noises '''
pm.pre_init(44100, -16, 1, 1024)
# pygame.init()
# pm.init() #Only works for non-Windows? #TODO Research this further to confirm
pm.init()
def playNote(note,time,volume=DEFAULT_VOLUME):
''' Plays a sound of a given frequency [note] in Hertz for duration
[time] in seconds at a particular volume, where [volume] is a
number between 0.0 and 1.0'''
sound = Note(note,volume)
sound.play(-1)
sleep(time)
sound.stop()
def blurNote(note,time,volume=DEFAULT_VOLUME,last_note=False):
''' Same as playNote, but will continue to play with other notes
that are not specified to stop. In order to stop blurring a
selection of notes together, have the last note be a playNote or
specify the last parameter [last_note] as True'''
sound = Note(note,volume)
sound.play(-1)
sleep(time)
if(last_note):
sound.stop()
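# Minimal usage sketch (hypothetical values):
#   prepPlaying()
#   playNote(A4, 0.5)                  # half a second of concert A, then silence
#   blurNote(C4, 0.5)                  # middle C keeps ringing into the next call
#   blurNote(E4, 0.5, last_note=True)  # E4 is stopped once its half second is up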
| cornell-cup/cs-minibot-platform | python-interface/src/MiniBotFramework/Sound/note_library.py | Python | apache-2.0 | 2,243 |
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 20 07:32:22 2015
@author: jmmauricio
"""
import os
directory = '/home/jmmauricio/Documents/public/jmmauricio6/RESEARCH/benches/ieee_118/ieee118_pvsync/code/ieee118_pv_50/simulations'
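# Rename every .py file in the given directory, replacing old_string with new_string in the file name.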
def rename_files(old_string, new_string,directory):
dir_list = os.listdir(directory)
for item in dir_list:
fileName, fileExtension = os.path.splitext(item)
if fileExtension == '.py':
new_fileName = fileName.replace(old_string,new_string)
print(fileName + ' -> ' + new_fileName)
print(os.path.join(directory,fileName + fileExtension))
os.rename(os.path.join(directory,fileName + fileExtension), os.path.join(directory,new_fileName+ fileExtension))
old_string = 'ieee118_50_pvs'
new_string = 'ieee118_50_pv'
rename_files(old_string, new_string, directory)
old_string = 'ieee118_50_pvs'
new_string = 'ieee118_50_pv'
old_string = 'ieee118_pvsync_50'
new_string = 'ieee118_pv_50'
'''
# old_string = 'ieee118_pvsync_base_load_trip_4'
# new_string = 'ieee118_pvsync_base_load_trip_80'
# from_bus_old = 7
# to_bus_old = 8
# from_bus_new = 30
# to_bus_new = 26
# old_string = 'ieee118_pvsync_base_fault_{from_bus_old}_line_{from_bus_old}_{to_bus_old}'.format(from_bus_old=from_bus_old,to_bus_old=to_bus_old)
# new_string = 'ieee118_pvsync_base_fault_{from_bus_new}_line_{from_bus_new}_{to_bus_new}'.format(from_bus_new=from_bus_new,to_bus_new=to_bus_new)
#
#
# directory = '/home/jmmauricio/Documents/public/jmmauricio6/RESEARCH/benches/ieee_118/ieee118_pvsync/doc/code/pvsync/ieee118_pvs_base'
#
# old_string = 'ieee12g'
# new_string = 'ieee118_pvs_base'
#
#
#
#
#
# old_string = 'ieee12g_10'
# new_string = 'ieee118_10'
#
# old_string = 'ieee12g_pvsync_base'
# new_string = 'ieee118_pvsync_base'
#
# #
# old_string = r'ieee_12_generic\code'
# new_string = r'ieee_118\ieee118_pvsync\code'
# #
# old_string = r'ieee12g_base_pvs_gen_trip_9'
# new_string = r'ieee118_base_pvs_gen_trip_89'
#
# old_string = r'ieee12g_base_pvs_load_trip_4'
# new_string = r'ieee118_base_pvs_load_trip_80'
#
#
# old_string = 'ieee12g_10_pvs_fault_{from_bus_old}_line_{from_bus_old}_{to_bus_old}'.format(from_bus_old=from_bus_old,to_bus_old=to_bus_old)
# new_string = 'ieee118_base_pvs_fault_{from_bus_new}_line_{from_bus_new}_{to_bus_new}'.format(from_bus_new=from_bus_new,to_bus_new=to_bus_new)
#
# #old_string = 'ieee12g_10_pvs_fault_38_line_38_65'
# #new_string = 'ieee118_base_pvs_fault_38_line_38_65'
#
old_string = 'gen_trip_12'
new_string = 'gen_trip_89'
# old_string = 'ieee118_10_pvs'
# new_string = 'ieee118_pvs_10'
'''
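# In-place rewrite: replace old_string with new_string inside every .py file of the directory.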
dir_list = os.listdir(directory)
for item in dir_list:
fileName, fileExtension = os.path.splitext(item)
if fileExtension == '.py':
file_path = os.path.join(directory,fileName + fileExtension)
file_obj = open(file_path, 'r')
string = file_obj.read()
string_new = string.replace(old_string, new_string)
file_obj.close()
print(string_new)
file_obj = open(file_path, 'w')
file_obj.write(string_new)
file_obj.close()
| jmmauricio/pypstools | dev/rename_files.py | Python | gpl-3.0 | 3,176 |
#!/usr/bin/env python
#
# urls.py
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
#
# Author: Pietro Delsante <[email protected]>
# www.certego.net
#
from django.conf.urls import patterns, url
from main import views
urlpatterns = patterns('',
url(r'^task/((?P<task_id>\d+)/)?$', views.task, name='task'),
url(r'^new_task/$', views.new_task, name='new_task'),
url(r'^$', views.tasks, name='tasks'),
)
| pdelsante/pcapoptikon | main/urls.py | Python | gpl-2.0 | 1,023 |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Get stats about your activity.
Example:
- my_activity.py for stats for the current week (last week on mondays).
- my_activity.py -Q for stats for last quarter.
- my_activity.py -Y for stats for this year.
- my_activity.py -b 4/5/12 for stats since 4/5/12.
- my_activity.py -b 4/5/12 -e 6/7/12 for stats between 4/5/12 and 6/7/12.
"""
# These services typically only provide a created time and a last modified time
# for each item for general queries. This is not enough to determine if there
# was activity in a given time period. So, we first query for all things created
# before end and modified after begin. Then, we get the details of each item and
# check those details to determine if there was activity in the given period.
# This means that query time scales mostly with (today() - begin).
import cookielib
import csv
import datetime
from datetime import datetime
from datetime import timedelta
from functools import partial
import json
import optparse
import os
import re
import subprocess
import sys
import urllib
import urllib2
import rietveld
from third_party import upload
# Imported later, once options are set.
webkitpy = None
try:
from dateutil.relativedelta import relativedelta # pylint: disable=F0401
except ImportError:
print 'python-dateutil package required'
exit(1)
# python-keyring provides easy access to the system keyring.
try:
import keyring # pylint: disable=W0611,F0401
except ImportError:
print 'Consider installing python-keyring'
def webkit_account(user):
if not webkitpy:
return None
committer_list = webkitpy.common.config.committers.CommitterList()
email = user + "@chromium.org"
return committer_list.account_by_email(email)
def user_to_webkit_email(user):
account = webkit_account(user)
if not account:
return None
return account.emails[0]
def user_to_webkit_owner_search(user):
account = webkit_account(user)
if not account:
return ['--author=%[email protected]' % user]
search = []
for email in account.emails:
search.append('--author=' + email)
# commit-bot is author for contributors who are not committers.
search.append('--grep=Patch by ' + account.full_name)
return search
def user_to_webkit_reviewer_search(user):
committer_list = webkitpy.common.config.committers.CommitterList()
email = user + "@chromium.org"
account = committer_list.reviewer_by_email(email)
if not account:
return []
return ['--grep=Reviewed by ' + account.full_name]
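# Rietveld code review instances to query; the 'auth' flag is filled in later by check_cookies().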
rietveld_instances = [
{
'url': 'codereview.chromium.org',
'shorturl': 'crrev.com',
'supports_owner_modified_query': True,
'requires_auth': False,
'email_domain': 'chromium.org',
},
{
'url': 'chromereviews.googleplex.com',
'shorturl': 'go/chromerev',
'supports_owner_modified_query': True,
'requires_auth': True,
'email_domain': 'google.com',
},
{
'url': 'codereview.appspot.com',
'supports_owner_modified_query': True,
'requires_auth': False,
'email_domain': 'chromium.org',
},
{
'url': 'breakpad.appspot.com',
'supports_owner_modified_query': False,
'requires_auth': False,
'email_domain': 'chromium.org',
},
]
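# Gerrit instances; each entry carries either a REST endpoint ('url') or an ssh host/port.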
gerrit_instances = [
{
'url': 'chromium-review.googlesource.com',
'shorturl': 'crosreview.com',
},
# TODO(deymo): chrome-internal-review requires login credentials. Enable once
# login support is added to this client. See crbug.com/281695.
#{
# 'url': 'chrome-internal-review.googlesource.com',
# 'shorturl': 'crosreview.com/i',
#},
{
'host': 'gerrit.chromium.org',
'port': 29418,
},
{
'host': 'gerrit-int.chromium.org',
'port': 29419,
},
]
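# Google Code projects whose issue trackers are searched for the user's issues.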
google_code_projects = [
{
'name': 'chromium',
'shorturl': 'crbug.com',
},
{
'name': 'chromium-os',
'shorturl': 'crosbug.com',
},
{
'name': 'chrome-os-partner',
},
{
'name': 'google-breakpad',
},
{
'name': 'gyp',
},
{
'name': 'skia',
},
]
bugzilla_instances = [
{
'search_url': 'http://bugs.webkit.org/buglist.cgi',
'url': 'wkb.ug',
'user_func': user_to_webkit_email,
},
]
git_instances = [
{
'option': 'webkit_repo',
'change_re':
r'git-svn-id: http://svn\.webkit\.org/repository/webkit/trunk@(\d*)',
'change_url': 'trac.webkit.org/changeset',
'review_re': r'https://bugs\.webkit\.org/show_bug\.cgi\?id\=(\d*)',
'review_url': 'wkb.ug',
'review_prop': 'webkit_bug_id',
'owner_search_func': user_to_webkit_owner_search,
'reviewer_search_func': user_to_webkit_reviewer_search,
},
]
# Uses ClientLogin to authenticate the user for Google Code issue trackers.
def get_auth_token(email):
# KeyringCreds will use the system keyring on the first try, and prompt for
# a password on the next ones.
creds = upload.KeyringCreds('code.google.com', 'code.google.com', email)
for _ in xrange(3):
email, password = creds.GetUserCredentials()
url = 'https://www.google.com/accounts/ClientLogin'
data = urllib.urlencode({
'Email': email,
'Passwd': password,
'service': 'code',
'source': 'chrome-my-activity',
'accountType': 'GOOGLE',
})
req = urllib2.Request(url, data=data, headers={'Accept': 'text/plain'})
try:
response = urllib2.urlopen(req)
response_body = response.read()
response_dict = dict(x.split('=')
for x in response_body.split('\n') if x)
return response_dict['Auth']
except urllib2.HTTPError, e:
print e
print 'Unable to authenticate to code.google.com.'
print 'Some issues may be missing.'
return None
def username(email):
"""Keeps the username of an email address."""
return email and email.split('@', 1)[0]
def datetime_to_midnight(date):
return date - timedelta(hours=date.hour, minutes=date.minute,
seconds=date.second, microseconds=date.microsecond)
def get_quarter_of(date):
begin = (datetime_to_midnight(date) -
relativedelta(months=(date.month % 3) - 1, days=(date.day - 1)))
return begin, begin + relativedelta(months=3)
def get_year_of(date):
begin = (datetime_to_midnight(date) -
relativedelta(months=(date.month - 1), days=(date.day - 1)))
return begin, begin + relativedelta(years=1)
def get_week_of(date):
begin = (datetime_to_midnight(date) - timedelta(days=date.weekday()))
return begin, begin + timedelta(days=7)
def get_yes_or_no(msg):
while True:
response = raw_input(msg + ' yes/no [no] ')
if response == 'y' or response == 'yes':
return True
elif not response or response == 'n' or response == 'no':
return False
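# Each service reports timestamps in its own format; the helpers below parse them into datetimes.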
def datetime_from_gerrit(date_string):
return datetime.strptime(date_string, '%Y-%m-%d %H:%M:%S.%f000')
def datetime_from_rietveld(date_string):
return datetime.strptime(date_string, '%Y-%m-%d %H:%M:%S.%f')
def datetime_from_google_code(date_string):
return datetime.strptime(date_string, '%Y-%m-%dT%H:%M:%S.%fZ')
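# Gathers one user's changes, reviews, and issues across the configured services for the requested time window.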
class MyActivity(object):
def __init__(self, options):
self.options = options
self.modified_after = options.begin
self.modified_before = options.end
self.user = options.user
self.changes = []
self.reviews = []
self.issues = []
self.check_cookies()
self.google_code_auth_token = None
self.webkit_repo = options.webkit_repo
if self.webkit_repo:
self.setup_webkit_info()
# Check the codereview cookie jar to determine which Rietveld instances to
# authenticate to.
def check_cookies(self):
cookie_file = os.path.expanduser('~/.codereview_upload_cookies')
cookie_jar = cookielib.MozillaCookieJar(cookie_file)
if not os.path.exists(cookie_file):
exit(1)
try:
cookie_jar.load()
print 'Found cookie file: %s' % cookie_file
except (cookielib.LoadError, IOError):
exit(1)
filtered_instances = []
def has_cookie(instance):
for cookie in cookie_jar:
if cookie.name == 'SACSID' and cookie.domain == instance['url']:
return True
if self.options.auth:
return get_yes_or_no('No cookie found for %s. Authorize for this '
'instance? (may require application-specific '
'password)' % instance['url'])
filtered_instances.append(instance)
return False
for instance in rietveld_instances:
instance['auth'] = has_cookie(instance)
if filtered_instances:
print ('No cookie found for the following Rietveld instance%s:' %
('s' if len(filtered_instances) > 1 else ''))
for instance in filtered_instances:
print '\t' + instance['url']
print 'Use --auth if you would like to authenticate to them.\n'
def rietveld_search(self, instance, owner=None, reviewer=None):
if instance['requires_auth'] and not instance['auth']:
return []
email = None if instance['auth'] else ''
remote = rietveld.Rietveld('https://' + instance['url'], email, None)
# See def search() in rietveld.py to see all the filters you can use.
query_modified_after = None
if instance['supports_owner_modified_query']:
query_modified_after = self.modified_after.strftime('%Y-%m-%d')
# Rietveld does not allow search by both created_before and modified_after.
# (And some instances don't allow search by both owner and modified_after)
owner_email = None
reviewer_email = None
if owner:
owner_email = owner + '@' + instance['email_domain']
if reviewer:
reviewer_email = reviewer + '@' + instance['email_domain']
issues = remote.search(
owner=owner_email,
reviewer=reviewer_email,
modified_after=query_modified_after,
with_messages=True)
issues = filter(
lambda i: (datetime_from_rietveld(i['created']) < self.modified_before),
issues)
issues = filter(
lambda i: (datetime_from_rietveld(i['modified']) > self.modified_after),
issues)
should_filter_by_user = True
issues = map(partial(self.process_rietveld_issue, instance), issues)
issues = filter(
partial(self.filter_issue, should_filter_by_user=should_filter_by_user),
issues)
issues = sorted(issues, key=lambda i: i['modified'], reverse=True)
return issues
def process_rietveld_issue(self, instance, issue):
ret = {}
ret['owner'] = issue['owner_email']
ret['author'] = ret['owner']
ret['reviewers'] = set(issue['reviewers'])
shorturl = instance['url']
if 'shorturl' in instance:
shorturl = instance['shorturl']
ret['review_url'] = 'http://%s/%d' % (shorturl, issue['issue'])
# Rietveld sometimes has '\r\n' instead of '\n'.
ret['header'] = issue['description'].replace('\r', '').split('\n')[0]
ret['modified'] = datetime_from_rietveld(issue['modified'])
ret['created'] = datetime_from_rietveld(issue['created'])
ret['replies'] = self.process_rietveld_replies(issue['messages'])
return ret
@staticmethod
def process_rietveld_replies(replies):
ret = []
for reply in replies:
r = {}
r['author'] = reply['sender']
r['created'] = datetime_from_rietveld(reply['date'])
r['content'] = ''
ret.append(r)
return ret
@staticmethod
def gerrit_changes_over_ssh(instance, filters):
# See https://review.openstack.org/Documentation/cmd-query.html
# Gerrit doesn't allow filtering by created time, only modified time.
gquery_cmd = ['ssh', '-p', str(instance['port']), instance['host'],
'gerrit', 'query',
'--format', 'JSON',
'--comments',
'--'] + filters
(stdout, _) = subprocess.Popen(gquery_cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE).communicate()
# Drop the last line of the output with the stats.
issues = stdout.splitlines()[:-1]
return map(json.loads, issues)
@staticmethod
def gerrit_changes_over_rest(instance, filters):
# See https://gerrit-review.googlesource.com/Documentation/rest-api.html
# Gerrit doesn't allow filtering by created time, only modified time.
args = urllib.urlencode([
('q', ' '.join(filters)),
('o', 'MESSAGES'),
('o', 'LABELS')])
rest_url = 'https://%s/changes/?%s' % (instance['url'], args)
req = urllib2.Request(rest_url, headers={'Accept': 'text/plain'})
try:
response = urllib2.urlopen(req)
stdout = response.read()
except urllib2.HTTPError, e:
print 'ERROR: Looking up %r: %s' % (rest_url, e)
return []
# Check that the returned JSON starts with the right marker.
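    # Gerrit prefixes its JSON responses with ")]}'" as an XSSI guard; it is stripped before parsing.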
if stdout[:5] != ")]}'\n":
print 'ERROR: Marker not found on REST API response: %r' % stdout[:5]
return []
return json.loads(stdout[5:])
def gerrit_search(self, instance, owner=None, reviewer=None):
max_age = datetime.today() - self.modified_after
max_age = max_age.days * 24 * 3600 + max_age.seconds
user_filter = 'owner:%s' % owner if owner else 'reviewer:%s' % reviewer
filters = ['-age:%ss' % max_age, user_filter]
# Determine the gerrit interface to use: SSH or REST API:
if 'host' in instance:
issues = self.gerrit_changes_over_ssh(instance, filters)
issues = [self.process_gerrit_ssh_issue(instance, issue)
for issue in issues]
elif 'url' in instance:
issues = self.gerrit_changes_over_rest(instance, filters)
issues = [self.process_gerrit_rest_issue(instance, issue)
for issue in issues]
else:
raise Exception('Invalid gerrit_instances configuration.')
# TODO(cjhopman): should we filter abandoned changes?
issues = filter(self.filter_issue, issues)
issues = sorted(issues, key=lambda i: i['modified'], reverse=True)
return issues
def process_gerrit_ssh_issue(self, instance, issue):
ret = {}
ret['review_url'] = issue['url']
if 'shorturl' in instance:
ret['review_url'] = 'http://%s/%s' % (instance['shorturl'],
issue['number'])
ret['header'] = issue['subject']
ret['owner'] = issue['owner']['email']
ret['author'] = ret['owner']
ret['created'] = datetime.fromtimestamp(issue['createdOn'])
ret['modified'] = datetime.fromtimestamp(issue['lastUpdated'])
if 'comments' in issue:
ret['replies'] = self.process_gerrit_ssh_issue_replies(issue['comments'])
else:
ret['replies'] = []
ret['reviewers'] = set(r['author'] for r in ret['replies'])
ret['reviewers'].discard(ret['author'])
return ret
@staticmethod
def process_gerrit_ssh_issue_replies(replies):
ret = []
replies = filter(lambda r: 'email' in r['reviewer'], replies)
for reply in replies:
ret.append({
'author': reply['reviewer']['email'],
'created': datetime.fromtimestamp(reply['timestamp']),
'content': '',
})
return ret
def process_gerrit_rest_issue(self, instance, issue):
ret = {}
ret['review_url'] = 'https://%s/%s' % (instance['url'], issue['_number'])
if 'shorturl' in instance:
# TODO(deymo): Move this short link to https once crosreview.com supports
# it.
ret['review_url'] = 'http://%s/%s' % (instance['shorturl'],
issue['_number'])
ret['header'] = issue['subject']
ret['owner'] = issue['owner']['email']
ret['author'] = ret['owner']
ret['created'] = datetime_from_gerrit(issue['created'])
ret['modified'] = datetime_from_gerrit(issue['updated'])
if 'messages' in issue:
ret['replies'] = self.process_gerrit_rest_issue_replies(issue['messages'])
else:
ret['replies'] = []
ret['reviewers'] = set(r['author'] for r in ret['replies'])
ret['reviewers'].discard(ret['author'])
return ret
@staticmethod
def process_gerrit_rest_issue_replies(replies):
ret = []
replies = filter(lambda r: 'email' in r['author'], replies)
for reply in replies:
ret.append({
'author': reply['author']['email'],
'created': datetime_from_gerrit(reply['date']),
'content': reply['message'],
})
return ret
def google_code_issue_search(self, instance):
time_format = '%Y-%m-%dT%T'
# See http://code.google.com/p/support/wiki/IssueTrackerAPI
# q=<owner>@chromium.org does a full text search for <owner>@chromium.org.
# This will accept the issue if owner is the owner or in the cc list. Might
# have some false positives, though.
# Don't filter normally on modified_before because it can filter out things
# that were modified in the time period and then modified again after it.
gcode_url = ('https://code.google.com/feeds/issues/p/%s/issues/full' %
instance['name'])
gcode_data = urllib.urlencode({
'alt': 'json',
'max-results': '100000',
'q': '%s' % self.user,
'published-max': self.modified_before.strftime(time_format),
'updated-min': self.modified_after.strftime(time_format),
})
opener = urllib2.build_opener()
if self.google_code_auth_token:
opener.addheaders = [('Authorization', 'GoogleLogin auth=%s' %
self.google_code_auth_token)]
gcode_json = None
try:
gcode_get = opener.open(gcode_url + '?' + gcode_data)
gcode_json = json.load(gcode_get)
gcode_get.close()
except urllib2.HTTPError, _:
print 'Unable to access ' + instance['name'] + ' issue tracker.'
if not gcode_json or 'entry' not in gcode_json['feed']:
return []
issues = gcode_json['feed']['entry']
issues = map(partial(self.process_google_code_issue, instance), issues)
issues = filter(self.filter_issue, issues)
issues = sorted(issues, key=lambda i: i['modified'], reverse=True)
return issues
def process_google_code_issue(self, project, issue):
ret = {}
ret['created'] = datetime_from_google_code(issue['published']['$t'])
ret['modified'] = datetime_from_google_code(issue['updated']['$t'])
ret['owner'] = ''
if 'issues$owner' in issue:
ret['owner'] = issue['issues$owner']['issues$username']['$t']
ret['author'] = issue['author'][0]['name']['$t']
if 'shorturl' in project:
issue_id = issue['id']['$t']
issue_id = issue_id[issue_id.rfind('/') + 1:]
ret['url'] = 'http://%s/%d' % (project['shorturl'], int(issue_id))
else:
issue_url = issue['link'][1]
if issue_url['rel'] != 'alternate':
raise RuntimeError
ret['url'] = issue_url['href']
ret['header'] = issue['title']['$t']
ret['replies'] = self.get_google_code_issue_replies(issue)
return ret
def get_google_code_issue_replies(self, issue):
"""Get all the comments on the issue."""
replies_url = issue['link'][0]
if replies_url['rel'] != 'replies':
raise RuntimeError
replies_data = urllib.urlencode({
'alt': 'json',
'fields': 'entry(published,author,content)',
})
opener = urllib2.build_opener()
opener.addheaders = [('Authorization', 'GoogleLogin auth=%s' %
self.google_code_auth_token)]
try:
replies_get = opener.open(replies_url['href'] + '?' + replies_data)
except urllib2.HTTPError, _:
return []
replies_json = json.load(replies_get)
replies_get.close()
return self.process_google_code_issue_replies(replies_json)
@staticmethod
def process_google_code_issue_replies(replies):
if 'entry' not in replies['feed']:
return []
ret = []
for entry in replies['feed']['entry']:
e = {}
e['created'] = datetime_from_google_code(entry['published']['$t'])
e['content'] = entry['content']['$t']
e['author'] = entry['author'][0]['name']['$t']
ret.append(e)
return ret
@staticmethod
def git_cmd(repo, *args):
cmd = ['git', '--git-dir=%s/.git' % repo]
cmd.extend(args)
[stdout, _] = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE).communicate()
lines = str(stdout).split('\n')[:-1]
return lines
def git_search(self, instance, owner=None, reviewer=None):
repo = getattr(self, instance['option'])
if not repo:
return []
search = []
if owner:
search.extend(instance['owner_search_func'](owner))
if reviewer:
search.extend(instance['reviewer_search_func'](reviewer))
if not len(search):
return []
self.git_cmd(repo, 'fetch', 'origin')
time_format = '%Y-%m-%d %H:%M:%S'
log_args = [
'--after=' + self.modified_after.strftime(time_format),
'--before=' + self.modified_before.strftime(time_format),
'--format=%H'
]
commits = set()
for query in search:
query_args = [query]
query_args.extend(log_args)
commits |= set(self.git_cmd(repo, 'log', 'origin/master', *query_args))
ret = []
for commit in commits:
output = self.git_cmd(repo, 'log', commit + "^!", "--format=%cn%n%cd%n%B")
author = output[0]
date = datetime.strptime(output[1], "%a %b %d %H:%M:%S %Y +0000")
processed = self.process_git_commit(instance, author, date, output[2:])
if processed:
ret.append(processed)
ret = sorted(ret, key=lambda i: i['modified'], reverse=True)
return ret
@staticmethod
def process_git_commit(instance, author, date, log):
ret = {}
ret['owner'] = author
ret['author'] = author
ret['modified'] = date
ret['created'] = date
ret['header'] = log[0]
reviews = []
reviewers = []
changes = []
for line in log:
match = re.match(r'Reviewed by ([^.]*)', line)
if match:
reviewers.append(match.group(1))
if instance['review_re']:
match = re.match(instance['review_re'], line)
if match:
reviews.append(int(match.group(1)))
if instance['change_re']:
match = re.match(instance['change_re'], line)
if match:
changes.append(int(match.group(1)))
committer_list = webkitpy.common.config.committers.CommitterList()
ret['reviewers'] = set(
(committer_list.contributor_by_name(r).emails[0] for r in reviewers))
# Reviews more useful than change link itself, but tricky if multiple
# Reviews == bugs for WebKit changes
if len(reviews) == 1:
url = 'http://%s/%d' % (instance['review_url'], reviews[0])
if instance['review_prop']:
ret[instance['review_prop']] = reviews[0]
elif len(changes) == 1:
url = 'http://%s/%d' % (instance['change_url'], changes[0])
else:
# Couldn't find anything.
return None
ret['review_url'] = url
return ret
def bugzilla_issues(self, instance, user):
if instance['user_func']:
user = instance['user_func'](user)
if not user:
return []
# This search is a little iffy, as it returns any bug that has been
# modified over a time period in any way and that a user has ever commented
# on, but that's the best that Bugzilla can get us. Oops.
commented = { 'emaillongdesc1': 1 }
issues = self.bugzilla_search(instance, user, commented)
issues = filter(lambda issue: issue['owner'] != user, issues)
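    # Also pick up bugs the user reported (created) during the time period.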
reported = { 'emailreporter1': 1, 'chfield': '[Bug creation]' }
issues.extend(self.bugzilla_search(instance, user, reported))
# Remove duplicates by bug id
seen = {}
pruned = []
for issue in issues:
bug_id = issue['webkit_bug_id']
if bug_id in seen:
continue
seen[bug_id] = True
pruned.append(issue)
# Bugzilla has no modified time, so sort by id?
    pruned = sorted(pruned, key=lambda i: i['webkit_bug_id'])
    return pruned
def bugzilla_search(self, instance, user, params):
time_format = '%Y-%m-%d'
values = {
'chfieldfrom': self.modified_after.strftime(time_format),
'chfieldto': self.modified_before.strftime(time_format),
'ctype': 'csv',
'emailtype1': 'substring',
'email1': '%s' % user,
}
values.update(params)
# Must be GET not POST
data = urllib.urlencode(values)
req = urllib2.Request("%s?%s" % (instance['search_url'], data))
response = urllib2.urlopen(req)
reader = csv.reader(response)
reader.next() # skip the header line
issues = map(partial(self.process_bugzilla_issue, instance), reader)
return issues
@staticmethod
def process_bugzilla_issue(instance, issue):
bug_id, owner, desc = int(issue[0]), issue[4], issue[7]
ret = {}
ret['owner'] = owner
ret['author'] = owner
ret['review_url'] = 'http://%s/%d' % (instance['url'], bug_id)
ret['url'] = ret['review_url']
ret['header'] = desc
ret['webkit_bug_id'] = bug_id
return ret
def setup_webkit_info(self):
assert(self.webkit_repo)
git_dir = os.path.normpath(self.webkit_repo + "/.git")
if not os.path.exists(git_dir):
print "%s doesn't exist, skipping WebKit checks." % git_dir
self.webkit_repo = None
return
try:
self.git_cmd(self.webkit_repo, "fetch", "origin")
except subprocess.CalledProcessError:
print "Failed to update WebKit repo, skipping WebKit checks."
self.webkit_repo = None
return
path = "Tools/Scripts"
full_path = os.path.normpath("%s/%s" % (self.options.webkit_repo, path))
sys.path.append(full_path)
try:
global webkitpy
webkitpy = __import__('webkitpy.common.config.committers')
except ImportError:
print "Failed to import WebKit committer list, skipping WebKit checks."
self.webkit_repo = None
return
if not webkit_account(self.user):
email = self.user + "@chromium.org"
print "No %s in committers.py, skipping WebKit checks." % email
self.webkit_repo = None
def print_change(self, change):
optional_values = {
'reviewers': ', '.join(change['reviewers'])
}
self.print_generic(self.options.output_format,
self.options.output_format_changes,
change['header'],
change['review_url'],
change['author'],
optional_values)
def print_issue(self, issue):
optional_values = {
'owner': issue['owner'],
}
self.print_generic(self.options.output_format,
self.options.output_format_issues,
issue['header'],
issue['url'],
issue['author'],
optional_values)
def print_review(self, review):
self.print_generic(self.options.output_format,
self.options.output_format_reviews,
review['header'],
review['review_url'],
review['author'])
@staticmethod
def print_generic(default_fmt, specific_fmt,
title, url, author,
optional_values=None):
output_format = specific_fmt if specific_fmt is not None else default_fmt
output_format = unicode(output_format)
required_values = {
'title': title,
'url': url,
'author': author,
}
# Merge required and optional values.
if optional_values is not None:
values = dict(required_values.items() + optional_values.items())
else:
values = required_values
print output_format.format(**values)
def filter_issue(self, issue, should_filter_by_user=True):
def maybe_filter_username(email):
return not should_filter_by_user or username(email) == self.user
if (maybe_filter_username(issue['author']) and
self.filter_modified(issue['created'])):
return True
if (maybe_filter_username(issue['owner']) and
(self.filter_modified(issue['created']) or
self.filter_modified(issue['modified']))):
return True
for reply in issue['replies']:
if self.filter_modified(reply['created']):
if not should_filter_by_user:
break
if (username(reply['author']) == self.user
or (self.user + '@') in reply['content']):
break
else:
return False
return True
def filter_modified(self, modified):
return self.modified_after < modified and modified < self.modified_before
def auth_for_changes(self):
#TODO(cjhopman): Move authentication check for getting changes here.
pass
def auth_for_reviews(self):
# Reviews use all the same instances as changes so no authentication is
# required.
pass
def auth_for_issues(self):
self.google_code_auth_token = (
get_auth_token(self.options.local_user + '@chromium.org'))
def get_changes(self):
for instance in rietveld_instances:
self.changes += self.rietveld_search(instance, owner=self.user)
for instance in gerrit_instances:
self.changes += self.gerrit_search(instance, owner=self.user)
for instance in git_instances:
self.changes += self.git_search(instance, owner=self.user)
def print_changes(self):
if self.changes:
print '\nChanges:'
for change in self.changes:
self.print_change(change)
def get_reviews(self):
for instance in rietveld_instances:
self.reviews += self.rietveld_search(instance, reviewer=self.user)
for instance in gerrit_instances:
reviews = self.gerrit_search(instance, reviewer=self.user)
reviews = filter(lambda r: not username(r['owner']) == self.user, reviews)
self.reviews += reviews
for instance in git_instances:
self.reviews += self.git_search(instance, reviewer=self.user)
def print_reviews(self):
if self.reviews:
print '\nReviews:'
for review in self.reviews:
self.print_review(review)
def get_issues(self):
for project in google_code_projects:
self.issues += self.google_code_issue_search(project)
for instance in bugzilla_instances:
self.issues += self.bugzilla_issues(instance, self.user)
def print_issues(self):
if self.issues:
print '\nIssues:'
for issue in self.issues:
self.print_issue(issue)
def process_activities(self):
# If a webkit bug was a review, don't list it as an issue.
ids = {}
for review in self.reviews + self.changes:
if 'webkit_bug_id' in review:
ids[review['webkit_bug_id']] = True
def duplicate_issue(issue):
if 'webkit_bug_id' not in issue:
return False
return issue['webkit_bug_id'] in ids
self.issues = filter(lambda issue: not duplicate_issue(issue), self.issues)
def print_activity(self):
self.print_changes()
self.print_reviews()
self.print_issues()
def main():
# Silence upload.py.
rietveld.upload.verbosity = 0
parser = optparse.OptionParser(description=sys.modules[__name__].__doc__)
parser.add_option(
'-u', '--user', metavar='<email>',
default=os.environ.get('USER'),
help='Filter on user, default=%default')
parser.add_option(
'--webkit_repo', metavar='<dir>',
default='%s' % os.environ.get('WEBKIT_DIR'),
help='Local path to WebKit repository, default=%default')
parser.add_option(
'-b', '--begin', metavar='<date>',
help='Filter issues created after the date')
parser.add_option(
'-e', '--end', metavar='<date>',
help='Filter issues created before the date')
quarter_begin, quarter_end = get_quarter_of(datetime.today() -
relativedelta(months=2))
parser.add_option(
'-Q', '--last_quarter', action='store_true',
help='Use last quarter\'s dates, e.g. %s to %s' % (
quarter_begin.strftime('%Y-%m-%d'), quarter_end.strftime('%Y-%m-%d')))
parser.add_option(
'-Y', '--this_year', action='store_true',
help='Use this year\'s dates')
parser.add_option(
'-w', '--week_of', metavar='<date>',
help='Show issues for week of the date')
parser.add_option(
'-a', '--auth',
action='store_true',
help='Ask to authenticate for instances with no auth cookie')
activity_types_group = optparse.OptionGroup(parser, 'Activity Types',
'By default, all activity will be looked up and '
'printed. If any of these are specified, only '
'those specified will be searched.')
activity_types_group.add_option(
'-c', '--changes',
action='store_true',
help='Show changes.')
activity_types_group.add_option(
'-i', '--issues',
action='store_true',
help='Show issues.')
activity_types_group.add_option(
'-r', '--reviews',
action='store_true',
help='Show reviews.')
parser.add_option_group(activity_types_group)
output_format_group = optparse.OptionGroup(parser, 'Output Format',
'By default, all activity will be printed in the '
'following format: {url} {title}. This can be '
'changed for either all activity types or '
'individually for each activity type. The format '
'is defined as documented for '
'string.format(...). The variables available for '
'all activity types are url, title and author. '
'Format options for specific activity types will '
'override the generic format.')
output_format_group.add_option(
'-f', '--output-format', metavar='<format>',
default=u'{url} {title}',
help='Specifies the format to use when printing all your activity.')
output_format_group.add_option(
'--output-format-changes', metavar='<format>',
default=None,
help='Specifies the format to use when printing changes. Supports the '
'additional variable {reviewers}')
output_format_group.add_option(
'--output-format-issues', metavar='<format>',
default=None,
help='Specifies the format to use when printing issues. Supports the '
'additional variable {owner}.')
output_format_group.add_option(
'--output-format-reviews', metavar='<format>',
default=None,
help='Specifies the format to use when printing reviews.')
parser.add_option_group(output_format_group)
# Remove description formatting
parser.format_description = (
lambda _: parser.description) # pylint: disable=E1101
options, args = parser.parse_args()
options.local_user = os.environ.get('USER')
if args:
parser.error('Args unsupported')
if not options.user:
parser.error('USER is not set, please use -u')
options.user = username(options.user)
if not options.begin:
if options.last_quarter:
begin, end = quarter_begin, quarter_end
elif options.this_year:
begin, end = get_year_of(datetime.today())
elif options.week_of:
begin, end = (get_week_of(datetime.strptime(options.week_of, '%m/%d/%y')))
else:
begin, end = (get_week_of(datetime.today() - timedelta(days=1)))
else:
begin = datetime.strptime(options.begin, '%m/%d/%y')
if options.end:
end = datetime.strptime(options.end, '%m/%d/%y')
else:
end = datetime.today()
options.begin, options.end = begin, end
print 'Searching for activity by %s' % options.user
print 'Using range %s to %s' % (options.begin, options.end)
my_activity = MyActivity(options)
if not (options.changes or options.reviews or options.issues):
options.changes = True
options.issues = True
options.reviews = True
# First do any required authentication so none of the user interaction has to
# wait for actual work.
if options.changes:
my_activity.auth_for_changes()
if options.reviews:
my_activity.auth_for_reviews()
if options.issues:
my_activity.auth_for_issues()
print 'Looking up activity.....'
if options.changes:
my_activity.get_changes()
if options.reviews:
my_activity.get_reviews()
if options.issues:
my_activity.get_issues()
my_activity.process_activities()
print '\n\n\n'
my_activity.print_changes()
my_activity.print_reviews()
my_activity.print_issues()
return 0
if __name__ == '__main__':
sys.exit(main())
| coreos/depot_tools | my_activity.py | Python | bsd-3-clause | 36,618 |
# -*- coding: utf-8 -*-
"""
Copyright 2014-2015 Ratina
@author: Savor d'Isavano
@date: 2015-02-09
Paginator helpers
"""
__author__ = "Savor d'Isavano"
from django.core.paginator import (
Paginator, EmptyPage, PageNotAnInteger
)
def make_page(object_list, *, per_page=5, current_page=1):
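    """Paginate object_list, falling back to the first page for a non-integer
    page number and to the last page when the requested page is out of range."""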
paginator = Paginator(object_list, per_page)
try:
objects = paginator.page(current_page)
except PageNotAnInteger:
objects = paginator.page(1)
except EmptyPage:
objects = paginator.page(paginator.num_pages)
return objects
| Ratina/ratina_portal | utils/paginator.py | Python | gpl-3.0 | 566 |
# Generated by Django 2.2.13 on 2020-08-24 11:47
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('tasks', '0030_available'),
]
operations = [
migrations.AlterField(
model_name='task',
name='conditions',
field=models.ManyToManyField(blank=True, help_text='The list of conditions evaluated for this task.', related_name='tasks', to='conditions.Condition', verbose_name='Conditions'),
),
migrations.AlterField(
model_name='task',
name='end_attribute',
field=models.ForeignKey(blank=True, help_text='The attribute that is setting the end date for this task (optional, if no end date attribute is given, the start date attribute sets also the end date).', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='tasks_as_end', to='domain.Attribute', verbose_name='End date attribute'),
),
migrations.AlterField(
model_name='task',
name='start_attribute',
field=models.ForeignKey(blank=True, help_text='The attribute that is setting the start date for this task.', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='tasks_as_start', to='domain.Attribute', verbose_name='Start date attribute'),
),
]
| rdmorganiser/rdmo | rdmo/tasks/migrations/0031_related_name.py | Python | apache-2.0 | 1,396 |
#!/usr/bin/python
import re
print 'flag = {}'
print 'op = {}'
print 'tag = {}'
print 'detail = {}'
print 'search = {}'
print 'value = {}'
print 'prefs = {}'
print 'flag[\'base\'] = 0x20'
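# Scan ECCodes.abstract line by line and emit a python assignment for every
# EC_* constant (flag, op, tag, detail, search, value and prefs codes).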
with open("ECCodes.abstract") as f:
for line in f:
m = re.match(r"EC_CURRENT_PROTOCOL_VERSION[\t ]*(?P<code>0x[0-9a-fA-F]+)", line)
if m != None:
print 'protocol_version = ' + m.group('code')
m = re.match(r"EC_FLAG_(?P<type>[A-Z0-9_]*)[\t ]*(?P<code>0x[0-9a-fA-F]+)", line)
if m != None:
print 'flag[\''+m.group('type').lower() + '\'] = ' + m.group('code')
m = re.match(r"EC_OP_(?P<type>[A-Z0-9_]*)[\t ]*(?P<code>0x[0-9a-fA-F]+)", line)
if m != None:
print 'op[\''+m.group('type').lower() + '\'] = ' + m.group('code')
m = re.match(r"[\t ]*EC_TAG_(?P<type>[A-Z0-9_]*)[\t ]*(?P<code>0x[0-9a-fA-F]+)", line)
if m != None:
print 'tag[\''+m.group('type').lower() + '\'] = ' + m.group('code')
m = re.match(r"EC_DETAIL_(?P<type>[A-Z0-9_]*)[\t ]*(?P<code>0x[0-9a-fA-F]+)", line)
if m != None:
print 'detail[\''+m.group('type').lower() + '\'] = ' + m.group('code')
m = re.match(r"EC_SEARCH_(?P<type>[A-Z0-9_]*)[\t ]*(?P<code>0x[0-9a-fA-F]+)", line)
if m != None:
print 'search[\''+m.group('type').lower() + '\'] = ' + m.group('code')
m = re.match(r"EC_VALUE_(?P<type>[A-Z0-9_]*)[\t ]*(?P<code>0x[0-9a-fA-F]+)", line)
if m != None:
print 'value[\''+m.group('type').lower() + '\'] = ' + m.group('code')
m = re.match(r"EC_PREFS_(?P<type>[A-Z0-9_]*)[\t ]*(?P<code>0x[0-9a-fA-F]+)", line)
if m != None:
print 'prefs[\''+m.group('type').lower() + '\'] = ' + m.group('code') | castorinop/pyamulecmd | helper/gen_codes.py | Python | mit | 1,775 |
# -*- coding: utf-8 -*-
from flask import render_template
from flask.views import MethodView
from wbc.connectors import get_sphinx
class HomeHTML(MethodView):
def get(self):
return render_template('home.html', stats=self._get_stats())
@staticmethod
def _get_stats():
"""
:rtype: object
"""
sphinx = get_sphinx()
stats = sphinx.get_index_meta(index_name='wbc')
return {
'documents': int(stats['indexed_documents']),
'mbytes': int(int(stats['indexed_bytes']) / 1024 / 1024),
}
| macbre/wbc.macbre.net | app/wbc/views/html/home.py | Python | mit | 580 |
import os
from django.contrib.gis.utils import LayerMapping
from lingcod.spacing.models import *
def load_land(file_name, verbose=True):
mapping = {
'geometry' : 'POLYGON',
}
lm = prep_layer_mapping(file_name, Land, mapping)
lm.save(strict=True, verbose=verbose)
def prep_layer_mapping(shpfile_name, model, mapping):
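    """Build a LayerMapping for a shapefile stored in this app's data/ directory."""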
shpfile = os.path.abspath(os.path.join(os.path.dirname(__file__), 'data', shpfile_name))
lm = LayerMapping(model, shpfile, mapping, transform=False, encoding='iso-8859-1')
return lm | Alwnikrotikz/marinemap | lingcod/spacing/loader.py | Python | bsd-3-clause | 539 |
"""
URLResolver Addon for Kodi
Copyright (C) 2016 t0mm0, tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
from urlresolver.lib import kodi
from urlresolver.lib import log_utils
from urlresolver.lib import cache
from urlresolver.lib.url_dispatcher import URL_Dispatcher
url_dispatcher = URL_Dispatcher()
def __enum(**enums):
return type('Enum', (), enums)
MODES = __enum(AUTH_RD='auth_rd', RESET_RD='reset_rd', RESET_CACHE='reset_cache')
@url_dispatcher.register(MODES.AUTH_RD)
def auth_rd():
kodi.close_all()
from urlresolver.plugins import realdebrid
if realdebrid.RealDebridResolver().authorize_resolver():
kodi.notify(msg='Real-Debrid Resolver Authorized', duration=5000)
@url_dispatcher.register(MODES.RESET_RD)
def reset_rd():
kodi.close_all()
kodi.sleep(500) # sleep or reset won't work for some reason
from urlresolver.plugins import realdebrid
rd = realdebrid.RealDebridResolver()
rd.reset_authorization()
kodi.notify(msg='Real-Debrid Authorization Reset', duration=5000)
@url_dispatcher.register(MODES.RESET_CACHE)
def reset_cache():
if cache.reset_cache():
kodi.notify(msg='Cache Reset')
else:
kodi.notify(msg='Cache Reset Failed')
def main(argv=None):
if sys.argv: argv = sys.argv
queries = kodi.parse_query(sys.argv[2])
log_utils.log('Version: |%s| Queries: |%s|' % (kodi.get_version(), queries))
log_utils.log('Args: |%s|' % (argv))
# don't process params that don't match our url exactly. (e.g. plugin://plugin.video.1channel/extrafanart)
plugin_url = 'plugin://%s/' % (kodi.get_id())
if argv[0] != plugin_url:
return
mode = queries.get('mode', None)
url_dispatcher.dispatch(mode, queries)
if __name__ == '__main__':
sys.exit(main())
| wndias/bc.repository | script.module.urlresolver/lib/default.py | Python | gpl-2.0 | 2,422 |
from __future__ import absolute_import
from pex.base import *
| Yasumoto/commons | src/python/twitter/common/python/base.py | Python | apache-2.0 | 62 |
from model_mommy import mommy
from django.test import override_settings
from django.urls import reverse
from meupet.models import Pet, PetStatus
from meupet.tests.tests import MEDIA_ROOT, MeuPetTestCase
@override_settings(MEDIA_ROOT=MEDIA_ROOT)
class PosterTest(MeuPetTestCase):
def setUp(self):
super().setUp()
status = mommy.make(PetStatus, description="status")
self.admin.phone = "99 99999-9999"
self.admin.save()
self.pet = mommy.make(Pet, owner=self.admin, status=status)
self.resp = self.client.get(reverse("meupet:poster", kwargs={"slug": self.pet.slug}))
def test_template_used(self):
"""Makes sure the correct template is used"""
self.assertTemplateUsed(self.resp, "meupet/poster.html")
def test_pet_in_context(self):
"""The pet should be present in the context"""
pet = self.resp.context["pet"]
self.assertIsInstance(pet, Pet)
def test_poster_info(self):
"""Pet information should be presented in the poster"""
contents = [
self.pet.status.description,
self.pet.name,
self.pet.description,
self.pet.get_sex_display().lower(),
self.pet.get_size_display().lower(),
self.pet.owner.phone,
]
for expected in contents:
with self.subTest():
self.assertContains(self.resp, expected)
| dirtycoder/pets | pets/meupet/tests/test_view_poster.py | Python | mit | 1,430 |
from pathlib import Path
import importlib.util
import ray
import time
def import_and_execute_test_script(relative_path_to_test_script: str):
"""Imports and executes a module from a path relative to Ray repo root."""
# get the ray folder
ray_path = next(
x for x in Path(__file__).resolve().parents if str(x).endswith("/ray")
)
notebook_path = ray_path.joinpath(relative_path_to_test_script)
assert notebook_path.exists()
spec = importlib.util.spec_from_file_location("notebook_test", notebook_path)
notebook_test_module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(notebook_test_module)
def wait_for_cluster_client(
num_nodes: int, max_time_s: int, feedback_interval_s: int = 10
):
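    """Block until the connected Ray cluster reports at least num_nodes nodes."""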
assert ray.is_initialized()
curr_nodes = 0
start = time.time()
next_feedback = start
max_time = start + max_time_s
while not curr_nodes >= num_nodes:
now = time.time()
if now >= max_time:
raise RuntimeError(
f"Maximum wait time reached, but only "
f"{curr_nodes}/{num_nodes} nodes came up. Aborting."
)
if now >= next_feedback:
passed = now - start
print(
f"Waiting for more nodes to come up: "
f"{curr_nodes}/{num_nodes} "
f"({passed:.0f} seconds passed)"
)
next_feedback = now + feedback_interval_s
time.sleep(5)
curr_nodes = len(ray.nodes())
passed = time.time() - start
print(
f"Cluster is up: {curr_nodes}/{num_nodes} nodes online after "
f"{passed:.0f} seconds"
)
| ray-project/ray | release/golden_notebook_tests/workloads/util.py | Python | apache-2.0 | 1,672 |
from flask.blueprints import Blueprint
import logging
from flask_login import login_required, current_user
from waitlist.ts3.connection import send_poke
from flask import jsonify
bp = Blueprint('api_ts3', __name__)
logger = logging.getLogger(__name__)
@bp.route("/test_poke")
@login_required
def test_poke():
send_poke(current_user.get_eve_name(), "Test Poke")
resp = jsonify(status_code=201, message="Poke was send!")
resp.status_code = 201
return resp
| SpeedProg/eve-inc-waitlist | waitlist/blueprints/api/teamspeak.py | Python | mit | 473 |
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'project.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| integree/hello-world | manage.py | Python | mit | 648 |
from datetime import datetime
from lxml import etree
import pytest
from codalib import bagatom
@pytest.fixture(scope='module')
def queue_stub():
class QueueStub(object):
ark = 'ark:65443'
oxum = '1394.7'
url_list = 'http://example.com.urls'
status = 'Completed'
harvest_start = '2013-05-17T01:12:20Z'
harvest_end = '2013-05-17T01:12:33Z'
queue_position = 5
return QueueStub()
def test_return_value(queue_stub):
"""
Verify the return type is an instance of etree._Element.
"""
tree = bagatom.queueEntryToXML(queue_stub)
assert isinstance(tree, etree._Element)
def test_root_node(queue_stub):
"""
Verify the root element of the return tree is queueEntry.
"""
xml = bagatom.queueEntryToXML(queue_stub)
root = xml.xpath(
'/q:queueEntry',
namespaces={'q': bagatom.QXML_NAMESPACE}
)
assert len(root) == 1
@pytest.mark.parametrize('name,attr', [
('ark', 'ark'),
('oxum', 'oxum'),
('urlListLink', 'url_list'),
('status', 'status'),
('position', 'queue_position'),
('start', 'harvest_start'),
('end', 'harvest_end')
])
def test_has_element(name, attr, queue_stub):
"""
Check that various elements are present and have the expected text.
The `name` parameter is the element name, and the `attr` parameter is
the name of the attribute on the QueueStub fixture.
"""
xml = bagatom.queueEntryToXML(queue_stub)
element = xml.xpath(
'/q:queueEntry/q:{0}'.format(name),
namespaces={'q': bagatom.QXML_NAMESPACE}
)
expected = getattr(queue_stub, attr)
assert len(element) == 1
assert element[0].text == str(expected)
def test_queue_has_datetime_harvest_start(queue_stub):
"""
Check that queueEntryToXML accepts a datetime object in the
harvest_start property.
"""
queue_stub.harvest_start = datetime(2015, 1, 1, 0, 0, 0)
xml = bagatom.queueEntryToXML(queue_stub)
start = xml.xpath(
'/q:queueEntry/q:start',
namespaces={'q': bagatom.QXML_NAMESPACE}
)
assert len(start) == 1
assert start[0].text == queue_stub.harvest_start.strftime(
bagatom.TIME_FORMAT_STRING)
def test_queue_has_datetime_harvest_end(queue_stub):
"""
Check that queueEntryToXML accepts a datetime object in the
harvest_end property.
"""
queue_stub.harvest_end = datetime(2015, 1, 1, 0, 0, 0)
xml = bagatom.queueEntryToXML(queue_stub)
end = xml.xpath(
'/q:queueEntry/q:end',
namespaces={'q': bagatom.QXML_NAMESPACE}
)
assert len(end) == 1
assert end[0].text == queue_stub.harvest_end.strftime(
bagatom.TIME_FORMAT_STRING)
def test_queue_empty_start_end(queue_stub):
"""
Check that empty start/end values are omitted in QueueXML
"""
queue_stub.harvest_start = None
queue_stub.harvest_end = None
xml = bagatom.queueEntryToXML(queue_stub)
end = xml.xpath(
'/q:queueEntry/q:end',
namespaces={'q': bagatom.QXML_NAMESPACE}
)
start = xml.xpath(
'/q:queueEntry/q:start',
namespaces={'q': bagatom.QXML_NAMESPACE}
)
assert len(end) == 0
assert len(start) == 0
| unt-libraries/codalib | tests/bagatom/test_queueEntryToXML.py | Python | bsd-3-clause | 3,245 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import print_function
import os
import codecs
import shutil
from termcolor import colored
from jinja2 import Template
from studio.launch import ROOT_PATH
from jinja2 import Environment, FileSystemLoader
JDIR = os.path.join(ROOT_PATH, 'jinja')
JENV = Environment(loader=FileSystemLoader(JDIR))
class cd(object):
"""Context manager for changing the current working directory"""
def __init__(self, newPath):
self.newPath = newPath
def __enter__(self):
self.savedPath = os.getcwd()
os.chdir(self.newPath)
def __exit__(self, etype, value, traceback):
os.chdir(self.savedPath)
def mkdirs(path):
try:
print(colored('create directory %s' % path, 'blue'))
os.makedirs(path)
except OSError:
pass
def writefp(path, text):
with codecs.open(path, 'wb', 'utf-8') as fp:
print(colored('create file %s' % path, 'white'))
fp.write(text)
def build_structure(command, dist='.', tpl='default', **kwargs):
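    """Render the jinja template tree for the given command/tpl into dist.
    Directory and file names are rendered as templates; files ending in
    .jinja2 are rendered through JENV, everything else is copied verbatim.
    """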
with cd(dist):
TEMPLATE_DIR = os.path.join(JDIR, command, tpl)
for root, dirs, files in os.walk(TEMPLATE_DIR):
reldir = os.path.relpath(root, start=JDIR)
relcurdir = os.path.relpath(root, start=TEMPLATE_DIR)
for dname in dirs:
dpath = Template(os.path.join(relcurdir,
dname)).render(**kwargs)
mkdirs(dpath)
for fname in files:
real_fname = fname[:-7] if fname.endswith('.jinja2') else fname
fpath = Template(os.path.join(relcurdir,
real_fname)).render(**kwargs)
if fname.endswith('.jinja2'):
text = JENV.get_template(os.path.join(reldir,
fname)).render(**kwargs)
writefp(fpath, text)
else:
shutil.copyfile(os.path.join(JDIR, reldir, fname), fpath)
if __name__ == '__main__':
build_structure('pypi', appname='daydayup')
| qisanstudio/qstudio-launch | src/studio/launch/commands/contrib.py | Python | mit | 2,178 |
# -*- coding: utf-8 -*-
###
# (C) Copyright (2012-2017) Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from hpOneView.resources.resource import ResourceClient, extract_id_from_uri
class PlanScripts(object):
URI = '/rest/plan-scripts'
def __init__(self, con):
self._connection = con
self._client = ResourceClient(con, self.URI)
self.__default_values = {
'type': 'PlanScript',
}
def get_all(self, start=0, count=-1, filter='', sort=''):
"""
Gets a list of Plan Scripts based on optional sorting and filtering, and constrained by start and count
parameters.
Args:
start:
The first item to return, using 0-based indexing.
If not specified, the default is 0 - start with the first available item.
count:
The number of resources to return. A count of -1 requests all items.
The actual number of items in the response might differ from the requested
count if the sum of start and count exceeds the total number of items.
filter (list or str):
A general filter/query string to narrow the list of items returned. The
default is no filter; all resources are returned.
sort:
The sort order of the returned data set. By default, the sort order is based
on create time with the oldest entry first.
Returns:
list: A list of Plan Scripts.
"""
return self._client.get_all(start, count, filter=filter, sort=sort)
def delete(self, resource, force=False, timeout=-1):
"""
Deletes a Plan Script object from the appliance based on its Plan Script UUID.
Args:
resource: dict object to delete
force:
If set to true, the operation completes despite any problems with
network connectivity or errors on the resource itself. The default is false.
timeout:
Timeout in seconds. Waits for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
Returns:
bool: Indicates if the resource was successfully deleted.
"""
return self._client.delete(resource, force=force, timeout=timeout)
def get(self, id_or_uri):
"""
Retrieves the overview details of the selected Plan Script as per the selected attributes.
Args:
id_or_uri: ID or URI of the Plan Script.
Returns:
dict: The Plan Script.
"""
return self._client.get(id_or_uri)
def create(self, resource, timeout=-1):
"""
Adds a Plan Script using the information provided in the request body. The plan type can be one of the
following types: General, deploy and capture. Note: The OS type for the Plan Script is always assigned
as "ESXi".
Args:
resource (dict): Object to create.
timeout:
Timeout in seconds. Waits for task completion by default. The timeout does not abort the operation
in OneView, it just stops waiting for its completion.
Returns:
dict: Created Plan Script.
"""
data = self.__default_values.copy()
data.update(resource)
return self._client.create(data, timeout=timeout)
def update(self, resource, timeout=-1):
"""
Updates the properties of the Plan Script.
Args:
resource (dict): Object to update.
timeout:
Timeout in seconds. Waits for task completion by default. The timeout does not abort the operation
in OneView, it just stops waiting for its completion.
Returns:
dict: Updated resource.
"""
return self._client.update(resource, timeout=timeout)
def get_by(self, field, value):
"""
Gets all Plan Scripts that match the filter.
The search is case-insensitive.
Args:
field: Field name to filter.
value: Value to filter.
Returns:
list: A list of Plan Scripts.
"""
return self._client.get_by(field, value)
def retrieve_differences(self, id_or_uri, content, timeout=-1):
"""
Retrieves the modified contents of the selected Plan Script according to the provided content object, as per
the selected attributes.
Args:
id_or_uri: ID or URI of the Plan Script.
content (str): Plan Script content.
timeout:
Timeout in seconds. Waits for task completion by default. The timeout does not abort the operation
in OneView, it just stops waiting for its completion.
Returns:
dict: Script differences.
"""
uri = self.URI + "/differences/" + extract_id_from_uri(id_or_uri)
return self._client.create(content, uri=uri, timeout=timeout)
def get_usedby_and_readonly(self, id):
"""
        Gets the build plan details of the selected Plan Script as per the selected attributes.
Args:
id: ID of the Plan Script.
Returns:
            list: Build plans associated with the Plan Script.
"""
uri = self.URI + "/" + id + "/usedby/readonly"
return self._client.get(uri)
| HewlettPackard/python-hpOneView | hpOneView/image_streamer/resources/plan_scripts.py | Python | mit | 6,767 |
from __future__ import division, print_function, absolute_import
from warnings import warn
import numpy as np
from numpy import asarray, empty, ravel, nonzero
from scipy.sparse import (isspmatrix_csc, isspmatrix_csr, isspmatrix,
SparseEfficiencyWarning, csc_matrix)
from . import _superlu
noScikit = False
try:
import scikits.umfpack as umfpack
except ImportError:
noScikit = True
useUmfpack = not noScikit
__all__ = ['use_solver', 'spsolve', 'splu', 'spilu', 'factorized',
'MatrixRankWarning']
class MatrixRankWarning(UserWarning):
pass
def use_solver(**kwargs):
"""
Select default sparse direct solver to be used.
Parameters
----------
useUmfpack : bool, optional
Use UMFPACK over SuperLU. Has effect only if scikits.umfpack is
installed. Default: True
Notes
-----
The default sparse solver is umfpack when available
(scikits.umfpack is installed). This can be changed by passing
useUmfpack = False, which then causes the always present SuperLU
based solver to be used.
Umfpack requires a CSR/CSC matrix to have sorted column/row indices. If
sure that the matrix fulfills this, pass ``assumeSortedIndices=True``
to gain some speed.
"""
if 'useUmfpack' in kwargs:
globals()['useUmfpack'] = kwargs['useUmfpack']
#TODO: pass other options to scikit
def spsolve(A, b, permc_spec=None, use_umfpack=True):
"""Solve the sparse linear system Ax=b, where b may be a vector or a matrix.
Parameters
----------
A : ndarray or sparse matrix
The square matrix A will be converted into CSC or CSR form
b : ndarray or sparse matrix
The matrix or vector representing the right hand side of the equation.
If a vector, b.shape must be (n,) or (n, 1).
permc_spec : str, optional
How to permute the columns of the matrix for sparsity preservation.
(default: 'COLAMD')
- ``NATURAL``: natural ordering.
- ``MMD_ATA``: minimum degree ordering on the structure of A^T A.
- ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A.
- ``COLAMD``: approximate minimum degree column ordering
use_umfpack : bool, optional
if True (default) then use umfpack for the solution. This is
only referenced if b is a vector and ``scikit-umfpack`` is installed.
Returns
-------
x : ndarray or sparse matrix
the solution of the sparse linear equation.
If b is a vector, then x is a vector of size A.shape[1]
If b is a matrix, then x is a matrix of size (A.shape[1], b.shape[1])
Notes
-----
For solving the matrix expression AX = B, this solver assumes the resulting
matrix X is sparse, as is often the case for very sparse inputs. If the
resulting X is dense, the construction of this sparse result will be
relatively expensive. In that case, consider converting A to a dense
matrix and using scipy.linalg.solve or its variants.
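    Examples
    --------
    A small illustrative system (this sketch assumes ``numpy`` is imported
    as ``np``, as in the other examples in this module):
    >>> from scipy.sparse import csc_matrix
    >>> from scipy.sparse.linalg import spsolve
    >>> A = csc_matrix([[3, 2, 0], [1, -1, 0], [0, 5, 1]], dtype=float)
    >>> B = csc_matrix([[2, 0], [-1, 0], [2, 0]], dtype=float)
    >>> x = spsolve(A, B)
    >>> np.allclose(A.dot(x).toarray(), B.toarray())
    True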
"""
if not (isspmatrix_csc(A) or isspmatrix_csr(A)):
A = csc_matrix(A)
warn('spsolve requires A be CSC or CSR matrix format',
SparseEfficiencyWarning)
# b is a vector only if b have shape (n,) or (n, 1)
b_is_sparse = isspmatrix(b)
if not b_is_sparse:
b = asarray(b)
b_is_vector = ((b.ndim == 1) or (b.ndim == 2 and b.shape[1] == 1))
A.sort_indices()
A = A.asfptype() # upcast to a floating point format
# validate input shapes
M, N = A.shape
if (M != N):
raise ValueError("matrix must be square (has shape %s)" % ((M, N),))
if M != b.shape[0]:
raise ValueError("matrix - rhs dimension mismatch (%s - %s)"
% (A.shape, b.shape[0]))
use_umfpack = use_umfpack and useUmfpack
if b_is_vector and use_umfpack:
if b_is_sparse:
b_vec = b.toarray()
else:
b_vec = b
b_vec = asarray(b_vec, dtype=A.dtype).ravel()
if noScikit:
raise RuntimeError('Scikits.umfpack not installed.')
if A.dtype.char not in 'dD':
raise ValueError("convert matrix data to double, please, using"
" .astype(), or set linsolve.useUmfpack = False")
family = {'d': 'di', 'D': 'zi'}
umf = umfpack.UmfpackContext(family[A.dtype.char])
x = umf.linsolve(umfpack.UMFPACK_A, A, b_vec,
autoTranspose=True)
else:
if b_is_vector and b_is_sparse:
b = b.toarray()
b_is_sparse = False
if not b_is_sparse:
if isspmatrix_csc(A):
flag = 1 # CSC format
else:
flag = 0 # CSR format
options = dict(ColPerm=permc_spec)
x, info = _superlu.gssv(N, A.nnz, A.data, A.indices, A.indptr,
b, flag, options=options)
if info != 0:
warn("Matrix is exactly singular", MatrixRankWarning)
x.fill(np.nan)
if b_is_vector:
x = x.ravel()
else:
# b is sparse
Afactsolve = factorized(A)
if not isspmatrix_csc(b):
warn('spsolve is more efficient when sparse b '
'is in the CSC matrix format', SparseEfficiencyWarning)
b = csc_matrix(b)
# Create a sparse output matrix by repeatedly applying
# the sparse factorization to solve columns of b.
data_segs = []
row_segs = []
col_segs = []
for j in range(b.shape[1]):
bj = b[:, j].A.ravel()
xj = Afactsolve(bj)
w = np.flatnonzero(xj)
segment_length = w.shape[0]
row_segs.append(w)
col_segs.append(np.ones(segment_length, dtype=int)*j)
data_segs.append(np.asarray(xj[w], dtype=A.dtype))
sparse_data = np.concatenate(data_segs)
sparse_row = np.concatenate(row_segs)
sparse_col = np.concatenate(col_segs)
x = A.__class__((sparse_data, (sparse_row, sparse_col)),
shape=b.shape, dtype=A.dtype)
return x
def splu(A, permc_spec=None, diag_pivot_thresh=None,
drop_tol=None, relax=None, panel_size=None, options=dict()):
"""
Compute the LU decomposition of a sparse, square matrix.
Parameters
----------
A : sparse matrix
Sparse matrix to factorize. Should be in CSR or CSC format.
permc_spec : str, optional
How to permute the columns of the matrix for sparsity preservation.
(default: 'COLAMD')
- ``NATURAL``: natural ordering.
- ``MMD_ATA``: minimum degree ordering on the structure of A^T A.
- ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A.
- ``COLAMD``: approximate minimum degree column ordering
diag_pivot_thresh : float, optional
Threshold used for a diagonal entry to be an acceptable pivot.
See SuperLU user's guide for details [1]_
drop_tol : float, optional
(deprecated) No effect.
relax : int, optional
Expert option for customizing the degree of relaxing supernodes.
See SuperLU user's guide for details [1]_
panel_size : int, optional
Expert option for customizing the panel size.
See SuperLU user's guide for details [1]_
options : dict, optional
Dictionary containing additional expert options to SuperLU.
See SuperLU user guide [1]_ (section 2.4 on the 'Options' argument)
for more details. For example, you can specify
``options=dict(Equil=False, IterRefine='SINGLE'))``
to turn equilibration off and perform a single iterative refinement.
Returns
-------
invA : scipy.sparse.linalg.SuperLU
Object, which has a ``solve`` method.
See also
--------
spilu : incomplete LU decomposition
Notes
-----
This function uses the SuperLU library.
References
----------
.. [1] SuperLU http://crd.lbl.gov/~xiaoye/SuperLU/
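    Examples
    --------
    A minimal sketch of factoring once and reusing the decomposition (assumes
    ``numpy`` is imported as ``np``, as in the other examples in this module):
    >>> from scipy.sparse import csc_matrix
    >>> from scipy.sparse.linalg import splu
    >>> A = csc_matrix([[1., 0., 0.], [5., 0., 2.], [0., -1., 0.]])
    >>> lu = splu(A)
    >>> x = lu.solve(np.array([1., 2., 3.]))
    >>> np.allclose(A.dot(x), [1., 2., 3.])
    True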
"""
if not isspmatrix_csc(A):
A = csc_matrix(A)
warn('splu requires CSC matrix format', SparseEfficiencyWarning)
A.sort_indices()
A = A.asfptype() # upcast to a floating point format
M, N = A.shape
if (M != N):
raise ValueError("can only factor square matrices") # is this true?
_options = dict(DiagPivotThresh=diag_pivot_thresh, ColPerm=permc_spec,
PanelSize=panel_size, Relax=relax)
if options is not None:
_options.update(options)
return _superlu.gstrf(N, A.nnz, A.data, A.indices, A.indptr,
ilu=False, options=_options)
def spilu(A, drop_tol=None, fill_factor=None, drop_rule=None, permc_spec=None,
diag_pivot_thresh=None, relax=None, panel_size=None, options=None):
"""
Compute an incomplete LU decomposition for a sparse, square matrix.
The resulting object is an approximation to the inverse of `A`.
Parameters
----------
A : (N, N) array_like
Sparse matrix to factorize
drop_tol : float, optional
Drop tolerance (0 <= tol <= 1) for an incomplete LU decomposition.
(default: 1e-4)
fill_factor : float, optional
Specifies the fill ratio upper bound (>= 1.0) for ILU. (default: 10)
drop_rule : str, optional
Comma-separated string of drop rules to use.
Available rules: ``basic``, ``prows``, ``column``, ``area``,
``secondary``, ``dynamic``, ``interp``. (Default: ``basic,area``)
See SuperLU documentation for details.
Remaining other options
Same as for `splu`
Returns
-------
invA_approx : scipy.sparse.linalg.SuperLU
Object, which has a ``solve`` method.
See also
--------
splu : complete LU decomposition
Notes
-----
    For a better approximation to the inverse, you may need to
increase `fill_factor` AND decrease `drop_tol`.
This function uses the SuperLU library.
"""
if not isspmatrix_csc(A):
A = csc_matrix(A)
warn('splu requires CSC matrix format', SparseEfficiencyWarning)
A.sort_indices()
A = A.asfptype() # upcast to a floating point format
M, N = A.shape
if (M != N):
raise ValueError("can only factor square matrices") # is this true?
_options = dict(ILU_DropRule=drop_rule, ILU_DropTol=drop_tol,
ILU_FillFactor=fill_factor,
DiagPivotThresh=diag_pivot_thresh, ColPerm=permc_spec,
PanelSize=panel_size, Relax=relax)
if options is not None:
_options.update(options)
return _superlu.gstrf(N, A.nnz, A.data, A.indices, A.indptr,
ilu=True, options=_options)
def factorized(A):
"""
    Return a function for solving a sparse linear system, with A pre-factorized.
Parameters
----------
A : (N, N) array_like
Input.
Returns
-------
solve : callable
To solve the linear system of equations given in `A`, the `solve`
callable should be passed an ndarray of shape (N,).
Examples
--------
>>> from scipy.sparse.linalg import factorized
>>> A = np.array([[ 3. , 2. , -1. ],
... [ 2. , -2. , 4. ],
... [-1. , 0.5, -1. ]])
>>> solve = factorized(A) # Makes LU decomposition.
>>> rhs1 = np.array([1, -2, 0])
>>> solve(rhs1) # Uses the LU factors.
array([ 1., -2., -2.])
"""
if useUmfpack:
if noScikit:
raise RuntimeError('Scikits.umfpack not installed.')
if not isspmatrix_csc(A):
A = csc_matrix(A)
warn('splu requires CSC matrix format', SparseEfficiencyWarning)
A.sort_indices()
A = A.asfptype() # upcast to a floating point format
if A.dtype.char not in 'dD':
raise ValueError("convert matrix data to double, please, using"
" .astype(), or set linsolve.useUmfpack = False")
family = {'d': 'di', 'D': 'zi'}
umf = umfpack.UmfpackContext(family[A.dtype.char])
# Make LU decomposition.
umf.numeric(A)
def solve(b):
return umf.solve(umfpack.UMFPACK_A, A, b, autoTranspose=True)
return solve
else:
return splu(A).solve
| Gillu13/scipy | scipy/sparse/linalg/dsolve/linsolve.py | Python | bsd-3-clause | 12,538 |
import uuid
from PyDesignData.PyDesignObject import *
from PyDesignData.PyDesignAnalysis import *
from PyDesignData.PyDesignParameter import *
from PyDesignData.PyDesignUOM import *
import os
__author__ = 'magnus'
class PyDesignDocument(PyDesignNamedObject):
def __init__(self, name, pdid=None):
self._py_design_objects = {}
PyDesignNamedObject.__init__(self, name, self, pdid)
self._analyses = []
self._reports = []
self._path = ""
self._formulas = {}
self._global_properties = None
self._changed = False
self._base_units = []
if pdid is None: # New document
self._global_properties = PyDesignParameters("Global parameters", self)
self._base_units = PyDesignBaseUnit.create_base_units(self)
def get_expression(self, name):
expression = self._global_properties.get_parameter(name)
if expression is not None:
return expression
if name in self._formulas:
return self._formulas[name]
return None
def object_created(self, object):
"""
:type object: PyDesignObject
:param object:
:return:
"""
self._py_design_objects[object.id] = object
object.add_listener(self)
def on_event(self, event):
self._changed = True
@property
def changed(self):
return self._changed
@changed.setter
def changed(self, value):
self._changed = value
def get_object(self, pdid):
"""
:rtype: PyDesignObject
"""
return self._py_design_objects[pdid]
def get_analysis(self, index):
"""
:param index: The index of the analysis to get
:return: The analysis
:rtype: PyDesignAnalysis
"""
return self._analyses[index]
@property
def analysis_count(self):
return len(self._analyses)
def create_analysis(self, name, type):
"""
:param name: Name of analysis
:param type: Type of analysis
:return: Returns the newly created analysis
:rtype: PyDesignAnalysis
"""
analysis = PyDesignAnalysis(name, self, type)
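        # Fire begin/end "item added" events around the insertion so listeners can react to the change.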
self.do_event(PyDesignEvent(PyDesignEvent.BeforeItemAddedEvent, self, analysis))
self._analyses.append(analysis)
self.do_event(PyDesignEvent(PyDesignEvent.EndItemAddedEvent, self, analysis))
return analysis
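    # Illustrative usage (hypothetical values, not taken from this codebase):
    #   doc = PyDesignDocument("example document")
    #   analysis = doc.create_analysis("static case", "structural")
    #   assert doc.analysis_count == 1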
@property
def global_parameters(self):
return self._global_properties
@property
def report_count(self):
return len(self._reports)
@staticmethod
def create_id():
return uuid.uuid1().urn[9:]
@property
def path(self):
return self._path
@path.setter
def path(self, value):
self._path = value
def undo(self):
return
def redo(self):
return
def is_modified(self):
return self._changed
def object_jsonable(self):
return {
"analyses": self._analyses,
"globalParameters": self._global_properties,
"reports": self._reports,
"baseUnits": self._base_units
}
@staticmethod
def deserialize(data):
"""
:rtype: PyDesignDocument
:param data:
:return:
"""
doc = PyDesignDocument(data["name"], data["id"])
doc._global_properties = PyDesignParameters.deserialize(doc, data["globalParameters"])
if "analyses" in data:
for analysis_data in data["analyses"]:
analysis = PyDesignAnalysis.deserialize(doc, analysis_data)
doc._analyses.append(analysis)
if "baseUnits" in data:
for base_unit_data in data["baseUnits"]:
base_unit = PyDesignBaseUnit.deserialize(doc, base_unit_data)
doc._base_units.append(base_unit)
# for report_data in data["reports"]:
# report = PyDesignReport.deserialize(report_data)
# doc._reports.append(report)
return doc
| pracedru/pyDesign | PyDesignData/PyDesignDocument.py | Python | mit | 4,089 |
from uw_nws.models import Person, Channel, Endpoint, Subscription
| uw-it-aca/uw-restclients | vm/v1/viewmodels.py | Python | apache-2.0 | 66 |
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
from oslo_log import log as logging
import oslo_messaging
from oslo_utils import encodeutils
from glance.search.plugins import base
LOG = logging.getLogger(__name__)
class MetadefHandler(base.NotificationBase):
def __init__(self, *args, **kwargs):
super(MetadefHandler, self).__init__(*args, **kwargs)
self.namespace_delete_keys = ['deleted_at', 'deleted', 'created_at',
'updated_at', 'namespace_old']
self.property_delete_keys = ['deleted', 'deleted_at',
'name_old', 'namespace', 'name']
def process(self, ctxt, publisher_id, event_type, payload, metadata):
try:
actions = {
"metadef_namespace.create": self.create_ns,
"metadef_namespace.update": self.update_ns,
"metadef_namespace.delete": self.delete_ns,
"metadef_object.create": self.create_obj,
"metadef_object.update": self.update_obj,
"metadef_object.delete": self.delete_obj,
"metadef_property.create": self.create_prop,
"metadef_property.update": self.update_prop,
"metadef_property.delete": self.delete_prop,
"metadef_resource_type.create": self.create_rs,
"metadef_resource_type.delete": self.delete_rs,
"metadef_tag.create": self.create_tag,
"metadef_tag.update": self.update_tag,
"metadef_tag.delete": self.delete_tag,
"metadef_namespace.delete_properties": self.delete_props,
"metadef_namespace.delete_objects": self.delete_objects,
"metadef_namespace.delete_tags": self.delete_tags
}
actions[event_type](payload)
return oslo_messaging.NotificationResult.HANDLED
except Exception as e:
LOG.error(encodeutils.exception_to_unicode(e))
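    # Illustrative routing example (payload fields inferred from the handlers
    # below, not from an external specification): a "metadef_object.create"
    # notification is dispatched to create_obj(payload), where payload carries
    # at least 'namespace', 'name', 'description', 'required' and 'properties'.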
def run_create(self, id, payload):
self.engine.create(
index=self.index_name,
doc_type=self.document_type,
body=payload,
id=id
)
def run_update(self, id, payload, script=False):
if script:
self.engine.update(
index=self.index_name,
doc_type=self.document_type,
body=payload,
id=id)
else:
doc = {"doc": payload}
self.engine.update(
index=self.index_name,
doc_type=self.document_type,
body=doc,
id=id)
def run_delete(self, id):
self.engine.delete(
index=self.index_name,
doc_type=self.document_type,
id=id
)
def create_ns(self, payload):
id = payload['namespace']
self.run_create(id, self.format_namespace(payload))
def update_ns(self, payload):
id = payload['namespace_old']
self.run_update(id, self.format_namespace(payload))
def delete_ns(self, payload):
id = payload['namespace']
self.run_delete(id)
def create_obj(self, payload):
id = payload['namespace']
object = self.format_object(payload)
self.create_entity(id, "objects", object)
def update_obj(self, payload):
id = payload['namespace']
object = self.format_object(payload)
self.update_entity(id, "objects", object,
payload['name_old'], "name")
def delete_obj(self, payload):
id = payload['namespace']
self.delete_entity(id, "objects", payload['name'], "name")
def create_prop(self, payload):
id = payload['namespace']
property = self.format_property(payload)
self.create_entity(id, "properties", property)
def update_prop(self, payload):
id = payload['namespace']
property = self.format_property(payload)
self.update_entity(id, "properties", property,
payload['name_old'], "property")
def delete_prop(self, payload):
id = payload['namespace']
self.delete_entity(id, "properties", payload['name'], "property")
def create_rs(self, payload):
id = payload['namespace']
resource_type = dict()
resource_type['name'] = payload['name']
if payload['prefix']:
resource_type['prefix'] = payload['prefix']
if payload['properties_target']:
resource_type['properties_target'] = payload['properties_target']
self.create_entity(id, "resource_types", resource_type)
def delete_rs(self, payload):
id = payload['namespace']
self.delete_entity(id, "resource_types", payload['name'], "name")
def create_tag(self, payload):
id = payload['namespace']
tag = dict()
tag['name'] = payload['name']
self.create_entity(id, "tags", tag)
def update_tag(self, payload):
id = payload['namespace']
tag = dict()
tag['name'] = payload['name']
self.update_entity(id, "tags", tag, payload['name_old'], "name")
def delete_tag(self, payload):
id = payload['namespace']
self.delete_entity(id, "tags", payload['name'], "name")
def delete_props(self, payload):
self.delete_field(payload, "properties")
def delete_objects(self, payload):
self.delete_field(payload, "objects")
def delete_tags(self, payload):
self.delete_field(payload, "tags")
def create_entity(self, id, entity, entity_data):
script = ("if (ctx._source.containsKey('%(entity)s'))"
"{ctx._source.%(entity)s += entity_item }"
"else {ctx._source.%(entity)s=entity_list};" %
{"entity": entity})
params = {
"entity_item": entity_data,
"entity_list": [entity_data]
}
payload = {"script": script, "params": params}
self.run_update(id, payload=payload, script=True)
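    # The script above is an Elasticsearch scripted update: it appends
    # entity_item to the list field named by `entity`, or creates that field
    # from entity_list when it does not exist yet (interpretation based on the
    # script text itself; the scripting dialect depends on the deployed
    # Elasticsearch version).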
def update_entity(self, id, entity, entity_data, entity_id, field_name):
entity_id = entity_id.lower()
script = ("obj=null; for(entity_item :ctx._source.%(entity)s)"
"{if(entity_item['%(field_name)s'].toLowerCase() "
" == entity_id ) obj=entity_item;};"
"if(obj!=null)ctx._source.%(entity)s.remove(obj);"
"if (ctx._source.containsKey('%(entity)s'))"
"{ctx._source.%(entity)s += entity_item; }"
"else {ctx._source.%(entity)s=entity_list;}" %
{"entity": entity, "field_name": field_name})
params = {
"entity_item": entity_data,
"entity_list": [entity_data],
"entity_id": entity_id
}
payload = {"script": script, "params": params}
self.run_update(id, payload=payload, script=True)
def delete_entity(self, id, entity, entity_id, field_name):
entity_id = entity_id.lower()
script = ("obj=null; for(entity_item :ctx._source.%(entity)s)"
"{if(entity_item['%(field_name)s'].toLowerCase() "
" == entity_id ) obj=entity_item;};"
"if(obj!=null)ctx._source.%(entity)s.remove(obj);" %
{"entity": entity, "field_name": field_name})
params = {
"entity_id": entity_id
}
payload = {"script": script, "params": params}
self.run_update(id, payload=payload, script=True)
def delete_field(self, payload, field):
id = payload['namespace']
script = ("if (ctx._source.containsKey('%(field)s'))"
"{ctx._source.remove('%(field)s')}") % {"field": field}
payload = {"script": script}
self.run_update(id, payload=payload, script=True)
def format_namespace(self, payload):
for key in self.namespace_delete_keys:
if key in payload.keys():
del payload[key]
return payload
def format_object(self, payload):
formatted_object = dict()
formatted_object['name'] = payload['name']
formatted_object['description'] = payload['description']
if payload['required']:
formatted_object['required'] = payload['required']
formatted_object['properties'] = []
for property in payload['properties']:
formatted_property = self.format_property(property)
formatted_object['properties'].append(formatted_property)
return formatted_object
def format_property(self, payload):
prop_data = dict()
prop_data['property'] = payload['name']
for key, value in six.iteritems(payload):
if key not in self.property_delete_keys and value:
prop_data[key] = value
return prop_data
| vuntz/glance | glance/search/plugins/metadefs_notification_handler.py | Python | apache-2.0 | 9,496 |
########################################################################
# File : Optimizer.py
# Author : Stuart Paterson
########################################################################
"""
The Optimizer base class is an agent that polls for jobs with a specific
status and minor status pair. The checkJob method is overridden for all
optimizer instances and associated actions are performed there.
"""
__RCSID__ = "$Id$"
from DIRAC import S_OK, S_ERROR, exit as dExit
from DIRAC.Core.Base.AgentModule import AgentModule
from DIRAC.Core.Utilities.ClassAd.ClassAdLight import ClassAd
from DIRAC.AccountingSystem.Client.Types.Job import Job as AccountingJob
from DIRAC.WorkloadManagementSystem.DB.JobDB import JobDB
from DIRAC.WorkloadManagementSystem.DB.JobLoggingDB import JobLoggingDB
class OptimizerModule(AgentModule):
"""
The specific agents must provide the following methods:
* initialize() for initial settings
* beginExecution()
* execute() - the main method called in the agent cycle
* endExecution()
    * finalize() - the graceful exit of the agent, usually used
        for the agent restart
"""
#############################################################################
def __init__(self, *args, **kwargs):
""" c'tor
"""
AgentModule.__init__(self, *args, **kwargs)
self.jobDB = None
self.logDB = None
self.startingMinorStatus = None
self.startingMajorStatus = "Checking"
self.failedStatus = None
self.requiredJobInfo = 'jdl'
self._initResult = None
def initialize(self, jobDB=None, logDB=None):
""" Initialization of the Optimizer Agent.
"""
self.jobDB = JobDB() if jobDB is None else jobDB
if not self.jobDB.isValid():
dExit(1)
self.logDB = JobLoggingDB() if logDB is None else logDB
optimizerName = self.am_getModuleParam('agentName')
if optimizerName.endswith('Agent'):
optimizerName = optimizerName[:-len('Agent')]
self.am_setModuleParam('optimizerName', optimizerName)
self.startingMinorStatus = self.am_getModuleParam('optimizerName')
self.failedStatus = self.am_getOption("FailedJobStatus", 'Failed')
self.am_setOption("PollingTime", 30)
return self.initializeOptimizer()
def initializeOptimizer(self):
""" To be overwritten by inheriting class
"""
return S_OK()
#############################################################################
def execute(self):
""" The main agent execution method
"""
result = self.initializeOptimizer()
if not result['OK']:
return result
self._initResult = result['Value']
condition = {'Status': self.startingMajorStatus}
if self.startingMinorStatus:
condition['MinorStatus'] = self.startingMinorStatus
result = self.jobDB.selectJobs(condition)
if not result['OK']:
self.log.warn('Failed to get a job list from the JobDB')
return S_ERROR('Failed to get a job list from the JobDB')
if not result['Value']:
self.log.verbose('No pending jobs to process')
return S_OK('No work to do')
for job in result['Value']:
result = self.getJobDefinition(job)
if not result['OK']:
self.setFailedJob(job, result['Message'], '')
continue
jobDef = result['Value']
result = self.optimizeJob(job, jobDef['classad'])
return S_OK()
#############################################################################
def optimizeJob(self, job, classAdJob):
""" Call the corresponding Optimizer checkJob method
"""
self.log.info('Job %s will be processed by %sAgent' % (job, self.am_getModuleParam('optimizerName')))
result = self.checkJob(job, classAdJob)
if not result['OK']:
self.setFailedJob(job, result['Message'], classAdJob)
return result
#############################################################################
def getJobDefinition(self, job, jobDef=False):
""" Retrieve JDL of the Job and return jobDef dictionary
"""
if not jobDef:
jobDef = {}
# If not jdl in jobinfo load it
if 'jdl' not in jobDef:
if self.requiredJobInfo == 'jdlOriginal':
result = self.jobDB.getJobJDL(job, original=True)
if not result['OK']:
self.log.error("No JDL for job", "%s" % job)
return S_ERROR("No JDL for job")
jobDef['jdl'] = result['Value']
if self.requiredJobInfo == 'jdl':
result = self.jobDB.getJobJDL(job)
if not result['OK']:
self.log.error("No JDL for job", "%s" % job)
return S_ERROR("No JDL for job")
jobDef['jdl'] = result['Value']
# Load the classad if needed
if 'jdl' in jobDef and 'classad' not in jobDef:
try:
classad = ClassAd(jobDef['jdl'])
except BaseException:
self.log.debug("Cannot load JDL")
return S_ERROR('Illegal Job JDL')
if not classad.isOK():
self.log.debug("Warning: illegal JDL for job %s, will be marked problematic" % (job))
return S_ERROR('Illegal Job JDL')
jobDef['classad'] = classad
return S_OK(jobDef)
#############################################################################
def getOptimizerJobInfo(self, job, reportName):
"""This method gets job optimizer information that will
be used for
"""
self.log.verbose("self.jobDB.getJobOptParameter(%s,'%s')" % (job, reportName))
result = self.jobDB.getJobOptParameter(job, reportName)
if result['OK']:
value = result['Value']
if not value:
self.log.warn('JobDB returned null value for %s %s' % (job, reportName))
return S_ERROR('No optimizer info returned')
else:
try:
return S_OK(eval(value))
except BaseException as x:
return S_ERROR('Could not evaluate optimizer parameters: %s' % repr(x))
return result
#############################################################################
def setOptimizerJobInfo(self, job, reportName, value):
"""This method sets the job optimizer information that will subsequently
be used for job scheduling and TURL queries on the WN.
"""
self.log.verbose("self.jobDB.setJobOptParameter(%s,'%s','%s')" % (job, reportName, value))
if not self.am_Enabled():
return S_OK()
return self.jobDB.setJobOptParameter(job, reportName, str(value))
#############################################################################
def setOptimizerChain(self, job, value):
"""This method sets the job optimizer chain, in principle only needed by
one of the optimizers.
"""
self.log.verbose("self.jobDB.setOptimizerChain(%s,%s)" % (job, value))
if not self.am_Enabled():
return S_OK()
return self.jobDB.setOptimizerChain(job, value)
#############################################################################
def setNextOptimizer(self, job):
"""This method is executed when the optimizer instance has successfully
processed the job. The next optimizer in the chain will subsequently
start to work on the job.
"""
result = self.logDB.addLoggingRecord(job, status=self.startingMajorStatus,
minor=self.startingMinorStatus,
source=self.am_getModuleParam("optimizerName"))
if not result['OK']:
self.log.warn(result['Message'])
self.log.verbose("self.jobDB.setNextOptimizer(%s,'%s')" % (job, self.am_getModuleParam("optimizerName")))
return self.jobDB.setNextOptimizer(job, self.am_getModuleParam("optimizerName"))
#############################################################################
def updateJobStatus(self, job, status, minorStatus=None, appStatus=None):
"""This method updates the job status in the JobDB, this should only be
used to fail jobs due to the optimizer chain.
"""
self.log.verbose("self.jobDB.setJobStatus(%s,'Status/Minor/Application','%s'/'%s'/'%s',update=True)" %
(job, status, str(minorStatus), str(appStatus)))
if not self.am_Enabled():
return S_OK()
result = self.jobDB.setJobStatus(job, status, minorStatus, appStatus)
if not result['OK']:
return result
result = self.logDB.addLoggingRecord(job, status=status, minor=minorStatus, application=appStatus,
source=self.am_getModuleParam('optimizerName'))
if not result['OK']:
self.log.warn(result['Message'])
return S_OK()
#############################################################################
def setJobParam(self, job, reportName, value):
"""This method updates a job parameter in the JobDB.
"""
self.log.verbose("self.jobDB.setJobParameter(%s,'%s','%s')" % (job, reportName, value))
if not self.am_Enabled():
return S_OK()
return self.jobDB.setJobParameter(job, reportName, value)
#############################################################################
def setFailedJob(self, job, msg, classAdJob=None):
"""This method moves the job to the failed status
"""
self.log.verbose("self.updateJobStatus(%s,'%s','%s')" % (job, self.failedStatus, msg))
if not self.am_Enabled():
return S_OK()
self.updateJobStatus(job, self.failedStatus, msg)
if classAdJob:
self.sendAccountingRecord(job, msg, classAdJob)
#############################################################################
def checkJob(self, job, classad):
"""This method controls the checking of the job, should be overridden in a subclass
"""
self.log.warn('Optimizer: checkJob method should be implemented in a subclass')
return S_ERROR('Optimizer: checkJob method should be implemented in a subclass')
#############################################################################
def sendAccountingRecord(self, job, msg, classAdJob):
"""
Send and accounting record for the failed job
"""
accountingReport = AccountingJob()
accountingReport.setStartTime()
accountingReport.setEndTime()
owner = classAdJob.getAttributeString('Owner')
userGroup = classAdJob.getAttributeString('OwnerGroup')
jobGroup = classAdJob.getAttributeString('JobGroup')
jobType = classAdJob.getAttributeString('JobType')
jobClass = 'unknown'
if classAdJob.lookupAttribute('JobSplitType'):
jobClass = classAdJob.getAttributeString('JobSplitType')
inputData = []
processingType = 'unknown'
if classAdJob.lookupAttribute('ProcessingType'):
processingType = classAdJob.getAttributeString('ProcessingType')
if classAdJob.lookupAttribute('InputData'):
inputData = classAdJob.getListFromExpression('InputData')
inputDataFiles = len(inputData)
outputData = []
if classAdJob.lookupAttribute('OutputData'):
outputData = classAdJob.getListFromExpression('OutputData')
outputDataFiles = len(outputData)
acData = {
'User': owner,
'UserGroup': userGroup,
'JobGroup': jobGroup,
'JobType': jobType,
'JobClass': jobClass,
'ProcessingType': processingType,
'FinalMajorStatus': self.failedStatus,
'FinalMinorStatus': msg,
'CPUTime': 0.0,
'NormCPUTime': 0.0,
'ExecTime': 0.0,
'InputDataSize': 0.0,
'OutputDataSize': 0.0,
'InputDataFiles': inputDataFiles,
'OutputDataFiles': outputDataFiles,
'DiskSpace': 0.0,
'InputSandBoxSize': 0.0,
'OutputSandBoxSize': 0.0,
'ProcessedEvents': 0.0
}
self.log.verbose('Accounting Report is:')
self.log.verbose(acData)
accountingReport.setValuesFromDict(acData)
return accountingReport.commit()
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#
| petricm/DIRAC | WorkloadManagementSystem/Agent/OptimizerModule.py | Python | gpl-3.0 | 11,859 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests that all containers are imported correctly."""
import unittest
from tests import test_lib
class ContainersImportTest(test_lib.ImportCheckTestCase):
"""Tests that container classes are imported correctly."""
_IGNORABLE_FILES = frozenset(['manager.py', 'interface.py'])
def testContainersImported(self):
"""Tests that all parsers are imported."""
self._AssertFilesImportedInInit(
test_lib.CONTAINERS_PATH, self._IGNORABLE_FILES)
if __name__ == '__main__':
unittest.main()
| kiddinn/plaso | tests/containers/init_imports.py | Python | apache-2.0 | 558 |
import graph_tool as gt
from graph_tool import draw as gtd
import numpy as np
def lg2gt(g):
gr = gt.Graph()
vlabel = gr.new_vertex_property("string")
verts = {}
edges = {}
for v in g:
verts[v] = gr.add_vertex()
vlabel[verts[v]] = str(v)
gr.vertex_properties["label"] = vlabel
for v in g:
for w in g[v]:
edges[(v,w)] = gr.add_edge(verts[v], verts[w])
return gr
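# Illustrative usage (assumes g is an adjacency mapping whose keys are the
# integer vertex labels 1..n, as expected by plotg's circular layout):
#
#   g = {1: {2: {}, 3: {}}, 2: {3: {}}, 3: {}}
#   gr = lg2gt(g)          # graph_tool.Graph with a "label" vertex property
#   plotg(g)               # draws the graph using the circular positions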
def plotg(g, layout='sfdp', pos=True):
gg = lg2gt(g)
if not pos:
if layout=='fr':
pos = gtd.fruchterman_reingold_layout(gg)
else:
pos = gtd.sfdp_layout(gg)
else:
pos = gg.new_vertex_property("vector<double>")
n = gg.num_vertices()
s = 2.0*np.pi/n
for v in range(gg.num_vertices()):
idx = int(gg.vertex_properties['label'][gg.vertex(v)]) - 1
pos[gg.vertex(v)] = (n * np.cos(s * idx),
n * np.sin(s * idx))
gtd.graph_draw(gg, pos,
vertex_text=gg.vertex_properties['label'],
vertex_font_size=32,
edge_pen_width=1,
edge_marker_size=15,
vertex_pen_width=1,
vertex_fill_color=[0.62109375,
0.875 ,
0.23828125,
1])
| pliz/gunfolds | tools/gtool.py | Python | gpl-3.0 | 1,378 |
# -*- encoding: utf-8 -*-
################################################################################
# #
# Copyright (C) 2015-Today Carlos Eduardo Vercelino - CLVsol #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU Affero General Public License for more details. #
# #
# You should have received a copy of the GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
################################################################################
{
'name': 'To-Do',
'summary': 'Manage your personal Tasks with this module.',
'version': '1.0',
'author': 'Carlos Eduardo Vercelino - CLVsol',
'category': 'Generic Modules/Others',
'license': 'AGPL-3',
'website': 'http://clvsol.com',
'depends': [
'clv_base',
],
'data': [
],
'test': [],
'installable': True,
'application': False,
'active': False,
}
| CLVsol/odoo_addons | clv_todo/__openerp__.py | Python | agpl-3.0 | 1,829 |
from __future__ import division
import Gnuplot
import logging
# Check for empty lists because Gnuplot-py will not plot if there are no entries.
default_width = 900
default_height = 600
# TODO: Is this an appropriate place for such a list? Maybe fnprobe.db would
# be better?
reject_types = [ "bulk_request_chk",
"bulk_request_ssk",
"bulk_insert_chk",
"bulk_insert_ssk" ]
# TODO: Repetitive width, height, filename existence and defaults; using them to initialize. Method annotation?
def CDF(in_list):
"""
Takes an input list as from a database query: a list of singleton tuples
of values.
Sorts the list and changes each item to be an x value followed by a y value
that sums to 100 over the list. Also returns the list.
"""
# Appended for each entry - should all add up to 1.
height = 100.0 / max(1.0, len(in_list))
# For GNUPlot smooth cumulative to work as intended the input must be sorted.
in_list.sort()
# TODO: This is strange. Is there a better way to add an element to each singleton tuple?
for index in xrange(len(in_list)):
in_list[index] = [in_list[index][0], height]
return in_list
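# Illustrative example: CDF([[3.0], [1.0], [2.0]]) sorts the values and pairs
# each with an equal height of 100/3, returning
#   [[1.0, 33.33...], [2.0, 33.33...], [3.0, 33.33...]]
# so that GNUPlot's "smooth cumulative" traces a CDF that tops out at 100.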
def makePercentageHistogram(histMax, results):
"""
The histogram is capped at histMax.
results is a list of tuples of (value, occurrences).
Returns a list in which each element is [value, percentage] with those
at index maxHist being a sum of those at and above that value.
"""
# The database does not return a row for unseen values - fill them in.
hist = []
for value in range(histMax + 1):
hist.append([value, 0])
# Find count per value
for result in results:
if result[0] < len(hist):
hist[result[0]][1] = result[1]
else:
hist[histMax][1] += result[1]
# Replace each occurrence count with percentage.
total = max(1.0, sum([x[1] for x in hist]))
for entry in hist:
entry[1] = 100 * entry[1] / total
return hist
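# Illustrative example: makePercentageHistogram(2, [(0, 1), (1, 1), (5, 2)])
# folds the out-of-range value 5 into the top bucket and converts the counts
# (1, 1, 2 of 4 total) into percentages:
#   [[0, 25.0], [1, 25.0], [2, 50.0]]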
def g_init(width, height, filename):
g = Gnuplot.Gnuplot()
g('set terminal png size {0:n},{1:n}'.format(width, height))
g.set(output=filename)
return g
def add_sample_size_label(g, size):
g('set label "N = {0:n}" at graph 0.5, 0.9 center'.format(size))
def get_total_occurrences(in_list):
"""
Return total occurrences. Same input as makePercentageHistogram().
"""
total = 0
# TODO: Is there a one-liner for this?
for _, occurrences in in_list:
total += occurrences
return total
def plot_link_length(lengths, width=default_width, height=default_height,
filename=None):
    if len(lengths) == 0:
logging.warning("No link lengths to plot.")
lengths = [[0.01]]
g = g_init(width, height, filename)
g.title('Link Length Distribution')
g.xlabel('Link Length (delta location)')
g.ylabel('Percent links with this length or less')
add_sample_size_label(g, len(lengths))
g('set logscale x')
# As location is circular and [0,1), largest difference is 0.5.
g.set(xrange='[0.00001:0.5]')
g.set(yrange='[0:100]')
g.plot(Gnuplot.Data(CDF(lengths), smooth='cumulative', title='Measured'),
Gnuplot.File('ideal-link', smooth='cumulative', title='Ideal'),
Gnuplot.File('flat-link', smooth='cumulative', title='Flat'))
# TODO: Generate the flat and ideal link length distribution files if they
# are missing. See:
# flat: https://github.com/Thynix/routing-simulator/blob/master/src/main/java/org/freenetproject/routing_simulator/graph/linklength/UniformLinkSource.java
# ideal: https://github.com/Thynix/routing-simulator/blob/master/src/main/java/org/freenetproject/routing_simulator/graph/linklength/KleinbergLinkSource.java
def plot_location_dist(locations, width=default_width, height=default_height,
filename=None):
    if len(locations) == 0:
logging.warning("No locations to plot.")
locations = [[0.5]]
g = g_init(width, height, filename)
g('set key off')
g.title('Location Distribution')
g.xlabel('Location')
g.ylabel('Percent nodes with this location or less')
add_sample_size_label(g, len(locations))
g.set(xrange='[0:1.0]')
g.set(yrange='[0:100]')
g.plot(Gnuplot.Data(CDF(locations), smooth='cumulative'))
def plot_peer_count(counts, histMax, width=default_width,
height=default_height, filename=None):
    if len(counts) == 0:
logging.warning("No peer counts to plot.")
counts = [[0, 0]]
g = g_init(width, height, filename)
g('set key off')
g.title('Peer Count Distribution')
g.xlabel('Reported Peers')
g.ylabel('Percent of Reports')
    # TODO: Histogram-ness? Count total occurrences.
add_sample_size_label(g, get_total_occurrences(counts))
g('set style data histogram')
g('set style fill solid border -1')
# Could mean missing the details of things beyond the bounds.
g.set(xrange='[1:%s]' % histMax)
g.set(yrange='[0:]')
g('set xtics 5')
g.plot(Gnuplot.Data(makePercentageHistogram(histMax, counts), with_='boxes'))
def plot_bulk_reject(counts, width=default_width, height=default_height,
filename=None):
g = g_init(width, height, filename)
g.title('Reject Distribution')
g.xlabel('Reported reject percentage')
g.ylabel('Percent reports')
# counts is a list of (value, occurrences) keyed by queue type. Any
# sample from any of the queue types could be omitted as "no data",
# so the actual sample size is not available from here. Use
# whichever happened to have the least "no data".
add_sample_size_label(g,
max([get_total_occurrences(x)
for x in counts.itervalues()]))
g('set style data histogram')
g('set logscale x')
g.set(xrange='[1:100]')
g.set(yrange='[0:]')
assert len(counts) > 0
for item in counts.items():
key = item[0]
        if len(item[1]) == 0:
logging.warning("No entries for {0}.".format(item[0]))
counts[key] = [[0, 0]]
counts[key] = makePercentageHistogram(100, item[1])
# Title of each is the database table name, which is the map key.
g.plot(*[Gnuplot.Data(item[1], title=item[0], with_='lines') for item in counts.iteritems()])
def plot_uptime(uptimes, histMax, width=default_width, height=default_height,
filename=None):
    if len(uptimes) == 0:
logging.warning("No uptimes to plot.")
uptimes = [[0, 0]]
# Adjust report by inverse uptime to get a network percentage estimate:
# nodes with high uptime are more often online to appear in results.
uptimes = [(uptime, count / (uptime + 10)) for uptime, count in uptimes]
g = g_init(width, height, filename)
g.title('Uptime Distribution')
g.xlabel('Reported 7-day uptime percentage')
g.ylabel('Estimated network percentage')
add_sample_size_label(g, get_total_occurrences(uptimes))
g('set style data histogram')
g('set style fill solid border -1')
g.set(xrange='[0:120]')
g.set(yrange='[0:]')
g.plot(Gnuplot.Data(makePercentageHistogram(histMax, uptimes), with_='boxes'))
| Thynix/pyProbe | fnprobe/gnuplots.py | Python | gpl-3.0 | 7,321 |
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test ZMQ interface
#
from test_framework.test_framework import NavCoinTestFramework
from test_framework.util import *
import zmq
import struct
import http.client
import urllib.parse
class ZMQTest (NavCoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 4
port = 25555
def setup_nodes(self):
self.zmqContext = zmq.Context()
self.zmqSubSocket = self.zmqContext.socket(zmq.SUB)
self.zmqSubSocket.setsockopt(zmq.SUBSCRIBE, b"hashblock")
self.zmqSubSocket.setsockopt(zmq.SUBSCRIBE, b"hashtx")
self.zmqSubSocket.connect("tcp://127.0.0.1:%i" % self.port)
return start_nodes(self.num_nodes, self.options.tmpdir, extra_args=[
['-zmqpubhashtx=tcp://127.0.0.1:'+str(self.port), '-zmqpubhashblock=tcp://127.0.0.1:'+str(self.port)],
[],
[],
[]
])
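    # Each published notification arrives as a multipart frame of
    # [topic, payload, little-endian uint32 sequence number]; run_test below
    # unpacks the messages accordingly (layout inferred from this test, not
    # from external documentation).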
def run_test(self):
self.sync_all()
genhashes = self.nodes[0].generate(1)
self.sync_all()
print("listen...")
msg = self.zmqSubSocket.recv_multipart()
topic = msg[0]
assert_equal(topic, b"hashtx")
body = msg[1]
nseq = msg[2]
msgSequence = struct.unpack('<I', msg[-1])[-1]
assert_equal(msgSequence, 0) #must be sequence 0 on hashtx
msg = self.zmqSubSocket.recv_multipart()
topic = msg[0]
body = msg[1]
msgSequence = struct.unpack('<I', msg[-1])[-1]
assert_equal(msgSequence, 0) #must be sequence 0 on hashblock
blkhash = bytes_to_hex_str(body)
assert_equal(genhashes[0], blkhash) #blockhash from generate must be equal to the hash received over zmq
n = 10
genhashes = self.nodes[1].generate(n)
self.sync_all()
zmqHashes = []
blockcount = 0
for x in range(0,n*2):
msg = self.zmqSubSocket.recv_multipart()
topic = msg[0]
body = msg[1]
if topic == b"hashblock":
zmqHashes.append(bytes_to_hex_str(body))
msgSequence = struct.unpack('<I', msg[-1])[-1]
assert_equal(msgSequence, blockcount+1)
blockcount += 1
for x in range(0,n):
assert_equal(genhashes[x], zmqHashes[x]) #blockhash from generate must be equal to the hash received over zmq
#test tx from a second node
hashRPC = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1.0)
self.sync_all()
# now we should receive a zmq msg because the tx was broadcast
msg = self.zmqSubSocket.recv_multipart()
topic = msg[0]
body = msg[1]
hashZMQ = ""
if topic == b"hashtx":
hashZMQ = bytes_to_hex_str(body)
msgSequence = struct.unpack('<I', msg[-1])[-1]
assert_equal(msgSequence, blockcount+1)
        assert_equal(hashRPC, hashZMQ) #txid from sendtoaddress must be equal to the hash received over zmq
if __name__ == '__main__':
ZMQTest ().main ()
| navcoindev/navcoin-core | qa/rpc-tests/zmq_test.py | Python | mit | 3,264 |